
SCSI updates on 20120319

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2.0.18 (GNU/Linux)
 
 iQEcBAABAgAGBQJPZxSnAAoJEDeqqVYsXL0M0Y4IAMX0vrTVZbg6psA5/gMcWGRP
 CkFXEQ8n0PL2SCaj6BoDqamJFe5Nc7dnqxM0fGawB4S9vr3rHhiOlwO+NbV9zFYC
 2skBTpeL3sjgtN/jTBdfeeAa7xTYpu/XGyei0NS1A5c2AyMVXV0uYV2s4VNZxe44
 tVIn1OEzM2giZ9EB1OZslDMrg5XXm3MBIUECP0LbWUhBm/35caSFKzMXRwhh7WiK
 +AVmc2AZYtdEwuknDyiH7KlsaoB3vGL9pPrAUJzIgEhy2pOo2A7W72HfA4Fj+y6a
 uF9HBS5zciMp1+sGWry62AjNbWgin9BRlozBEO/lJhIfMGDV1nXEIJsOkOgkdoE=
 =1TxB
 -----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

SCSI updates from James Bottomley:
 "The update includes the usual assortment of driver updates (lpfc,
  qla2xxx, qla4xxx, bfa, bnx2fc, bnx2i, isci, fcoe, hpsa) plus a huge
  amount of infrastructure work in the SAS library and transport class
  as well as an iSCSI update.  There's also a new SCSI based virtio
  driver."

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (177 commits)
  [SCSI] qla4xxx: Update driver version to 5.02.00-k15
  [SCSI] qla4xxx: trivial cleanup
  [SCSI] qla4xxx: Fix sparse warning
  [SCSI] qla4xxx: Add support for multiple session per host.
  [SCSI] qla4xxx: Export CHAP index as sysfs attribute
  [SCSI] scsi_transport: Export CHAP index as sysfs attribute
  [SCSI] qla4xxx: Add support to display CHAP list and delete CHAP entry
  [SCSI] iscsi_transport: Add support to display CHAP list and delete CHAP entry
  [SCSI] pm8001: fix endian issue with code optimization.
  [SCSI] pm8001: Fix possible racing condition.
  [SCSI] pm8001: Fix bogus interrupt state flag issue.
  [SCSI] ipr: update PCI ID definitions for new adapters
  [SCSI] qla2xxx: handle default case in qla2x00_request_firmware()
  [SCSI] isci: improvements in driver unloading routine
  [SCSI] isci: improve phy event warnings
  [SCSI] isci: debug, provide state-enum-to-string conversions
  [SCSI] scsi_transport_sas: 'enable' phys on reset
  [SCSI] libsas: don't recover end devices attached to disabled phys
  [SCSI] libsas: fixup target_port_protocols for expanders that don't report sata
  [SCSI] libsas: set attached device type and target protocols for local phys
  ...
Linus Torvalds, 2012-03-22 12:55:29 -07:00
commit 424a6f6ef9
154 changed files with 10279 additions and 4382 deletions


@ -1,48 +1,11 @@
Copyright (c) 2003-2011 QLogic Corporation
QLogic Linux/ESX Fibre Channel HBA Driver
QLogic Linux FC-FCoE Driver
This program includes a device driver for Linux 2.6/ESX that may be
distributed with QLogic hardware specific firmware binary file.
This program includes a device driver for Linux 3.x.
You may modify and redistribute the device driver code under the
GNU General Public License (a copy of which is attached hereto as
Exhibit A) published by the Free Software Foundation (version 2).
You may redistribute the hardware specific firmware binary file
under the following terms:
1. Redistribution of source code (only if applicable),
must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Redistribution in binary form must reproduce the above
copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other
materials provided with the distribution.
3. The name of QLogic Corporation may not be used to
endorse or promote products derived from this software
without specific prior written permission.
REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
COMBINATION WITH THIS PROGRAM.
EXHIBIT A


@ -0,0 +1,82 @@
Linux driver for Brocade FC/FCOE adapters
Supported Hardware
------------------
The bfa 3.0.2.2 driver supports all Brocade FC/FCOE adapters. Below is a list of
adapter models with corresponding PCIIDs.
PCIID Model
1657:0013:1657:0014 425 4Gbps dual port FC HBA
1657:0013:1657:0014 825 8Gbps PCIe dual port FC HBA
1657:0013:103c:1742 HP 82B 8Gbps PCIe dual port FC HBA
1657:0013:103c:1744 HP 42B 4Gbps dual port FC HBA
1657:0017:1657:0014 415 4Gbps single port FC HBA
1657:0017:1657:0014 815 8Gbps single port FC HBA
1657:0017:103c:1741 HP 41B 4Gbps single port FC HBA
1657:0017:103c:1743 HP 81B 8Gbps single port FC HBA
1657:0021:103c:1779 804 8Gbps FC HBA for HP Bladesystem c-class
1657:0014:1657:0014 1010 10Gbps single port CNA - FCOE
1657:0014:1657:0014 1020 10Gbps dual port CNA - FCOE
1657:0014:1657:0014 1007 10Gbps dual port CNA - FCOE
1657:0014:1657:0014 1741 10Gbps dual port CNA - FCOE
1657:0022:1657:0024 1860 16Gbps FC HBA
1657:0022:1657:0022 1860 10Gbps CNA - FCOE
Firmware download
-----------------
The latest firmware package for the 3.0.2.2 bfa driver can be found at:
http://www.brocade.com/services-support/drivers-downloads/adapters/Linux.page
and then click the respective firmware package link:
Version Link
v3.0.0.0 Linux Adapter Firmware package for RHEL 6.2, SLES 11SP2
Configuration & Management utility download
-------------------------------------------
The latest driver configuration & management utility for the 3.0.2.2 bfa driver
can be found at:
http://www.brocade.com/services-support/drivers-downloads/adapters/Linux.page
and then click the respective util package link:
Version Link
v3.0.2.0 Linux Adapter Firmware package for RHEL 6.2, SLES 11SP2
Documentation
-------------
The latest Administrator's Guide, Installation and Reference Manual,
Troubleshooting Guide, and Release Notes for the corresponding out-of-box
driver can be found at:
http://www.brocade.com/services-support/drivers-downloads/adapters/Linux.page
and use the following inbox and out-of-box driver version mapping to find
the corresponding documentation:
Inbox Version Out-of-box Version
v3.0.2.2 v3.0.0.0
Support
-------
For general product and support info, go to the Brocade website at:
http://www.brocade.com/services-support/index.page


@ -398,21 +398,6 @@ struct sas_task {
task_done -- callback when the task has finished execution
};
When an external entity (an entity other than the LLDD or the
SAS Layer) wants to work with a struct domain_device, it
_must_ call kobject_get() when taking a handle on the
device and kobject_put() when it is done with the device; a
sketch of this pattern follows the list below.
This does two things:
A) implements proper kfree() for the device;
B) increments/decrements the kref for all players:
domain_device
all domain_device's ... (if past an expander)
port
host adapter
pci device
and up the ladder, etc.
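For illustration only, here is a minimal sketch of the hold/put pattern this
section describes. The member name dev_obj is an assumption for the embedded
kobject, not something taken from this document; check the libsas headers of
your tree for the actual field:

	/* Hypothetical helper: pin a domain_device while inspecting it.
	 * 'dev_obj' stands in for whatever kobject libsas embeds in
	 * struct domain_device. */
	static void inspect_device(struct domain_device *dev)
	{
		kobject_get(&dev->dev_obj);	/* holds dev and everything up the ladder */
		/* ... safely dereference dev here ... */
		kobject_put(&dev->dev_obj);	/* the last put kfree()s the device */
	}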
DISCOVERY
---------


@ -5936,29 +5936,31 @@ void ata_host_init(struct ata_host *host, struct device *dev,
host->ops = ops;
}
void __ata_port_probe(struct ata_port *ap)
{
struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned long flags;
/* kick EH for boot probing */
spin_lock_irqsave(ap->lock, flags);
ehi->probe_mask |= ATA_ALL_DEVICES;
ehi->action |= ATA_EH_RESET;
ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
ap->pflags &= ~ATA_PFLAG_INITIALIZING;
ap->pflags |= ATA_PFLAG_LOADING;
ata_port_schedule_eh(ap);
spin_unlock_irqrestore(ap->lock, flags);
}
int ata_port_probe(struct ata_port *ap)
{
int rc = 0;
/* probe */
if (ap->ops->error_handler) {
struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned long flags;
/* kick EH for boot probing */
spin_lock_irqsave(ap->lock, flags);
ehi->probe_mask |= ATA_ALL_DEVICES;
ehi->action |= ATA_EH_RESET;
ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
ap->pflags &= ~ATA_PFLAG_INITIALIZING;
ap->pflags |= ATA_PFLAG_LOADING;
ata_port_schedule_eh(ap);
spin_unlock_irqrestore(ap->lock, flags);
/* wait for EH to finish */
__ata_port_probe(ap);
ata_port_wait_eh(ap);
} else {
DPRINTK("ata%u: bus probe begin\n", ap->print_id);


@ -863,6 +863,7 @@ void ata_port_wait_eh(struct ata_port *ap)
goto retry;
}
}
EXPORT_SYMBOL_GPL(ata_port_wait_eh);
static int ata_eh_nr_in_flight(struct ata_port *ap)
{


@ -3838,6 +3838,19 @@ void ata_sas_port_stop(struct ata_port *ap)
}
EXPORT_SYMBOL_GPL(ata_sas_port_stop);
int ata_sas_async_port_init(struct ata_port *ap)
{
int rc = ap->ops->port_start(ap);
if (!rc) {
ap->print_id = ata_print_id++;
__ata_port_probe(ap);
}
return rc;
}
EXPORT_SYMBOL_GPL(ata_sas_async_port_init);
/**
* ata_sas_port_init - Initialize a SATA device
* @ap: SATA port to initialize


@ -105,6 +105,7 @@ extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
extern struct ata_port *ata_port_alloc(struct ata_host *host);
extern const char *sata_spd_string(unsigned int spd);
extern int ata_port_probe(struct ata_port *ap);
extern void __ata_port_probe(struct ata_port *ap);
/* libata-acpi.c */
#ifdef CONFIG_ATA_ACPI
@ -151,7 +152,6 @@ extern void ata_eh_acquire(struct ata_port *ap);
extern void ata_eh_release(struct ata_port *ap);
extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
extern void ata_scsi_error(struct Scsi_Host *host);
extern void ata_port_wait_eh(struct ata_port *ap);
extern void ata_eh_fastdrain_timerfn(unsigned long arg);
extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
extern void ata_dev_disable(struct ata_device *dev);


@ -1903,6 +1903,14 @@ config SCSI_BFA_FC
To compile this driver as a module, choose M here. The module will
be called bfa.
config SCSI_VIRTIO
tristate "virtio-scsi support (EXPERIMENTAL)"
depends on EXPERIMENTAL && VIRTIO
help
This is the virtual HBA driver for virtio. If the kernel will
be used in a virtual machine, say Y or M.
endif # SCSI_LOWLEVEL
source "drivers/scsi/pcmcia/Kconfig"


@ -141,6 +141,7 @@ obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
obj-$(CONFIG_SCSI_VIRTIO) += virtio_scsi.o
obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o
obj-$(CONFIG_HYPERV_STORAGE) += hv_storvsc.o


@ -151,7 +151,11 @@ int aac_msi;
int aac_commit = -1;
int startup_timeout = 180;
int aif_timeout = 120;
int aac_sync_mode; /* Only Sync. transfer - disabled */
module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
" 0=off, 1=on");
module_param(nondasd, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices."
" 0=off, 1=on");


@ -12,7 +12,7 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
# define AAC_DRIVER_BUILD 28000
# define AAC_DRIVER_BUILD 28900
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@ -756,8 +756,16 @@ struct src_mu_registers {
struct src_registers {
struct src_mu_registers MUnit; /* 00h - c7h */
__le32 reserved1[130790]; /* c8h - 7fc5fh */
struct src_inbound IndexRegs; /* 7fc60h */
union {
struct {
__le32 reserved1[130790]; /* c8h - 7fc5fh */
struct src_inbound IndexRegs; /* 7fc60h */
} tupelo;
struct {
__le32 reserved1[974]; /* c8h - fffh */
struct src_inbound IndexRegs; /* 1000h */
} denali;
} u;
};
#define src_readb(AEP, CSR) readb(&((AEP)->regs.src.bar0->CSR))
@ -999,6 +1007,10 @@ struct aac_bus_info_response {
#define AAC_OPT_NEW_COMM cpu_to_le32(1<<17)
#define AAC_OPT_NEW_COMM_64 cpu_to_le32(1<<18)
#define AAC_OPT_NEW_COMM_TYPE1 cpu_to_le32(1<<28)
#define AAC_OPT_NEW_COMM_TYPE2 cpu_to_le32(1<<29)
#define AAC_OPT_NEW_COMM_TYPE3 cpu_to_le32(1<<30)
#define AAC_OPT_NEW_COMM_TYPE4 cpu_to_le32(1<<31)
struct aac_dev
{
@ -1076,6 +1088,8 @@ struct aac_dev
# define AAC_MIN_FOOTPRINT_SIZE 8192
# define AAC_MIN_SRC_BAR0_SIZE 0x400000
# define AAC_MIN_SRC_BAR1_SIZE 0x800
# define AAC_MIN_SRCV_BAR0_SIZE 0x100000
# define AAC_MIN_SRCV_BAR1_SIZE 0x400
#endif
union
{
@ -1116,7 +1130,10 @@ struct aac_dev
u8 msi;
int management_fib_count;
spinlock_t manage_lock;
spinlock_t sync_lock;
int sync_mode;
struct fib *sync_fib;
struct list_head sync_fib_list;
};
#define aac_adapter_interrupt(dev) \
@ -1163,6 +1180,7 @@ struct aac_dev
#define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001)
#define FIB_CONTEXT_FLAG (0x00000002)
#define FIB_CONTEXT_FLAG_WAIT (0x00000004)
/*
* Define the command values
@ -1970,6 +1988,7 @@ int aac_rkt_init(struct aac_dev *dev);
int aac_nark_init(struct aac_dev *dev);
int aac_sa_init(struct aac_dev *dev);
int aac_src_init(struct aac_dev *dev);
int aac_srcv_init(struct aac_dev *dev);
int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify);
unsigned int aac_response_normal(struct aac_queue * q);
unsigned int aac_command_normal(struct aac_queue * q);


@ -325,12 +325,14 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
{
u32 status[5];
struct Scsi_Host * host = dev->scsi_host_ptr;
extern int aac_sync_mode;
/*
* Check the preferred comm settings, defaults from template.
*/
dev->management_fib_count = 0;
spin_lock_init(&dev->manage_lock);
spin_lock_init(&dev->sync_lock);
dev->max_fib_size = sizeof(struct hw_fib);
dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
- sizeof(struct aac_fibhdr)
@ -344,13 +346,21 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
(status[0] == 0x00000001)) {
if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
dev->raw_io_64 = 1;
if (dev->a_ops.adapter_comm) {
if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1)) {
dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
dev->raw_io_interface = 1;
} else if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM)) {
dev->sync_mode = aac_sync_mode;
if (dev->a_ops.adapter_comm &&
(status[1] & le32_to_cpu(AAC_OPT_NEW_COMM))) {
dev->comm_interface = AAC_COMM_MESSAGE;
dev->raw_io_interface = 1;
if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1))) {
/* driver supports TYPE1 (Tupelo) */
dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
} else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE4)) ||
(status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE3)) ||
(status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE2))) {
/* driver doesn't support TYPE2 (Series7), TYPE3 and TYPE4 */
/* switch to sync. mode */
dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
dev->sync_mode = 1;
}
}
if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
@ -455,6 +465,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
}
INIT_LIST_HEAD(&dev->fib_list);
INIT_LIST_HEAD(&dev->sync_fib_list);
return dev;
}


@ -416,6 +416,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
unsigned long flags = 0;
unsigned long qflags;
unsigned long mflags = 0;
unsigned long sflags = 0;
if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
@ -512,6 +513,31 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
spin_lock_irqsave(&fibptr->event_lock, flags);
}
if (dev->sync_mode) {
if (wait)
spin_unlock_irqrestore(&fibptr->event_lock, flags);
spin_lock_irqsave(&dev->sync_lock, sflags);
if (dev->sync_fib) {
list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
spin_unlock_irqrestore(&dev->sync_lock, sflags);
} else {
dev->sync_fib = fibptr;
spin_unlock_irqrestore(&dev->sync_lock, sflags);
aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
(u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
NULL, NULL, NULL, NULL, NULL);
}
if (wait) {
fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
if (down_interruptible(&fibptr->event_wait)) {
fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
return -EFAULT;
}
return 0;
}
return -EINPROGRESS;
}
if (aac_adapter_deliver(fibptr) != 0) {
printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
if (wait) {


@ -56,7 +56,7 @@
#include "aacraid.h"
#define AAC_DRIVER_VERSION "1.1-7"
#define AAC_DRIVER_VERSION "1.2-0"
#ifndef AAC_DRIVER_BRANCH
#define AAC_DRIVER_BRANCH ""
#endif
@ -162,7 +162,10 @@ static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
{ 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
{ 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
{ 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
{ 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Catch All */
{ 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
{ 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
{ 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
{ 0x9005, 0x028f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 65 }, /* Adaptec PMC Series 9 */
{ 0,}
};
MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
@ -238,7 +241,10 @@ static struct aac_driver_ident aac_drivers[] = {
{ aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */
{ aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Catch All */
{ aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */
{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 7 (Denali) */
{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 8 */
{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Series 9 */
};
/**
@ -1102,6 +1108,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
int error = -ENODEV;
int unique_id = 0;
u64 dmamask;
extern int aac_sync_mode;
list_for_each_entry(aac, &aac_devices, entry) {
if (aac->id > unique_id)
@ -1162,6 +1169,21 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
if ((*aac_drivers[index].init)(aac))
goto out_unmap;
if (aac->sync_mode) {
if (aac_sync_mode)
printk(KERN_INFO "%s%d: Sync. mode enforced "
"by driver parameter. This will cause "
"a significant performance decrease!\n",
aac->name,
aac->id);
else
printk(KERN_INFO "%s%d: Async. mode not supported "
"by current driver, sync. mode enforced."
"\nPlease update driver to get full performance.\n",
aac->name,
aac->id);
}
/*
* Start any kernel threads needed
*/


@ -643,6 +643,7 @@ int _aac_rx_init(struct aac_dev *dev)
if (aac_init_adapter(dev) == NULL)
goto error_iounmap;
aac_adapter_comm(dev, dev->comm_interface);
dev->sync_mode = 0; /* sync. mode not supported */
dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {


@ -385,6 +385,7 @@ int aac_sa_init(struct aac_dev *dev)
if(aac_init_adapter(dev) == NULL)
goto error_irq;
dev->sync_mode = 0; /* sync. mode not supported */
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
IRQF_SHARED|IRQF_DISABLED,
"aacraid", (void *)dev ) < 0) {


@ -96,6 +96,38 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
our_interrupt = 1;
/* handle AIF */
aac_intr_normal(dev, 0, 2, 0, NULL);
} else if (bellbits_shifted & OUTBOUNDDOORBELL_0) {
unsigned long sflags;
struct list_head *entry;
int send_it = 0;
if (dev->sync_fib) {
our_interrupt = 1;
if (dev->sync_fib->callback)
dev->sync_fib->callback(dev->sync_fib->callback_data,
dev->sync_fib);
spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
dev->management_fib_count--;
up(&dev->sync_fib->event_wait);
}
spin_unlock_irqrestore(&dev->sync_fib->event_lock, sflags);
spin_lock_irqsave(&dev->sync_lock, sflags);
if (!list_empty(&dev->sync_fib_list)) {
entry = dev->sync_fib_list.next;
dev->sync_fib = list_entry(entry, struct fib, fiblink);
list_del(entry);
send_it = 1;
} else {
dev->sync_fib = NULL;
}
spin_unlock_irqrestore(&dev->sync_lock, sflags);
if (send_it) {
aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
(u32)dev->sync_fib->hw_fib_pa, 0, 0, 0, 0, 0,
NULL, NULL, NULL, NULL, NULL);
}
}
}
}
@ -177,56 +209,63 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command,
*/
src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);
ok = 0;
start = jiffies;
if (!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) {
ok = 0;
start = jiffies;
/*
* Wait up to 5 minutes
*/
while (time_before(jiffies, start+300*HZ)) {
udelay(5); /* Delay 5 microseconds to let Mon960 get info. */
/*
* Mon960 will set doorbell0 bit when it has completed the command.
*/
if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) {
/*
* Clear the doorbell.
*/
src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
ok = 1;
break;
}
/*
* Yield the processor in case we are slow
*/
msleep(1);
}
if (unlikely(ok != 1)) {
/*
* Restore interrupt mask even though we timed out
*/
aac_adapter_enable_int(dev);
return -ETIMEDOUT;
}
/*
* Pull the synch status from Mailbox 0.
*/
if (status)
*status = readl(&dev->IndexRegs->Mailbox[0]);
if (r1)
*r1 = readl(&dev->IndexRegs->Mailbox[1]);
if (r2)
*r2 = readl(&dev->IndexRegs->Mailbox[2]);
if (r3)
*r3 = readl(&dev->IndexRegs->Mailbox[3]);
if (r4)
*r4 = readl(&dev->IndexRegs->Mailbox[4]);
/*
* Clear the synch command doorbell.
*/
src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
}
/*
* Wait up to 30 seconds
* Restore interrupt mask
*/
while (time_before(jiffies, start+30*HZ)) {
/* Delay 5 microseconds to let Mon960 get info. */
udelay(5);
/* Mon960 will set doorbell0 bit
* when it has completed the command
*/
if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) {
/* Clear the doorbell */
src_writel(dev,
MUnit.ODR_C,
OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
ok = 1;
break;
}
/* Yield the processor in case we are slow */
msleep(1);
}
if (unlikely(ok != 1)) {
/* Restore interrupt mask even though we timed out */
aac_adapter_enable_int(dev);
return -ETIMEDOUT;
}
/* Pull the synch status from Mailbox 0 */
if (status)
*status = readl(&dev->IndexRegs->Mailbox[0]);
if (r1)
*r1 = readl(&dev->IndexRegs->Mailbox[1]);
if (r2)
*r2 = readl(&dev->IndexRegs->Mailbox[2]);
if (r3)
*r3 = readl(&dev->IndexRegs->Mailbox[3]);
if (r4)
*r4 = readl(&dev->IndexRegs->Mailbox[4]);
/* Clear the synch command doorbell */
src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
/* Restore interrupt mask */
aac_adapter_enable_int(dev);
return 0;
}
/**
@ -386,9 +425,7 @@ static int aac_src_ioremap(struct aac_dev *dev, u32 size)
{
if (!size) {
iounmap(dev->regs.src.bar0);
dev->regs.src.bar0 = NULL;
iounmap(dev->base);
dev->base = NULL;
dev->base = dev->regs.src.bar0 = NULL;
return 0;
}
dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
@ -404,7 +441,27 @@ static int aac_src_ioremap(struct aac_dev *dev, u32 size)
return -1;
}
dev->IndexRegs = &((struct src_registers __iomem *)
dev->base)->IndexRegs;
dev->base)->u.tupelo.IndexRegs;
return 0;
}
/**
* aac_srcv_ioremap
* @size: mapping resize request
*
*/
static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
{
if (!size) {
iounmap(dev->regs.src.bar0);
dev->base = dev->regs.src.bar0 = NULL;
return 0;
}
dev->base = dev->regs.src.bar0 = ioremap(dev->scsi_host_ptr->base, size);
if (dev->base == NULL)
return -1;
dev->IndexRegs = &((struct src_registers __iomem *)
dev->base)->u.denali.IndexRegs;
return 0;
}
@ -419,7 +476,7 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled)
bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL);
if (bled || (var != 0x00000001))
bled = -EINVAL;
return -EINVAL;
if (dev->supplement_adapter_info.SupportedOptions2 &
AAC_OPTION_DOORBELL_RESET) {
src_writel(dev, MUnit.IDR, reset_mask);
@ -579,15 +636,149 @@ int aac_src_init(struct aac_dev *dev)
dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
aac_adapter_enable_int(dev);
/*
* Tell the adapter that all is configured, and it can
* start accepting requests
*/
aac_src_start_adapter(dev);
if (!dev->sync_mode) {
/*
* Tell the adapter that all is configured, and it can
* start accepting requests
*/
aac_src_start_adapter(dev);
}
return 0;
error_iounmap:
return -1;
}
/**
* aac_srcv_init - initialize an SRCv card
* @dev: device to configure
*
*/
int aac_srcv_init(struct aac_dev *dev)
{
unsigned long start;
unsigned long status;
int restart = 0;
int instance = dev->id;
const char *name = dev->name;
dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
dev->a_ops.adapter_comm = aac_src_select_comm;
dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
if (aac_adapter_ioremap(dev, dev->base_size)) {
printk(KERN_WARNING "%s: unable to map adapter.\n", name);
goto error_iounmap;
}
/* Failure to reset here is an option ... */
dev->a_ops.adapter_sync_cmd = src_sync_cmd;
dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
if ((aac_reset_devices || reset_devices) &&
!aac_src_restart_adapter(dev, 0))
++restart;
/*
* Check to see if the board panic'd while booting.
*/
status = src_readl(dev, MUnit.OMR);
if (status & KERNEL_PANIC) {
if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
goto error_iounmap;
++restart;
}
/*
* Check to see if the board failed any self tests.
*/
status = src_readl(dev, MUnit.OMR);
if (status & SELF_TEST_FAILED) {
printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
goto error_iounmap;
}
/*
* Check to see if the monitor panic'd while booting.
*/
if (status & MONITOR_PANIC) {
printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
goto error_iounmap;
}
start = jiffies;
/*
* Wait for the adapter to be up and running. Wait up to 3 minutes
*/
while (!((status = src_readl(dev, MUnit.OMR)) & KERNEL_UP_AND_RUNNING)) {
if ((restart &&
(status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
time_after(jiffies, start+HZ*startup_timeout)) {
printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
dev->name, instance, status);
goto error_iounmap;
}
if (!restart &&
((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
time_after(jiffies, start + HZ *
((startup_timeout > 60)
? (startup_timeout - 60)
: (startup_timeout / 2))))) {
if (likely(!aac_src_restart_adapter(dev, aac_src_check_health(dev))))
start = jiffies;
++restart;
}
msleep(1);
}
if (restart && aac_commit)
aac_commit = 1;
/*
* Fill in the common function dispatch table.
*/
dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
dev->a_ops.adapter_notify = aac_src_notify_adapter;
dev->a_ops.adapter_sync_cmd = src_sync_cmd;
dev->a_ops.adapter_check_health = aac_src_check_health;
dev->a_ops.adapter_restart = aac_src_restart_adapter;
/*
* First clear out all interrupts. Then enable the one's that we
* can handle.
*/
aac_adapter_comm(dev, AAC_COMM_MESSAGE);
aac_adapter_disable_int(dev);
src_writel(dev, MUnit.ODR_C, 0xffffffff);
aac_adapter_enable_int(dev);
if (aac_init_adapter(dev) == NULL)
goto error_iounmap;
if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
goto error_iounmap;
dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
if (dev->msi)
pci_disable_msi(dev->pdev);
printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
name, instance);
goto error_iounmap;
}
dev->dbg_base = dev->scsi_host_ptr->base;
dev->dbg_base_mapped = dev->base;
dev->dbg_size = dev->base_size;
aac_adapter_enable_int(dev);
if (!dev->sync_mode) {
/*
* Tell the adapter that all is configured, and it can
* start accepting requests
*/
aac_src_start_adapter(dev);
}
return 0;
error_iounmap:
return -1;
}


@ -80,6 +80,8 @@ void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id);
int asd_execute_task(struct sas_task *, int num, gfp_t gfp_flags);
void asd_set_dmamode(struct domain_device *dev);
/* ---------- TMFs ---------- */
int asd_abort_task(struct sas_task *);
int asd_abort_task_set(struct domain_device *, u8 *lun);


@ -109,26 +109,37 @@ static int asd_init_sata_tag_ddb(struct domain_device *dev)
return 0;
}
static int asd_init_sata(struct domain_device *dev)
void asd_set_dmamode(struct domain_device *dev)
{
struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
struct ata_device *ata_dev = sas_to_ata_dev(dev);
int ddb = (int) (unsigned long) dev->lldd_dev;
u32 qdepth = 0;
int res = 0;
asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
if ((dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) &&
dev->sata_dev.identify_device &&
dev->sata_dev.identify_device[10] != 0) {
u16 w75 = le16_to_cpu(dev->sata_dev.identify_device[75]);
u16 w76 = le16_to_cpu(dev->sata_dev.identify_device[76]);
if (w76 & 0x100) /* NCQ? */
qdepth = (w75 & 0x1F) + 1;
if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) {
if (ata_id_has_ncq(ata_dev->id))
qdepth = ata_id_queue_depth(ata_dev->id);
asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
(1ULL<<qdepth)-1);
asd_ddbsite_write_byte(asd_ha, ddb, NUM_SATA_TAGS, qdepth);
}
if (qdepth > 0)
if (asd_init_sata_tag_ddb(dev) != 0) {
unsigned long flags;
spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
ata_dev->flags |= ATA_DFLAG_NCQ_OFF;
spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
}
}
static int asd_init_sata(struct domain_device *dev)
{
struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
int ddb = (int) (unsigned long) dev->lldd_dev;
asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
dev->dev_type == SATA_PM_PORT) {
struct dev_to_host_fis *fis = (struct dev_to_host_fis *)
@ -136,9 +147,8 @@ static int asd_init_sata(struct domain_device *dev)
asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status);
}
asd_ddbsite_write_word(asd_ha, ddb, NCQ_DATA_SCB_PTR, 0xFFFF);
if (qdepth > 0)
res = asd_init_sata_tag_ddb(dev);
return res;
return 0;
}
static int asd_init_target_ddb(struct domain_device *dev)


@ -68,7 +68,6 @@ static struct scsi_host_template aic94xx_sht = {
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure,
.slave_destroy = sas_slave_destroy,
.scan_finished = asd_scan_finished,
.scan_start = asd_scan_start,
.change_queue_depth = sas_change_queue_depth,
@ -82,7 +81,6 @@ static struct scsi_host_template aic94xx_sht = {
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_bus_reset_handler = sas_eh_bus_reset_handler,
.slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
};
@ -972,7 +970,7 @@ static int asd_scan_finished(struct Scsi_Host *shost, unsigned long time)
if (time < HZ)
return 0;
/* Wait for discovery to finish */
scsi_flush_work(shost);
sas_drain_work(SHOST_TO_SAS_HA(shost));
return 1;
}
@ -1010,6 +1008,8 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
.lldd_clear_nexus_ha = asd_clear_nexus_ha,
.lldd_control_phy = asd_control_phy,
.lldd_ata_set_dmamode = asd_set_dmamode,
};
static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {


@ -181,7 +181,7 @@ static int asd_clear_nexus_I_T(struct domain_device *dev,
int asd_I_T_nexus_reset(struct domain_device *dev)
{
int res, tmp_res, i;
struct sas_phy *phy = sas_find_local_phy(dev);
struct sas_phy *phy = sas_get_local_phy(dev);
/* Standard mandates link reset for ATA (type 0) and
* hard reset for SSP (type 1) */
int reset_type = (dev->dev_type == SATA_DEV ||
@ -192,7 +192,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
ASD_DPRINTK("sending %s reset to %s\n",
reset_type ? "hard" : "soft", dev_name(&phy->dev));
res = sas_phy_reset(phy, reset_type);
if (res == TMF_RESP_FUNC_COMPLETE) {
if (res == TMF_RESP_FUNC_COMPLETE || res == -ENODEV) {
/* wait for the maximum settle time */
msleep(500);
/* clear all outstanding commands (keep nexus suspended) */
@ -201,7 +201,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
for (i = 0 ; i < 3; i++) {
tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
if (tmp_res == TC_RESUME)
return res;
goto out;
msleep(500);
}
@ -211,7 +211,10 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
dev_printk(KERN_ERR, &phy->dev,
"Failed to resume nexus after reset 0x%x\n", tmp_res);
return TMF_RESP_FUNC_FAILED;
res = TMF_RESP_FUNC_FAILED;
out:
sas_put_local_phy(phy);
return res;
}
static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)


@ -3047,8 +3047,7 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
* Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
* buffer of size bsg_data->payload_len
*/
bsg_fcpt = (struct bfa_bsg_fcpt_s *)
kzalloc(bsg_data->payload_len, GFP_KERNEL);
bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
if (!bsg_fcpt)
goto out;
@ -3060,6 +3059,7 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
if (drv_fcxp == NULL) {
kfree(bsg_fcpt);
rc = -ENOMEM;
goto out;
}


@ -62,7 +62,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
#define BNX2FC_VERSION "1.0.9"
#define BNX2FC_VERSION "1.0.10"
#define PFX "bnx2fc: "
@ -114,6 +114,8 @@
#define BNX2FC_HASH_TBL_CHUNK_SIZE (16 * 1024)
#define BNX2FC_MAX_SEQS 255
#define BNX2FC_MAX_RETRY_CNT 3
#define BNX2FC_MAX_RPORT_RETRY_CNT 255
#define BNX2FC_READ (1 << 1)
#define BNX2FC_WRITE (1 << 0)
@ -121,8 +123,10 @@
#define BNX2FC_MIN_XID 0
#define BNX2FC_MAX_XID \
(BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS - 1)
#define FCOE_MAX_NUM_XIDS 0x2000
#define FCOE_MIN_XID (BNX2FC_MAX_XID + 1)
#define FCOE_MAX_XID (FCOE_MIN_XID + 4095)
#define FCOE_MAX_XID (FCOE_MIN_XID + FCOE_MAX_NUM_XIDS - 1)
#define FCOE_XIDS_PER_CPU (FCOE_MIN_XID + (512 * nr_cpu_ids) - 1)
#define BNX2FC_MAX_LUN 0xFFFF
#define BNX2FC_MAX_FCP_TGT 256
#define BNX2FC_MAX_CMD_LEN 16
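Worked through, the new definitions give: FCOE_MAX_NUM_XIDS is 0x2000 (8192),
so FCOE_MAX_XID now leaves room for 8192 FCoE exchange IDs above FCOE_MIN_XID
instead of the previous fixed 4096. On a machine where nr_cpu_ids is 2,
FCOE_XIDS_PER_CPU evaluates to FCOE_MIN_XID + 1023, i.e. a 1024-XID window
(512 per CPU), which is the cap bnx2fc_em_config() applies below when there
are at most two CPUs.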


@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
#define DRV_MODULE_RELDATE "Oct 21, 2011"
#define DRV_MODULE_RELDATE "Jan 22, 2011"
static char version[] __devinitdata =
@ -939,8 +939,14 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
static int bnx2fc_em_config(struct fc_lport *lport)
{
int max_xid;
if (nr_cpu_ids <= 2)
max_xid = FCOE_XIDS_PER_CPU;
else
max_xid = FCOE_MAX_XID;
if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID,
FCOE_MAX_XID, NULL)) {
max_xid, NULL)) {
printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
return -ENOMEM;
}
@ -952,8 +958,8 @@ static int bnx2fc_lport_config(struct fc_lport *lport)
{
lport->link_up = 0;
lport->qfull = 0;
lport->max_retry_count = 3;
lport->max_rport_retry_count = 3;
lport->max_retry_count = BNX2FC_MAX_RETRY_CNT;
lport->max_rport_retry_count = BNX2FC_MAX_RPORT_RETRY_CNT;
lport->e_d_tov = 2 * 1000;
lport->r_a_tov = 10 * 1000;
@ -1536,6 +1542,7 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
static int bnx2fc_destroy(struct net_device *netdev)
{
struct bnx2fc_interface *interface = NULL;
struct workqueue_struct *timer_work_queue;
int rc = 0;
rtnl_lock();
@ -1548,9 +1555,9 @@ static int bnx2fc_destroy(struct net_device *netdev)
goto netdev_err;
}
destroy_workqueue(interface->timer_work_queue);
timer_work_queue = interface->timer_work_queue;
__bnx2fc_destroy(interface);
destroy_workqueue(timer_work_queue);
netdev_err:
mutex_unlock(&bnx2fc_dev_lock);
@ -2054,6 +2061,7 @@ if_create_err:
ifput_err:
bnx2fc_net_cleanup(interface);
bnx2fc_interface_put(interface);
goto mod_err;
netdev_err:
module_put(THIS_MODULE);
mod_err:


@ -1312,14 +1312,18 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) |
/* EMC */
(1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
if (error_mask1)
if (error_mask1) {
iscsi_init2.error_bit_map[0] = error_mask1;
else
mask64 &= (u32)(~mask64);
mask64 |= error_mask1;
} else
iscsi_init2.error_bit_map[0] = (u32) mask64;
if (error_mask2)
if (error_mask2) {
iscsi_init2.error_bit_map[1] = error_mask2;
else
mask64 &= 0xffffffff;
mask64 |= ((u64)error_mask2 << 32);
} else
iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32);
iscsi_error_mask = mask64;


@ -49,11 +49,11 @@ module_param(en_tcp_dack, int, 0664);
MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK");
unsigned int error_mask1 = 0x00;
module_param(error_mask1, int, 0664);
module_param(error_mask1, uint, 0664);
MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1");
unsigned int error_mask2 = 0x00;
module_param(error_mask2, int, 0664);
module_param(error_mask2, uint, 0664);
MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2");
unsigned int sq_size;
@ -393,8 +393,9 @@ static void bnx2i_percpu_thread_create(unsigned int cpu)
p = &per_cpu(bnx2i_percpu, cpu);
thread = kthread_create(bnx2i_percpu_io_thread, (void *)p,
"bnx2i_thread/%d", cpu);
thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p,
cpu_to_node(cpu),
"bnx2i_thread/%d", cpu);
/* bind thread to the cpu */
if (likely(!IS_ERR(thread))) {
kthread_bind(thread, cpu);


@ -2147,11 +2147,10 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf, int buflen)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct cxgbi_conn *cconn = tcp_conn->dd_data;
struct cxgbi_sock *csk = cconn->cep->csk;
int value, err = 0;
int err;
log_debug(1 << CXGBI_DBG_ISCSI,
"cls_conn 0x%p, param %d, buf(%d) %s.\n",
@ -2173,15 +2172,7 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
conn->datadgst_en, 0);
break;
case ISCSI_PARAM_MAX_R2T:
sscanf(buf, "%d", &value);
if (value <= 0 || !is_power_of_2(value))
return -EINVAL;
if (session->max_r2t == value)
break;
iscsi_tcp_r2tpool_free(session);
err = iscsi_set_param(cls_conn, param, buf, buflen);
if (!err && iscsi_tcp_r2tpool_alloc(session))
return -ENOMEM;
return iscsi_tcp_set_max_r2t(conn, buf);
case ISCSI_PARAM_MAX_RECV_DLENGTH:
err = iscsi_set_param(cls_conn, param, buf, buflen);
if (!err)


@ -168,6 +168,14 @@ static struct fc_function_template fcoe_nport_fc_functions = {
.show_host_supported_fc4s = 1,
.show_host_active_fc4s = 1,
.show_host_maxframe_size = 1,
.show_host_serial_number = 1,
.show_host_manufacturer = 1,
.show_host_model = 1,
.show_host_model_description = 1,
.show_host_hardware_version = 1,
.show_host_driver_version = 1,
.show_host_firmware_version = 1,
.show_host_optionrom_version = 1,
.show_host_port_id = 1,
.show_host_supported_speeds = 1,
@ -208,6 +216,14 @@ static struct fc_function_template fcoe_vport_fc_functions = {
.show_host_supported_fc4s = 1,
.show_host_active_fc4s = 1,
.show_host_maxframe_size = 1,
.show_host_serial_number = 1,
.show_host_manufacturer = 1,
.show_host_model = 1,
.show_host_model_description = 1,
.show_host_hardware_version = 1,
.show_host_driver_version = 1,
.show_host_firmware_version = 1,
.show_host_optionrom_version = 1,
.show_host_port_id = 1,
.show_host_supported_speeds = 1,
@ -364,11 +380,10 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
if (!fcoe) {
FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
fcoe = ERR_PTR(-ENOMEM);
goto out_nomod;
goto out_putmod;
}
dev_hold(netdev);
kref_init(&fcoe->kref);
/*
* Initialize FIP.
@ -384,53 +399,17 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
kfree(fcoe);
dev_put(netdev);
fcoe = ERR_PTR(err);
goto out_nomod;
goto out_putmod;
}
goto out;
out_nomod:
out_putmod:
module_put(THIS_MODULE);
out:
return fcoe;
}
/**
* fcoe_interface_release() - fcoe_port kref release function
* @kref: Embedded reference count in an fcoe_interface struct
*/
static void fcoe_interface_release(struct kref *kref)
{
struct fcoe_interface *fcoe;
struct net_device *netdev;
fcoe = container_of(kref, struct fcoe_interface, kref);
netdev = fcoe->netdev;
/* tear-down the FCoE controller */
fcoe_ctlr_destroy(&fcoe->ctlr);
kfree(fcoe);
dev_put(netdev);
module_put(THIS_MODULE);
}
/**
* fcoe_interface_get() - Get a reference to a FCoE interface
* @fcoe: The FCoE interface to be held
*/
static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
{
kref_get(&fcoe->kref);
}
/**
* fcoe_interface_put() - Put a reference to a FCoE interface
* @fcoe: The FCoE interface to be released
*/
static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
{
kref_put(&fcoe->kref, fcoe_interface_release);
}
/**
* fcoe_interface_cleanup() - Clean up a FCoE interface
* @fcoe: The FCoE interface to be cleaned up
@ -478,7 +457,11 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
rtnl_unlock();
/* Release the self-reference taken during fcoe_interface_create() */
fcoe_interface_put(fcoe);
/* tear-down the FCoE controller */
fcoe_ctlr_destroy(fip);
kfree(fcoe);
dev_put(netdev);
module_put(THIS_MODULE);
}
/**
@ -734,6 +717,85 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
return 0;
}
/**
* fcoe_fdmi_info() - Get FDMI related info from net device for SW FCoE
* @lport: The local port that is associated with the net device
* @netdev: The associated net device
*
* Must be called after fcoe_shost_config() as it will use the local port mutex
*
*/
static void fcoe_fdmi_info(struct fc_lport *lport, struct net_device *netdev)
{
struct fcoe_interface *fcoe;
struct fcoe_port *port;
struct net_device *realdev;
int rc;
struct netdev_fcoe_hbainfo fdmi;
port = lport_priv(lport);
fcoe = port->priv;
realdev = fcoe->realdev;
if (!realdev)
return;
/* No FDMI state m/c for NPIV ports */
if (lport->vport)
return;
if (realdev->netdev_ops->ndo_fcoe_get_hbainfo) {
memset(&fdmi, 0, sizeof(fdmi));
rc = realdev->netdev_ops->ndo_fcoe_get_hbainfo(realdev,
&fdmi);
if (rc) {
printk(KERN_INFO "fcoe: Failed to retrieve FDMI "
"information from netdev.\n");
return;
}
snprintf(fc_host_serial_number(lport->host),
FC_SERIAL_NUMBER_SIZE,
"%s",
fdmi.serial_number);
snprintf(fc_host_manufacturer(lport->host),
FC_SERIAL_NUMBER_SIZE,
"%s",
fdmi.manufacturer);
snprintf(fc_host_model(lport->host),
FC_SYMBOLIC_NAME_SIZE,
"%s",
fdmi.model);
snprintf(fc_host_model_description(lport->host),
FC_SYMBOLIC_NAME_SIZE,
"%s",
fdmi.model_description);
snprintf(fc_host_hardware_version(lport->host),
FC_VERSION_STRING_SIZE,
"%s",
fdmi.hardware_version);
snprintf(fc_host_driver_version(lport->host),
FC_VERSION_STRING_SIZE,
"%s",
fdmi.driver_version);
snprintf(fc_host_optionrom_version(lport->host),
FC_VERSION_STRING_SIZE,
"%s",
fdmi.optionrom_version);
snprintf(fc_host_firmware_version(lport->host),
FC_VERSION_STRING_SIZE,
"%s",
fdmi.firmware_version);
/* Enable FDMI lport states */
lport->fdmi_enabled = 1;
} else {
lport->fdmi_enabled = 0;
printk(KERN_INFO "fcoe: No FDMI support.\n");
}
}
/**
* fcoe_oem_match() - The match routine for the offloaded exchange manager
* @fp: The I/O frame
@ -881,9 +943,6 @@ static void fcoe_if_destroy(struct fc_lport *lport)
dev_uc_del(netdev, port->data_src_addr);
rtnl_unlock();
/* Release reference held in fcoe_if_create() */
fcoe_interface_put(fcoe);
/* Free queued packets for the per-CPU receive threads */
fcoe_percpu_clean(lport);
@ -1047,6 +1106,9 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
goto out_lp_destroy;
}
/* Initialized FDMI information */
fcoe_fdmi_info(lport, netdev);
/*
* fcoe_em_alloc() and fcoe_hostlist_add() both
* need to be atomic with respect to other changes to the
@ -1070,7 +1132,6 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
goto out_lp_destroy;
}
fcoe_interface_get(fcoe);
return lport;
out_lp_destroy:
@ -2009,20 +2070,13 @@ static void fcoe_destroy_work(struct work_struct *work)
{
struct fcoe_port *port;
struct fcoe_interface *fcoe;
int npiv = 0;
port = container_of(work, struct fcoe_port, destroy_work);
mutex_lock(&fcoe_config_mutex);
/* set if this is an NPIV port */
npiv = port->lport->vport ? 1 : 0;
fcoe = port->priv;
fcoe_if_destroy(port->lport);
/* Do not tear down the fcoe interface for NPIV port */
if (!npiv)
fcoe_interface_cleanup(fcoe);
fcoe_interface_cleanup(fcoe);
mutex_unlock(&fcoe_config_mutex);
}
@ -2593,12 +2647,15 @@ static int fcoe_vport_destroy(struct fc_vport *vport)
struct Scsi_Host *shost = vport_to_shost(vport);
struct fc_lport *n_port = shost_priv(shost);
struct fc_lport *vn_port = vport->dd_data;
struct fcoe_port *port = lport_priv(vn_port);
mutex_lock(&n_port->lp_mutex);
list_del(&vn_port->list);
mutex_unlock(&n_port->lp_mutex);
queue_work(fcoe_wq, &port->destroy_work);
mutex_lock(&fcoe_config_mutex);
fcoe_if_destroy(vn_port);
mutex_unlock(&fcoe_config_mutex);
return 0;
}


@ -71,8 +71,6 @@ do { \
* @ctlr: The FCoE controller (for FIP)
* @oem: The offload exchange manager for all local port
* instances associated with this port
* @kref: The kernel reference
*
* This structure is 1:1 with a net device.
*/
struct fcoe_interface {
@ -83,7 +81,6 @@ struct fcoe_interface {
struct packet_type fip_packet_type;
struct fcoe_ctlr ctlr;
struct fc_exch_mgr *oem;
struct kref kref;
};
#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)


@ -619,8 +619,8 @@ static int libfcoe_device_notification(struct notifier_block *notifier,
switch (event) {
case NETDEV_UNREGISTER:
printk(KERN_ERR "libfcoe_device_notification: NETDEV_UNREGISTER %s\n",
netdev->name);
LIBFCOE_TRANSPORT_DBG("NETDEV_UNREGISTER %s\n",
netdev->name);
fcoe_del_netdev_mapping(netdev);
break;
}


@ -56,6 +56,7 @@
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "2.0.2-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
@ -202,30 +203,31 @@ static int check_for_unit_attention(struct ctlr_info *h,
switch (c->err_info->SenseInfo[12]) {
case STATE_CHANGED:
dev_warn(&h->pdev->dev, "hpsa%d: a state change "
dev_warn(&h->pdev->dev, HPSA "%d: a state change "
"detected, command retried\n", h->ctlr);
break;
case LUN_FAILED:
dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
"detected, action required\n", h->ctlr);
break;
case REPORT_LUNS_CHANGED:
dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
"changed, action required\n", h->ctlr);
/*
* Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
* Note: this REPORT_LUNS_CHANGED condition only occurs on the external
* target (array) devices.
*/
break;
case POWER_OR_RESET:
dev_warn(&h->pdev->dev, "hpsa%d: a power on "
dev_warn(&h->pdev->dev, HPSA "%d: a power on "
"or device reset detected\n", h->ctlr);
break;
case UNIT_ATTENTION_CLEARED:
dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
"cleared by another initiator\n", h->ctlr);
break;
default:
dev_warn(&h->pdev->dev, "hpsa%d: unknown "
dev_warn(&h->pdev->dev, HPSA "%d: unknown "
"unit attention detected\n", h->ctlr);
break;
}
@ -296,11 +298,23 @@ static u32 unresettable_controller[] = {
0x40800E11, /* Smart Array 5i */
0x409C0E11, /* Smart Array 6400 */
0x409D0E11, /* Smart Array 6400 EM */
0x40700E11, /* Smart Array 5300 */
0x40820E11, /* Smart Array 532 */
0x40830E11, /* Smart Array 5312 */
0x409A0E11, /* Smart Array 641 */
0x409B0E11, /* Smart Array 642 */
0x40910E11, /* Smart Array 6i */
};
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
0x40800E11, /* Smart Array 5i */
0x40700E11, /* Smart Array 5300 */
0x40820E11, /* Smart Array 532 */
0x40830E11, /* Smart Array 5312 */
0x409A0E11, /* Smart Array 641 */
0x409B0E11, /* Smart Array 642 */
0x40910E11, /* Smart Array 6i */
/* Exclude 640x boards. These are two pci devices in one slot
* which share a battery backed cache module. One controls the
* cache, the other accesses the cache through the one that controls
@ -475,8 +489,8 @@ static struct device_attribute *hpsa_shost_attrs[] = {
static struct scsi_host_template hpsa_driver_template = {
.module = THIS_MODULE,
.name = "hpsa",
.proc_name = "hpsa",
.name = HPSA,
.proc_name = HPSA,
.queuecommand = hpsa_scsi_queue_command,
.scan_start = hpsa_scan_start,
.scan_finished = hpsa_scan_finished,
@ -577,21 +591,19 @@ static int hpsa_find_target_lun(struct ctlr_info *h,
int i, found = 0;
DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
memset(&lun_taken[0], 0, HPSA_MAX_DEVICES >> 3);
bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
for (i = 0; i < h->ndevices; i++) {
if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
set_bit(h->dev[i]->target, lun_taken);
__set_bit(h->dev[i]->target, lun_taken);
}
for (i = 0; i < HPSA_MAX_DEVICES; i++) {
if (!test_bit(i, lun_taken)) {
/* *bus = 1; */
*target = i;
*lun = 0;
found = 1;
break;
}
i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
if (i < HPSA_MAX_DEVICES) {
/* *bus = 1; */
*target = i;
*lun = 0;
found = 1;
}
return !found;
}
@ -675,6 +687,20 @@ lun_assigned:
return 0;
}
/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
int entry, struct hpsa_scsi_dev_t *new_entry)
{
/* assumes h->devlock is held */
BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
/* Raid level changed. */
h->dev[entry]->raid_level = new_entry->raid_level;
dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
new_entry->target, new_entry->lun);
}
/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
int entry, struct hpsa_scsi_dev_t *new_entry,
@ -781,10 +807,25 @@ static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
return 1;
}
static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
struct hpsa_scsi_dev_t *dev2)
{
/* Device attributes that can change, but don't mean
* that the device is a different device, nor that the OS
* needs to be told anything about the change.
*/
if (dev1->raid_level != dev2->raid_level)
return 1;
return 0;
}
/* Find needle in haystack. If exact match found, return DEVICE_SAME,
* and return needle location in *index. If scsi3addr matches, but not
* vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
* location in *index. If needle not found, return DEVICE_NOT_FOUND.
* location in *index.
* In the case of a minor device attribute change, such as RAID level, just
* return DEVICE_UPDATED, along with the updated device's location in index.
* If needle not found, return DEVICE_NOT_FOUND.
*/
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
struct hpsa_scsi_dev_t *haystack[], int haystack_size,
@ -794,15 +835,19 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
for (i = 0; i < haystack_size; i++) {
if (haystack[i] == NULL) /* previously removed. */
continue;
if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
*index = i;
if (device_is_the_same(needle, haystack[i]))
if (device_is_the_same(needle, haystack[i])) {
if (device_updated(needle, haystack[i]))
return DEVICE_UPDATED;
return DEVICE_SAME;
else
} else {
return DEVICE_CHANGED;
}
}
}
*index = -1;
@ -838,6 +883,8 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
* sd[] and remove them from h->dev[], and for any
* devices which have changed, remove the old device
* info and add the new device info.
* If minor device attributes change, just update
* the existing device structure.
*/
i = 0;
nremoved = 0;
@ -858,6 +905,8 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
* at the bottom of hpsa_update_scsi_devices()
*/
sd[entry] = NULL;
} else if (device_change == DEVICE_UPDATED) {
hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
}
i++;
}
@ -1257,46 +1306,6 @@ static void complete_scsi_command(struct CommandList *cp)
cmd_free(h, cp);
}
static int hpsa_scsi_detect(struct ctlr_info *h)
{
struct Scsi_Host *sh;
int error;
sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
if (sh == NULL)
goto fail;
sh->io_port = 0;
sh->n_io_port = 0;
sh->this_id = -1;
sh->max_channel = 3;
sh->max_cmd_len = MAX_COMMAND_SIZE;
sh->max_lun = HPSA_MAX_LUN;
sh->max_id = HPSA_MAX_LUN;
sh->can_queue = h->nr_cmds;
sh->cmd_per_lun = h->nr_cmds;
sh->sg_tablesize = h->maxsgentries;
h->scsi_host = sh;
sh->hostdata[0] = (unsigned long) h;
sh->irq = h->intr[h->intr_mode];
sh->unique_id = sh->irq;
error = scsi_add_host(sh, &h->pdev->dev);
if (error)
goto fail_host_put;
scsi_scan_host(sh);
return 0;
fail_host_put:
dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
" failed for controller %d\n", h->ctlr);
scsi_host_put(sh);
return error;
fail:
dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
" failed for controller %d\n", h->ctlr);
return -ENOMEM;
}
static void hpsa_pci_unmap(struct pci_dev *pdev,
struct CommandList *c, int sg_used, int data_direction)
{
@ -1641,7 +1650,7 @@ bail_out:
return 1;
}
static unsigned char *msa2xxx_model[] = {
static unsigned char *ext_target_model[] = {
"MSA2012",
"MSA2024",
"MSA2312",
@ -1650,78 +1659,54 @@ static unsigned char *msa2xxx_model[] = {
NULL,
};
static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
int i;
for (i = 0; msa2xxx_model[i]; i++)
if (strncmp(device->model, msa2xxx_model[i],
strlen(msa2xxx_model[i])) == 0)
for (i = 0; ext_target_model[i]; i++)
if (strncmp(device->model, ext_target_model[i],
strlen(ext_target_model[i])) == 0)
return 1;
return 0;
}
/* Helper function to assign bus, target, lun mapping of devices.
* Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
* Puts non-external target logical volumes on bus 0, external target logical
* volumes on bus 1, physical devices on bus 2. and the hba on bus 3.
* Logical drive target and lun are assigned at this time, but
* physical device lun and target assignment are deferred (assigned
* in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
*/
static void figure_bus_target_lun(struct ctlr_info *h,
u8 *lunaddrbytes, int *bus, int *target, int *lun,
struct hpsa_scsi_dev_t *device)
u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
u32 lunid;
u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
if (is_logical_dev_addr_mode(lunaddrbytes)) {
/* logical device */
if (unlikely(is_scsi_rev_5(h))) {
/* p1210m, logical drives lun assignments
* match SCSI REPORT LUNS data.
*/
lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
*bus = 0;
*target = 0;
*lun = (lunid & 0x3fff) + 1;
} else {
/* not p1210m... */
lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
if (is_msa2xxx(h, device)) {
/* msa2xxx way, put logicals on bus 1
* and match target/lun numbers box
* reports.
*/
*bus = 1;
*target = (lunid >> 16) & 0x3fff;
*lun = lunid & 0x00ff;
} else {
/* Traditional smart array way. */
*bus = 0;
*lun = 0;
*target = lunid & 0x3fff;
}
}
} else {
/* physical device */
if (!is_logical_dev_addr_mode(lunaddrbytes)) {
/* physical device, target and lun filled in later */
if (is_hba_lunid(lunaddrbytes))
if (unlikely(is_scsi_rev_5(h))) {
*bus = 0; /* put p1210m ctlr at 0,0,0 */
*target = 0;
*lun = 0;
return;
} else
*bus = 3; /* traditional smartarray */
hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
else
*bus = 2; /* physical disk */
*target = -1;
*lun = -1; /* we will fill these in later. */
/* defer target, lun assignment for physical devices */
hpsa_set_bus_target_lun(device, 2, -1, -1);
return;
}
/* It's a logical device */
if (is_ext_target(h, device)) {
/* external target way, put logicals on bus 1
* and match target/lun numbers box
* reports, other smart array, bus 0, target 0, match lunid
*/
hpsa_set_bus_target_lun(device,
1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
return;
}
hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
}
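To make the new mapping concrete: a logical volume behind an external target
box that reports lunid 0x00170008 is placed at bus 1, target 0x17 (from
(lunid >> 16) & 0x3fff) and lun 8 (from lunid & 0x00ff); any other logical
volume lands on bus 0, target 0, with the lun taken from the low 14 bits of
the lunid; physical devices go to bus 2 with target/lun assigned later; and
the controller itself sits at bus 3.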
/*
* If there is no lun 0 on a target, linux won't find any devices.
* For the MSA2xxx boxes, we have to manually detect the enclosure
* For the external targets (arrays), we have to manually detect the enclosure
* which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
* it for some reason. *tmpdevice is the target we're adding,
* this_device is a pointer into the current element of currentsd[]
@ -1730,46 +1715,46 @@ static void figure_bus_target_lun(struct ctlr_info *h,
* lun 0 assigned.
* Returns 1 if an enclosure was added, 0 if not.
*/
static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
static int add_ext_target_dev(struct ctlr_info *h,
struct hpsa_scsi_dev_t *tmpdevice,
struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
int bus, int target, int lun, unsigned long lunzerobits[],
int *nmsa2xxx_enclosures)
unsigned long lunzerobits[], int *n_ext_target_devs)
{
unsigned char scsi3addr[8];
if (test_bit(target, lunzerobits))
if (test_bit(tmpdevice->target, lunzerobits))
return 0; /* There is already a lun 0 on this target. */
if (!is_logical_dev_addr_mode(lunaddrbytes))
return 0; /* It's the logical targets that may lack lun 0. */
if (!is_msa2xxx(h, tmpdevice))
return 0; /* It's only the MSA2xxx that have this problem. */
if (!is_ext_target(h, tmpdevice))
return 0; /* Only external target devices have this problem. */
if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
return 0;
memset(scsi3addr, 0, 8);
scsi3addr[3] = target;
scsi3addr[3] = tmpdevice->target;
if (is_hba_lunid(scsi3addr))
return 0; /* Don't add the RAID controller here. */
if (is_scsi_rev_5(h))
return 0; /* p1210m doesn't need to do this. */
if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
"enclosures exceeded. Check your hardware "
if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
dev_warn(&h->pdev->dev, "Maximum number of external "
"target devices exceeded. Check your hardware "
"configuration.");
return 0;
}
if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
return 0;
(*nmsa2xxx_enclosures)++;
hpsa_set_bus_target_lun(this_device, bus, target, 0);
set_bit(target, lunzerobits);
(*n_ext_target_devs)++;
hpsa_set_bus_target_lun(this_device,
tmpdevice->bus, tmpdevice->target, 0);
set_bit(tmpdevice->target, lunzerobits);
return 1;
}
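/*
 * The lunzerobits bookkeeping above is ordinary kernel bitmap usage;
 * a minimal sketch of the idiom (shortened, not driver code):
 */
DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); /* one bit per target    */
int t = 5;                            /* example target number         */

bitmap_zero(lunzerobits, MAX_EXT_TARGETS);
if (!test_bit(t, lunzerobits)) {      /* no lun 0 seen on target t yet */
	set_bit(t, lunzerobits);      /* remember we synthesized one   */
}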
@ -1863,10 +1848,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
int ncurrent = 0;
int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
int i, nmsa2xxx_enclosures, ndevs_to_allocate;
int bus, target, lun;
int i, n_ext_target_devs, ndevs_to_allocate;
int raid_ctlr_position;
DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
@ -1883,11 +1867,11 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
logdev_list, &nlogicals))
goto out;
/* We might see up to 32 MSA2xxx enclosures, actually 8 of them
* but each of them 4 times through different paths. The plus 1
* is for the RAID controller.
/* We might see up to the maximum number of logical and physical disks
* plus external target devices, and a device for the local RAID
* controller.
*/
ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;
ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
/* Allocate the per device structures */
for (i = 0; i < ndevs_to_allocate; i++) {
@ -1913,7 +1897,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
raid_ctlr_position = nphysicals + nlogicals;
/* adjust our table of devices */
nmsa2xxx_enclosures = 0;
n_ext_target_devs = 0;
for (i = 0; i < nphysicals + nlogicals + 1; i++) {
u8 *lunaddrbytes, is_OBDR = 0;
@ -1929,26 +1913,24 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
&is_OBDR))
continue; /* skip it if we can't talk to it. */
figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
tmpdevice);
figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
this_device = currentsd[ncurrent];
/*
* For the msa2xxx boxes, we have to insert a LUN 0 which
* For external target devices, we have to insert a LUN 0 which
* doesn't show up in CCISS_REPORT_PHYSICAL data, but there
* is nonetheless an enclosure device there. We have to
* present that otherwise linux won't find anything if
* there is no lun 0.
*/
if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
lunaddrbytes, bus, target, lun, lunzerobits,
&nmsa2xxx_enclosures)) {
if (add_ext_target_dev(h, tmpdevice, this_device,
lunaddrbytes, lunzerobits,
&n_ext_target_devs)) {
ncurrent++;
this_device = currentsd[ncurrent];
}
*this_device = *tmpdevice;
hpsa_set_bus_target_lun(this_device, bus, target, lun);
switch (this_device->devtype) {
case TYPE_ROM:
@ -2228,13 +2210,42 @@ static void hpsa_unregister_scsi(struct ctlr_info *h)
static int hpsa_register_scsi(struct ctlr_info *h)
{
int rc;
struct Scsi_Host *sh;
int error;
rc = hpsa_scsi_detect(h);
if (rc != 0)
dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
" hpsa_scsi_detect(), rc is %d\n", rc);
return rc;
sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
if (sh == NULL)
goto fail;
sh->io_port = 0;
sh->n_io_port = 0;
sh->this_id = -1;
sh->max_channel = 3;
sh->max_cmd_len = MAX_COMMAND_SIZE;
sh->max_lun = HPSA_MAX_LUN;
sh->max_id = HPSA_MAX_LUN;
sh->can_queue = h->nr_cmds;
sh->cmd_per_lun = h->nr_cmds;
sh->sg_tablesize = h->maxsgentries;
h->scsi_host = sh;
sh->hostdata[0] = (unsigned long) h;
sh->irq = h->intr[h->intr_mode];
sh->unique_id = sh->irq;
error = scsi_add_host(sh, &h->pdev->dev);
if (error)
goto fail_host_put;
scsi_scan_host(sh);
return 0;
fail_host_put:
dev_err(&h->pdev->dev, "%s: scsi_add_host"
" failed for controller %d\n", __func__, h->ctlr);
scsi_host_put(sh);
return error;
fail:
dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
" failed for controller %d\n", __func__, h->ctlr);
return -ENOMEM;
}
static int wait_for_device_to_become_ready(struct ctlr_info *h,
@ -2700,16 +2711,16 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
status = -EINVAL;
goto cleanup1;
}
if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
status = -EINVAL;
goto cleanup1;
}
buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
if (!buff) {
status = -ENOMEM;
goto cleanup1;
}
buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
if (!buff_size) {
status = -ENOMEM;
goto cleanup1;
@ -3354,7 +3365,7 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
static __devinit void init_driver_version(char *driver_version, int len)
{
memset(driver_version, 0, len);
strncpy(driver_version, "hpsa " HPSA_DRIVER_VERSION, len - 1);
strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
}
static __devinit int write_driver_ver_to_cfgtable(
@ -3935,7 +3946,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
return err;
}
err = pci_request_regions(h->pdev, "hpsa");
err = pci_request_regions(h->pdev, HPSA);
if (err) {
dev_err(&h->pdev->dev,
"cannot obtain PCI resources, aborting\n");
@ -4253,7 +4264,7 @@ static void start_controller_lockup_detector(struct ctlr_info *h)
spin_lock_init(&lockup_detector_lock);
hpsa_lockup_detector =
kthread_run(detect_controller_lockup_thread,
NULL, "hpsa");
NULL, HPSA);
}
if (!hpsa_lockup_detector) {
dev_warn(&h->pdev->dev,
@ -4325,7 +4336,7 @@ reinit_after_soft_reset:
if (rc != 0)
goto clean1;
sprintf(h->devname, "hpsa%d", number_of_controllers);
sprintf(h->devname, HPSA "%d", number_of_controllers);
h->ctlr = number_of_controllers;
number_of_controllers++;
@ -4482,6 +4493,14 @@ static void hpsa_shutdown(struct pci_dev *pdev)
#endif /* CONFIG_PCI_MSI */
}
static void __devexit hpsa_free_device_info(struct ctlr_info *h)
{
int i;
for (i = 0; i < h->ndevices; i++)
kfree(h->dev[i]);
}
static void __devexit hpsa_remove_one(struct pci_dev *pdev)
{
struct ctlr_info *h;
@ -4497,6 +4516,7 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
iounmap(h->vaddr);
iounmap(h->transtable);
iounmap(h->cfgtable);
hpsa_free_device_info(h);
hpsa_free_sg_chain_blocks(h);
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(struct CommandList),
@ -4530,7 +4550,7 @@ static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
}
static struct pci_driver hpsa_pci_driver = {
.name = "hpsa",
.name = HPSA,
.probe = hpsa_init_one,
.remove = __devexit_p(hpsa_remove_one),
.id_table = hpsa_pci_device_id, /* id_table */
@ -4592,15 +4612,15 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
* Each SG entry requires 16 bytes. The eight registers are programmed
* with the number of 16-byte blocks a command of that size requires.
* The smallest command possible requires 5 such 16 byte blocks.
* the largest command possible requires MAXSGENTRIES + 4 16-byte
* the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
* blocks. Note, this only extends to the SG entries contained
* within the command block, and does not extend to chained blocks
* of SG elements. bft[] contains the eight values we write to
* the registers. They are not evenly distributed, but have more
* sizes for small commands, and fewer sizes for larger commands.
*/
int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
/* 5 = 1 s/g entry or 4k
* 6 = 2 s/g entry or 8k
* 8 = 4 s/g entry or 16k
@ -4613,8 +4633,9 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
memset(h->reply_pool, 0, h->reply_pool_size);
h->reply_pool_head = h->reply_pool;
bft[7] = h->max_sg_entries + 4;
calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
bft[7] = SG_ENTRIES_IN_CMD + 4;
calc_bucket_map(bft, ARRAY_SIZE(bft),
SG_ENTRIES_IN_CMD, h->blockFetchTable);
for (i = 0; i < 8; i++)
writel(bft[i], &h->transtable->BlockFetch[i]);
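/*
 * Equivalent sizing rule for the table above: with a 4-block fixed
 * portion per command, n SG entries fit in n + 4 sixteen-byte blocks.
 * A hypothetical helper (not in the driver) stating that rule:
 */
static inline int blocks_for_sg_count(int n_sg)
{
	return n_sg + 4;      /* 1 SG -> 5 blocks, SG_ENTRIES_IN_CMD -> 36 */
}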
@ -4652,14 +4673,13 @@ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
return;
hpsa_get_max_perf_mode_cmds(h);
h->max_sg_entries = 32;
/* Performant mode ring buffer and supporting data structures */
h->reply_pool_size = h->max_commands * sizeof(u64);
h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
&(h->reply_pool_dhandle));
/* Need a block fetch table for performant mode */
h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
sizeof(u32)), GFP_KERNEL);
if ((h->reply_pool == NULL)

drivers/scsi/hpsa.h
@ -58,7 +58,6 @@ struct ctlr_info {
unsigned long paddr;
int nr_cmds; /* Number of commands allowed on this controller */
struct CfgTable __iomem *cfgtable;
int max_sg_entries;
int interrupts_enabled;
int major;
int max_commands;
@ -317,7 +316,7 @@ static unsigned long SA5_completed(struct ctlr_info *h)
dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
register_value);
else
dev_dbg(&h->pdev->dev, "hpsa: FIFO Empty read\n");
dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
#endif
return register_value;

drivers/scsi/hpsa_cmd.h
@ -23,7 +23,7 @@
/* general boundary definitions */
#define SENSEINFOBYTES 32 /* may vary between hbas */
#define MAXSGENTRIES 32
#define SG_ENTRIES_IN_CMD 32 /* Max SG entries excluding chain blocks */
#define HPSA_SG_CHAIN 0x80000000
#define MAXREPLYQS 256
@ -122,12 +122,11 @@ union u64bit {
};
/* FIXME this is a per controller value (barf!) */
#define HPSA_MAX_TARGETS_PER_CTLR 16
#define HPSA_MAX_LUN 1024
#define HPSA_MAX_PHYS_LUN 1024
#define MAX_MSA2XXX_ENCLOSURES 32
#define MAX_EXT_TARGETS 32
#define HPSA_MAX_DEVICES (HPSA_MAX_PHYS_LUN + HPSA_MAX_LUN + \
MAX_MSA2XXX_ENCLOSURES + 1) /* + 1 is for the controller itself */
MAX_EXT_TARGETS + 1) /* + 1 is for the controller itself */
/* SCSI-3 Commands */
#pragma pack(1)
@ -282,7 +281,7 @@ struct CommandList {
struct CommandListHeader Header;
struct RequestBlock Request;
struct ErrDescriptor ErrDesc;
struct SGDescriptor SG[MAXSGENTRIES];
struct SGDescriptor SG[SG_ENTRIES_IN_CMD];
/* information associated with the command */
u32 busaddr; /* physical addr of this record */
struct ErrorInfo *err_info; /* pointer to the allocated mem */

drivers/scsi/ipr.c
@ -183,7 +183,7 @@ static const struct ipr_chip_t ipr_chip[] = {
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
static int ipr_max_bus_speeds [] = {
@ -9191,15 +9191,15 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
{ }
};

drivers/scsi/ipr.h
@ -58,7 +58,7 @@
#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339
#define PCI_DEVICE_ID_IBM_CROC_FPGA_E2 0x033D
#define PCI_DEVICE_ID_IBM_CROC_ASIC_E2 0x034A
#define PCI_DEVICE_ID_IBM_CROCODILE 0x034A
#define IPR_SUBS_DEV_ID_2780 0x0264
#define IPR_SUBS_DEV_ID_5702 0x0266
@ -92,7 +92,7 @@
#define IPR_SUBS_DEV_ID_57B1 0x0355
#define IPR_SUBS_DEV_ID_574D 0x0356
#define IPR_SUBS_DEV_ID_575D 0x035D
#define IPR_SUBS_DEV_ID_57C8 0x035D
#define IPR_NAME "ipr"

drivers/scsi/isci/host.c
@ -649,15 +649,13 @@ static void isci_host_start_complete(struct isci_host *ihost, enum sci_status co
int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
struct isci_host *ihost = ha->lldd_ha;
if (test_bit(IHOST_START_PENDING, &ihost->flags))
return 0;
/* todo: use sas_flush_discovery once it is upstream */
scsi_flush_work(shost);
scsi_flush_work(shost);
sas_drain_work(ha);
dev_dbg(&ihost->pdev->dev,
"%s: ihost->status = %d, time = %ld\n",
@ -1490,6 +1488,15 @@ sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
{
struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
u32 val;
/* enable clock gating for power control of the scu unit */
val = readl(&ihost->smu_registers->clock_gating_control);
val &= ~(SMU_CGUCR_GEN_BIT(REGCLK_ENABLE) |
SMU_CGUCR_GEN_BIT(TXCLK_ENABLE) |
SMU_CGUCR_GEN_BIT(XCLK_ENABLE));
val |= SMU_CGUCR_GEN_BIT(IDLE_ENABLE);
writel(val, &ihost->smu_registers->clock_gating_control);
/* set the default interrupt coalescence number and timeout value. */
sci_controller_set_interrupt_coalescence(ihost, 0, 0);

drivers/scsi/isci/host.h
@ -187,6 +187,7 @@ struct isci_host {
int id; /* unique within a given pci device */
struct isci_phy phys[SCI_MAX_PHYS];
struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
struct asd_sas_port sas_ports[SCI_MAX_PORTS];
struct sas_ha_struct sas_ha;
spinlock_t state_lock;
@ -393,24 +394,6 @@ static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
#define sci_controller_clear_invalid_phy(controller, phy) \
((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
static inline struct device *sciphy_to_dev(struct isci_phy *iphy)
{
if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host)
return NULL;
return &iphy->isci_port->isci_host->pdev->dev;
}
static inline struct device *sciport_to_dev(struct isci_port *iport)
{
if (!iport || !iport->isci_host)
return NULL;
return &iport->isci_host->pdev->dev;
}
static inline struct device *scirdev_to_dev(struct isci_remote_device *idev)
{
if (!idev || !idev->isci_port || !idev->isci_port->isci_host)

drivers/scsi/isci/init.c
@ -60,6 +60,7 @@
#include <linux/efi.h>
#include <asm/string.h>
#include <scsi/scsi_host.h>
#include "host.h"
#include "isci.h"
#include "task.h"
#include "probe_roms.h"
@ -154,7 +155,6 @@ static struct scsi_host_template isci_sht = {
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure,
.slave_destroy = sas_slave_destroy,
.scan_finished = isci_host_scan_finished,
.scan_start = isci_host_scan_start,
.change_queue_depth = sas_change_queue_depth,
@ -166,9 +166,6 @@ static struct scsi_host_template isci_sht = {
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_bus_reset_handler = isci_bus_reset_handler,
.slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = isci_host_attrs,
@ -194,6 +191,9 @@ static struct sas_domain_function_template isci_transport_ops = {
.lldd_lu_reset = isci_task_lu_reset,
.lldd_query_task = isci_task_query_task,
/* ata recovery called from ata-eh */
.lldd_ata_check_ready = isci_ata_check_ready,
/* Port and Adapter management */
.lldd_clear_nexus_port = isci_task_clear_nexus_port,
.lldd_clear_nexus_ha = isci_task_clear_nexus_ha,
@ -242,18 +242,13 @@ static int isci_register_sas_ha(struct isci_host *isci_host)
if (!sas_ports)
return -ENOMEM;
/*----------------- Libsas Initialization Stuff----------------------
* Set various fields in the sas_ha struct:
*/
sas_ha->sas_ha_name = DRV_NAME;
sas_ha->lldd_module = THIS_MODULE;
sas_ha->sas_addr = &isci_host->phys[0].sas_addr[0];
/* set the array of phy and port structs. */
for (i = 0; i < SCI_MAX_PHYS; i++) {
sas_phys[i] = &isci_host->phys[i].sas_phy;
sas_ports[i] = &isci_host->ports[i].sas_port;
sas_ports[i] = &isci_host->sas_ports[i];
}
sas_ha->sas_phy = sas_phys;
@ -528,6 +523,13 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
goto err_host_alloc;
}
pci_info->hosts[i] = h;
/* turn on DIF support */
scsi_host_set_prot(h->shost,
SHOST_DIF_TYPE1_PROTECTION |
SHOST_DIF_TYPE2_PROTECTION |
SHOST_DIF_TYPE3_PROTECTION);
scsi_host_set_guard(h->shost, SHOST_DIX_GUARD_CRC);
}
err = isci_setup_interrupts(pdev);
@ -551,9 +553,9 @@ static void __devexit isci_pci_remove(struct pci_dev *pdev)
int i;
for_each_isci_host(i, ihost, pdev) {
wait_for_start(ihost);
isci_unregister(ihost);
isci_host_deinit(ihost);
sci_controller_disable_interrupts(ihost);
}
}

drivers/scsi/isci/phy.c
@ -59,6 +59,16 @@
#include "scu_event_codes.h"
#include "probe_roms.h"
#undef C
#define C(a) (#a)
static const char *phy_state_name(enum sci_phy_states state)
{
static const char * const strings[] = PHY_STATES;
return strings[state];
}
#undef C
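/*
 * The C() trick above pairs with the PHY_STATES list macro in phy.h:
 * one list expands once into enum constants and once into the name
 * table, so the two cannot drift apart.  A standalone sketch of the
 * same pattern, with invented names:
 */
#define FRUITS { C(APPLE), C(PEAR), C(PLUM), }

#define C(a) FRUIT_##a
enum fruit FRUITS;                      /* FRUIT_APPLE, FRUIT_PEAR, ...   */
#undef C

#define C(a) (#a)
static const char * const fruit_names[] = FRUITS; /* "APPLE", "PEAR", ... */
#undef C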
/* Maximum arbitration wait time in micro-seconds */
#define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME (700)
@ -67,6 +77,19 @@ enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy)
return iphy->max_negotiated_speed;
}
static struct isci_host *phy_to_host(struct isci_phy *iphy)
{
struct isci_phy *table = iphy - iphy->phy_index;
struct isci_host *ihost = container_of(table, typeof(*ihost), phys[0]);
return ihost;
}
static struct device *sciphy_to_dev(struct isci_phy *iphy)
{
return &phy_to_host(iphy)->pdev->dev;
}
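/*
 * phy_to_host() above needs no stored back-pointer: it steps from
 * phys[phy_index] back to phys[0] and applies container_of().  A
 * self-contained userspace sketch of the same trick (invented types):
 */
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kid { int n; };
struct parent { int id; struct kid kids[4]; };

static struct parent *kid_to_parent(struct kid *k, int index)
{
	struct kid *first = k - index;  /* step back to kids[0] */
	return container_of(first, struct parent, kids[0]);
}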
static enum sci_status
sci_phy_transport_layer_initialization(struct isci_phy *iphy,
struct scu_transport_layer_registers __iomem *reg)
@ -446,8 +469,8 @@ enum sci_status sci_phy_start(struct isci_phy *iphy)
enum sci_phy_states state = iphy->sm.current_state_id;
if (state != SCI_PHY_STOPPED) {
dev_dbg(sciphy_to_dev(iphy),
"%s: in wrong state: %d\n", __func__, state);
dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
__func__, phy_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@ -472,8 +495,8 @@ enum sci_status sci_phy_stop(struct isci_phy *iphy)
case SCI_PHY_READY:
break;
default:
dev_dbg(sciphy_to_dev(iphy),
"%s: in wrong state: %d\n", __func__, state);
dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
__func__, phy_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@ -486,8 +509,8 @@ enum sci_status sci_phy_reset(struct isci_phy *iphy)
enum sci_phy_states state = iphy->sm.current_state_id;
if (state != SCI_PHY_READY) {
dev_dbg(sciphy_to_dev(iphy),
"%s: in wrong state: %d\n", __func__, state);
dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
__func__, phy_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@ -536,8 +559,8 @@ enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy)
return SCI_SUCCESS;
}
default:
dev_dbg(sciphy_to_dev(iphy),
"%s: in wrong state: %d\n", __func__, state);
dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
__func__, phy_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
@ -591,6 +614,60 @@ static void sci_phy_complete_link_training(struct isci_phy *iphy,
sci_change_state(&iphy->sm, next_state);
}
static const char *phy_event_name(u32 event_code)
{
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_PORT_SELECTOR_DETECTED:
return "port selector";
case SCU_EVENT_SENT_PORT_SELECTION:
return "port selection";
case SCU_EVENT_HARD_RESET_TRANSMITTED:
return "tx hard reset";
case SCU_EVENT_HARD_RESET_RECEIVED:
return "rx hard reset";
case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
return "identify timeout";
case SCU_EVENT_LINK_FAILURE:
return "link fail";
case SCU_EVENT_SATA_SPINUP_HOLD:
return "sata spinup hold";
case SCU_EVENT_SAS_15_SSC:
case SCU_EVENT_SAS_15:
return "sas 1.5";
case SCU_EVENT_SAS_30_SSC:
case SCU_EVENT_SAS_30:
return "sas 3.0";
case SCU_EVENT_SAS_60_SSC:
case SCU_EVENT_SAS_60:
return "sas 6.0";
case SCU_EVENT_SATA_15_SSC:
case SCU_EVENT_SATA_15:
return "sata 1.5";
case SCU_EVENT_SATA_30_SSC:
case SCU_EVENT_SATA_30:
return "sata 3.0";
case SCU_EVENT_SATA_60_SSC:
case SCU_EVENT_SATA_60:
return "sata 6.0";
case SCU_EVENT_SAS_PHY_DETECTED:
return "sas detect";
case SCU_EVENT_SATA_PHY_DETECTED:
return "sata detect";
default:
return "unknown";
}
}
#define phy_event_dbg(iphy, state, code) \
dev_dbg(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \
phy_to_host(iphy)->id, iphy->phy_index, \
phy_state_name(state), phy_event_name(code), code)
#define phy_event_warn(iphy, state, code) \
dev_warn(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \
phy_to_host(iphy)->id, iphy->phy_index, \
phy_state_name(state), phy_event_name(code), code)
enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
{
enum sci_phy_states state = iphy->sm.current_state_id;
@ -607,11 +684,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
iphy->is_in_link_training = true;
break;
default:
dev_dbg(sciphy_to_dev(iphy),
"%s: PHY starting substate machine received "
"unexpected event_code %x\n",
__func__,
event_code);
phy_event_dbg(iphy, state, event_code);
return SCI_FAILURE;
}
return SCI_SUCCESS;
@ -648,11 +721,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
default:
dev_warn(sciphy_to_dev(iphy),
"%s: PHY starting substate machine received "
"unexpected event_code %x\n",
__func__, event_code);
phy_event_warn(iphy, state, event_code);
return SCI_FAILURE;
break;
}
@ -677,10 +746,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
default:
dev_warn(sciphy_to_dev(iphy),
"%s: PHY starting substate machine received "
"unexpected event_code %x\n",
__func__, event_code);
phy_event_warn(iphy, state, event_code);
return SCI_FAILURE;
}
return SCI_SUCCESS;
@ -691,11 +757,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
default:
dev_warn(sciphy_to_dev(iphy),
"%s: PHY starting substate machine received unexpected "
"event_code %x\n",
__func__,
event_code);
phy_event_warn(iphy, state, event_code);
return SCI_FAILURE;
}
return SCI_SUCCESS;
@ -719,11 +781,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
break;
default:
dev_warn(sciphy_to_dev(iphy),
"%s: PHY starting substate machine received "
"unexpected event_code %x\n",
__func__, event_code);
phy_event_warn(iphy, state, event_code);
return SCI_FAILURE;
}
return SCI_SUCCESS;
@ -751,12 +809,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
sci_phy_start_sas_link_training(iphy);
break;
default:
dev_warn(sciphy_to_dev(iphy),
"%s: PHY starting substate machine received "
"unexpected event_code %x\n",
__func__,
event_code);
phy_event_warn(iphy, state, event_code);
return SCI_FAILURE;
}
return SCI_SUCCESS;
@ -793,11 +846,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
sci_phy_start_sas_link_training(iphy);
break;
default:
dev_warn(sciphy_to_dev(iphy),
"%s: PHY starting substate machine received "
"unexpected event_code %x\n",
__func__, event_code);
phy_event_warn(iphy, state, event_code);
return SCI_FAILURE;
}
@ -815,12 +864,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
break;
default:
dev_warn(sciphy_to_dev(iphy),
"%s: PHY starting substate machine received "
"unexpected event_code %x\n",
__func__,
event_code);
phy_event_warn(iphy, state, event_code);
return SCI_FAILURE;
}
return SCI_SUCCESS;
@ -838,10 +882,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
iphy->bcn_received_while_port_unassigned = true;
break;
default:
dev_warn(sciphy_to_dev(iphy),
"%sP SCIC PHY 0x%p ready state machine received "
"unexpected event_code %x\n",
__func__, iphy, event_code);
phy_event_warn(iphy, state, event_code);
return SCI_FAILURE_INVALID_STATE;
}
return SCI_SUCCESS;
@ -852,18 +893,14 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
default:
dev_warn(sciphy_to_dev(iphy),
"%s: SCIC PHY 0x%p resetting state machine received "
"unexpected event_code %x\n",
__func__, iphy, event_code);
phy_event_warn(iphy, state, event_code);
return SCI_FAILURE_INVALID_STATE;
break;
}
return SCI_SUCCESS;
default:
dev_dbg(sciphy_to_dev(iphy),
"%s: in wrong state: %d\n", __func__, state);
dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
__func__, phy_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
@ -956,8 +993,8 @@ enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index)
return result;
}
default:
dev_dbg(sciphy_to_dev(iphy),
"%s: in wrong state: %d\n", __func__, state);
dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
__func__, phy_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@ -1299,7 +1336,6 @@ void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
sas_addr = cpu_to_be64(sci_sas_addr);
memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr));
iphy->isci_port = NULL;
iphy->sas_phy.enabled = 0;
iphy->sas_phy.id = index;
iphy->sas_phy.sas_addr = &iphy->sas_addr[0];
@ -1333,13 +1369,13 @@ int isci_phy_control(struct asd_sas_phy *sas_phy,
{
int ret = 0;
struct isci_phy *iphy = sas_phy->lldd_phy;
struct isci_port *iport = iphy->isci_port;
struct asd_sas_port *port = sas_phy->port;
struct isci_host *ihost = sas_phy->ha->lldd_ha;
unsigned long flags;
dev_dbg(&ihost->pdev->dev,
"%s: phy %p; func %d; buf %p; isci phy %p, port %p\n",
__func__, sas_phy, func, buf, iphy, iport);
__func__, sas_phy, func, buf, iphy, port);
switch (func) {
case PHY_FUNC_DISABLE:
@ -1356,11 +1392,10 @@ int isci_phy_control(struct asd_sas_phy *sas_phy,
break;
case PHY_FUNC_HARD_RESET:
if (!iport)
if (!port)
return -ENODEV;
/* Perform the port reset. */
ret = isci_port_perform_hard_reset(ihost, iport, iphy);
ret = isci_port_perform_hard_reset(ihost, port->lldd_port, iphy);
break;
case PHY_FUNC_GET_EVENTS: {

drivers/scsi/isci/phy.h
@ -103,7 +103,6 @@ struct isci_phy {
struct scu_transport_layer_registers __iomem *transport_layer_registers;
struct scu_link_layer_registers __iomem *link_layer_registers;
struct asd_sas_phy sas_phy;
struct isci_port *isci_port;
u8 sas_addr[SAS_ADDR_SIZE];
union {
struct sas_identify_frame iaf;
@ -344,101 +343,65 @@ enum sci_phy_counter_id {
SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR
};
enum sci_phy_states {
/**
* Simply the initial state for the base domain state machine.
*/
SCI_PHY_INITIAL,
/**
* This state indicates that the phy has successfully been stopped.
* In this state no new IO operations are permitted on this phy.
* This state is entered from the INITIAL state.
* This state is entered from the STARTING state.
* This state is entered from the READY state.
* This state is entered from the RESETTING state.
*/
SCI_PHY_STOPPED,
/**
* This state indicates that the phy is in the process of becoming
* ready. In this state no new IO operations are permitted on this phy.
* This state is entered from the STOPPED state.
* This state is entered from the READY state.
* This state is entered from the RESETTING state.
*/
SCI_PHY_STARTING,
/**
* Initial state
*/
SCI_PHY_SUB_INITIAL,
/**
* Wait state for the hardware OSSP event type notification
*/
SCI_PHY_SUB_AWAIT_OSSP_EN,
/**
* Wait state for the PHY speed notification
*/
SCI_PHY_SUB_AWAIT_SAS_SPEED_EN,
/**
* Wait state for the IAF Unsolicited frame notification
*/
SCI_PHY_SUB_AWAIT_IAF_UF,
/**
* Wait state for the request to consume power
*/
SCI_PHY_SUB_AWAIT_SAS_POWER,
/**
* Wait state for request to consume power
*/
SCI_PHY_SUB_AWAIT_SATA_POWER,
/**
* Wait state for the SATA PHY notification
*/
SCI_PHY_SUB_AWAIT_SATA_PHY_EN,
/**
* Wait for the SATA PHY speed notification
*/
SCI_PHY_SUB_AWAIT_SATA_SPEED_EN,
/**
* Wait state for the SIGNATURE FIS unsolicited frame notification
*/
SCI_PHY_SUB_AWAIT_SIG_FIS_UF,
/**
* Exit state for this state machine
*/
SCI_PHY_SUB_FINAL,
/**
* This state indicates that the phy is now ready. Thus, the user
* is able to perform IO operations utilizing this phy as long as it
* is currently part of a valid port.
* This state is entered from the STARTING state.
*/
SCI_PHY_READY,
/**
* This state indicates that the phy is in the process of being reset.
* In this state no new IO operations are permitted on this phy.
* This state is entered from the READY state.
*/
SCI_PHY_RESETTING,
/**
* Simply the final state for the base phy state machine.
*/
SCI_PHY_FINAL,
};
/**
* enum sci_phy_states - phy state machine states
* @SCI_PHY_INITIAL: Simply the initial state for the base domain state
* machine.
* @SCI_PHY_STOPPED: phy has successfully been stopped. In this state
* no new IO operations are permitted on this phy.
* @SCI_PHY_STARTING: the phy is in the process of becoming ready. In
* this state no new IO operations are permitted on
* this phy.
* @SCI_PHY_SUB_INITIAL: Initial state
* @SCI_PHY_SUB_AWAIT_OSSP_EN: Wait state for the hardware OSSP event
* type notification
* @SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: Wait state for the PHY speed
* notification
* @SCI_PHY_SUB_AWAIT_IAF_UF: Wait state for the IAF Unsolicited frame
* notification
* @SCI_PHY_SUB_AWAIT_SAS_POWER: Wait state for the request to consume
* power
* @SCI_PHY_SUB_AWAIT_SATA_POWER: Wait state for request to consume
* power
* @SCI_PHY_SUB_AWAIT_SATA_PHY_EN: Wait state for the SATA PHY
* notification
* @SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: Wait for the SATA PHY speed
* notification
* @SCI_PHY_SUB_AWAIT_SIG_FIS_UF: Wait state for the SIGNATURE FIS
* unsolicited frame notification
* @SCI_PHY_SUB_FINAL: Exit state for this state machine
* @SCI_PHY_READY: phy is now ready. Thus, the user is able to perform
* IO operations utilizing this phy as long as it is
* currently part of a valid port. This state is
* entered from the STARTING state.
* @SCI_PHY_RESETTING: phy is in the process of being reset. In this
* state no new IO operations are permitted on this
* phy. This state is entered from the READY state.
* @SCI_PHY_FINAL: Simply the final state for the base phy state
* machine.
*/
#define PHY_STATES {\
C(PHY_INITIAL),\
C(PHY_STOPPED),\
C(PHY_STARTING),\
C(PHY_SUB_INITIAL),\
C(PHY_SUB_AWAIT_OSSP_EN),\
C(PHY_SUB_AWAIT_SAS_SPEED_EN),\
C(PHY_SUB_AWAIT_IAF_UF),\
C(PHY_SUB_AWAIT_SAS_POWER),\
C(PHY_SUB_AWAIT_SATA_POWER),\
C(PHY_SUB_AWAIT_SATA_PHY_EN),\
C(PHY_SUB_AWAIT_SATA_SPEED_EN),\
C(PHY_SUB_AWAIT_SIG_FIS_UF),\
C(PHY_SUB_FINAL),\
C(PHY_READY),\
C(PHY_RESETTING),\
C(PHY_FINAL),\
}
#undef C
#define C(a) SCI_##a
enum sci_phy_states PHY_STATES;
#undef C
void sci_phy_construct(
struct isci_phy *iphy,

drivers/scsi/isci/port.c
@ -60,18 +60,29 @@
#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000)
#define SCU_DUMMY_INDEX (0xFFFF)
static void isci_port_change_state(struct isci_port *iport, enum isci_status status)
#undef C
#define C(a) (#a)
const char *port_state_name(enum sci_port_states state)
{
unsigned long flags;
static const char * const strings[] = PORT_STATES;
dev_dbg(&iport->isci_host->pdev->dev,
"%s: iport = %p, state = 0x%x\n",
__func__, iport, status);
return strings[state];
}
#undef C
/* XXX pointless lock */
spin_lock_irqsave(&iport->state_lock, flags);
iport->status = status;
spin_unlock_irqrestore(&iport->state_lock, flags);
static struct device *sciport_to_dev(struct isci_port *iport)
{
int i = iport->physical_port_index;
struct isci_port *table;
struct isci_host *ihost;
if (i == SCIC_SDS_DUMMY_PORT)
i = SCI_MAX_PORTS+1;
table = iport - i;
ihost = container_of(table, typeof(*ihost), ports[0]);
return &ihost->pdev->dev;
}
static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
@ -165,18 +176,12 @@ static void isci_port_link_up(struct isci_host *isci_host,
struct sci_port_properties properties;
unsigned long success = true;
BUG_ON(iphy->isci_port != NULL);
iphy->isci_port = iport;
dev_dbg(&isci_host->pdev->dev,
"%s: isci_port = %p\n",
__func__, iport);
spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
isci_port_change_state(iphy->isci_port, isci_starting);
sci_port_get_properties(iport, &properties);
if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
@ -258,7 +263,6 @@ static void isci_port_link_down(struct isci_host *isci_host,
__func__, isci_device);
set_bit(IDEV_GONE, &isci_device->flags);
}
isci_port_change_state(isci_port, isci_stopping);
}
}
@ -269,52 +273,10 @@ static void isci_port_link_down(struct isci_host *isci_host,
isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
PHYE_LOSS_OF_SIGNAL);
isci_phy->isci_port = NULL;
dev_dbg(&isci_host->pdev->dev,
"%s: isci_port = %p - Done\n", __func__, isci_port);
}
/**
* isci_port_ready() - This function is called by the sci core when a link
* becomes ready.
* @isci_host: This parameter specifies the isci host object.
* @port: This parameter specifies the sci port with the active link.
*
*/
static void isci_port_ready(struct isci_host *isci_host, struct isci_port *isci_port)
{
dev_dbg(&isci_host->pdev->dev,
"%s: isci_port = %p\n", __func__, isci_port);
complete_all(&isci_port->start_complete);
isci_port_change_state(isci_port, isci_ready);
return;
}
/**
* isci_port_not_ready() - This function is called by the sci core when a link
* is not ready. All remote devices on this link will be removed if they are
* in the stopping state.
* @isci_host: This parameter specifies the isci host object.
* @port: This parameter specifies the sci port with the active link.
*
*/
static void isci_port_not_ready(struct isci_host *isci_host, struct isci_port *isci_port)
{
dev_dbg(&isci_host->pdev->dev,
"%s: isci_port = %p\n", __func__, isci_port);
}
static void isci_port_stop_complete(struct isci_host *ihost,
struct isci_port *iport,
enum sci_status completion_status)
{
dev_dbg(&ihost->pdev->dev, "Port stop complete\n");
}
static bool is_port_ready_state(enum sci_port_states state)
{
switch (state) {
@ -353,7 +315,9 @@ static void port_state_machine_change(struct isci_port *iport,
static void isci_port_hard_reset_complete(struct isci_port *isci_port,
enum sci_status completion_status)
{
dev_dbg(&isci_port->isci_host->pdev->dev,
struct isci_host *ihost = isci_port->owning_controller;
dev_dbg(&ihost->pdev->dev,
"%s: isci_port = %p, completion_status=%x\n",
__func__, isci_port, completion_status);
@ -364,23 +328,24 @@ static void isci_port_hard_reset_complete(struct isci_port *isci_port,
/* The reset failed. The port state is now SCI_PORT_FAILED. */
if (isci_port->active_phy_mask == 0) {
int phy_idx = isci_port->last_active_phy;
struct isci_phy *iphy = &ihost->phys[phy_idx];
/* Generate the link down now to the host, since it
* was intercepted by the hard reset state machine when
* it really happened.
*/
isci_port_link_down(isci_port->isci_host,
&isci_port->isci_host->phys[
isci_port->last_active_phy],
isci_port);
isci_port_link_down(ihost, iphy, isci_port);
}
/* Advance the port state so that link state changes will be
* noticed.
*/
* noticed.
*/
port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING);
}
complete_all(&isci_port->hard_reset_complete);
clear_bit(IPORT_RESET_PENDING, &isci_port->state);
wake_up(&ihost->eventq);
}
/* This method will return a true value if the specified phy can be assigned to
@ -835,10 +800,9 @@ static void port_timeout(unsigned long data)
__func__,
iport);
} else if (current_state == SCI_PORT_STOPPING) {
/* if the port is still stopping then the stop has not completed */
isci_port_stop_complete(iport->owning_controller,
iport,
SCI_FAILURE_TIMEOUT);
dev_dbg(sciport_to_dev(iport),
"%s: port%d: stop complete timeout\n",
__func__, iport->physical_port_index);
} else {
/* The port is in the ready state and we have a timer
* reporting a timeout this should not happen.
@ -1003,7 +967,8 @@ static void sci_port_ready_substate_operational_enter(struct sci_base_state_mach
struct isci_port *iport = container_of(sm, typeof(*iport), sm);
struct isci_host *ihost = iport->owning_controller;
isci_port_ready(ihost, iport);
dev_dbg(&ihost->pdev->dev, "%s: port%d ready\n",
__func__, iport->physical_port_index);
for (index = 0; index < SCI_MAX_PHYS; index++) {
if (iport->phy_table[index]) {
@ -1069,7 +1034,8 @@ static void sci_port_ready_substate_operational_exit(struct sci_base_state_machi
*/
sci_port_abort_dummy_request(iport);
isci_port_not_ready(ihost, iport);
dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
__func__, iport->physical_port_index);
if (iport->ready_exit)
sci_port_invalidate_dummy_remote_node(iport);
@ -1081,7 +1047,8 @@ static void sci_port_ready_substate_configuring_enter(struct sci_base_state_mach
struct isci_host *ihost = iport->owning_controller;
if (iport->active_phy_mask == 0) {
isci_port_not_ready(ihost, iport);
dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
__func__, iport->physical_port_index);
port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
} else
@ -1097,8 +1064,8 @@ enum sci_status sci_port_start(struct isci_port *iport)
state = iport->sm.current_state_id;
if (state != SCI_PORT_STOPPED) {
dev_warn(sciport_to_dev(iport),
"%s: in wrong state: %d\n", __func__, state);
dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
__func__, port_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@ -1172,8 +1139,8 @@ enum sci_status sci_port_stop(struct isci_port *iport)
SCI_PORT_STOPPING);
return SCI_SUCCESS;
default:
dev_warn(sciport_to_dev(iport),
"%s: in wrong state: %d\n", __func__, state);
dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
__func__, port_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
@ -1187,8 +1154,8 @@ static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
state = iport->sm.current_state_id;
if (state != SCI_PORT_SUB_OPERATIONAL) {
dev_warn(sciport_to_dev(iport),
"%s: in wrong state: %d\n", __func__, state);
dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
__func__, port_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@ -1282,8 +1249,8 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
SCI_PORT_SUB_CONFIGURING);
return SCI_SUCCESS;
default:
dev_warn(sciport_to_dev(iport),
"%s: in wrong state: %d\n", __func__, state);
dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
__func__, port_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
@ -1332,8 +1299,8 @@ enum sci_status sci_port_remove_phy(struct isci_port *iport,
SCI_PORT_SUB_CONFIGURING);
return SCI_SUCCESS;
default:
dev_warn(sciport_to_dev(iport),
"%s: in wrong state: %d\n", __func__, state);
dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
__func__, port_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
@ -1375,8 +1342,8 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
return SCI_SUCCESS;
default:
dev_warn(sciport_to_dev(iport),
"%s: in wrong state: %d\n", __func__, state);
dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
__func__, port_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
@ -1405,8 +1372,8 @@ enum sci_status sci_port_link_down(struct isci_port *iport,
sci_port_deactivate_phy(iport, iphy, false);
return SCI_SUCCESS;
default:
dev_warn(sciport_to_dev(iport),
"%s: in wrong state: %d\n", __func__, state);
dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
__func__, port_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
@ -1425,8 +1392,8 @@ enum sci_status sci_port_start_io(struct isci_port *iport,
iport->started_request_count++;
return SCI_SUCCESS;
default:
dev_warn(sciport_to_dev(iport),
"%s: in wrong state: %d\n", __func__, state);
dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
__func__, port_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
@ -1440,8 +1407,8 @@ enum sci_status sci_port_complete_io(struct isci_port *iport,
state = iport->sm.current_state_id;
switch (state) {
case SCI_PORT_STOPPED:
dev_warn(sciport_to_dev(iport),
"%s: in wrong state: %d\n", __func__, state);
dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
__func__, port_state_name(state));
return SCI_FAILURE_INVALID_STATE;
case SCI_PORT_STOPPING:
sci_port_decrement_request_count(iport);
@ -1547,7 +1514,8 @@ static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
if (prev_state == SCI_PORT_RESETTING)
isci_port_hard_reset_complete(iport, SCI_SUCCESS);
else
isci_port_not_ready(ihost, iport);
dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
__func__, iport->physical_port_index);
/* Post and suspend the dummy remote node context for this port. */
sci_port_post_dummy_remote_node(iport);
@ -1644,22 +1612,7 @@ void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
{
INIT_LIST_HEAD(&iport->remote_dev_list);
INIT_LIST_HEAD(&iport->domain_dev_list);
spin_lock_init(&iport->state_lock);
init_completion(&iport->start_complete);
iport->isci_host = ihost;
isci_port_change_state(iport, isci_freed);
}
/**
* isci_port_get_state() - This function gets the status of the port object.
* @isci_port: This parameter points to the isci_port object
*
* status of the object as a isci_status enum.
*/
enum isci_status isci_port_get_state(
struct isci_port *isci_port)
{
return isci_port->status;
}
void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
@ -1670,6 +1623,11 @@ void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy
isci_port_bc_change_received(ihost, iport, iphy);
}
static void wait_port_reset(struct isci_host *ihost, struct isci_port *iport)
{
wait_event(ihost->eventq, !test_bit(IPORT_RESET_PENDING, &iport->state));
}
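/*
 * Both halves of the flag-and-waitqueue handshake that replaces the
 * old hard_reset_complete completion, gathered into one sketch; the
 * function names here are invented, and the real completion side is
 * in isci_port_hard_reset_complete() above.
 */
static void reset_request_side(struct isci_host *ihost, struct isci_port *iport)
{
	set_bit(IPORT_RESET_PENDING, &iport->state);
	/* ... start the hard reset ... */
	wait_event(ihost->eventq,
		   !test_bit(IPORT_RESET_PENDING, &iport->state));
}

static void reset_completion_side(struct isci_host *ihost, struct isci_port *iport)
{
	clear_bit(IPORT_RESET_PENDING, &iport->state);
	wake_up(&ihost->eventq);        /* releases the waiter above */
}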
int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
struct isci_phy *iphy)
{
@ -1680,9 +1638,8 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
__func__, iport);
init_completion(&iport->hard_reset_complete);
spin_lock_irqsave(&ihost->scic_lock, flags);
set_bit(IPORT_RESET_PENDING, &iport->state);
#define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);
@ -1690,7 +1647,7 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
spin_unlock_irqrestore(&ihost->scic_lock, flags);
if (status == SCI_SUCCESS) {
wait_for_completion(&iport->hard_reset_complete);
wait_port_reset(ihost, iport);
dev_dbg(&ihost->pdev->dev,
"%s: iport = %p; hard reset completion\n",
@ -1704,6 +1661,8 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
__func__, iport, iport->hard_reset_status);
}
} else {
clear_bit(IPORT_RESET_PENDING, &iport->state);
wake_up(&ihost->eventq);
ret = TMF_RESP_FUNC_FAILED;
dev_err(&ihost->pdev->dev,
@ -1726,24 +1685,80 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
return ret;
}
/**
* isci_port_deformed() - This function is called by libsas when a port becomes
* inactive.
* @phy: This parameter specifies the libsas phy with the inactive port.
*
*/
void isci_port_deformed(struct asd_sas_phy *phy)
int isci_ata_check_ready(struct domain_device *dev)
{
pr_debug("%s: sas_phy = %p\n", __func__, phy);
struct isci_port *iport = dev->port->lldd_port;
struct isci_host *ihost = dev_to_ihost(dev);
struct isci_remote_device *idev;
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&ihost->scic_lock, flags);
idev = isci_lookup_device(dev);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
if (!idev)
goto out;
if (test_bit(IPORT_RESET_PENDING, &iport->state))
goto out;
rc = !!iport->active_phy_mask;
out:
isci_put_device(idev);
return rc;
}
void isci_port_deformed(struct asd_sas_phy *phy)
{
struct isci_host *ihost = phy->ha->lldd_ha;
struct isci_port *iport = phy->port->lldd_port;
unsigned long flags;
int i;
/* we got a port notification on a port that was subsequently
* torn down and libsas is just now catching up
*/
if (!iport)
return;
spin_lock_irqsave(&ihost->scic_lock, flags);
for (i = 0; i < SCI_MAX_PHYS; i++) {
if (iport->active_phy_mask & 1 << i)
break;
}
spin_unlock_irqrestore(&ihost->scic_lock, flags);
if (i >= SCI_MAX_PHYS)
dev_dbg(&ihost->pdev->dev, "%s: port: %ld\n",
__func__, (long) (iport - &ihost->ports[0]));
}
/**
* isci_port_formed() - This function is called by libsas when a port becomes
* active.
* @phy: This parameter specifies the libsas phy with the active port.
*
*/
void isci_port_formed(struct asd_sas_phy *phy)
{
pr_debug("%s: sas_phy = %p, sas_port = %p\n", __func__, phy, phy->port);
struct isci_host *ihost = phy->ha->lldd_ha;
struct isci_phy *iphy = to_iphy(phy);
struct asd_sas_port *port = phy->port;
struct isci_port *iport;
unsigned long flags;
int i;
/* initial ports are formed as the driver is still initializing,
* wait for that process to complete
*/
wait_for_start(ihost);
spin_lock_irqsave(&ihost->scic_lock, flags);
for (i = 0; i < SCI_MAX_PORTS; i++) {
iport = &ihost->ports[i];
if (iport->active_phy_mask & 1 << iphy->phy_index)
break;
}
spin_unlock_irqrestore(&ihost->scic_lock, flags);
if (i >= SCI_MAX_PORTS)
iport = NULL;
port->lldd_port = iport;
}

drivers/scsi/isci/port.h
@ -95,14 +95,11 @@ enum isci_status {
* @timer: timeout start/stop operations
*/
struct isci_port {
enum isci_status status;
struct isci_host *isci_host;
struct asd_sas_port sas_port;
struct list_head remote_dev_list;
spinlock_t state_lock;
struct list_head domain_dev_list;
struct completion start_complete;
struct completion hard_reset_complete;
#define IPORT_RESET_PENDING 0
unsigned long state;
enum sci_status hard_reset_status;
struct sci_base_state_machine sm;
bool ready_exit;
@ -147,70 +144,47 @@ struct sci_port_properties {
};
/**
* enum sci_port_states - This enumeration depicts all the states for the
* common port state machine.
*
*
* enum sci_port_states - port state machine states
* @SCI_PORT_STOPPED: port has successfully been stopped. In this state
* no new IO operations are permitted. This state is
* entered from the STOPPING state.
* @SCI_PORT_STOPPING: port is in the process of stopping. In this
* state no new IO operations are permitted, but
* existing IO operations are allowed to complete.
* This state is entered from the READY state.
* @SCI_PORT_READY: port is now ready. Thus, the user is able to
* perform IO operations on this port. This state is
* entered from the STARTING state.
* @SCI_PORT_SUB_WAITING: port is started and ready but has no active
* phys.
* @SCI_PORT_SUB_OPERATIONAL: port is started and ready and there is at
* least one phy operational.
* @SCI_PORT_SUB_CONFIGURING: port is started and there was an
* add/remove phy event. This state is only
* used in Automatic Port Configuration Mode
* (APC)
* @SCI_PORT_RESETTING: port is in the process of performing a hard
* reset. Thus, the user is unable to perform IO
* operations on this port. This state is entered
* from the READY state.
* @SCI_PORT_FAILED: port has failed a reset request. This state is
* entered when a port reset request times out. This
* state is entered from the RESETTING state.
*/
enum sci_port_states {
/**
* This state indicates that the port has successfully been stopped.
* In this state no new IO operations are permitted.
* This state is entered from the STOPPING state.
*/
SCI_PORT_STOPPED,
/**
* This state indicates that the port is in the process of stopping.
* In this state no new IO operations are permitted, but existing IO
* operations are allowed to complete.
* This state is entered from the READY state.
*/
SCI_PORT_STOPPING,
/**
* This state indicates the port is now ready. Thus, the user is
* able to perform IO operations on this port.
* This state is entered from the STARTING state.
*/
SCI_PORT_READY,
/**
* The substate where the port is started and ready but has no
* active phys.
*/
SCI_PORT_SUB_WAITING,
/**
* The substate where the port is started and ready and there is
* at least one phy operational.
*/
SCI_PORT_SUB_OPERATIONAL,
/**
* The substate where the port is started and there was an
* add/remove phy event. This state is only used in Automatic
* Port Configuration Mode (APC)
*/
SCI_PORT_SUB_CONFIGURING,
/**
* This state indicates the port is in the process of performing a hard
* reset. Thus, the user is unable to perform IO operations on this
* port.
* This state is entered from the READY state.
*/
SCI_PORT_RESETTING,
/**
* This state indicates the port has failed a reset request. This state
* is entered when a port reset request times out.
* This state is entered from the RESETTING state.
*/
SCI_PORT_FAILED,
};
#define PORT_STATES {\
C(PORT_STOPPED),\
C(PORT_STOPPING),\
C(PORT_READY),\
C(PORT_SUB_WAITING),\
C(PORT_SUB_OPERATIONAL),\
C(PORT_SUB_CONFIGURING),\
C(PORT_RESETTING),\
C(PORT_FAILED),\
}
#undef C
#define C(a) SCI_##a
enum sci_port_states PORT_STATES;
#undef C
static inline void sci_port_decrement_request_count(struct isci_port *iport)
{
@ -296,9 +270,6 @@ void sci_port_get_attached_sas_address(
struct isci_port *iport,
struct sci_sas_address *sas_address);
enum isci_status isci_port_get_state(
struct isci_port *isci_port);
void isci_port_formed(struct asd_sas_phy *);
void isci_port_deformed(struct asd_sas_phy *);
@ -309,4 +280,5 @@ void isci_port_init(
int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
struct isci_phy *iphy);
int isci_ata_check_ready(struct domain_device *dev);
#endif /* !defined(_ISCI_PORT_H_) */

drivers/scsi/isci/registers.h
@ -370,6 +370,27 @@ struct scu_iit_entry {
>> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \
)
/* ***************************************************************************** */
#define SMU_CLOCK_GATING_CONTROL_IDLE_ENABLE_SHIFT (0)
#define SMU_CLOCK_GATING_CONTROL_IDLE_ENABLE_MASK (0x00000001)
#define SMU_CLOCK_GATING_CONTROL_XCLK_ENABLE_SHIFT (1)
#define SMU_CLOCK_GATING_CONTROL_XCLK_ENABLE_MASK (0x00000002)
#define SMU_CLOCK_GATING_CONTROL_TXCLK_ENABLE_SHIFT (2)
#define SMU_CLOCK_GATING_CONTROL_TXCLK_ENABLE_MASK (0x00000004)
#define SMU_CLOCK_GATING_CONTROL_REGCLK_ENABLE_SHIFT (3)
#define SMU_CLOCK_GATING_CONTROL_REGCLK_ENABLE_MASK (0x00000008)
#define SMU_CLOCK_GATING_CONTROL_IDLE_TIMEOUT_SHIFT (16)
#define SMU_CLOCK_GATING_CONTROL_IDLE_TIMEOUT_MASK (0x000F0000)
#define SMU_CLOCK_GATING_CONTROL_FORCE_IDLE_SHIFT (31)
#define SMU_CLOCK_GATING_CONTROL_FORCE_IDLE_MASK (0x80000000)
#define SMU_CLOCK_GATING_CONTROL_RESERVED_MASK (0x7FF0FFF0)
#define SMU_CGUCR_GEN_VAL(name, value) \
SCU_GEN_VALUE(SMU_CLOCK_GATING_CONTROL_##name, value)
#define SMU_CGUCR_GEN_BIT(name) \
SCU_GEN_BIT(SMU_CLOCK_GATING_CONTROL_##name)
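/*
 * Assuming SCU_GEN_VALUE()/SCU_GEN_BIT() follow the usual
 * shift-and-mask pattern in this header, a hypothetical update of the
 * idle-timeout field would read-modify-write the register like this
 * (the 0x3 value is made up):
 */
u32 val = readl(&ihost->smu_registers->clock_gating_control);
val &= ~SMU_CLOCK_GATING_CONTROL_IDLE_TIMEOUT_MASK;
val |= SMU_CGUCR_GEN_VAL(IDLE_TIMEOUT, 0x3);   /* 0x3 << 16, within the mask */
writel(val, &ihost->smu_registers->clock_gating_control);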
/* -------------------------------------------------------------------------- */
#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT (0)
@ -992,8 +1013,10 @@ struct smu_registers {
u32 mmr_address_window;
/* 0x00A4 SMDW */
u32 mmr_data_window;
u32 reserved_A8;
u32 reserved_AC;
/* 0x00A8 CGUCR */
u32 clock_gating_control;
/* 0x00AC CGUPC */
u32 clock_gating_performance;
/* A whole bunch of reserved space */
u32 reserved_Bx[4];
u32 reserved_Cx[4];

drivers/scsi/isci/remote_device.c
@ -62,6 +62,16 @@
#include "scu_event_codes.h"
#include "task.h"
#undef C
#define C(a) (#a)
const char *dev_state_name(enum sci_remote_device_states state)
{
static const char * const strings[] = REMOTE_DEV_STATES;
return strings[state];
}
#undef C
/**
* isci_remote_device_not_ready() - This function is called by the ihost when
* the remote device is not ready. We mark the isci device as ready (not
@ -167,8 +177,8 @@ enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
case SCI_DEV_FAILED:
case SCI_DEV_FINAL:
default:
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
__func__, state);
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
__func__, dev_state_name(state));
return SCI_FAILURE_INVALID_STATE;
case SCI_DEV_STOPPED:
return SCI_SUCCESS;
@ -226,8 +236,8 @@ enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
case SCI_DEV_RESETTING:
case SCI_DEV_FINAL:
default:
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
__func__, state);
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
__func__, dev_state_name(state));
return SCI_FAILURE_INVALID_STATE;
case SCI_DEV_READY:
case SCI_STP_DEV_IDLE:
@ -246,8 +256,8 @@ enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev
enum sci_remote_device_states state = sm->current_state_id;
if (state != SCI_DEV_RESETTING) {
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
__func__, state);
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
__func__, dev_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@ -262,8 +272,8 @@ enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
enum sci_remote_device_states state = sm->current_state_id;
if (state != SCI_STP_DEV_CMD) {
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
__func__, state);
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
__func__, dev_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@ -287,8 +297,8 @@ enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
case SCI_SMP_DEV_IDLE:
case SCI_DEV_FINAL:
default:
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
__func__, state);
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
__func__, dev_state_name(state));
/* Return the frame back to the controller */
sci_controller_release_frame(ihost, frame_index);
return SCI_FAILURE_INVALID_STATE;
@ -502,8 +512,8 @@ enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
case SCI_DEV_RESETTING:
case SCI_DEV_FINAL:
default:
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
__func__, state);
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
__func__, dev_state_name(state));
return SCI_FAILURE_INVALID_STATE;
case SCI_DEV_READY:
/* attempt to start an io request for this device object. The remote
@ -637,8 +647,8 @@ enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
case SCI_DEV_FAILED:
case SCI_DEV_FINAL:
default:
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
__func__, state);
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
__func__, dev_state_name(state));
return SCI_FAILURE_INVALID_STATE;
case SCI_DEV_READY:
case SCI_STP_DEV_AWAIT_RESET:
@ -721,8 +731,8 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
case SCI_DEV_RESETTING:
case SCI_DEV_FINAL:
default:
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
__func__, state);
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
__func__, dev_state_name(state));
return SCI_FAILURE_INVALID_STATE;
case SCI_STP_DEV_IDLE:
case SCI_STP_DEV_CMD:
@ -853,8 +863,8 @@ static enum sci_status sci_remote_device_destruct(struct isci_remote_device *ide
struct isci_host *ihost;
if (state != SCI_DEV_STOPPED) {
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
__func__, state);
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
__func__, dev_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@ -1204,8 +1214,8 @@ static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
enum sci_status status;
if (state != SCI_DEV_STOPPED) {
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
__func__, state);
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
__func__, dev_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@ -1308,7 +1318,6 @@ void isci_remote_device_release(struct kref *kref)
clear_bit(IDEV_STOP_PENDING, &idev->flags);
clear_bit(IDEV_IO_READY, &idev->flags);
clear_bit(IDEV_GONE, &idev->flags);
clear_bit(IDEV_EH, &idev->flags);
smp_mb__before_clear_bit();
clear_bit(IDEV_ALLOCATED, &idev->flags);
wake_up(&ihost->eventq);
@ -1381,34 +1390,17 @@ void isci_remote_device_gone(struct domain_device *dev)
*
* status, zero indicates success.
*/
int isci_remote_device_found(struct domain_device *domain_dev)
int isci_remote_device_found(struct domain_device *dev)
{
struct isci_host *isci_host = dev_to_ihost(domain_dev);
struct isci_port *isci_port;
struct isci_phy *isci_phy;
struct asd_sas_port *sas_port;
struct asd_sas_phy *sas_phy;
struct isci_host *isci_host = dev_to_ihost(dev);
struct isci_port *isci_port = dev->port->lldd_port;
struct isci_remote_device *isci_device;
enum sci_status status;
dev_dbg(&isci_host->pdev->dev,
"%s: domain_device = %p\n", __func__, domain_dev);
"%s: domain_device = %p\n", __func__, dev);
wait_for_start(isci_host);
sas_port = domain_dev->port;
sas_phy = list_first_entry(&sas_port->phy_list, struct asd_sas_phy,
port_phy_el);
isci_phy = to_iphy(sas_phy);
isci_port = isci_phy->isci_port;
/* we are being called for a device on this port,
* so it has to come up eventually
*/
wait_for_completion(&isci_port->start_complete);
if ((isci_stopping == isci_port_get_state(isci_port)) ||
(isci_stopped == isci_port_get_state(isci_port)))
if (!isci_port)
return -ENODEV;
isci_device = isci_remote_device_alloc(isci_host, isci_port);
@ -1419,7 +1411,7 @@ int isci_remote_device_found(struct domain_device *domain_dev)
INIT_LIST_HEAD(&isci_device->node);
spin_lock_irq(&isci_host->scic_lock);
isci_device->domain_dev = domain_dev;
isci_device->domain_dev = dev;
isci_device->isci_port = isci_port;
list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
@ -1432,7 +1424,7 @@ int isci_remote_device_found(struct domain_device *domain_dev)
if (status == SCI_SUCCESS) {
/* device came up, advertise it to the world */
domain_dev->lldd_dev = isci_device;
dev->lldd_dev = isci_device;
} else
isci_put_device(isci_device);
spin_unlock_irq(&isci_host->scic_lock);


@ -82,10 +82,9 @@ struct isci_remote_device {
#define IDEV_START_PENDING 0
#define IDEV_STOP_PENDING 1
#define IDEV_ALLOCATED 2
#define IDEV_EH 3
#define IDEV_GONE 4
#define IDEV_IO_READY 5
#define IDEV_IO_NCQERROR 6
#define IDEV_GONE 3
#define IDEV_IO_READY 4
#define IDEV_IO_NCQERROR 5
unsigned long flags;
struct kref kref;
struct isci_port *isci_port;
@ -180,122 +179,101 @@ enum sci_status sci_remote_device_reset_complete(
/**
* enum sci_remote_device_states - This enumeration depicts all the states
* for the common remote device state machine.
* @SCI_DEV_INITIAL: Simply the initial state for the base remote device
* state machine.
*
* @SCI_DEV_STOPPED: This state indicates that the remote device has
* successfully been stopped. In this state no new IO operations are
* permitted. This state is entered from the INITIAL state. This state
* is entered from the STOPPING state.
*
* @SCI_DEV_STARTING: This state indicates that the remote device is in
* the process of becoming ready (i.e. starting). In this state no new
* IO operations are permitted. This state is entered from the STOPPED
* state.
*
* @SCI_DEV_READY: This state indicates the remote device is now ready.
* Thus, the user is able to perform IO operations on the remote device.
* This state is entered from the STARTING state.
*
* @SCI_STP_DEV_IDLE: This is the idle substate for the STP remote
* device. When there are no active IOs for the device it is in this
* state.
*
* @SCI_STP_DEV_CMD: This is the command state for the STP remote
* device. This state is entered when the device is processing a
* non-NCQ command. The device object will fail any new start IO
* requests until this command is complete.
*
* @SCI_STP_DEV_NCQ: This is the NCQ state for the STP remote device.
* This state is entered when the device is processing an NCQ request.
* It will remain in this state as long as one or more NCQ requests
* are being processed.
*
* @SCI_STP_DEV_NCQ_ERROR: This is the NCQ error state for the STP
* remote device. This state is entered when an SDB error FIS is
* received by the device object while in the NCQ state. The device
* object will only accept a READ LOG command while in this state.
*
* @SCI_STP_DEV_ATAPI_ERROR: This is the ATAPI error state for the STP
* ATAPI remote device. This state is entered when the ATAPI device
* sends an error status FIS without data while the device object is in
* the CMD state. A suspension event is expected in this state. The device
* object will resume right away.
*
* @SCI_STP_DEV_AWAIT_RESET: This READY substate indicates the
* device is waiting for a RESET task in order to recover from a
* hardware-specific error.
*
* @SCI_SMP_DEV_IDLE: This is the ready operational substate for the
* remote device. This is the normal operational state for a remote
* device.
*
* @SCI_SMP_DEV_CMD: This is the suspended state for the remote device.
* This is the state that the device is placed in when a RNC suspend is
* received by the SCU hardware.
*
* @SCI_DEV_STOPPING: This state indicates that the remote device is in
* the process of stopping. In this state no new IO operations are
* permitted, but existing IO operations are allowed to complete. This
* state is entered from the READY state. This state is entered from
* the FAILED state.
*
* @SCI_DEV_FAILED: This state indicates that the remote device has
* failed. In this state no new IO operations are permitted. This
* state is entered from the INITIALIZING state. This state is entered
* from the READY state.
*
* @SCI_DEV_RESETTING: This state indicates the device is being reset.
* In this state no new IO operations are permitted. This state is
* entered from the READY state.
*
* @SCI_DEV_FINAL: Simply the final state for the base remote device
* state machine.
*/
enum sci_remote_device_states {
/**
* Simply the initial state for the base remote device state machine.
*/
SCI_DEV_INITIAL,
/**
* This state indicates that the remote device has successfully been
* stopped. In this state no new IO operations are permitted.
* This state is entered from the INITIAL state.
* This state is entered from the STOPPING state.
*/
SCI_DEV_STOPPED,
/**
* This state indicates that the remote device is in the process of
* becoming ready (i.e. starting). In this state no new IO operations
* are permitted.
* This state is entered from the STOPPED state.
*/
SCI_DEV_STARTING,
/**
* This state indicates the remote device is now ready. Thus, the user
* is able to perform IO operations on the remote device.
* This state is entered from the STARTING state.
*/
SCI_DEV_READY,
/**
* This is the idle substate for the stp remote device. When there are no
* active IOs for the device it is in this state.
*/
SCI_STP_DEV_IDLE,
/**
* This is the command state for the STP remote device. This state is
* entered when the device is processing a non-NCQ command. The device object
* will fail any new start IO requests until this command is complete.
*/
SCI_STP_DEV_CMD,
/**
* This is the NCQ state for the STP remote device. This state is entered
* when the device is processing an NCQ request. It will remain in this state
* as long as one or more NCQ requests are being processed.
*/
SCI_STP_DEV_NCQ,
/**
* This is the NCQ error state for the STP remote device. This state is
* entered when an SDB error FIS is received by the device object while in the
* NCQ state. The device object will only accept a READ LOG command while in
* this state.
*/
SCI_STP_DEV_NCQ_ERROR,
/**
* This is the ATAPI error state for the STP ATAPI remote device.
* This state is entered when the ATAPI device sends an error status FIS
* without data while the device object is in the CMD state.
* A suspension event is expected in this state.
* The device object will resume right away.
*/
SCI_STP_DEV_ATAPI_ERROR,
/**
* This is the READY substate that indicates the device is waiting for a
* RESET task in order to recover from a hardware-specific error.
*/
SCI_STP_DEV_AWAIT_RESET,
/**
* This is the ready operational substate for the remote device. This is the
* normal operational state for a remote device.
*/
SCI_SMP_DEV_IDLE,
/**
* This is the suspended state for the remote device. This is the state that
* the device is placed in when a RNC suspend is received by the SCU hardware.
*/
SCI_SMP_DEV_CMD,
/**
* This state indicates that the remote device is in the process of
* stopping. In this state no new IO operations are permitted, but
* existing IO operations are allowed to complete.
* This state is entered from the READY state.
* This state is entered from the FAILED state.
*/
SCI_DEV_STOPPING,
/**
* This state indicates that the remote device has failed.
* In this state no new IO operations are permitted.
* This state is entered from the INITIALIZING state.
* This state is entered from the READY state.
*/
SCI_DEV_FAILED,
/**
* This state indicates the device is being reset.
* In this state no new IO operations are permitted.
* This state is entered from the READY state.
*/
SCI_DEV_RESETTING,
/**
* Simply the final state for the base remote device state machine.
*/
SCI_DEV_FINAL,
};
#define REMOTE_DEV_STATES {\
C(DEV_INITIAL),\
C(DEV_STOPPED),\
C(DEV_STARTING),\
C(DEV_READY),\
C(STP_DEV_IDLE),\
C(STP_DEV_CMD),\
C(STP_DEV_NCQ),\
C(STP_DEV_NCQ_ERROR),\
C(STP_DEV_ATAPI_ERROR),\
C(STP_DEV_AWAIT_RESET),\
C(SMP_DEV_IDLE),\
C(SMP_DEV_CMD),\
C(DEV_STOPPING),\
C(DEV_FAILED),\
C(DEV_RESETTING),\
C(DEV_FINAL),\
}
#undef C
#define C(a) SCI_##a
enum sci_remote_device_states REMOTE_DEV_STATES;
#undef C
const char *dev_state_name(enum sci_remote_device_states state);
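
Note that dev_state_name() indexes the generated table directly, so it assumes callers always pass a valid enumerator. A bounds-checked variant (illustrative only, not what the driver does; assumes the kernel's ARRAY_SIZE() helper) would be:

#undef C
#define C(a) (#a)
static inline const char *dev_state_name_checked(enum sci_remote_device_states state)
{
	static const char * const strings[] = REMOTE_DEV_STATES;

	/* Fall back to a fixed string for out-of-range values. */
	if ((unsigned int)state >= ARRAY_SIZE(strings))
		return "UNKNOWN";
	return strings[state];
}
#undef C
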
static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc)
{


@ -60,18 +60,15 @@
#include "scu_event_codes.h"
#include "scu_task_context.h"
#undef C
#define C(a) (#a)
const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
{
static const char * const strings[] = RNC_STATES;
return strings[state];
}
#undef C
/**
*
* @sci_rnc: The RNC for which the posted request is being made.
*
* This method returns true if the RNC is not in the initial state; in all
* other states the RNC is considered active. The destroy request of the
* state machine drives the RNC back to the initial state. If the state
* machine changes then this routine will also have to be changed. Returns
* bool: true if the state machine is not in the initial state, false
* otherwise.
*/
/**
*


@ -85,61 +85,50 @@ struct sci_remote_node_context;
typedef void (*scics_sds_remote_node_context_callback)(void *);
/**
* This is the enumeration of the remote node context states.
* enum sci_remote_node_context_states
* @SCI_RNC_INITIAL: initial state for a remote node context. On a resume
* request the remote node context will transition to the posting state.
*
* @SCI_RNC_POSTING: transition state that posts the RNi to the hardware. Once
* the RNC is posted the remote node context will be made ready.
*
* @SCI_RNC_INVALIDATING: transition state that will post an RNC invalidate to
* the hardware. Once the invalidate is complete the remote node context will
* transition to the posting state.
*
* @SCI_RNC_RESUMING: transition state that will post an RNC resume to the
* hardware. Once the event notification of resume complete is received the
* remote node context will transition to the ready state.
*
* @SCI_RNC_READY: state that the remote node context must be in to accept io
* request operations.
*
* @SCI_RNC_TX_SUSPENDED: state that the remote node context transitions to when
* it gets a TX suspend notification from the hardware.
*
* @SCI_RNC_TX_RX_SUSPENDED: state that the remote node context transitions to
* when it gets a TX RX suspend notification from the hardware.
*
* @SCI_RNC_AWAIT_SUSPENSION: wait state for the remote node context that waits
* for a suspend notification from the hardware. This state is entered when
* either there is a request to suspend the remote node context or when there is
* a TC completion where the remote node will be suspended by the hardware.
*/
enum scis_sds_remote_node_context_states {
/**
* This state is the initial state for a remote node context. On a resume
* request the remote node context will transition to the posting state.
*/
SCI_RNC_INITIAL,
/**
* This is a transition state that posts the RNi to the hardware. Once the RNC
* is posted the remote node context will be made ready.
*/
SCI_RNC_POSTING,
/**
* This is a transition state that will post an RNC invalidate to the
* hardware. Once the invalidate is complete the remote node context will
* transition to the posting state.
*/
SCI_RNC_INVALIDATING,
/**
* This is a transition state that will post an RNC resume to the hardware.
* Once the event notification of resume complete is received the remote node
* context will transition to the ready state.
*/
SCI_RNC_RESUMING,
/**
* This is the state that the remote node context must be in to accept io
* request operations.
*/
SCI_RNC_READY,
/**
* This is the state that the remote node context transitions to when it gets
* a TX suspend notification from the hardware.
*/
SCI_RNC_TX_SUSPENDED,
/**
* This is the state that the remote node context transitions to when it gets
* a TX RX suspend notification from the hardware.
*/
SCI_RNC_TX_RX_SUSPENDED,
/**
* This state is a wait state for the remote node context that waits for a
* suspend notification from the hardware. This state is entered when either
* there is a request to supend the remote node context or when there is a TC
* completion where the remote node will be suspended by the hardware.
*/
SCI_RNC_AWAIT_SUSPENSION
};
#define RNC_STATES {\
C(RNC_INITIAL),\
C(RNC_POSTING),\
C(RNC_INVALIDATING),\
C(RNC_RESUMING),\
C(RNC_READY),\
C(RNC_TX_SUSPENDED),\
C(RNC_TX_RX_SUSPENDED),\
C(RNC_AWAIT_SUSPENSION),\
}
#undef C
#define C(a) SCI_##a
enum scis_sds_remote_node_context_states RNC_STATES;
#undef C
const char *rnc_state_name(enum scis_sds_remote_node_context_states state);
/**
*


@ -53,6 +53,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <scsi/scsi_cmnd.h>
#include "isci.h"
#include "task.h"
#include "request.h"
@ -60,6 +61,16 @@
#include "scu_event_codes.h"
#include "sas.h"
#undef C
#define C(a) (#a)
const char *req_state_name(enum sci_base_request_states state)
{
static const char * const strings[] = REQUEST_STATES;
return strings[state];
}
#undef C
static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
int idx)
{
@ -264,6 +275,141 @@ static void scu_ssp_reqeust_construct_task_context(
task_context->response_iu_lower = lower_32_bits(dma_addr);
}
static u8 scu_bg_blk_size(struct scsi_device *sdp)
{
switch (sdp->sector_size) {
case 512:
return 0;
case 1024:
return 1;
case 4096:
return 3;
default:
return 0xff;
}
}
static u32 scu_dif_bytes(u32 len, u32 sector_size)
{
return (len >> ilog2(sector_size)) * 8;
}
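
For a sense of the overhead being computed: DIF adds one 8-byte protection tuple per logical block, so scu_dif_bytes(32768, 512) = (32768 >> 9) * 8 = 512 extra bytes for a 32 KiB transfer of 512-byte sectors, and only 64 bytes for the same transfer with 4 KiB sectors. A standalone check of that arithmetic (plain C, outside the driver):

#include <assert.h>

/* Mirrors scu_dif_bytes(): one 8-byte tuple per logical block.
 * Assumes sector_size is a power of two, as the driver does.
 */
static unsigned int dif_bytes(unsigned int len, unsigned int sector_size)
{
	unsigned int shift = 0;

	while ((1u << shift) < sector_size)
		shift++;			/* ilog2(sector_size) */
	return (len >> shift) * 8;
}

int main(void)
{
	assert(dif_bytes(32768, 512) == 512);	/* 64 sectors * 8 bytes */
	assert(dif_bytes(32768, 4096) == 64);	/* 8 sectors * 8 bytes */
	return 0;
}
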
static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
{
struct scu_task_context *tc = ireq->tc;
struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
u8 blk_sz = scu_bg_blk_size(scmd->device);
tc->block_guard_enable = 1;
tc->blk_prot_en = 1;
tc->blk_sz = blk_sz;
/* DIF write insert */
tc->blk_prot_func = 0x2;
tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
scmd->device->sector_size);
/* always init to 0, used by hw */
tc->interm_crc_val = 0;
tc->init_crc_seed = 0;
tc->app_tag_verify = 0;
tc->app_tag_gen = 0;
tc->ref_tag_seed_verify = 0;
/* always init to same as bg_blk_sz */
tc->UD_bytes_immed_val = scmd->device->sector_size;
tc->reserved_DC_0 = 0;
/* always init to 8 */
tc->DIF_bytes_immed_val = 8;
tc->reserved_DC_1 = 0;
tc->bgc_blk_sz = scmd->device->sector_size;
tc->reserved_E0_0 = 0;
tc->app_tag_gen_mask = 0;
/** setup block guard control **/
tc->bgctl = 0;
/* DIF write insert */
tc->bgctl_f.op = 0x2;
tc->app_tag_verify_mask = 0;
/* must init to 0 for hw */
tc->blk_guard_err = 0;
tc->reserved_E8_0 = 0;
if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
else if (type & SCSI_PROT_DIF_TYPE3)
tc->ref_tag_seed_gen = 0;
}
static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
{
struct scu_task_context *tc = ireq->tc;
struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
u8 blk_sz = scu_bg_blk_size(scmd->device);
tc->block_guard_enable = 1;
tc->blk_prot_en = 1;
tc->blk_sz = blk_sz;
/* DIF read strip */
tc->blk_prot_func = 0x1;
tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
scmd->device->sector_size);
/* always init to 0, used by hw */
tc->interm_crc_val = 0;
tc->init_crc_seed = 0;
tc->app_tag_verify = 0;
tc->app_tag_gen = 0;
if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
else if (type & SCSI_PROT_DIF_TYPE3)
tc->ref_tag_seed_verify = 0;
/* always init to same as bg_blk_sz */
tc->UD_bytes_immed_val = scmd->device->sector_size;
tc->reserved_DC_0 = 0;
/* always init to 8 */
tc->DIF_bytes_immed_val = 8;
tc->reserved_DC_1 = 0;
tc->bgc_blk_sz = scmd->device->sector_size;
tc->reserved_E0_0 = 0;
tc->app_tag_gen_mask = 0;
/** setup block guard control **/
tc->bgctl = 0;
/* DIF read strip */
tc->bgctl_f.crc_verify = 1;
tc->bgctl_f.op = 0x1;
if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) {
tc->bgctl_f.ref_tag_chk = 1;
tc->bgctl_f.app_f_detect = 1;
} else if (type & SCSI_PROT_DIF_TYPE3)
tc->bgctl_f.app_ref_f_detect = 1;
tc->app_tag_verify_mask = 0;
/* must init to 0 for hw */
tc->blk_guard_err = 0;
tc->reserved_E8_0 = 0;
tc->ref_tag_seed_gen = 0;
}
/**
* This method will fill in the SCU Task Context for an SSP IO request.
* @sci_req:
@ -274,6 +420,10 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
u32 len)
{
struct scu_task_context *task_context = ireq->tc;
struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr;
struct scsi_cmnd *scmd = sas_task->uldd_task;
u8 prot_type = scsi_get_prot_type(scmd);
u8 prot_op = scsi_get_prot_op(scmd);
scu_ssp_reqeust_construct_task_context(ireq, task_context);
@ -296,6 +446,13 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
if (task_context->transfer_length_bytes > 0)
sci_request_build_sgl(ireq);
if (prot_type != SCSI_PROT_DIF_TYPE0) {
if (prot_op == SCSI_PROT_READ_STRIP)
scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op);
else if (prot_op == SCSI_PROT_WRITE_INSERT)
scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op);
}
}
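
For context on that dispatch: in the SCSI DIF model, SCSI_PROT_WRITE_INSERT asks the HBA to generate and append the 8-byte protection tuples on the way to the media, while SCSI_PROT_READ_STRIP asks it to verify and remove them on the way back, which is exactly what the blk_prot_func codes 0x2 and 0x1 select above. A hypothetical helper expressing the same mapping (not part of the driver):

static u8 scu_bg_func_for_op(unsigned char prot_op)
{
	switch (prot_op) {
	case SCSI_PROT_READ_STRIP:	/* verify and strip tuples on read */
		return 0x1;
	case SCSI_PROT_WRITE_INSERT:	/* generate and insert tuples on write */
		return 0x2;
	default:			/* no block guard processing */
		return 0;
	}
}
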
/**
@ -519,18 +676,12 @@ sci_io_request_construct_sata(struct isci_request *ireq,
if (test_bit(IREQ_TMF, &ireq->flags)) {
struct isci_tmf *tmf = isci_request_access_tmf(ireq);
if (tmf->tmf_code == isci_tmf_sata_srst_high ||
tmf->tmf_code == isci_tmf_sata_srst_low) {
scu_stp_raw_request_construct_task_context(ireq);
return SCI_SUCCESS;
} else {
dev_err(&ireq->owning_controller->pdev->dev,
"%s: Request 0x%p received un-handled SAT "
"management protocol 0x%x.\n",
__func__, ireq, tmf->tmf_code);
dev_err(&ireq->owning_controller->pdev->dev,
"%s: Request 0x%p received un-handled SAT "
"management protocol 0x%x.\n",
__func__, ireq, tmf->tmf_code);
return SCI_FAILURE;
}
return SCI_FAILURE;
}
if (!sas_protocol_ata(task->task_proto)) {
@ -627,34 +778,6 @@ static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *
return status;
}
enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
{
enum sci_status status = SCI_SUCCESS;
/* check for management protocols */
if (test_bit(IREQ_TMF, &ireq->flags)) {
struct isci_tmf *tmf = isci_request_access_tmf(ireq);
if (tmf->tmf_code == isci_tmf_sata_srst_high ||
tmf->tmf_code == isci_tmf_sata_srst_low) {
scu_stp_raw_request_construct_task_context(ireq);
} else {
dev_err(&ireq->owning_controller->pdev->dev,
"%s: Request 0x%p received un-handled SAT "
"Protocol 0x%x.\n",
__func__, ireq, tmf->tmf_code);
return SCI_FAILURE;
}
}
if (status != SCI_SUCCESS)
return status;
sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
return status;
}
/**
* sci_req_tx_bytes - bytes transferred when reply underruns request
* @ireq: request that was terminated early
@ -756,9 +879,6 @@ sci_io_request_terminate(struct isci_request *ireq)
case SCI_REQ_STP_PIO_WAIT_FRAME:
case SCI_REQ_STP_PIO_DATA_IN:
case SCI_REQ_STP_PIO_DATA_OUT:
case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
case SCI_REQ_ATAPI_WAIT_H2D:
case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
case SCI_REQ_ATAPI_WAIT_D2H:
@ -800,7 +920,8 @@ enum sci_status sci_request_complete(struct isci_request *ireq)
state = ireq->sm.current_state_id;
if (WARN_ONCE(state != SCI_REQ_COMPLETED,
"isci: request completion from wrong state (%d)\n", state))
"isci: request completion from wrong state (%s)\n",
req_state_name(state)))
return SCI_FAILURE_INVALID_STATE;
if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
@ -821,8 +942,8 @@ enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
state = ireq->sm.current_state_id;
if (state != SCI_REQ_STP_PIO_DATA_IN) {
dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n",
__func__, event_code, state);
dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n",
__func__, event_code, req_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
@ -1938,59 +2059,6 @@ sci_io_request_frame_handler(struct isci_request *ireq,
return status;
}
case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
struct dev_to_host_fis *frame_header;
u32 *frame_buffer;
status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
(void **)&frame_header);
if (status != SCI_SUCCESS) {
dev_err(&ihost->pdev->dev,
"%s: SCIC IO Request 0x%p could not get frame "
"header for frame index %d, status %x\n",
__func__,
stp_req,
frame_index,
status);
return status;
}
switch (frame_header->fis_type) {
case FIS_REGD2H:
sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
(void **)&frame_buffer);
sci_controller_copy_sata_response(&ireq->stp.rsp,
frame_header,
frame_buffer);
/* The command has completed with error */
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
break;
default:
dev_warn(&ihost->pdev->dev,
"%s: IO Request:0x%p Frame Id:%d protocol "
"violation occurred\n",
__func__,
stp_req,
frame_index);
ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
break;
}
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
/* Frame has been decoded return it to the controller */
sci_controller_release_frame(ihost, frame_index);
return status;
}
case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
struct sas_task *task = isci_request_access_task(ireq);
@ -2088,57 +2156,6 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
return status;
}
static enum sci_status
stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
u32 completion_code)
{
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
break;
default:
/*
* All other completion status cause the IO to be complete.
* If a NAK was received, then it is up to the user to retry
* the request.
*/
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
break;
}
return SCI_SUCCESS;
}
static enum sci_status
stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
u32 completion_code)
{
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
ireq->scu_status = SCU_TASK_DONE_GOOD;
ireq->sci_status = SCI_SUCCESS;
sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
break;
default:
/* All other completion status cause the IO to be complete. If
* a NAK was received, then it is up to the user to retry the
* request.
*/
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
break;
}
return SCI_SUCCESS;
}
static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
enum sci_base_request_states next)
{
@ -2284,14 +2301,6 @@ sci_io_request_tc_completion(struct isci_request *ireq,
case SCI_REQ_STP_PIO_DATA_OUT:
return pio_data_out_tx_done_tc_event(ireq, completion_code);
case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
completion_code);
case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
completion_code);
case SCI_REQ_ABORTING:
return request_aborting_state_tc_event(ireq,
completion_code);
@ -2308,12 +2317,8 @@ sci_io_request_tc_completion(struct isci_request *ireq,
return atapi_data_tc_completion_handler(ireq, completion_code);
default:
dev_warn(&ihost->pdev->dev,
"%s: SCIC IO Request given task completion "
"notification %x while in wrong state %d\n",
__func__,
completion_code,
state);
dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n",
__func__, completion_code, req_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
@ -3065,10 +3070,6 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
*/
if (!task && dev->dev_type == SAS_END_DEV) {
state = SCI_REQ_TASK_WAIT_TC_COMP;
} else if (!task &&
(isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
state = SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED;
} else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
state = SCI_REQ_SMP_WAIT_RESP;
} else if (task && sas_protocol_ata(task->task_proto) &&
@ -3125,31 +3126,6 @@ static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_ba
ireq->target_device->working_request = ireq;
}
static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
{
struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
ireq->target_device->working_request = ireq;
}
static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
{
struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
struct scu_task_context *tc = ireq->tc;
struct host_to_dev_fis *h2d_fis;
enum sci_status status;
/* Clear the SRST bit */
h2d_fis = &ireq->stp.cmd;
h2d_fis->control = 0;
/* Clear the TC control bit */
tc->control_frame = 0;
status = sci_controller_continue_io(ireq);
WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
}
static const struct sci_base_state sci_request_state_table[] = {
[SCI_REQ_INIT] = { },
[SCI_REQ_CONSTRUCTED] = { },
@ -3168,13 +3144,6 @@ static const struct sci_base_state sci_request_state_table[] = {
[SCI_REQ_STP_PIO_DATA_OUT] = { },
[SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
[SCI_REQ_STP_UDMA_WAIT_D2H] = { },
[SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
.enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
},
[SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
.enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
},
[SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
[SCI_REQ_TASK_WAIT_TC_COMP] = { },
[SCI_REQ_TASK_WAIT_TC_RESP] = { },
[SCI_REQ_SMP_WAIT_RESP] = { },
@ -3649,8 +3618,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide
/* Cause this task to be scheduled in the SCSI error
* handler thread.
*/
isci_execpath_callback(ihost, task,
sas_task_abort);
sas_task_abort(task);
/* Change the status, since we are holding
* the I/O until it is managed by the SCSI


@ -182,138 +182,103 @@ static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req)
}
/**
* enum sci_base_request_states - This enumeration depicts all the states for
* the common request state machine.
* enum sci_base_request_states - request state machine states
*
* @SCI_REQ_INIT: Simply the initial state for the base request state machine.
*
* @SCI_REQ_CONSTRUCTED: This state indicates that the request has been
* constructed. This state is entered from the INITIAL state.
*
* @SCI_REQ_STARTED: This state indicates that the request has been started.
* This state is entered from the CONSTRUCTED state.
*
* @SCI_REQ_STP_UDMA_WAIT_TC_COMP:
* @SCI_REQ_STP_UDMA_WAIT_D2H:
* @SCI_REQ_STP_NON_DATA_WAIT_H2D:
* @SCI_REQ_STP_NON_DATA_WAIT_D2H:
*
* @SCI_REQ_STP_PIO_WAIT_H2D: While in this state the IO request object is
* waiting for the TC completion notification for the H2D Register FIS
*
* @SCI_REQ_STP_PIO_WAIT_FRAME: While in this state the IO request object is
* waiting for either a PIO Setup FIS or a D2H register FIS. The type of frame
* received is based on the result of the prior frame and line conditions.
*
* @SCI_REQ_STP_PIO_DATA_IN: While in this state the IO request object is
* waiting for a DATA frame from the device.
*
* @SCI_REQ_STP_PIO_DATA_OUT: While in this state the IO request object is
* waiting to transmit the next data frame to the device.
*
* @SCI_REQ_ATAPI_WAIT_H2D: While in this state the IO request object is
* waiting for the TC completion notification for the H2D Register FIS
*
* @SCI_REQ_ATAPI_WAIT_PIO_SETUP: While in this state the IO request object is
* waiting for a PIO Setup FIS.
*
* @SCI_REQ_ATAPI_WAIT_D2H: Non-data IO transitions to this state after
* receiving a TC completion. While in this state the IO request object
* is waiting for a D2H status frame as a UF.
*
* @SCI_REQ_ATAPI_WAIT_TC_COMP: When transmitting raw frames hardware reports
* task context completion after every frame submission, so in the
* non-accelerated case we need to expect the completion for the "cdb" frame.
*
* @SCI_REQ_TASK_WAIT_TC_COMP: The AWAIT_TC_COMPLETION sub-state indicates that
* the started raw task management request is waiting for the transmission of
* the initial frame (i.e. command, task, etc.).
*
* @SCI_REQ_TASK_WAIT_TC_RESP: This sub-state indicates that the started task
* management request is waiting for the reception of an unsolicited frame
* (i.e. response IU).
*
* @SCI_REQ_SMP_WAIT_RESP: This sub-state indicates that the started task
* management request is waiting for the reception of an unsolicited frame
* (i.e. response IU).
*
* @SCI_REQ_SMP_WAIT_TC_COMP: The AWAIT_TC_COMPLETION sub-state indicates that
* the started SMP request is waiting for the transmission of the initial frame
* (i.e. command, task, etc.).
*
* @SCI_REQ_COMPLETED: This state indicates that the request has completed.
* This state is entered from the STARTED state. This state is entered from the
* ABORTING state.
*
* @SCI_REQ_ABORTING: This state indicates that the request is in the process
* of being terminated/aborted. This state is entered from the CONSTRUCTED
* state. This state is entered from the STARTED state.
*
* @SCI_REQ_FINAL: Simply the final state for the base request state machine.
*/
enum sci_base_request_states {
/*
* Simply the initial state for the base request state machine.
*/
SCI_REQ_INIT,
/*
* This state indicates that the request has been constructed.
* This state is entered from the INITIAL state.
*/
SCI_REQ_CONSTRUCTED,
/*
* This state indicates that the request has been started. This state
* is entered from the CONSTRUCTED state.
*/
SCI_REQ_STARTED,
SCI_REQ_STP_UDMA_WAIT_TC_COMP,
SCI_REQ_STP_UDMA_WAIT_D2H,
SCI_REQ_STP_NON_DATA_WAIT_H2D,
SCI_REQ_STP_NON_DATA_WAIT_D2H,
SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED,
SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG,
SCI_REQ_STP_SOFT_RESET_WAIT_D2H,
/*
* While in this state the IO request object is waiting for the TC
* completion notification for the H2D Register FIS
*/
SCI_REQ_STP_PIO_WAIT_H2D,
/*
* While in this state the IO request object is waiting for either a
* PIO Setup FIS or a D2H register FIS. The type of frame received is
* based on the result of the prior frame and line conditions.
*/
SCI_REQ_STP_PIO_WAIT_FRAME,
/*
* While in this state the IO request object is waiting for a DATA
* frame from the device.
*/
SCI_REQ_STP_PIO_DATA_IN,
/*
* While in this state the IO request object is waiting to transmit
* the next data frame to the device.
*/
SCI_REQ_STP_PIO_DATA_OUT,
/*
* While in this state the IO request object is waiting for the TC
* completion notification for the H2D Register FIS
*/
SCI_REQ_ATAPI_WAIT_H2D,
/*
* While in this state the IO request object is waiting for a
* PIO Setup FIS.
*/
SCI_REQ_ATAPI_WAIT_PIO_SETUP,
/*
* Non-data IO transitions to this state after receiving a TC
* completion. While in this state the IO request object is waiting
* for a D2H status frame as a UF.
*/
SCI_REQ_ATAPI_WAIT_D2H,
/*
* When transmitting raw frames hardware reports task context completion
* after every frame submission, so in the non-accelerated case we need
* to expect the completion for the "cdb" frame.
*/
SCI_REQ_ATAPI_WAIT_TC_COMP,
/*
* The AWAIT_TC_COMPLETION sub-state indicates that the started raw
* task management request is waiting for the transmission of the
* initial frame (i.e. command, task, etc.).
*/
SCI_REQ_TASK_WAIT_TC_COMP,
/*
* This sub-state indicates that the started task management request
* is waiting for the reception of an unsolicited frame
* (i.e. response IU).
*/
SCI_REQ_TASK_WAIT_TC_RESP,
/*
* This sub-state indicates that the started task management request
* is waiting for the reception of an unsolicited frame
* (i.e. response IU).
*/
SCI_REQ_SMP_WAIT_RESP,
/*
* The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
* request is waiting for the transmission of the initial frame
* (i.e. command, task, etc.).
*/
SCI_REQ_SMP_WAIT_TC_COMP,
/*
* This state indicates that the request has completed.
* This state is entered from the STARTED state. This state is entered
* from the ABORTING state.
*/
SCI_REQ_COMPLETED,
/*
* This state indicates that the request is in the process of being
* terminated/aborted.
* This state is entered from the CONSTRUCTED state.
* This state is entered from the STARTED state.
*/
SCI_REQ_ABORTING,
/*
* Simply the final state for the base request state machine.
*/
SCI_REQ_FINAL,
};
#define REQUEST_STATES {\
C(REQ_INIT),\
C(REQ_CONSTRUCTED),\
C(REQ_STARTED),\
C(REQ_STP_UDMA_WAIT_TC_COMP),\
C(REQ_STP_UDMA_WAIT_D2H),\
C(REQ_STP_NON_DATA_WAIT_H2D),\
C(REQ_STP_NON_DATA_WAIT_D2H),\
C(REQ_STP_PIO_WAIT_H2D),\
C(REQ_STP_PIO_WAIT_FRAME),\
C(REQ_STP_PIO_DATA_IN),\
C(REQ_STP_PIO_DATA_OUT),\
C(REQ_ATAPI_WAIT_H2D),\
C(REQ_ATAPI_WAIT_PIO_SETUP),\
C(REQ_ATAPI_WAIT_D2H),\
C(REQ_ATAPI_WAIT_TC_COMP),\
C(REQ_TASK_WAIT_TC_COMP),\
C(REQ_TASK_WAIT_TC_RESP),\
C(REQ_SMP_WAIT_RESP),\
C(REQ_SMP_WAIT_TC_COMP),\
C(REQ_COMPLETED),\
C(REQ_ABORTING),\
C(REQ_FINAL),\
}
#undef C
#define C(a) SCI_##a
enum sci_base_request_states REQUEST_STATES;
#undef C
const char *req_state_name(enum sci_base_request_states state);
enum sci_status sci_request_start(struct isci_request *ireq);
enum sci_status sci_io_request_terminate(struct isci_request *ireq);
@ -446,10 +411,7 @@ sci_task_request_construct(struct isci_host *ihost,
struct isci_remote_device *idev,
u16 io_tag,
struct isci_request *ireq);
enum sci_status
sci_task_request_construct_ssp(struct isci_request *ireq);
enum sci_status
sci_task_request_construct_sata(struct isci_request *ireq);
enum sci_status sci_task_request_construct_ssp(struct isci_request *ireq);
void sci_smp_request_copy_response(struct isci_request *ireq);
static inline int isci_task_is_ncq_recovery(struct sas_task *task)


@ -866,9 +866,9 @@ struct scu_task_context {
struct transport_snapshot snapshot; /* read only set to 0 */
/* OFFSET 0x5C */
u32 block_protection_enable:1;
u32 block_size:2;
u32 block_protection_function:2;
u32 blk_prot_en:1;
u32 blk_sz:2;
u32 blk_prot_func:2;
u32 reserved_5C_0:9;
u32 active_sgl_element:2; /* read only set to 0 */
u32 sgl_exhausted:1; /* read only set to 0 */
@ -896,33 +896,56 @@ struct scu_task_context {
u32 reserved_C4_CC[3];
/* OFFSET 0xD0 */
u32 intermediate_crc_value:16;
u32 initial_crc_seed:16;
u32 interm_crc_val:16;
u32 init_crc_seed:16;
/* OFFSET 0xD4 */
u32 application_tag_for_verify:16;
u32 application_tag_for_generate:16;
u32 app_tag_verify:16;
u32 app_tag_gen:16;
/* OFFSET 0xD8 */
u32 reference_tag_seed_for_verify_function;
u32 ref_tag_seed_verify;
/* OFFSET 0xDC */
u32 reserved_DC;
u32 UD_bytes_immed_val:13;
u32 reserved_DC_0:3;
u32 DIF_bytes_immed_val:4;
u32 reserved_DC_1:12;
/* OFFSET 0xE0 */
u32 reserved_E0_0:16;
u32 application_tag_mask_for_generate:16;
u32 bgc_blk_sz:13;
u32 reserved_E0_0:3;
u32 app_tag_gen_mask:16;
/* OFFSET 0xE4 */
u32 block_protection_control:16;
u32 application_tag_mask_for_verify:16;
union {
u16 bgctl;
struct {
u16 crc_verify:1;
u16 app_tag_chk:1;
u16 ref_tag_chk:1;
u16 op:2;
u16 legacy:1;
u16 invert_crc_seed:1;
u16 ref_tag_gen:1;
u16 fixed_ref_tag:1;
u16 invert_crc:1;
u16 app_ref_f_detect:1;
u16 uninit_dif_check_err:1;
u16 uninit_dif_bypass:1;
u16 app_f_detect:1;
u16 reserved_0:2;
} bgctl_f;
};
u16 app_tag_verify_mask;
/* OFFSET 0xE8 */
u32 block_protection_error:8;
u32 blk_guard_err:8;
u32 reserved_E8_0:24;
/* OFFSET 0xEC */
u32 reference_tag_seed_for_verify;
u32 ref_tag_seed_gen;
/* OFFSET 0xF0 */
u32 intermediate_crc_valid_snapshot:16;
@ -937,6 +960,6 @@ struct scu_task_context {
/* OFFSET 0xFC */
u32 reference_tag_seed_for_generate_function_snapshot;
};
} __packed;
#endif /* _SCU_TASK_CONTEXT_H_ */
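
The bgctl/bgctl_f union above is what lets the DIF setup routines clear every block-guard control bit with a single tc->bgctl = 0 and then set individual fields by name. A minimal standalone sketch of the same union-with-bitfields idiom (field names and layout illustrative; portable C leaves bitfield order implementation-defined, which the hardware-facing struct above pins down with __packed and a known ABI):

#include <stdio.h>
#include <stdint.h>

union bg_control {
	uint16_t raw;
	struct {
		uint16_t crc_verify:1;
		uint16_t ref_tag_chk:1;
		uint16_t op:2;
	} f;
};

int main(void)
{
	union bg_control bgc;

	bgc.raw = 0;		/* clear all control bits at once */
	bgc.f.op = 0x1;		/* e.g. DIF read strip */
	bgc.f.crc_verify = 1;
	printf("raw=0x%04x\n", bgc.raw);
	return 0;
}
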


@ -96,8 +96,7 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
__func__, task, response, status);
task->lldd_task = NULL;
isci_execpath_callback(ihost, task, task->task_done);
task->task_done(task);
break;
case isci_perform_aborted_io_completion:
@ -117,8 +116,7 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
"%s: Error - task = %p, response=%d, "
"status=%d\n",
__func__, task, response, status);
isci_execpath_callback(ihost, task, sas_task_abort);
sas_task_abort(task);
break;
default:
@ -249,46 +247,6 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
return 0;
}
static enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq)
{
struct isci_tmf *isci_tmf;
enum sci_status status;
if (!test_bit(IREQ_TMF, &ireq->flags))
return SCI_FAILURE;
isci_tmf = isci_request_access_tmf(ireq);
switch (isci_tmf->tmf_code) {
case isci_tmf_sata_srst_high:
case isci_tmf_sata_srst_low: {
struct host_to_dev_fis *fis = &ireq->stp.cmd;
memset(fis, 0, sizeof(*fis));
fis->fis_type = 0x27;
fis->flags &= ~0x80;
fis->flags &= 0xF0;
if (isci_tmf->tmf_code == isci_tmf_sata_srst_high)
fis->control |= ATA_SRST;
else
fis->control &= ~ATA_SRST;
break;
}
/* other management commands go here... */
default:
return SCI_FAILURE;
}
/* core builds the protocol specific request
* based on the h2d fis.
*/
status = sci_task_request_construct_sata(ireq);
return status;
}
static struct isci_request *isci_task_request_build(struct isci_host *ihost,
struct isci_remote_device *idev,
u16 tag, struct isci_tmf *isci_tmf)
@ -328,13 +286,6 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
return NULL;
}
if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
isci_tmf->proto = SAS_PROTOCOL_SATA;
status = isci_sata_management_task_request_build(ireq);
if (status != SCI_SUCCESS)
return NULL;
}
return ireq;
}
@ -873,53 +824,20 @@ static int isci_task_send_lu_reset_sas(
return ret;
}
static int isci_task_send_lu_reset_sata(struct isci_host *ihost,
struct isci_remote_device *idev, u8 *lun)
int isci_task_lu_reset(struct domain_device *dev, u8 *lun)
{
int ret = TMF_RESP_FUNC_FAILED;
struct isci_tmf tmf;
/* Send the soft reset to the target */
#define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */
isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL);
ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_SRST_TIMEOUT_MS);
if (ret != TMF_RESP_FUNC_COMPLETE) {
dev_dbg(&ihost->pdev->dev,
"%s: Assert SRST failed (%p) = %x",
__func__, idev, ret);
/* Return the failure so that the LUN reset is escalated
* to a target reset.
*/
}
return ret;
}
/**
* isci_task_lu_reset() - This function is one of the SAS Domain Template
* functions. This is one of the Task Management functions called by libsas,
* to reset the given lun. Note the assumption that while this call is
* executing, no I/O will be sent by the host to the device.
* @lun: This parameter specifies the lun to be reset.
*
* status, zero indicates success.
*/
int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
{
struct isci_host *isci_host = dev_to_ihost(domain_device);
struct isci_host *isci_host = dev_to_ihost(dev);
struct isci_remote_device *isci_device;
unsigned long flags;
int ret;
spin_lock_irqsave(&isci_host->scic_lock, flags);
isci_device = isci_lookup_device(domain_device);
isci_device = isci_lookup_device(dev);
spin_unlock_irqrestore(&isci_host->scic_lock, flags);
dev_dbg(&isci_host->pdev->dev,
"%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
__func__, domain_device, isci_host, isci_device);
__func__, dev, isci_host, isci_device);
if (!isci_device) {
/* If the device is gone, stop the escalations. */
@ -928,11 +846,11 @@ int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
ret = TMF_RESP_FUNC_COMPLETE;
goto out;
}
set_bit(IDEV_EH, &isci_device->flags);
/* Send the task management part of the reset. */
if (sas_protocol_ata(domain_device->tproto)) {
ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun);
if (dev_is_sata(dev)) {
sas_ata_schedule_reset(dev);
ret = TMF_RESP_FUNC_COMPLETE;
} else
ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
@ -1062,9 +980,6 @@ int isci_task_abort_task(struct sas_task *task)
"%s: dev = %p, task = %p, old_request == %p\n",
__func__, isci_device, task, old_request);
if (isci_device)
set_bit(IDEV_EH, &isci_device->flags);
/* Device reset conditions signalled in task_state_flags are the
* responsibility of libsas to observe at the start of the error
* handler thread.
@ -1332,29 +1247,35 @@ isci_task_request_complete(struct isci_host *ihost,
}
static int isci_reset_device(struct isci_host *ihost,
struct domain_device *dev,
struct isci_remote_device *idev)
{
struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
enum sci_status status;
unsigned long flags;
int rc;
unsigned long flags;
enum sci_status status;
struct sas_phy *phy = sas_get_local_phy(dev);
struct isci_port *iport = dev->port->lldd_port;
dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
spin_lock_irqsave(&ihost->scic_lock, flags);
status = sci_remote_device_reset(idev);
if (status != SCI_SUCCESS) {
spin_unlock_irqrestore(&ihost->scic_lock, flags);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
if (status != SCI_SUCCESS) {
dev_dbg(&ihost->pdev->dev,
"%s: sci_remote_device_reset(%p) returned %d!\n",
__func__, idev, status);
return TMF_RESP_FUNC_FAILED;
rc = TMF_RESP_FUNC_FAILED;
goto out;
}
spin_unlock_irqrestore(&ihost->scic_lock, flags);
rc = sas_phy_reset(phy, true);
if (scsi_is_sas_phy_local(phy)) {
struct isci_phy *iphy = &ihost->phys[phy->number];
rc = isci_port_perform_hard_reset(ihost, iport, iphy);
} else
rc = sas_phy_reset(phy, !dev_is_sata(dev));
/* Terminate in-progress I/O now. */
isci_remote_device_nuke_requests(ihost, idev);
@ -1371,7 +1292,8 @@ static int isci_reset_device(struct isci_host *ihost,
}
dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
out:
sas_put_local_phy(phy);
return rc;
}
@ -1386,35 +1308,15 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
idev = isci_lookup_device(dev);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
ret = TMF_RESP_FUNC_COMPLETE;
goto out;
}
ret = isci_reset_device(ihost, idev);
out:
isci_put_device(idev);
return ret;
}
int isci_bus_reset_handler(struct scsi_cmnd *cmd)
{
struct domain_device *dev = sdev_to_domain_dev(cmd->device);
struct isci_host *ihost = dev_to_ihost(dev);
struct isci_remote_device *idev;
unsigned long flags;
int ret;
spin_lock_irqsave(&ihost->scic_lock, flags);
idev = isci_lookup_device(dev);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
if (!idev) {
/* XXX: need to cleanup any ireqs targeting this
* domain_device
*/
ret = TMF_RESP_FUNC_COMPLETE;
goto out;
}
ret = isci_reset_device(ihost, idev);
ret = isci_reset_device(ihost, dev, idev);
out:
isci_put_device(idev);
return ret;


@ -86,8 +86,6 @@ enum isci_tmf_function_codes {
isci_tmf_func_none = 0,
isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
isci_tmf_ssp_lun_reset = TMF_LU_RESET,
isci_tmf_sata_srst_high = TMF_LU_RESET + 0x100, /* Non SCSI */
isci_tmf_sata_srst_low = TMF_LU_RESET + 0x101 /* Non SCSI */
};
/**
* struct isci_tmf - This class represents the task management object which
@ -210,8 +208,6 @@ int isci_queuecommand(
struct scsi_cmnd *scsi_cmd,
void (*donefunc)(struct scsi_cmnd *));
int isci_bus_reset_handler(struct scsi_cmnd *cmd);
/**
* enum isci_completion_selection - This enum defines the possible actions to
* take with respect to a given request's notification back to libsas.
@ -321,40 +317,4 @@ isci_task_set_completion_status(
return task_notification_selection;
}
/**
* isci_execpath_callback() - This function is called from the task
* execute path when the task needs to callback libsas about the submit-time
* task failure. The callback occurs either through the task's done function
* or through sas_task_abort. In the case of regular non-discovery SATA/STP I/O
* requests, libsas takes the host lock before calling execute task. Therefore
* in this situation the host lock must be managed before calling the func.
*
* @ihost: This parameter is the controller to which the I/O request was sent.
* @task: This parameter is the I/O request.
* @func: This parameter is the function to call in the correct context.
* @status: This parameter is the status code for the completed task.
*
*/
static inline void isci_execpath_callback(struct isci_host *ihost,
struct sas_task *task,
void (*func)(struct sas_task *))
{
struct domain_device *dev = task->dev;
if (dev_is_sata(dev) && task->uldd_task) {
unsigned long flags;
/* Since we are still in the submit path, and since
* libsas takes the host lock on behalf of SATA
* devices before I/O starts (in the non-discovery case),
* we need to unlock before we can call the callback function.
*/
raw_local_irq_save(flags);
spin_unlock(dev->sata_dev.ap->lock);
func(task);
spin_lock(dev->sata_dev.ap->lock);
raw_local_irq_restore(flags);
} else
func(task);
}
#endif /* !defined(_SCI_TASK_H_) */


@ -684,10 +684,8 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
int buflen)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
int value;
switch(param) {
case ISCSI_PARAM_HDRDGST_EN:
@ -699,16 +697,7 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
break;
case ISCSI_PARAM_MAX_R2T:
sscanf(buf, "%d", &value);
if (value <= 0 || !is_power_of_2(value))
return -EINVAL;
if (session->max_r2t == value)
break;
iscsi_tcp_r2tpool_free(session);
iscsi_set_param(cls_conn, param, buf, buflen);
if (iscsi_tcp_r2tpool_alloc(session))
return -ENOMEM;
break;
return iscsi_tcp_set_max_r2t(conn, buf);
default:
return iscsi_set_param(cls_conn, param, buf, buflen);
}


@ -337,6 +337,13 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
schedule_delayed_work(&disc->disc_work, delay);
} else
fc_disc_done(disc, DISC_EV_FAILED);
} else if (PTR_ERR(fp) == -FC_EX_CLOSED) {
/*
* if discovery fails due to lport reset, clear
* pending flag so that subsequent discovery can
* continue
*/
disc->pending = 0;
}
}


@ -56,8 +56,7 @@ struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
rc = fc_els_fill(lport, did, fp, op, &r_ctl, &fh_type);
else {
/* CT requests */
rc = fc_ct_fill(lport, did, fp, op, &r_ctl, &fh_type);
did = FC_FID_DIR_SERV;
rc = fc_ct_fill(lport, did, fp, op, &r_ctl, &fh_type, &did);
}
if (rc) {


@ -1642,9 +1642,10 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
case FC_RCTL_ACK_0:
break;
default:
FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
fh->fh_r_ctl,
fc_exch_rctl_name(fh->fh_r_ctl));
if (ep)
FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
fh->fh_r_ctl,
fc_exch_rctl_name(fh->fh_r_ctl));
break;
}
fc_frame_free(fp);


@ -116,6 +116,8 @@ static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
static void fc_lport_enter_scr(struct fc_lport *);
static void fc_lport_enter_ready(struct fc_lport *);
static void fc_lport_enter_logo(struct fc_lport *);
static void fc_lport_enter_fdmi(struct fc_lport *lport);
static void fc_lport_enter_ms(struct fc_lport *, enum fc_lport_state);
static const char *fc_lport_state_names[] = {
[LPORT_ST_DISABLED] = "disabled",
@ -126,6 +128,11 @@ static const char *fc_lport_state_names[] = {
[LPORT_ST_RSPN_ID] = "RSPN_ID",
[LPORT_ST_RFT_ID] = "RFT_ID",
[LPORT_ST_RFF_ID] = "RFF_ID",
[LPORT_ST_FDMI] = "FDMI",
[LPORT_ST_RHBA] = "RHBA",
[LPORT_ST_RPA] = "RPA",
[LPORT_ST_DHBA] = "DHBA",
[LPORT_ST_DPRT] = "DPRT",
[LPORT_ST_SCR] = "SCR",
[LPORT_ST_READY] = "Ready",
[LPORT_ST_LOGO] = "LOGO",
@ -183,11 +190,14 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
if (lport->state == LPORT_ST_DNS) {
lport->dns_rdata = rdata;
fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
} else if (lport->state == LPORT_ST_FDMI) {
lport->ms_rdata = rdata;
fc_lport_enter_ms(lport, LPORT_ST_DHBA);
} else {
FC_LPORT_DBG(lport, "Received an READY event "
"on port (%6.6x) for the directory "
"server, but the lport is not "
"in the DNS state, it's in the "
"in the DNS or FDMI state, it's in the "
"%d state", rdata->ids.port_id,
lport->state);
lport->tt.rport_logoff(rdata);
@ -196,7 +206,10 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
case RPORT_EV_LOGO:
case RPORT_EV_FAILED:
case RPORT_EV_STOP:
lport->dns_rdata = NULL;
if (rdata->ids.port_id == FC_FID_DIR_SERV)
lport->dns_rdata = NULL;
else if (rdata->ids.port_id == FC_FID_MGMT_SERV)
lport->ms_rdata = NULL;
break;
case RPORT_EV_NONE:
break;
@ -1148,7 +1161,10 @@ static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
break;
case LPORT_ST_RFF_ID:
fc_lport_enter_scr(lport);
if (lport->fdmi_enabled)
fc_lport_enter_fdmi(lport);
else
fc_lport_enter_scr(lport);
break;
default:
/* should have already been caught by state checks */
@ -1162,6 +1178,85 @@ err:
mutex_unlock(&lport->lp_mutex);
}
/**
* fc_lport_ms_resp() - Handle response to a management server
* exchange
* @sp: current sequence in exchange
* @fp: response frame
* @lp_arg: Fibre Channel host port instance
*
* Locking Note: This function will be called without the lport lock
* held, but it will lock, call an _enter_* function or fc_lport_error()
* and then unlock the lport.
*/
static void fc_lport_ms_resp(struct fc_seq *sp, struct fc_frame *fp,
void *lp_arg)
{
struct fc_lport *lport = lp_arg;
struct fc_frame_header *fh;
struct fc_ct_hdr *ct;
FC_LPORT_DBG(lport, "Received a ms %s\n", fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
return;
mutex_lock(&lport->lp_mutex);
if (lport->state < LPORT_ST_RHBA || lport->state > LPORT_ST_DPRT) {
FC_LPORT_DBG(lport, "Received a management server response, "
"but in state %s\n", fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
}
if (IS_ERR(fp)) {
fc_lport_error(lport, fp);
goto err;
}
fh = fc_frame_header_get(fp);
ct = fc_frame_payload_get(fp, sizeof(*ct));
if (fh && ct && fh->fh_type == FC_TYPE_CT &&
ct->ct_fs_type == FC_FST_MGMT &&
ct->ct_fs_subtype == FC_FDMI_SUBTYPE) {
FC_LPORT_DBG(lport, "Received a management server response, "
"reason=%d explain=%d\n",
ct->ct_reason,
ct->ct_explan);
switch (lport->state) {
case LPORT_ST_RHBA:
if (ntohs(ct->ct_cmd) == FC_FS_ACC)
fc_lport_enter_ms(lport, LPORT_ST_RPA);
else /* Error Skip RPA */
fc_lport_enter_scr(lport);
break;
case LPORT_ST_RPA:
fc_lport_enter_scr(lport);
break;
case LPORT_ST_DPRT:
fc_lport_enter_ms(lport, LPORT_ST_RHBA);
break;
case LPORT_ST_DHBA:
fc_lport_enter_ms(lport, LPORT_ST_DPRT);
break;
default:
/* should have already been caught by state checks */
break;
}
} else {
/* Invalid Frame? */
fc_lport_error(lport, fp);
}
out:
fc_frame_free(fp);
err:
mutex_unlock(&lport->lp_mutex);
}
/**
* fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
* @sp: current sequence in SCR exchange
@ -1338,6 +1433,123 @@ err:
fc_lport_error(lport, NULL);
}
/**
* fc_lport_enter_ms() - management server commands
* @lport: Fibre Channel local port to register
* @state: the management server request state to enter
*
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
{
struct fc_frame *fp;
enum fc_fdmi_req cmd;
int size = sizeof(struct fc_ct_hdr);
size_t len;
int numattrs;
FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
fc_lport_state_names[state],
fc_lport_state(lport));
fc_lport_state_enter(lport, state);
switch (state) {
case LPORT_ST_RHBA:
cmd = FC_FDMI_RHBA;
/* Number of HBA Attributes */
numattrs = 10;
len = sizeof(struct fc_fdmi_rhba);
len -= sizeof(struct fc_fdmi_attr_entry);
len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
len += FC_FDMI_HBA_ATTR_MODEL_LEN;
len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
size += len;
break;
case LPORT_ST_RPA:
cmd = FC_FDMI_RPA;
/* Number of Port Attributes */
numattrs = 6;
len = sizeof(struct fc_fdmi_rpa);
len -= sizeof(struct fc_fdmi_attr_entry);
len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
size += len;
break;
case LPORT_ST_DPRT:
cmd = FC_FDMI_DPRT;
len = sizeof(struct fc_fdmi_dprt);
size += len;
break;
case LPORT_ST_DHBA:
cmd = FC_FDMI_DHBA;
len = sizeof(struct fc_fdmi_dhba);
size += len;
break;
default:
fc_lport_error(lport, NULL);
return;
}
FC_LPORT_DBG(lport, "Cmd=0x%x Len %d size %d\n",
cmd, (int)len, size);
fp = fc_frame_alloc(lport, size);
if (!fp) {
fc_lport_error(lport, fp);
return;
}
if (!lport->tt.elsct_send(lport, FC_FID_MGMT_SERV, fp, cmd,
fc_lport_ms_resp,
lport, 3 * lport->r_a_tov))
fc_lport_error(lport, fp);
}
/**
* fc_lport_enter_fdmi() - Create a fc_rport for the management server
* @lport: The local port requesting a remote port for the management server
*
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
static void fc_lport_enter_fdmi(struct fc_lport *lport)
{
struct fc_rport_priv *rdata;
FC_LPORT_DBG(lport, "Entered FDMI state from %s state\n",
fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_FDMI);
mutex_lock(&lport->disc.disc_mutex);
rdata = lport->tt.rport_create(lport, FC_FID_MGMT_SERV);
mutex_unlock(&lport->disc.disc_mutex);
if (!rdata)
goto err;
rdata->ops = &fc_lport_rport_ops;
lport->tt.rport_login(rdata);
return;
err:
fc_lport_error(lport, NULL);
}
/**
* fc_lport_timeout() - Handler for the retry_work timer
* @work: The work struct of the local port
@@ -1371,6 +1583,15 @@ static void fc_lport_timeout(struct work_struct *work)
case LPORT_ST_RFF_ID:
fc_lport_enter_ns(lport, lport->state);
break;
case LPORT_ST_FDMI:
fc_lport_enter_fdmi(lport);
break;
case LPORT_ST_RHBA:
case LPORT_ST_RPA:
case LPORT_ST_DHBA:
case LPORT_ST_DPRT:
fc_lport_enter_ms(lport, lport->state);
break;
case LPORT_ST_SCR:
fc_lport_enter_scr(lport);
break;


@@ -1909,6 +1909,16 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
spin_lock(&session->lock);
task = (struct iscsi_task *)sc->SCp.ptr;
if (!task) {
/*
* Raced with completion. Blk layer has taken ownership
* so let timeout code complete it now.
*/
rc = BLK_EH_HANDLED;
goto done;
}
if (session->state != ISCSI_STATE_LOGGED_IN) {
/*
* We are probably in the middle of iscsi recovery so let
@@ -1925,16 +1935,6 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
goto done;
}
task = (struct iscsi_task *)sc->SCp.ptr;
if (!task) {
/*
* Raced with completion. Just reset timer, and let it
* complete normally
*/
rc = BLK_EH_RESET_TIMER;
goto done;
}
/*
* If we have sent (at least queued to the network layer) a pdu or
* recvd one for the task since the last timeout ask for
@@ -2807,6 +2807,7 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
kfree(session->username);
kfree(session->username_in);
kfree(session->targetname);
kfree(session->targetalias);
kfree(session->initiatorname);
kfree(session->ifacename);
@@ -3200,7 +3201,7 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
sscanf(buf, "%d", &session->initial_r2t_en);
break;
case ISCSI_PARAM_MAX_R2T:
sscanf(buf, "%d", &session->max_r2t);
sscanf(buf, "%hu", &session->max_r2t);
break;
case ISCSI_PARAM_IMM_DATA_EN:
sscanf(buf, "%d", &session->imm_data_en);
@@ -3233,6 +3234,8 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
return iscsi_switch_str_param(&session->password_in, buf);
case ISCSI_PARAM_TARGET_NAME:
return iscsi_switch_str_param(&session->targetname, buf);
case ISCSI_PARAM_TARGET_ALIAS:
return iscsi_switch_str_param(&session->targetalias, buf);
case ISCSI_PARAM_TPGT:
sscanf(buf, "%d", &session->tpgt);
break;
@@ -3299,6 +3302,9 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
case ISCSI_PARAM_TARGET_NAME:
len = sprintf(buf, "%s\n", session->targetname);
break;
case ISCSI_PARAM_TARGET_ALIAS:
len = sprintf(buf, "%s\n", session->targetalias);
break;
case ISCSI_PARAM_TPGT:
len = sprintf(buf, "%d\n", session->tpgt);
break;


@@ -1170,6 +1170,24 @@ void iscsi_tcp_r2tpool_free(struct iscsi_session *session)
}
EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_free);
int iscsi_tcp_set_max_r2t(struct iscsi_conn *conn, char *buf)
{
struct iscsi_session *session = conn->session;
unsigned short r2ts = 0;
sscanf(buf, "%hu", &r2ts);
if (session->max_r2t == r2ts)
return 0;
if (!r2ts || !is_power_of_2(r2ts))
return -EINVAL;
session->max_r2t = r2ts;
iscsi_tcp_r2tpool_free(session);
return iscsi_tcp_r2tpool_alloc(session);
}
EXPORT_SYMBOL_GPL(iscsi_tcp_set_max_r2t);
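The power-of-two restriction above keeps the resized R2T pool a size the pool allocator handles sensibly. The kernel's is_power_of_2() helper, which this code relies on, reduces to the classic n & (n - 1) test; a standalone equivalent:
#include <assert.h>
#include <stdbool.h>

/* Mirrors the kernel's is_power_of_2(): true for 1, 2, 4, 8, ... */
static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	assert(is_power_of_2(1) && is_power_of_2(8));
	assert(!is_power_of_2(0) && !is_power_of_2(12));
	return 0;
}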
void iscsi_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
struct iscsi_stats *stats)
{

File diff suppressed because it is too large


@@ -30,29 +30,30 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/sas_ata.h>
#include "../scsi_sas_internal.h"
/* ---------- Basic task processing for discovery purposes ---------- */
void sas_init_dev(struct domain_device *dev)
{
INIT_LIST_HEAD(&dev->siblings);
INIT_LIST_HEAD(&dev->dev_list_node);
switch (dev->dev_type) {
case SAS_END_DEV:
break;
case EDGE_DEV:
case FANOUT_DEV:
INIT_LIST_HEAD(&dev->ex_dev.children);
break;
case SATA_DEV:
case SATA_PM:
case SATA_PM_PORT:
INIT_LIST_HEAD(&dev->sata_dev.children);
break;
default:
break;
}
switch (dev->dev_type) {
case SAS_END_DEV:
break;
case EDGE_DEV:
case FANOUT_DEV:
INIT_LIST_HEAD(&dev->ex_dev.children);
mutex_init(&dev->ex_dev.cmd_mutex);
break;
case SATA_DEV:
case SATA_PM:
case SATA_PM_PORT:
case SATA_PENDING:
INIT_LIST_HEAD(&dev->sata_dev.children);
break;
default:
break;
}
}
/* ---------- Domain device discovery ---------- */
@@ -68,19 +69,18 @@ void sas_init_dev(struct domain_device *dev)
*/
static int sas_get_port_device(struct asd_sas_port *port)
{
unsigned long flags;
struct asd_sas_phy *phy;
struct sas_rphy *rphy;
struct domain_device *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
dev = sas_alloc_device();
if (!dev)
return -ENOMEM;
spin_lock_irqsave(&port->phy_list_lock, flags);
spin_lock_irq(&port->phy_list_lock);
if (list_empty(&port->phy_list)) {
spin_unlock_irqrestore(&port->phy_list_lock, flags);
kfree(dev);
spin_unlock_irq(&port->phy_list_lock);
sas_put_device(dev);
return -ENODEV;
}
phy = container_of(port->phy_list.next, struct asd_sas_phy, port_phy_el);
@@ -88,7 +88,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
memcpy(dev->frame_rcvd, phy->frame_rcvd, min(sizeof(dev->frame_rcvd),
(size_t)phy->frame_rcvd_size));
spin_unlock(&phy->frame_rcvd_lock);
spin_unlock_irqrestore(&port->phy_list_lock, flags);
spin_unlock_irq(&port->phy_list_lock);
if (dev->frame_rcvd[0] == 0x34 && port->oob_mode == SATA_OOB_MODE) {
struct dev_to_host_fis *fis =
@@ -130,9 +130,14 @@ static int sas_get_port_device(struct asd_sas_port *port)
}
if (!rphy) {
kfree(dev);
sas_put_device(dev);
return -ENODEV;
}
spin_lock_irq(&port->phy_list_lock);
list_for_each_entry(phy, &port->phy_list, port_phy_el)
sas_phy_set_target(phy, dev);
spin_unlock_irq(&port->phy_list_lock);
rphy->identify.phy_identifier = phy->phy->identify.phy_identifier;
memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE);
sas_fill_in_rphy(dev, rphy);
@@ -147,11 +152,17 @@ static int sas_get_port_device(struct asd_sas_port *port)
memset(port->disc.eeds_a, 0, SAS_ADDR_SIZE);
memset(port->disc.eeds_b, 0, SAS_ADDR_SIZE);
port->disc.max_level = 0;
sas_device_set_phy(dev, port->port);
dev->rphy = rphy;
spin_lock_irq(&port->dev_list_lock);
list_add_tail(&dev->dev_list_node, &port->dev_list);
spin_unlock_irq(&port->dev_list_lock);
if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV)
list_add_tail(&dev->disco_list_node, &port->disco_list);
else {
spin_lock_irq(&port->dev_list_lock);
list_add_tail(&dev->dev_list_node, &port->dev_list);
spin_unlock_irq(&port->dev_list_lock);
}
return 0;
}
@@ -173,6 +184,7 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
dev_name(sas_ha->dev),
SAS_ADDR(dev->sas_addr), res);
}
kref_get(&dev->kref);
}
return res;
}
@@ -184,12 +196,40 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
struct Scsi_Host *shost = sas_ha->core.shost;
struct sas_internal *i = to_sas_internal(shost->transportt);
if (i->dft->lldd_dev_gone)
if (i->dft->lldd_dev_gone) {
i->dft->lldd_dev_gone(dev);
sas_put_device(dev);
}
}
/* ---------- Common/dispatchers ---------- */
static void sas_probe_devices(struct work_struct *work)
{
struct domain_device *dev, *n;
struct sas_discovery_event *ev =
container_of(work, struct sas_discovery_event, work);
struct asd_sas_port *port = ev->port;
clear_bit(DISCE_PROBE, &port->disc.pending);
/* devices must be domain members before link recovery and probe */
list_for_each_entry(dev, &port->disco_list, disco_list_node) {
spin_lock_irq(&port->dev_list_lock);
list_add_tail(&dev->dev_list_node, &port->dev_list);
spin_unlock_irq(&port->dev_list_lock);
}
sas_probe_sata(port);
list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
int err;
err = sas_rphy_add(dev->rphy);
if (err)
sas_fail_probe(dev, __func__, err);
else
list_del_init(&dev->disco_list_node);
}
}
/**
* sas_discover_end_dev -- discover an end device (SSP, etc)
@@ -203,22 +243,36 @@ int sas_discover_end_dev(struct domain_device *dev)
res = sas_notify_lldd_dev_found(dev);
if (res)
goto out_err2;
res = sas_rphy_add(dev->rphy);
if (res)
goto out_err;
return res;
sas_discover_event(dev->port, DISCE_PROBE);
return 0;
out_err:
sas_notify_lldd_dev_gone(dev);
out_err2:
return res;
}
/* ---------- Device registration and unregistration ---------- */
void sas_free_device(struct kref *kref)
{
struct domain_device *dev = container_of(kref, typeof(*dev), kref);
if (dev->parent)
sas_put_device(dev->parent);
sas_port_put_phy(dev->phy);
dev->phy = NULL;
/* remove the phys and ports, everything else should be gone */
if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV)
kfree(dev->ex_dev.ex_phy);
if (dev_is_sata(dev) && dev->sata_dev.ap) {
ata_sas_port_destroy(dev->sata_dev.ap);
dev->sata_dev.ap = NULL;
}
kfree(dev);
}
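sas_free_device() is the kref release callback: the final sas_put_device() drops the last reference and this function tears the device down. A minimal userspace sketch of the same acquire/release discipline, with a plain counter standing in for struct kref (and therefore without the atomicity the kernel version has):
#include <stdio.h>
#include <stdlib.h>

/* Userspace sketch of the kref pattern above; names are hypothetical. */
struct object {
	int refcount;        /* stands in for struct kref; not atomic */
	char payload[32];
};

static struct object *object_alloc(void)
{
	struct object *obj = calloc(1, sizeof(*obj));

	if (obj)
		obj->refcount = 1; /* like kref_init(): starts at 1 */
	return obj;
}

static void object_get(struct object *obj)
{
	obj->refcount++;           /* like kref_get() */
}

static void object_put(struct object *obj)
{
	/* like kref_put(&obj->kref, release): the last put frees */
	if (--obj->refcount == 0) {
		printf("releasing object\n");
		free(obj);
	}
}

int main(void)
{
	struct object *obj = object_alloc();

	object_get(obj); /* e.g. the LLDD pins the device on dev_found */
	object_put(obj); /* dev_gone drops the LLDD reference */
	object_put(obj); /* final put triggers the release */
	return 0;
}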
static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_device *dev)
{
sas_notify_lldd_dev_gone(dev);
@@ -230,34 +284,84 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_device *dev)
spin_lock_irq(&port->dev_list_lock);
list_del_init(&dev->dev_list_node);
spin_unlock_irq(&port->dev_list_lock);
sas_put_device(dev);
}
static void sas_destruct_devices(struct work_struct *work)
{
struct domain_device *dev, *n;
struct sas_discovery_event *ev =
container_of(work, struct sas_discovery_event, work);
struct asd_sas_port *port = ev->port;
clear_bit(DISCE_DESTRUCT, &port->disc.pending);
list_for_each_entry_safe(dev, n, &port->destroy_list, disco_list_node) {
list_del_init(&dev->disco_list_node);
sas_remove_children(&dev->rphy->dev);
sas_rphy_delete(dev->rphy);
dev->rphy = NULL;
sas_unregister_common_dev(port, dev);
}
}
void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
{
if (dev->rphy) {
sas_remove_children(&dev->rphy->dev);
sas_rphy_delete(dev->rphy);
if (!test_bit(SAS_DEV_DESTROY, &dev->state) &&
!list_empty(&dev->disco_list_node)) {
/* this rphy never saw sas_rphy_add */
list_del_init(&dev->disco_list_node);
sas_rphy_free(dev->rphy);
dev->rphy = NULL;
sas_unregister_common_dev(port, dev);
}
if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) {
/* remove the phys and ports, everything else should be gone */
kfree(dev->ex_dev.ex_phy);
dev->ex_dev.ex_phy = NULL;
if (dev->rphy && !test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
sas_rphy_unlink(dev->rphy);
list_move_tail(&dev->disco_list_node, &port->destroy_list);
sas_discover_event(dev->port, DISCE_DESTRUCT);
}
sas_unregister_common_dev(port, dev);
}
void sas_unregister_domain_devices(struct asd_sas_port *port)
void sas_unregister_domain_devices(struct asd_sas_port *port, int gone)
{
struct domain_device *dev, *n;
list_for_each_entry_safe_reverse(dev, n, &port->dev_list, dev_list_node)
list_for_each_entry_safe_reverse(dev, n, &port->dev_list, dev_list_node) {
if (gone)
set_bit(SAS_DEV_GONE, &dev->state);
sas_unregister_dev(port, dev);
}
list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node)
sas_unregister_dev(port, dev);
port->port->rphy = NULL;
}
void sas_device_set_phy(struct domain_device *dev, struct sas_port *port)
{
struct sas_ha_struct *ha;
struct sas_phy *new_phy;
if (!dev)
return;
ha = dev->port->ha;
new_phy = sas_port_get_phy(port);
/* pin and record last seen phy */
spin_lock_irq(&ha->phy_port_lock);
if (new_phy) {
sas_port_put_phy(dev->phy);
dev->phy = new_phy;
}
spin_unlock_irq(&ha->phy_port_lock);
}
/* ---------- Discovery and Revalidation ---------- */
/**
@@ -277,8 +381,7 @@ static void sas_discover_domain(struct work_struct *work)
container_of(work, struct sas_discovery_event, work);
struct asd_sas_port *port = ev->port;
sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock,
&port->disc.pending);
clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending);
if (port->port_dev)
return;
@@ -318,11 +421,12 @@ static void sas_discover_domain(struct work_struct *work)
sas_rphy_free(dev->rphy);
dev->rphy = NULL;
list_del_init(&dev->disco_list_node);
spin_lock_irq(&port->dev_list_lock);
list_del_init(&dev->dev_list_node);
spin_unlock_irq(&port->dev_list_lock);
kfree(dev); /* not kobject_register-ed yet */
sas_put_device(dev);
port->port_dev = NULL;
}
@@ -336,21 +440,51 @@ static void sas_revalidate_domain(struct work_struct *work)
struct sas_discovery_event *ev =
container_of(work, struct sas_discovery_event, work);
struct asd_sas_port *port = ev->port;
struct sas_ha_struct *ha = port->ha;
sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock,
&port->disc.pending);
/* prevent revalidation from finding sata links in recovery */
mutex_lock(&ha->disco_mutex);
if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
SAS_DPRINTK("REVALIDATION DEFERRED on port %d, pid:%d\n",
port->id, task_pid_nr(current));
goto out;
}
clear_bit(DISCE_REVALIDATE_DOMAIN, &port->disc.pending);
SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
task_pid_nr(current));
if (port->port_dev)
res = sas_ex_revalidate_domain(port->port_dev);
SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
port->id, task_pid_nr(current), res);
out:
mutex_unlock(&ha->disco_mutex);
}
/* ---------- Events ---------- */
static void sas_chain_work(struct sas_ha_struct *ha, struct work_struct *work)
{
/* chained work is not subject to SAS_HA_DRAINING or SAS_HA_REGISTERED */
scsi_queue_work(ha->core.shost, work);
}
static void sas_chain_event(int event, unsigned long *pending,
struct work_struct *work,
struct sas_ha_struct *ha)
{
if (!test_and_set_bit(event, pending)) {
unsigned long flags;
spin_lock_irqsave(&ha->state_lock, flags);
sas_chain_work(ha, work);
spin_unlock_irqrestore(&ha->state_lock, flags);
}
}
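Both sas_chain_event() here and sas_queue_event() in sas_event.c rely on test_and_set_bit() to collapse duplicate events: a second notification arriving before the handler runs is silently merged, and the handler re-arms the event by clearing its pending bit. A compilable C11 sketch of that idiom, with a single atomic word standing in for the pending bitmap:
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong pending;

/* Like the kernel's test_and_set_bit(): returns the bit's old value. */
static bool test_and_set_bit(int nr, atomic_ulong *addr)
{
	unsigned long mask = 1UL << nr;

	return atomic_fetch_or(addr, mask) & mask;
}

static void queue_event(int ev)
{
	if (!test_and_set_bit(ev, &pending))
		printf("event %d queued\n", ev);
	else
		printf("event %d already pending, merged\n", ev);
}

static void handle_event(int ev)
{
	/* handlers clear their bit first, like clear_bit(ev, &pending) */
	atomic_fetch_and(&pending, ~(1UL << ev));
	printf("event %d handled\n", ev);
}

int main(void)
{
	queue_event(3);
	queue_event(3); /* deduplicated while still pending */
	handle_event(3);
	queue_event(3); /* queued again after the handler re-armed it */
	return 0;
}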
int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
{
struct sas_discovery *disc;
@@ -361,8 +495,7 @@ int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
BUG_ON(ev >= DISC_NUM_EVENTS);
sas_queue_event(ev, &disc->disc_event_lock, &disc->pending,
&disc->disc_work[ev].work, port->ha);
sas_chain_event(ev, &disc->pending, &disc->disc_work[ev].work, port->ha);
return 0;
}
@@ -380,9 +513,10 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
[DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
[DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
[DISCE_PROBE] = sas_probe_devices,
[DISCE_DESTRUCT] = sas_destruct_devices,
};
spin_lock_init(&disc->disc_event_lock);
disc->pending = 0;
for (i = 0; i < DISC_NUM_EVENTS; i++) {
INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]);


@@ -22,15 +22,103 @@
*
*/
#include <linux/export.h>
#include <scsi/scsi_host.h>
#include "sas_internal.h"
#include "sas_dump.h"
void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work)
{
if (!test_bit(SAS_HA_REGISTERED, &ha->state))
return;
if (test_bit(SAS_HA_DRAINING, &ha->state))
list_add(&work->entry, &ha->defer_q);
else
scsi_queue_work(ha->core.shost, work);
}
static void sas_queue_event(int event, unsigned long *pending,
struct work_struct *work,
struct sas_ha_struct *ha)
{
if (!test_and_set_bit(event, pending)) {
unsigned long flags;
spin_lock_irqsave(&ha->state_lock, flags);
sas_queue_work(ha, work);
spin_unlock_irqrestore(&ha->state_lock, flags);
}
}
void __sas_drain_work(struct sas_ha_struct *ha)
{
struct workqueue_struct *wq = ha->core.shost->work_q;
struct work_struct *w, *_w;
set_bit(SAS_HA_DRAINING, &ha->state);
/* flush submitters */
spin_lock_irq(&ha->state_lock);
spin_unlock_irq(&ha->state_lock);
drain_workqueue(wq);
spin_lock_irq(&ha->state_lock);
clear_bit(SAS_HA_DRAINING, &ha->state);
list_for_each_entry_safe(w, _w, &ha->defer_q, entry) {
list_del_init(&w->entry);
sas_queue_work(ha, w);
}
spin_unlock_irq(&ha->state_lock);
}
int sas_drain_work(struct sas_ha_struct *ha)
{
int err;
err = mutex_lock_interruptible(&ha->drain_mutex);
if (err)
return err;
if (test_bit(SAS_HA_REGISTERED, &ha->state))
__sas_drain_work(ha);
mutex_unlock(&ha->drain_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(sas_drain_work);
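The drain sequence above has three phases: set SAS_HA_DRAINING so new work is parked on ha->defer_q, take and release state_lock once so any submitter already inside sas_queue_work() has finished, then drain_workqueue() and requeue whatever was deferred. The single-threaded model below captures just the park-and-resubmit bookkeeping; it has no locking and is illustrative only.
/* Single-threaded model of the defer/drain bookkeeping above; the real
 * code adds locking and drain_workqueue(). */
#include <stdbool.h>
#include <stdio.h>

#define MAX_DEFERRED 8

static bool draining;
static void (*defer_q[MAX_DEFERRED])(void);
static int n_deferred;

static void queue_work(void (*fn)(void))
{
	if (draining)
		defer_q[n_deferred++] = fn; /* park it, like ha->defer_q */
	else
		fn();                       /* the "workqueue" runs it */
}

static void drain_work(void)
{
	/* real code: flush submitters, drain_workqueue(), then clear flag */
	draining = false;
	while (n_deferred > 0)
		queue_work(defer_q[--n_deferred]); /* resubmit parked work */
}

static void disc_work(void) { printf("discovery work ran\n"); }

int main(void)
{
	draining = true;       /* a drain is in progress */
	queue_work(disc_work); /* parked on defer_q instead of running */
	drain_work();          /* drain finishes; parked work now runs */
	return 0;
}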
void sas_disable_revalidation(struct sas_ha_struct *ha)
{
mutex_lock(&ha->disco_mutex);
set_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
mutex_unlock(&ha->disco_mutex);
}
void sas_enable_revalidation(struct sas_ha_struct *ha)
{
int i;
mutex_lock(&ha->disco_mutex);
clear_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
for (i = 0; i < ha->num_phys; i++) {
struct asd_sas_port *port = ha->sas_port[i];
const int ev = DISCE_REVALIDATE_DOMAIN;
struct sas_discovery *d = &port->disc;
if (!test_and_clear_bit(ev, &d->pending))
continue;
sas_queue_event(ev, &d->pending, &d->disc_work[ev].work, ha);
}
mutex_unlock(&ha->disco_mutex);
}
static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
{
BUG_ON(event >= HA_NUM_EVENTS);
sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending,
sas_queue_event(event, &sas_ha->pending,
&sas_ha->ha_events[event].work, sas_ha);
}
@@ -40,7 +128,7 @@ static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
BUG_ON(event >= PORT_NUM_EVENTS);
sas_queue_event(event, &ha->event_lock, &phy->port_events_pending,
sas_queue_event(event, &phy->port_events_pending,
&phy->port_events[event].work, ha);
}
@@ -50,7 +138,7 @@ static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
BUG_ON(event >= PHY_NUM_EVENTS);
sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending,
sas_queue_event(event, &phy->phy_events_pending,
&phy->phy_events[event].work, ha);
}
@@ -62,8 +150,6 @@ int sas_init_events(struct sas_ha_struct *sas_ha)
int i;
spin_lock_init(&sas_ha->event_lock);
for (i = 0; i < HA_NUM_EVENTS; i++) {
INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
sas_ha->ha_events[i].ha = sas_ha;


@@ -28,6 +28,7 @@
#include "sas_internal.h"
#include <scsi/sas_ata.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include "../scsi_sas_internal.h"
@@ -71,11 +72,18 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
struct sas_internal *i =
to_sas_internal(dev->port->ha->core.shost->transportt);
mutex_lock(&dev->ex_dev.cmd_mutex);
for (retry = 0; retry < 3; retry++) {
task = sas_alloc_task(GFP_KERNEL);
if (!task)
return -ENOMEM;
if (test_bit(SAS_DEV_GONE, &dev->state)) {
res = -ECOMM;
break;
}
task = sas_alloc_task(GFP_KERNEL);
if (!task) {
res = -ENOMEM;
break;
}
task->dev = dev;
task->task_proto = dev->tproto;
sg_init_one(&task->smp_task.smp_req, req, req_size);
@@ -93,7 +101,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
if (res) {
del_timer(&task->timer);
SAS_DPRINTK("executing SMP task failed:%d\n", res);
goto ex_err;
break;
}
wait_for_completion(&task->completion);
@@ -103,24 +111,30 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
i->dft->lldd_abort_task(task);
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
SAS_DPRINTK("SMP task aborted and not done\n");
goto ex_err;
break;
}
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == SAM_STAT_GOOD) {
res = 0;
break;
} if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == SAS_DATA_UNDERRUN) {
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == SAS_DATA_UNDERRUN) {
/* no error, but return the number of bytes of
* underrun */
res = task->task_status.residual;
break;
} if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == SAS_DATA_OVERRUN) {
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == SAS_DATA_OVERRUN) {
res = -EMSGSIZE;
break;
} else {
}
if (task->task_status.resp == SAS_TASK_UNDELIVERED &&
task->task_status.stat == SAS_DEVICE_UNKNOWN)
break;
else {
SAS_DPRINTK("%s: task to dev %016llx response: 0x%x "
"status 0x%x\n", __func__,
SAS_ADDR(dev->sas_addr),
@@ -130,11 +144,10 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
task = NULL;
}
}
ex_err:
mutex_unlock(&dev->ex_dev.cmd_mutex);
BUG_ON(retry == 3 && task != NULL);
if (task != NULL) {
sas_free_task(task);
}
sas_free_task(task);
return res;
}
@@ -153,19 +166,49 @@ static inline void *alloc_smp_resp(int size)
return kzalloc(size, GFP_KERNEL);
}
/* ---------- Expander configuration ---------- */
static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
void *disc_resp)
static char sas_route_char(struct domain_device *dev, struct ex_phy *phy)
{
switch (phy->routing_attr) {
case TABLE_ROUTING:
if (dev->ex_dev.t2t_supp)
return 'U';
else
return 'T';
case DIRECT_ROUTING:
return 'D';
case SUBTRACTIVE_ROUTING:
return 'S';
default:
return '?';
}
}
static enum sas_dev_type to_dev_type(struct discover_resp *dr)
{
/* This is detecting a failure to transmit initial dev to host
* FIS as described in section J.5 of sas-2 r16
*/
if (dr->attached_dev_type == NO_DEVICE && dr->attached_sata_dev &&
dr->linkrate >= SAS_LINK_RATE_1_5_GBPS)
return SATA_PENDING;
else
return dr->attached_dev_type;
}
static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
{
enum sas_dev_type dev_type;
enum sas_linkrate linkrate;
u8 sas_addr[SAS_ADDR_SIZE];
struct smp_resp *resp = rsp;
struct discover_resp *dr = &resp->disc;
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *phy = &ex->ex_phy[phy_id];
struct smp_resp *resp = disc_resp;
struct discover_resp *dr = &resp->disc;
struct sas_rphy *rphy = dev->rphy;
int rediscover = (phy->phy != NULL);
bool new_phy = !phy->phy;
char *type;
if (!rediscover) {
if (new_phy) {
phy->phy = sas_phy_alloc(&rphy->dev, phy_id);
/* FIXME: error_handling */
@@ -184,8 +227,13 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
break;
}
/* check if anything important changed to squelch debug */
dev_type = phy->attached_dev_type;
linkrate = phy->linkrate;
memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
phy->attached_dev_type = to_dev_type(dr);
phy->phy_id = phy_id;
phy->attached_dev_type = dr->attached_dev_type;
phy->linkrate = dr->linkrate;
phy->attached_sata_host = dr->attached_sata_host;
phy->attached_sata_dev = dr->attached_sata_dev;
@@ -200,9 +248,11 @@
phy->last_da_index = -1;
phy->phy->identify.sas_address = SAS_ADDR(phy->attached_sas_addr);
phy->phy->identify.device_type = phy->attached_dev_type;
phy->phy->identify.device_type = dr->attached_dev_type;
phy->phy->identify.initiator_port_protocols = phy->attached_iproto;
phy->phy->identify.target_port_protocols = phy->attached_tproto;
if (!phy->attached_tproto && dr->attached_sata_dev)
phy->phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
phy->phy->identify.phy_identifier = phy_id;
phy->phy->minimum_linkrate_hw = dr->hmin_linkrate;
phy->phy->maximum_linkrate_hw = dr->hmax_linkrate;
@@ -210,20 +260,76 @@
phy->phy->maximum_linkrate = dr->pmax_linkrate;
phy->phy->negotiated_linkrate = phy->linkrate;
if (!rediscover)
if (new_phy)
if (sas_phy_add(phy->phy)) {
sas_phy_free(phy->phy);
return;
}
SAS_DPRINTK("ex %016llx phy%02d:%c attached: %016llx\n",
SAS_ADDR(dev->sas_addr), phy->phy_id,
phy->routing_attr == TABLE_ROUTING ? 'T' :
phy->routing_attr == DIRECT_ROUTING ? 'D' :
phy->routing_attr == SUBTRACTIVE_ROUTING ? 'S' : '?',
SAS_ADDR(phy->attached_sas_addr));
switch (phy->attached_dev_type) {
case SATA_PENDING:
type = "stp pending";
break;
case NO_DEVICE:
type = "no device";
break;
case SAS_END_DEV:
if (phy->attached_iproto) {
if (phy->attached_tproto)
type = "host+target";
else
type = "host";
} else {
if (dr->attached_sata_dev)
type = "stp";
else
type = "ssp";
}
break;
case EDGE_DEV:
case FANOUT_DEV:
type = "smp";
break;
default:
type = "unknown";
}
return;
/* this routine is polled by libata error recovery so filter
* unimportant messages
*/
if (new_phy || phy->attached_dev_type != dev_type ||
phy->linkrate != linkrate ||
SAS_ADDR(phy->attached_sas_addr) != SAS_ADDR(sas_addr))
/* pass */;
else
return;
SAS_DPRINTK("ex %016llx phy%02d:%c:%X attached: %016llx (%s)\n",
SAS_ADDR(dev->sas_addr), phy->phy_id,
sas_route_char(dev, phy), phy->linkrate,
SAS_ADDR(phy->attached_sas_addr), type);
}
/* check if we have an existing attached ata device on this expander phy */
struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id)
{
struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy_id];
struct domain_device *dev;
struct sas_rphy *rphy;
if (!ex_phy->port)
return NULL;
rphy = ex_phy->port->rphy;
if (!rphy)
return NULL;
dev = sas_find_dev_by_rphy(rphy);
if (dev && dev_is_sata(dev))
return dev;
return NULL;
}
#define DISCOVER_REQ_SIZE 16
@@ -232,39 +338,25 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
u8 *disc_resp, int single)
{
int i, res;
struct discover_resp *dr;
int res;
disc_req[9] = single;
for (i = 1 ; i < 3; i++) {
struct discover_resp *dr;
res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
disc_resp, DISCOVER_RESP_SIZE);
if (res)
return res;
/* This is detecting a failure to transmit initial
* dev to host FIS as described in section G.5 of
* sas-2 r 04b */
dr = &((struct smp_resp *)disc_resp)->disc;
if (memcmp(dev->sas_addr, dr->attached_sas_addr,
SAS_ADDR_SIZE) == 0) {
sas_printk("Found loopback topology, just ignore it!\n");
return 0;
}
if (!(dr->attached_dev_type == 0 &&
dr->attached_sata_dev))
break;
/* In order to generate the dev to host FIS, we
* send a link reset to the expander port */
sas_smp_phy_control(dev, single, PHY_FUNC_LINK_RESET, NULL);
/* Wait for the reset to trigger the negotiation */
msleep(500);
res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
disc_resp, DISCOVER_RESP_SIZE);
if (res)
return res;
dr = &((struct smp_resp *)disc_resp)->disc;
if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) {
sas_printk("Found loopback topology, just ignore it!\n");
return 0;
}
sas_set_ex_phy(dev, single, disc_resp);
return 0;
}
static int sas_ex_phy_discover(struct domain_device *dev, int single)
int sas_ex_phy_discover(struct domain_device *dev, int single)
{
struct expander_device *ex = &dev->ex_dev;
int res = 0;
@@ -569,9 +661,8 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
#define RPS_REQ_SIZE 16
#define RPS_RESP_SIZE 60
static int sas_get_report_phy_sata(struct domain_device *dev,
int phy_id,
struct smp_resp *rps_resp)
int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
struct smp_resp *rps_resp)
{
int res;
u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE);
@@ -657,10 +748,11 @@ static struct domain_device *sas_ex_discover_end_dev(
if (phy->attached_sata_host || phy->attached_sata_ps)
return NULL;
child = kzalloc(sizeof(*child), GFP_KERNEL);
child = sas_alloc_device();
if (!child)
return NULL;
kref_get(&parent->kref);
child->parent = parent;
child->port = parent->port;
child->iproto = phy->attached_iproto;
@@ -676,24 +768,13 @@
}
}
sas_ex_get_linkrate(parent, child, phy);
sas_device_set_phy(child, phy->port);
#ifdef CONFIG_SCSI_SAS_ATA
if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) {
child->dev_type = SATA_DEV;
if (phy->attached_tproto & SAS_PROTOCOL_STP)
child->tproto = phy->attached_tproto;
if (phy->attached_sata_dev)
child->tproto |= SATA_DEV;
res = sas_get_report_phy_sata(parent, phy_id,
&child->sata_dev.rps_resp);
if (res) {
SAS_DPRINTK("report phy sata to %016llx:0x%x returned "
"0x%x\n", SAS_ADDR(parent->sas_addr),
phy_id, res);
res = sas_get_ata_info(child, phy);
if (res)
goto out_free;
}
memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis,
sizeof(struct dev_to_host_fis));
rphy = sas_end_device_alloc(phy->port);
if (unlikely(!rphy))
@@ -703,9 +784,7 @@
child->rphy = rphy;
spin_lock_irq(&parent->port->dev_list_lock);
list_add_tail(&child->dev_list_node, &parent->port->dev_list);
spin_unlock_irq(&parent->port->dev_list_lock);
list_add_tail(&child->disco_list_node, &parent->port->disco_list);
res = sas_discover_sata(child);
if (res) {
@@ -729,9 +808,7 @@
child->rphy = rphy;
sas_fill_in_rphy(child, rphy);
spin_lock_irq(&parent->port->dev_list_lock);
list_add_tail(&child->dev_list_node, &parent->port->dev_list);
spin_unlock_irq(&parent->port->dev_list_lock);
list_add_tail(&child->disco_list_node, &parent->port->disco_list);
res = sas_discover_end_dev(child);
if (res) {
@@ -755,6 +832,7 @@
sas_rphy_free(child->rphy);
child->rphy = NULL;
list_del(&child->disco_list_node);
spin_lock_irq(&parent->port->dev_list_lock);
list_del(&child->dev_list_node);
spin_unlock_irq(&parent->port->dev_list_lock);
@@ -762,7 +840,7 @@
sas_port_delete(phy->port);
out_err:
phy->port = NULL;
kfree(child);
sas_put_device(child);
return NULL;
}
@@ -809,7 +887,7 @@ static struct domain_device *sas_ex_discover_expander(
phy->attached_phy_id);
return NULL;
}
child = kzalloc(sizeof(*child), GFP_KERNEL);
child = sas_alloc_device();
if (!child)
return NULL;
@@ -835,6 +913,7 @@
child->rphy = rphy;
edev = rphy_to_expander_device(rphy);
child->dev_type = phy->attached_dev_type;
kref_get(&parent->kref);
child->parent = parent;
child->port = port;
child->iproto = phy->attached_iproto;
@@ -858,7 +937,7 @@
spin_lock_irq(&parent->port->dev_list_lock);
list_del(&child->dev_list_node);
spin_unlock_irq(&parent->port->dev_list_lock);
kfree(child);
sas_put_device(child);
return NULL;
}
list_add_tail(&child->siblings, &parent->ex_dev.children);
@@ -908,7 +987,8 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
if (ex_phy->attached_dev_type != SAS_END_DEV &&
ex_phy->attached_dev_type != FANOUT_DEV &&
ex_phy->attached_dev_type != EDGE_DEV) {
ex_phy->attached_dev_type != EDGE_DEV &&
ex_phy->attached_dev_type != SATA_PENDING) {
SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx "
"phy 0x%x\n", ex_phy->attached_dev_type,
SAS_ADDR(dev->sas_addr),
@@ -934,6 +1014,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
switch (ex_phy->attached_dev_type) {
case SAS_END_DEV:
case SATA_PENDING:
child = sas_ex_discover_end_dev(dev, phy_id);
break;
case FANOUT_DEV:
@@ -1128,32 +1209,25 @@ static void sas_print_parent_topology_bug(struct domain_device *child,
struct ex_phy *parent_phy,
struct ex_phy *child_phy)
{
static const char ra_char[] = {
[DIRECT_ROUTING] = 'D',
[SUBTRACTIVE_ROUTING] = 'S',
[TABLE_ROUTING] = 'T',
};
static const char *ex_type[] = {
[EDGE_DEV] = "edge",
[FANOUT_DEV] = "fanout",
};
struct domain_device *parent = child->parent;
sas_printk("%s ex %016llx (T2T supp:%d) phy 0x%x <--> %s ex %016llx "
"(T2T supp:%d) phy 0x%x has %c:%c routing link!\n",
sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx "
"phy 0x%x has %c:%c routing link!\n",
ex_type[parent->dev_type],
SAS_ADDR(parent->sas_addr),
parent->ex_dev.t2t_supp,
parent_phy->phy_id,
ex_type[child->dev_type],
SAS_ADDR(child->sas_addr),
child->ex_dev.t2t_supp,
child_phy->phy_id,
ra_char[parent_phy->routing_attr],
ra_char[child_phy->routing_attr]);
sas_route_char(parent, parent_phy),
sas_route_char(child, child_phy));
}
static int sas_check_eeds(struct domain_device *child,
@@ -1610,8 +1684,8 @@ static int sas_get_phy_change_count(struct domain_device *dev,
return res;
}
static int sas_get_phy_attached_sas_addr(struct domain_device *dev,
int phy_id, u8 *attached_sas_addr)
static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
u8 *sas_addr, enum sas_dev_type *type)
{
int res;
struct smp_resp *disc_resp;
@@ -1623,10 +1697,11 @@ static int sas_get_phy_attached_sas_addr(struct domain_device *dev,
dr = &disc_resp->disc;
res = sas_get_phy_discover(dev, phy_id, disc_resp);
if (!res) {
memcpy(attached_sas_addr,disc_resp->disc.attached_sas_addr,8);
if (dr->attached_dev_type == 0)
memset(attached_sas_addr, 0, 8);
if (res == 0) {
memcpy(sas_addr, disc_resp->disc.attached_sas_addr, 8);
*type = to_dev_type(dr);
if (*type == 0)
memset(sas_addr, 0, 8);
}
kfree(disc_resp);
return res;
@@ -1748,7 +1823,7 @@ static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_device *dev)
struct domain_device *child, *n;
list_for_each_entry_safe(child, n, &ex->children, siblings) {
child->gone = 1;
set_bit(SAS_DEV_GONE, &child->state);
if (child->dev_type == EDGE_DEV ||
child->dev_type == FANOUT_DEV)
sas_unregister_ex_tree(port, child);
@@ -1763,27 +1838,28 @@
{
struct expander_device *ex_dev = &parent->ex_dev;
struct ex_phy *phy = &ex_dev->ex_phy[phy_id];
struct domain_device *child, *n;
struct domain_device *child, *n, *found = NULL;
if (last) {
list_for_each_entry_safe(child, n,
&ex_dev->children, siblings) {
if (SAS_ADDR(child->sas_addr) ==
SAS_ADDR(phy->attached_sas_addr)) {
child->gone = 1;
set_bit(SAS_DEV_GONE, &child->state);
if (child->dev_type == EDGE_DEV ||
child->dev_type == FANOUT_DEV)
sas_unregister_ex_tree(parent->port, child);
else
sas_unregister_dev(parent->port, child);
found = child;
break;
}
}
parent->gone = 1;
sas_disable_routing(parent, phy->attached_sas_addr);
}
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
if (phy->port) {
sas_port_delete_phy(phy->port, phy->phy);
sas_device_set_phy(found, phy->port);
if (phy->port->num_phys == 0)
sas_port_delete(phy->port);
phy->port = NULL;
@@ -1874,39 +1950,71 @@ out:
return res;
}
static bool dev_type_flutter(enum sas_dev_type new, enum sas_dev_type old)
{
if (old == new)
return true;
/* treat device directed resets as flutter, if we went
* SAS_END_DEV to SATA_PENDING the link needs recovery
*/
if ((old == SATA_PENDING && new == SAS_END_DEV) ||
(old == SAS_END_DEV && new == SATA_PENDING))
return true;
return false;
}
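dev_type_flutter() collapses the two benign transitions between SAS_END_DEV and SATA_PENDING into "no change", so a device-directed reset does not tear the device down during revalidation. A table-style check of the predicate, using stand-in values for the kernel's enum sas_dev_type:
#include <assert.h>
#include <stdbool.h>

/* Stand-ins for the enum sas_dev_type values used by the predicate. */
enum dev_type { NO_DEVICE, SAS_END_DEV, SATA_PENDING };

static bool dev_type_flutter(enum dev_type new, enum dev_type old)
{
	if (old == new)
		return true;
	/* a device-directed reset may briefly report SATA_PENDING */
	return (old == SATA_PENDING && new == SAS_END_DEV) ||
	       (old == SAS_END_DEV && new == SATA_PENDING);
}

int main(void)
{
	assert(dev_type_flutter(SAS_END_DEV, SAS_END_DEV));
	assert(dev_type_flutter(SATA_PENDING, SAS_END_DEV));
	assert(!dev_type_flutter(NO_DEVICE, SAS_END_DEV));
	return 0;
}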
static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
{
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *phy = &ex->ex_phy[phy_id];
u8 attached_sas_addr[8];
enum sas_dev_type type = NO_DEVICE;
u8 sas_addr[8];
int res;
res = sas_get_phy_attached_sas_addr(dev, phy_id, attached_sas_addr);
res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type);
switch (res) {
case SMP_RESP_NO_PHY:
phy->phy_state = PHY_NOT_PRESENT;
sas_unregister_devs_sas_addr(dev, phy_id, last);
goto out; break;
return res;
case SMP_RESP_PHY_VACANT:
phy->phy_state = PHY_VACANT;
sas_unregister_devs_sas_addr(dev, phy_id, last);
goto out; break;
return res;
case SMP_RESP_FUNC_ACC:
break;
}
if (SAS_ADDR(attached_sas_addr) == 0) {
if (SAS_ADDR(sas_addr) == 0) {
phy->phy_state = PHY_EMPTY;
sas_unregister_devs_sas_addr(dev, phy_id, last);
} else if (SAS_ADDR(attached_sas_addr) ==
SAS_ADDR(phy->attached_sas_addr)) {
SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter\n",
SAS_ADDR(dev->sas_addr), phy_id);
return res;
} else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) &&
dev_type_flutter(type, phy->attached_dev_type)) {
struct domain_device *ata_dev = sas_ex_to_ata(dev, phy_id);
char *action = "";
sas_ex_phy_discover(dev, phy_id);
} else
res = sas_discover_new(dev, phy_id);
out:
return res;
if (ata_dev && phy->attached_dev_type == SATA_PENDING)
action = ", needs recovery";
SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n",
SAS_ADDR(dev->sas_addr), phy_id, action);
return res;
}
/* delete the old link */
if (SAS_ADDR(phy->attached_sas_addr) &&
SAS_ADDR(sas_addr) != SAS_ADDR(phy->attached_sas_addr)) {
SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
SAS_ADDR(dev->sas_addr), phy_id,
SAS_ADDR(phy->attached_sas_addr));
sas_unregister_devs_sas_addr(dev, phy_id, last);
}
return sas_discover_new(dev, phy_id);
}
/**


@@ -187,11 +187,14 @@ static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id,
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
struct sas_phy_linkrates rates;
struct asd_sas_phy *asd_phy;
if (phy_id >= sas_ha->num_phys) {
resp_data[2] = SMP_RESP_NO_PHY;
return;
}
asd_phy = sas_ha->sas_phy[phy_id];
switch (phy_op) {
case PHY_FUNC_NOP:
case PHY_FUNC_LINK_RESET:
@@ -210,7 +213,13 @@
rates.minimum_linkrate = min;
rates.maximum_linkrate = max;
if (i->dft->lldd_control_phy(sas_ha->sas_phy[phy_id], phy_op, &rates))
/* filter reset requests through libata eh */
if (phy_op == PHY_FUNC_LINK_RESET && sas_try_ata_reset(asd_phy) == 0) {
resp_data[2] = SMP_RESP_FUNC_ACC;
return;
}
if (i->dft->lldd_control_phy(asd_phy, phy_op, &rates))
resp_data[2] = SMP_RESP_FUNC_FAILED;
else
resp_data[2] = SMP_RESP_FUNC_ACC;


@@ -28,6 +28,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <scsi/sas_ata.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
@@ -97,14 +98,14 @@ void sas_hae_reset(struct work_struct *work)
container_of(work, struct sas_ha_event, work);
struct sas_ha_struct *ha = ev->ha;
sas_begin_event(HAE_RESET, &ha->event_lock,
&ha->pending);
clear_bit(HAE_RESET, &ha->pending);
}
int sas_register_ha(struct sas_ha_struct *sas_ha)
{
int error = 0;
mutex_init(&sas_ha->disco_mutex);
spin_lock_init(&sas_ha->phy_port_lock);
sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr);
@@ -113,8 +114,10 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
else if (sas_ha->lldd_queue_size == -1)
sas_ha->lldd_queue_size = 128; /* Sanity */
sas_ha->state = SAS_HA_REGISTERED;
set_bit(SAS_HA_REGISTERED, &sas_ha->state);
spin_lock_init(&sas_ha->state_lock);
mutex_init(&sas_ha->drain_mutex);
INIT_LIST_HEAD(&sas_ha->defer_q);
error = sas_register_phys(sas_ha);
if (error) {
@ -144,6 +147,7 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
}
INIT_LIST_HEAD(&sas_ha->eh_done_q);
INIT_LIST_HEAD(&sas_ha->eh_ata_q);
return 0;
@@ -156,17 +160,23 @@ Undo_phys:
int sas_unregister_ha(struct sas_ha_struct *sas_ha)
{
unsigned long flags;
/* Set the state to unregistered to avoid further
* events to be queued */
spin_lock_irqsave(&sas_ha->state_lock, flags);
sas_ha->state = SAS_HA_UNREGISTERED;
spin_unlock_irqrestore(&sas_ha->state_lock, flags);
scsi_flush_work(sas_ha->core.shost);
/* Set the state to unregistered to avoid further unchained
* events to be queued, and flush any in-progress drainers
*/
mutex_lock(&sas_ha->drain_mutex);
spin_lock_irq(&sas_ha->state_lock);
clear_bit(SAS_HA_REGISTERED, &sas_ha->state);
spin_unlock_irq(&sas_ha->state_lock);
__sas_drain_work(sas_ha);
mutex_unlock(&sas_ha->drain_mutex);
sas_unregister_ports(sas_ha);
/* flush unregistration work */
mutex_lock(&sas_ha->drain_mutex);
__sas_drain_work(sas_ha);
mutex_unlock(&sas_ha->drain_mutex);
if (sas_ha->lldd_max_execute_num > 1) {
sas_shutdown_queue(sas_ha);
sas_ha->lldd_max_execute_num = 1;
@@ -190,15 +200,41 @@ static int sas_get_linkerrors(struct sas_phy *phy)
return sas_smp_get_phy_events(phy);
}
int sas_phy_enable(struct sas_phy *phy, int enable)
int sas_try_ata_reset(struct asd_sas_phy *asd_phy)
{
int ret;
enum phy_func command;
struct domain_device *dev = NULL;
if (enable)
command = PHY_FUNC_LINK_RESET;
/* try to route user requested link resets through libata */
if (asd_phy->port)
dev = asd_phy->port->port_dev;
/* validate that dev has been probed */
if (dev)
dev = sas_find_dev_by_rphy(dev->rphy);
if (dev && dev_is_sata(dev)) {
sas_ata_schedule_reset(dev);
sas_ata_wait_eh(dev);
return 0;
}
return -ENODEV;
}
/**
* transport_sas_phy_reset - reset a phy and permit libata to manage the link
*
* Phy reset requests arrive via sysfs and are run in host workqueue
* context, so we know we can block on error handling and safely traverse
* the domain_device topology.
*/
static int transport_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
enum phy_func reset_type;
if (hard_reset)
reset_type = PHY_FUNC_HARD_RESET;
else
command = PHY_FUNC_DISABLE;
reset_type = PHY_FUNC_LINK_RESET;
if (scsi_is_sas_phy_local(phy)) {
struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
@@ -207,15 +243,52 @@ int sas_phy_enable(struct sas_phy *phy, int enable)
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
if (!enable) {
sas_phy_disconnected(asd_phy);
sas_ha->notify_phy_event(asd_phy, PHYE_LOSS_OF_SIGNAL);
}
ret = i->dft->lldd_control_phy(asd_phy, command, NULL);
if (!hard_reset && sas_try_ata_reset(asd_phy) == 0)
return 0;
return i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
} else {
struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
ret = sas_smp_phy_control(ddev, phy->number, command, NULL);
struct domain_device *ata_dev = sas_ex_to_ata(ddev, phy->number);
if (ata_dev && !hard_reset) {
sas_ata_schedule_reset(ata_dev);
sas_ata_wait_eh(ata_dev);
return 0;
} else
return sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
}
}
static int sas_phy_enable(struct sas_phy *phy, int enable)
{
int ret;
enum phy_func cmd;
if (enable)
cmd = PHY_FUNC_LINK_RESET;
else
cmd = PHY_FUNC_DISABLE;
if (scsi_is_sas_phy_local(phy)) {
struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
if (enable)
ret = transport_sas_phy_reset(phy, 0);
else
ret = i->dft->lldd_control_phy(asd_phy, cmd, NULL);
} else {
struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
if (enable)
ret = transport_sas_phy_reset(phy, 0);
else
ret = sas_smp_phy_control(ddev, phy->number, cmd, NULL);
}
return ret;
}
@@ -225,6 +298,9 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset)
int ret;
enum phy_func reset_type;
if (!phy->enabled)
return -ENODEV;
if (hard_reset)
reset_type = PHY_FUNC_HARD_RESET;
else
@@ -285,9 +361,101 @@ int sas_set_phy_speed(struct sas_phy *phy,
return ret;
}
static void sas_phy_release(struct sas_phy *phy)
{
kfree(phy->hostdata);
phy->hostdata = NULL;
}
static void phy_reset_work(struct work_struct *work)
{
struct sas_phy_data *d = container_of(work, typeof(*d), reset_work);
d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset);
}
static void phy_enable_work(struct work_struct *work)
{
struct sas_phy_data *d = container_of(work, typeof(*d), enable_work);
d->enable_result = sas_phy_enable(d->phy, d->enable);
}
static int sas_phy_setup(struct sas_phy *phy)
{
struct sas_phy_data *d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d)
return -ENOMEM;
mutex_init(&d->event_lock);
INIT_WORK(&d->reset_work, phy_reset_work);
INIT_WORK(&d->enable_work, phy_enable_work);
d->phy = phy;
phy->hostdata = d;
return 0;
}
static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
{
struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
struct sas_phy_data *d = phy->hostdata;
int rc;
if (!d)
return -ENOMEM;
/* libsas workqueue coordinates ata-eh reset with discovery */
mutex_lock(&d->event_lock);
d->reset_result = 0;
d->hard_reset = hard_reset;
spin_lock_irq(&ha->state_lock);
sas_queue_work(ha, &d->reset_work);
spin_unlock_irq(&ha->state_lock);
rc = sas_drain_work(ha);
if (rc == 0)
rc = d->reset_result;
mutex_unlock(&d->event_lock);
return rc;
}
static int queue_phy_enable(struct sas_phy *phy, int enable)
{
struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
struct sas_phy_data *d = phy->hostdata;
int rc;
if (!d)
return -ENOMEM;
/* libsas workqueue coordinates ata-eh reset with discovery */
mutex_lock(&d->event_lock);
d->enable_result = 0;
d->enable = enable;
spin_lock_irq(&ha->state_lock);
sas_queue_work(ha, &d->enable_work);
spin_unlock_irq(&ha->state_lock);
rc = sas_drain_work(ha);
if (rc == 0)
rc = d->enable_result;
mutex_unlock(&d->event_lock);
return rc;
}
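queue_phy_reset() and queue_phy_enable() convert the asynchronous workqueue interface back into the synchronous semantics sysfs expects: record the arguments in sas_phy_data, queue the work, drain the queue, then return the stored result. The userspace sketch below reproduces that shape with a thread standing in for the libsas workqueue; names and outcomes are hypothetical. Compile with -pthread.
#include <pthread.h>
#include <stdio.h>

struct phy_request {
	int hard_reset; /* argument consumed by the worker */
	int result;     /* filled in by the worker */
};

static void *reset_worker(void *arg)
{
	struct phy_request *req = arg;

	/* stands in for the queued work item: in libsas this is the
	 * context where blocking on ata error handling is safe */
	req->result = req->hard_reset ? -1 : 0; /* made-up outcome */
	return NULL;
}

/* Synchronous front end: queue the work, wait, return its result. */
static int queue_phy_reset(int hard_reset)
{
	struct phy_request req = { .hard_reset = hard_reset, .result = 0 };
	pthread_t worker;

	if (pthread_create(&worker, NULL, reset_worker, &req))
		return -1;
	pthread_join(worker, NULL); /* plays the role of sas_drain_work() */
	return req.result;
}

int main(void)
{
	printf("link reset -> %d\n", queue_phy_reset(0));
	printf("hard reset -> %d\n", queue_phy_reset(1));
	return 0;
}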
static struct sas_function_template sft = {
.phy_enable = sas_phy_enable,
.phy_reset = sas_phy_reset,
.phy_enable = queue_phy_enable,
.phy_reset = queue_phy_reset,
.phy_setup = sas_phy_setup,
.phy_release = sas_phy_release,
.set_phy_speed = sas_set_phy_speed,
.get_linkerrors = sas_get_linkerrors,
.smp_handler = sas_smp_handler,


@@ -30,6 +30,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/libsas.h>
#include <scsi/sas_ata.h>
#define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
@@ -38,6 +39,18 @@
#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
struct sas_phy_data {
/* let reset be performed in sas_queue_work() context */
struct sas_phy *phy;
struct mutex event_lock;
int hard_reset;
int reset_result;
struct work_struct reset_work;
int enable;
int enable_result;
struct work_struct enable_work;
};
void sas_scsi_recover_host(struct Scsi_Host *shost);
int sas_show_class(enum sas_class class, char *buf);
@@ -56,6 +69,9 @@ enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
int sas_init_queue(struct sas_ha_struct *sas_ha);
int sas_init_events(struct sas_ha_struct *sas_ha);
void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
void sas_disable_revalidation(struct sas_ha_struct *ha);
void sas_enable_revalidation(struct sas_ha_struct *ha);
void __sas_drain_work(struct sas_ha_struct *ha);
void sas_deform_port(struct asd_sas_phy *phy, int gone);
@@ -64,6 +80,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work);
void sas_porte_link_reset_err(struct work_struct *work);
void sas_porte_timer_event(struct work_struct *work);
void sas_porte_hard_reset(struct work_struct *work);
void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work);
int sas_notify_lldd_dev_found(struct domain_device *);
void sas_notify_lldd_dev_gone(struct domain_device *);
@@ -72,10 +89,17 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
enum phy_func phy_func, struct sas_phy_linkrates *);
int sas_smp_get_phy_events(struct sas_phy *phy);
void sas_device_set_phy(struct domain_device *dev, struct sas_port *port);
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
int sas_ex_phy_discover(struct domain_device *dev, int single);
int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
struct smp_resp *rps_resp);
int sas_try_ata_reset(struct asd_sas_phy *phy);
void sas_hae_reset(struct work_struct *work);
void sas_free_device(struct kref *kref);
#ifdef CONFIG_SCSI_SAS_HOST_SMP
extern int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
struct request *rsp);
@@ -90,36 +114,13 @@ static inline int sas_smp_host_handler(struct Scsi_Host *shost,
}
#endif
static inline void sas_queue_event(int event, spinlock_t *lock,
unsigned long *pending,
struct work_struct *work,
struct sas_ha_struct *sas_ha)
static inline void sas_fail_probe(struct domain_device *dev, const char *func, int err)
{
unsigned long flags;
spin_lock_irqsave(lock, flags);
if (test_bit(event, pending)) {
spin_unlock_irqrestore(lock, flags);
return;
}
__set_bit(event, pending);
spin_unlock_irqrestore(lock, flags);
spin_lock_irqsave(&sas_ha->state_lock, flags);
if (sas_ha->state != SAS_HA_UNREGISTERED) {
scsi_queue_work(sas_ha->core.shost, work);
}
spin_unlock_irqrestore(&sas_ha->state_lock, flags);
}
static inline void sas_begin_event(int event, spinlock_t *lock,
unsigned long *pending)
{
unsigned long flags;
spin_lock_irqsave(lock, flags);
__clear_bit(event, pending);
spin_unlock_irqrestore(lock, flags);
SAS_DPRINTK("%s: for %s device %16llx returned %d\n",
func, dev->parent ? "exp-attached" :
"direct-attached",
SAS_ADDR(dev->sas_addr), err);
sas_unregister_dev(dev->port, dev);
}
static inline void sas_fill_in_rphy(struct domain_device *dev,
@@ -132,6 +133,7 @@ static inline void sas_fill_in_rphy(struct domain_device *dev,
case SATA_DEV:
/* FIXME: need sata device type */
case SAS_END_DEV:
case SATA_PENDING:
rphy->identify.device_type = SAS_END_DEVICE;
break;
case EDGE_DEV:
@@ -146,6 +148,22 @@ static inline void sas_fill_in_rphy(struct domain_device *dev,
}
}
static inline void sas_phy_set_target(struct asd_sas_phy *p, struct domain_device *dev)
{
struct sas_phy *phy = p->phy;
if (dev) {
if (dev_is_sata(dev))
phy->identify.device_type = SAS_END_DEVICE;
else
phy->identify.device_type = dev->dev_type;
phy->identify.target_port_protocols = dev->tproto;
} else {
phy->identify.device_type = SAS_PHY_UNUSED;
phy->identify.target_port_protocols = 0;
}
}
static inline void sas_add_parent_port(struct domain_device *dev, int phy_id)
{
struct expander_device *ex = &dev->ex_dev;
@@ -161,4 +179,23 @@ static inline void sas_add_parent_port(struct domain_device *dev, int phy_id)
sas_port_add_phy(ex->parent_port, ex_phy->phy);
}
static inline struct domain_device *sas_alloc_device(void)
{
struct domain_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev) {
INIT_LIST_HEAD(&dev->siblings);
INIT_LIST_HEAD(&dev->dev_list_node);
INIT_LIST_HEAD(&dev->disco_list_node);
kref_init(&dev->kref);
spin_lock_init(&dev->done_lock);
}
return dev;
}
static inline void sas_put_device(struct domain_device *dev)
{
kref_put(&dev->kref, sas_free_device);
}
#endif /* _SAS_INTERNAL_H_ */


@@ -36,8 +36,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
container_of(work, struct asd_sas_event, work);
struct asd_sas_phy *phy = ev->phy;
sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
&phy->phy_events_pending);
clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending);
phy->error = 0;
sas_deform_port(phy, 1);
}
@@ -48,8 +47,7 @@ static void sas_phye_oob_done(struct work_struct *work)
container_of(work, struct asd_sas_event, work);
struct asd_sas_phy *phy = ev->phy;
sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock,
&phy->phy_events_pending);
clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending);
phy->error = 0;
}
@@ -63,8 +61,7 @@ static void sas_phye_oob_error(struct work_struct *work)
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
sas_begin_event(PHYE_OOB_ERROR, &phy->ha->event_lock,
&phy->phy_events_pending);
clear_bit(PHYE_OOB_ERROR, &phy->phy_events_pending);
sas_deform_port(phy, 1);
@@ -95,8 +92,7 @@ static void sas_phye_spinup_hold(struct work_struct *work)
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
sas_begin_event(PHYE_SPINUP_HOLD, &phy->ha->event_lock,
&phy->phy_events_pending);
clear_bit(PHYE_SPINUP_HOLD, &phy->phy_events_pending);
phy->error = 0;
i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);


@@ -104,13 +104,11 @@ static void sas_form_port(struct asd_sas_phy *phy)
/* add the phy to the port */
list_add_tail(&phy->port_phy_el, &port->phy_list);
sas_phy_set_target(phy, port->port_dev);
phy->port = port;
port->num_phys++;
port->phy_mask |= (1U << phy->id);
if (!port->phy)
port->phy = phy->phy;
if (*(u64 *)port->attached_sas_addr == 0) {
port->class = phy->class;
memcpy(port->attached_sas_addr, phy->attached_sas_addr,
@@ -125,7 +123,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
if (!port->port) {
port->port = sas_port_alloc(phy->phy->dev.parent, port->id);
port->port = sas_port_alloc(phy->phy->dev.parent, phy->id);
BUG_ON(!port->port);
sas_port_add(port->port);
}
@@ -170,13 +168,13 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
dev->pathways--;
if (port->num_phys == 1) {
if (dev && gone)
dev->gone = 1;
sas_unregister_domain_devices(port);
sas_unregister_domain_devices(port, gone);
sas_port_delete(port->port);
port->port = NULL;
} else
} else {
sas_port_delete_phy(port->port, phy->phy);
sas_device_set_phy(dev, port->port);
}
if (si->dft->lldd_port_deformed)
si->dft->lldd_port_deformed(phy);
@@ -185,6 +183,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
spin_lock(&port->phy_list_lock);
list_del_init(&phy->port_phy_el);
sas_phy_set_target(phy, NULL);
phy->port = NULL;
port->num_phys--;
port->phy_mask &= ~(1U << phy->id);
@@ -213,8 +212,7 @@ void sas_porte_bytes_dmaed(struct work_struct *work)
container_of(work, struct asd_sas_event, work);
struct asd_sas_phy *phy = ev->phy;
sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock,
&phy->port_events_pending);
clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending);
sas_form_port(phy);
}
@@ -227,8 +225,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work)
unsigned long flags;
u32 prim;
sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock,
&phy->port_events_pending);
clear_bit(PORTE_BROADCAST_RCVD, &phy->port_events_pending);
spin_lock_irqsave(&phy->sas_prim_lock, flags);
prim = phy->sas_prim;
@@ -244,8 +241,7 @@ void sas_porte_link_reset_err(struct work_struct *work)
container_of(work, struct asd_sas_event, work);
struct asd_sas_phy *phy = ev->phy;
sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
&phy->port_events_pending);
clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending);
sas_deform_port(phy, 1);
}
@@ -256,8 +252,7 @@ void sas_porte_timer_event(struct work_struct *work)
container_of(work, struct asd_sas_event, work);
struct asd_sas_phy *phy = ev->phy;
sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
&phy->port_events_pending);
clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending);
sas_deform_port(phy, 1);
}
@@ -268,8 +263,7 @@ void sas_porte_hard_reset(struct work_struct *work)
container_of(work, struct asd_sas_event, work);
struct asd_sas_phy *phy = ev->phy;
sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
&phy->port_events_pending);
clear_bit(PORTE_HARD_RESET, &phy->port_events_pending);
sas_deform_port(phy, 1);
}
@@ -282,6 +276,8 @@ static void sas_init_port(struct asd_sas_port *port,
memset(port, 0, sizeof(*port));
port->id = i;
INIT_LIST_HEAD(&port->dev_list);
INIT_LIST_HEAD(&port->disco_list);
INIT_LIST_HEAD(&port->destroy_list);
spin_lock_init(&port->phy_list_lock);
INIT_LIST_HEAD(&port->phy_list);
port->ha = sas_ha;


@@ -49,27 +49,12 @@
#include <linux/scatterlist.h>
#include <linux/libata.h>
/* ---------- SCSI Host glue ---------- */
static void sas_scsi_task_done(struct sas_task *task)
/* record final status and free the task */
static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
{
struct task_status_struct *ts = &task->task_status;
struct scsi_cmnd *sc = task->uldd_task;
int hs = 0, stat = 0;
if (unlikely(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
/* Aborted tasks will be completed by the error handler */
SAS_DPRINTK("task done but aborted\n");
return;
}
if (unlikely(!sc)) {
SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
list_del_init(&task->list);
sas_free_task(task);
return;
}
if (ts->resp == SAS_TASK_UNDELIVERED) {
/* transport error */
hs = DID_NO_CONNECT;
@@ -124,10 +109,41 @@ static void sas_scsi_task_done(struct sas_task *task)
break;
}
}
ASSIGN_SAS_TASK(sc, NULL);
sc->result = (hs << 16) | stat;
ASSIGN_SAS_TASK(sc, NULL);
list_del_init(&task->list);
sas_free_task(task);
}
static void sas_scsi_task_done(struct sas_task *task)
{
struct scsi_cmnd *sc = task->uldd_task;
struct domain_device *dev = task->dev;
struct sas_ha_struct *ha = dev->port->ha;
unsigned long flags;
spin_lock_irqsave(&dev->done_lock, flags);
if (test_bit(SAS_HA_FROZEN, &ha->state))
task = NULL;
else
ASSIGN_SAS_TASK(sc, NULL);
spin_unlock_irqrestore(&dev->done_lock, flags);
if (unlikely(!task)) {
/* task will be completed by the error handler */
SAS_DPRINTK("task done but aborted\n");
return;
}
if (unlikely(!sc)) {
SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
list_del_init(&task->list);
sas_free_task(task);
return;
}
sas_end_task(sc, task);
sc->scsi_done(sc);
}
@@ -192,17 +208,15 @@ int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
int res = 0;
/* If the device fell off, no sense in issuing commands */
if (dev->gone) {
if (test_bit(SAS_DEV_GONE, &dev->state)) {
cmd->result = DID_BAD_TARGET << 16;
goto out_done;
}
if (dev_is_sata(dev)) {
unsigned long flags;
spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
spin_lock_irq(dev->sata_dev.ap->lock);
res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
spin_unlock_irq(dev->sata_dev.ap->lock);
return res;
}
@@ -235,24 +249,38 @@ out_done:
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
struct sas_task *task = TO_SAS_TASK(cmd);
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
struct sas_task *task = TO_SAS_TASK(cmd);
/* At this point, we only get called following an actual abort
* of the task, so we should be guaranteed not to be racing with
* any completions from the LLD. Task is freed after this.
*/
sas_end_task(cmd, task);
/* remove the aborted task flag to allow the task to be
* completed now. At this point, we only get called following
* an actual abort of the task, so we should be guaranteed not
* to be racing with any completions from the LLD (hence we
* don't need the task state lock to clear the flag) */
task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
/* Now call task_done. However, task will be free'd after
* this */
task->task_done(task);
/* now finish the command and move it on to the error
* handler done list, this also takes it off the
* error handler pending list */
* error handler pending list.
*/
scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
}
static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
{
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_ha_struct *ha = dev->port->ha;
struct sas_task *task = TO_SAS_TASK(cmd);
if (!dev_is_sata(dev)) {
sas_eh_finish_cmd(cmd);
return;
}
/* report the timeout to libata */
sas_end_task(cmd, task);
list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
}
static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
{
struct scsi_cmnd *cmd, *n;
@ -260,7 +288,7 @@ static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd
list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
cmd->device->lun == my_cmd->device->lun)
sas_eh_finish_cmd(cmd);
sas_eh_defer_cmd(cmd);
}
}
@ -295,6 +323,7 @@ enum task_disposition {
TASK_IS_DONE,
TASK_IS_ABORTED,
TASK_IS_AT_LU,
TASK_IS_NOT_AT_HA,
TASK_IS_NOT_AT_LU,
TASK_ABORT_FAILED,
};
@ -311,19 +340,18 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
struct scsi_core *core = &ha->core;
struct sas_task *t, *n;
mutex_lock(&core->task_queue_flush);
spin_lock_irqsave(&core->task_queue_lock, flags);
list_for_each_entry_safe(t, n, &core->task_queue, list) {
list_for_each_entry_safe(t, n, &core->task_queue, list)
if (task == t) {
list_del_init(&t->list);
spin_unlock_irqrestore(&core->task_queue_lock,
flags);
SAS_DPRINTK("%s: task 0x%p aborted from "
"task_queue\n",
__func__, task);
return TASK_IS_ABORTED;
break;
}
}
spin_unlock_irqrestore(&core->task_queue_lock, flags);
mutex_unlock(&core->task_queue_flush);
if (task == t)
return TASK_IS_NOT_AT_HA;
}
for (i = 0; i < 5; i++) {
@ -411,30 +439,26 @@ static int sas_recover_I_T(struct domain_device *dev)
return res;
}
/* Find the sas_phy that's attached to this device */
struct sas_phy *sas_find_local_phy(struct domain_device *dev)
/* take a reference on the last known good phy for this device */
struct sas_phy *sas_get_local_phy(struct domain_device *dev)
{
struct domain_device *pdev = dev->parent;
struct ex_phy *exphy = NULL;
int i;
struct sas_ha_struct *ha = dev->port->ha;
struct sas_phy *phy;
unsigned long flags;
/* Directly attached device */
if (!pdev)
return dev->port->phy;
/* a published domain device always has a valid phy; it may be
* stale, but it is never NULL
*/
BUG_ON(!dev->phy);
/* Otherwise look in the expander */
for (i = 0; i < pdev->ex_dev.num_phys; i++)
if (!memcmp(dev->sas_addr,
pdev->ex_dev.ex_phy[i].attached_sas_addr,
SAS_ADDR_SIZE)) {
exphy = &pdev->ex_dev.ex_phy[i];
break;
}
spin_lock_irqsave(&ha->phy_port_lock, flags);
phy = dev->phy;
get_device(&phy->dev);
spin_unlock_irqrestore(&ha->phy_port_lock, flags);
BUG_ON(!exphy);
return exphy->phy;
return phy;
}
EXPORT_SYMBOL_GPL(sas_find_local_phy);
EXPORT_SYMBOL_GPL(sas_get_local_phy);
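A minimal usage sketch for the renamed helper (illustrative only; variable names are placeholders): sas_get_local_phy() now returns the phy with a reference held, which the caller must drop with sas_put_local_phy() when done, as the eh_bus_reset hunk below does.

struct sas_phy *phy = sas_get_local_phy(dev); /* reference taken */
res = sas_phy_reset(phy, 1);                  /* use while held */
sas_put_local_phy(phy);                       /* reference dropped */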
/* Attempt to send a LUN reset message to a device */
int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
@ -461,7 +485,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_phy *phy = sas_find_local_phy(dev);
struct sas_phy *phy = sas_get_local_phy(dev);
int res;
res = sas_phy_reset(phy, 1);
@ -469,6 +493,8 @@ int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
SAS_DPRINTK("Bus reset of %s failed 0x%x\n",
kobject_name(&phy->dev.kobj),
res);
sas_put_local_phy(phy);
if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
return SUCCESS;
@ -495,9 +521,7 @@ try_bus_reset:
return FAILED;
}
static int sas_eh_handle_sas_errors(struct Scsi_Host *shost,
struct list_head *work_q,
struct list_head *done_q)
static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q)
{
struct scsi_cmnd *cmd, *n;
enum task_disposition res = TASK_IS_DONE;
@ -505,13 +529,28 @@ static int sas_eh_handle_sas_errors(struct Scsi_Host *shost,
struct sas_internal *i = to_sas_internal(shost->transportt);
unsigned long flags;
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
LIST_HEAD(done);
Again:
/* clean out any commands that won the completion vs eh race */
list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
struct sas_task *task = TO_SAS_TASK(cmd);
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_task *task;
spin_lock_irqsave(&dev->done_lock, flags);
/* by this point the lldd has either observed
* SAS_HA_FROZEN and is leaving the task alone, or has
* won the race with eh and decided to complete it
*/
task = TO_SAS_TASK(cmd);
spin_unlock_irqrestore(&dev->done_lock, flags);
if (!task)
continue;
list_move_tail(&cmd->eh_entry, &done);
}
Again:
list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
struct sas_task *task = TO_SAS_TASK(cmd);
list_del_init(&cmd->eh_entry);
@ -531,15 +570,23 @@ Again:
cmd->eh_eflags = 0;
switch (res) {
case TASK_IS_NOT_AT_HA:
SAS_DPRINTK("%s: task 0x%p is not at ha: %s\n",
__func__, task,
cmd->retries ? "retry" : "aborted");
if (cmd->retries)
cmd->retries--;
sas_eh_finish_cmd(cmd);
continue;
case TASK_IS_DONE:
SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
task);
sas_eh_finish_cmd(cmd);
sas_eh_defer_cmd(cmd);
continue;
case TASK_IS_ABORTED:
SAS_DPRINTK("%s: task 0x%p is aborted\n",
__func__, task);
sas_eh_finish_cmd(cmd);
sas_eh_defer_cmd(cmd);
continue;
case TASK_IS_AT_LU:
SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
@ -550,7 +597,7 @@ Again:
"recovered\n",
SAS_ADDR(task->dev),
cmd->device->lun);
sas_eh_finish_cmd(cmd);
sas_eh_defer_cmd(cmd);
sas_scsi_clear_queue_lu(work_q, cmd);
goto Again;
}
@ -560,7 +607,8 @@ Again:
SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n",
task);
tmf_resp = sas_recover_I_T(task->dev);
if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
tmf_resp == -ENODEV) {
struct domain_device *dev = task->dev;
SAS_DPRINTK("I_T %016llx recovered\n",
SAS_ADDR(task->dev->sas_addr));
@ -607,13 +655,16 @@ Again:
goto clear_q;
}
}
return list_empty(work_q);
clear_q:
out:
list_splice_tail(&done, work_q);
list_splice_tail_init(&ha->eh_ata_q, work_q);
return;
clear_q:
SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__);
list_for_each_entry_safe(cmd, n, work_q, eh_entry)
sas_eh_finish_cmd(cmd);
return list_empty(work_q);
goto out;
}
void sas_scsi_recover_host(struct Scsi_Host *shost)
@ -627,12 +678,17 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
shost->host_eh_scheduled = 0;
spin_unlock_irqrestore(shost->host_lock, flags);
SAS_DPRINTK("Enter %s\n", __func__);
SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
__func__, shost->host_busy, shost->host_failed);
/*
* Deal with commands that still have SAS tasks (i.e. they didn't
* complete via the normal sas_task completion mechanism)
* complete via the normal sas_task completion mechanism);
* SAS_HA_FROZEN gives eh dominion over all sas_task completion.
*/
if (sas_eh_handle_sas_errors(shost, &eh_work_q, &ha->eh_done_q))
set_bit(SAS_HA_FROZEN, &ha->state);
sas_eh_handle_sas_errors(shost, &eh_work_q);
clear_bit(SAS_HA_FROZEN, &ha->state);
if (list_empty(&eh_work_q))
goto out;
/*
@ -641,59 +697,26 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
* scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
* command we see here has no sas_task and is thus unknown to the HA.
*/
if (!sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q))
if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q);
if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
out:
if (ha->lldd_max_execute_num > 1)
wake_up_process(ha->core.queue_thread);
/* now link into libata eh --- if we have any ata devices */
sas_ata_strategy_handler(shost);
scsi_eh_flush_done_q(&ha->eh_done_q);
SAS_DPRINTK("--- Exit %s\n", __func__);
return;
SAS_DPRINTK("--- Exit %s: busy: %d failed: %d\n",
__func__, shost->host_busy, shost->host_failed);
}
enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
{
struct sas_task *task = TO_SAS_TASK(cmd);
unsigned long flags;
enum blk_eh_timer_return rtn;
if (sas_ata_timed_out(cmd, task, &rtn))
return rtn;
if (!task) {
cmd->request->timeout /= 2;
SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n",
cmd, task, (cmd->request->timeout ?
"BLK_EH_RESET_TIMER" : "BLK_EH_NOT_HANDLED"));
if (!cmd->request->timeout)
return BLK_EH_NOT_HANDLED;
return BLK_EH_RESET_TIMER;
}
spin_lock_irqsave(&task->task_state_lock, flags);
BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("command 0x%p, task 0x%p, timed out: "
"BLK_EH_HANDLED\n", cmd, task);
return BLK_EH_HANDLED;
}
if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: "
"BLK_EH_RESET_TIMER\n",
cmd, task);
return BLK_EH_RESET_TIMER;
}
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("command 0x%p, task 0x%p, timed out: BLK_EH_NOT_HANDLED\n",
cmd, task);
scmd_printk(KERN_DEBUG, cmd, "command %p timed out\n", cmd);
return BLK_EH_NOT_HANDLED;
}
@ -737,27 +760,15 @@ struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
return found_dev;
}
static inline struct domain_device *sas_find_target(struct scsi_target *starget)
{
struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
return sas_find_dev_by_rphy(rphy);
}
int sas_target_alloc(struct scsi_target *starget)
{
struct domain_device *found_dev = sas_find_target(starget);
int res;
struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
struct domain_device *found_dev = sas_find_dev_by_rphy(rphy);
if (!found_dev)
return -ENODEV;
if (dev_is_sata(found_dev)) {
res = sas_ata_init_host_and_port(found_dev, starget);
if (res)
return res;
}
kref_get(&found_dev->kref);
starget->hostdata = found_dev;
return 0;
}
@ -797,14 +808,6 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
return 0;
}
void sas_slave_destroy(struct scsi_device *scsi_dev)
{
struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
if (dev_is_sata(dev))
sas_to_ata_dev(dev)->class = ATA_DEV_NONE;
}
int sas_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
{
struct domain_device *dev = sdev_to_domain_dev(sdev);
@ -871,9 +874,11 @@ static void sas_queue(struct sas_ha_struct *sas_ha)
int res;
struct sas_internal *i = to_sas_internal(core->shost->transportt);
mutex_lock(&core->task_queue_flush);
spin_lock_irqsave(&core->task_queue_lock, flags);
while (!kthread_should_stop() &&
!list_empty(&core->task_queue)) {
!list_empty(&core->task_queue) &&
!test_bit(SAS_HA_FROZEN, &sas_ha->state)) {
can_queue = sas_ha->lldd_queue_size - core->task_queue_size;
if (can_queue >= 0) {
@ -909,6 +914,7 @@ static void sas_queue(struct sas_ha_struct *sas_ha)
}
}
spin_unlock_irqrestore(&core->task_queue_lock, flags);
mutex_unlock(&core->task_queue_flush);
}
/**
@ -935,6 +941,7 @@ int sas_init_queue(struct sas_ha_struct *sas_ha)
struct scsi_core *core = &sas_ha->core;
spin_lock_init(&core->task_queue_lock);
mutex_init(&core->task_queue_flush);
core->task_queue_size = 0;
INIT_LIST_HEAD(&core->task_queue);
@ -971,49 +978,6 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
spin_unlock_irqrestore(&core->task_queue_lock, flags);
}
/*
* Call the LLDD task abort routine directly. This function is intended for
* use by upper layers that need to tell the LLDD to abort a task.
*/
int __sas_task_abort(struct sas_task *task)
{
struct sas_internal *si =
to_sas_internal(task->dev->port->ha->core.shost->transportt);
unsigned long flags;
int res;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("%s: Task %p already finished.\n", __func__,
task);
return 0;
}
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (!si->dft->lldd_abort_task)
return -ENODEV;
res = si->dft->lldd_abort_task(task);
spin_lock_irqsave(&task->task_state_lock, flags);
if ((task->task_state_flags & SAS_TASK_STATE_DONE) ||
(res == TMF_RESP_FUNC_COMPLETE))
{
spin_unlock_irqrestore(&task->task_state_lock, flags);
task->task_done(task);
return 0;
}
if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags);
return -EAGAIN;
}
/*
* Tell an upper layer that it needs to initiate an abort for a given task.
* This should only ever be called by an LLDD.
@ -1043,27 +1007,15 @@ void sas_task_abort(struct sas_task *task)
}
}
int sas_slave_alloc(struct scsi_device *scsi_dev)
{
struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
if (dev_is_sata(dev))
return ata_sas_port_init(dev->sata_dev.ap);
return 0;
}
void sas_target_destroy(struct scsi_target *starget)
{
struct domain_device *found_dev = sas_find_target(starget);
struct domain_device *found_dev = starget->hostdata;
if (!found_dev)
return;
if (dev_is_sata(found_dev))
ata_sas_port_destroy(found_dev->sata_dev.ap);
return;
starget->hostdata = NULL;
sas_put_device(found_dev);
}
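Read together with the sas_target_alloc hunk above, the two sides now pair one device reference across the target's lifetime; a condensed sketch (illustrative, assembled from the two hunks):

/* sas_target_alloc: take a reference and publish it via hostdata */
kref_get(&found_dev->kref);
starget->hostdata = found_dev;

/* sas_target_destroy: fetch the same pointer and drop the reference */
found_dev = starget->hostdata;
starget->hostdata = NULL;
sas_put_device(found_dev);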
static void sas_parse_addr(u8 *sas_addr, const char *p)
@ -1108,16 +1060,12 @@ EXPORT_SYMBOL_GPL(sas_request_addr);
EXPORT_SYMBOL_GPL(sas_queuecommand);
EXPORT_SYMBOL_GPL(sas_target_alloc);
EXPORT_SYMBOL_GPL(sas_slave_configure);
EXPORT_SYMBOL_GPL(sas_slave_destroy);
EXPORT_SYMBOL_GPL(sas_change_queue_depth);
EXPORT_SYMBOL_GPL(sas_change_queue_type);
EXPORT_SYMBOL_GPL(sas_bios_param);
EXPORT_SYMBOL_GPL(__sas_task_abort);
EXPORT_SYMBOL_GPL(sas_task_abort);
EXPORT_SYMBOL_GPL(sas_phy_reset);
EXPORT_SYMBOL_GPL(sas_phy_enable);
EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler);
EXPORT_SYMBOL_GPL(sas_slave_alloc);
EXPORT_SYMBOL_GPL(sas_target_destroy);
EXPORT_SYMBOL_GPL(sas_ioctl);


@ -534,6 +534,7 @@ struct lpfc_hba {
void (*lpfc_scsi_prep_cmnd)
(struct lpfc_vport *, struct lpfc_scsi_buf *,
struct lpfc_nodelist *);
/* IOCB interface function jump table entries */
int (*__lpfc_sli_issue_iocb)
(struct lpfc_hba *, uint32_t,
@ -541,8 +542,6 @@ struct lpfc_hba {
void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
struct lpfc_iocbq *);
int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
IOCB_t * (*lpfc_get_iocb_from_iocbq)
(struct lpfc_iocbq *);
void (*lpfc_scsi_cmd_iocb_cmpl)
@ -551,10 +550,12 @@ struct lpfc_hba {
/* MBOX interface function jump table entries */
int (*lpfc_sli_issue_mbox)
(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
/* Slow-path IOCB process function jump table entries */
void (*lpfc_sli_handle_slow_ring_event)
(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint32_t mask);
/* INIT device interface function jump table entries */
int (*lpfc_sli_hbq_to_firmware)
(struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
@ -573,6 +574,10 @@ struct lpfc_hba {
int (*lpfc_selective_reset)
(struct lpfc_hba *);
int (*lpfc_bg_scsi_prep_dma_buf)
(struct lpfc_hba *, struct lpfc_scsi_buf *);
/* Add new entries here */
/* SLI4 specific HBA data structure */
struct lpfc_sli4_hba sli4_hba;
@ -838,6 +843,7 @@ struct lpfc_hba {
struct dentry *debug_writeGuard; /* inject write guard_tag errors */
struct dentry *debug_writeApp; /* inject write app_tag errors */
struct dentry *debug_writeRef; /* inject write ref_tag errors */
struct dentry *debug_readGuard; /* inject read guard_tag errors */
struct dentry *debug_readApp; /* inject read app_tag errors */
struct dentry *debug_readRef; /* inject read ref_tag errors */
@ -845,10 +851,11 @@ struct lpfc_hba {
uint32_t lpfc_injerr_wgrd_cnt;
uint32_t lpfc_injerr_wapp_cnt;
uint32_t lpfc_injerr_wref_cnt;
uint32_t lpfc_injerr_rgrd_cnt;
uint32_t lpfc_injerr_rapp_cnt;
uint32_t lpfc_injerr_rref_cnt;
sector_t lpfc_injerr_lba;
#define LPFC_INJERR_LBA_OFF (sector_t)0xffffffffffffffff
#define LPFC_INJERR_LBA_OFF (sector_t)(-1)
struct dentry *debug_slow_ring_trc;
struct lpfc_debugfs_trc *slow_ring_trc;


@ -353,7 +353,7 @@ lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
uint32_t if_type;
uint8_t sli_family;
char fwrev[32];
char fwrev[FW_REV_STR_SIZE];
int len;
lpfc_decode_firmware_rev(phba, fwrev, 1);
@ -922,11 +922,15 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
rc = lpfc_sli4_pdev_status_reg_wait(phba);
if (rc == -EPERM) {
/* no privilege for reset, restore if needed */
if (before_fc_flag & FC_OFFLINE_MODE)
goto out;
/* no privilege for reset */
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3150 No privilage to perform the requested "
"access: x%x\n", reg_val);
} else if (rc == -EIO) {
/* reset failed, there is nothing more we can do */
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3153 Fail to perform the requested "
"access: x%x\n", reg_val);
return rc;
}


@ -589,7 +589,10 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
}
cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
cmdiocbq->iocb.ulpContext = rpi;
if (phba->sli_rev == LPFC_SLI_REV4)
cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
else
cmdiocbq->iocb.ulpContext = rpi;
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
cmdiocbq->context1 = NULL;
cmdiocbq->context2 = NULL;
@ -1768,7 +1771,7 @@ lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
bf_set(lpfc_mbx_set_diag_state_link_type,
&link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
LPFC_DIAG_LOOPBACK_TYPE_SERDES);
LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
@ -3977,7 +3980,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3106 Handled SLI_CONFIG "
"subsys_fcoe, opcode:x%x\n",
"subsys_comn, opcode:x%x\n",
opcode);
rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
nemb_mse, dmabuf);
@ -3985,7 +3988,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
default:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"3107 Reject SLI_CONFIG "
"subsys_fcoe, opcode:x%x\n",
"subsys_comn, opcode:x%x\n",
opcode);
rc = -EPERM;
break;
@ -4556,7 +4559,12 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ sizeof(MAILBOX_t));
}
} else if (phba->sli_rev == LPFC_SLI_REV4) {
if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
/* Let type 4 (well known data) through because the data is
* returned in varwords[4-8]
* otherwise check the receive length and fetch the buffer addr
*/
if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
(pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
/* rebuild the command for sli4 using our own buffers
* like we do for biu diags
*/


@ -462,3 +462,4 @@ int lpfc_issue_unreg_vfi(struct lpfc_vport *);
int lpfc_selective_reset(struct lpfc_hba *);
int lpfc_sli4_read_config(struct lpfc_hba *phba);
int lpfc_scsi_buf_update(struct lpfc_hba *phba);
void lpfc_sli4_node_prep(struct lpfc_hba *phba);


@ -1076,7 +1076,7 @@ int
lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
size_t size)
{
char fwrev[16];
char fwrev[FW_REV_STR_SIZE];
int n;
lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
@ -1834,7 +1834,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
uint8_t *fwname;
if (phba->sli_rev == LPFC_SLI_REV4)
sprintf(fwrevision, "%s", vp->rev.opFwName);
snprintf(fwrevision, FW_REV_STR_SIZE, "%s", vp->rev.opFwName);
else if (vp->rev.rBit) {
if (psli->sli_flag & LPFC_SLI_ACTIVE)
rev = vp->rev.sli2FwRev;


@ -1019,6 +1019,8 @@ lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wapp_cnt);
else if (dent == phba->debug_writeRef)
cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wref_cnt);
else if (dent == phba->debug_readGuard)
cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rgrd_cnt);
else if (dent == phba->debug_readApp)
cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rapp_cnt);
else if (dent == phba->debug_readRef)
@ -1057,6 +1059,8 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
phba->lpfc_injerr_wapp_cnt = (uint32_t)tmp;
else if (dent == phba->debug_writeRef)
phba->lpfc_injerr_wref_cnt = (uint32_t)tmp;
else if (dent == phba->debug_readGuard)
phba->lpfc_injerr_rgrd_cnt = (uint32_t)tmp;
else if (dent == phba->debug_readApp)
phba->lpfc_injerr_rapp_cnt = (uint32_t)tmp;
else if (dent == phba->debug_readRef)
@ -3978,6 +3982,17 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
snprintf(name, sizeof(name), "readGuardInjErr");
phba->debug_readGuard =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_dif_err);
if (!phba->debug_readGuard) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0808 Cannot create debugfs readGuard\n");
goto debug_failed;
}
snprintf(name, sizeof(name), "readAppInjErr");
phba->debug_readApp =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
@ -4318,6 +4333,10 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->debug_writeRef); /* writeRef */
phba->debug_writeRef = NULL;
}
if (phba->debug_readGuard) {
debugfs_remove(phba->debug_readGuard); /* readGuard */
phba->debug_readGuard = NULL;
}
if (phba->debug_readApp) {
debugfs_remove(phba->debug_readApp); /* readApp */
phba->debug_readApp = NULL;


@ -1526,7 +1526,6 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
memcpy(&ndlp->active_rrqs.xri_bitmap,
&rrq.xri_bitmap,
sizeof(ndlp->active_rrqs.xri_bitmap));
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
/* Since we are swapping the ndlp passed in with the new one
* and the did has already been swapped, copy over the
* state and names.
@ -1536,6 +1535,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename,
sizeof(struct lpfc_name));
new_ndlp->nlp_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
/* Fix up the rport accordingly */
rport = ndlp->rport;
if (rport) {
@ -7172,7 +7172,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
/* FDISC failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0126 FDISC failed. (%d/%d)\n",
"0126 FDISC failed. (x%x/x%x)\n",
irsp->ulpStatus, irsp->un.ulpWord[4]);
goto fdisc_failed;
}
@ -7283,6 +7283,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int rc;
vport->port_state = LPFC_FDISC;
vport->fc_myDID = 0;
cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
ELS_CMD_FDISC);


@ -2977,9 +2977,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
"topology\n");
/* Get Loop Map information */
if (bf_get(lpfc_mbx_read_top_il, la)) {
spin_lock_irq(shost->host_lock);
spin_lock(shost->host_lock);
vport->fc_flag |= FC_LBIT;
spin_unlock_irq(shost->host_lock);
spin_unlock(shost->host_lock);
}
vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
@ -3029,9 +3029,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
}
vport->fc_myDID = phba->fc_pref_DID;
spin_lock_irq(shost->host_lock);
spin_lock(shost->host_lock);
vport->fc_flag |= FC_LBIT;
spin_unlock_irq(shost->host_lock);
spin_unlock(shost->host_lock);
}
spin_unlock_irq(&phba->hbalock);
@ -5332,6 +5332,10 @@ lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
uint16_t *rpi = param;
/* check for active node */
if (!NLP_CHK_NODE_ACT(ndlp))
return 0;
return ndlp->nlp_rpi == *rpi;
}


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2010 Emulex. All rights reserved. *
* Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@ -70,6 +70,7 @@
/* vendor ID used in SCSI netlink calls */
#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
#define FW_REV_STR_SIZE 32
/* Common Transport structures and definitions */
union CtRevisionId {
@ -2567,6 +2568,8 @@ typedef struct {
#define DMP_MEM_REG 0x1
#define DMP_NV_PARAMS 0x2
#define DMP_LMSD 0x3 /* Link Module Serial Data */
#define DMP_WELL_KNOWN 0x4
#define DMP_REGION_VPD 0xe
#define DMP_VPD_SIZE 0x400 /* maximum amount of VPD */


@ -321,6 +321,10 @@ struct lpfc_cqe {
#define CQE_STATUS_CMD_REJECT 0xb
#define CQE_STATUS_FCP_TGT_LENCHECK 0xc
#define CQE_STATUS_NEED_BUFF_ENTRY 0xf
#define CQE_STATUS_DI_ERROR 0x16
/* Used when mapping CQE status to IOCB */
#define LPFC_IOCB_STATUS_MASK 0xf
/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */
#define CQE_HW_STATUS_NO_ERR 0x0
@ -348,6 +352,21 @@ struct lpfc_wcqe_complete {
#define lpfc_wcqe_c_hw_status_WORD word0
uint32_t total_data_placed;
uint32_t parameter;
#define lpfc_wcqe_c_bg_edir_SHIFT 5
#define lpfc_wcqe_c_bg_edir_MASK 0x00000001
#define lpfc_wcqe_c_bg_edir_WORD parameter
#define lpfc_wcqe_c_bg_tdpv_SHIFT 3
#define lpfc_wcqe_c_bg_tdpv_MASK 0x00000001
#define lpfc_wcqe_c_bg_tdpv_WORD parameter
#define lpfc_wcqe_c_bg_re_SHIFT 2
#define lpfc_wcqe_c_bg_re_MASK 0x00000001
#define lpfc_wcqe_c_bg_re_WORD parameter
#define lpfc_wcqe_c_bg_ae_SHIFT 1
#define lpfc_wcqe_c_bg_ae_MASK 0x00000001
#define lpfc_wcqe_c_bg_ae_WORD parameter
#define lpfc_wcqe_c_bg_ge_SHIFT 0
#define lpfc_wcqe_c_bg_ge_MASK 0x00000001
#define lpfc_wcqe_c_bg_ge_WORD parameter
uint32_t word3;
#define lpfc_wcqe_c_valid_SHIFT lpfc_cqe_valid_SHIFT
#define lpfc_wcqe_c_valid_MASK lpfc_cqe_valid_MASK
@ -359,8 +378,8 @@ struct lpfc_wcqe_complete {
#define lpfc_wcqe_c_pv_MASK 0x00000001
#define lpfc_wcqe_c_pv_WORD word3
#define lpfc_wcqe_c_priority_SHIFT 24
#define lpfc_wcqe_c_priority_MASK 0x00000007
#define lpfc_wcqe_c_priority_WORD word3
#define lpfc_wcqe_c_priority_MASK 0x00000007
#define lpfc_wcqe_c_priority_WORD word3
#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT
#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK
#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD
@ -715,12 +734,20 @@ struct lpfc_register {
#define lpfc_eqcq_doorbell_eqci_SHIFT 9
#define lpfc_eqcq_doorbell_eqci_MASK 0x0001
#define lpfc_eqcq_doorbell_eqci_WORD word0
#define lpfc_eqcq_doorbell_cqid_SHIFT 0
#define lpfc_eqcq_doorbell_cqid_MASK 0x03FF
#define lpfc_eqcq_doorbell_cqid_WORD word0
#define lpfc_eqcq_doorbell_eqid_SHIFT 0
#define lpfc_eqcq_doorbell_eqid_MASK 0x01FF
#define lpfc_eqcq_doorbell_eqid_WORD word0
#define lpfc_eqcq_doorbell_cqid_lo_SHIFT 0
#define lpfc_eqcq_doorbell_cqid_lo_MASK 0x03FF
#define lpfc_eqcq_doorbell_cqid_lo_WORD word0
#define lpfc_eqcq_doorbell_cqid_hi_SHIFT 11
#define lpfc_eqcq_doorbell_cqid_hi_MASK 0x001F
#define lpfc_eqcq_doorbell_cqid_hi_WORD word0
#define lpfc_eqcq_doorbell_eqid_lo_SHIFT 0
#define lpfc_eqcq_doorbell_eqid_lo_MASK 0x01FF
#define lpfc_eqcq_doorbell_eqid_lo_WORD word0
#define lpfc_eqcq_doorbell_eqid_hi_SHIFT 11
#define lpfc_eqcq_doorbell_eqid_hi_MASK 0x001F
#define lpfc_eqcq_doorbell_eqid_hi_WORD word0
#define LPFC_CQID_HI_FIELD_SHIFT 10
#define LPFC_EQID_HI_FIELD_SHIFT 9
#define LPFC_BMBX 0x0160
#define lpfc_bmbx_addr_SHIFT 2
@ -3313,7 +3340,11 @@ struct xmit_bls_rsp64_wqe {
uint32_t rsrvd4;
struct wqe_did wqe_dest;
struct wqe_common wqe_com; /* words 6-11 */
uint32_t rsvd_12_15[4];
uint32_t word12;
#define xmit_bls_rsp64_temprpi_SHIFT 0
#define xmit_bls_rsp64_temprpi_MASK 0x0000ffff
#define xmit_bls_rsp64_temprpi_WORD word12
uint32_t rsvd_13_15[3];
};
struct wqe_rctl_dfctl {


@ -32,6 +32,7 @@
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@ -1474,8 +1475,12 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
phba->sli4_hba.u.if_type2.STATUSregaddr,
&portstat_reg.word0);
/* consider PCI bus read error as pci_channel_offline */
if (pci_rd_rc1 == -EIO)
if (pci_rd_rc1 == -EIO) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3151 PCI bus read access failure: x%x\n",
readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
return;
}
reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
@ -1525,6 +1530,9 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
}
/* fall through for not able to recover */
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3152 Unrecoverable error, bring the port "
"offline\n");
lpfc_sli4_offline_eratt(phba);
break;
case LPFC_SLI_INTF_IF_TYPE_1:
@ -2333,13 +2341,20 @@ lpfc_cleanup(struct lpfc_vport *vport)
continue;
}
/* take care of nodes in unused state before the state
* machine takes action.
*/
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
lpfc_nlp_put(ndlp);
continue;
}
if (ndlp->nlp_type & NLP_FABRIC)
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RM);
}
/* At this point, ALL ndlp's should be gone
@ -2512,6 +2527,42 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba)
}
}
/**
* lpfc_sli4_node_prep - Assign RPIs for active nodes.
* @phba: pointer to lpfc hba data structure.
*
* Allocate RPIs for all active remote nodes. This is needed whenever
* an SLI4 adapter is reset and the driver is not unloading. Its purpose
* is to fixup the temporary rpi assignments.
**/
void
lpfc_sli4_node_prep(struct lpfc_hba *phba)
{
struct lpfc_nodelist *ndlp, *next_ndlp;
struct lpfc_vport **vports;
int i;
if (phba->sli_rev != LPFC_SLI_REV4)
return;
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
if (vports[i]->load_flag & FC_UNLOADING)
continue;
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
if (NLP_CHK_NODE_ACT(ndlp))
ndlp->nlp_rpi =
lpfc_sli4_alloc_rpi(phba);
}
}
}
lpfc_destroy_vport_work_array(phba, vports);
}
/**
* lpfc_online - Initialize and bring a HBA online
* @phba: pointer to lpfc hba data structure.
@ -2653,6 +2704,15 @@ lpfc_offline_prep(struct lpfc_hba * phba)
}
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
/*
* Whenever an SLI4 port goes offline, free the
* RPI. A new RPI is assigned when the adapter
* port comes back online.
*/
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
spin_unlock_irq(shost->host_lock);
lpfc_unreg_rpi(vports[i], ndlp);
}
@ -4327,6 +4387,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
struct lpfc_mqe *mqe;
int longs, sli_family;
int sges_per_segment;
/* Before proceed, wait for POST done and device ready */
rc = lpfc_sli4_post_status_check(phba);
@ -4390,6 +4451,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
/* With BlockGuard we can have multiple SGEs per Data Segment */
sges_per_segment = 1;
if (phba->cfg_enable_bg)
sges_per_segment = 2;
/*
* Since the sg_tablesize is module parameter, the sg_dma_buf_size
* used to create the sg_dma_buf_pool must be dynamically calculated.
@ -4398,7 +4464,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
* sgl sizes must be a power of 2.
*/
buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
(((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) *
sizeof(struct sli4_sge)));
sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
@ -4415,6 +4482,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
default:
break;
}
for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
dma_buf_size < max_buf_size && buf_size > dma_buf_size;
dma_buf_size = dma_buf_size << 1)
@ -7223,19 +7291,17 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
rc = -ENODEV;
goto out;
}
if (bf_get(lpfc_sliport_status_rn, &reg_data))
reset_again++;
if (bf_get(lpfc_sliport_status_rdy, &reg_data))
break;
if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
reset_again++;
break;
}
}
/*
* If the port responds to the init request with
* reset needed, delay for a bit and restart the loop.
*/
if (reset_again) {
if (reset_again && (rdy_chk < 1000)) {
msleep(10);
reset_again = 0;
continue;
@ -8112,6 +8178,9 @@ lpfc_unset_hba(struct lpfc_hba *phba)
vport->load_flag |= FC_UNLOADING;
spin_unlock_irq(shost->host_lock);
kfree(phba->vpi_bmask);
kfree(phba->vpi_ids);
lpfc_stop_hba_timers(phba);
phba->pport->work_port_events = 0;
@ -8644,6 +8713,9 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
/* Final cleanup of txcmplq and reset the HBA */
lpfc_sli_brdrestart(phba);
kfree(phba->vpi_bmask);
kfree(phba->vpi_ids);
lpfc_stop_hba_timers(phba);
spin_lock_irq(&phba->hbalock);
list_del_init(&vport->listentry);
@ -9058,7 +9130,7 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
int
lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
{
char fwrev[32];
char fwrev[FW_REV_STR_SIZE];
struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
struct list_head dma_buffer_list;
int i, rc = 0;
@ -10012,6 +10084,36 @@ lpfc_io_resume(struct pci_dev *pdev)
return;
}
/**
* lpfc_mgmt_open - method called when 'lpfcmgmt' is opened from userspace
* @inode: pointer to the inode representing the lpfcmgmt device
* @filep: pointer to the file representing the open lpfcmgmt device
*
* This routine puts a reference count on the lpfc module whenever the
* character device is opened
**/
static int
lpfc_mgmt_open(struct inode *inode, struct file *filep)
{
try_module_get(THIS_MODULE);
return 0;
}
/**
* lpfc_mgmt_release - method called when 'lpfcmgmt' is closed in userspace
* @inode: pointer to the inode representing the lpfcmgmt device
* @filep: pointer to the file representing the open lpfcmgmt device
*
* This routine removes a reference count from the lpfc module when the
* character device is closed
**/
static int
lpfc_mgmt_release(struct inode *inode, struct file *filep)
{
module_put(THIS_MODULE);
return 0;
}
static struct pci_device_id lpfc_id_table[] = {
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
PCI_ANY_ID, PCI_ANY_ID, },
@ -10124,6 +10226,17 @@ static struct pci_driver lpfc_driver = {
.err_handler = &lpfc_err_handler,
};
static const struct file_operations lpfc_mgmt_fop = {
.open = lpfc_mgmt_open,
.release = lpfc_mgmt_release,
};
static struct miscdevice lpfc_mgmt_dev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "lpfcmgmt",
.fops = &lpfc_mgmt_fop,
};
/**
* lpfc_init - lpfc module initialization routine
*
@ -10144,6 +10257,11 @@ lpfc_init(void)
printk(LPFC_MODULE_DESC "\n");
printk(LPFC_COPYRIGHT "\n");
error = misc_register(&lpfc_mgmt_dev);
if (error)
printk(KERN_ERR "Could not register lpfcmgmt device, "
"misc_register returned with status %d", error);
if (lpfc_enable_npiv) {
lpfc_transport_functions.vport_create = lpfc_vport_create;
lpfc_transport_functions.vport_delete = lpfc_vport_delete;
@ -10180,6 +10298,7 @@ lpfc_init(void)
static void __exit
lpfc_exit(void)
{
misc_deregister(&lpfc_mgmt_dev);
pci_unregister_driver(&lpfc_driver);
fc_release_transport(lpfc_transport_template);
if (lpfc_enable_npiv)


@ -48,6 +48,10 @@ static int
lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_name *nn, struct lpfc_name *pn)
{
/* First, we MUST have an RPI registered */
if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
return 0;
/* Compare the ADISC rsp WWNN / WWPN matches our internal node
* table entry for that node.
*/
@ -385,6 +389,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!mbox)
goto out;
/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_unreg_rpi(vport, ndlp);
rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
(uint8_t *) sp, mbox, ndlp->nlp_rpi);
if (rc) {
@ -445,11 +453,43 @@ out:
return 0;
}
/**
* lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to mailbox object
*
* This routine is invoked to issue a completion to a received
* ADISC or PDISC after the paused RPI has been resumed.
**/
static void
lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_vport *vport;
struct lpfc_iocbq *elsiocb;
struct lpfc_nodelist *ndlp;
uint32_t cmd;
elsiocb = (struct lpfc_iocbq *)mboxq->context1;
ndlp = (struct lpfc_nodelist *) mboxq->context2;
vport = mboxq->vport;
cmd = elsiocb->drvrTimeout;
if (cmd == ELS_CMD_ADISC) {
lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp);
} else {
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
ndlp, NULL);
}
kfree(elsiocb);
mempool_free(mboxq, phba->mbox_mem_pool);
}
static int
lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_iocbq *elsiocb;
struct lpfc_dmabuf *pcmd;
struct serv_parm *sp;
struct lpfc_name *pnn, *ppn;
@ -475,12 +515,43 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
icmd = &cmdiocb->iocb;
if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
/*
* As soon as we send ACC, the remote NPort can
* start sending us data. Thus, for SLI4 we must
* resume the RPI before the ACC goes out.
*/
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
GFP_KERNEL);
if (elsiocb) {
/* Save info from cmd IOCB used in rsp */
memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
sizeof(struct lpfc_iocbq));
/* Save the ELS cmd */
elsiocb->drvrTimeout = cmd;
lpfc_sli4_resume_rpi(ndlp,
lpfc_mbx_cmpl_resume_rpi, elsiocb);
goto out;
}
}
if (cmd == ELS_CMD_ADISC) {
lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
} else {
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp,
NULL);
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
ndlp, NULL);
}
out:
/* If we are authenticated, move to the proper state */
if (ndlp->nlp_type & NLP_FCP_TARGET)
lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
else
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
return 1;
}
/* Reject this request because invalid parameters */
@ -1229,7 +1300,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
}
if (phba->sli_rev == LPFC_SLI_REV4) {
rc = lpfc_sli4_resume_rpi(ndlp);
rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
if (rc) {
/* Stay in state and retry. */
ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;

File diff suppressed because it is too large


@ -293,7 +293,9 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
}
bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
/* PCI read to flush PCI pipeline on re-arming for INTx mode */
if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
@ -372,7 +374,9 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
return released;
}
@ -553,81 +557,6 @@ __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
return sglq;
}
/**
* __lpfc_set_rrq_active - set RRQ active bit in the ndlp's xri_bitmap.
* @phba: Pointer to HBA context object.
* @ndlp: nodelist pointer for this target.
* @xritag: xri used in this exchange.
* @rxid: Remote Exchange ID.
* @send_rrq: Flag used to determine if we should send rrq els cmd.
*
* This function is called with hbalock held.
* The active bit is set in the ndlp's active rrq xri_bitmap. Allocates an
* rrq struct and adds it to the active_rrq_list.
*
* returns 0 for rrq slot for this xri
* < 0 Were not able to get rrq mem or invalid parameter.
**/
static int
__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
struct lpfc_node_rrq *rrq;
int empty;
uint32_t did = 0;
if (!ndlp)
return -EINVAL;
if (!phba->cfg_enable_rrq)
return -EINVAL;
if (phba->pport->load_flag & FC_UNLOADING) {
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
goto out;
}
did = ndlp->nlp_DID;
/*
* set the active bit even if there is no mem available.
*/
if (NLP_CHK_FREE_REQ(ndlp))
goto out;
if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
goto out;
if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
goto out;
rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
if (rrq) {
rrq->send_rrq = send_rrq;
rrq->xritag = xritag;
rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
rrq->ndlp = ndlp;
rrq->nlp_DID = ndlp->nlp_DID;
rrq->vport = ndlp->vport;
rrq->rxid = rxid;
empty = list_empty(&phba->active_rrq_list);
rrq->send_rrq = send_rrq;
list_add_tail(&rrq->list, &phba->active_rrq_list);
if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
phba->hba_flag |= HBA_RRQ_ACTIVE;
if (empty)
lpfc_worker_wake_up(phba);
}
return 0;
}
out:
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2921 Can't set rrq active xri:0x%x rxid:0x%x"
" DID:0x%x Send:%d\n",
xritag, rxid, did, send_rrq);
return -EINVAL;
}
/**
* lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
* @phba: Pointer to HBA context object.
@ -856,15 +785,68 @@ lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
**/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
int ret;
unsigned long iflags;
struct lpfc_node_rrq *rrq;
int empty;
if (!ndlp)
return -EINVAL;
if (!phba->cfg_enable_rrq)
return -EINVAL;
spin_lock_irqsave(&phba->hbalock, iflags);
ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
if (phba->pport->load_flag & FC_UNLOADING) {
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
goto out;
}
/*
* set the active bit even if there is no mem available.
*/
if (NLP_CHK_FREE_REQ(ndlp))
goto out;
if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
goto out;
if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
goto out;
spin_unlock_irqrestore(&phba->hbalock, iflags);
return ret;
rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
if (!rrq) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
" DID:0x%x Send:%d\n",
xritag, rxid, ndlp->nlp_DID, send_rrq);
return -EINVAL;
}
rrq->send_rrq = send_rrq;
rrq->xritag = xritag;
rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
rrq->ndlp = ndlp;
rrq->nlp_DID = ndlp->nlp_DID;
rrq->vport = ndlp->vport;
rrq->rxid = rxid;
rrq->send_rrq = send_rrq;
spin_lock_irqsave(&phba->hbalock, iflags);
empty = list_empty(&phba->active_rrq_list);
list_add_tail(&rrq->list, &phba->active_rrq_list);
phba->hba_flag |= HBA_RRQ_ACTIVE;
if (empty)
lpfc_worker_wake_up(phba);
spin_unlock_irqrestore(&phba->hbalock, iflags);
return 0;
out:
spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2921 Can't set rrq active xri:0x%x rxid:0x%x"
" DID:0x%x Send:%d\n",
xritag, rxid, ndlp->nlp_DID, send_rrq);
return -EINVAL;
}
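The rework above also pulls the GFP_KERNEL allocation out from under hbalock, since a possibly-sleeping allocation is not valid inside spin_lock_irqsave(); the resulting pattern, in sketch form (illustrative only):

rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); /* may sleep: no lock held */
if (!rrq)
	return -EINVAL;
/* take the lock only around the list manipulation itself */
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&rrq->list, &phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflags);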
/**
@ -5596,6 +5578,8 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
for (i = 0; i < count; i++)
phba->sli4_hba.rpi_ids[i] = base + i;
lpfc_sli4_node_prep(phba);
/* VPIs. */
count = phba->sli4_hba.max_cfg_param.max_vpi;
base = phba->sli4_hba.max_cfg_param.vpi_base;
@ -7555,6 +7539,8 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
sgl = (struct sli4_sge *)sglq->sgl;
icmd = &piocbq->iocb;
if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
return sglq->sli4_xritag;
if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
numBdes = icmd->un.genreq64.bdl.bdeSize /
sizeof(struct ulp_bde64);
@ -7756,6 +7742,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
*pcmd == ELS_CMD_SCR ||
*pcmd == ELS_CMD_FDISC ||
*pcmd == ELS_CMD_PLOGI)) {
bf_set(els_req64_sp, &wqe->els_req, 1);
bf_set(els_req64_sid, &wqe->els_req,
@ -7763,7 +7750,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
phba->vpi_ids[phba->pport->vpi]);
} else if (iocbq->context1) {
} else if (pcmd && iocbq->context1) {
bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
@ -7830,12 +7817,16 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
/* Always open the exchange */
bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
LPFC_WQE_LENLOC_WORD4);
bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
if (iocbq->iocb_flag & LPFC_IO_DIF) {
iocbq->iocb_flag &= ~LPFC_IO_DIF;
bf_set(wqe_dif, &wqe->generic.wqe_com, 1);
}
bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
break;
case CMD_FCP_IREAD64_CR:
/* word3 iocb=iotag wqe=payload_offset_len */
@ -7849,12 +7840,16 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
/* Always open the exchange */
bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
LPFC_WQE_LENLOC_WORD4);
bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
if (iocbq->iocb_flag & LPFC_IO_DIF) {
iocbq->iocb_flag &= ~LPFC_IO_DIF;
bf_set(wqe_dif, &wqe->generic.wqe_com, 1);
}
bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
break;
case CMD_FCP_ICMND64_CR:
/* word3 iocb=IO_TAG wqe=reserved */
@ -7982,6 +7977,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
xritag = 0;
break;
case CMD_XMIT_BLS_RSP64_CX:
ndlp = (struct lpfc_nodelist *)iocbq->context1;
/* As BLS ABTS RSP WQE is very different from other WQEs,
* we re-construct this WQE here based on information in
* iocbq from scratch.
@ -8008,8 +8004,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
}
bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
/* Use CT=VPI */
bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
ndlp->nlp_DID);
bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
iocbq->iocb.ulpContext);
bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
iocbq->iocb.ulpContext);
phba->vpi_ids[phba->pport->vpi]);
bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
LPFC_WQE_LENLOC_NONE);
@ -8073,8 +8076,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
if (piocb->sli4_xritag == NO_XRI) {
if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX)
piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
sglq = NULL;
else {
if (pring->txq_cnt) {
@ -8384,10 +8386,13 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
{
struct lpfc_vport *vport;
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3115 Node Context not found, driver "
"ignoring abts err event\n");
return;
}
vport = ndlp->vport;
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"3116 Port generated FCP XRI ABORT event on "
@ -10653,12 +10658,14 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
struct lpfc_wcqe_complete *wcqe)
{
unsigned long iflags;
uint32_t status;
size_t offset = offsetof(struct lpfc_iocbq, iocb);
memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
sizeof(struct lpfc_iocbq) - offset);
/* Map WCQE parameters into irspiocb parameters */
pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
status = bf_get(lpfc_wcqe_c_status, wcqe);
pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
if (pIocbOut->iocb_flag & LPFC_IO_FCP)
if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
pIocbIn->iocb.un.fcpi.fcpi_parm =
@ -10671,6 +10678,44 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
}
/* Convert BG errors for completion status */
if (status == CQE_STATUS_DI_ERROR) {
pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
else
pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
BGS_GUARD_ERR_MASK;
if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
BGS_APPTAG_ERR_MASK;
if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
BGS_REFTAG_ERR_MASK;
/* Check to see if there was any good data before the error */
if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
BGS_HI_WATER_MARK_PRESENT_MASK;
pIocbIn->iocb.unsli3.sli3_bg.bghm =
wcqe->total_data_placed;
}
/*
* Set ALL the error bits to indicate we don't know what
* type of error it is.
*/
if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
BGS_GUARD_ERR_MASK);
}
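/* Worked example (illustrative, not part of the diff): a guard-tag
 * miscompare reported after some data was successfully placed arrives
 * as CQE_STATUS_DI_ERROR with the ge and tdpv bits set, and the code
 * above decodes it into bgstat = BGS_GUARD_ERR_MASK |
 * BGS_HI_WATER_MARK_PRESENT_MASK with bghm = wcqe->total_data_placed.
 */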
/* Pick up HBA exchange busy condition */
if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
spin_lock_irqsave(&phba->hbalock, iflags);
@ -14042,6 +14087,13 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
{
if (cmd_iocbq)
lpfc_sli_release_iocbq(phba, cmd_iocbq);
/* Failure means BLS ABORT RSP did not get delivered to remote node */
if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3154 BLS ABORT RSP failed, data: x%x/x%x\n",
rsp_iocbq->iocb.ulpStatus,
rsp_iocbq->iocb.un.ulpWord[4]);
}
/**
@ -14748,7 +14800,8 @@ lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
* provided rpi via a bitmask.
**/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
{
LPFC_MBOXQ_t *mboxq;
struct lpfc_hba *phba = ndlp->phba;
@ -14761,6 +14814,13 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
/* Post all rpi memory regions to the port. */
lpfc_resume_rpi(mboxq, ndlp);
if (cmpl) {
mboxq->mbox_cmpl = cmpl;
mboxq->context1 = arg;
mboxq->context2 = ndlp;
} else
mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mboxq->vport = ndlp->vport;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,


@ -69,6 +69,7 @@ struct lpfc_iocbq {
#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */
#define DSS_SECURITY_OP 0x100 /* security IO */
#define LPFC_IO_ON_Q 0x200 /* The IO is still on the TXCMPLQ */
#define LPFC_IO_DIF 0x400 /* T10 DIF IO */
#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
#define LPFC_FIP_ELS_ID_SHIFT 14


@ -633,7 +633,8 @@ void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
void lpfc_sli4_remove_rpis(struct lpfc_hba *);
void lpfc_sli4_async_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,


@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "8.3.28"
#define LPFC_DRIVER_VERSION "8.3.29"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"


@ -2575,6 +2575,11 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
GFP_KERNEL, ioc->chain_pages);
if (!ioc->chain_lookup) {
printk(MPT2SAS_ERR_FMT "chain_lookup: get_free_pages failed, "
"sz(%d)\n", ioc->name, (int)sz);
goto out;
}
ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
ioc->request_sz, 16, 0);
if (!ioc->chain_dma_pool) {


@ -5744,7 +5744,7 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT2SAS_ADAPTER *ioc,
}
/**
* _scsih_sas_broadcast_primative_event - handle broadcast events
* _scsih_sas_broadcast_primitive_event - handle broadcast events
* @ioc: per adapter object
* @fw_event: The fw_event_work object
* Context: user.
@ -5752,7 +5752,7 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT2SAS_ADAPTER *ioc,
* Return nothing.
*/
static void
_scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
_scsih_sas_broadcast_primitive_event(struct MPT2SAS_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
struct scsi_cmnd *scmd;
@ -7263,7 +7263,7 @@ _firmware_event_work(struct work_struct *work)
fw_event);
break;
case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
_scsih_sas_broadcast_primative_event(ioc,
_scsih_sas_broadcast_primitive_event(ioc,
fw_event);
break;
case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:


@ -60,7 +60,6 @@ static struct scsi_host_template mvs_sht = {
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure,
.slave_destroy = sas_slave_destroy,
.scan_finished = mvs_scan_finished,
.scan_start = mvs_scan_start,
.change_queue_depth = sas_change_queue_depth,
@ -74,7 +73,6 @@ static struct scsi_host_template mvs_sht = {
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_bus_reset_handler = sas_eh_bus_reset_handler,
.slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = mvst_host_attrs,


@ -308,7 +308,7 @@ int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
if (mvs_prv->scan_finished == 0)
return 0;
scsi_flush_work(shost);
sas_drain_work(sha);
return 1;
}
@ -893,9 +893,6 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;
if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
spin_unlock_irq(dev->sata_dev.ap->lock);
spin_lock_irqsave(&mvi->lock, flags);
rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
if (rc)
@ -906,9 +903,6 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
(MVS_CHIP_SLOT_SZ - 1));
spin_unlock_irqrestore(&mvi->lock, flags);
if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
spin_lock_irq(dev->sata_dev.ap->lock);
return rc;
}
@ -1480,10 +1474,11 @@ static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
{
int rc;
struct sas_phy *phy = sas_find_local_phy(dev);
struct sas_phy *phy = sas_get_local_phy(dev);
int reset_type = (dev->dev_type == SATA_DEV ||
(dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
rc = sas_phy_reset(phy, reset_type);
sas_put_local_phy(phy);
msleep(2000);
return rc;
}


@ -46,9 +46,9 @@ static inline u32 pm8001_read_32(void *virt_addr)
return *((u32 *)virt_addr);
}
static inline void pm8001_write_32(void *addr, u32 offset, u32 val)
static inline void pm8001_write_32(void *addr, u32 offset, __le32 val)
{
*((u32 *)(addr + offset)) = val;
*((__le32 *)(addr + offset)) = val;
}
static inline u32 pm8001_cr32(struct pm8001_hba_info *pm8001_ha, u32 bar,


@ -338,26 +338,25 @@ update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number)
}
/**
* bar4_shift - function is called to shift BAR base address
* @pm8001_ha : our hba card information
* pm8001_bar4_shift - function is called to shift BAR base address
* @pm8001_ha : our hba card information
* @shiftValue : shifting value in memory bar.
*/
static int bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
{
u32 regVal;
u32 max_wait_count;
unsigned long start;
/* program the inbound AXI translation Lower Address */
pm8001_cw32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW, shiftValue);
/* confirm the setting is written */
max_wait_count = 1 * 1000 * 1000; /* 1 sec */
start = jiffies + HZ; /* 1 sec */
do {
udelay(1);
regVal = pm8001_cr32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW);
} while ((regVal != shiftValue) && (--max_wait_count));
} while ((regVal != shiftValue) && time_before(jiffies, start));
if (!max_wait_count) {
if (regVal != shiftValue) {
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("TIMEOUT:SPC_IBW_AXI_TRANSLATION_LOW"
" = 0x%x\n", regVal));
@ -375,6 +374,7 @@ static void __devinit
mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
{
u32 value, offset, i;
unsigned long flags;
#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
#define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000
@ -388,16 +388,23 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
* Using shifted destination address 0x3_0000:0x1074 + 0x4000*N (N=0:3)
* Using shifted destination address 0x4_0000:0x1074 + 0x4000*(N-4) (N=4:7)
*/
if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR))
spin_lock_irqsave(&pm8001_ha->lock, flags);
if (-1 == pm8001_bar4_shift(pm8001_ha,
SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return;
}
for (i = 0; i < 4; i++) {
offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i;
pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
}
/* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */
if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR))
if (-1 == pm8001_bar4_shift(pm8001_ha,
SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return;
}
for (i = 4; i < 8; i++) {
offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
@ -421,7 +428,8 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016);
/*set the shifted destination address to 0x0 to avoid error operation */
bar4_shift(pm8001_ha, 0x0);
pm8001_bar4_shift(pm8001_ha, 0x0);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return;
}
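
Every caller converted in this file follows the same locking convention: the BAR4 window is moved and restored under pm8001_ha->lock, and reset to 0 before the lock is dropped. A condensed sketch of the pattern (function name hypothetical):

static void bar4_window_example(struct pm8001_hba_info *pm8001_ha, u32 base)
{
	unsigned long flags;

	spin_lock_irqsave(&pm8001_ha->lock, flags);
	if (-1 == pm8001_bar4_shift(pm8001_ha, base)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		return;		/* window could not be moved */
	}
	/* ... access registers through the shifted BAR4 window ... */
	pm8001_bar4_shift(pm8001_ha, 0);	/* restore default window */
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}
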
@ -437,6 +445,7 @@ mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha,
u32 offset;
u32 value;
u32 i;
unsigned long flags;
#define OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR 0x00030000
#define OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR 0x00040000
@ -445,24 +454,30 @@ mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha,
#define OPEN_RETRY_INTERVAL_REG_MASK 0x0000FFFF
value = interval & OPEN_RETRY_INTERVAL_REG_MASK;
spin_lock_irqsave(&pm8001_ha->lock, flags);
/* shift bar and set the OPEN_REJECT(RETRY) interval time of PHY 0 -3.*/
if (-1 == bar4_shift(pm8001_ha,
OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR))
if (-1 == pm8001_bar4_shift(pm8001_ha,
OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return;
}
for (i = 0; i < 4; i++) {
offset = OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET + 0x4000 * i;
pm8001_cw32(pm8001_ha, 2, offset, value);
}
if (-1 == bar4_shift(pm8001_ha,
OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR))
if (-1 == pm8001_bar4_shift(pm8001_ha,
OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return;
}
for (i = 4; i < 8; i++) {
offset = OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
pm8001_cw32(pm8001_ha, 2, offset, value);
}
/*set the shifted destination address to 0x0 to avoid error operation */
bar4_shift(pm8001_ha, 0x0);
pm8001_bar4_shift(pm8001_ha, 0x0);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return;
}
@ -607,7 +622,8 @@ static int __devinit pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
update_inbnd_queue_table(pm8001_ha, 0);
update_outbnd_queue_table(pm8001_ha, 0);
mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
mpi_set_open_retry_interval_reg(pm8001_ha, 7);
/* 7->130ms, 34->500ms, 119->1.5s */
mpi_set_open_retry_interval_reg(pm8001_ha, 119);
/* notify firmware update finished and check initialization status */
if (0 == mpi_init_check(pm8001_ha)) {
PM8001_INIT_DBG(pm8001_ha,
@ -688,8 +704,11 @@ static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("Firmware is ready for reset .\n"));
} else {
/* Trigger NMI twice via RB6 */
if (-1 == bar4_shift(pm8001_ha, RB6_ACCESS_REG)) {
unsigned long flags;
/* Trigger NMI twice via RB6 */
spin_lock_irqsave(&pm8001_ha->lock, flags);
if (-1 == pm8001_bar4_shift(pm8001_ha, RB6_ACCESS_REG)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Shift Bar4 to 0x%x failed\n",
RB6_ACCESS_REG));
@ -715,8 +734,10 @@ static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return -1;
}
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}
return 0;
}
@ -733,6 +754,7 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
u32 regVal, toggleVal;
u32 max_wait_count;
u32 regVal1, regVal2, regVal3;
unsigned long flags;
/* step1: Check FW is ready for soft reset */
if (soft_reset_ready_check(pm8001_ha) != 0) {
@ -743,7 +765,9 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
/* step 2: clear NMI status register on AAP1 and IOP, write the same
value to clear */
/* map 0x60000 to BAR4(0x20), BAR2(win) */
if (-1 == bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) {
spin_lock_irqsave(&pm8001_ha->lock, flags);
if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Shift Bar4 to 0x%x failed\n",
MBIC_AAP1_ADDR_BASE));
@ -754,7 +778,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
pm8001_printk("MBIC - NMI Enable VPE0 (IOP)= 0x%x\n", regVal));
pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP, 0x0);
/* map 0x70000 to BAR4(0x20), BAR2(win) */
if (-1 == bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) {
if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Shift Bar4 to 0x%x failed\n",
MBIC_IOP_ADDR_BASE));
@ -796,7 +821,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
/* read required registers for confirming */
/* map 0x0700000 to BAR4(0x20), BAR2(win) */
if (-1 == bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Shift Bar4 to 0x%x failed\n",
GSM_ADDR_BASE));
@ -862,7 +888,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
/* step 5: delay 10 usec */
udelay(10);
/* step 5-b: set GPIO-0 output control to tristate anyway */
if (-1 == bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) {
if (-1 == pm8001_bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("Shift Bar4 to 0x%x failed\n",
GPIO_ADDR_BASE));
@ -878,7 +905,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
/* Step 6: Reset the IOP and AAP1 */
/* map 0x00000 to BAR4(0x20), BAR2(win) */
if (-1 == bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
SPC_TOP_LEVEL_ADDR_BASE));
@ -915,7 +943,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
/* step 11: reads and sets the GSM Configuration and Reset Register */
/* map 0x0700000 to BAR4(0x20), BAR2(win) */
if (-1 == bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
GSM_ADDR_BASE));
@ -968,7 +997,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
/* step 13: bring the IOP and AAP1 out of reset */
/* map 0x00000 to BAR4(0x20), BAR2(win) */
if (-1 == bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Shift Bar4 to 0x%x failed\n",
SPC_TOP_LEVEL_ADDR_BASE));
@ -1010,6 +1040,7 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
pm8001_cr32(pm8001_ha, 0,
MSGU_SCRATCH_PAD_3)));
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return -1;
}
@ -1039,9 +1070,12 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
pm8001_cr32(pm8001_ha, 0,
MSGU_SCRATCH_PAD_3)));
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return -1;
}
}
pm8001_bar4_shift(pm8001_ha, 0);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("SPC soft reset Complete\n"));
@ -1157,8 +1191,8 @@ pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE;
msi_index += MSIX_TABLE_BASE;
pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_DISABLE);
}
/**
* pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
* @pm8001_ha: our hba card information
@ -1212,7 +1246,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
consumer_index = pm8001_read_32(circularQ->ci_virt);
circularQ->consumer_index = cpu_to_le32(consumer_index);
if (((circularQ->producer_idx + bcCount) % 256) ==
circularQ->consumer_index) {
le32_to_cpu(circularQ->consumer_index)) {
*messagePtr = NULL;
return -1;
}
@ -1321,7 +1355,8 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
u32 header_tmp;
do {
/* If there are not-yet-delivered messages ... */
if (circularQ->producer_index != circularQ->consumer_idx) {
if (le32_to_cpu(circularQ->producer_index)
!= circularQ->consumer_idx) {
/*Get the pointer to the circular queue buffer element*/
msgHeader = (struct mpi_msg_hdr *)
(circularQ->base_virt +
@ -1329,14 +1364,14 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
/* read header */
header_tmp = pm8001_read_32(msgHeader);
msgHeader_tmp = cpu_to_le32(header_tmp);
if (0 != (msgHeader_tmp & 0x80000000)) {
if (0 != (le32_to_cpu(msgHeader_tmp) & 0x80000000)) {
if (OPC_OUB_SKIP_ENTRY !=
(msgHeader_tmp & 0xfff)) {
(le32_to_cpu(msgHeader_tmp) & 0xfff)) {
*messagePtr1 =
((u8 *)msgHeader) +
sizeof(struct mpi_msg_hdr);
*pBC = (u8)((msgHeader_tmp >> 24) &
0x1f);
*pBC = (u8)((le32_to_cpu(msgHeader_tmp)
>> 24) & 0x1f);
PM8001_IO_DBG(pm8001_ha,
pm8001_printk(": CI=%d PI=%d "
"msgHeader=%x\n",
@ -1347,8 +1382,8 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
} else {
circularQ->consumer_idx =
(circularQ->consumer_idx +
((msgHeader_tmp >> 24) & 0x1f))
% 256;
((le32_to_cpu(msgHeader_tmp)
>> 24) & 0x1f)) % 256;
msgHeader_tmp = 0;
pm8001_write_32(msgHeader, 0, 0);
/* update the CI of outbound queue */
@ -1360,7 +1395,8 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
} else {
circularQ->consumer_idx =
(circularQ->consumer_idx +
((msgHeader_tmp >> 24) & 0x1f)) % 256;
((le32_to_cpu(msgHeader_tmp) >> 24) &
0x1f)) % 256;
msgHeader_tmp = 0;
pm8001_write_32(msgHeader, 0, 0);
/* update the CI of outbound queue */
@ -1376,7 +1412,8 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
producer_index = pm8001_read_32(pi_virt);
circularQ->producer_index = cpu_to_le32(producer_index);
}
} while (circularQ->producer_index != circularQ->consumer_idx);
} while (le32_to_cpu(circularQ->producer_index) !=
circularQ->consumer_idx);
/* while we don't have any more not-yet-delivered message */
/* report empty */
return MPI_IO_STATUS_BUSY;
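
The recurring fix in these queue hunks is byte-order discipline: fields shared with the controller are __le32 and must pass through le32_to_cpu()/cpu_to_le32() at every access, while host-private indices stay in CPU order. A minimal sketch, assuming a descriptor shaped like the outbound queue table:

struct example_oq {
	__le32 producer_index;	/* updated by the controller (LE) */
	u32 consumer_idx;	/* host-private, CPU byte order */
};

static bool example_oq_empty(const struct example_oq *q)
{
	/* convert before comparing; a raw compare breaks on big-endian */
	return le32_to_cpu(q->producer_index) == q->consumer_idx;
}
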
@ -1388,24 +1425,191 @@ static void pm8001_work_fn(struct work_struct *work)
struct pm8001_device *pm8001_dev;
struct domain_device *dev;
/*
* So far, all users of this stash an associated structure here.
* If we get here, and this pointer is null, then the action
* was cancelled. This nullification happens when the device
* goes away.
*/
pm8001_dev = pw->data; /* Most stash device structure */
if ((pm8001_dev == NULL)
|| ((pw->handler != IO_XFER_ERROR_BREAK)
&& (pm8001_dev->dev_type == NO_DEVICE))) {
kfree(pw);
return;
}
switch (pw->handler) {
case IO_XFER_ERROR_BREAK:
{ /* This one stashes the sas_task instead */
struct sas_task *t = (struct sas_task *)pm8001_dev;
u32 tag;
struct pm8001_ccb_info *ccb;
struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
unsigned long flags, flags1;
struct task_status_struct *ts;
int i;
if (pm8001_query_task(t) == TMF_RESP_FUNC_SUCC)
break; /* Task still on lu */
spin_lock_irqsave(&pm8001_ha->lock, flags);
spin_lock_irqsave(&t->task_state_lock, flags1);
if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) {
spin_unlock_irqrestore(&t->task_state_lock, flags1);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
break; /* Task got completed by another */
}
spin_unlock_irqrestore(&t->task_state_lock, flags1);
/* Search for a possible ccb that matches the task */
for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
ccb = &pm8001_ha->ccb_info[i];
tag = ccb->ccb_tag;
if ((tag != 0xFFFFFFFF) && (ccb->task == t))
break;
}
if (!ccb) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
break; /* Task got freed by another */
}
ts = &t->task_status;
ts->resp = SAS_TASK_COMPLETE;
/* Force the midlayer to retry */
ts->stat = SAS_QUEUE_FULL;
pm8001_dev = ccb->device;
if (pm8001_dev)
pm8001_dev->running_req--;
spin_lock_irqsave(&t->task_state_lock, flags1);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags1);
PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p"
" done with event 0x%x resp 0x%x stat 0x%x but"
" aborted by upper layer!\n",
t, pw->handler, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags1);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/* in order to force CPU ordering */
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
}
} break;
case IO_XFER_OPEN_RETRY_TIMEOUT:
{ /* This one stashes the sas_task instead */
struct sas_task *t = (struct sas_task *)pm8001_dev;
u32 tag;
struct pm8001_ccb_info *ccb;
struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
unsigned long flags, flags1;
int i, ret = 0;
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
ret = pm8001_query_task(t);
PM8001_IO_DBG(pm8001_ha,
switch (ret) {
case TMF_RESP_FUNC_SUCC:
pm8001_printk("...Task on lu\n");
break;
case TMF_RESP_FUNC_COMPLETE:
pm8001_printk("...Task NOT on lu\n");
break;
default:
pm8001_printk("...query task failed!!!\n");
break;
});
spin_lock_irqsave(&pm8001_ha->lock, flags);
spin_lock_irqsave(&t->task_state_lock, flags1);
if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) {
spin_unlock_irqrestore(&t->task_state_lock, flags1);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
if (ret == TMF_RESP_FUNC_SUCC) /* task on lu */
(void)pm8001_abort_task(t);
break; /* Task got completed by another */
}
spin_unlock_irqrestore(&t->task_state_lock, flags1);
/* Search for a possible ccb that matches the task */
for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
ccb = &pm8001_ha->ccb_info[i];
tag = ccb->ccb_tag;
if ((tag != 0xFFFFFFFF) && (ccb->task == t))
break;
}
if (!ccb) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
if (ret == TMF_RESP_FUNC_SUCC) /* task on lu */
(void)pm8001_abort_task(t);
break; /* Task got freed by another */
}
pm8001_dev = ccb->device;
dev = pm8001_dev->sas_device;
switch (ret) {
case TMF_RESP_FUNC_SUCC: /* task on lu */
ccb->open_retry = 1; /* Snub completion */
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
ret = pm8001_abort_task(t);
ccb->open_retry = 0;
switch (ret) {
case TMF_RESP_FUNC_SUCC:
case TMF_RESP_FUNC_COMPLETE:
break;
default: /* device misbehavior */
ret = TMF_RESP_FUNC_FAILED;
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("...Reset phy\n"));
pm8001_I_T_nexus_reset(dev);
break;
}
break;
case TMF_RESP_FUNC_COMPLETE: /* task not on lu */
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
/* Do we need to abort the task locally? */
break;
default: /* device misbehavior */
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
ret = TMF_RESP_FUNC_FAILED;
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("...Reset phy\n"));
pm8001_I_T_nexus_reset(dev);
}
if (ret == TMF_RESP_FUNC_FAILED)
t = NULL;
pm8001_open_reject_retry(pm8001_ha, t, pm8001_dev);
PM8001_IO_DBG(pm8001_ha, pm8001_printk("...Complete\n"));
} break;
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
case IO_DS_IN_ERROR:
pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
case IO_DS_NON_OPERATIONAL:
pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
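
Both new handlers above share a ccb-lookup idiom: the comma expression "ccb = NULL, i < PM8001_MAX_CCB" re-NULLs the cursor before every test, so falling off the end of the loop leaves ccb == NULL (task not found), while a break leaves it pointing at the match. An equivalent, more conventional sketch:

static struct pm8001_ccb_info *
find_ccb_for_task(struct pm8001_hba_info *pm8001_ha, struct sas_task *t)
{
	int i;

	for (i = 0; i < PM8001_MAX_CCB; i++) {
		struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];

		if (ccb->ccb_tag != 0xFFFFFFFF && ccb->task == t)
			return ccb;	/* active ccb still bound to t */
	}
	return NULL;			/* task already completed/freed */
}
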
@ -1460,6 +1664,11 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
status = le32_to_cpu(psspPayload->status);
tag = le32_to_cpu(psspPayload->tag);
ccb = &pm8001_ha->ccb_info[tag];
if ((status == IO_ABORTED) && ccb->open_retry) {
/* Being completed by another */
ccb->open_retry = 0;
return;
}
pm8001_dev = ccb->device;
param = le32_to_cpu(psspPayload->param);
@ -1515,6 +1724,8 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
pm8001_printk("IO_XFER_ERROR_BREAK\n"));
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
/* Force the midlayer to retry */
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
case IO_XFER_ERROR_PHY_NOT_READY:
PM8001_IO_DBG(pm8001_ha,
@ -1719,9 +1930,8 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
case IO_XFER_ERROR_BREAK:
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("IO_XFER_ERROR_BREAK\n"));
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_INTERRUPTED;
break;
pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
return;
case IO_XFER_ERROR_PHY_NOT_READY:
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
@ -1800,10 +2010,8 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
case IO_XFER_OPEN_RETRY_TIMEOUT:
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
return;
case IO_XFER_ERROR_UNEXPECTED_PHASE:
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
@ -1877,7 +2085,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
struct sas_task *t;
struct pm8001_ccb_info *ccb;
unsigned long flags = 0;
u32 param;
u32 status;
u32 tag;
@ -2016,9 +2223,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*in order to force CPU ordering*/
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
spin_lock_irqsave(&pm8001_ha->lock, flags);
spin_lock_irq(&pm8001_ha->lock);
return;
}
break;
@ -2036,9 +2243,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
spin_lock_irqsave(&pm8001_ha->lock, flags);
spin_lock_irq(&pm8001_ha->lock);
return;
}
break;
@ -2064,9 +2271,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/* ditto*/
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
spin_lock_irqsave(&pm8001_ha->lock, flags);
spin_lock_irq(&pm8001_ha->lock);
return;
}
break;
@ -2131,9 +2338,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
spin_lock_irqsave(&pm8001_ha->lock, flags);
spin_lock_irq(&pm8001_ha->lock);
return;
}
break;
@ -2155,9 +2362,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
spin_lock_irqsave(&pm8001_ha->lock, flags);
spin_lock_irq(&pm8001_ha->lock);
return;
}
break;
@ -2175,31 +2382,31 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_DEV_NO_RESPONSE;
break;
}
spin_lock_irqsave(&t->task_state_lock, flags);
spin_lock_irq(&t->task_state_lock);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
spin_unlock_irq(&t->task_state_lock);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("task 0x%p done with io_status 0x%x"
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else if (t->uldd_task) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
spin_unlock_irq(&t->task_state_lock);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/* ditto */
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
spin_lock_irqsave(&pm8001_ha->lock, flags);
spin_lock_irq(&pm8001_ha->lock);
} else if (!t->uldd_task) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
spin_unlock_irq(&t->task_state_lock);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
spin_lock_irqsave(&pm8001_ha->lock, flags);
spin_lock_irq(&pm8001_ha->lock);
}
}
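
These spin_lock_irq()/spin_unlock_irq() conversions rely on process_oq() (below) now taking pm8001_ha->lock itself, so the completion handlers always run with that lock held and interrupts off and no longer need to save flags. A sketch of the resulting convention for calling back into libsas (name hypothetical; caller holds pm8001_ha->lock):

static void complete_task_locked(struct pm8001_hba_info *pm8001_ha,
				 struct sas_task *t)
{
	mb();				/* order ccb teardown vs. task_done */
	spin_unlock_irq(&pm8001_ha->lock);
	t->task_done(t);		/* libsas completion callback */
	spin_lock_irq(&pm8001_ha->lock);
}
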
@ -2207,7 +2414,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
{
struct sas_task *t;
unsigned long flags = 0;
struct task_status_struct *ts;
struct pm8001_ccb_info *ccb;
struct pm8001_device *pm8001_dev;
@ -2287,9 +2493,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
spin_lock_irqsave(&pm8001_ha->lock, flags);
spin_lock_irq(&pm8001_ha->lock);
return;
}
break;
@ -2387,31 +2593,31 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->stat = SAS_OPEN_TO;
break;
}
spin_lock_irqsave(&t->task_state_lock, flags);
spin_lock_irq(&t->task_state_lock);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
spin_unlock_irq(&t->task_state_lock);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("task 0x%p done with io_status 0x%x"
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, event, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else if (t->uldd_task) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
spin_unlock_irq(&t->task_state_lock);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/* ditto */
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
spin_lock_irqsave(&pm8001_ha->lock, flags);
spin_lock_irq(&pm8001_ha->lock);
} else if (!t->uldd_task) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
spin_unlock_irq(&t->task_state_lock);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
spin_unlock_irq(&pm8001_ha->lock);
t->task_done(t);
spin_lock_irqsave(&pm8001_ha->lock, flags);
spin_lock_irq(&pm8001_ha->lock);
}
}
@ -2857,7 +3063,7 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
memset((u8 *)&payload, 0, sizeof(payload));
circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
payload.tag = 1;
payload.tag = cpu_to_le32(1);
payload.sea_phyid_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
((phyId & 0x0F) << 4) | (port_id & 0x0F));
payload.param0 = cpu_to_le32(param0);
@ -2929,9 +3135,9 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
phy->phy_type |= PORT_TYPE_SAS;
phy->identify.device_type = deviceType;
phy->phy_attached = 1;
if (phy->identify.device_type == SAS_END_DEV)
if (phy->identify.device_type == SAS_END_DEVICE)
phy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
else if (phy->identify.device_type != NO_DEVICE)
else if (phy->identify.device_type != SAS_PHY_UNUSED)
phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
phy->sas_phy.oob_mode = SAS_OOB_MODE;
sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
@ -3075,7 +3281,7 @@ static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
(struct dev_reg_resp *)(piomb + 4);
htag = le32_to_cpu(registerRespPayload->tag);
ccb = &pm8001_ha->ccb_info[registerRespPayload->tag];
ccb = &pm8001_ha->ccb_info[htag];
pm8001_dev = ccb->device;
status = le32_to_cpu(registerRespPayload->status);
device_id = le32_to_cpu(registerRespPayload->device_id);
@ -3149,7 +3355,7 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct fw_control_ex fw_control_context;
struct fw_flash_Update_resp *ppayload =
(struct fw_flash_Update_resp *)(piomb + 4);
u32 tag = le32_to_cpu(ppayload->tag);
u32 tag = ppayload->tag;
struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
status = le32_to_cpu(ppayload->status);
memcpy(&fw_control_context,
@ -3238,13 +3444,12 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct task_abort_resp *pPayload =
(struct task_abort_resp *)(piomb + 4);
ccb = &pm8001_ha->ccb_info[pPayload->tag];
t = ccb->task;
status = le32_to_cpu(pPayload->status);
tag = le32_to_cpu(pPayload->tag);
scp = le32_to_cpu(pPayload->scp);
ccb = &pm8001_ha->ccb_info[tag];
t = ccb->task;
PM8001_IO_DBG(pm8001_ha,
pm8001_printk(" status = 0x%x\n", status));
if (t == NULL)
@ -3270,7 +3475,7 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, pPayload->tag);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();
t->task_done(t);
return 0;
@ -3497,7 +3702,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
u32 pHeader = (u32)*(u32 *)piomb;
u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF);
u8 opc = (u8)(pHeader & 0xFFF);
PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:"));
@ -3664,9 +3869,11 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha)
{
struct outbound_queue_table *circularQ;
void *pMsg1 = NULL;
u8 bc = 0;
u8 uninitialized_var(bc);
u32 ret = MPI_IO_STATUS_FAIL;
unsigned long flags;
spin_lock_irqsave(&pm8001_ha->lock, flags);
circularQ = &pm8001_ha->outbnd_q_tbl[0];
do {
ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
@ -3677,16 +3884,16 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha)
mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc);
}
if (MPI_IO_STATUS_BUSY == ret) {
u32 producer_idx;
/* Update the producer index from SPC */
producer_idx = pm8001_read_32(circularQ->pi_virt);
circularQ->producer_index = cpu_to_le32(producer_idx);
if (circularQ->producer_index ==
circularQ->producer_index =
cpu_to_le32(pm8001_read_32(circularQ->pi_virt));
if (le32_to_cpu(circularQ->producer_index) ==
circularQ->consumer_idx)
/* OQ is empty */
break;
}
} while (1);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return ret;
}
@ -3712,9 +3919,9 @@ pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
}
}
static void build_smp_cmd(u32 deviceID, u32 hTag, struct smp_req *psmp_cmd)
static void build_smp_cmd(u32 deviceID, __le32 hTag, struct smp_req *psmp_cmd)
{
psmp_cmd->tag = cpu_to_le32(hTag);
psmp_cmd->tag = hTag;
psmp_cmd->device_id = cpu_to_le32(deviceID);
psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1));
}
@ -3798,7 +4005,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
struct ssp_ini_io_start_req ssp_cmd;
u32 tag = ccb->ccb_tag;
int ret;
__le64 phys_addr;
u64 phys_addr;
struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SSPINIIOSTART;
memset(&ssp_cmd, 0, sizeof(ssp_cmd));
@ -3819,15 +4026,15 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
/* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
phys_addr = cpu_to_le64(ccb->ccb_dma_handle +
offsetof(struct pm8001_ccb_info, buf_prd[0]));
ssp_cmd.addr_low = lower_32_bits(phys_addr);
ssp_cmd.addr_high = upper_32_bits(phys_addr);
phys_addr = ccb->ccb_dma_handle +
offsetof(struct pm8001_ccb_info, buf_prd[0]);
ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(phys_addr));
ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(phys_addr));
ssp_cmd.esgl = cpu_to_le32(1<<31);
} else if (task->num_scatter == 1) {
__le64 dma_addr = cpu_to_le64(sg_dma_address(task->scatter));
ssp_cmd.addr_low = lower_32_bits(dma_addr);
ssp_cmd.addr_high = upper_32_bits(dma_addr);
u64 dma_addr = sg_dma_address(task->scatter);
ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(dma_addr));
ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.esgl = 0;
} else if (task->num_scatter == 0) {
@ -3850,7 +4057,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
int ret;
struct sata_start_req sata_cmd;
u32 hdr_tag, ncg_tag = 0;
__le64 phys_addr;
u64 phys_addr;
u32 ATAP = 0x0;
u32 dir;
struct inbound_queue_table *circularQ;
@ -3889,13 +4096,13 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
/* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
phys_addr = cpu_to_le64(ccb->ccb_dma_handle +
offsetof(struct pm8001_ccb_info, buf_prd[0]));
phys_addr = ccb->ccb_dma_handle +
offsetof(struct pm8001_ccb_info, buf_prd[0]);
sata_cmd.addr_low = lower_32_bits(phys_addr);
sata_cmd.addr_high = upper_32_bits(phys_addr);
sata_cmd.esgl = cpu_to_le32(1 << 31);
} else if (task->num_scatter == 1) {
__le64 dma_addr = cpu_to_le64(sg_dma_address(task->scatter));
u64 dma_addr = sg_dma_address(task->scatter);
sata_cmd.addr_low = lower_32_bits(dma_addr);
sata_cmd.addr_high = upper_32_bits(dma_addr);
sata_cmd.len = cpu_to_le32(task->total_xfer_len);
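
Both the SSP and SATA paths above settle on the same rule: carry the DMA address as a CPU-order u64, split it with lower_32_bits()/upper_32_bits(), and byte-swap only when storing into the little-endian descriptor fields. Condensed from the SSP hunk:

	u64 phys_addr = ccb->ccb_dma_handle +
			offsetof(struct pm8001_ccb_info, buf_prd[0]);

	ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(phys_addr));
	ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(phys_addr));
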
@ -4039,7 +4246,7 @@ static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
circularQ = &pm8001_ha->inbnd_q_tbl[0];
memset(&payload, 0, sizeof(payload));
payload.tag = 1;
payload.tag = cpu_to_le32(1);
payload.device_id = cpu_to_le32(device_id);
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("unregister device device_id = %d\n", device_id));
@ -4063,7 +4270,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
memset(&payload, 0, sizeof(payload));
circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = 1;
payload.tag = cpu_to_le32(1);
payload.phyop_phyid =
cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
@ -4092,12 +4299,9 @@ static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
static irqreturn_t
pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha)
{
unsigned long flags;
spin_lock_irqsave(&pm8001_ha->lock, flags);
pm8001_chip_interrupt_disable(pm8001_ha);
process_oq(pm8001_ha);
pm8001_chip_interrupt_enable(pm8001_ha);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return IRQ_HANDLED;
}
@ -4360,8 +4564,10 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
payload.cur_image_offset = cpu_to_le32(info->cur_image_offset);
payload.total_image_len = cpu_to_le32(info->total_image_len);
payload.len = info->sgl.im_len.len ;
payload.sgl_addr_lo = lower_32_bits(info->sgl.addr);
payload.sgl_addr_hi = upper_32_bits(info->sgl.addr);
payload.sgl_addr_lo =
cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
payload.sgl_addr_hi =
cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
return ret;
}


@ -625,7 +625,7 @@ struct set_nvm_data_req {
__le32 tag;
__le32 len_ir_vpdd;
__le32 vpd_offset;
u32 reserved[8];
__le32 reserved[8];
__le32 resp_addr_lo;
__le32 resp_addr_hi;
__le32 resp_len;


@ -62,7 +62,6 @@ static struct scsi_host_template pm8001_sht = {
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure,
.slave_destroy = sas_slave_destroy,
.scan_finished = pm8001_scan_finished,
.scan_start = pm8001_scan_start,
.change_queue_depth = sas_change_queue_depth,
@ -76,7 +75,6 @@ static struct scsi_host_template pm8001_sht = {
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_bus_reset_handler = sas_eh_bus_reset_handler,
.slave_alloc = sas_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = pm8001_host_attrs,


@ -166,6 +166,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
struct pm8001_hba_info *pm8001_ha = NULL;
struct sas_phy_linkrates *rates;
DECLARE_COMPLETION_ONSTACK(completion);
unsigned long flags;
pm8001_ha = sas_phy->ha->lldd_ha;
pm8001_ha->phy[phy_id].enable_completion = &completion;
switch (func) {
@ -209,8 +210,29 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
case PHY_FUNC_DISABLE:
PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
break;
case PHY_FUNC_GET_EVENTS:
spin_lock_irqsave(&pm8001_ha->lock, flags);
if (-1 == pm8001_bar4_shift(pm8001_ha,
(phy_id < 4) ? 0x30000 : 0x40000)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return -EINVAL;
}
{
struct sas_phy *phy = sas_phy->phy;
uint32_t *qp = (uint32_t *)(((char *)
pm8001_ha->io_mem[2].memvirtaddr)
+ 0x1034 + (0x4000 * (phy_id & 3)));
phy->invalid_dword_count = qp[0];
phy->running_disparity_error_count = qp[1];
phy->loss_of_dword_sync_count = qp[3];
phy->phy_reset_problem_count = qp[4];
}
pm8001_bar4_shift(pm8001_ha, 0);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return 0;
default:
rc = -ENOSYS;
rc = -EOPNOTSUPP;
}
msleep(300);
return rc;
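
The new PHY_FUNC_GET_EVENTS branch reads the four link-error counters straight out of the shifted BAR4 window. A sketch of just the address arithmetic, with the layout taken from the hunk above rather than a datasheet:

static uint32_t *phy_err_regs(struct pm8001_hba_info *pm8001_ha, int phy_id)
{
	/* window must already be shifted: 0x30000 for phys 0-3,
	 * 0x40000 for phys 4-7; blocks are 0x4000 apart and the
	 * counters start at offset 0x1034 within the window */
	return (uint32_t *)(((char *)pm8001_ha->io_mem[2].memvirtaddr)
			    + 0x1034 + (0x4000 * (phy_id & 3)));
}
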
@ -234,12 +256,14 @@ void pm8001_scan_start(struct Scsi_Host *shost)
int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
/* give the phy enabling interrupt event time to come in (1s
* is empirically about all it takes) */
if (time < HZ)
return 0;
/* Wait for discovery to finish */
scsi_flush_work(shost);
sas_drain_work(ha);
return 1;
}
@ -340,7 +364,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
struct pm8001_ccb_info *ccb;
u32 tag = 0xdeadbeef, rc, n_elem = 0;
u32 n = num;
unsigned long flags = 0, flags_libsas = 0;
unsigned long flags = 0;
if (!dev->port) {
struct task_status_struct *tsm = &t->task_status;
@ -364,11 +388,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
ts->stat = SAS_PHY_DOWN;
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
spin_unlock_irqrestore(dev->sata_dev.ap->lock,
flags_libsas);
t->task_done(t);
spin_lock_irqsave(dev->sata_dev.ap->lock,
flags_libsas);
spin_lock_irqsave(&pm8001_ha->lock, flags);
if (n > 1)
t = list_entry(t->list.next,
@ -516,6 +536,7 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
task->lldd_task = NULL;
ccb->task = NULL;
ccb->ccb_tag = 0xFFFFFFFF;
ccb->open_retry = 0;
pm8001_ccb_free(pm8001_ha, ccb_idx);
}
@ -615,7 +636,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
wait_for_completion(&completion);
if (dev->dev_type == SAS_END_DEV)
msleep(50);
pm8001_ha->flags |= PM8001F_RUN_TIME ;
pm8001_ha->flags = PM8001F_RUN_TIME;
return 0;
found_out:
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
@ -860,6 +881,77 @@ static int pm8001_issue_ssp_tmf(struct domain_device *dev,
tmf);
}
/* retry commands by ha, by task and/or by device */
void pm8001_open_reject_retry(
struct pm8001_hba_info *pm8001_ha,
struct sas_task *task_to_close,
struct pm8001_device *device_to_close)
{
int i;
unsigned long flags;
if (pm8001_ha == NULL)
return;
spin_lock_irqsave(&pm8001_ha->lock, flags);
for (i = 0; i < PM8001_MAX_CCB; i++) {
struct sas_task *task;
struct task_status_struct *ts;
struct pm8001_device *pm8001_dev;
unsigned long flags1;
u32 tag;
struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
pm8001_dev = ccb->device;
if (!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))
continue;
if (!device_to_close) {
uintptr_t d = (uintptr_t)pm8001_dev
- (uintptr_t)&pm8001_ha->devices;
if (((d % sizeof(*pm8001_dev)) != 0)
|| ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
continue;
} else if (pm8001_dev != device_to_close)
continue;
tag = ccb->ccb_tag;
if (!tag || (tag == 0xFFFFFFFF))
continue;
task = ccb->task;
if (!task || !task->task_done)
continue;
if (task_to_close && (task != task_to_close))
continue;
ts = &task->task_status;
ts->resp = SAS_TASK_COMPLETE;
/* Force the midlayer to retry */
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
if (pm8001_dev)
pm8001_dev->running_req--;
spin_lock_irqsave(&task->task_state_lock, flags1);
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((task->task_state_flags
& SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&task->task_state_lock,
flags1);
pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
} else {
spin_unlock_irqrestore(&task->task_state_lock,
flags1);
pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
mb();/* in order to force CPU ordering */
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
task->task_done(task);
spin_lock_irqsave(&pm8001_ha->lock, flags);
}
}
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}
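
The device_to_close == NULL path above validates a recovered pointer with plain arithmetic: it is accepted only if its byte offset from the devices table is a whole multiple of the element size and the implied index is in range. The check in isolation:

static bool dev_in_table(struct pm8001_hba_info *pm8001_ha,
			 struct pm8001_device *pm8001_dev)
{
	uintptr_t d = (uintptr_t)pm8001_dev
			- (uintptr_t)&pm8001_ha->devices;

	return ((d % sizeof(*pm8001_dev)) == 0) &&
	       ((d / sizeof(*pm8001_dev)) < PM8001_MAX_DEVICES);
}
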
/**
* Standard mandates link reset for ATA (type 0) and hard reset for
* SSP (type 1), only for RECOVERY
@ -875,12 +967,14 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
pm8001_dev = dev->lldd_dev;
pm8001_ha = pm8001_find_ha_by_dev(dev);
phy = sas_find_local_phy(dev);
phy = sas_get_local_phy(dev);
if (dev_is_sata(dev)) {
DECLARE_COMPLETION_ONSTACK(completion_setstate);
if (scsi_is_sas_phy_local(phy))
return 0;
if (scsi_is_sas_phy_local(phy)) {
rc = 0;
goto out;
}
rc = sas_phy_reset(phy, 1);
msleep(2000);
rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
@ -889,12 +983,14 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
pm8001_dev, 0x01);
wait_for_completion(&completion_setstate);
} else{
rc = sas_phy_reset(phy, 1);
msleep(2000);
} else {
rc = sas_phy_reset(phy, 1);
msleep(2000);
}
PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
pm8001_dev->device_id, rc));
out:
sas_put_local_phy(phy);
return rc;
}
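
sas_find_local_phy() is replaced throughout by the reference-counted sas_get_local_phy()/sas_put_local_phy() pair, which is why the early return above becomes "goto out": every exit path must drop the reference. A minimal sketch of the pairing:

static int reset_attached_phy(struct domain_device *dev)
{
	struct sas_phy *phy = sas_get_local_phy(dev);	/* takes a ref */
	/* link reset for SATA/STP, hard reset for SSP */
	int rc = sas_phy_reset(phy, dev_is_sata(dev) ? 0 : 1);

	sas_put_local_phy(phy);				/* drop the ref */
	return rc;
}
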
@ -906,10 +1002,11 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
struct pm8001_device *pm8001_dev = dev->lldd_dev;
struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
if (dev_is_sata(dev)) {
struct sas_phy *phy = sas_find_local_phy(dev);
struct sas_phy *phy = sas_get_local_phy(dev);
rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
dev, 1, 0);
rc = sas_phy_reset(phy, 1);
sas_put_local_phy(phy);
rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
pm8001_dev, 0x01);
msleep(2000);


@ -235,6 +235,7 @@ struct pm8001_ccb_info {
struct pm8001_device *device;
struct pm8001_prd buf_prd[PM8001_MAX_DMA_SG];
struct fw_control_ex *fw_control_context;
u8 open_retry;
};
struct mpi_mem {
@ -484,10 +485,15 @@ void pm8001_dev_gone(struct domain_device *dev);
int pm8001_lu_reset(struct domain_device *dev, u8 *lun);
int pm8001_I_T_nexus_reset(struct domain_device *dev);
int pm8001_query_task(struct sas_task *task);
void pm8001_open_reject_retry(
struct pm8001_hba_info *pm8001_ha,
struct sas_task *task_to_close,
struct pm8001_device *device_to_close);
int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo,
u32 mem_size, u32 align);
int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
/* ctl shared API */
extern struct device_attribute *pm8001_host_attrs[];


@ -356,7 +356,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
else if (start == (ha->flt_region_boot * 4) ||
start == (ha->flt_region_fw * 4))
valid = 1;
else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
|| IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
valid = 1;
if (!valid) {
ql_log(ql_log_warn, vha, 0x7065,
@ -626,144 +627,6 @@ static struct bin_attribute sysfs_reset_attr = {
.write = qla2x00_sysfs_write_reset,
};
static ssize_t
qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
uint16_t dev, adr, opt, len;
int rval;
ha->edc_data_len = 0;
if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
return -EINVAL;
if (!ha->edc_data) {
ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->edc_data_dma);
if (!ha->edc_data) {
ql_log(ql_log_warn, vha, 0x7073,
"Unable to allocate memory for EDC write.\n");
return -ENOMEM;
}
}
dev = le16_to_cpup((void *)&buf[0]);
adr = le16_to_cpup((void *)&buf[2]);
opt = le16_to_cpup((void *)&buf[4]);
len = le16_to_cpup((void *)&buf[6]);
if (!(opt & BIT_0))
if (len == 0 || len > DMA_POOL_SIZE || len > count - 8)
return -EINVAL;
memcpy(ha->edc_data, &buf[8], len);
rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data,
dev, adr, len, opt);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7074,
"Unable to write EDC (%x) %02x:%04x:%02x:%02x:%02hhx\n",
rval, dev, adr, opt, len, buf[8]);
return -EIO;
}
return count;
}
static struct bin_attribute sysfs_edc_attr = {
.attr = {
.name = "edc",
.mode = S_IWUSR,
},
.size = 0,
.write = qla2x00_sysfs_write_edc,
};
static ssize_t
qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
uint16_t dev, adr, opt, len;
int rval;
ha->edc_data_len = 0;
if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
return -EINVAL;
if (!ha->edc_data) {
ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->edc_data_dma);
if (!ha->edc_data) {
ql_log(ql_log_warn, vha, 0x708c,
"Unable to allocate memory for EDC status.\n");
return -ENOMEM;
}
}
dev = le16_to_cpup((void *)&buf[0]);
adr = le16_to_cpup((void *)&buf[2]);
opt = le16_to_cpup((void *)&buf[4]);
len = le16_to_cpup((void *)&buf[6]);
if (!(opt & BIT_0))
if (len == 0 || len > DMA_POOL_SIZE)
return -EINVAL;
memset(ha->edc_data, 0, len);
rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data,
dev, adr, len, opt);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_info, vha, 0x7075,
"Unable to write EDC status (%x) %02x:%04x:%02x:%02x.\n",
rval, dev, adr, opt, len);
return -EIO;
}
ha->edc_data_len = len;
return count;
}
static ssize_t
qla2x00_sysfs_read_edc_status(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0)
return 0;
if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count)
return -EINVAL;
memcpy(buf, ha->edc_data, ha->edc_data_len);
return ha->edc_data_len;
}
static struct bin_attribute sysfs_edc_status_attr = {
.attr = {
.name = "edc_status",
.mode = S_IRUSR | S_IWUSR,
},
.size = 0,
.write = qla2x00_sysfs_write_edc_status,
.read = qla2x00_sysfs_read_edc_status,
};
static ssize_t
qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
@ -879,8 +742,6 @@ static struct sysfs_entry {
{ "vpd", &sysfs_vpd_attr, 1 },
{ "sfp", &sysfs_sfp_attr, 1 },
{ "reset", &sysfs_reset_attr, },
{ "edc", &sysfs_edc_attr, 2 },
{ "edc_status", &sysfs_edc_status_attr, 2 },
{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
{ NULL },
@ -898,7 +759,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
continue;
if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
continue;
if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw)))
if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
continue;
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
@ -926,7 +787,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
continue;
if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
continue;
if (iter->is4GBp_only == 3 && !!(IS_QLA8XXX_TYPE(vha->hw)))
if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
continue;
sysfs_remove_bin_file(&host->shost_gendev.kobj,
@ -1231,7 +1092,7 @@ qla2x00_optrom_gold_fw_version_show(struct device *dev,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
if (!IS_QLA81XX(ha))
if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
@ -1278,7 +1139,7 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
if (!IS_QLA81XX(ha))
if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
@ -1293,7 +1154,7 @@ qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
if (!IS_QLA81XX(ha))
if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
@ -1316,7 +1177,7 @@ qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
if (!IS_QLA8XXX_TYPE(vha->hw))
if (!IS_CNA_CAPABLE(vha->hw))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
@ -1328,7 +1189,7 @@ qla2x00_vn_port_mac_address_show(struct device *dev,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
if (!IS_QLA8XXX_TYPE(vha->hw))
if (!IS_CNA_CAPABLE(vha->hw))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
@ -1364,7 +1225,7 @@ qla2x00_thermal_temp_show(struct device *dev,
else if (!vha->hw->flags.eeh_busy)
rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
if (rval != QLA_SUCCESS)
temp = frac = 0;
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac);
}
@ -1493,6 +1354,9 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
case PORT_SPEED_10GB:
speed = FC_PORTSPEED_10GBIT;
break;
case PORT_SPEED_16GB:
speed = FC_PORTSPEED_16GBIT;
break;
}
fc_host_speed(shost) = speed;
}
@ -1643,10 +1507,14 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
* final cleanup of firmware resources (PCBs and XCBs).
*/
if (fcport->loop_id != FC_NO_LOOP_ID &&
!test_bit(UNLOADING, &fcport->vha->dpc_flags))
fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa);
!test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
if (IS_FWI2_CAPABLE(fcport->vha->hw))
fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa);
else
qla2x00_port_logout(fcport->vha, fcport);
}
}
static int
@ -1889,6 +1757,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
break;
}
}
if (qos) {
ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
qos);
@ -2086,7 +1955,7 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
if (IS_QLA8XXX_TYPE(ha))
if (IS_CNA_CAPABLE(ha))
speed = FC_PORTSPEED_10GBIT;
else if (IS_QLA25XX(ha))
speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |


@ -11,29 +11,36 @@
#include <linux/delay.h>
/* BSG support for ELS/CT pass through */
inline srb_t *
qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
void
qla2x00_bsg_job_done(void *data, void *ptr, int res)
{
srb_t *sp;
srb_t *sp = (srb_t *)ptr;
struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
struct fc_bsg_job *bsg_job = sp->u.bsg_job;
bsg_job->reply->result = res;
bsg_job->job_done(bsg_job);
sp->free(vha, sp);
}
void
qla2x00_bsg_sp_free(void *data, void *ptr)
{
srb_t *sp = (srb_t *)ptr;
struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
struct fc_bsg_job *bsg_job = sp->u.bsg_job;
struct qla_hw_data *ha = vha->hw;
struct srb_ctx *ctx;
sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
if (!sp)
goto done;
ctx = kzalloc(size, GFP_KERNEL);
if (!ctx) {
mempool_free(sp, ha->srb_mempool);
sp = NULL;
goto done;
}
dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->ctx = ctx;
ctx->iocbs = 1;
done:
return sp;
dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if (sp->type == SRB_CT_CMD ||
sp->type == SRB_ELS_CMD_HST)
kfree(sp->fcport);
mempool_free(sp, vha->hw->srb_mempool);
}
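
The per-command context allocation is gone: the srb itself now carries ->done() and ->free() callbacks plus the bsg_job pointer in its u union. A hypothetical completion site under that contract would be just:

static void example_complete_bsg(scsi_qla_host_t *vha, srb_t *sp, int res)
{
	/* qla2x00_bsg_job_done() fills in bsg_job->reply->result,
	 * signals the bsg core, and frees the srb via sp->free() */
	sp->done(vha, sp, res);
}
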
int
@ -101,8 +108,6 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
uint32_t len;
uint32_t oper;
bsg_job->reply->reply_payload_rcv_len = 0;
if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
ret = -EINVAL;
goto exit_fcp_prio_cfg;
@ -217,6 +222,7 @@ exit_fcp_prio_cfg:
bsg_job->job_done(bsg_job);
return ret;
}
static int
qla2x00_process_els(struct fc_bsg_job *bsg_job)
{
@ -230,7 +236,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
int req_sg_cnt, rsp_sg_cnt;
int rval = (DRIVER_ERROR << 16);
uint16_t nextlid = 0;
struct srb_ctx *els;
if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
rport = bsg_job->rport;
@ -337,20 +342,21 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
}
/* Alloc SRB structure */
sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp) {
rval = -ENOMEM;
goto done_unmap_sg;
}
els = sp->ctx;
els->type =
sp->type =
(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
els->name =
sp->name =
(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
"bsg_els_rpt" : "bsg_els_hst");
els->u.bsg_job = bsg_job;
sp->u.bsg_job = bsg_job;
sp->free = qla2x00_bsg_sp_free;
sp->done = qla2x00_bsg_job_done;
ql_dbg(ql_dbg_user, vha, 0x700a,
"bsg rqst type: %s els type: %x - loop-id=%x "
@ -362,7 +368,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
"qla2x00_start_sp failed = %d\n", rval);
kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
rval = -EIO;
goto done_unmap_sg;
@ -409,7 +414,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
uint16_t loop_id;
struct fc_port *fcport;
char *type = "FC_BSG_HST_CT";
struct srb_ctx *ct;
req_sg_cnt =
dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
@ -486,19 +490,20 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
fcport->loop_id = loop_id;
/* Alloc SRB structure */
sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp) {
ql_log(ql_log_warn, vha, 0x7015,
"qla2x00_get_ctx_bsg_sp failed.\n");
"qla2x00_get_sp failed.\n");
rval = -ENOMEM;
goto done_free_fcport;
}
ct = sp->ctx;
ct->type = SRB_CT_CMD;
ct->name = "bsg_ct";
ct->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
ct->u.bsg_job = bsg_job;
sp->type = SRB_CT_CMD;
sp->name = "bsg_ct";
sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
sp->u.bsg_job = bsg_job;
sp->free = qla2x00_bsg_sp_free;
sp->done = qla2x00_bsg_job_done;
ql_dbg(ql_dbg_user, vha, 0x7016,
"bsg rqst type: %s else type: %x - "
@ -511,7 +516,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7017,
"qla2x00_start_sp failed=%d.\n", rval);
kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
rval = -EIO;
goto done_free_fcport;
@ -540,7 +544,7 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
int rval = 0;
struct qla_hw_data *ha = vha->hw;
if (!IS_QLA81XX(ha))
if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
goto done_set_internal;
new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
@ -582,7 +586,7 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
uint16_t new_config[4];
struct qla_hw_data *ha = vha->hw;
if (!IS_QLA81XX(ha))
if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
goto done_reset_internal;
memset(new_config, 0 , sizeof(new_config));
@ -707,7 +711,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
if ((ha->current_topology == ISP_CFG_F ||
(atomic_read(&vha->loop_state) == LOOP_DOWN) ||
(IS_QLA81XX(ha) &&
((IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
&& req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
elreq.options == EXTERNAL_LOOPBACK) {
@ -717,13 +721,12 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
command_sent = INT_DEF_LB_ECHO_CMD;
rval = qla2x00_echo_test(vha, &elreq, response);
} else {
if (IS_QLA81XX(ha)) {
if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
memset(config, 0, sizeof(config));
memset(new_config, 0, sizeof(new_config));
if (qla81xx_get_port_config(vha, config)) {
ql_log(ql_log_warn, vha, 0x701f,
"Get port config failed.\n");
bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
rval = -EPERM;
goto done_free_dma_req;
@ -737,8 +740,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
new_config)) {
ql_log(ql_log_warn, vha, 0x7024,
"Internal loopback failed.\n");
bsg_job->reply->reply_payload_rcv_len =
0;
bsg_job->reply->result =
(DID_ERROR << 16);
rval = -EPERM;
@ -750,8 +751,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
*/
if (qla81xx_reset_internal_loopback(vha,
config, 1)) {
bsg_job->reply->reply_payload_rcv_len =
0;
bsg_job->reply->result =
(DID_ERROR << 16);
rval = -EPERM;
@ -788,7 +787,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
"MPI reset failed.\n");
}
bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
rval = -EIO;
goto done_free_dma_req;
@ -813,7 +811,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
fw_sts_ptr += sizeof(response);
*fw_sts_ptr = command_sent;
rval = 0;
bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
ql_dbg(ql_dbg_user, vha, 0x702d,
@ -872,7 +869,7 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
if (rval) {
ql_log(ql_log_warn, vha, 0x7030,
"Vendor request 84xx reset failed.\n");
-rval = bsg_job->reply->reply_payload_rcv_len = 0;
+rval = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
@@ -971,9 +968,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x7037,
"Vendor request 84xx updatefw failed.\n");
-rval = bsg_job->reply->reply_payload_rcv_len = 0;
+rval = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
ql_dbg(ql_dbg_user, vha, 0x7038,
"Vendor request 84xx updatefw completed.\n");
@@ -1159,7 +1155,7 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x7043,
"Vendor request 84xx mgmt failed.\n");
-rval = bsg_job->reply->reply_payload_rcv_len = 0;
+rval = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
@@ -1210,8 +1206,6 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
uint16_t mb[MAILBOX_REGISTER_COUNT];
uint8_t *rsp_ptr = NULL;
-bsg_job->reply->reply_payload_rcv_len = 0;
if (!IS_IIDMA_CAPABLE(vha->hw)) {
ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
return -EINVAL;
@@ -1304,8 +1298,6 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
int valid = 0;
struct qla_hw_data *ha = vha->hw;
-bsg_job->reply->reply_payload_rcv_len = 0;
if (unlikely(pci_channel_offline(ha->pdev)))
return -EINVAL;
@@ -1331,7 +1323,7 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
start == (ha->flt_region_fw * 4))
valid = 1;
else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
-    IS_QLA8XXX_TYPE(ha))
+    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
valid = 1;
if (!valid) {
ql_log(ql_log_warn, vha, 0x7058,
@@ -1617,6 +1609,9 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
struct Scsi_Host *host;
scsi_qla_host_t *vha;
+/* In case no data transferred. */
+bsg_job->reply->reply_payload_rcv_len = 0;
if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
rport = bsg_job->rport;
fcport = *(fc_port_t **) rport->dd_data;
@@ -1655,6 +1650,7 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
case FC_BSG_RPT_CT:
default:
ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
+bsg_job->reply->result = ret;
break;
}
return ret;
@@ -1669,7 +1665,6 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
int cnt, que;
unsigned long flags;
struct req_que *req;
-struct srb_ctx *sp_bsg;
/* find the bsg job from the active list of commands */
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1681,11 +1676,9 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
-sp_bsg = sp->ctx;
-if (((sp_bsg->type == SRB_CT_CMD) ||
-    (sp_bsg->type == SRB_ELS_CMD_HST))
-    && (sp_bsg->u.bsg_job == bsg_job)) {
+if (((sp->type == SRB_CT_CMD) ||
+    (sp->type == SRB_ELS_CMD_HST))
+    && (sp->u.bsg_job == bsg_job)) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
ql_log(ql_log_warn, vha, 0x7089,
@@ -1715,7 +1708,6 @@ done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (bsg_job->request->msgcode == FC_BSG_HST_CT)
kfree(sp->fcport);
-kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
return 0;
}
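
The running theme of the hunks above is the removal of the separate srb_ctx object: fields that used to hang off sp->ctx (type, name, iocbs, u.bsg_job) now live directly on the srb, together with per-command free/done callbacks. A minimal sketch of the flattened layout this implies -- the field names come from the accessors in this diff, but the exact types and callback signatures are assumptions, not the driver's verbatim definition:

/* Hypothetical flattened srb, inferred from the accessors above. */
typedef struct srb {
	fc_port_t *fcport;		/* owning remote port */
	int type;			/* SRB_CT_CMD, SRB_ELS_CMD_HST, ... */
	const char *name;		/* "bsg_ct", ... */
	int iocbs;			/* IOCBs needed for this request */
	union {
		struct fc_bsg_job *bsg_job;
	} u;
	void (*done)(void *, int);	/* e.g. qla2x00_bsg_job_done */
	void (*free)(void *);		/* e.g. qla2x00_bsg_sp_free */
} srb_t;

With everything on the srb itself, teardown no longer needs the extra kfree(sp->ctx) that the hunks above delete.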


@@ -11,23 +11,27 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe        | 0x0116     | 0xfa           |
- * | Mailbox commands             | 0x112b     |                |
- * | Device Discovery             | 0x2084     |                |
- * | Queue Command and IO tracing | 0x302f     | 0x3008,0x302d, |
- * |                              |            | 0x302e         |
+ * | Module Init and Probe        | 0x0120     | 0x4b,0xba,0xfa |
+ * | Mailbox commands             | 0x113e     | 0x112c-0x112e  |
+ * |                              |            | 0x113a         |
+ * | Device Discovery             | 0x2086     | 0x2020-0x2022  |
+ * | Queue Command and IO tracing | 0x302f     | 0x3006,0x3008  |
+ * |                              |            | 0x302d-0x302e  |
* | DPC Thread | 0x401c | |
- * | Async Events                 | 0x5057     | 0x5052         |
- * | Timer Routines               | 0x6011     | 0x600e,0x600f  |
- * | User Space Interactions      | 0x709e     | 0x7018,0x702e  |
- * |                              |            | 0x7039,0x7045  |
+ * | Async Events                 | 0x505d     | 0x502b-0x502f  |
+ * |                              |            | 0x5047,0x5052  |
+ * | Timer Routines               | 0x6011     | 0x600e-0x600f  |
+ * | User Space Interactions      | 0x709f     | 0x7018,0x702e, |
+ * |                              |            | 0x7039,0x7045, |
+ * |                              |            | 0x7073-0x7075, |
+ * |                              |            | 0x708c         |
* | Task Management | 0x803c | 0x8025-0x8026 |
* | | | 0x800b,0x8039 |
* | AER/EEH | 0x900f | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific             | 0xb052     |                |
- * | MultiQ                       | 0xc00b     |                |
- * | Misc                         | 0xd00b     |                |
+ * | ISP82XX Specific             | 0xb054     | 0xb053         |
+ * | MultiQ                       | 0xc00c     |                |
+ * | Misc                         | 0xd010     |                |
* ----------------------------------------------------------------------
*/
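
Each row above reserves a per-subsystem message-ID range (0x0xxx for init/probe, 0x7xxx for user-space interactions, and so on); "Last Value Used" is the highest ID handed out so far, and "Holes" lists retired IDs that are free for reuse. The ID travels as the third argument at every call site, as in the ql_dbg()/ql_log() calls elsewhere in this diff. An illustrative (not literal) call -- the message text here is invented:

/* 0x70xx places the message in the User Space Interactions range. */
ql_dbg(ql_dbg_user, vha, 0x7016,
    "bsg request queued, iocbs needed %d.\n", sp->iocbs);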
@@ -85,7 +89,7 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
-dwords = GID_LIST_SIZE / 4;
+dwords = qla2x00_gid_list_size(ha) / 4;
for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
cnt += dwords, addr += dwords) {
if (cnt + dwords > ram_dwords)
@@ -260,7 +264,7 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
-words = GID_LIST_SIZE / 2;
+words = qla2x00_gid_list_size(ha) / 2;
for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
cnt += words, addr += words) {
if (cnt + words > ram_words)
@@ -374,6 +378,77 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
return (char *)iter_reg + ntohl(fcec->size);
}
static inline void *
qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
{
struct qla2xxx_mqueue_chain *q;
struct qla2xxx_mqueue_header *qh;
struct req_que *req;
struct rsp_que *rsp;
int que;
if (!ha->mqenable)
return ptr;
/* Request queues */
for (que = 1; que < ha->max_req_queues; que++) {
req = ha->req_q_map[que];
if (!req)
break;
/* Add chain. */
q = ptr;
*last_chain = &q->type;
q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
q->chain_size = htonl(
sizeof(struct qla2xxx_mqueue_chain) +
sizeof(struct qla2xxx_mqueue_header) +
(req->length * sizeof(request_t)));
ptr += sizeof(struct qla2xxx_mqueue_chain);
/* Add header. */
qh = ptr;
qh->queue = __constant_htonl(TYPE_REQUEST_QUEUE);
qh->number = htonl(que);
qh->size = htonl(req->length * sizeof(request_t));
ptr += sizeof(struct qla2xxx_mqueue_header);
/* Add data. */
memcpy(ptr, req->ring, req->length * sizeof(request_t));
ptr += req->length * sizeof(request_t);
}
/* Response queues */
for (que = 1; que < ha->max_rsp_queues; que++) {
rsp = ha->rsp_q_map[que];
if (!rsp)
break;
/* Add chain. */
q = ptr;
*last_chain = &q->type;
q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
q->chain_size = htonl(
sizeof(struct qla2xxx_mqueue_chain) +
sizeof(struct qla2xxx_mqueue_header) +
(rsp->length * sizeof(response_t)));
ptr += sizeof(struct qla2xxx_mqueue_chain);
/* Add header. */
qh = ptr;
qh->queue = __constant_htonl(TYPE_RESPONSE_QUEUE);
qh->number = htonl(que);
qh->size = htonl(rsp->length * sizeof(response_t));
ptr += sizeof(struct qla2xxx_mqueue_header);
/* Add data. */
memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
ptr += rsp->length * sizeof(response_t);
}
return ptr;
}
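
For each hardware queue beyond the base pair, the new helper above emits one self-describing record: a qla2xxx_mqueue_chain (type plus total size), a qla2xxx_mqueue_header (queue kind, number, ring length), then the raw ring contents, all stored big-endian. Schematically, one record in the dump buffer looks like this (illustration only; the structs are defined in the header hunk later in this diff):

/*
 *   struct qla2xxx_mqueue_chain  { type = DUMP_CHAIN_QUEUE, chain_size }
 *   struct qla2xxx_mqueue_header { queue = TYPE_REQUEST_QUEUE or
 *                                  TYPE_RESPONSE_QUEUE, number, size }
 *   ring data                    [ size bytes ]
 */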
static inline void *
qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
{
@@ -382,7 +457,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
struct qla2xxx_mq_chain *mq = ptr;
struct device_reg_25xxmq __iomem *reg;
-if (!ha->mqenable)
+if (!ha->mqenable || IS_QLA83XX(ha))
return ptr;
mq = ptr;
@@ -1322,12 +1397,16 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
nxt = qla24xx_copy_eft(ha, nxt);
/* Chain entries -- started with MQ. */
-qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
}
+/* Adjust valid length. */
+ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
qla25xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
@@ -1636,12 +1715,16 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
nxt = qla24xx_copy_eft(ha, nxt);
/* Chain entries -- started with MQ. */
-qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
+nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
}
+/* Adjust valid length. */
+ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
qla81xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
@@ -1650,6 +1733,507 @@ qla81xx_fw_dump_failed:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
void
qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
int rval;
uint32_t cnt, reg_data;
uint32_t risc_address;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
uint32_t __iomem *dmp_reg;
uint32_t *iter_reg;
uint16_t __iomem *mbx_reg;
unsigned long flags;
struct qla83xx_fw_dump *fw;
uint32_t ext_mem_cnt;
void *nxt, *nxt_chain;
uint32_t *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
risc_address = ext_mem_cnt = 0;
flags = 0;
if (!hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd00c,
"No buffer available for dump!!!\n");
goto qla83xx_fw_dump_failed;
}
if (ha->fw_dumped) {
ql_log(ql_log_warn, vha, 0xd00d,
"Firmware has been previously dumped (%p) -- ignoring "
"request...\n", ha->fw_dump);
goto qla83xx_fw_dump_failed;
}
fw = &ha->fw_dump->isp.isp83;
qla2xxx_prep_dump(ha, ha->fw_dump);
fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
/* Pause RISC. */
rval = qla24xx_pause_risc(reg);
if (rval != QLA_SUCCESS)
goto qla83xx_fw_dump_failed_0;
WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
dmp_reg = &reg->iobase_window;
reg_data = RD_REG_DWORD(dmp_reg);
WRT_REG_DWORD(dmp_reg, 0);
dmp_reg = &reg->unused_4_1[0];
reg_data = RD_REG_DWORD(dmp_reg);
WRT_REG_DWORD(dmp_reg, 0);
WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
dmp_reg = &reg->unused_4_1[2];
reg_data = RD_REG_DWORD(dmp_reg);
WRT_REG_DWORD(dmp_reg, 0);
/* select PCR and disable ecc checking and correction */
WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
RD_REG_DWORD(&reg->iobase_addr);
WRT_REG_DWORD(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
/* Host/Risc registers. */
iter_reg = fw->host_risc_reg;
iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
qla24xx_read_window(reg, 0x7040, 16, iter_reg);
/* PCIe registers. */
WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
RD_REG_DWORD(&reg->iobase_addr);
WRT_REG_DWORD(&reg->iobase_window, 0x01);
dmp_reg = &reg->iobase_c4;
fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
WRT_REG_DWORD(&reg->iobase_window, 0x00);
RD_REG_DWORD(&reg->iobase_window);
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
/* Disable interrupts. */
WRT_REG_DWORD(&reg->ictrl, 0);
RD_REG_DWORD(&reg->ictrl);
/* Shadow registers. */
WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
RD_REG_DWORD(&reg->iobase_addr);
WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
/* RISC I/O register. */
WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
iter_reg = fw->xseq_0_reg;
iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
/* Receive sequence registers. */
iter_reg = fw->rseq_gp_reg;
iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
iter_reg = fw->rseq_0_reg;
iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);
/* Auxiliary sequence registers. */
iter_reg = fw->aseq_gp_reg;
iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
qla24xx_read_window(reg, 0xB170, 16, iter_reg);
iter_reg = fw->aseq_0_reg;
iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);
/* Command DMA registers. */
iter_reg = fw->cmd_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
qla24xx_read_window(reg, 0x71F0, 16, iter_reg);
/* Queues. */
iter_reg = fw->req0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++)
*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
qla24xx_read_window(reg, 0x7610, 16, iter_reg);
iter_reg = fw->xmt1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
qla24xx_read_window(reg, 0x7630, 16, iter_reg);
iter_reg = fw->xmt2_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
qla24xx_read_window(reg, 0x7650, 16, iter_reg);
iter_reg = fw->xmt3_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
qla24xx_read_window(reg, 0x7670, 16, iter_reg);
iter_reg = fw->xmt4_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
qla24xx_read_window(reg, 0x7690, 16, iter_reg);
qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
/* Receive DMA registers. */
iter_reg = fw->rcvt0_data_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
qla24xx_read_window(reg, 0x7710, 16, iter_reg);
iter_reg = fw->rcvt1_data_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
qla24xx_read_window(reg, 0x7730, 16, iter_reg);
/* RISC registers. */
iter_reg = fw->risc_gp_reg;
iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
/* Local memory controller registers. */
iter_reg = fw->lmc_reg;
iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
qla24xx_read_window(reg, 0x3070, 16, iter_reg);
/* Fibre Protocol Module registers. */
iter_reg = fw->fpm_hdw_reg;
iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
qla24xx_read_window(reg, 0x40F0, 16, iter_reg);
/* RQ0 Array registers. */
iter_reg = fw->rq0_array_reg;
iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);
/* RQ1 Array registers. */
iter_reg = fw->rq1_array_reg;
iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);
/* RP0 Array registers. */
iter_reg = fw->rp0_array_reg;
iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);
/* RP1 Array registers. */
iter_reg = fw->rp1_array_reg;
iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);
iter_reg = fw->at0_array_reg;
iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
qla24xx_read_window(reg, 0x70F0, 16, iter_reg);
/* I/O Queue Control registers. */
qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);
/* Frame Buffer registers. */
iter_reg = fw->fb_hdw_reg;
iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
/* Multi queue registers */
nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
&last_chain);
rval = qla24xx_soft_reset(ha);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0xd00e,
"SOFT RESET FAILED, forcing continuation of dump!!!\n");
rval = QLA_SUCCESS;
ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
RD_REG_DWORD(&reg->hccr);
WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
RD_REG_DWORD(&reg->hccr);
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
RD_REG_DWORD(&reg->hccr);
for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
udelay(5);
if (!cnt) {
nxt = fw->code_ram;
nxt += sizeof(fw->code_ram);
nxt += (ha->fw_memory_size - 0x100000 + 1);
goto copy_queue;
} else
ql_log(ql_log_warn, vha, 0xd010,
"bigger hammer success?\n");
}
rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
&nxt);
if (rval != QLA_SUCCESS)
goto qla83xx_fw_dump_failed_0;
copy_queue:
nxt = qla2xxx_copy_queues(ha, nxt);
nxt = qla24xx_copy_eft(ha, nxt);
/* Chain entries -- started with MQ. */
nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
}
/* Adjust valid length. */
ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
qla83xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
qla83xx_fw_dump_failed:
if (!hardware_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
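
Nearly every register bank in the new qla83xx_fw_dump() above is captured through the same qla24xx_read_window() idiom: point iobase_addr at a window, stream out 16 dwords, and chain the returned cursor into the next call (the final call per bank discards it). The helper's body is not part of this diff; the following is a sketch consistent with the call sites, with the exact signature assumed from them:

/* Assumed shape of the window-read helper, for illustration only. */
static inline uint32_t *
qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
    uint32_t count, uint32_t *buf)
{
	uint32_t __iomem *dmp_reg;

	WRT_REG_DWORD(&reg->iobase_addr, iobase);	/* select window */
	dmp_reg = &reg->iobase_window;
	while (count--)					/* copy it out, big-endian */
		*buf++ = htonl(RD_REG_DWORD(dmp_reg++));
	return buf;	/* callers chain: iter_reg = qla24xx_read_window(...) */
}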
/****************************************************************************/
/* Driver Debug Functions. */
/****************************************************************************/
@@ -1782,13 +2366,13 @@ ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
vaf.va = &va;
switch (level) {
-case 0: /* FATAL LOG */
+case ql_log_fatal: /* FATAL LOG */
pr_crit("%s%pV", pbuf, &vaf);
break;
-case 1:
+case ql_log_warn:
pr_err("%s%pV", pbuf, &vaf);
break;
-case 2:
+case ql_log_info:
pr_warn("%s%pV", pbuf, &vaf);
break;
default:
@@ -1837,13 +2421,13 @@ ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
vaf.va = &va;
switch (level) {
-case 0: /* FATAL LOG */
+case ql_log_fatal: /* FATAL LOG */
pr_crit("%s%pV", pbuf, &vaf);
break;
-case 1:
+case ql_log_warn:
pr_err("%s%pV", pbuf, &vaf);
break;
-case 2:
+case ql_log_info:
pr_warn("%s%pV", pbuf, &vaf);
break;
default:

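Both switches now key on named log levels instead of the magic numbers 0/1/2 they replace, so the severity mapping reads directly: fatal maps to pr_crit(), warn to pr_err(), info to pr_warn(). A sketch of the enum this presumes -- the numeric values are an assumption inferred from the cases being replaced:

/* Assumed ql_log level enum; values mirror the old 0/1/2 cases. */
enum {
	ql_log_fatal = 0,	/* was case 0 -> pr_crit() */
	ql_log_warn  = 1,	/* was case 1 -> pr_err()  */
	ql_log_info  = 2,	/* was case 2 -> pr_warn() */
};
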

@@ -165,6 +165,54 @@ struct qla81xx_fw_dump {
uint32_t ext_mem[1];
};
struct qla83xx_fw_dump {
uint32_t host_status;
uint32_t host_risc_reg[48];
uint32_t pcie_regs[4];
uint32_t host_reg[32];
uint32_t shadow_reg[11];
uint32_t risc_io_reg;
uint16_t mailbox_reg[32];
uint32_t xseq_gp_reg[256];
uint32_t xseq_0_reg[48];
uint32_t xseq_1_reg[16];
uint32_t xseq_2_reg[16];
uint32_t rseq_gp_reg[256];
uint32_t rseq_0_reg[32];
uint32_t rseq_1_reg[16];
uint32_t rseq_2_reg[16];
uint32_t rseq_3_reg[16];
uint32_t aseq_gp_reg[256];
uint32_t aseq_0_reg[32];
uint32_t aseq_1_reg[16];
uint32_t aseq_2_reg[16];
uint32_t aseq_3_reg[16];
uint32_t cmd_dma_reg[64];
uint32_t req0_dma_reg[15];
uint32_t resp0_dma_reg[15];
uint32_t req1_dma_reg[15];
uint32_t xmt0_dma_reg[32];
uint32_t xmt1_dma_reg[32];
uint32_t xmt2_dma_reg[32];
uint32_t xmt3_dma_reg[32];
uint32_t xmt4_dma_reg[32];
uint32_t xmt_data_dma_reg[16];
uint32_t rcvt0_data_dma_reg[32];
uint32_t rcvt1_data_dma_reg[32];
uint32_t risc_gp_reg[128];
uint32_t lmc_reg[128];
uint32_t fpm_hdw_reg[256];
uint32_t rq0_array_reg[256];
uint32_t rq1_array_reg[256];
uint32_t rp0_array_reg[256];
uint32_t rp1_array_reg[256];
uint32_t queue_control_reg[16];
uint32_t fb_hdw_reg[432];
uint32_t at0_array_reg[128];
uint32_t code_ram[0x2400];
uint32_t ext_mem[1];
};
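
The new template is fixed-size except for the flexible ext_mem[] tail, which holds external RISC memory and must be sized at runtime. Assuming the sizing mirrors the code_ram/ext_mem handling in qla83xx_fw_dump() above -- the driver's real allocator is not part of this diff -- the buffer size would be derived roughly as:

/* Hedged sketch of the dump-buffer sizing; assumed, not from the diff. */
dump_size = offsetof(struct qla83xx_fw_dump, ext_mem) +
    (ha->fw_memory_size - 0x100000 + 1) * sizeof(uint32_t);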
#define EFT_NUM_BUFFERS 4
#define EFT_BYTES_PER_BUFFER 0x4000
#define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS))
@@ -192,9 +240,23 @@ struct qla2xxx_mq_chain {
uint32_t qregs[4 * QLA_MQ_SIZE];
};
struct qla2xxx_mqueue_header {
uint32_t queue;
#define TYPE_REQUEST_QUEUE 0x1
#define TYPE_RESPONSE_QUEUE 0x2
uint32_t number;
uint32_t size;
};
struct qla2xxx_mqueue_chain {
uint32_t type;
uint32_t chain_size;
};
#define DUMP_CHAIN_VARIANT 0x80000000
#define DUMP_CHAIN_FCE 0x7FFFFAF0
#define DUMP_CHAIN_MQ 0x7FFFFAF1
+#define DUMP_CHAIN_QUEUE	0x7FFFFAF2
#define DUMP_CHAIN_LAST 0x80000000
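
DUMP_CHAIN_VARIANT and DUMP_CHAIN_LAST both occupy bit 31 -- the first is OR'd into fw_dump->version, the second into the final chain entry's type -- while the base chain types stay below it, so a post-processor can walk entries by chain_size until the top bit appears. A minimal user-space walker under those assumptions (buffer acquisition omitted; walk_chains is a hypothetical name, not a driver function):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl() */

static void walk_chains(const uint8_t *p, const uint8_t *end)
{
	while (p + 8 <= end) {
		/* First two big-endian dwords: chain type, chain size. */
		uint32_t type = ntohl(*(const uint32_t *)(const void *)p);
		uint32_t size = ntohl(*(const uint32_t *)(const void *)(p + 4));

		printf("chain 0x%08x, %u bytes%s\n",
		    type & ~DUMP_CHAIN_LAST, size,
		    (type & DUMP_CHAIN_LAST) ? " (last)" : "");
		if ((type & DUMP_CHAIN_LAST) || size < 8)
			break;
		p += size;	/* chain_size covers header and payload */
	}
}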
struct qla2xxx_fw_dump {
@@ -228,6 +290,7 @@ struct qla2xxx_fw_dump {
struct qla24xx_fw_dump isp24;
struct qla25xx_fw_dump isp25;
struct qla81xx_fw_dump isp81;
+struct qla83xx_fw_dump isp83;
} isp;
};

Some files were not shown because too many files have changed in this diff.