commit df910390e2
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

SCSI misc on 20150901

Pull first round of SCSI updates from James Bottomley:

 "This includes one new driver: cxlflash, plus the usual grab bag of
  updates for the major drivers: qla2xxx, ipr, storvsc, pm80xx, hptiop,
  plus a few assorted fixes.

  There's another tranche coming, but I want to incubate it another few
  days in the checkers, plus it includes an mpt2sas separated lifetime
  fix, which Avago won't get done testing until Friday"

Signed-off-by: James Bottomley <JBottomley@Odin.com>

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (85 commits)
  aic94xx: set an error code on failure
  storvsc: Set the error code correctly in failure conditions
  storvsc: Allow write_same when host is windows 10
  storvsc: use storage protocol version to determine storage capabilities
  storvsc: use correct defaults for values determined by protocol negotiation
  storvsc: Untangle the storage protocol negotiation from the vmbus protocol negotiation.
  storvsc: Use a single value to track protocol versions
  storvsc: Rather than look for sets of specific protocol versions, make decisions based on ranges.
  cxlflash: Remove unused variable from queuecommand
  cxlflash: shift wrapping bug in afu_link_reset()
  cxlflash: off by one bug in cxlflash_show_port_status()
  cxlflash: Virtual LUN support
  cxlflash: Superpipe support
  cxlflash: Base error recovery support
  qla2xxx: Update driver version to 8.07.00.26-k
  qla2xxx: Add pci device id 0x2261.
  qla2xxx: Fix missing device login retries.
  qla2xxx: do not clear slot in outstanding cmd array
  qla2xxx: Remove decrement of sp reference count in abort handler.
  qla2xxx: Add support to show MPI and PEP FW version for ISP27xx.
  ...
Documentation/ioctl/ioctl-number.txt
@@ -316,6 +316,7 @@ Code  Seq#(hex)  Include File                Comments

 0xB3  00     linux/mmc/ioctl.h
 0xC0  00-0F  linux/usb/iowarrior.h
 0xCA  00-0F  uapi/misc/cxl.h
+0xCA  80-8F  uapi/scsi/cxlflash_ioctl.h
 0xCB  00-1F  CBM serial IEC bus           in development:
                                           <mailto:michael.klein@puffin.lb.shuttle.de>
 0xCD  01     linux/reiserfs_fs.h
Documentation/powerpc/cxlflash.txt (new file, 318 lines)
@@ -0,0 +1,318 @@
Introduction
============

    The IBM Power architecture provides support for CAPI (Coherent
    Accelerator Power Interface), which is available to certain PCIe slots
    on Power 8 systems. CAPI can be thought of as a special tunneling
    protocol through PCIe that allows PCIe adapters to look like special
    purpose co-processors which can read or write an application's
    memory and generate page faults. As a result, the host interface to
    an adapter running in CAPI mode does not require the data buffers to
    be mapped to the device's memory (IOMMU bypass) nor does it require
    memory to be pinned.

    On Linux, Coherent Accelerator (CXL) kernel services present CAPI
    devices as PCI devices by implementing a virtual PCI host bridge.
    This abstraction simplifies the infrastructure and programming
    model, allowing drivers to look similar to other native PCI
    device drivers.

    CXL provides a mechanism by which user space applications can
    directly talk to a device (network or storage), bypassing the typical
    kernel/device driver stack. The CXL Flash Adapter Driver enables
    user space applications to access Flash storage directly.

    The CXL Flash Adapter Driver is a kernel module that sits in the
    SCSI stack as a low-level device driver (below the SCSI disk and
    protocol drivers) for the IBM CXL Flash Adapter. This driver is
    responsible for the initialization of the adapter, setting up the
    special path for user space access, and performing error recovery. It
    communicates directly with the Flash Accelerator Functional Unit (AFU)
    as described in Documentation/powerpc/cxl.txt.

    The cxlflash driver supports two mutually exclusive modes of
    operation at the device (LUN) level:

        - Any flash device (LUN) can be configured to be accessed as a
          regular disk device (e.g. /dev/sdc). This is the default mode.

        - Any flash device (LUN) can be configured to be accessed from
          user space with a special block library. This mode further
          specifies the means of accessing the device and provides for
          either raw access to the entire LUN (referred to as direct
          or physical LUN access) or access to a kernel/AFU-mediated
          partition of the LUN (referred to as virtual LUN access). The
          segmentation of a disk device into virtual LUNs is assisted
          by special translation services provided by the Flash AFU.
Overview
========

    The Coherent Accelerator Interface Architecture (CAIA) introduces the
    concept of a master context. A master typically has special privileges
    granted to it by the kernel or hypervisor, allowing it to perform
    AFU-wide management and control. The master may or may not be involved
    directly in each user I/O, but at a minimum is involved in the
    initial setup before the user application is allowed to send requests
    directly to the AFU.

    The CXL Flash Adapter Driver establishes a master context with the
    AFU. It uses memory-mapped I/O (MMIO) for this control and setup. The
    Adapter Problem Space Memory Map looks like this:

                     +-------------------------------+
                     |    512 * 64 KB User MMIO      |
                     |        (per context)          |
                     |       User Accessible         |
                     +-------------------------------+
                     |    512 * 128 B per context    |
                     |    Provisioning and Control   |
                     |   Trusted Process accessible  |
                     +-------------------------------+
                     |         64 KB Global          |
                     |   Trusted Process accessible  |
                     +-------------------------------+

    This driver configures itself into the SCSI software stack as an
    adapter driver. The driver is the only entity that is considered a
    Trusted Process to program the Provisioning and Control and Global
    areas in the MMIO Space shown above. The master context driver
    discovers all LUNs attached to the CXL Flash adapter and instantiates
    SCSI block devices (/dev/sdb, /dev/sdc, etc.) for each unique LUN
    seen from each path.

    Once these SCSI block devices are instantiated, an application
    written to a specification provided by the block library may get
    access to the Flash from user space (without requiring a system call).

    This master context driver also provides a series of ioctls for this
    block library to enable this user space access. The driver supports
    two modes for accessing the block device.

    The first mode is called virtual mode. In this mode a single SCSI
    block device (/dev/sdb) may be carved up into any number of distinct
    virtual LUNs. The virtual LUNs may be resized as long as the sum of
    the sizes of all the virtual LUNs, along with the associated
    meta-data, does not exceed the physical capacity.

    The second mode is called physical mode. In this mode a single
    block device (/dev/sdb) may be opened directly by the block library
    and the entire space for the LUN is available to the application.

    Only the physical mode provides persistence of the data: data
    written to the block device will survive application exit and
    restart and also reboot. The virtual LUNs do not persist (i.e. they do
    not survive after the application terminates or the system reboots).


Block library API
=================

    Applications intending to get access to the CXL Flash from user
    space should use the block library, as it abstracts the details of
    interfacing directly with the cxlflash driver that are necessary for
    performing administrative actions (e.g. setup, teardown, resize).
    The block library can be thought of as a 'user' of services,
    implemented as IOCTLs, that are provided by the cxlflash driver
    specifically for devices (LUNs) operating in user space access
    mode. While it is not a requirement that applications understand
    the interface between the block library and the cxlflash driver,
    a high-level overview of each supported service (IOCTL) is provided
    below.

    The block library can be found on GitHub:
    http://www.github.com/mikehollinger/ibmcapikv


CXL Flash Driver IOCTLs
=======================

    Users, such as the block library, that wish to interface with a flash
    device (LUN) via user space access need to use the services provided
    by the cxlflash driver. As these services are implemented as ioctls,
    a file descriptor handle must first be obtained in order to establish
    the communication channel between a user and the kernel. This file
    descriptor is obtained by opening the device special file associated
    with the SCSI disk device (/dev/sdb) that was created during LUN
    discovery. Because of the cxlflash driver's location within the
    SCSI protocol stack, this open is not actually seen by the cxlflash
    driver. Upon successful open, the user receives a file descriptor
    (herein referred to as fd1) that should be used for issuing the
    subsequent ioctls listed below.

    The structure definitions for these IOCTLs are available in:
    uapi/scsi/cxlflash_ioctl.h

DK_CXLFLASH_ATTACH
------------------

    This ioctl obtains, initializes, and starts a context using the CXL
    kernel services. These services specify a context id (u16) by which
    to uniquely identify the context and its allocated resources. The
    services additionally provide a second file descriptor (herein
    referred to as fd2) that is used by the block library to initiate
    memory-mapped I/O (via mmap()) to the CXL flash device and poll for
    completion events. This file descriptor is intentionally installed by
    this driver and not the CXL kernel services to allow for intermediary
    notification and access in the event of a non-user-initiated close(),
    such as a killed process. This design point is described in further
    detail in the description for the DK_CXLFLASH_DETACH ioctl.

    There are a few important aspects regarding the "tokens" (context id
    and fd2) that are provided back to the user (a usage sketch follows
    the list below):

        - These tokens are only valid for the process under which they
          were created. The child of a forked process cannot continue
          to use the context id or file descriptor created by its parent
          (see DK_CXLFLASH_VLUN_CLONE for further details).

        - These tokens are only valid for the lifetime of the context and
          the process under which they were created. Once either is
          destroyed, the tokens are to be considered stale and subsequent
          usage will result in errors.

        - When a context is no longer needed, the user shall detach from
          the context via the DK_CXLFLASH_DETACH ioctl.

        - A close on fd2 will invalidate the tokens. This operation is not
          required by the user.

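    Below is a minimal, illustrative sketch of the attach flow from user
    space. It assumes the DK_CXLFLASH_ATTACH ioctl and struct
    dk_cxlflash_attach from uapi/scsi/cxlflash_ioctl.h; the field names
    used for the returned tokens (context_id, adap_fd) are approximations
    and should be verified against the installed header. It is not part
    of the block library itself.

        #include <fcntl.h>
        #include <sys/ioctl.h>
        #include <scsi/cxlflash_ioctl.h>  /* installed copy of uapi/scsi/cxlflash_ioctl.h */

        /* Returns fd2 on success, -1 on failure; stores the context id in *ctxid.
         * fd1 must remain open for subsequent ioctls (leaked here for brevity).
         */
        static int cxlflash_attach_example(const char *sdev, unsigned long long *ctxid)
        {
                struct dk_cxlflash_attach attach = { 0 };
                int fd1;

                fd1 = open(sdev, O_RDWR);     /* e.g. "/dev/sdb"; open not seen by cxlflash */
                if (fd1 < 0)
                        return -1;

                if (ioctl(fd1, DK_CXLFLASH_ATTACH, &attach) < 0)
                        return -1;

                *ctxid = attach.context_id;   /* token 1: context id */
                return attach.adap_fd;        /* token 2: fd2, used for mmap()/poll() */
        }
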
DK_CXLFLASH_USER_DIRECT
|
||||
-----------------------
|
||||
This ioctl is responsible for transitioning the LUN to direct
|
||||
(physical) mode access and configuring the AFU for direct access from
|
||||
user space on a per-context basis. Additionally, the block size and
|
||||
last logical block address (LBA) are returned to the user.
|
||||
|
||||
As mentioned previously, when operating in user space access mode,
|
||||
LUNs may be accessed in whole or in part. Only one mode is allowed
|
||||
at a time and if one mode is active (outstanding references exist),
|
||||
requests to use the LUN in a different mode are denied.
|
||||
|
||||
The AFU is configured for direct access from user space by adding an
|
||||
entry to the AFU's resource handle table. The index of the entry is
|
||||
treated as a resource handle that is returned to the user. The user
|
||||
is then able to use the handle to reference the LUN during I/O.
|
||||
|
||||
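    A minimal sketch of requesting direct (physical) mode access is shown
    below. It assumes struct dk_cxlflash_udirect from
    uapi/scsi/cxlflash_ioctl.h; the field names context_id, rsrc_handle
    and last_lba are approximations to be checked against the installed
    header, and fd1/ctxid are assumed to come from the attach flow above.

        struct dk_cxlflash_udirect udirect = { 0 };

        udirect.context_id = ctxid;           /* context from DK_CXLFLASH_ATTACH */
        if (ioctl(fd1, DK_CXLFLASH_USER_DIRECT, &udirect) < 0)
                return -1;

        /* udirect.rsrc_handle now references the entire LUN for I/O and
         * udirect.last_lba reports the capacity of the LUN.
         */
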
DK_CXLFLASH_USER_VIRTUAL
------------------------
    This ioctl is responsible for transitioning the LUN to virtual mode
    of access and configuring the AFU for virtual access from user space
    on a per-context basis. Additionally, the block size and last logical
    block address (LBA) are returned to the user.

    As mentioned previously, when operating in user space access mode,
    LUNs may be accessed in whole or in part. Only one mode is allowed
    at a time and if one mode is active (outstanding references exist),
    requests to use the LUN in a different mode are denied.

    The AFU is configured for virtual access from user space by adding
    an entry to the AFU's resource handle table. The index of the entry
    is treated as a resource handle that is returned to the user. The
    user is then able to use the handle to reference the LUN during I/O.

    By default, the virtual LUN is created with a size of 0. The user
    would need to use the DK_CXLFLASH_VLUN_RESIZE ioctl to grow the
    virtual LUN to a desired size. To avoid having to perform this
    resize for the initial creation of the virtual LUN, the user has the
    option of specifying a size as part of the DK_CXLFLASH_USER_VIRTUAL
    ioctl, such that when success is returned to the user, the
    resource handle that is provided is already referencing provisioned
    storage. This is reflected by the last LBA being a non-zero value.

DK_CXLFLASH_VLUN_RESIZE
-----------------------
    This ioctl is responsible for resizing a previously created virtual
    LUN and will fail if invoked upon a LUN that is not in virtual
    mode. Upon success, an updated last LBA is returned to the user
    indicating the new size of the virtual LUN associated with the
    resource handle. A sketch of creating and growing a virtual LUN
    follows this section.

    The partitioning of virtual LUNs is jointly mediated by the cxlflash
    driver and the AFU. An allocation table is kept for each LUN that is
    operating in the virtual mode and used to program a LUN translation
    table that the AFU references when provided with a resource handle.

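    A minimal sketch of creating a virtual LUN and then growing it,
    assuming struct dk_cxlflash_uvirtual and struct dk_cxlflash_resize
    from uapi/scsi/cxlflash_ioctl.h; the field names lun_size,
    rsrc_handle, req_size and last_lba are approximations to be checked
    against the installed header, and fd1/ctxid come from the attach flow
    above.

        struct dk_cxlflash_uvirtual virt = { 0 };
        struct dk_cxlflash_resize resize = { 0 };

        virt.context_id = ctxid;
        virt.lun_size = 0;                     /* start with an empty virtual LUN */
        if (ioctl(fd1, DK_CXLFLASH_USER_VIRTUAL, &virt) < 0)
                return -1;

        resize.context_id = ctxid;
        resize.rsrc_handle = virt.rsrc_handle; /* handle returned at creation */
        resize.req_size = 0x10000;             /* requested size; units per the uapi header */
        if (ioctl(fd1, DK_CXLFLASH_VLUN_RESIZE, &resize) < 0)
                return -1;

        /* resize.last_lba reflects the newly provisioned size */
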
DK_CXLFLASH_RELEASE
-------------------
    This ioctl is responsible for releasing a previously obtained
    reference to either a physical or virtual LUN. This can be
    thought of as the inverse of the DK_CXLFLASH_USER_DIRECT or
    DK_CXLFLASH_USER_VIRTUAL ioctls. Upon success, the resource handle
    is no longer valid and the entry in the resource handle table is
    made available to be used again.

    As part of the release process for virtual LUNs, the virtual LUN
    is first resized to 0 to clear out and free the translation tables
    associated with the virtual LUN reference.

DK_CXLFLASH_DETACH
------------------
    This ioctl is responsible for unregistering a context with the
    cxlflash driver and releasing outstanding resources that were
    not explicitly released via the DK_CXLFLASH_RELEASE ioctl. Upon
    success, all "tokens" which had been provided to the user from the
    DK_CXLFLASH_ATTACH onward are no longer valid.

DK_CXLFLASH_VLUN_CLONE
----------------------
    This ioctl is responsible for cloning a previously created
    context to a more recently created context. It exists solely to
    support maintaining user space access to storage after a process
    forks. Upon success, the child process (which invoked the ioctl)
    will have access to the same LUNs via the same resource handle(s)
    and fd2 as the parent, but under a different context.

    Context sharing across processes is not supported with CXL and
    therefore each fork must be met with establishing a new context
    for the child process. This ioctl simplifies the state management
    and playback required by a user in such a scenario. When a process
    forks, the child process can clone the parent's context by first
    creating a context (via DK_CXLFLASH_ATTACH) and then using this
    ioctl to perform the clone from the parent to the child; a sketch
    of this sequence follows this section.

    The clone itself is fairly simple. The resource handle and LUN
    translation tables are copied from the parent context to the child's
    and then synced with the AFU.

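    A minimal sketch of the fork/clone sequence, as seen from the child
    process; struct dk_cxlflash_clone is assumed from
    uapi/scsi/cxlflash_ioctl.h and the field names context_id_src and
    context_id_dst are approximations. parent_ctxid is assumed to have
    been saved by the parent before fork().

        /* In the child: the parent's tokens cannot be reused, so create
         * a fresh context first and then clone the parent's into it.
         */
        struct dk_cxlflash_attach attach = { 0 };
        struct dk_cxlflash_clone clone = { 0 };

        if (ioctl(fd1, DK_CXLFLASH_ATTACH, &attach) < 0)
                return -1;

        clone.context_id_src = parent_ctxid;
        clone.context_id_dst = attach.context_id;
        if (ioctl(fd1, DK_CXLFLASH_VLUN_CLONE, &clone) < 0)
                return -1;

        /* The child's context now references the same resource handles
         * and translation tables as the parent's.
         */
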
DK_CXLFLASH_VERIFY
------------------
    This ioctl is used to detect various changes such as the capacity of
    the disk changing, the number of LUNs visible changing, etc. In cases
    where the changes affect the application (such as a LUN resize), the
    cxlflash driver will report the changed state to the application.

    The user calls in when they want to validate that a LUN hasn't been
    changed in response to a check condition. As the user is operating out
    of band from the kernel, they will see these types of events without
    the kernel's knowledge. When encountered, the user's architected
    behavior is to call in to this ioctl, indicating what they want to
    verify and passing along any appropriate information. For now, only
    verifying a LUN change (i.e. a size difference) with sense data is
    supported.

DK_CXLFLASH_RECOVER_AFU
-----------------------
    This ioctl is used to drive recovery (if such an action is warranted)
    of a specified user context. Any state associated with the user context
    is re-established upon successful recovery.

    User contexts are put into an error condition when the device needs to
    be reset or is terminating. Users are notified of this error condition
    by seeing all 0xF's on an MMIO read. Upon encountering this, the
    architected behavior for a user is to call into this ioctl to recover
    their context (a sketch of this pattern follows this section). A user
    may also call into this ioctl at any time to check if the device is
    operating normally. If a failure is returned from this ioctl, the user
    is expected to gracefully clean up their context via the release/detach
    ioctls. Until they do, the context they hold is not relinquished. The
    user may also optionally exit the process, at which time the
    context/resources they held will be freed as part of the release fop.

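    A minimal sketch of the recovery pattern described above; the mmio
    pointer is assumed to be the per-context MMIO area obtained by
    mmap()ing fd2, struct dk_cxlflash_recover_afu is assumed from
    uapi/scsi/cxlflash_ioctl.h, and the context_id field name is an
    approximation.

        if (*(volatile unsigned long long *)mmio == ~0ULL) {
                /* All 0xF's on an MMIO read: the context is in error */
                struct dk_cxlflash_recover_afu recover = { 0 };

                recover.context_id = ctxid;
                if (ioctl(fd1, DK_CXLFLASH_RECOVER_AFU, &recover) < 0) {
                        /* recovery failed: release handles, then detach */
                }
        }
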
DK_CXLFLASH_MANAGE_LUN
----------------------
    This ioctl is used to switch a LUN from a mode where it is available
    for file-system access (legacy), to a mode where it is set aside for
    exclusive user space access (superpipe). In case a LUN is visible
    across multiple ports and adapters, this ioctl is used to uniquely
    identify each LUN by its World Wide Node Name (WWNN).

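    A minimal sketch of switching a LUN into superpipe mode; the
    structure, flag and length names (struct dk_cxlflash_manage_lun,
    DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE,
    DK_CXLFLASH_MANAGE_LUN_WWID_LEN) match those used by the driver in
    uapi/scsi/cxlflash_ioctl.h; obtaining the WWID itself (e.g. from VPD
    page 0x83) is outside the scope of this sketch.

        struct dk_cxlflash_manage_lun manage = { 0 };

        memcpy(manage.wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN);
        manage.hdr.flags = DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE;

        if (ioctl(fd1, DK_CXLFLASH_MANAGE_LUN, &manage) < 0)
                return -1;

        /* The LUN is now set aside for exclusive user space access;
         * DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE reverts to legacy mode.
         */
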
MAINTAINERS
@@ -8098,7 +8098,7 @@ S: Supported

 F:	drivers/scsi/pmcraid.*

 PMC SIERRA PM8001 DRIVER
-M:	xjtuwjp@gmail.com
+M:	Jack Wang <jinpu.wang@profitbricks.com>
 M:	lindar_liu@usish.com
 L:	pmchba@pmcs.com
 L:	linux-scsi@vger.kernel.org

drivers/message/fusion/mptctl.c
@@ -1859,6 +1859,15 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)

 	}
 	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);

+	/* Basic sanity checks to prevent underflows or integer overflows */
+	if (karg.maxReplyBytes < 0 ||
+	    karg.dataInSize < 0 ||
+	    karg.dataOutSize < 0 ||
+	    karg.dataSgeOffset < 0 ||
+	    karg.maxSenseBytes < 0 ||
+	    karg.dataSgeOffset > ioc->req_sz / 4)
+		return -EINVAL;
+
 	/* Verify that the final request frame will not be too large.
 	 */
 	sz = karg.dataSgeOffset * 4;

drivers/scsi/Kconfig
@@ -345,6 +345,7 @@ source "drivers/scsi/cxgbi/Kconfig"

 source "drivers/scsi/bnx2i/Kconfig"
 source "drivers/scsi/bnx2fc/Kconfig"
 source "drivers/scsi/be2iscsi/Kconfig"
+source "drivers/scsi/cxlflash/Kconfig"

 config SGIWD93_SCSI
 	tristate "SGI WD93C93 SCSI Driver"

drivers/scsi/Makefile
@@ -102,6 +102,7 @@ obj-$(CONFIG_SCSI_7000FASST)	+= wd7000.o

 obj-$(CONFIG_SCSI_EATA)		+= eata.o
 obj-$(CONFIG_SCSI_DC395x)	+= dc395x.o
 obj-$(CONFIG_SCSI_AM53C974)	+= esp_scsi.o am53c974.o
+obj-$(CONFIG_CXLFLASH)		+= cxlflash/
 obj-$(CONFIG_MEGARAID_LEGACY)	+= megaraid.o
 obj-$(CONFIG_MEGARAID_NEWGEN)	+= megaraid/
 obj-$(CONFIG_MEGARAID_SAS)	+= megaraid/

drivers/scsi/aic94xx/aic94xx_init.c
@@ -109,6 +109,7 @@ static int asd_map_memio(struct asd_ha_struct *asd_ha)

 		if (!io_handle->addr) {
 			asd_printk("couldn't map MBAR%d of %s\n", i==0?0:1,
 				   pci_name(asd_ha->pcidev));
+			err = -ENOMEM;
 			goto Err_unreq;
 		}
 	}

drivers/scsi/bfa/bfad_im.c
@@ -851,6 +851,8 @@ bfad_im_module_exit(void)

 	if (bfad_im_scsi_vport_transport_template)
 		fc_release_transport(bfad_im_scsi_vport_transport_template);
+
+	idr_destroy(&bfad_im_port_index);
 }

 void

drivers/scsi/cxlflash/Kconfig (new file, 11 lines)
@@ -0,0 +1,11 @@

#
# IBM CXL-attached Flash Accelerator SCSI Driver
#

config CXLFLASH
	tristate "Support for IBM CAPI Flash"
	depends on PCI && SCSI && CXL && EEH
	default m
	help
	  Allows CAPI Accelerated IO to Flash
	  If unsure, say N.
drivers/scsi/cxlflash/Makefile (new file, 2 lines)
@@ -0,0 +1,2 @@

obj-$(CONFIG_CXLFLASH) += cxlflash.o
cxlflash-y += main.o superpipe.o lunmgt.o vlun.o
drivers/scsi/cxlflash/common.h (new file, 208 lines)
@@ -0,0 +1,208 @@

/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _CXLFLASH_COMMON_H
#define _CXLFLASH_COMMON_H

#include <linux/list.h>
#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>


#define MAX_CONTEXT	CXLFLASH_MAX_CONTEXT	/* num contexts per afu */

#define CXLFLASH_BLOCK_SIZE	4096		/* 4K blocks */
#define CXLFLASH_MAX_XFER_SIZE	16777216	/* 16MB transfer */
#define CXLFLASH_MAX_SECTORS	(CXLFLASH_MAX_XFER_SIZE/512)	/* SCSI wants
								   max_sectors
								   in units of
								   512 byte
								   sectors
								*/

#define NUM_RRQ_ENTRY	16	/* for master issued cmds */
#define MAX_RHT_PER_CONTEXT	(PAGE_SIZE / sizeof(struct sisl_rht_entry))

/* AFU command retry limit */
#define MC_RETRY_CNT	5	/* sufficient for SCSI check and
				   certain AFU errors */

/* Command management definitions */
#define CXLFLASH_NUM_CMDS	(2 * CXLFLASH_MAX_CMDS)	/* Must be a pow2 for
							   alignment and more
							   efficient array
							   index derivation
							 */

#define CXLFLASH_MAX_CMDS		16
#define CXLFLASH_MAX_CMDS_PER_LUN	CXLFLASH_MAX_CMDS


static inline void check_sizes(void)
{
	BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_NUM_CMDS);
}

/* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */
#define CMD_BUFSIZE	SIZE_4K

/* flags in IOA status area for host use */
#define B_DONE		0x01
#define B_ERROR		0x02	/* set with B_DONE */
#define B_TIMEOUT	0x04	/* set with B_DONE & B_ERROR */

enum cxlflash_lr_state {
	LINK_RESET_INVALID,
	LINK_RESET_REQUIRED,
	LINK_RESET_COMPLETE
};

enum cxlflash_init_state {
	INIT_STATE_NONE,
	INIT_STATE_PCI,
	INIT_STATE_AFU,
	INIT_STATE_SCSI
};

enum cxlflash_state {
	STATE_NORMAL,	/* Normal running state, everything good */
	STATE_LIMBO,	/* Limbo running state, trying to reset/recover */
	STATE_FAILTERM	/* Failed/terminating state, error out users/threads */
};

/*
 * Each context has its own set of resource handles that is visible
 * only from that context.
 */

struct cxlflash_cfg {
	struct afu *afu;
	struct cxl_context *mcctx;

	struct pci_dev *dev;
	struct pci_device_id *dev_id;
	struct Scsi_Host *host;

	ulong cxlflash_regs_pci;

	struct work_struct work_q;
	enum cxlflash_init_state init_state;
	enum cxlflash_lr_state lr_state;
	int lr_port;

	struct cxl_afu *cxl_afu;

	struct pci_pool *cxlflash_cmd_pool;
	struct pci_dev *parent_dev;

	atomic_t recovery_threads;
	struct mutex ctx_recovery_mutex;
	struct mutex ctx_tbl_list_mutex;
	struct ctx_info *ctx_tbl[MAX_CONTEXT];
	struct list_head ctx_err_recovery; /* contexts w/ recovery pending */
	struct file_operations cxl_fops;

	atomic_t num_user_contexts;

	/* Parameters that are LUN table related */
	int last_lun_index[CXLFLASH_NUM_FC_PORTS];
	int promote_lun_index;
	struct list_head lluns; /* list of llun_info structs */

	wait_queue_head_t tmf_waitq;
	bool tmf_active;
	wait_queue_head_t limbo_waitq;
	enum cxlflash_state state;
};

struct afu_cmd {
	struct sisl_ioarcb rcb;	/* IOARCB (cache line aligned) */
	struct sisl_ioasa sa;	/* IOASA must follow IOARCB */
	spinlock_t slock;
	struct completion cevent;
	char *buf;		/* per command buffer */
	struct afu *parent;
	int slot;
	atomic_t free;

	u8 cmd_tmf:1;

	/* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned.
	 * However for performance reasons the IOARCB/IOASA should be
	 * cache line aligned.
	 */
} __aligned(cache_line_size());

struct afu {
	/* Stuff requiring alignment go first. */

	u64 rrq_entry[NUM_RRQ_ENTRY]; /* 128B RRQ */
	/*
	 * Command & data for AFU commands.
	 */
	struct afu_cmd cmd[CXLFLASH_NUM_CMDS];

	/* Beware of alignment till here. Preferably introduce new
	 * fields after this point
	 */

	/* AFU HW */
	struct cxl_ioctl_start_work work;
	struct cxlflash_afu_map *afu_map;	/* entire MMIO map */
	struct sisl_host_map *host_map;		/* MC host map */
	struct sisl_ctrl_map *ctrl_map;		/* MC control map */

	ctx_hndl_t ctx_hndl;	/* master's context handle */
	u64 *hrrq_start;
	u64 *hrrq_end;
	u64 *hrrq_curr;
	bool toggle;
	bool read_room;
	atomic64_t room;
	u64 hb;
	u32 cmd_couts;		/* Number of command checkouts */
	u32 internal_lun;	/* User-desired LUN mode for this AFU */

	char version[8];
	u64 interface_version;

	struct cxlflash_cfg *parent; /* Pointer back to parent cxlflash_cfg */

};

static inline u64 lun_to_lunid(u64 lun)
{
	u64 lun_id;

	int_to_scsilun(lun, (struct scsi_lun *)&lun_id);
	return swab64(lun_id);
}

int cxlflash_send_cmd(struct afu *, struct afu_cmd *);
void cxlflash_wait_resp(struct afu *, struct afu_cmd *);
int cxlflash_afu_reset(struct cxlflash_cfg *);
struct afu_cmd *cxlflash_cmd_checkout(struct afu *);
void cxlflash_cmd_checkin(struct afu_cmd *);
int cxlflash_afu_sync(struct afu *, ctx_hndl_t, res_hndl_t, u8);
void cxlflash_list_init(void);
void cxlflash_term_global_luns(void);
void cxlflash_free_errpage(void);
int cxlflash_ioctl(struct scsi_device *, int, void __user *);
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *);
int cxlflash_mark_contexts_error(struct cxlflash_cfg *);
void cxlflash_term_local_luns(struct cxlflash_cfg *);
void cxlflash_restore_luntable(struct cxlflash_cfg *);

#endif /* ifndef _CXLFLASH_COMMON_H */
drivers/scsi/cxlflash/lunmgt.c (new file, 266 lines)
@@ -0,0 +1,266 @@
|
||||
/*
|
||||
* CXL Flash Device Driver
|
||||
*
|
||||
* Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
|
||||
* Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
|
||||
*
|
||||
* Copyright (C) 2015 IBM Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <misc/cxl.h>
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
#include <scsi/scsi_host.h>
|
||||
#include <uapi/scsi/cxlflash_ioctl.h>
|
||||
|
||||
#include "sislite.h"
|
||||
#include "common.h"
|
||||
#include "vlun.h"
|
||||
#include "superpipe.h"
|
||||
|
||||
/**
|
||||
* create_local() - allocate and initialize a local LUN information structure
|
||||
* @sdev: SCSI device associated with LUN.
|
||||
* @wwid: World Wide Node Name for LUN.
|
||||
*
|
||||
* Return: Allocated local llun_info structure on success, NULL on failure
|
||||
*/
|
||||
static struct llun_info *create_local(struct scsi_device *sdev, u8 *wwid)
|
||||
{
|
||||
struct llun_info *lli = NULL;
|
||||
|
||||
lli = kzalloc(sizeof(*lli), GFP_KERNEL);
|
||||
if (unlikely(!lli)) {
|
||||
pr_err("%s: could not allocate lli\n", __func__);
|
||||
goto out;
|
||||
}
|
||||
|
||||
lli->sdev = sdev;
|
||||
lli->newly_created = true;
|
||||
lli->host_no = sdev->host->host_no;
|
||||
lli->in_table = false;
|
||||
|
||||
memcpy(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN);
|
||||
out:
|
||||
return lli;
|
||||
}
|
||||
|
||||
/**
|
||||
* create_global() - allocate and initialize a global LUN information structure
|
||||
* @sdev: SCSI device associated with LUN.
|
||||
* @wwid: World Wide Node Name for LUN.
|
||||
*
|
||||
* Return: Allocated global glun_info structure on success, NULL on failure
|
||||
*/
|
||||
static struct glun_info *create_global(struct scsi_device *sdev, u8 *wwid)
|
||||
{
|
||||
struct glun_info *gli = NULL;
|
||||
|
||||
gli = kzalloc(sizeof(*gli), GFP_KERNEL);
|
||||
if (unlikely(!gli)) {
|
||||
pr_err("%s: could not allocate gli\n", __func__);
|
||||
goto out;
|
||||
}
|
||||
|
||||
mutex_init(&gli->mutex);
|
||||
memcpy(gli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN);
|
||||
out:
|
||||
return gli;
|
||||
}
|
||||
|
||||
/**
|
||||
* refresh_local() - find and update local LUN information structure by WWID
|
||||
* @cfg: Internal structure associated with the host.
|
||||
* @wwid: WWID associated with LUN.
|
||||
*
|
||||
* When the LUN is found, mark it by updating it's newly_created field.
|
||||
*
|
||||
* Return: Found local lun_info structure on success, NULL on failure
|
||||
* If a LUN with the WWID is found in the list, refresh it's state.
|
||||
*/
|
||||
static struct llun_info *refresh_local(struct cxlflash_cfg *cfg, u8 *wwid)
|
||||
{
|
||||
struct llun_info *lli, *temp;
|
||||
|
||||
list_for_each_entry_safe(lli, temp, &cfg->lluns, list)
|
||||
if (!memcmp(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN)) {
|
||||
lli->newly_created = false;
|
||||
return lli;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* lookup_global() - find a global LUN information structure by WWID
|
||||
* @wwid: WWID associated with LUN.
|
||||
*
|
||||
* Return: Found global lun_info structure on success, NULL on failure
|
||||
*/
|
||||
static struct glun_info *lookup_global(u8 *wwid)
|
||||
{
|
||||
struct glun_info *gli, *temp;
|
||||
|
||||
list_for_each_entry_safe(gli, temp, &global.gluns, list)
|
||||
if (!memcmp(gli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN))
|
||||
return gli;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* find_and_create_lun() - find or create a local LUN information structure
|
||||
* @sdev: SCSI device associated with LUN.
|
||||
* @wwid: WWID associated with LUN.
|
||||
*
|
||||
* The LUN is kept both in a local list (per adapter) and in a global list
|
||||
* (across all adapters). Certain attributes of the LUN are local to the
|
||||
* adapter (such as index, port selection mask etc.).
|
||||
* The block allocation map is shared across all adapters (i.e. associated
|
||||
* wih the global list). Since different attributes are associated with
|
||||
* the per adapter and global entries, allocate two separate structures for each
|
||||
* LUN (one local, one global).
|
||||
*
|
||||
* Keep a pointer back from the local to the global entry.
|
||||
*
|
||||
* Return: Found/Allocated local lun_info structure on success, NULL on failure
|
||||
*/
|
||||
static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid)
|
||||
{
|
||||
struct llun_info *lli = NULL;
|
||||
struct glun_info *gli = NULL;
|
||||
struct Scsi_Host *shost = sdev->host;
|
||||
struct cxlflash_cfg *cfg = shost_priv(shost);
|
||||
|
||||
mutex_lock(&global.mutex);
|
||||
if (unlikely(!wwid))
|
||||
goto out;
|
||||
|
||||
lli = refresh_local(cfg, wwid);
|
||||
if (lli)
|
||||
goto out;
|
||||
|
||||
lli = create_local(sdev, wwid);
|
||||
if (unlikely(!lli))
|
||||
goto out;
|
||||
|
||||
gli = lookup_global(wwid);
|
||||
if (gli) {
|
||||
lli->parent = gli;
|
||||
list_add(&lli->list, &cfg->lluns);
|
||||
goto out;
|
||||
}
|
||||
|
||||
gli = create_global(sdev, wwid);
|
||||
if (unlikely(!gli)) {
|
||||
kfree(lli);
|
||||
lli = NULL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
lli->parent = gli;
|
||||
list_add(&lli->list, &cfg->lluns);
|
||||
|
||||
list_add(&gli->list, &global.gluns);
|
||||
|
||||
out:
|
||||
mutex_unlock(&global.mutex);
|
||||
pr_debug("%s: returning %p\n", __func__, lli);
|
||||
return lli;
|
||||
}
|
||||
|
||||
/**
|
||||
* cxlflash_term_local_luns() - Delete all entries from local LUN list, free.
|
||||
* @cfg: Internal structure associated with the host.
|
||||
*/
|
||||
void cxlflash_term_local_luns(struct cxlflash_cfg *cfg)
|
||||
{
|
||||
struct llun_info *lli, *temp;
|
||||
|
||||
mutex_lock(&global.mutex);
|
||||
list_for_each_entry_safe(lli, temp, &cfg->lluns, list) {
|
||||
list_del(&lli->list);
|
||||
kfree(lli);
|
||||
}
|
||||
mutex_unlock(&global.mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* cxlflash_list_init() - initializes the global LUN list
|
||||
*/
|
||||
void cxlflash_list_init(void)
|
||||
{
|
||||
INIT_LIST_HEAD(&global.gluns);
|
||||
mutex_init(&global.mutex);
|
||||
global.err_page = NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* cxlflash_term_global_luns() - frees resources associated with global LUN list
|
||||
*/
|
||||
void cxlflash_term_global_luns(void)
|
||||
{
|
||||
struct glun_info *gli, *temp;
|
||||
|
||||
mutex_lock(&global.mutex);
|
||||
list_for_each_entry_safe(gli, temp, &global.gluns, list) {
|
||||
list_del(&gli->list);
|
||||
cxlflash_ba_terminate(&gli->blka.ba_lun);
|
||||
kfree(gli);
|
||||
}
|
||||
mutex_unlock(&global.mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* cxlflash_manage_lun() - handles LUN management activities
|
||||
* @sdev: SCSI device associated with LUN.
|
||||
* @manage: Manage ioctl data structure.
|
||||
*
|
||||
* This routine is used to notify the driver about a LUN's WWID and associate
|
||||
* SCSI devices (sdev) with a global LUN instance. Additionally it serves to
|
||||
* change a LUN's operating mode: legacy or superpipe.
|
||||
*
|
||||
* Return: 0 on success, -errno on failure
|
||||
*/
|
||||
int cxlflash_manage_lun(struct scsi_device *sdev,
|
||||
struct dk_cxlflash_manage_lun *manage)
|
||||
{
|
||||
int rc = 0;
|
||||
struct llun_info *lli = NULL;
|
||||
u64 flags = manage->hdr.flags;
|
||||
u32 chan = sdev->channel;
|
||||
|
||||
lli = find_and_create_lun(sdev, manage->wwid);
|
||||
pr_debug("%s: ENTER: WWID = %016llX%016llX, flags = %016llX li = %p\n",
|
||||
__func__, get_unaligned_le64(&manage->wwid[0]),
|
||||
get_unaligned_le64(&manage->wwid[8]),
|
||||
manage->hdr.flags, lli);
|
||||
if (unlikely(!lli)) {
|
||||
rc = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (flags & DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE) {
|
||||
if (lli->newly_created)
|
||||
lli->port_sel = CHAN2PORT(chan);
|
||||
else
|
||||
lli->port_sel = BOTH_PORTS;
|
||||
/* Store off lun in unpacked, AFU-friendly format */
|
||||
lli->lun_id[chan] = lun_to_lunid(sdev->lun);
|
||||
sdev->hostdata = lli;
|
||||
} else if (flags & DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE) {
|
||||
if (lli->parent->mode != MODE_NONE)
|
||||
rc = -EBUSY;
|
||||
else
|
||||
sdev->hostdata = NULL;
|
||||
}
|
||||
|
||||
out:
|
||||
pr_debug("%s: returning rc=%d\n", __func__, rc);
|
||||
return rc;
|
||||
}
|
drivers/scsi/cxlflash/main.c (new file, 2494 lines)
File diff suppressed because it is too large.
drivers/scsi/cxlflash/main.h (new file, 108 lines)
@@ -0,0 +1,108 @@

/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _CXLFLASH_MAIN_H
#define _CXLFLASH_MAIN_H

#include <linux/list.h>
#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#define CXLFLASH_NAME		"cxlflash"
#define CXLFLASH_ADAPTER_NAME	"IBM POWER CXL Flash Adapter"
#define CXLFLASH_DRIVER_DATE	"(August 13, 2015)"

#define PCI_DEVICE_ID_IBM_CORSA	0x04F0
#define CXLFLASH_SUBS_DEV_ID	0x04F0

/* Since there is only one target, make it 0 */
#define CXLFLASH_TARGET		0
#define CXLFLASH_MAX_CDB_LEN	16

/* Really only one target per bus since the Texan is directly attached */
#define CXLFLASH_MAX_NUM_TARGETS_PER_BUS	1
#define CXLFLASH_MAX_NUM_LUNS_PER_TARGET	65536

#define CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT	(120 * HZ)

#define NUM_FC_PORTS	CXLFLASH_NUM_FC_PORTS	/* ports per AFU */

/* FC defines */
#define FC_MTIP_CMDCONFIG	0x010
#define FC_MTIP_STATUS		0x018

#define FC_PNAME	0x300
#define FC_CONFIG	0x320
#define FC_CONFIG2	0x328
#define FC_STATUS	0x330
#define FC_ERROR	0x380
#define FC_ERRCAP	0x388
#define FC_ERRMSK	0x390
#define FC_CNT_CRCERR	0x538
#define FC_CRC_THRESH	0x580

#define FC_MTIP_CMDCONFIG_ONLINE	0x20ULL
#define FC_MTIP_CMDCONFIG_OFFLINE	0x40ULL

#define FC_MTIP_STATUS_MASK	0x30ULL
#define FC_MTIP_STATUS_ONLINE	0x20ULL
#define FC_MTIP_STATUS_OFFLINE	0x10ULL

/* TIMEOUT and RETRY definitions */

/* AFU command timeout values */
#define MC_AFU_SYNC_TIMEOUT	5	/* 5 secs */

/* AFU command room retry limit */
#define MC_ROOM_RETRY_CNT	10

/* FC CRC clear periodic timer */
#define MC_CRC_THRESH	100	/* threshold in 5 mins */

#define FC_PORT_STATUS_RETRY_CNT	100	/* 100 100ms retries = 10 seconds */
#define FC_PORT_STATUS_RETRY_INTERVAL_US	100000	/* microseconds */

/* VPD defines */
#define CXLFLASH_VPD_LEN	256
#define WWPN_LEN	16
#define WWPN_BUF_LEN	(WWPN_LEN + 1)

enum undo_level {
	RELEASE_CONTEXT = 0,
	FREE_IRQ,
	UNMAP_ONE,
	UNMAP_TWO,
	UNMAP_THREE,
	UNDO_START
};

struct dev_dependent_vals {
	u64 max_sectors;
};

struct asyc_intr_info {
	u64 status;
	char *desc;
	u8 port;
	u8 action;
#define CLR_FC_ERROR	0x01
#define LINK_RESET	0x02
};

#ifndef CONFIG_CXL_EEH
#define cxl_perst_reloads_same_image(_a, _b) do { } while (0)
#endif

#endif /* _CXLFLASH_MAIN_H */
drivers/scsi/cxlflash/sislite.h (new file, 472 lines)
@@ -0,0 +1,472 @@
|
||||
/*
|
||||
* CXL Flash Device Driver
|
||||
*
|
||||
* Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
|
||||
* Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
|
||||
*
|
||||
* Copyright (C) 2015 IBM Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#ifndef _SISLITE_H
|
||||
#define _SISLITE_H
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
typedef u16 ctx_hndl_t;
|
||||
typedef u32 res_hndl_t;
|
||||
|
||||
#define SIZE_4K 4096
|
||||
#define SIZE_64K 65536
|
||||
|
||||
/*
|
||||
* IOARCB: 64 bytes, min 16 byte alignment required, host native endianness
|
||||
* except for SCSI CDB which remains big endian per SCSI standards.
|
||||
*/
|
||||
struct sisl_ioarcb {
|
||||
u16 ctx_id; /* ctx_hndl_t */
|
||||
u16 req_flags;
|
||||
#define SISL_REQ_FLAGS_RES_HNDL 0x8000U /* bit 0 (MSB) */
|
||||
#define SISL_REQ_FLAGS_PORT_LUN_ID 0x0000U
|
||||
|
||||
#define SISL_REQ_FLAGS_SUP_UNDERRUN 0x4000U /* bit 1 */
|
||||
|
||||
#define SISL_REQ_FLAGS_TIMEOUT_SECS 0x0000U /* bits 8,9 */
|
||||
#define SISL_REQ_FLAGS_TIMEOUT_MSECS 0x0040U
|
||||
#define SISL_REQ_FLAGS_TIMEOUT_USECS 0x0080U
|
||||
#define SISL_REQ_FLAGS_TIMEOUT_CYCLES 0x00C0U
|
||||
|
||||
#define SISL_REQ_FLAGS_TMF_CMD 0x0004u /* bit 13 */
|
||||
|
||||
#define SISL_REQ_FLAGS_AFU_CMD 0x0002U /* bit 14 */
|
||||
|
||||
#define SISL_REQ_FLAGS_HOST_WRITE 0x0001U /* bit 15 (LSB) */
|
||||
#define SISL_REQ_FLAGS_HOST_READ 0x0000U
|
||||
|
||||
union {
|
||||
u32 res_hndl; /* res_hndl_t */
|
||||
u32 port_sel; /* this is a selection mask:
|
||||
* 0x1 -> port#0 can be selected,
|
||||
* 0x2 -> port#1 can be selected.
|
||||
* Can be bitwise ORed.
|
||||
*/
|
||||
};
|
||||
u64 lun_id;
|
||||
u32 data_len; /* 4K for read/write */
|
||||
u32 ioadl_len;
|
||||
union {
|
||||
u64 data_ea; /* min 16 byte aligned */
|
||||
u64 ioadl_ea;
|
||||
};
|
||||
u8 msi; /* LISN to send on RRQ write */
|
||||
#define SISL_MSI_CXL_PFAULT 0 /* reserved for CXL page faults */
|
||||
#define SISL_MSI_SYNC_ERROR 1 /* recommended for AFU sync error */
|
||||
#define SISL_MSI_RRQ_UPDATED 2 /* recommended for IO completion */
|
||||
#define SISL_MSI_ASYNC_ERROR 3 /* master only - for AFU async error */
|
||||
|
||||
u8 rrq; /* 0 for a single RRQ */
|
||||
u16 timeout; /* in units specified by req_flags */
|
||||
u32 rsvd1;
|
||||
u8 cdb[16]; /* must be in big endian */
|
||||
struct scsi_cmnd *scp;
|
||||
} __packed;
|
||||
|
||||
struct sisl_rc {
|
||||
u8 flags;
|
||||
#define SISL_RC_FLAGS_SENSE_VALID 0x80U
|
||||
#define SISL_RC_FLAGS_FCP_RSP_CODE_VALID 0x40U
|
||||
#define SISL_RC_FLAGS_OVERRUN 0x20U
|
||||
#define SISL_RC_FLAGS_UNDERRUN 0x10U
|
||||
|
||||
u8 afu_rc;
|
||||
#define SISL_AFU_RC_RHT_INVALID 0x01U /* user error */
|
||||
#define SISL_AFU_RC_RHT_UNALIGNED 0x02U /* should never happen */
|
||||
#define SISL_AFU_RC_RHT_OUT_OF_BOUNDS 0x03u /* user error */
|
||||
#define SISL_AFU_RC_RHT_DMA_ERR 0x04u /* see afu_extra
|
||||
may retry if afu_retry is off
|
||||
possible on master exit
|
||||
*/
|
||||
#define SISL_AFU_RC_RHT_RW_PERM 0x05u /* no RW perms, user error */
|
||||
#define SISL_AFU_RC_LXT_UNALIGNED 0x12U /* should never happen */
|
||||
#define SISL_AFU_RC_LXT_OUT_OF_BOUNDS 0x13u /* user error */
|
||||
#define SISL_AFU_RC_LXT_DMA_ERR 0x14u /* see afu_extra
|
||||
may retry if afu_retry is off
|
||||
possible on master exit
|
||||
*/
|
||||
#define SISL_AFU_RC_LXT_RW_PERM 0x15u /* no RW perms, user error */
|
||||
|
||||
#define SISL_AFU_RC_NOT_XLATE_HOST 0x1au /* possible if master exited */
|
||||
|
||||
/* NO_CHANNELS means the FC ports selected by dest_port in
|
||||
* IOARCB or in the LXT entry are down when the AFU tried to select
|
||||
* a FC port. If the port went down on an active IO, it will set
|
||||
* fc_rc to =0x54(NOLOGI) or 0x57(LINKDOWN) instead.
|
||||
*/
|
||||
#define SISL_AFU_RC_NO_CHANNELS 0x20U /* see afu_extra, may retry */
|
||||
#define SISL_AFU_RC_CAP_VIOLATION 0x21U /* either user error or
|
||||
afu reset/master restart
|
||||
*/
|
||||
#define SISL_AFU_RC_OUT_OF_DATA_BUFS 0x30U /* always retry */
|
||||
#define SISL_AFU_RC_DATA_DMA_ERR 0x31U /* see afu_extra
|
||||
may retry if afu_retry is off
|
||||
*/
|
||||
|
||||
u8 scsi_rc; /* SCSI status byte, retry as appropriate */
|
||||
#define SISL_SCSI_RC_CHECK 0x02U
|
||||
#define SISL_SCSI_RC_BUSY 0x08u
|
||||
|
||||
u8 fc_rc; /* retry */
|
||||
/*
|
||||
* We should only see fc_rc=0x57 (LINKDOWN) or 0x54(NOLOGI) for
|
||||
* commands that are in flight when a link goes down or is logged out.
|
||||
* If the link is down or logged out before AFU selects the port, either
|
||||
* it will choose the other port or we will get afu_rc=0x20 (no_channel)
|
||||
* if there is no valid port to use.
|
||||
*
|
||||
* ABORTPEND/ABORTOK/ABORTFAIL/TGTABORT can be retried, typically these
|
||||
* would happen if a frame is dropped and something times out.
|
||||
* NOLOGI or LINKDOWN can be retried if the other port is up.
|
||||
* RESIDERR can be retried as well.
|
||||
*
|
||||
* ABORTFAIL might indicate that lots of frames are getting CRC errors.
|
||||
* So it maybe retried once and reset the link if it happens again.
|
||||
* The link can also be reset on the CRC error threshold interrupt.
|
||||
*/
|
||||
#define SISL_FC_RC_ABORTPEND 0x52 /* exchange timeout or abort request */
|
||||
#define SISL_FC_RC_WRABORTPEND 0x53 /* due to write XFER_RDY invalid */
|
||||
#define SISL_FC_RC_NOLOGI 0x54 /* port not logged in, in-flight cmds */
|
||||
#define SISL_FC_RC_NOEXP 0x55 /* FC protocol error or HW bug */
|
||||
#define SISL_FC_RC_INUSE 0x56 /* tag already in use, HW bug */
|
||||
#define SISL_FC_RC_LINKDOWN 0x57 /* link down, in-flight cmds */
|
||||
#define SISL_FC_RC_ABORTOK 0x58 /* pending abort completed w/success */
|
||||
#define SISL_FC_RC_ABORTFAIL 0x59 /* pending abort completed w/fail */
|
||||
#define SISL_FC_RC_RESID 0x5A /* ioasa underrun/overrun flags set */
|
||||
#define SISL_FC_RC_RESIDERR 0x5B /* actual data len does not match SCSI
|
||||
reported len, possbly due to dropped
|
||||
frames */
|
||||
#define SISL_FC_RC_TGTABORT 0x5C /* command aborted by target */
|
||||
};
|
||||
|
||||
#define SISL_SENSE_DATA_LEN 20 /* Sense data length */
|
||||
|
||||
/*
|
||||
* IOASA: 64 bytes & must follow IOARCB, min 16 byte alignment required,
|
||||
* host native endianness
|
||||
*/
|
||||
struct sisl_ioasa {
|
||||
union {
|
||||
struct sisl_rc rc;
|
||||
u32 ioasc;
|
||||
#define SISL_IOASC_GOOD_COMPLETION 0x00000000U
|
||||
};
|
||||
u32 resid;
|
||||
u8 port;
|
||||
u8 afu_extra;
|
||||
/* when afu_rc=0x04, 0x14, 0x31 (_xxx_DMA_ERR):
|
||||
* afu_exta contains PSL response code. Useful codes are:
|
||||
*/
|
||||
#define SISL_AFU_DMA_ERR_PAGE_IN 0x0A /* AFU_retry_on_pagein Action
|
||||
* Enabled N/A
|
||||
* Disabled retry
|
||||
*/
|
||||
#define SISL_AFU_DMA_ERR_INVALID_EA 0x0B /* this is a hard error
|
||||
* afu_rc Implies
|
||||
* 0x04, 0x14 master exit.
|
||||
* 0x31 user error.
|
||||
*/
|
||||
/* when afu rc=0x20 (no channels):
|
||||
* afu_extra bits [4:5]: available portmask, [6:7]: requested portmask.
|
||||
*/
|
||||
#define SISL_AFU_NO_CLANNELS_AMASK(afu_extra) (((afu_extra) & 0x0C) >> 2)
|
||||
#define SISL_AFU_NO_CLANNELS_RMASK(afu_extra) ((afu_extra) & 0x03)
|
||||
|
||||
u8 scsi_extra;
|
||||
u8 fc_extra;
|
||||
u8 sense_data[SISL_SENSE_DATA_LEN];
|
||||
|
||||
/* These fields are defined by the SISlite architecture for the
|
||||
* host to use as they see fit for their implementation.
|
||||
*/
|
||||
union {
|
||||
u64 host_use[4];
|
||||
u8 host_use_b[32];
|
||||
};
|
||||
} __packed;
|
||||
|
||||
#define SISL_RESP_HANDLE_T_BIT 0x1ULL /* Toggle bit */
|
||||
|
||||
/* MMIO space is required to support only 64-bit access */
|
||||
|
||||
/*
|
||||
* This AFU has two mechanisms to deal with endian-ness.
|
||||
* One is a global configuration (in the afu_config) register
|
||||
* below that specifies the endian-ness of the host.
|
||||
* The other is a per context (i.e. application) specification
|
||||
* controlled by the endian_ctrl field here. Since the master
|
||||
* context is one such application the master context's
|
||||
* endian-ness is set to be the same as the host.
|
||||
*
|
||||
* As per the SISlite spec, the MMIO registers are always
|
||||
* big endian.
|
||||
*/
|
||||
#define SISL_ENDIAN_CTRL_BE 0x8000000000000080ULL
|
||||
#define SISL_ENDIAN_CTRL_LE 0x0000000000000000ULL
|
||||
|
||||
#ifdef __BIG_ENDIAN
|
||||
#define SISL_ENDIAN_CTRL SISL_ENDIAN_CTRL_BE
|
||||
#else
|
||||
#define SISL_ENDIAN_CTRL SISL_ENDIAN_CTRL_LE
|
||||
#endif
|
||||
|
||||
/* per context host transport MMIO */
|
||||
struct sisl_host_map {
|
||||
__be64 endian_ctrl; /* Per context Endian Control. The AFU will
|
||||
* operate on whatever the context is of the
|
||||
* host application.
|
||||
*/
|
||||
|
||||
__be64 intr_status; /* this sends LISN# programmed in ctx_ctrl.
|
||||
* Only recovery in a PERM_ERR is a context
|
||||
* exit since there is no way to tell which
|
||||
* command caused the error.
|
||||
*/
|
||||
#define SISL_ISTATUS_PERM_ERR_CMDROOM 0x0010ULL /* b59, user error */
|
||||
#define SISL_ISTATUS_PERM_ERR_RCB_READ 0x0008ULL /* b60, user error */
|
||||
#define SISL_ISTATUS_PERM_ERR_SA_WRITE 0x0004ULL /* b61, user error */
|
||||
#define SISL_ISTATUS_PERM_ERR_RRQ_WRITE 0x0002ULL /* b62, user error */
|
||||
/* Page in wait accessing RCB/IOASA/RRQ is reported in b63.
|
||||
* Same error in data/LXT/RHT access is reported via IOASA.
|
||||
*/
|
||||
#define SISL_ISTATUS_TEMP_ERR_PAGEIN 0x0001ULL /* b63, can be generated
|
||||
* only when AFU auto
|
||||
* retry is disabled.
|
||||
* If user can determine
|
||||
* the command that
|
||||
* caused the error, it
|
||||
* can be retried.
|
||||
*/
|
||||
#define SISL_ISTATUS_UNMASK (0x001FULL) /* 1 means unmasked */
|
||||
#define SISL_ISTATUS_MASK ~(SISL_ISTATUS_UNMASK) /* 1 means masked */
|
||||
|
||||
__be64 intr_clear;
|
||||
__be64 intr_mask;
|
||||
__be64 ioarrin; /* only write what cmd_room permits */
|
||||
__be64 rrq_start; /* start & end are both inclusive */
|
||||
__be64 rrq_end; /* write sequence: start followed by end */
|
||||
__be64 cmd_room;
|
||||
__be64 ctx_ctrl; /* least signiifcant byte or b56:63 is LISN# */
|
||||
__be64 mbox_w; /* restricted use */
|
||||
};
|
||||
|
||||
/* per context provisioning & control MMIO */
|
||||
struct sisl_ctrl_map {
|
||||
__be64 rht_start;
|
||||
__be64 rht_cnt_id;
|
||||
/* both cnt & ctx_id args must be ULL */
|
||||
#define SISL_RHT_CNT_ID(cnt, ctx_id) (((cnt) << 48) | ((ctx_id) << 32))
|
||||
|
||||
__be64 ctx_cap; /* afu_rc below is when the capability is violated */
|
||||
#define SISL_CTX_CAP_PROXY_ISSUE 0x8000000000000000ULL /* afu_rc 0x21 */
|
||||
#define SISL_CTX_CAP_REAL_MODE 0x4000000000000000ULL /* afu_rc 0x21 */
|
||||
#define SISL_CTX_CAP_HOST_XLATE 0x2000000000000000ULL /* afu_rc 0x1a */
|
||||
#define SISL_CTX_CAP_PROXY_TARGET 0x1000000000000000ULL /* afu_rc 0x21 */
|
||||
#define SISL_CTX_CAP_AFU_CMD 0x0000000000000008ULL /* afu_rc 0x21 */
|
||||
#define SISL_CTX_CAP_GSCSI_CMD 0x0000000000000004ULL /* afu_rc 0x21 */
|
||||
#define SISL_CTX_CAP_WRITE_CMD 0x0000000000000002ULL /* afu_rc 0x21 */
|
||||
#define SISL_CTX_CAP_READ_CMD 0x0000000000000001ULL /* afu_rc 0x21 */
|
||||
__be64 mbox_r;
|
||||
};
|
||||
|
||||
/* single copy global regs */
|
||||
struct sisl_global_regs {
|
||||
__be64 aintr_status;
|
||||
/* In cxlflash, each FC port/link gets a byte of status */
|
||||
#define SISL_ASTATUS_FC0_OTHER 0x8000ULL /* b48, other err,
|
||||
FC_ERRCAP[31:20] */
|
||||
#define SISL_ASTATUS_FC0_LOGO 0x4000ULL /* b49, target sent FLOGI/PLOGI/LOGO
|
||||
while logged in */
|
||||
#define SISL_ASTATUS_FC0_CRC_T 0x2000ULL /* b50, CRC threshold exceeded */
|
||||
#define SISL_ASTATUS_FC0_LOGI_R 0x1000ULL /* b51, login state mechine timed out
|
||||
and retrying */
|
||||
#define SISL_ASTATUS_FC0_LOGI_F 0x0800ULL /* b52, login failed,
|
||||
FC_ERROR[19:0] */
|
||||
#define SISL_ASTATUS_FC0_LOGI_S 0x0400ULL /* b53, login succeeded */
|
||||
#define SISL_ASTATUS_FC0_LINK_DN 0x0200ULL /* b54, link online to offline */
|
||||
#define SISL_ASTATUS_FC0_LINK_UP 0x0100ULL /* b55, link offline to online */
|
||||
|
||||
#define SISL_ASTATUS_FC1_OTHER 0x0080ULL /* b56 */
|
||||
#define SISL_ASTATUS_FC1_LOGO 0x0040ULL /* b57 */
|
||||
#define SISL_ASTATUS_FC1_CRC_T 0x0020ULL /* b58 */
|
||||
#define SISL_ASTATUS_FC1_LOGI_R 0x0010ULL /* b59 */
|
||||
#define SISL_ASTATUS_FC1_LOGI_F 0x0008ULL /* b60 */
|
||||
#define SISL_ASTATUS_FC1_LOGI_S 0x0004ULL /* b61 */
|
||||
#define SISL_ASTATUS_FC1_LINK_DN 0x0002ULL /* b62 */
|
||||
#define SISL_ASTATUS_FC1_LINK_UP 0x0001ULL /* b63 */
|
||||
|
||||
#define SISL_FC_INTERNAL_UNMASK 0x0000000300000000ULL /* 1 means unmasked */
|
||||
#define SISL_FC_INTERNAL_MASK ~(SISL_FC_INTERNAL_UNMASK)
|
||||
#define SISL_FC_INTERNAL_SHIFT 32
|
||||
|
||||
#define SISL_ASTATUS_UNMASK 0xFFFFULL /* 1 means unmasked */
|
||||
#define SISL_ASTATUS_MASK ~(SISL_ASTATUS_UNMASK) /* 1 means masked */
|
||||
|
||||
__be64 aintr_clear;
|
||||
__be64 aintr_mask;
|
||||
__be64 afu_ctrl;
|
||||
__be64 afu_hb;
|
||||
__be64 afu_scratch_pad;
|
||||
__be64 afu_port_sel;
|
||||
#define SISL_AFUCONF_AR_IOARCB 0x4000ULL
|
||||
#define SISL_AFUCONF_AR_LXT 0x2000ULL
|
||||
#define SISL_AFUCONF_AR_RHT 0x1000ULL
|
||||
#define SISL_AFUCONF_AR_DATA 0x0800ULL
|
||||
#define SISL_AFUCONF_AR_RSRC 0x0400ULL
|
||||
#define SISL_AFUCONF_AR_IOASA 0x0200ULL
|
||||
#define SISL_AFUCONF_AR_RRQ 0x0100ULL
|
||||
/* Aggregate all Auto Retry Bits */
|
||||
#define SISL_AFUCONF_AR_ALL (SISL_AFUCONF_AR_IOARCB|SISL_AFUCONF_AR_LXT| \
|
||||
SISL_AFUCONF_AR_RHT|SISL_AFUCONF_AR_DATA| \
|
||||
SISL_AFUCONF_AR_RSRC|SISL_AFUCONF_AR_IOASA| \
|
||||
SISL_AFUCONF_AR_RRQ)
|
||||
#ifdef __BIG_ENDIAN
|
||||
#define SISL_AFUCONF_ENDIAN 0x0000ULL
|
||||
#else
|
||||
#define SISL_AFUCONF_ENDIAN 0x0020ULL
|
||||
#endif
|
||||
#define SISL_AFUCONF_MBOX_CLR_READ 0x0010ULL
|
||||
__be64 afu_config;
|
||||
__be64 rsvd[0xf8];
|
||||
__be64 afu_version;
|
||||
__be64 interface_version;
|
||||
};
|
||||
|
||||
#define CXLFLASH_NUM_FC_PORTS	2
#define CXLFLASH_MAX_CONTEXT	512	/* how many contexts per afu */
#define CXLFLASH_NUM_VLUNS	512

struct sisl_global_map {
	union {
		struct sisl_global_regs regs;
		char page0[SIZE_4K];	/* page 0 */
	};

	char page1[SIZE_4K];	/* page 1 */

	/* pages 2 & 3 */
	__be64 fc_regs[CXLFLASH_NUM_FC_PORTS][CXLFLASH_NUM_VLUNS];

	/* pages 4 & 5 (lun tbl) */
	__be64 fc_port[CXLFLASH_NUM_FC_PORTS][CXLFLASH_NUM_VLUNS];

};

/*
 * CXL Flash Memory Map
 *
 * +-------------------------------+
 * |    512 * 64 KB User MMIO      |
 * |        (per context)          |
 * |       User Accessible         |
 * +-------------------------------+
 * |    512 * 128 B per context    |
 * |   Provisioning and Control    |
 * |  Trusted Process accessible   |
 * +-------------------------------+
 * |         64 KB Global          |
 * |  Trusted Process accessible   |
 * +-------------------------------+
 */
struct cxlflash_afu_map {
	union {
		struct sisl_host_map host;
		char harea[SIZE_64K];	/* 64KB each */
	} hosts[CXLFLASH_MAX_CONTEXT];

	union {
		struct sisl_ctrl_map ctrl;
		char carea[cache_line_size()];	/* 128B each */
	} ctrls[CXLFLASH_MAX_CONTEXT];

	union {
		struct sisl_global_map global;
		char garea[SIZE_64K];	/* 64KB single block */
	};
};

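/*
 * Illustrative sketch (not part of the SISlite header): given the map
 * above, each context's 64KB host MMIO page and 128B control block can be
 * reached by indexing with the context handle. The helper names and the
 * "ctx_hndl" argument are assumptions for the example.
 */
static inline struct sisl_host_map *example_ctx_host_map(struct cxlflash_afu_map *afu_map,
							 u32 ctx_hndl)
{
	return &afu_map->hosts[ctx_hndl].host;	/* user-accessible MMIO page */
}

static inline struct sisl_ctrl_map *example_ctx_ctrl_map(struct cxlflash_afu_map *afu_map,
							 u32 ctx_hndl)
{
	return &afu_map->ctrls[ctx_hndl].ctrl;	/* trusted-process control block */
}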
/*
 * LXT - LBA Translation Table
 * LXT control blocks
 */
struct sisl_lxt_entry {
	u64 rlba_base;	/* bits 0:47 is base
			 * b48:55 is lun index
			 * b58:59 is write & read perms
			 * (if no perm, afu_rc=0x15)
			 * b60:63 is port_sel mask
			 */
};

/*
 * RHT - Resource Handle Table
 * Per the SISlite spec, RHT entries are to be 16-byte aligned
 */
struct sisl_rht_entry {
	struct sisl_lxt_entry *lxt_start;
	u32 lxt_cnt;
	u16 rsvd;
	u8 fp;		/* format & perm nibbles.
			 * (if no perm, afu_rc=0x05)
			 */
	u8 nmask;
} __packed __aligned(16);

struct sisl_rht_entry_f1 {
	u64 lun_id;
	union {
		struct {
			u8 valid;
			u8 rsvd[5];
			u8 fp;
			u8 port_sel;
		};

		u64 dw;
	};
} __packed __aligned(16);

/* make the fp byte */
#define SISL_RHT_FP(fmt, perm) (((fmt) << 4) | (perm))

/* make the fp byte for a clone from a source fp and clone flags
 * flags must be only 2 LSB bits.
 */
#define SISL_RHT_FP_CLONE(src_fp, cln_flags) ((src_fp) & (0xFC | (cln_flags)))

#define RHT_PERM_READ	0x01U
#define RHT_PERM_WRITE	0x02U
#define RHT_PERM_RW	(RHT_PERM_READ | RHT_PERM_WRITE)

/* extract the perm bits from a fp */
#define SISL_RHT_PERM(fp) ((fp) & RHT_PERM_RW)

#define PORT0		0x01U
#define PORT1		0x02U
#define BOTH_PORTS	(PORT0 | PORT1)

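/*
 * Illustrative sketch (not from the driver): building and inspecting the
 * fp byte with the macros above. The format nibble selects the entry
 * layout (struct sisl_rht_entry vs sisl_rht_entry_f1); the values below
 * are examples only.
 *
 *	u8 fp  = SISL_RHT_FP(0, RHT_PERM_RW);          -> 0x03
 *	u8 cfp = SISL_RHT_FP_CLONE(fp, RHT_PERM_READ); -> 0x01 (write dropped)
 */
static inline bool example_rhte_is_writable(u8 fp)
{
	return (SISL_RHT_PERM(fp) & RHT_PERM_WRITE) != 0;
}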
/* AFU Sync Mode byte */
#define AFU_LW_SYNC	0x0U
#define AFU_HW_SYNC	0x1U
#define AFU_GSYNC	0x2U

/* Special Task Management Function CDB */
#define TMF_LUN_RESET	0x1U
#define TMF_CLEAR_ACA	0x2U


#define SISLITE_MAX_WS_BLOCKS	512

#endif /* _SISLITE_H */
2084	drivers/scsi/cxlflash/superpipe.c (new file; diff suppressed because it is too large)
147	drivers/scsi/cxlflash/superpipe.h (new file)
@@ -0,0 +1,147 @@
/*
|
||||
* CXL Flash Device Driver
|
||||
*
|
||||
* Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
|
||||
* Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
|
||||
*
|
||||
* Copyright (C) 2015 IBM Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#ifndef _CXLFLASH_SUPERPIPE_H
|
||||
#define _CXLFLASH_SUPERPIPE_H
|
||||
|
||||
extern struct cxlflash_global global;
|
||||
|
||||
/*
 * Terminology: use afu (and not adapter) to refer to the HW.
 * Adapter is the entire slot and includes PSL out of which
 * only the AFU is visible to user space.
 */

/* Chunk size parms: note sislite minimum chunk size is
 * 0x10000 LBAs corresponding to an NMASK of 16.
 */
#define MC_CHUNK_SIZE	(1 << MC_RHT_NMASK)	/* in LBAs */

#define MC_DISCOVERY_TIMEOUT 5	/* 5 secs */

#define CHAN2PORT(_x)	((_x) + 1)
#define PORT2CHAN(_x)	((_x) - 1)

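/*
 * Illustrative sketch (not from the driver): with MC_CHUNK_SIZE LBAs per
 * chunk, a virtual LBA splits into a chunk number (the LXT index) and an
 * offset inside the chunk. Helper names are invented for the example.
 */
static inline u64 example_lba_to_chunk(u64 vlba)
{
	return vlba >> MC_RHT_NMASK;		/* which LXT entry */
}

static inline u64 example_lba_chunk_offset(u64 vlba)
{
	return vlba & (MC_CHUNK_SIZE - 1);	/* ORed with RLBA_BASE by the AFU */
}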
enum lun_mode {
|
||||
MODE_NONE = 0,
|
||||
MODE_VIRTUAL,
|
||||
MODE_PHYSICAL
|
||||
};
|
||||
|
||||
/* Global (entire driver, spans adapters) lun_info structure */
|
||||
struct glun_info {
|
||||
u64 max_lba; /* from read cap(16) */
|
||||
u32 blk_len; /* from read cap(16) */
|
||||
enum lun_mode mode; /* NONE, VIRTUAL, PHYSICAL */
|
||||
int users; /* Number of users w/ references to LUN */
|
||||
|
||||
u8 wwid[16];
|
||||
|
||||
struct mutex mutex;
|
||||
|
||||
struct blka blka;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
/* Local (per-adapter) lun_info structure */
|
||||
struct llun_info {
|
||||
u64 lun_id[CXLFLASH_NUM_FC_PORTS]; /* from REPORT_LUNS */
|
||||
u32 lun_index; /* Index in the LUN table */
|
||||
u32 host_no; /* host_no from Scsi_host */
|
||||
u32 port_sel; /* What port to use for this LUN */
|
||||
bool newly_created; /* Whether the LUN was just discovered */
|
||||
bool in_table; /* Whether a LUN table entry was created */
|
||||
|
||||
u8 wwid[16]; /* Keep a duplicate copy here? */
|
||||
|
||||
struct glun_info *parent; /* Pointer to entry in global LUN structure */
|
||||
struct scsi_device *sdev;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
struct lun_access {
|
||||
struct llun_info *lli;
|
||||
struct scsi_device *sdev;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
enum ctx_ctrl {
|
||||
CTX_CTRL_CLONE = (1 << 1),
|
||||
CTX_CTRL_ERR = (1 << 2),
|
||||
CTX_CTRL_ERR_FALLBACK = (1 << 3),
|
||||
CTX_CTRL_NOPID = (1 << 4),
|
||||
CTX_CTRL_FILE = (1 << 5)
|
||||
};
|
||||
|
||||
#define ENCODE_CTXID(_ctx, _id) (((((u64)_ctx) & 0xFFFFFFFF0) << 28) | _id)
|
||||
#define DECODE_CTXID(_val) (_val & 0xFFFFFFFF)
|
||||
|
||||
struct ctx_info {
|
||||
struct sisl_ctrl_map *ctrl_map; /* initialized at startup */
|
||||
struct sisl_rht_entry *rht_start; /* 1 page (req'd for alignment),
|
||||
alloc/free on attach/detach */
|
||||
u32 rht_out; /* Number of checked out RHT entries */
|
||||
u32 rht_perms; /* User-defined permissions for RHT entries */
|
||||
struct llun_info **rht_lun; /* Mapping of RHT entries to LUNs */
|
||||
bool *rht_needs_ws; /* User-desired write-same function per RHTE */
|
||||
|
||||
struct cxl_ioctl_start_work work;
|
||||
u64 ctxid;
|
||||
int lfd;
|
||||
pid_t pid;
|
||||
bool unavail;
|
||||
bool err_recovery_active;
|
||||
struct mutex mutex; /* Context protection */
|
||||
struct cxl_context *ctx;
|
||||
struct list_head luns; /* LUNs attached to this context */
|
||||
const struct vm_operations_struct *cxl_mmap_vmops;
|
||||
struct file *file;
|
||||
struct list_head list; /* Link contexts in error recovery */
|
||||
};
|
||||
|
||||
struct cxlflash_global {
|
||||
struct mutex mutex;
|
||||
struct list_head gluns;/* list of glun_info structs */
|
||||
struct page *err_page; /* One page of all 0xF for error notification */
|
||||
};
|
||||
|
||||
int cxlflash_vlun_resize(struct scsi_device *, struct dk_cxlflash_resize *);
|
||||
int _cxlflash_vlun_resize(struct scsi_device *, struct ctx_info *,
|
||||
struct dk_cxlflash_resize *);
|
||||
|
||||
int cxlflash_disk_release(struct scsi_device *, struct dk_cxlflash_release *);
|
||||
int _cxlflash_disk_release(struct scsi_device *, struct ctx_info *,
|
||||
struct dk_cxlflash_release *);
|
||||
|
||||
int cxlflash_disk_clone(struct scsi_device *, struct dk_cxlflash_clone *);
|
||||
|
||||
int cxlflash_disk_virtual_open(struct scsi_device *, void *);
|
||||
|
||||
int cxlflash_lun_attach(struct glun_info *, enum lun_mode, bool);
|
||||
void cxlflash_lun_detach(struct glun_info *);
|
||||
|
||||
struct ctx_info *get_context(struct cxlflash_cfg *, u64, void *, enum ctx_ctrl);
|
||||
void put_context(struct ctx_info *);
|
||||
|
||||
struct sisl_rht_entry *get_rhte(struct ctx_info *, res_hndl_t,
|
||||
struct llun_info *);
|
||||
|
||||
struct sisl_rht_entry *rhte_checkout(struct ctx_info *, struct llun_info *);
|
||||
void rhte_checkin(struct ctx_info *, struct sisl_rht_entry *);
|
||||
|
||||
void cxlflash_ba_terminate(struct ba_lun *);
|
||||
|
||||
int cxlflash_manage_lun(struct scsi_device *, struct dk_cxlflash_manage_lun *);
|
||||
|
||||
#endif /* ifndef _CXLFLASH_SUPERPIPE_H */
|
1243	drivers/scsi/cxlflash/vlun.c (new file; diff suppressed because it is too large)
86	drivers/scsi/cxlflash/vlun.h (new file)
@@ -0,0 +1,86 @@
/*
|
||||
* CXL Flash Device Driver
|
||||
*
|
||||
* Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
|
||||
* Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
|
||||
*
|
||||
* Copyright (C) 2015 IBM Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#ifndef _CXLFLASH_VLUN_H
|
||||
#define _CXLFLASH_VLUN_H
|
||||
|
||||
/* RHT - Resource Handle Table */
#define MC_RHT_NMASK	16	/* in bits */
#define MC_CHUNK_SHIFT	MC_RHT_NMASK	/* shift to go from LBA to chunk# */

#define HIBIT		(BITS_PER_LONG - 1)

#define MAX_AUN_CLONE_CNT 0xFF

/*
 * LXT - LBA Translation Table
 *
 * +-------+-------+-------+-------+-------+-------+-------+---+---+
 * | RLBA_BASE                                     |LUN_IDX| P |SEL|
 * +-------+-------+-------+-------+-------+-------+-------+---+---+
 *
 * The LXT Entry contains the physical LBA where the chunk starts (RLBA_BASE).
 * AFU ORes the low order bits from the virtual LBA (offset into the chunk)
 * with RLBA_BASE. The result is the physical LBA to be sent to storage.
 * The LXT Entry also contains an index to a LUN TBL and a bitmask of which
 * outgoing (FC) ports can be selected. The port select bit-mask is ANDed
 * with a global port select bit-mask maintained by the driver.
 * In addition, it has permission bits that are ANDed with the
 * RHT permissions to arrive at the final permissions for the chunk.
 *
 * LXT tables are allocated dynamically in groups. This is done to avoid
 * a malloc/free overhead each time the LXT has to grow or shrink.
 *
 * Based on the current lxt_cnt (used), it is always possible to know
 * how many are allocated (used+free). The number of allocated entries is
 * not stored anywhere.
 *
 * The LXT table is re-allocated whenever it needs to cross into another group.
 */
#define LXT_GROUP_SIZE		8
#define LXT_NUM_GROUPS(lxt_cnt)	(((lxt_cnt) + 7)/8)	/* alloc'ed groups */
#define LXT_LUNIDX_SHIFT	8	/* LXT entry, shift for LUN index */
#define LXT_PERM_SHIFT		4	/* LXT entry, shift for permission bits */

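/*
 * Illustrative sketch (not from the driver): packing one LXT entry from
 * the fields described above using the shifts defined here. "aun" is the
 * allocation-unit (chunk) number handed out by the block allocator; a
 * chunk is 1 << MC_CHUNK_SHIFT LBAs, so the chunk's starting LBA has its
 * low 16 bits clear and the metadata fits underneath it. The helper name
 * and arguments are assumptions for the example.
 */
static inline u64 example_pack_lxt_entry(u64 aun, u32 lun_index, u8 perms, u8 port_sel)
{
	return (aun << MC_CHUNK_SHIFT) |		/* RLBA_BASE: chunk's starting physical LBA */
	       ((u64)lun_index << LXT_LUNIDX_SHIFT) |	/* LUN_IDX: index into the AFU LUN table */
	       ((u64)perms << LXT_PERM_SHIFT) |		/* P: chunk-level read/write permissions */
	       (u64)port_sel;				/* SEL: FC port select mask */
}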
struct ba_lun_info {
	u64 *lun_alloc_map;
	u32 lun_bmap_size;
	u32 total_aus;
	u64 free_aun_cnt;

	/* indices to be used for elevator lookup of free map */
	u32 free_low_idx;
	u32 free_curr_idx;
	u32 free_high_idx;

	u8 *aun_clone_map;
};

struct ba_lun {
	u64 lun_id;
	u64 wwpn;
	size_t lsize;		/* LUN size in number of LBAs             */
	size_t lba_size;	/* LBA size in number of bytes            */
	size_t au_size;		/* Allocation Unit size in number of LBAs */
	struct ba_lun_info *ba_lun_handle;
};

/* Block Allocator */
struct blka {
	struct ba_lun ba_lun;
	u64 nchunk;		/* number of chunks */
	struct mutex mutex;
};

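/*
 * Illustrative sketch (not from the driver): how the ba_lun_info sizing
 * fields relate to the ba_lun geometry. The rounding and the helper name
 * are assumptions for the example.
 */
static inline void example_size_ba_lun(struct ba_lun *bl, struct ba_lun_info *bli)
{
	bli->total_aus = bl->lsize / bl->au_size;	/* whole allocation units on the LUN */
	bli->lun_bmap_size = (bli->total_aus + BITS_PER_LONG - 1) / BITS_PER_LONG;
							/* long words in lun_alloc_map */
	bli->free_aun_cnt = bli->total_aus;		/* everything starts free */
}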
#endif /* ifndef _CXLFLASH_VLUN_H */
|
@ -1,6 +1,7 @@
|
||||
/*
|
||||
* Disk Array driver for HP Smart Array SAS controllers
|
||||
* Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
|
||||
* Copyright 2014-2015 PMC-Sierra, Inc.
|
||||
* Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
@ -11,11 +12,7 @@
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*
|
||||
* Questions/Comments/Bugfixes to iss_storagedev@hp.com
|
||||
* Questions/Comments/Bugfixes to storagedev@pmcs.com
|
||||
*
|
||||
*/
|
||||
|
||||
@ -132,6 +129,11 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
|
||||
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
|
||||
{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
|
||||
{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
|
||||
{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
|
||||
{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
|
||||
{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
|
||||
{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
|
||||
{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
|
||||
{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
|
||||
{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
|
||||
@ -190,6 +192,11 @@ static struct board_type products[] = {
|
||||
{0x21CD103C, "Smart Array", &SA5_access},
|
||||
{0x21CE103C, "Smart HBA", &SA5_access},
|
||||
{0x05809005, "SmartHBA-SA", &SA5_access},
|
||||
{0x05819005, "SmartHBA-SA 8i", &SA5_access},
|
||||
{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
|
||||
{0x05839005, "SmartHBA-SA 8e", &SA5_access},
|
||||
{0x05849005, "SmartHBA-SA 16i", &SA5_access},
|
||||
{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
|
||||
{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
|
||||
{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
|
||||
{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
|
||||
@ -267,6 +274,7 @@ static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
|
||||
static void hpsa_command_resubmit_worker(struct work_struct *work);
|
||||
static u32 lockup_detected(struct ctlr_info *h);
|
||||
static int detect_controller_lockup(struct ctlr_info *h);
|
||||
static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device);
|
||||
|
||||
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
|
||||
{
|
||||
@ -325,7 +333,7 @@ static int check_for_unit_attention(struct ctlr_info *h,
|
||||
|
||||
decode_sense_data(c->err_info->SenseInfo, sense_len,
|
||||
&sense_key, &asc, &ascq);
|
||||
if (sense_key != UNIT_ATTENTION || asc == -1)
|
||||
if (sense_key != UNIT_ATTENTION || asc == 0xff)
|
||||
return 0;
|
||||
|
||||
switch (asc) {
|
||||
@ -717,12 +725,107 @@ static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
|
||||
return snprintf(buf, 20, "%d\n", offload_enabled);
|
||||
}
|
||||
|
||||
#define MAX_PATHS 8
|
||||
#define PATH_STRING_LEN 50
|
||||
|
||||
static ssize_t path_info_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct ctlr_info *h;
|
||||
struct scsi_device *sdev;
|
||||
struct hpsa_scsi_dev_t *hdev;
|
||||
unsigned long flags;
|
||||
int i;
|
||||
int output_len = 0;
|
||||
u8 box;
|
||||
u8 bay;
|
||||
u8 path_map_index = 0;
|
||||
char *active;
|
||||
unsigned char phys_connector[2];
|
||||
unsigned char path[MAX_PATHS][PATH_STRING_LEN];
|
||||
|
||||
memset(path, 0, MAX_PATHS * PATH_STRING_LEN);
|
||||
sdev = to_scsi_device(dev);
|
||||
h = sdev_to_hba(sdev);
|
||||
spin_lock_irqsave(&h->devlock, flags);
|
||||
hdev = sdev->hostdata;
|
||||
if (!hdev) {
|
||||
spin_unlock_irqrestore(&h->devlock, flags);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
bay = hdev->bay;
|
||||
for (i = 0; i < MAX_PATHS; i++) {
|
||||
path_map_index = 1<<i;
|
||||
if (i == hdev->active_path_index)
|
||||
active = "Active";
|
||||
else if (hdev->path_map & path_map_index)
|
||||
active = "Inactive";
|
||||
else
|
||||
continue;
|
||||
|
||||
output_len = snprintf(path[i],
|
||||
PATH_STRING_LEN, "[%d:%d:%d:%d] %20.20s ",
|
||||
h->scsi_host->host_no,
|
||||
hdev->bus, hdev->target, hdev->lun,
|
||||
scsi_device_type(hdev->devtype));
|
||||
|
||||
if (is_ext_target(h, hdev) ||
|
||||
(hdev->devtype == TYPE_RAID) ||
|
||||
is_logical_dev_addr_mode(hdev->scsi3addr)) {
|
||||
output_len += snprintf(path[i] + output_len,
|
||||
PATH_STRING_LEN, "%s\n",
|
||||
active);
|
||||
continue;
|
||||
}
|
||||
|
||||
box = hdev->box[i];
|
||||
memcpy(&phys_connector, &hdev->phys_connector[i],
|
||||
sizeof(phys_connector));
|
||||
if (phys_connector[0] < '0')
|
||||
phys_connector[0] = '0';
|
||||
if (phys_connector[1] < '0')
|
||||
phys_connector[1] = '0';
|
||||
if (hdev->phys_connector[i] > 0)
|
||||
output_len += snprintf(path[i] + output_len,
|
||||
PATH_STRING_LEN,
|
||||
"PORT: %.2s ",
|
||||
phys_connector);
|
||||
if (hdev->devtype == TYPE_DISK &&
|
||||
hdev->expose_state != HPSA_DO_NOT_EXPOSE) {
|
||||
if (box == 0 || box == 0xFF) {
|
||||
output_len += snprintf(path[i] + output_len,
|
||||
PATH_STRING_LEN,
|
||||
"BAY: %hhu %s\n",
|
||||
bay, active);
|
||||
} else {
|
||||
output_len += snprintf(path[i] + output_len,
|
||||
PATH_STRING_LEN,
|
||||
"BOX: %hhu BAY: %hhu %s\n",
|
||||
box, bay, active);
|
||||
}
|
||||
} else if (box != 0 && box != 0xFF) {
|
||||
output_len += snprintf(path[i] + output_len,
|
||||
PATH_STRING_LEN, "BOX: %hhu %s\n",
|
||||
box, active);
|
||||
} else
|
||||
output_len += snprintf(path[i] + output_len,
|
||||
PATH_STRING_LEN, "%s\n", active);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&h->devlock, flags);
|
||||
return snprintf(buf, output_len+1, "%s%s%s%s%s%s%s%s",
|
||||
path[0], path[1], path[2], path[3],
|
||||
path[4], path[5], path[6], path[7]);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
|
||||
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
|
||||
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
|
||||
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
|
||||
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
|
||||
host_show_hp_ssd_smart_path_enabled, NULL);
|
||||
static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
|
||||
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
|
||||
host_show_hp_ssd_smart_path_status,
|
||||
host_store_hp_ssd_smart_path_status);
|
||||
@ -744,6 +847,7 @@ static struct device_attribute *hpsa_sdev_attrs[] = {
|
||||
&dev_attr_lunid,
|
||||
&dev_attr_unique_id,
|
||||
&dev_attr_hp_ssd_smart_path_enabled,
|
||||
&dev_attr_path_info,
|
||||
&dev_attr_lockup_detected,
|
||||
NULL,
|
||||
};
|
||||
@ -1083,17 +1187,19 @@ static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
|
||||
|
||||
/* This is a non-zero lun of a multi-lun device.
|
||||
* Search through our list and find the device which
|
||||
* has the same 8 byte LUN address, excepting byte 4.
|
||||
* has the same 8 byte LUN address, excepting byte 4 and 5.
|
||||
* Assign the same bus and target for this new LUN.
|
||||
* Use the logical unit number from the firmware.
|
||||
*/
|
||||
memcpy(addr1, device->scsi3addr, 8);
|
||||
addr1[4] = 0;
|
||||
addr1[5] = 0;
|
||||
for (i = 0; i < n; i++) {
|
||||
sd = h->dev[i];
|
||||
memcpy(addr2, sd->scsi3addr, 8);
|
||||
addr2[4] = 0;
|
||||
/* differ only in byte 4? */
|
||||
addr2[5] = 0;
|
||||
/* differ only in byte 4 and 5? */
|
||||
if (memcmp(addr1, addr2, 8) == 0) {
|
||||
device->bus = sd->bus;
|
||||
device->target = sd->target;
|
||||
@ -1286,8 +1392,9 @@ static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
|
||||
return 1;
|
||||
if (dev1->offload_enabled != dev2->offload_enabled)
|
||||
return 1;
|
||||
if (dev1->queue_depth != dev2->queue_depth)
|
||||
return 1;
|
||||
if (!is_logical_dev_addr_mode(dev1->scsi3addr))
|
||||
if (dev1->queue_depth != dev2->queue_depth)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1376,17 +1483,23 @@ static void hpsa_show_volume_status(struct ctlr_info *h,
|
||||
h->scsi_host->host_no,
|
||||
sd->bus, sd->target, sd->lun);
|
||||
break;
|
||||
case HPSA_LV_NOT_AVAILABLE:
|
||||
dev_info(&h->pdev->dev,
|
||||
"C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
|
||||
h->scsi_host->host_no,
|
||||
sd->bus, sd->target, sd->lun);
|
||||
break;
|
||||
case HPSA_LV_UNDERGOING_RPI:
|
||||
dev_info(&h->pdev->dev,
|
||||
"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
|
||||
"C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
|
||||
h->scsi_host->host_no,
|
||||
sd->bus, sd->target, sd->lun);
|
||||
break;
|
||||
case HPSA_LV_PENDING_RPI:
|
||||
dev_info(&h->pdev->dev,
|
||||
"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
|
||||
h->scsi_host->host_no,
|
||||
sd->bus, sd->target, sd->lun);
|
||||
"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
|
||||
h->scsi_host->host_no,
|
||||
sd->bus, sd->target, sd->lun);
|
||||
break;
|
||||
case HPSA_LV_ENCRYPTED_NO_KEY:
|
||||
dev_info(&h->pdev->dev,
|
||||
@ -2585,34 +2698,6 @@ out:
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
|
||||
unsigned char *scsi3addr, unsigned char page,
|
||||
struct bmic_controller_parameters *buf, size_t bufsize)
|
||||
{
|
||||
int rc = IO_OK;
|
||||
struct CommandList *c;
|
||||
struct ErrorInfo *ei;
|
||||
|
||||
c = cmd_alloc(h);
|
||||
if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
|
||||
page, scsi3addr, TYPE_CMD)) {
|
||||
rc = -1;
|
||||
goto out;
|
||||
}
|
||||
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
|
||||
PCI_DMA_FROMDEVICE, NO_TIMEOUT);
|
||||
if (rc)
|
||||
goto out;
|
||||
ei = c->err_info;
|
||||
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
|
||||
hpsa_scsi_interpret_error(h, c);
|
||||
rc = -1;
|
||||
}
|
||||
out:
|
||||
cmd_free(h, c);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
|
||||
u8 reset_type, int reply_queue)
|
||||
{
|
||||
@ -2749,11 +2834,10 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
|
||||
lockup_detected(h));
|
||||
|
||||
if (unlikely(lockup_detected(h))) {
|
||||
dev_warn(&h->pdev->dev,
|
||||
"Controller lockup detected during reset wait\n");
|
||||
mutex_unlock(&h->reset_mutex);
|
||||
rc = -ENODEV;
|
||||
}
|
||||
dev_warn(&h->pdev->dev,
|
||||
"Controller lockup detected during reset wait\n");
|
||||
rc = -ENODEV;
|
||||
}
|
||||
|
||||
if (unlikely(rc))
|
||||
atomic_set(&dev->reset_cmds_out, 0);
|
||||
@ -3186,6 +3270,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
|
||||
/* Keep volume offline in certain cases: */
|
||||
switch (ldstat) {
|
||||
case HPSA_LV_UNDERGOING_ERASE:
|
||||
case HPSA_LV_NOT_AVAILABLE:
|
||||
case HPSA_LV_UNDERGOING_RPI:
|
||||
case HPSA_LV_PENDING_RPI:
|
||||
case HPSA_LV_ENCRYPTED_NO_KEY:
|
||||
@ -3562,29 +3647,6 @@ static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int hpsa_hba_mode_enabled(struct ctlr_info *h)
|
||||
{
|
||||
int rc;
|
||||
int hba_mode_enabled;
|
||||
struct bmic_controller_parameters *ctlr_params;
|
||||
ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
|
||||
GFP_KERNEL);
|
||||
|
||||
if (!ctlr_params)
|
||||
return -ENOMEM;
|
||||
rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
|
||||
sizeof(struct bmic_controller_parameters));
|
||||
if (rc) {
|
||||
kfree(ctlr_params);
|
||||
return rc;
|
||||
}
|
||||
|
||||
hba_mode_enabled =
|
||||
((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
|
||||
kfree(ctlr_params);
|
||||
return hba_mode_enabled;
|
||||
}
|
||||
|
||||
/* get physical drive ioaccel handle and queue depth */
|
||||
static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
|
||||
struct hpsa_scsi_dev_t *dev,
|
||||
@ -3615,6 +3677,31 @@ static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
|
||||
atomic_set(&dev->reset_cmds_out, 0);
|
||||
}
|
||||
|
||||
static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
|
||||
u8 *lunaddrbytes,
|
||||
struct bmic_identify_physical_device *id_phys)
|
||||
{
|
||||
if (PHYS_IOACCEL(lunaddrbytes)
|
||||
&& this_device->ioaccel_handle)
|
||||
this_device->hba_ioaccel_enabled = 1;
|
||||
|
||||
memcpy(&this_device->active_path_index,
|
||||
&id_phys->active_path_number,
|
||||
sizeof(this_device->active_path_index));
|
||||
memcpy(&this_device->path_map,
|
||||
&id_phys->redundant_path_present_map,
|
||||
sizeof(this_device->path_map));
|
||||
memcpy(&this_device->box,
|
||||
&id_phys->alternate_paths_phys_box_on_port,
|
||||
sizeof(this_device->box));
|
||||
memcpy(&this_device->phys_connector,
|
||||
&id_phys->alternate_paths_phys_connector,
|
||||
sizeof(this_device->phys_connector));
|
||||
memcpy(&this_device->bay,
|
||||
&id_phys->phys_bay_in_box,
|
||||
sizeof(this_device->bay));
|
||||
}
|
||||
|
||||
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
|
||||
{
|
||||
/* the idea here is we could get notified
|
||||
@ -3637,7 +3724,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
|
||||
int ncurrent = 0;
|
||||
int i, n_ext_target_devs, ndevs_to_allocate;
|
||||
int raid_ctlr_position;
|
||||
int rescan_hba_mode;
|
||||
DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
|
||||
|
||||
currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
|
||||
@ -3653,17 +3739,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
|
||||
}
|
||||
memset(lunzerobits, 0, sizeof(lunzerobits));
|
||||
|
||||
rescan_hba_mode = hpsa_hba_mode_enabled(h);
|
||||
if (rescan_hba_mode < 0)
|
||||
goto out;
|
||||
|
||||
if (!h->hba_mode_enabled && rescan_hba_mode)
|
||||
dev_warn(&h->pdev->dev, "HBA mode enabled\n");
|
||||
else if (h->hba_mode_enabled && !rescan_hba_mode)
|
||||
dev_warn(&h->pdev->dev, "HBA mode disabled\n");
|
||||
|
||||
h->hba_mode_enabled = rescan_hba_mode;
|
||||
|
||||
if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
|
||||
logdev_list, &nlogicals))
|
||||
goto out;
|
||||
@ -3739,9 +3814,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
|
||||
/* do not expose masked devices */
|
||||
if (MASKED_DEVICE(lunaddrbytes) &&
|
||||
i < nphysicals + (raid_ctlr_position == 0)) {
|
||||
if (h->hba_mode_enabled)
|
||||
dev_warn(&h->pdev->dev,
|
||||
"Masked physical device detected\n");
|
||||
this_device->expose_state = HPSA_DO_NOT_EXPOSE;
|
||||
} else {
|
||||
this_device->expose_state =
|
||||
@ -3761,30 +3833,21 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
|
||||
ncurrent++;
|
||||
break;
|
||||
case TYPE_DISK:
|
||||
if (i >= nphysicals) {
|
||||
ncurrent++;
|
||||
break;
|
||||
}
|
||||
|
||||
if (h->hba_mode_enabled)
|
||||
/* never use raid mapper in HBA mode */
|
||||
if (i < nphysicals + (raid_ctlr_position == 0)) {
|
||||
/* The disk is in HBA mode. */
|
||||
/* Never use RAID mapper in HBA mode. */
|
||||
this_device->offload_enabled = 0;
|
||||
else if (!(h->transMethod & CFGTBL_Trans_io_accel1 ||
|
||||
h->transMethod & CFGTBL_Trans_io_accel2))
|
||||
break;
|
||||
|
||||
hpsa_get_ioaccel_drive_info(h, this_device,
|
||||
lunaddrbytes, id_phys);
|
||||
atomic_set(&this_device->ioaccel_cmds_out, 0);
|
||||
hpsa_get_ioaccel_drive_info(h, this_device,
|
||||
lunaddrbytes, id_phys);
|
||||
hpsa_get_path_info(this_device, lunaddrbytes,
|
||||
id_phys);
|
||||
}
|
||||
ncurrent++;
|
||||
break;
|
||||
case TYPE_TAPE:
|
||||
case TYPE_MEDIUM_CHANGER:
|
||||
ncurrent++;
|
||||
break;
|
||||
case TYPE_ENCLOSURE:
|
||||
if (h->hba_mode_enabled)
|
||||
ncurrent++;
|
||||
ncurrent++;
|
||||
break;
|
||||
case TYPE_RAID:
|
||||
/* Only present the Smartarray HBA as a RAID controller.
|
||||
@ -5104,7 +5167,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
|
||||
int rc;
|
||||
struct ctlr_info *h;
|
||||
struct hpsa_scsi_dev_t *dev;
|
||||
char msg[40];
|
||||
char msg[48];
|
||||
|
||||
/* find the controller to which the command to be aborted was sent */
|
||||
h = sdev_to_hba(scsicmd->device);
|
||||
@ -5122,16 +5185,18 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
|
||||
|
||||
/* if controller locked up, we can guarantee command won't complete */
|
||||
if (lockup_detected(h)) {
|
||||
sprintf(msg, "cmd %d RESET FAILED, lockup detected",
|
||||
hpsa_get_cmd_index(scsicmd));
|
||||
snprintf(msg, sizeof(msg),
|
||||
"cmd %d RESET FAILED, lockup detected",
|
||||
hpsa_get_cmd_index(scsicmd));
|
||||
hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
|
||||
return FAILED;
|
||||
}
|
||||
|
||||
/* this reset request might be the result of a lockup; check */
|
||||
if (detect_controller_lockup(h)) {
|
||||
sprintf(msg, "cmd %d RESET FAILED, new lockup detected",
|
||||
hpsa_get_cmd_index(scsicmd));
|
||||
snprintf(msg, sizeof(msg),
|
||||
"cmd %d RESET FAILED, new lockup detected",
|
||||
hpsa_get_cmd_index(scsicmd));
|
||||
hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
|
||||
return FAILED;
|
||||
}
|
||||
@ -5145,7 +5210,8 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
|
||||
/* send a reset to the SCSI LUN which the command was sent to */
|
||||
rc = hpsa_do_reset(h, dev, dev->scsi3addr, HPSA_RESET_TYPE_LUN,
|
||||
DEFAULT_REPLY_QUEUE);
|
||||
sprintf(msg, "reset %s", rc == 0 ? "completed successfully" : "failed");
|
||||
snprintf(msg, sizeof(msg), "reset %s",
|
||||
rc == 0 ? "completed successfully" : "failed");
|
||||
hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
|
||||
return rc == 0 ? SUCCESS : FAILED;
|
||||
}
|
||||
@ -7989,7 +8055,6 @@ reinit_after_soft_reset:
|
||||
|
||||
pci_set_drvdata(pdev, h);
|
||||
h->ndevices = 0;
|
||||
h->hba_mode_enabled = 0;
|
||||
|
||||
spin_lock_init(&h->devlock);
|
||||
rc = hpsa_put_ctlr_into_performant_mode(h);
|
||||
@ -8054,7 +8119,7 @@ reinit_after_soft_reset:
|
||||
rc = hpsa_kdump_soft_reset(h);
|
||||
if (rc)
|
||||
/* Neither hard nor soft reset worked, we're hosed. */
|
||||
goto clean9;
|
||||
goto clean7;
|
||||
|
||||
dev_info(&h->pdev->dev, "Board READY.\n");
|
||||
dev_info(&h->pdev->dev,
|
||||
@ -8100,8 +8165,6 @@ reinit_after_soft_reset:
|
||||
h->heartbeat_sample_interval);
|
||||
return 0;
|
||||
|
||||
clean9: /* wq, sh, perf, sg, cmd, irq, shost, pci, lu, aer/h */
|
||||
kfree(h->hba_inquiry_data);
|
||||
clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
|
||||
hpsa_free_performant_mode(h);
|
||||
h->access.set_intr_mask(h, HPSA_INTR_OFF);
|
||||
@ -8209,6 +8272,14 @@ static void hpsa_remove_one(struct pci_dev *pdev)
|
||||
destroy_workqueue(h->rescan_ctlr_wq);
|
||||
destroy_workqueue(h->resubmit_wq);
|
||||
|
||||
/*
|
||||
* Call before disabling interrupts.
|
||||
* scsi_remove_host can trigger I/O operations especially
|
||||
* when multipath is enabled. There can be SYNCHRONIZE CACHE
|
||||
* operations which cannot complete and will hang the system.
|
||||
*/
|
||||
if (h->scsi_host)
|
||||
scsi_remove_host(h->scsi_host); /* init_one 8 */
|
||||
/* includes hpsa_free_irqs - init_one 4 */
|
||||
/* includes hpsa_disable_interrupt_mode - pci_init 2 */
|
||||
hpsa_shutdown(pdev);
|
||||
@ -8217,8 +8288,6 @@ static void hpsa_remove_one(struct pci_dev *pdev)
|
||||
|
||||
kfree(h->hba_inquiry_data); /* init_one 10 */
|
||||
h->hba_inquiry_data = NULL; /* init_one 10 */
|
||||
if (h->scsi_host)
|
||||
scsi_remove_host(h->scsi_host); /* init_one 8 */
|
||||
hpsa_free_ioaccel2_sg_chain_blocks(h);
|
||||
hpsa_free_performant_mode(h); /* init_one 7 */
|
||||
hpsa_free_sg_chain_blocks(h); /* init_one 6 */
|
||||
|
@ -1,6 +1,7 @@
|
||||
/*
|
||||
* Disk Array driver for HP Smart Array SAS controllers
|
||||
* Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
|
||||
* Copyright 2014-2015 PMC-Sierra, Inc.
|
||||
* Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
@ -11,11 +12,7 @@
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*
|
||||
* Questions/Comments/Bugfixes to iss_storagedev@hp.com
|
||||
* Questions/Comments/Bugfixes to storagedev@pmcs.com
|
||||
*
|
||||
*/
|
||||
#ifndef HPSA_H
|
||||
@ -53,6 +50,11 @@ struct hpsa_scsi_dev_t {
|
||||
* device via "ioaccel" path.
|
||||
*/
|
||||
u32 ioaccel_handle;
|
||||
u8 active_path_index;
|
||||
u8 path_map;
|
||||
u8 bay;
|
||||
u8 box[8];
|
||||
u16 phys_connector[8];
|
||||
int offload_config; /* I/O accel RAID offload configured */
|
||||
int offload_enabled; /* I/O accel RAID offload enabled */
|
||||
int offload_to_be_enabled;
|
||||
@ -114,7 +116,6 @@ struct bmic_controller_parameters {
|
||||
u8 automatic_drive_slamming;
|
||||
u8 reserved1;
|
||||
u8 nvram_flags;
|
||||
#define HBA_MODE_ENABLED_FLAG (1 << 3)
|
||||
u8 cache_nvram_flags;
|
||||
u8 drive_config_flags;
|
||||
u16 reserved2;
|
||||
@ -153,7 +154,6 @@ struct ctlr_info {
|
||||
unsigned int msi_vector;
|
||||
int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
|
||||
struct access_method access;
|
||||
char hba_mode_enabled;
|
||||
|
||||
/* queue and queue Info */
|
||||
unsigned int Qdepth;
|
||||
|
@ -1,6 +1,7 @@
|
||||
/*
|
||||
* Disk Array driver for HP Smart Array SAS controllers
|
||||
* Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
|
||||
* Copyright 2014-2015 PMC-Sierra, Inc.
|
||||
* Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
@ -11,11 +12,7 @@
|
||||
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
|
||||
* NON INFRINGEMENT. See the GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*
|
||||
* Questions/Comments/Bugfixes to iss_storagedev@hp.com
|
||||
* Questions/Comments/Bugfixes to storagedev@pmcs.com
|
||||
*
|
||||
*/
|
||||
#ifndef HPSA_CMD_H
|
||||
@ -167,6 +164,7 @@
|
||||
/* Logical volume states */
|
||||
#define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff
|
||||
#define HPSA_LV_OK 0x0
|
||||
#define HPSA_LV_NOT_AVAILABLE 0x0b
|
||||
#define HPSA_LV_UNDERGOING_ERASE 0x0F
|
||||
#define HPSA_LV_UNDERGOING_RPI 0x12
|
||||
#define HPSA_LV_PENDING_RPI 0x13
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* HighPoint RR3xxx/4xxx controller driver for Linux
|
||||
* Copyright (C) 2006-2012 HighPoint Technologies, Inc. All Rights Reserved.
|
||||
* Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
@ -42,7 +42,7 @@ MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");
|
||||
|
||||
static char driver_name[] = "hptiop";
|
||||
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
|
||||
static const char driver_ver[] = "v1.8";
|
||||
static const char driver_ver[] = "v1.10.0";
|
||||
|
||||
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
|
||||
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
|
||||
@ -764,9 +764,7 @@ static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
|
||||
scsi_set_resid(scp,
|
||||
scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
|
||||
scp->result = SAM_STAT_CHECK_CONDITION;
|
||||
memcpy(scp->sense_buffer, &req->sg_list,
|
||||
min_t(size_t, SCSI_SENSE_BUFFERSIZE,
|
||||
le32_to_cpu(req->dataxfer_length)));
|
||||
memcpy(scp->sense_buffer, &req->sg_list, SCSI_SENSE_BUFFERSIZE);
|
||||
goto skip_resid;
|
||||
break;
|
||||
|
||||
@ -1037,8 +1035,9 @@ static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
|
||||
|
||||
scp->result = 0;
|
||||
|
||||
if (scp->device->channel || scp->device->lun ||
|
||||
scp->device->id > hba->max_devices) {
|
||||
if (scp->device->channel ||
|
||||
(scp->device->id > hba->max_devices) ||
|
||||
((scp->device->id == (hba->max_devices-1)) && scp->device->lun)) {
|
||||
scp->result = DID_BAD_TARGET << 16;
|
||||
free_req(hba, _req);
|
||||
goto cmd_done;
|
||||
@ -1168,6 +1167,14 @@ static struct device_attribute *hptiop_attrs[] = {
|
||||
NULL
|
||||
};
|
||||
|
||||
static int hptiop_slave_config(struct scsi_device *sdev)
|
||||
{
|
||||
if (sdev->type == TYPE_TAPE)
|
||||
blk_queue_max_hw_sectors(sdev->request_queue, 8192);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct scsi_host_template driver_template = {
|
||||
.module = THIS_MODULE,
|
||||
.name = driver_name,
|
||||
@ -1179,6 +1186,7 @@ static struct scsi_host_template driver_template = {
|
||||
.use_clustering = ENABLE_CLUSTERING,
|
||||
.proc_name = driver_name,
|
||||
.shost_attrs = hptiop_attrs,
|
||||
.slave_configure = hptiop_slave_config,
|
||||
.this_id = -1,
|
||||
.change_queue_depth = hptiop_adjust_disk_queue_depth,
|
||||
};
|
||||
@ -1323,6 +1331,7 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
|
||||
}
|
||||
|
||||
hba = (struct hptiop_hba *)host->hostdata;
|
||||
memset(hba, 0, sizeof(struct hptiop_hba));
|
||||
|
||||
hba->ops = iop_ops;
|
||||
hba->pcidev = pcidev;
|
||||
@ -1336,7 +1345,7 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
|
||||
init_waitqueue_head(&hba->reset_wq);
|
||||
init_waitqueue_head(&hba->ioctl_wq);
|
||||
|
||||
host->max_lun = 1;
|
||||
host->max_lun = 128;
|
||||
host->max_channel = 0;
|
||||
host->io_port = 0;
|
||||
host->n_io_port = 0;
|
||||
@ -1428,34 +1437,33 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
|
||||
dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);
|
||||
|
||||
hba->req_size = req_size;
|
||||
start_virt = dma_alloc_coherent(&pcidev->dev,
|
||||
hba->req_size*hba->max_requests + 0x20,
|
||||
&start_phy, GFP_KERNEL);
|
||||
|
||||
if (!start_virt) {
|
||||
printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
|
||||
hba->host->host_no);
|
||||
goto free_request_irq;
|
||||
}
|
||||
|
||||
hba->dma_coherent = start_virt;
|
||||
hba->dma_coherent_handle = start_phy;
|
||||
|
||||
if ((start_phy & 0x1f) != 0) {
|
||||
offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
|
||||
start_phy += offset;
|
||||
start_virt += offset;
|
||||
}
|
||||
|
||||
hba->req_list = NULL;
|
||||
|
||||
for (i = 0; i < hba->max_requests; i++) {
|
||||
start_virt = dma_alloc_coherent(&pcidev->dev,
|
||||
hba->req_size + 0x20,
|
||||
&start_phy, GFP_KERNEL);
|
||||
|
||||
if (!start_virt) {
|
||||
printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
|
||||
hba->host->host_no);
|
||||
goto free_request_mem;
|
||||
}
|
||||
|
||||
hba->dma_coherent[i] = start_virt;
|
||||
hba->dma_coherent_handle[i] = start_phy;
|
||||
|
||||
if ((start_phy & 0x1f) != 0) {
|
||||
offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
|
||||
start_phy += offset;
|
||||
start_virt += offset;
|
||||
}
|
||||
|
||||
hba->reqs[i].next = NULL;
|
||||
hba->reqs[i].req_virt = start_virt;
|
||||
hba->reqs[i].req_shifted_phy = start_phy >> 5;
|
||||
hba->reqs[i].index = i;
|
||||
free_req(hba, &hba->reqs[i]);
|
||||
start_virt = (char *)start_virt + hba->req_size;
|
||||
start_phy = start_phy + hba->req_size;
|
||||
}
|
||||
|
||||
/* Enable Interrupt and start background task */
|
||||
@ -1474,11 +1482,16 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
|
||||
return 0;
|
||||
|
||||
free_request_mem:
|
||||
dma_free_coherent(&hba->pcidev->dev,
|
||||
hba->req_size * hba->max_requests + 0x20,
|
||||
hba->dma_coherent, hba->dma_coherent_handle);
|
||||
for (i = 0; i < hba->max_requests; i++) {
|
||||
if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
|
||||
dma_free_coherent(&hba->pcidev->dev,
|
||||
hba->req_size + 0x20,
|
||||
hba->dma_coherent[i],
|
||||
hba->dma_coherent_handle[i]);
|
||||
else
|
||||
break;
|
||||
}
|
||||
|
||||
free_request_irq:
|
||||
free_irq(hba->pcidev->irq, hba);
|
||||
|
||||
unmap_pci_bar:
|
||||
@ -1546,6 +1559,7 @@ static void hptiop_remove(struct pci_dev *pcidev)
|
||||
{
|
||||
struct Scsi_Host *host = pci_get_drvdata(pcidev);
|
||||
struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
|
||||
u32 i;
|
||||
|
||||
dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);
|
||||
|
||||
@ -1555,10 +1569,15 @@ static void hptiop_remove(struct pci_dev *pcidev)
|
||||
|
||||
free_irq(hba->pcidev->irq, hba);
|
||||
|
||||
dma_free_coherent(&hba->pcidev->dev,
|
||||
hba->req_size * hba->max_requests + 0x20,
|
||||
hba->dma_coherent,
|
||||
hba->dma_coherent_handle);
|
||||
for (i = 0; i < hba->max_requests; i++) {
|
||||
if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
|
||||
dma_free_coherent(&hba->pcidev->dev,
|
||||
hba->req_size + 0x20,
|
||||
hba->dma_coherent[i],
|
||||
hba->dma_coherent_handle[i]);
|
||||
else
|
||||
break;
|
||||
}
|
||||
|
||||
hba->ops->internal_memfree(hba);
|
||||
|
||||
@ -1653,6 +1672,14 @@ static struct pci_device_id hptiop_id_table[] = {
|
||||
{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x4520), (kernel_ulong_t)&hptiop_mvfrey_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x4522), (kernel_ulong_t)&hptiop_mvfrey_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x3610), (kernel_ulong_t)&hptiop_mvfrey_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x3611), (kernel_ulong_t)&hptiop_mvfrey_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x3620), (kernel_ulong_t)&hptiop_mvfrey_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x3622), (kernel_ulong_t)&hptiop_mvfrey_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x3640), (kernel_ulong_t)&hptiop_mvfrey_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x3660), (kernel_ulong_t)&hptiop_mvfrey_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x3680), (kernel_ulong_t)&hptiop_mvfrey_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x3690), (kernel_ulong_t)&hptiop_mvfrey_ops },
|
||||
{},
|
||||
};
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* HighPoint RR3xxx/4xxx controller driver for Linux
|
||||
* Copyright (C) 2006-2012 HighPoint Technologies, Inc. All Rights Reserved.
|
||||
* Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
@ -327,8 +327,8 @@ struct hptiop_hba {
|
||||
struct hptiop_request reqs[HPTIOP_MAX_REQUESTS];
|
||||
|
||||
/* used to free allocated dma area */
|
||||
void *dma_coherent;
|
||||
dma_addr_t dma_coherent_handle;
|
||||
void *dma_coherent[HPTIOP_MAX_REQUESTS];
|
||||
dma_addr_t dma_coherent_handle[HPTIOP_MAX_REQUESTS];
|
||||
|
||||
atomic_t reset_count;
|
||||
atomic_t resetting;
|
||||
|
@ -1165,7 +1165,8 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
|
||||
|
||||
if (ioa_cfg->sis64) {
|
||||
proto = cfgtew->u.cfgte64->proto;
|
||||
res->res_flags = cfgtew->u.cfgte64->res_flags;
|
||||
res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
|
||||
res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
|
||||
res->qmodel = IPR_QUEUEING_MODEL64(res);
|
||||
res->type = cfgtew->u.cfgte64->res_type;
|
||||
|
||||
@ -1313,8 +1314,8 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
|
||||
int new_path = 0;
|
||||
|
||||
if (res->ioa_cfg->sis64) {
|
||||
res->flags = cfgtew->u.cfgte64->flags;
|
||||
res->res_flags = cfgtew->u.cfgte64->res_flags;
|
||||
res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
|
||||
res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
|
||||
res->type = cfgtew->u.cfgte64->res_type;
|
||||
|
||||
memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
|
||||
@ -1900,7 +1901,7 @@ static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
|
||||
* Return value:
|
||||
* none
|
||||
**/
|
||||
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
|
||||
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
|
||||
{
|
||||
int i;
|
||||
|
||||
@ -2270,7 +2271,7 @@ static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
|
||||
((unsigned long)fabric + be16_to_cpu(fabric->length));
|
||||
}
|
||||
|
||||
ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
|
||||
ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2364,7 +2365,7 @@ static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
|
||||
((unsigned long)fabric + be16_to_cpu(fabric->length));
|
||||
}
|
||||
|
||||
ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
|
||||
ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -4455,7 +4456,7 @@ static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *a
|
||||
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
|
||||
res = (struct ipr_resource_entry *)sdev->hostdata;
|
||||
if (res && ioa_cfg->sis64)
|
||||
len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
|
||||
len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
|
||||
else if (res)
|
||||
len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
|
||||
|
||||
|
@ -39,8 +39,8 @@
|
||||
/*
|
||||
* Literals
|
||||
*/
|
||||
#define IPR_DRIVER_VERSION "2.6.1"
|
||||
#define IPR_DRIVER_DATE "(March 12, 2015)"
|
||||
#define IPR_DRIVER_VERSION "2.6.2"
|
||||
#define IPR_DRIVER_DATE "(June 11, 2015)"
|
||||
|
||||
/*
|
||||
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
|
||||
@ -1005,13 +1005,13 @@ struct ipr_hostrcb_type_24_error {
|
||||
struct ipr_hostrcb_type_07_error {
|
||||
u8 failure_reason[64];
|
||||
struct ipr_vpd vpd;
|
||||
u32 data[222];
|
||||
__be32 data[222];
|
||||
}__attribute__((packed, aligned (4)));
|
||||
|
||||
struct ipr_hostrcb_type_17_error {
|
||||
u8 failure_reason[64];
|
||||
struct ipr_ext_vpd vpd;
|
||||
u32 data[476];
|
||||
__be32 data[476];
|
||||
}__attribute__((packed, aligned (4)));
|
||||
|
||||
struct ipr_hostrcb_config_element {
|
||||
@ -1289,18 +1289,17 @@ struct ipr_resource_entry {
|
||||
(((res)->bus << 24) | ((res)->target << 8) | (res)->lun)
|
||||
|
||||
u8 ata_class;
|
||||
|
||||
u8 flags;
|
||||
__be16 res_flags;
|
||||
|
||||
u8 type;
|
||||
|
||||
u16 flags;
|
||||
u16 res_flags;
|
||||
|
||||
u8 qmodel;
|
||||
struct ipr_std_inq_data std_inq_data;
|
||||
|
||||
__be32 res_handle;
|
||||
__be64 dev_id;
|
||||
__be64 lun_wwn;
|
||||
u64 lun_wwn;
|
||||
struct scsi_lun dev_lun;
|
||||
u8 res_path[8];
|
||||
|
||||
|
@ -191,7 +191,7 @@ static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
|
||||
}
|
||||
|
||||
/**
|
||||
* fc_fcp_pkt_destory() - Release hold on a fcp_pkt
|
||||
* fc_fcp_pkt_destroy() - Release hold on a fcp_pkt
|
||||
* @seq: The sequence that the FCP packet is on (required by destructor API)
|
||||
* @fsp: The FCP packet to be released
|
||||
*
|
||||
|
@ -701,7 +701,7 @@ lpfc_work_done(struct lpfc_hba *phba)
|
||||
HA_RXMASK));
|
||||
}
|
||||
}
|
||||
if ((phba->sli_rev == LPFC_SLI_REV4) &
|
||||
if ((phba->sli_rev == LPFC_SLI_REV4) &&
|
||||
(!list_empty(&pring->txq)))
|
||||
lpfc_drain_txq(phba);
|
||||
/*
|
||||
|
@ -268,8 +268,8 @@ mega_query_adapter(adapter_t *adapter)
|
||||
raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */
|
||||
|
||||
if ((retval = issue_scb_block(adapter, raw_mbox)))
|
||||
printk(KERN_WARNING
|
||||
"megaraid: Product_info cmd failed with error: %d\n",
|
||||
dev_warn(&adapter->dev->dev,
|
||||
"Product_info cmd failed with error: %d\n",
|
||||
retval);
|
||||
|
||||
pci_unmap_single(adapter->dev, prod_info_dma_handle,
|
||||
@ -334,7 +334,7 @@ mega_query_adapter(adapter_t *adapter)
|
||||
adapter->bios_version[4] = 0;
|
||||
}
|
||||
|
||||
printk(KERN_NOTICE "megaraid: [%s:%s] detected %d logical drives.\n",
|
||||
dev_notice(&adapter->dev->dev, "[%s:%s] detected %d logical drives\n",
|
||||
adapter->fw_version, adapter->bios_version, adapter->numldrv);
|
||||
|
||||
/*
|
||||
@ -342,7 +342,7 @@ mega_query_adapter(adapter_t *adapter)
|
||||
*/
|
||||
adapter->support_ext_cdb = mega_support_ext_cdb(adapter);
|
||||
if (adapter->support_ext_cdb)
|
||||
printk(KERN_NOTICE "megaraid: supports extended CDBs.\n");
|
||||
dev_notice(&adapter->dev->dev, "supports extended CDBs\n");
|
||||
|
||||
|
||||
return 0;
|
||||
@ -678,11 +678,11 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
|
||||
|
||||
if(!(adapter->flag & (1L << cmd->device->channel))) {
|
||||
|
||||
printk(KERN_NOTICE
|
||||
"scsi%d: scanning scsi channel %d ",
|
||||
dev_notice(&adapter->dev->dev,
|
||||
"scsi%d: scanning scsi channel %d "
|
||||
"for logical drives\n",
|
||||
adapter->host->host_no,
|
||||
cmd->device->channel);
|
||||
printk("for logical drives.\n");
|
||||
|
||||
adapter->flag |= (1L << cmd->device->channel);
|
||||
}
|
||||
@ -983,11 +983,11 @@ mega_prepare_passthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd,
|
||||
case READ_CAPACITY:
|
||||
if(!(adapter->flag & (1L << cmd->device->channel))) {
|
||||
|
||||
printk(KERN_NOTICE
|
||||
"scsi%d: scanning scsi channel %d [P%d] ",
|
||||
dev_notice(&adapter->dev->dev,
|
||||
"scsi%d: scanning scsi channel %d [P%d] "
|
||||
"for physical devices\n",
|
||||
adapter->host->host_no,
|
||||
cmd->device->channel, channel);
|
||||
printk("for physical devices.\n");
|
||||
|
||||
adapter->flag |= (1L << cmd->device->channel);
|
||||
}
|
||||
@ -1045,11 +1045,11 @@ mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd,
|
||||
case READ_CAPACITY:
|
||||
if(!(adapter->flag & (1L << cmd->device->channel))) {
|
||||
|
||||
printk(KERN_NOTICE
|
||||
"scsi%d: scanning scsi channel %d [P%d] ",
|
||||
dev_notice(&adapter->dev->dev,
|
||||
"scsi%d: scanning scsi channel %d [P%d] "
|
||||
"for physical devices\n",
|
||||
adapter->host->host_no,
|
||||
cmd->device->channel, channel);
|
||||
printk("for physical devices.\n");
|
||||
|
||||
adapter->flag |= (1L << cmd->device->channel);
|
||||
}
|
||||
@ -1241,7 +1241,7 @@ issue_scb_block(adapter_t *adapter, u_char *raw_mbox)
|
||||
return mbox->m_in.status;
|
||||
|
||||
bug_blocked_mailbox:
|
||||
printk(KERN_WARNING "megaraid: Blocked mailbox......!!\n");
|
||||
dev_warn(&adapter->dev->dev, "Blocked mailbox......!!\n");
|
||||
udelay (1000);
|
||||
return -1;
|
||||
}
|
||||
@ -1454,9 +1454,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
|
||||
* Make sure f/w has completed a valid command
|
||||
*/
|
||||
if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) {
|
||||
printk(KERN_CRIT
|
||||
"megaraid: invalid command ");
|
||||
printk("Id %d, scb->state:%x, scsi cmd:%p\n",
|
||||
dev_crit(&adapter->dev->dev, "invalid command "
|
||||
"Id %d, scb->state:%x, scsi cmd:%p\n",
|
||||
cmdid, scb->state, scb->cmd);
|
||||
|
||||
continue;
|
||||
@ -1467,8 +1466,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
|
||||
*/
|
||||
if( scb->state & SCB_ABORT ) {
|
||||
|
||||
printk(KERN_WARNING
|
||||
"megaraid: aborted cmd [%x] complete.\n",
|
||||
dev_warn(&adapter->dev->dev,
|
||||
"aborted cmd [%x] complete\n",
|
||||
scb->idx);
|
||||
|
||||
scb->cmd->result = (DID_ABORT << 16);
|
||||
@ -1486,8 +1485,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
|
||||
*/
|
||||
if( scb->state & SCB_RESET ) {
|
||||
|
||||
printk(KERN_WARNING
|
||||
"megaraid: reset cmd [%x] complete.\n",
|
||||
dev_warn(&adapter->dev->dev,
|
||||
"reset cmd [%x] complete\n",
|
||||
scb->idx);
|
||||
|
||||
scb->cmd->result = (DID_RESET << 16);
|
||||
@ -1553,8 +1552,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
|
||||
if( sg_page(sgl) ) {
|
||||
c = *(unsigned char *) sg_virt(&sgl[0]);
|
||||
} else {
|
||||
printk(KERN_WARNING
|
||||
"megaraid: invalid sg.\n");
|
||||
dev_warn(&adapter->dev->dev, "invalid sg\n");
|
||||
c = 0;
|
||||
}
|
||||
|
||||
@ -1902,11 +1900,10 @@ megaraid_reset(struct scsi_cmnd *cmd)
|
||||
mc.opcode = MEGA_RESET_RESERVATIONS;
|
||||
|
||||
if( mega_internal_command(adapter, &mc, NULL) != 0 ) {
|
||||
printk(KERN_WARNING
|
||||
"megaraid: reservation reset failed.\n");
|
||||
dev_warn(&adapter->dev->dev, "reservation reset failed\n");
|
||||
}
|
||||
else {
|
||||
printk(KERN_INFO "megaraid: reservation reset.\n");
|
||||
dev_info(&adapter->dev->dev, "reservation reset\n");
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -1939,7 +1936,7 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
|
||||
struct list_head *pos, *next;
|
||||
scb_t *scb;
|
||||
|
||||
printk(KERN_WARNING "megaraid: %s cmd=%x <c=%d t=%d l=%d>\n",
|
||||
dev_warn(&adapter->dev->dev, "%s cmd=%x <c=%d t=%d l=%d>\n",
|
||||
(aor == SCB_ABORT)? "ABORTING":"RESET",
|
||||
cmd->cmnd[0], cmd->device->channel,
|
||||
cmd->device->id, (u32)cmd->device->lun);
|
||||
@ -1963,8 +1960,8 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
|
||||
*/
|
||||
if( scb->state & SCB_ISSUED ) {
|
||||
|
||||
printk(KERN_WARNING
|
||||
"megaraid: %s[%x], fw owner.\n",
|
||||
dev_warn(&adapter->dev->dev,
|
||||
"%s[%x], fw owner\n",
|
||||
(aor==SCB_ABORT) ? "ABORTING":"RESET",
|
||||
scb->idx);
|
||||
|
||||
@ -1976,8 +1973,8 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
|
||||
* Not yet issued! Remove from the pending
|
||||
* list
|
||||
*/
|
||||
printk(KERN_WARNING
|
||||
"megaraid: %s-[%x], driver owner.\n",
|
||||
dev_warn(&adapter->dev->dev,
|
||||
"%s-[%x], driver owner\n",
|
||||
(aor==SCB_ABORT) ? "ABORTING":"RESET",
|
||||
scb->idx);
|
||||
|
||||
@ -2197,7 +2194,7 @@ proc_show_rebuild_rate(struct seq_file *m, void *v)
|
||||
|
||||
if( mega_adapinq(adapter, dma_handle) != 0 ) {
|
||||
seq_puts(m, "Adapter inquiry failed.\n");
|
||||
printk(KERN_WARNING "megaraid: inquiry failed.\n");
|
||||
dev_warn(&adapter->dev->dev, "inquiry failed\n");
|
||||
goto free_inquiry;
|
||||
}
|
||||
|
||||
@ -2241,7 +2238,7 @@ proc_show_battery(struct seq_file *m, void *v)
|
||||
|
||||
if( mega_adapinq(adapter, dma_handle) != 0 ) {
|
||||
seq_puts(m, "Adapter inquiry failed.\n");
|
||||
printk(KERN_WARNING "megaraid: inquiry failed.\n");
|
||||
dev_warn(&adapter->dev->dev, "inquiry failed\n");
|
||||
goto free_inquiry;
|
||||
}
|
||||
|
||||
@ -2350,7 +2347,7 @@ proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel)
|
||||
|
||||
if( mega_adapinq(adapter, dma_handle) != 0 ) {
|
||||
seq_puts(m, "Adapter inquiry failed.\n");
|
||||
printk(KERN_WARNING "megaraid: inquiry failed.\n");
|
||||
dev_warn(&adapter->dev->dev, "inquiry failed\n");
|
||||
goto free_inquiry;
|
||||
}
|
||||
|
||||
@ -2525,7 +2522,7 @@ proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end )
|
||||
|
||||
if( mega_adapinq(adapter, dma_handle) != 0 ) {
|
||||
seq_puts(m, "Adapter inquiry failed.\n");
|
||||
printk(KERN_WARNING "megaraid: inquiry failed.\n");
|
||||
dev_warn(&adapter->dev->dev, "inquiry failed\n");
|
||||
goto free_inquiry;
|
||||
}
|
||||
|
||||
@ -2799,7 +2796,7 @@ mega_create_proc_entry(int index, struct proc_dir_entry *parent)
|
||||
dir = adapter->controller_proc_dir_entry =
|
||||
proc_mkdir_data(string, 0, parent, adapter);
|
||||
if(!dir) {
|
||||
printk(KERN_WARNING "\nmegaraid: proc_mkdir failed\n");
|
||||
dev_warn(&adapter->dev->dev, "proc_mkdir failed\n");
|
||||
return;
|
||||
}
|
||||
|
||||
@ -2807,7 +2804,7 @@ mega_create_proc_entry(int index, struct proc_dir_entry *parent)
|
||||
de = proc_create_data(f->name, S_IRUSR, dir, &mega_proc_fops,
|
||||
f->show);
|
||||
if (!de) {
|
||||
printk(KERN_WARNING "\nmegaraid: proc_create failed\n");
|
||||
dev_warn(&adapter->dev->dev, "proc_create failed\n");
|
||||
return;
|
||||
}
|
||||
|
||||
@ -2874,9 +2871,9 @@ megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
|
||||
return rval;
|
||||
}
|
||||
|
||||
printk(KERN_INFO
|
||||
"megaraid: invalid partition on this disk on channel %d\n",
|
||||
sdev->channel);
|
||||
dev_info(&adapter->dev->dev,
|
||||
"invalid partition on this disk on channel %d\n",
|
||||
sdev->channel);
|
||||
|
||||
/* Default heads (64) & sectors (32) */
|
||||
heads = 64;
|
||||
@ -2936,7 +2933,7 @@ mega_init_scb(adapter_t *adapter)
|
||||
scb->sgl = (mega_sglist *)scb->sgl64;
|
||||
|
||||
if( !scb->sgl ) {
|
||||
printk(KERN_WARNING "RAID: Can't allocate sglist.\n");
|
||||
dev_warn(&adapter->dev->dev, "RAID: Can't allocate sglist\n");
|
||||
mega_free_sgl(adapter);
|
||||
return -1;
|
||||
}
|
||||
@ -2946,7 +2943,7 @@ mega_init_scb(adapter_t *adapter)
|
||||
&scb->pthru_dma_addr);
|
||||
|
||||
if( !scb->pthru ) {
|
||||
printk(KERN_WARNING "RAID: Can't allocate passthru.\n");
|
||||
dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n");
|
||||
mega_free_sgl(adapter);
|
||||
return -1;
|
||||
}
|
||||
@ -2956,8 +2953,8 @@ mega_init_scb(adapter_t *adapter)
|
||||
&scb->epthru_dma_addr);
|
||||
|
||||
if( !scb->epthru ) {
|
||||
printk(KERN_WARNING
|
||||
"Can't allocate extended passthru.\n");
|
||||
dev_warn(&adapter->dev->dev,
|
||||
"Can't allocate extended passthru\n");
|
||||
mega_free_sgl(adapter);
|
||||
return -1;
|
||||
}
|
||||
@ -3154,8 +3151,8 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
|
||||
* Do we support this feature
|
||||
*/
|
||||
if( !adapter->support_random_del ) {
|
||||
printk(KERN_WARNING "megaraid: logdrv ");
|
||||
printk("delete on non-supporting F/W.\n");
|
||||
dev_warn(&adapter->dev->dev, "logdrv "
|
||||
"delete on non-supporting F/W\n");
|
||||
|
||||
return (-EINVAL);
|
||||
}
|
||||
@ -3179,7 +3176,7 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
|
||||
if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 ||
|
||||
uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) {
|
||||
|
||||
printk(KERN_WARNING "megaraid: rejected passthru.\n");
|
||||
dev_warn(&adapter->dev->dev, "rejected passthru\n");
|
||||
|
||||
return (-EINVAL);
|
||||
}
|
||||
@ -3683,11 +3680,11 @@ mega_enum_raid_scsi(adapter_t *adapter)
|
||||
|
||||
for( i = 0; i < adapter->product_info.nchannels; i++ ) {
|
||||
if( (adapter->mega_ch_class >> i) & 0x01 ) {
|
||||
printk(KERN_INFO "megaraid: channel[%d] is raid.\n",
|
||||
dev_info(&adapter->dev->dev, "channel[%d] is raid\n",
|
||||
i);
|
||||
}
|
||||
else {
|
||||
printk(KERN_INFO "megaraid: channel[%d] is scsi.\n",
|
||||
dev_info(&adapter->dev->dev, "channel[%d] is scsi\n",
|
||||
i);
|
||||
}
|
||||
}
|
||||
@ -3893,7 +3890,7 @@ mega_do_del_logdrv(adapter_t *adapter, int logdrv)
|
||||
|
||||
/* log this event */
|
||||
if(rval) {
|
||||
printk(KERN_WARNING "megaraid: Delete LD-%d failed.", logdrv);
|
||||
dev_warn(&adapter->dev->dev, "Delete LD-%d failed", logdrv);
|
||||
return rval;
|
||||
}
|
||||
|
||||
@ -4161,7 +4158,7 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
|
||||
* this information.
|
||||
*/
|
||||
if (rval && trace_level) {
|
||||
printk("megaraid: cmd [%x, %x, %x] status:[%x]\n",
|
||||
dev_info(&adapter->dev->dev, "cmd [%x, %x, %x] status:[%x]\n",
|
||||
mc->cmd, mc->opcode, mc->subopcode, rval);
|
||||
}
|
||||
|
||||
@ -4244,11 +4241,8 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
subsysvid = pdev->subsystem_vendor;
|
||||
subsysid = pdev->subsystem_device;
|
||||
|
||||
printk(KERN_NOTICE "megaraid: found 0x%4.04x:0x%4.04x:bus %d:",
|
||||
id->vendor, id->device, pci_bus);
|
||||
|
||||
printk("slot %d:func %d\n",
|
||||
PCI_SLOT(pci_dev_func), PCI_FUNC(pci_dev_func));
|
||||
dev_notice(&pdev->dev, "found 0x%4.04x:0x%4.04x\n",
|
||||
id->vendor, id->device);
|
||||
|
||||
/* Read the base port and IRQ from PCI */
|
||||
mega_baseport = pci_resource_start(pdev, 0);
|
||||
@ -4259,14 +4253,13 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
flag |= BOARD_MEMMAP;
|
||||
|
||||
if (!request_mem_region(mega_baseport, 128, "megaraid")) {
|
||||
printk(KERN_WARNING "megaraid: mem region busy!\n");
|
||||
dev_warn(&pdev->dev, "mem region busy!\n");
|
||||
goto out_disable_device;
|
||||
}
|
||||
|
||||
mega_baseport = (unsigned long)ioremap(mega_baseport, 128);
|
||||
if (!mega_baseport) {
|
||||
printk(KERN_WARNING
|
||||
"megaraid: could not map hba memory\n");
|
||||
dev_warn(&pdev->dev, "could not map hba memory\n");
|
||||
goto out_release_region;
|
||||
}
|
||||
} else {
|
||||
@ -4285,7 +4278,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
adapter = (adapter_t *)host->hostdata;
|
||||
memset(adapter, 0, sizeof(adapter_t));
|
||||
|
||||
printk(KERN_NOTICE
|
||||
dev_notice(&pdev->dev,
|
||||
"scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n",
|
||||
host->host_no, mega_baseport, irq);
|
||||
|
||||
@ -4323,21 +4316,20 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
adapter->mega_buffer = pci_alloc_consistent(adapter->dev,
|
||||
MEGA_BUFFER_SIZE, &adapter->buf_dma_handle);
|
||||
if (!adapter->mega_buffer) {
|
||||
printk(KERN_WARNING "megaraid: out of RAM.\n");
|
||||
dev_warn(&pdev->dev, "out of RAM\n");
|
||||
goto out_host_put;
|
||||
}
|
||||
|
||||
adapter->scb_list = kmalloc(sizeof(scb_t) * MAX_COMMANDS, GFP_KERNEL);
|
||||
if (!adapter->scb_list) {
|
||||
printk(KERN_WARNING "megaraid: out of RAM.\n");
|
||||
dev_warn(&pdev->dev, "out of RAM\n");
|
||||
goto out_free_cmd_buffer;
|
||||
}
|
||||
|
||||
if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ?
|
||||
megaraid_isr_memmapped : megaraid_isr_iomapped,
|
||||
IRQF_SHARED, "megaraid", adapter)) {
|
||||
printk(KERN_WARNING
|
||||
"megaraid: Couldn't register IRQ %d!\n", irq);
|
||||
dev_warn(&pdev->dev, "Couldn't register IRQ %d!\n", irq);
|
||||
goto out_free_scb_list;
|
||||
}
|
||||
|
||||
@ -4357,9 +4349,9 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
if (!strcmp(adapter->fw_version, "3.00") ||
|
||||
!strcmp(adapter->fw_version, "3.01")) {
|
||||
|
||||
printk( KERN_WARNING
|
||||
"megaraid: Your card is a Dell PERC "
|
||||
"2/SC RAID controller with "
|
||||
dev_warn(&pdev->dev,
|
||||
"Your card is a Dell PERC "
|
||||
"2/SC RAID controller with "
|
||||
"firmware\nmegaraid: 3.00 or 3.01. "
|
||||
"This driver is known to have "
|
||||
"corruption issues\nmegaraid: with "
|
||||
@ -4390,12 +4382,12 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
if (!strcmp(adapter->fw_version, "H01.07") ||
|
||||
!strcmp(adapter->fw_version, "H01.08") ||
|
||||
!strcmp(adapter->fw_version, "H01.09") ) {
|
||||
printk(KERN_WARNING
|
||||
"megaraid: Firmware H.01.07, "
|
||||
dev_warn(&pdev->dev,
|
||||
"Firmware H.01.07, "
|
||||
"H.01.08, and H.01.09 on 1M/2M "
|
||||
"controllers\n"
|
||||
"megaraid: do not support 64 bit "
|
||||
"addressing.\nmegaraid: DISABLING "
|
||||
"do not support 64 bit "
|
||||
"addressing.\nDISABLING "
|
||||
"64 bit support.\n");
|
||||
adapter->flag &= ~BOARD_64BIT;
|
||||
}
|
||||
@ -4503,8 +4495,8 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
*/
|
||||
adapter->has_cluster = mega_support_cluster(adapter);
|
||||
if (adapter->has_cluster) {
|
||||
printk(KERN_NOTICE
|
||||
"megaraid: Cluster driver, initiator id:%d\n",
|
||||
dev_notice(&pdev->dev,
|
||||
"Cluster driver, initiator id:%d\n",
|
||||
adapter->this_id);
|
||||
}
|
||||
#endif
|
||||
@ -4571,7 +4563,7 @@ __megaraid_shutdown(adapter_t *adapter)
|
||||
issue_scb_block(adapter, raw_mbox);
|
||||
|
||||
if (atomic_read(&adapter->pend_cmds) > 0)
|
||||
printk(KERN_WARNING "megaraid: pending commands!!\n");
|
||||
dev_warn(&adapter->dev->dev, "pending commands!!\n");
|
||||
|
||||
/*
|
||||
* Have a delibrate delay to make sure all the caches are
|
||||
|
File diff suppressed because it is too large
@ -221,7 +221,7 @@ static void megasas_teardown_frame_pool_fusion(
|
||||
struct megasas_cmd_fusion *cmd;
|
||||
|
||||
if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) {
|
||||
printk(KERN_ERR "megasas: dma pool is null. SG Pool %p, "
|
||||
dev_err(&instance->pdev->dev, "dma pool is null. SG Pool %p, "
|
||||
"sense pool : %p\n", fusion->sg_dma_pool,
|
||||
fusion->sense_dma_pool);
|
||||
return;
|
||||
@ -332,8 +332,7 @@ static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
|
||||
total_sz_chain_frame, 4,
|
||||
0);
|
||||
if (!fusion->sg_dma_pool) {
|
||||
printk(KERN_DEBUG "megasas: failed to setup request pool "
|
||||
"fusion\n");
|
||||
dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup request pool fusion\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
fusion->sense_dma_pool = pci_pool_create("megasas sense pool fusion",
|
||||
@ -341,8 +340,7 @@ static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
|
||||
SCSI_SENSE_BUFFERSIZE, 64, 0);
|
||||
|
||||
if (!fusion->sense_dma_pool) {
|
||||
printk(KERN_DEBUG "megasas: failed to setup sense pool "
|
||||
"fusion\n");
|
||||
dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool fusion\n");
|
||||
pci_pool_destroy(fusion->sg_dma_pool);
|
||||
fusion->sg_dma_pool = NULL;
|
||||
return -ENOMEM;
|
||||
@ -366,7 +364,7 @@ static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
|
||||
* whatever has been allocated
|
||||
*/
|
||||
if (!cmd->sg_frame || !cmd->sense) {
|
||||
printk(KERN_DEBUG "megasas: pci_pool_alloc failed\n");
|
||||
dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n");
|
||||
megasas_teardown_frame_pool_fusion(instance);
|
||||
return -ENOMEM;
|
||||
}
|
||||
@ -412,7 +410,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
|
||||
&fusion->req_frames_desc_phys, GFP_KERNEL);
|
||||
|
||||
if (!fusion->req_frames_desc) {
|
||||
printk(KERN_ERR "megasas; Could not allocate memory for "
|
||||
dev_err(&instance->pdev->dev, "Could not allocate memory for "
|
||||
"request_frames\n");
|
||||
goto fail_req_desc;
|
||||
}
|
||||
@ -423,7 +421,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
|
||||
fusion->reply_alloc_sz * count, 16, 0);
|
||||
|
||||
if (!fusion->reply_frames_desc_pool) {
|
||||
printk(KERN_ERR "megasas; Could not allocate memory for "
|
||||
dev_err(&instance->pdev->dev, "Could not allocate memory for "
|
||||
"reply_frame pool\n");
|
||||
goto fail_reply_desc;
|
||||
}
|
||||
@ -432,7 +430,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
|
||||
pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL,
|
||||
&fusion->reply_frames_desc_phys);
|
||||
if (!fusion->reply_frames_desc) {
|
||||
printk(KERN_ERR "megasas; Could not allocate memory for "
|
||||
dev_err(&instance->pdev->dev, "Could not allocate memory for "
|
||||
"reply_frame pool\n");
|
||||
pci_pool_destroy(fusion->reply_frames_desc_pool);
|
||||
goto fail_reply_desc;
|
||||
@ -449,7 +447,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
|
||||
fusion->io_frames_alloc_sz, 16, 0);
|
||||
|
||||
if (!fusion->io_request_frames_pool) {
|
||||
printk(KERN_ERR "megasas: Could not allocate memory for "
|
||||
dev_err(&instance->pdev->dev, "Could not allocate memory for "
|
||||
"io_request_frame pool\n");
|
||||
goto fail_io_frames;
|
||||
}
|
||||
@ -458,7 +456,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
|
||||
pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL,
|
||||
&fusion->io_request_frames_phys);
|
||||
if (!fusion->io_request_frames) {
|
||||
printk(KERN_ERR "megasas: Could not allocate memory for "
|
||||
dev_err(&instance->pdev->dev, "Could not allocate memory for "
|
||||
"io_request_frames frames\n");
|
||||
pci_pool_destroy(fusion->io_request_frames_pool);
|
||||
goto fail_io_frames;
|
||||
@ -473,7 +471,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
|
||||
* max_cmd, GFP_KERNEL);
|
||||
|
||||
if (!fusion->cmd_list) {
|
||||
printk(KERN_DEBUG "megasas: out of memory. Could not alloc "
|
||||
dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory. Could not alloc "
|
||||
"memory for cmd_list_fusion\n");
|
||||
goto fail_cmd_list;
|
||||
}
|
||||
@ -483,7 +481,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
|
||||
fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion),
|
||||
GFP_KERNEL);
|
||||
if (!fusion->cmd_list[i]) {
|
||||
printk(KERN_ERR "Could not alloc cmd list fusion\n");
|
||||
dev_err(&instance->pdev->dev, "Could not alloc cmd list fusion\n");
|
||||
|
||||
for (j = 0; j < i; j++)
|
||||
kfree(fusion->cmd_list[j]);
|
||||
@ -527,7 +525,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
|
||||
* Create a frame pool and assign one frame to each cmd
|
||||
*/
|
||||
if (megasas_create_frame_pool_fusion(instance)) {
|
||||
printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
|
||||
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
|
||||
megasas_free_cmds_fusion(instance);
|
||||
goto fail_req_desc;
|
||||
}
|
||||
@ -613,7 +611,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
|
||||
cmd = megasas_get_cmd(instance);
|
||||
|
||||
if (!cmd) {
|
||||
printk(KERN_ERR "Could not allocate cmd for INIT Frame\n");
|
||||
dev_err(&instance->pdev->dev, "Could not allocate cmd for INIT Frame\n");
|
||||
ret = 1;
|
||||
goto fail_get_cmd;
|
||||
}
|
||||
@ -624,7 +622,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
|
||||
&ioc_init_handle, GFP_KERNEL);
|
||||
|
||||
if (!IOCInitMessage) {
|
||||
printk(KERN_ERR "Could not allocate memory for "
|
||||
dev_err(&instance->pdev->dev, "Could not allocate memory for "
|
||||
"IOCInitMessage\n");
|
||||
ret = 1;
|
||||
goto fail_fw_init;
|
||||
@ -714,7 +712,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
|
||||
ret = 1;
|
||||
goto fail_fw_init;
|
||||
}
|
||||
printk(KERN_ERR "megasas:IOC Init cmd success\n");
|
||||
dev_err(&instance->pdev->dev, "Init cmd success\n");
|
||||
|
||||
ret = 0;
|
||||
|
||||
@ -757,7 +755,7 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
|
||||
cmd = megasas_get_cmd(instance);
|
||||
|
||||
if (!cmd) {
|
||||
printk(KERN_DEBUG "megasas: Failed to get cmd for map info.\n");
|
||||
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
@ -776,7 +774,7 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
|
||||
ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
|
||||
|
||||
if (!ci) {
|
||||
printk(KERN_DEBUG "Failed to alloc mem for ld_map_info\n");
|
||||
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n");
|
||||
megasas_return_cmd(instance, cmd);
|
||||
return -ENOMEM;
|
||||
}
|
||||
@ -851,8 +849,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
|
||||
cmd = megasas_get_cmd(instance);
|
||||
|
||||
if (!cmd) {
|
||||
printk(KERN_DEBUG "megasas: Failed to get cmd for sync"
|
||||
"info.\n");
|
||||
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
@ -1097,7 +1094,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
|
||||
&fusion->ld_map_phys[i],
|
||||
GFP_KERNEL);
|
||||
if (!fusion->ld_map[i]) {
|
||||
printk(KERN_ERR "megasas: Could not allocate memory "
|
||||
dev_err(&instance->pdev->dev, "Could not allocate memory "
|
||||
"for map info\n");
|
||||
goto fail_map_info;
|
||||
}
|
||||
@ -1162,7 +1159,7 @@ map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
|
||||
cmd->scmd->result = DID_IMM_RETRY << 16;
|
||||
break;
|
||||
default:
|
||||
printk(KERN_DEBUG "megasas: FW status %#x\n", status);
|
||||
dev_printk(KERN_DEBUG, &cmd->instance->pdev->dev, "FW status %#x\n", status);
|
||||
cmd->scmd->result = DID_ERROR << 16;
|
||||
break;
|
||||
}
|
||||
@ -1851,7 +1848,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
|
||||
&io_request->SGL, cmd);
|
||||
|
||||
if (sge_count > instance->max_num_sge) {
|
||||
printk(KERN_ERR "megasas: Error. sge_count (0x%x) exceeds "
|
||||
dev_err(&instance->pdev->dev, "Error. sge_count (0x%x) exceeds "
|
||||
"max (0x%x) allowed\n", sge_count,
|
||||
instance->max_num_sge);
|
||||
return 1;
|
||||
@ -1885,7 +1882,7 @@ megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
|
||||
struct fusion_context *fusion;
|
||||
|
||||
if (index >= instance->max_fw_cmds) {
|
||||
printk(KERN_ERR "megasas: Invalid SMID (0x%x)request for "
|
||||
dev_err(&instance->pdev->dev, "Invalid SMID (0x%x)request for "
|
||||
"descriptor for scsi%d\n", index,
|
||||
instance->host->host_no);
|
||||
return NULL;
|
||||
@ -1927,7 +1924,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
|
||||
|
||||
if (megasas_build_io_fusion(instance, scmd, cmd)) {
|
||||
megasas_return_cmd_fusion(instance, cmd);
|
||||
printk(KERN_ERR "megasas: Error building command.\n");
|
||||
dev_err(&instance->pdev->dev, "Error building command\n");
|
||||
cmd->request_desc = NULL;
|
||||
return 1;
|
||||
}
|
||||
@ -1937,7 +1934,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
|
||||
|
||||
if (cmd->io_request->ChainOffset != 0 &&
|
||||
cmd->io_request->ChainOffset != 0xF)
|
||||
printk(KERN_ERR "megasas: The chain offset value is not "
|
||||
dev_err(&instance->pdev->dev, "The chain offset value is not "
|
||||
"correct : %x\n", cmd->io_request->ChainOffset);
|
||||
|
||||
/*
|
||||
@ -2025,7 +2022,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
|
||||
if (reply_descript_type ==
|
||||
MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
|
||||
if (megasas_dbg_lvl == 5)
|
||||
printk(KERN_ERR "\nmegasas: FAST Path "
|
||||
dev_err(&instance->pdev->dev, "\nFAST Path "
|
||||
"IO Success\n");
|
||||
}
|
||||
/* Fall thru and complete IO */
|
||||
@ -2186,7 +2183,7 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
|
||||
else if (fw_state == MFI_STATE_FAULT)
|
||||
schedule_work(&instance->work_init);
|
||||
} else if (fw_state == MFI_STATE_FAULT) {
|
||||
printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt"
|
||||
dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt"
|
||||
"for scsi%d\n", instance->host->host_no);
|
||||
schedule_work(&instance->work_init);
|
||||
}
|
||||
@ -2269,7 +2266,7 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
|
||||
u16 index;
|
||||
|
||||
if (build_mpt_mfi_pass_thru(instance, cmd)) {
|
||||
printk(KERN_ERR "Couldn't build MFI pass thru cmd\n");
|
||||
dev_err(&instance->pdev->dev, "Couldn't build MFI pass thru cmd\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -2303,7 +2300,7 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance,
|
||||
|
||||
req_desc = build_mpt_cmd(instance, cmd);
|
||||
if (!req_desc) {
|
||||
printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n");
|
||||
dev_err(&instance->pdev->dev, "Couldn't issue MFI pass thru cmd\n");
|
||||
return;
|
||||
}
|
||||
megasas_fire_cmd_fusion(instance, req_desc);
|
||||
@ -2413,7 +2410,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
|
||||
fw_state = instance->instancet->read_fw_status_reg(
|
||||
instance->reg_set) & MFI_STATE_MASK;
|
||||
if (fw_state == MFI_STATE_FAULT) {
|
||||
printk(KERN_WARNING "megasas: Found FW in FAULT state,"
|
||||
dev_warn(&instance->pdev->dev, "Found FW in FAULT state,"
|
||||
" will reset adapter scsi%d.\n",
|
||||
instance->host->host_no);
|
||||
retval = 1;
|
||||
@ -2436,7 +2433,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
|
||||
hb_seconds_missed++;
|
||||
if (hb_seconds_missed ==
|
||||
(MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
|
||||
printk(KERN_WARNING "megasas: SR-IOV:"
|
||||
dev_warn(&instance->pdev->dev, "SR-IOV:"
|
||||
" Heartbeat never completed "
|
||||
" while polling during I/O "
|
||||
" timeout handling for "
|
||||
@ -2454,7 +2451,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
|
||||
goto out;
|
||||
|
||||
if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
|
||||
printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
|
||||
dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
|
||||
"commands to complete for scsi%d\n", i,
|
||||
outstanding, instance->host->host_no);
|
||||
megasas_complete_cmd_dpc_fusion(
|
||||
@ -2464,7 +2461,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
|
||||
}
|
||||
|
||||
if (atomic_read(&instance->fw_outstanding)) {
|
||||
printk("megaraid_sas: pending commands remain after waiting, "
|
||||
dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
|
||||
"will reset adapter scsi%d.\n",
|
||||
instance->host->host_no);
|
||||
retval = 1;
|
||||
@ -2564,7 +2561,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
|
||||
mutex_lock(&instance->reset_mutex);
|
||||
|
||||
if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
|
||||
printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
|
||||
dev_warn(&instance->pdev->dev, "Hardware critical error, "
|
||||
"returning FAILED for scsi%d.\n",
|
||||
instance->host->host_no);
|
||||
mutex_unlock(&instance->reset_mutex);
|
||||
@ -2618,7 +2615,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
|
||||
if (megasas_wait_for_outstanding_fusion(instance, iotimeout,
|
||||
&convert)) {
|
||||
instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
|
||||
printk(KERN_WARNING "megaraid_sas: resetting fusion "
|
||||
dev_warn(&instance->pdev->dev, "resetting fusion "
|
||||
"adapter scsi%d.\n", instance->host->host_no);
|
||||
if (convert)
|
||||
iotimeout = 0;
|
||||
@ -2645,7 +2642,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
|
||||
if (instance->disableOnlineCtrlReset ||
|
||||
(abs_state == MFI_STATE_FAULT && !reset_adapter)) {
|
||||
/* Reset not supported, kill adapter */
|
||||
printk(KERN_WARNING "megaraid_sas: Reset not supported"
|
||||
dev_warn(&instance->pdev->dev, "Reset not supported"
|
||||
", killing adapter scsi%d.\n",
|
||||
instance->host->host_no);
|
||||
megaraid_sas_kill_hba(instance);
|
||||
@ -2663,7 +2660,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
|
||||
instance->hb_host_mem->HB.driverCounter)) {
|
||||
instance->hb_host_mem->HB.driverCounter =
|
||||
instance->hb_host_mem->HB.fwCounter;
|
||||
printk(KERN_WARNING "megasas: SR-IOV:"
|
||||
dev_warn(&instance->pdev->dev, "SR-IOV:"
|
||||
"Late FW heartbeat update for "
|
||||
"scsi%d.\n",
|
||||
instance->host->host_no);
|
||||
@ -2679,8 +2676,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
|
||||
abs_state = status_reg &
|
||||
MFI_STATE_MASK;
|
||||
if (abs_state == MFI_STATE_READY) {
|
||||
printk(KERN_WARNING "megasas"
|
||||
": SR-IOV: FW was found"
|
||||
dev_warn(&instance->pdev->dev,
|
||||
"SR-IOV: FW was found"
|
||||
"to be in ready state "
|
||||
"for scsi%d.\n",
|
||||
instance->host->host_no);
|
||||
@ -2689,7 +2686,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
|
||||
msleep(20);
|
||||
}
|
||||
if (abs_state != MFI_STATE_READY) {
|
||||
printk(KERN_WARNING "megasas: SR-IOV: "
|
||||
dev_warn(&instance->pdev->dev, "SR-IOV: "
|
||||
"FW not in ready state after %d"
|
||||
" seconds for scsi%d, status_reg = "
|
||||
"0x%x.\n",
|
||||
@ -2731,7 +2728,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
|
||||
host_diag =
|
||||
readl(&instance->reg_set->fusion_host_diag);
|
||||
if (retry++ == 100) {
|
||||
printk(KERN_WARNING "megaraid_sas: "
|
||||
dev_warn(&instance->pdev->dev,
|
||||
"Host diag unlock failed! "
|
||||
"for scsi%d\n",
|
||||
instance->host->host_no);
|
||||
@ -2754,7 +2751,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
|
||||
host_diag =
|
||||
readl(&instance->reg_set->fusion_host_diag);
|
||||
if (retry++ == 1000) {
|
||||
printk(KERN_WARNING "megaraid_sas: "
|
||||
dev_warn(&instance->pdev->dev,
|
||||
"Diag reset adapter never "
|
||||
"cleared for scsi%d!\n",
|
||||
instance->host->host_no);
|
||||
@ -2777,7 +2774,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
|
||||
instance->reg_set) & MFI_STATE_MASK;
|
||||
}
|
||||
if (abs_state <= MFI_STATE_FW_INIT) {
|
||||
printk(KERN_WARNING "megaraid_sas: firmware "
|
||||
dev_warn(&instance->pdev->dev, "firmware "
|
||||
"state < MFI_STATE_FW_INIT, state = "
|
||||
"0x%x for scsi%d\n", abs_state,
|
||||
instance->host->host_no);
|
||||
@ -2786,7 +2783,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
|
||||
|
||||
/* Wait for FW to become ready */
|
||||
if (megasas_transition_to_ready(instance, 1)) {
|
||||
printk(KERN_WARNING "megaraid_sas: Failed to "
|
||||
dev_warn(&instance->pdev->dev, "Failed to "
|
||||
"transition controller to ready "
|
||||
"for scsi%d.\n",
|
||||
instance->host->host_no);
|
||||
@ -2795,7 +2792,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
|
||||
|
||||
megasas_reset_reply_desc(instance);
|
||||
if (megasas_ioc_init_fusion(instance)) {
|
||||
printk(KERN_WARNING "megaraid_sas: "
|
||||
dev_warn(&instance->pdev->dev,
|
||||
"megasas_ioc_init_fusion() failed!"
|
||||
" for scsi%d\n",
|
||||
instance->host->host_no);
|
||||
@ -2836,7 +2833,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
|
||||
}
|
||||
|
||||
/* Adapter reset completed successfully */
|
||||
printk(KERN_WARNING "megaraid_sas: Reset "
|
||||
dev_warn(&instance->pdev->dev, "Reset "
|
||||
"successful for scsi%d.\n",
|
||||
instance->host->host_no);
|
||||
|
||||
@ -2852,7 +2849,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
|
||||
goto out;
|
||||
}
|
||||
/* Reset failed, kill the adapter */
|
||||
printk(KERN_WARNING "megaraid_sas: Reset failed, killing "
|
||||
dev_warn(&instance->pdev->dev, "Reset failed, killing "
|
||||
"adapter scsi%d.\n", instance->host->host_no);
|
||||
megaraid_sas_kill_hba(instance);
|
||||
instance->skip_heartbeat_timer_del = 1;
|
||||
|
@@ -1557,7 +1557,8 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
goto out_fail;
}

for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
(!memap_sz || !pio_sz); i++) {
if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
if (pio_sz)
continue;
@@ -1572,16 +1573,17 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
chip_phys = (u64)ioc->chip_phys;
memap_sz = pci_resource_len(pdev, i);
ioc->chip = ioremap(ioc->chip_phys, memap_sz);
if (ioc->chip == NULL) {
printk(MPT2SAS_ERR_FMT "unable to map "
"adapter memory!\n", ioc->name);
r = -EINVAL;
goto out_fail;
}
}
}
}

if (ioc->chip == NULL) {
printk(MPT2SAS_ERR_FMT "unable to map adapter memory! "
"or resource not found\n", ioc->name);
r = -EINVAL;
goto out_fail;
}

_base_mask_interrupts(ioc);

r = _base_get_ioc_facts(ioc, CAN_SLEEP);

@@ -1843,7 +1843,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
goto out_fail;
}

for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
(!memap_sz || !pio_sz); i++) {
if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
if (pio_sz)
continue;
@@ -1856,15 +1857,16 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
chip_phys = (u64)ioc->chip_phys;
memap_sz = pci_resource_len(pdev, i);
ioc->chip = ioremap(ioc->chip_phys, memap_sz);
if (ioc->chip == NULL) {
pr_err(MPT3SAS_FMT "unable to map adapter memory!\n",
ioc->name);
r = -EINVAL;
goto out_fail;
}
}
}

if (ioc->chip == NULL) {
pr_err(MPT3SAS_FMT "unable to map adapter memory! "
" or resource not found\n", ioc->name);
r = -EINVAL;
goto out_fail;
}

_base_mask_interrupts(ioc);

r = _base_get_ioc_facts(ioc, CAN_SLEEP);

@@ -338,8 +338,11 @@ int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)

res_start = pci_resource_start(pdev, bar);
res_len = pci_resource_len(pdev, bar);
if (!res_start || !res_len)
if (!res_start || !res_len) {
iounmap(mvi->regs_ex);
mvi->regs_ex = NULL;
goto err_out;
}

res_flag = pci_resource_flags(pdev, bar);
if (res_flag & IORESOURCE_CACHEABLE)

@@ -49,13 +49,15 @@ enum chip_flavors {
chip_8019,
chip_8074,
chip_8076,
chip_8077
chip_8077,
chip_8006,
};

enum phy_speed {
PHY_SPEED_15 = 0x01,
PHY_SPEED_30 = 0x02,
PHY_SPEED_60 = 0x04,
PHY_SPEED_120 = 0x08,
};

enum data_direction {

@@ -3263,6 +3263,10 @@ void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
struct sas_phy *sas_phy = phy->sas_phy.phy;

switch (link_rate) {
case PHY_SPEED_120:
phy->sas_phy.linkrate = SAS_LINK_RATE_12_0_GBPS;
phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_12_0_GBPS;
break;
case PHY_SPEED_60:
phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;

@@ -57,6 +57,7 @@ static const struct pm8001_chip_info pm8001_chips[] = {
[chip_8074] = {0, 8, &pm8001_80xx_dispatch,},
[chip_8076] = {0, 16, &pm8001_80xx_dispatch,},
[chip_8077] = {0, 16, &pm8001_80xx_dispatch,},
[chip_8006] = {0, 16, &pm8001_80xx_dispatch,},
};
static int pm8001_id;

@@ -1107,6 +1108,8 @@ err_out_enable:
*/
static struct pci_device_id pm8001_pci_table[] = {
{ PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
{ PCI_VDEVICE(PMC_Sierra, 0x8006), chip_8006 },
{ PCI_VDEVICE(ADAPTEC2, 0x8006), chip_8006 },
{ PCI_VDEVICE(ATTO, 0x0042), chip_8001 },
/* Support for SPC/SPCv/SPCve controllers */
{ PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },
@@ -1217,7 +1220,7 @@ MODULE_AUTHOR("Anand Kumar Santhanam <AnandKumar.Santhanam@pmcs.com>");
MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>");
MODULE_AUTHOR("Nikith Ganigarakoppal <Nikith.Ganigarakoppal@pmcs.com>");
MODULE_DESCRIPTION(
"PMC-Sierra PM8001/8081/8088/8089/8074/8076/8077 "
"PMC-Sierra PM8001/8006/8081/8088/8089/8074/8076/8077 "
"SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

@@ -790,6 +790,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
ccb->device = pm8001_dev;
ccb->ccb_tag = ccb_tag;
ccb->task = task;
ccb->n_elem = 0;

res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
pm8001_dev, flag, task_tag, ccb_tag);
@@ -975,19 +976,27 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
phy = sas_get_local_phy(dev);

if (dev_is_sata(dev)) {
DECLARE_COMPLETION_ONSTACK(completion_setstate);
if (scsi_is_sas_phy_local(phy)) {
rc = 0;
goto out;
}
rc = sas_phy_reset(phy, 1);
if (rc) {
PM8001_EH_DBG(pm8001_ha,
pm8001_printk("phy reset failed for device %x\n"
"with rc %d\n", pm8001_dev->device_id, rc));
rc = TMF_RESP_FUNC_FAILED;
goto out;
}
msleep(2000);
rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
dev, 1, 0);
pm8001_dev->setds_completion = &completion_setstate;
rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
pm8001_dev, 0x01);
wait_for_completion(&completion_setstate);
if (rc) {
PM8001_EH_DBG(pm8001_ha,
pm8001_printk("task abort failed %x\n"
"with rc %d\n", pm8001_dev->device_id, rc));
rc = TMF_RESP_FUNC_FAILED;
}
} else {
rc = sas_phy_reset(phy, 1);
msleep(2000);

@@ -58,7 +58,7 @@
#include "pm8001_defs.h"

#define DRV_NAME "pm80xx"
#define DRV_VERSION "0.1.37"
#define DRV_VERSION "0.1.38"
#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */
#define PM8001_INIT_LOGGING 0x02 /* driver init logging */
#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */
@@ -241,7 +241,7 @@ struct pm8001_chip_info {
struct pm8001_port {
struct asd_sas_port sas_port;
u8 port_attached;
u8 wide_port_phymap;
u16 wide_port_phymap;
u8 port_state;
struct list_head list;
};
@@ -569,6 +569,14 @@ struct pm8001_fw_image_header {
#define NCQ_READ_LOG_FLAG 0x80000000
#define NCQ_ABORT_ALL_FLAG 0x40000000
#define NCQ_2ND_RLE_FLAG 0x20000000

/* Device states */
#define DS_OPERATIONAL 0x01
#define DS_PORT_IN_RESET 0x02
#define DS_IN_RECOVERY 0x03
#define DS_IN_ERROR 0x04
#define DS_NON_OPERATIONAL 0x07

/**
* brief param structure for firmware flash update.
*/

@ -309,6 +309,9 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
|
||||
pm8001_mr32(address, MAIN_INT_VECTOR_TABLE_OFFSET);
|
||||
pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset =
|
||||
pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET);
|
||||
/* read port recover and reset timeout */
|
||||
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer =
|
||||
pm8001_mr32(address, MAIN_PORT_RECOVERY_TIMER);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -585,6 +588,12 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
|
||||
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
|
||||
pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY,
|
||||
pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay);
|
||||
|
||||
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &= 0xffff0000;
|
||||
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |=
|
||||
PORT_RECOVERY_TIMEOUT;
|
||||
pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
|
||||
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -843,6 +852,7 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
|
||||
int rc;
|
||||
u32 tag;
|
||||
u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
|
||||
u32 page_code;
|
||||
|
||||
memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
|
||||
rc = pm8001_tag_alloc(pm8001_ha, &tag);
|
||||
@ -851,8 +861,14 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
|
||||
|
||||
circularQ = &pm8001_ha->inbnd_q_tbl[0];
|
||||
payload.tag = cpu_to_le32(tag);
|
||||
|
||||
if (IS_SPCV_12G(pm8001_ha->pdev))
|
||||
page_code = THERMAL_PAGE_CODE_7H;
|
||||
else
|
||||
page_code = THERMAL_PAGE_CODE_8H;
|
||||
|
||||
payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) |
|
||||
(THERMAL_ENABLE << 8) | THERMAL_OP_CODE;
|
||||
(THERMAL_ENABLE << 8) | page_code;
|
||||
payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
|
||||
|
||||
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
|
||||
@ -1593,6 +1609,13 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
|
||||
ts->stat = SAS_OPEN_REJECT;
|
||||
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
|
||||
break;
|
||||
case IO_XFER_ERROR_INVALID_SSP_RSP_FRAME:
|
||||
PM8001_IO_DBG(pm8001_ha,
|
||||
pm8001_printk("IO_XFER_ERROR_INVALID_SSP_RSP_FRAME\n"));
|
||||
ts->resp = SAS_TASK_COMPLETE;
|
||||
ts->stat = SAS_OPEN_REJECT;
|
||||
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
|
||||
break;
|
||||
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
|
||||
PM8001_IO_DBG(pm8001_ha,
|
||||
pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
|
||||
@ -2829,6 +2852,32 @@ static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
|
||||
static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
|
||||
u32 phyId, u32 phy_op);
|
||||
|
||||
static void hw_event_port_recover(struct pm8001_hba_info *pm8001_ha,
|
||||
void *piomb)
|
||||
{
|
||||
struct hw_event_resp *pPayload = (struct hw_event_resp *)(piomb + 4);
|
||||
u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
|
||||
u8 phy_id = (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
|
||||
u32 lr_status_evt_portid =
|
||||
le32_to_cpu(pPayload->lr_status_evt_portid);
|
||||
u8 deviceType = pPayload->sas_identify.dev_type;
|
||||
u8 link_rate = (u8)((lr_status_evt_portid & 0xF0000000) >> 28);
|
||||
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
|
||||
u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
|
||||
struct pm8001_port *port = &pm8001_ha->port[port_id];
|
||||
|
||||
if (deviceType == SAS_END_DEVICE) {
|
||||
pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id,
|
||||
PHY_NOTIFY_ENABLE_SPINUP);
|
||||
}
|
||||
|
||||
port->wide_port_phymap |= (1U << phy_id);
|
||||
pm8001_get_lrate_mode(phy, link_rate);
|
||||
phy->sas_phy.oob_mode = SAS_OOB_MODE;
|
||||
phy->phy_state = PHY_STATE_LINK_UP_SPCV;
|
||||
phy->phy_attached = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* hw_event_sas_phy_up -FW tells me a SAS phy up event.
|
||||
* @pm8001_ha: our hba card information
|
||||
@ -2856,6 +2905,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
|
||||
unsigned long flags;
|
||||
u8 deviceType = pPayload->sas_identify.dev_type;
|
||||
port->port_state = portstate;
|
||||
port->wide_port_phymap |= (1U << phy_id);
|
||||
phy->phy_state = PHY_STATE_LINK_UP_SPCV;
|
||||
PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
|
||||
"portid:%d; phyid:%d; linkrate:%d; "
|
||||
@ -2981,7 +3031,6 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
|
||||
struct pm8001_port *port = &pm8001_ha->port[port_id];
|
||||
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
|
||||
port->port_state = portstate;
|
||||
phy->phy_type = 0;
|
||||
phy->identify.device_type = 0;
|
||||
phy->phy_attached = 0;
|
||||
memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
|
||||
@ -2993,9 +3042,13 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
|
||||
pm8001_printk(" PortInvalid portID %d\n", port_id));
|
||||
PM8001_MSG_DBG(pm8001_ha,
|
||||
pm8001_printk(" Last phy Down and port invalid\n"));
|
||||
port->port_attached = 0;
|
||||
pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
|
||||
port_id, phy_id, 0, 0);
|
||||
if (phy->phy_type & PORT_TYPE_SATA) {
|
||||
phy->phy_type = 0;
|
||||
port->port_attached = 0;
|
||||
pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
|
||||
port_id, phy_id, 0, 0);
|
||||
}
|
||||
sas_phy_disconnected(&phy->sas_phy);
|
||||
break;
|
||||
case PORT_IN_RESET:
|
||||
PM8001_MSG_DBG(pm8001_ha,
|
||||
@ -3003,22 +3056,26 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
|
||||
break;
|
||||
case PORT_NOT_ESTABLISHED:
|
||||
PM8001_MSG_DBG(pm8001_ha,
|
||||
pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
|
||||
pm8001_printk(" Phy Down and PORT_NOT_ESTABLISHED\n"));
|
||||
port->port_attached = 0;
|
||||
break;
|
||||
case PORT_LOSTCOMM:
|
||||
PM8001_MSG_DBG(pm8001_ha,
|
||||
pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
|
||||
pm8001_printk(" Phy Down and PORT_LOSTCOMM\n"));
|
||||
PM8001_MSG_DBG(pm8001_ha,
|
||||
pm8001_printk(" Last phy Down and port invalid\n"));
|
||||
port->port_attached = 0;
|
||||
pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
|
||||
port_id, phy_id, 0, 0);
|
||||
if (phy->phy_type & PORT_TYPE_SATA) {
|
||||
port->port_attached = 0;
|
||||
phy->phy_type = 0;
|
||||
pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
|
||||
port_id, phy_id, 0, 0);
|
||||
}
|
||||
sas_phy_disconnected(&phy->sas_phy);
|
||||
break;
|
||||
default:
|
||||
port->port_attached = 0;
|
||||
PM8001_MSG_DBG(pm8001_ha,
|
||||
pm8001_printk(" phy Down and(default) = 0x%x\n",
|
||||
pm8001_printk(" Phy Down and(default) = 0x%x\n",
|
||||
portstate));
|
||||
break;
|
||||
|
||||
@ -3084,7 +3141,7 @@ static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
|
||||
*/
|
||||
static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
|
||||
{
|
||||
unsigned long flags;
|
||||
unsigned long flags, i;
|
||||
struct hw_event_resp *pPayload =
|
||||
(struct hw_event_resp *)(piomb + 4);
|
||||
u32 lr_status_evt_portid =
|
||||
@ -3097,9 +3154,9 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
|
||||
(u16)((lr_status_evt_portid & 0x00FFFF00) >> 8);
|
||||
u8 status =
|
||||
(u8)((lr_status_evt_portid & 0x0F000000) >> 24);
|
||||
|
||||
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
|
||||
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
|
||||
struct pm8001_port *port = &pm8001_ha->port[port_id];
|
||||
struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
|
||||
PM8001_MSG_DBG(pm8001_ha,
|
||||
pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n",
|
||||
@ -3125,7 +3182,9 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
|
||||
case HW_EVENT_PHY_DOWN:
|
||||
PM8001_MSG_DBG(pm8001_ha,
|
||||
pm8001_printk("HW_EVENT_PHY_DOWN\n"));
|
||||
sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
|
||||
if (phy->phy_type & PORT_TYPE_SATA)
|
||||
sas_ha->notify_phy_event(&phy->sas_phy,
|
||||
PHYE_LOSS_OF_SIGNAL);
|
||||
phy->phy_attached = 0;
|
||||
phy->phy_state = 0;
|
||||
hw_event_phy_down(pm8001_ha, piomb);
|
||||
@ -3169,9 +3228,6 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
|
||||
pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n"));
|
||||
pm80xx_hw_event_ack_req(pm8001_ha, 0,
|
||||
HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
|
||||
sas_phy_disconnected(sas_phy);
|
||||
phy->phy_attached = 0;
|
||||
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
|
||||
break;
|
||||
case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
|
||||
PM8001_MSG_DBG(pm8001_ha,
|
||||
@ -3179,9 +3235,6 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
|
||||
pm80xx_hw_event_ack_req(pm8001_ha, 0,
|
||||
HW_EVENT_LINK_ERR_DISPARITY_ERROR,
|
||||
port_id, phy_id, 0, 0);
|
||||
sas_phy_disconnected(sas_phy);
|
||||
phy->phy_attached = 0;
|
||||
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
|
||||
break;
|
||||
case HW_EVENT_LINK_ERR_CODE_VIOLATION:
|
||||
PM8001_MSG_DBG(pm8001_ha,
|
||||
@ -3189,9 +3242,6 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
|
||||
pm80xx_hw_event_ack_req(pm8001_ha, 0,
|
||||
HW_EVENT_LINK_ERR_CODE_VIOLATION,
|
||||
port_id, phy_id, 0, 0);
|
||||
sas_phy_disconnected(sas_phy);
|
||||
phy->phy_attached = 0;
|
||||
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
|
||||
break;
|
||||
case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
|
||||
PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
|
||||
@ -3199,9 +3249,6 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
|
||||
pm80xx_hw_event_ack_req(pm8001_ha, 0,
|
||||
HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
|
||||
port_id, phy_id, 0, 0);
|
||||
sas_phy_disconnected(sas_phy);
|
||||
phy->phy_attached = 0;
|
||||
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
|
||||
break;
|
||||
case HW_EVENT_MALFUNCTION:
|
||||
PM8001_MSG_DBG(pm8001_ha,
|
||||
@ -3257,13 +3304,19 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
|
||||
pm80xx_hw_event_ack_req(pm8001_ha, 0,
|
||||
HW_EVENT_PORT_RECOVERY_TIMER_TMO,
|
||||
port_id, phy_id, 0, 0);
|
||||
sas_phy_disconnected(sas_phy);
|
||||
phy->phy_attached = 0;
|
||||
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
|
||||
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
|
||||
if (port->wide_port_phymap & (1 << i)) {
|
||||
phy = &pm8001_ha->phy[i];
|
||||
sas_ha->notify_phy_event(&phy->sas_phy,
|
||||
PHYE_LOSS_OF_SIGNAL);
|
||||
port->wide_port_phymap &= ~(1 << i);
|
||||
}
|
||||
}
|
||||
break;
|
||||
case HW_EVENT_PORT_RECOVER:
|
||||
PM8001_MSG_DBG(pm8001_ha,
|
||||
pm8001_printk("HW_EVENT_PORT_RECOVER\n"));
|
||||
hw_event_port_recover(pm8001_ha, piomb);
|
||||
break;
|
||||
case HW_EVENT_PORT_RESET_COMPLETE:
|
||||
PM8001_MSG_DBG(pm8001_ha,
|
||||
|
@@ -177,7 +177,8 @@
/* Thermal related */
#define THERMAL_ENABLE 0x1
#define THERMAL_LOG_ENABLE 0x1
#define THERMAL_OP_CODE 0x6
#define THERMAL_PAGE_CODE_7H 0x6
#define THERMAL_PAGE_CODE_8H 0x7
#define LTEMPHIL 70
#define RTEMPHIL 100

@@ -1174,7 +1175,7 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define IO_XFER_ERROR_INTERNAL_CRC_ERROR 0x54
#define MPI_IO_RQE_BUSY_FULL 0x55
#define IO_XFER_ERR_EOB_DATA_OVERRUN 0x56
#define IO_XFR_ERROR_INVALID_SSP_RSP_FRAME 0x57
#define IO_XFER_ERROR_INVALID_SSP_RSP_FRAME 0x57
#define IO_OPEN_CNX_ERROR_OPEN_PREEMPTED 0x58

#define MPI_ERR_IO_RESOURCE_UNAVAILABLE 0x1004

@@ -884,7 +884,6 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
int rval;
uint16_t actual_size;

if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
return 0;
@@ -901,7 +900,6 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
}

do_read:
actual_size = 0;
memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);

rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
@@ -1079,8 +1077,7 @@ qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
return scnprintf(buf, PAGE_SIZE, "%s\n",
vha->hw->model_desc ? vha->hw->model_desc : "");
return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
}

static ssize_t
@@ -1348,7 +1345,8 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;

if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
!IS_QLA27XX(ha))
return scnprintf(buf, PAGE_SIZE, "\n");

return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
@@ -1537,6 +1535,20 @@ qla2x00_allow_cna_fw_dump_store(struct device *dev,
return strlen(buf);
}

static ssize_t
qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;

if (!IS_QLA27XX(ha))
return scnprintf(buf, PAGE_SIZE, "\n");

return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
}

static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1581,6 +1593,7 @@ static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
qla2x00_allow_cna_fw_dump_show,
qla2x00_allow_cna_fw_dump_store);
static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);

struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_driver_version,
@@ -1614,6 +1627,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_diag_megabytes,
&dev_attr_fw_dump_size,
&dev_attr_allow_cna_fw_dump,
&dev_attr_pep_version,
NULL,
};

|
||||
|
@@ -405,7 +405,7 @@ done:
return rval;
}

inline uint16_t
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
uint16_t iocbs;
@@ -1733,7 +1733,6 @@ qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
struct Scsi_Host *host = bsg_job->shost;
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
uint16_t thread_id;
uint32_t rval = EXT_STATUS_OK;
uint16_t req_sg_cnt = 0;
uint16_t rsp_sg_cnt = 0;
@@ -1790,8 +1789,6 @@ qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
goto done;
}

thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

mutex_lock(&ha->selflogin_lock);
if (vha->self_login_loop_id == 0) {
/* Initialize all required fields of fcport */
@@ -2174,7 +2171,6 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
{
int ret = -EINVAL;
struct fc_rport *rport;
fc_port_t *fcport = NULL;
struct Scsi_Host *host;
scsi_qla_host_t *vha;

@@ -2183,7 +2179,6 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)

if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
rport = bsg_job->rport;
fcport = *(fc_port_t **) rport->dd_data;
host = rport_to_shost(rport);
vha = shost_priv(host);
} else {

|
@@ -19,14 +19,14 @@
* | Device Discovery | 0x2016 | 0x2020-0x2022, |
* | | | 0x2011-0x2012, |
* | | | 0x2099-0x20a4 |
* | Queue Command and IO tracing | 0x3059 | 0x300b |
* | Queue Command and IO tracing | 0x3075 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
* | | | 0x302d,0x3033 |
* | | | 0x3036,0x3038 |
* | | | 0x303a |
* | DPC Thread | 0x4023 | 0x4002,0x4013 |
* | Async Events | 0x5087 | 0x502b-0x502f |
* | Async Events | 0x508a | 0x502b-0x502f |
* | | | 0x5047 |
* | | | 0x5084,0x5075 |
* | | | 0x503d,0x5044 |

@ -117,7 +117,7 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
|
||||
{
|
||||
int rval;
|
||||
uint32_t cnt, stat, timer, dwords, idx;
|
||||
uint16_t mb0, mb1;
|
||||
uint16_t mb0;
|
||||
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
|
||||
dma_addr_t dump_dma = ha->gid_list_dma;
|
||||
uint32_t *dump = (uint32_t *)ha->gid_list;
|
||||
@ -161,7 +161,7 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
|
||||
&ha->mbx_cmd_flags);
|
||||
|
||||
mb0 = RD_REG_WORD(®->mailbox0);
|
||||
mb1 = RD_REG_WORD(®->mailbox1);
|
||||
RD_REG_WORD(®->mailbox1);
|
||||
|
||||
WRT_REG_DWORD(®->hccr,
|
||||
HCCRX_CLR_RISC_INT);
|
||||
@ -486,7 +486,7 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
|
||||
return ptr;
|
||||
|
||||
*last_chain = &fcec->type;
|
||||
fcec->type = __constant_htonl(DUMP_CHAIN_FCE);
|
||||
fcec->type = htonl(DUMP_CHAIN_FCE);
|
||||
fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
|
||||
fce_calc_size(ha->fce_bufs));
|
||||
fcec->size = htonl(fce_calc_size(ha->fce_bufs));
|
||||
@ -527,7 +527,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
|
||||
/* aqp = ha->atio_q_map[que]; */
|
||||
q = ptr;
|
||||
*last_chain = &q->type;
|
||||
q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
|
||||
q->type = htonl(DUMP_CHAIN_QUEUE);
|
||||
q->chain_size = htonl(
|
||||
sizeof(struct qla2xxx_mqueue_chain) +
|
||||
sizeof(struct qla2xxx_mqueue_header) +
|
||||
@ -536,7 +536,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
|
||||
|
||||
/* Add header. */
|
||||
qh = ptr;
|
||||
qh->queue = __constant_htonl(TYPE_ATIO_QUEUE);
|
||||
qh->queue = htonl(TYPE_ATIO_QUEUE);
|
||||
qh->number = htonl(que);
|
||||
qh->size = htonl(aqp->length * sizeof(request_t));
|
||||
ptr += sizeof(struct qla2xxx_mqueue_header);
|
||||
@ -571,7 +571,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
|
||||
/* Add chain. */
|
||||
q = ptr;
|
||||
*last_chain = &q->type;
|
||||
q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
|
||||
q->type = htonl(DUMP_CHAIN_QUEUE);
|
||||
q->chain_size = htonl(
|
||||
sizeof(struct qla2xxx_mqueue_chain) +
|
||||
sizeof(struct qla2xxx_mqueue_header) +
|
||||
@ -580,7 +580,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
|
||||
|
||||
/* Add header. */
|
||||
qh = ptr;
|
||||
qh->queue = __constant_htonl(TYPE_REQUEST_QUEUE);
|
||||
qh->queue = htonl(TYPE_REQUEST_QUEUE);
|
||||
qh->number = htonl(que);
|
||||
qh->size = htonl(req->length * sizeof(request_t));
|
||||
ptr += sizeof(struct qla2xxx_mqueue_header);
|
||||
@ -599,7 +599,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
|
||||
/* Add chain. */
|
||||
q = ptr;
|
||||
*last_chain = &q->type;
|
||||
q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
|
||||
q->type = htonl(DUMP_CHAIN_QUEUE);
|
||||
q->chain_size = htonl(
|
||||
sizeof(struct qla2xxx_mqueue_chain) +
|
||||
sizeof(struct qla2xxx_mqueue_header) +
|
||||
@ -608,7 +608,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
|
||||
|
||||
/* Add header. */
|
||||
qh = ptr;
|
||||
qh->queue = __constant_htonl(TYPE_RESPONSE_QUEUE);
|
||||
qh->queue = htonl(TYPE_RESPONSE_QUEUE);
|
||||
qh->number = htonl(que);
|
||||
qh->size = htonl(rsp->length * sizeof(response_t));
|
||||
ptr += sizeof(struct qla2xxx_mqueue_header);
|
||||
@ -627,15 +627,15 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
|
||||
uint32_t cnt, que_idx;
|
||||
uint8_t que_cnt;
|
||||
struct qla2xxx_mq_chain *mq = ptr;
|
||||
device_reg_t __iomem *reg;
|
||||
device_reg_t *reg;
|
||||
|
||||
if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
|
||||
return ptr;
|
||||
|
||||
mq = ptr;
|
||||
*last_chain = &mq->type;
|
||||
mq->type = __constant_htonl(DUMP_CHAIN_MQ);
|
||||
mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
|
||||
mq->type = htonl(DUMP_CHAIN_MQ);
|
||||
mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));
|
||||
|
||||
que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
|
||||
ha->max_req_queues : ha->max_rsp_queues;
|
||||
@ -695,8 +695,10 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
|
||||
flags = 0;
|
||||
|
||||
#ifndef __CHECKER__
|
||||
if (!hardware_locked)
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
#endif
|
||||
|
||||
if (!ha->fw_dump) {
|
||||
ql_log(ql_log_warn, vha, 0xd002,
|
||||
@ -832,8 +834,12 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
qla2xxx_dump_post_process(base_vha, rval);
|
||||
|
||||
qla2300_fw_dump_failed:
|
||||
#ifndef __CHECKER__
|
||||
if (!hardware_locked)
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
#else
|
||||
;
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
@ -859,8 +865,10 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
mb0 = mb2 = 0;
|
||||
flags = 0;
|
||||
|
||||
#ifndef __CHECKER__
|
||||
if (!hardware_locked)
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
#endif
|
||||
|
||||
if (!ha->fw_dump) {
|
||||
ql_log(ql_log_warn, vha, 0xd004,
|
||||
@ -1030,8 +1038,12 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
qla2xxx_dump_post_process(base_vha, rval);
|
||||
|
||||
qla2100_fw_dump_failed:
|
||||
#ifndef __CHECKER__
|
||||
if (!hardware_locked)
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
#else
|
||||
;
|
||||
#endif
|
||||
}
|
||||
|
||||
void
|
||||
@ -1039,7 +1051,6 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
{
|
||||
int rval;
|
||||
uint32_t cnt;
|
||||
uint32_t risc_address;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
|
||||
uint32_t __iomem *dmp_reg;
|
||||
@ -1047,7 +1058,6 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
uint16_t __iomem *mbx_reg;
|
||||
unsigned long flags;
|
||||
struct qla24xx_fw_dump *fw;
|
||||
uint32_t ext_mem_cnt;
|
||||
void *nxt;
|
||||
void *nxt_chain;
|
||||
uint32_t *last_chain = NULL;
|
||||
@ -1056,12 +1066,13 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
if (IS_P3P_TYPE(ha))
|
||||
return;
|
||||
|
||||
risc_address = ext_mem_cnt = 0;
|
||||
flags = 0;
|
||||
ha->fw_dump_cap_flags = 0;
|
||||
|
||||
#ifndef __CHECKER__
|
||||
if (!hardware_locked)
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
#endif
|
||||
|
||||
if (!ha->fw_dump) {
|
||||
ql_log(ql_log_warn, vha, 0xd006,
|
||||
@ -1274,8 +1285,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
|
||||
nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
|
||||
if (last_chain) {
|
||||
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
|
||||
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
|
||||
ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
|
||||
*last_chain |= htonl(DUMP_CHAIN_LAST);
|
||||
}
|
||||
|
||||
/* Adjust valid length. */
|
||||
@ -1285,8 +1296,12 @@ qla24xx_fw_dump_failed_0:
|
||||
qla2xxx_dump_post_process(base_vha, rval);
|
||||
|
||||
qla24xx_fw_dump_failed:
|
||||
#ifndef __CHECKER__
|
||||
if (!hardware_locked)
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
#else
|
||||
;
|
||||
#endif
|
||||
}
|
||||
|
||||
void
|
||||
@ -1294,7 +1309,6 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
{
|
||||
int rval;
|
||||
uint32_t cnt;
|
||||
uint32_t risc_address;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
|
||||
uint32_t __iomem *dmp_reg;
|
||||
@ -1302,17 +1316,17 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
uint16_t __iomem *mbx_reg;
|
||||
unsigned long flags;
|
||||
struct qla25xx_fw_dump *fw;
|
||||
uint32_t ext_mem_cnt;
|
||||
void *nxt, *nxt_chain;
|
||||
uint32_t *last_chain = NULL;
|
||||
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
|
||||
|
||||
risc_address = ext_mem_cnt = 0;
|
||||
flags = 0;
|
||||
ha->fw_dump_cap_flags = 0;
|
||||
|
||||
#ifndef __CHECKER__
|
||||
if (!hardware_locked)
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
#endif
|
||||
|
||||
if (!ha->fw_dump) {
|
||||
ql_log(ql_log_warn, vha, 0xd008,
|
||||
@ -1329,7 +1343,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
}
|
||||
fw = &ha->fw_dump->isp.isp25;
|
||||
qla2xxx_prep_dump(ha, ha->fw_dump);
|
||||
ha->fw_dump->version = __constant_htonl(2);
|
||||
ha->fw_dump->version = htonl(2);
|
||||
|
||||
fw->host_status = htonl(RD_REG_DWORD(®->host_status));
|
||||
|
||||
@ -1593,8 +1607,8 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
|
||||
nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
|
||||
if (last_chain) {
|
||||
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
|
||||
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
|
||||
ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
|
||||
*last_chain |= htonl(DUMP_CHAIN_LAST);
|
||||
}
|
||||
|
||||
/* Adjust valid length. */
|
||||
@ -1604,8 +1618,12 @@ qla25xx_fw_dump_failed_0:
|
||||
qla2xxx_dump_post_process(base_vha, rval);
|
||||
|
||||
qla25xx_fw_dump_failed:
|
||||
#ifndef __CHECKER__
|
||||
if (!hardware_locked)
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
#else
|
||||
;
|
||||
#endif
|
||||
}
|
||||
|
||||
void
|
||||
@ -1613,7 +1631,6 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
{
|
||||
int rval;
|
||||
uint32_t cnt;
|
||||
uint32_t risc_address;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
|
||||
uint32_t __iomem *dmp_reg;
|
||||
@ -1621,17 +1638,17 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
uint16_t __iomem *mbx_reg;
|
||||
unsigned long flags;
|
||||
struct qla81xx_fw_dump *fw;
|
||||
uint32_t ext_mem_cnt;
|
||||
void *nxt, *nxt_chain;
|
||||
uint32_t *last_chain = NULL;
|
||||
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
|
||||
|
||||
risc_address = ext_mem_cnt = 0;
|
||||
flags = 0;
|
||||
ha->fw_dump_cap_flags = 0;
|
||||
|
||||
#ifndef __CHECKER__
|
||||
if (!hardware_locked)
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
#endif
|
||||
|
||||
if (!ha->fw_dump) {
|
||||
ql_log(ql_log_warn, vha, 0xd00a,
|
||||
@ -1914,8 +1931,8 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
|
||||
nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
|
||||
if (last_chain) {
|
||||
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
|
||||
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
|
||||
ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
|
||||
*last_chain |= htonl(DUMP_CHAIN_LAST);
|
||||
}
|
||||
|
||||
/* Adjust valid length. */
|
||||
@ -1925,16 +1942,19 @@ qla81xx_fw_dump_failed_0:
|
||||
qla2xxx_dump_post_process(base_vha, rval);
|
||||
|
||||
qla81xx_fw_dump_failed:
|
||||
#ifndef __CHECKER__
|
||||
if (!hardware_locked)
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
#else
|
||||
;
|
||||
#endif
|
||||
}
|
||||
|
||||
void
|
||||
qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
{
|
||||
int rval;
|
||||
uint32_t cnt, reg_data;
|
||||
uint32_t risc_address;
|
||||
uint32_t cnt;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
|
||||
uint32_t __iomem *dmp_reg;
|
||||
@ -1942,17 +1962,17 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
uint16_t __iomem *mbx_reg;
|
||||
unsigned long flags;
|
||||
struct qla83xx_fw_dump *fw;
|
||||
uint32_t ext_mem_cnt;
|
||||
void *nxt, *nxt_chain;
|
||||
uint32_t *last_chain = NULL;
|
||||
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
|
||||
|
||||
risc_address = ext_mem_cnt = 0;
|
||||
flags = 0;
|
||||
ha->fw_dump_cap_flags = 0;
|
||||
|
||||
#ifndef __CHECKER__
|
||||
if (!hardware_locked)
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
#endif
|
||||
|
||||
if (!ha->fw_dump) {
|
||||
ql_log(ql_log_warn, vha, 0xd00c,
|
||||
@ -1979,16 +1999,16 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
|
||||
|
||||
WRT_REG_DWORD(®->iobase_addr, 0x6000);
|
||||
dmp_reg = ®->iobase_window;
|
||||
reg_data = RD_REG_DWORD(dmp_reg);
|
||||
RD_REG_DWORD(dmp_reg);
|
||||
WRT_REG_DWORD(dmp_reg, 0);
|
||||
|
||||
dmp_reg = ®->unused_4_1[0];
|
||||
reg_data = RD_REG_DWORD(dmp_reg);
|
||||
RD_REG_DWORD(dmp_reg);
|
||||
WRT_REG_DWORD(dmp_reg, 0);
|
||||
|
||||
WRT_REG_DWORD(®->iobase_addr, 0x6010);
|
||||
dmp_reg = ®->unused_4_1[2];
|
||||
reg_data = RD_REG_DWORD(dmp_reg);
|
||||
RD_REG_DWORD(dmp_reg);
|
||||
WRT_REG_DWORD(dmp_reg, 0);
|
||||
|
||||
/* select PCR and disable ecc checking and correction */
|
||||
@ -2420,8 +2440,8 @@ copy_queue:
|
||||
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
|
||||
nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
|
||||
if (last_chain) {
|
||||
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
|
||||
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
|
||||
ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
|
||||
*last_chain |= htonl(DUMP_CHAIN_LAST);
|
||||
}
|
||||
|
||||
/* Adjust valid length. */
|
||||
@ -2431,8 +2451,12 @@ qla83xx_fw_dump_failed_0:
|
||||
qla2xxx_dump_post_process(base_vha, rval);
|
||||
|
||||
qla83xx_fw_dump_failed:
|
||||
#ifndef __CHECKER__
|
||||
if (!hardware_locked)
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
#else
|
||||
;
|
||||
#endif
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
|
@ -3061,6 +3061,7 @@ struct qla_hw_data {
|
||||
#define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031
|
||||
#define PCI_DEVICE_ID_QLOGIC_ISP2071 0x2071
|
||||
#define PCI_DEVICE_ID_QLOGIC_ISP2271 0x2271
|
||||
#define PCI_DEVICE_ID_QLOGIC_ISP2261 0x2261
|
||||
|
||||
uint32_t device_type;
|
||||
#define DT_ISP2100 BIT_0
|
||||
@ -3084,7 +3085,8 @@ struct qla_hw_data {
|
||||
#define DT_ISP8044 BIT_18
|
||||
#define DT_ISP2071 BIT_19
|
||||
#define DT_ISP2271 BIT_20
|
||||
#define DT_ISP_LAST (DT_ISP2271 << 1)
|
||||
#define DT_ISP2261 BIT_21
|
||||
#define DT_ISP_LAST (DT_ISP2261 << 1)
|
||||
|
||||
#define DT_T10_PI BIT_25
|
||||
#define DT_IIDMA BIT_26
|
||||
@ -3116,6 +3118,7 @@ struct qla_hw_data {
|
||||
#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00)
|
||||
#define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071)
|
||||
#define IS_QLA2271(ha) (DT_MASK(ha) & DT_ISP2271)
|
||||
#define IS_QLA2261(ha) (DT_MASK(ha) & DT_ISP2261)
|
||||
|
||||
#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
|
||||
IS_QLA6312(ha) || IS_QLA6322(ha))
|
||||
@ -3124,7 +3127,7 @@ struct qla_hw_data {
|
||||
#define IS_QLA25XX(ha) (IS_QLA2532(ha))
|
||||
#define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha))
|
||||
#define IS_QLA84XX(ha) (IS_QLA8432(ha))
|
||||
#define IS_QLA27XX(ha) (IS_QLA2071(ha) || IS_QLA2271(ha))
|
||||
#define IS_QLA27XX(ha) (IS_QLA2071(ha) || IS_QLA2271(ha) || IS_QLA2261(ha))
|
||||
#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
|
||||
IS_QLA84XX(ha))
|
||||
#define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
|
||||
@ -3166,6 +3169,7 @@ struct qla_hw_data {
|
||||
#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
|
||||
#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha))
|
||||
#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
|
||||
#define IS_FAWWN_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
|
||||
|
||||
/* HBA serial number */
|
||||
uint8_t serial0;
|
||||
@ -3288,6 +3292,7 @@ struct qla_hw_data {
|
||||
uint8_t mpi_version[3];
|
||||
uint32_t mpi_capabilities;
|
||||
uint8_t phy_version[3];
|
||||
uint8_t pep_version[3];
|
||||
|
||||
/* Firmware dump template */
|
||||
void *fw_dump_template;
|
||||
@ -3420,9 +3425,9 @@ struct qla_hw_data {
|
||||
mempool_t *ctx_mempool;
|
||||
#define FCP_CMND_DMA_POOL_SIZE 512
|
||||
|
||||
unsigned long nx_pcibase; /* Base I/O address */
|
||||
uint8_t *nxdb_rd_ptr; /* Doorbell read pointer */
|
||||
unsigned long nxdb_wr_ptr; /* Door bell write pointer */
|
||||
void __iomem *nx_pcibase; /* Base I/O address */
|
||||
void __iomem *nxdb_rd_ptr; /* Doorbell read pointer */
|
||||
void __iomem *nxdb_wr_ptr; /* Door bell write pointer */
|
||||
|
||||
uint32_t crb_win;
|
||||
uint32_t curr_window;
|
||||
|
@ -35,10 +35,10 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
|
||||
ms_pkt->entry_type = MS_IOCB_TYPE;
|
||||
ms_pkt->entry_count = 1;
|
||||
SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
|
||||
ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
|
||||
ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
|
||||
ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
|
||||
ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
|
||||
ms_pkt->total_dsd_count = __constant_cpu_to_le16(2);
|
||||
ms_pkt->cmd_dsd_count = cpu_to_le16(1);
|
||||
ms_pkt->total_dsd_count = cpu_to_le16(2);
|
||||
ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
|
||||
ms_pkt->req_bytecount = cpu_to_le32(req_size);
|
||||
|
||||
@ -74,10 +74,10 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
|
||||
|
||||
ct_pkt->entry_type = CT_IOCB_TYPE;
|
||||
ct_pkt->entry_count = 1;
|
||||
ct_pkt->nport_handle = __constant_cpu_to_le16(NPH_SNS);
|
||||
ct_pkt->nport_handle = cpu_to_le16(NPH_SNS);
|
||||
ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
|
||||
ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
|
||||
ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
|
||||
ct_pkt->cmd_dsd_count = cpu_to_le16(1);
|
||||
ct_pkt->rsp_dsd_count = cpu_to_le16(1);
|
||||
ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
|
||||
ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
|
||||
|
||||
@ -142,7 +142,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
|
||||
case CS_DATA_UNDERRUN:
|
||||
case CS_DATA_OVERRUN: /* Overrun? */
|
||||
if (ct_rsp->header.response !=
|
||||
__constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
|
||||
cpu_to_be16(CT_ACCEPT_RESPONSE)) {
|
||||
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
|
||||
"%s failed rejected request on port_id: %02x%02x%02x Compeltion status 0x%x, response 0x%x\n",
|
||||
routine, vha->d_id.b.domain,
|
||||
@ -1153,10 +1153,10 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
|
||||
ms_pkt->entry_type = MS_IOCB_TYPE;
|
||||
ms_pkt->entry_count = 1;
|
||||
SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
|
||||
ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
|
||||
ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
|
||||
ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
|
||||
ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
|
||||
ms_pkt->total_dsd_count = __constant_cpu_to_le16(2);
|
||||
ms_pkt->cmd_dsd_count = cpu_to_le16(1);
|
||||
ms_pkt->total_dsd_count = cpu_to_le16(2);
|
||||
ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
|
||||
ms_pkt->req_bytecount = cpu_to_le32(req_size);
|
||||
|
||||
@ -1193,8 +1193,8 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
|
||||
ct_pkt->entry_count = 1;
|
||||
ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
|
||||
ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
|
||||
ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
|
||||
ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
|
||||
ct_pkt->cmd_dsd_count = cpu_to_le16(1);
|
||||
ct_pkt->rsp_dsd_count = cpu_to_le16(1);
|
||||
ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
|
||||
ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
|
||||
|
||||
@ -1281,19 +1281,19 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
|
||||
|
||||
/* Prepare FDMI command arguments -- attribute block, attributes. */
|
||||
memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
|
||||
ct_req->req.rhba.entry_count = __constant_cpu_to_be32(1);
|
||||
ct_req->req.rhba.entry_count = cpu_to_be32(1);
|
||||
memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
|
||||
size = 2 * WWN_SIZE + 4 + 4;
|
||||
|
||||
/* Attributes */
|
||||
ct_req->req.rhba.attrs.count =
|
||||
__constant_cpu_to_be32(FDMI_HBA_ATTR_COUNT);
|
||||
cpu_to_be32(FDMI_HBA_ATTR_COUNT);
|
||||
entries = ct_req->req.rhba.hba_identifier;
|
||||
|
||||
/* Nodename. */
|
||||
eiter = entries + size;
|
||||
eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME);
|
||||
eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE);
|
||||
eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
|
||||
eiter->len = cpu_to_be16(4 + WWN_SIZE);
|
||||
memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
|
||||
size += 4 + WWN_SIZE;
|
||||
|
||||
@ -1302,7 +1302,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
|
||||
|
||||
/* Manufacturer. */
|
||||
eiter = entries + size;
|
||||
eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER);
|
||||
eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
|
||||
alen = strlen(QLA2XXX_MANUFACTURER);
|
||||
snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
|
||||
"%s", "QLogic Corporation");
|
||||
@ -1315,7 +1315,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
|
||||
|
||||
/* Serial number. */
|
||||
eiter = entries + size;
|
||||
eiter->type = __constant_cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
|
||||
eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
|
||||
if (IS_FWI2_CAPABLE(ha))
|
||||
qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
|
||||
sizeof(eiter->a.serial_num));
|
||||
@ -1335,7 +1335,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
|
||||
|
||||
/* Model name. */
|
||||
eiter = entries + size;
|
||||
eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL);
|
||||
eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
|
||||
snprintf(eiter->a.model, sizeof(eiter->a.model),
|
||||
"%s", ha->model_number);
|
||||
alen = strlen(eiter->a.model);
|
||||
@ -1348,7 +1348,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
|
||||
|
||||
/* Model description. */
|
||||
eiter = entries + size;
|
||||
eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
|
||||
eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
|
||||
snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
|
||||
"%s", ha->model_desc);
|
||||
alen = strlen(eiter->a.model_desc);
|
||||
@ -1361,7 +1361,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
|
||||
|
||||
/* Hardware version. */
|
||||
eiter = entries + size;
|
||||
eiter->type = __constant_cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
|
||||
eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
|
||||
if (!IS_FWI2_CAPABLE(ha)) {
|
||||
snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
|
||||
"HW:%s", ha->adapter_id);
|
||||
@ -1385,7 +1385,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
|
||||
|
||||
/* Driver version. */
|
||||
eiter = entries + size;
|
||||
eiter->type = __constant_cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
|
||||
eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
|
||||
snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
|
||||
"%s", qla2x00_version_str);
|
||||
alen = strlen(eiter->a.driver_version);
|
||||
@ -1398,7 +1398,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
|
||||
|
||||
/* Option ROM version. */
|
||||
eiter = entries + size;
|
||||
eiter->type = __constant_cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
|
||||
eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
|
||||
snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
|
||||
"%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
|
||||
alen = strlen(eiter->a.orom_version);
|
||||
@ -1411,7 +1411,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
|
||||
|
||||
/* Firmware version */
|
||||
eiter = entries + size;
|
||||
eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
|
||||
eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
|
||||
ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
|
||||
sizeof(eiter->a.fw_version));
|
||||
alen = strlen(eiter->a.fw_version);
|
||||
@ -2484,8 +2484,8 @@ qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *vha, uint32_t req_size,
|
||||
ct_pkt->entry_count = 1;
|
||||
ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
|
||||
ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
|
||||
ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
|
||||
ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
|
||||
ct_pkt->cmd_dsd_count = cpu_to_le16(1);
|
||||
ct_pkt->rsp_dsd_count = cpu_to_le16(1);
|
||||
ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
|
||||
ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
|
||||
|
||||
|
@ -1132,7 +1132,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
|
||||
unsigned long flags = 0;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
|
||||
uint32_t cnt, d2;
|
||||
uint32_t cnt;
|
||||
uint16_t wd;
|
||||
static int abts_cnt; /* ISP abort retry counts */
|
||||
int rval = QLA_SUCCESS;
|
||||
@ -1164,7 +1164,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
|
||||
udelay(100);
|
||||
|
||||
/* Wait for firmware to complete NVRAM accesses. */
|
||||
d2 = (uint32_t) RD_REG_WORD(®->mailbox0);
|
||||
RD_REG_WORD(®->mailbox0);
|
||||
for (cnt = 10000; RD_REG_WORD(®->mailbox0) != 0 &&
|
||||
rval == QLA_SUCCESS; cnt--) {
|
||||
barrier();
|
||||
@ -1183,7 +1183,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
|
||||
RD_REG_DWORD(®->mailbox0));
|
||||
|
||||
/* Wait for soft-reset to complete. */
|
||||
d2 = RD_REG_DWORD(®->ctrl_status);
|
||||
RD_REG_DWORD(®->ctrl_status);
|
||||
for (cnt = 0; cnt < 6000000; cnt++) {
|
||||
barrier();
|
||||
if ((RD_REG_DWORD(®->ctrl_status) &
|
||||
@ -1226,7 +1226,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
|
||||
WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_RESET);
|
||||
RD_REG_DWORD(®->hccr);
|
||||
|
||||
d2 = (uint32_t) RD_REG_WORD(®->mailbox0);
|
||||
RD_REG_WORD(®->mailbox0);
|
||||
for (cnt = 6000000; RD_REG_WORD(®->mailbox0) != 0 &&
|
||||
rval == QLA_SUCCESS; cnt--) {
|
||||
barrier();
|
||||
@ -1277,16 +1277,19 @@ qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
|
||||
static void
|
||||
qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
|
||||
{
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
uint32_t wd32 = 0;
|
||||
uint delta_msec = 100;
|
||||
uint elapsed_msec = 0;
|
||||
uint timeout_msec;
|
||||
ulong n;
|
||||
|
||||
if (!IS_QLA25XX(ha) && !IS_QLA2031(ha))
|
||||
if (vha->hw->pdev->subsystem_device != 0x0175 &&
|
||||
vha->hw->pdev->subsystem_device != 0x0240)
|
||||
return;
|
||||
|
||||
WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
|
||||
udelay(100);
|
||||
|
||||
attempt:
|
||||
timeout_msec = TIMEOUT_SEMAPHORE;
|
||||
n = timeout_msec / delta_msec;
|
||||
@ -1690,7 +1693,7 @@ allocate:
|
||||
ha->fw_dump->signature[1] = 'L';
|
||||
ha->fw_dump->signature[2] = 'G';
|
||||
ha->fw_dump->signature[3] = 'C';
|
||||
ha->fw_dump->version = __constant_htonl(1);
|
||||
ha->fw_dump->version = htonl(1);
|
||||
|
||||
ha->fw_dump->fixed_size = htonl(fixed_size);
|
||||
ha->fw_dump->mem_size = htonl(mem_size);
|
||||
@ -2070,8 +2073,8 @@ qla2x00_config_rings(struct scsi_qla_host *vha)
|
||||
struct rsp_que *rsp = ha->rsp_q_map[0];
|
||||
|
||||
/* Setup ring parameters in initialization control block. */
|
||||
ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
|
||||
ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
|
||||
ha->init_cb->request_q_outpointer = cpu_to_le16(0);
|
||||
ha->init_cb->response_q_inpointer = cpu_to_le16(0);
|
||||
ha->init_cb->request_q_length = cpu_to_le16(req->length);
|
||||
ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
|
||||
ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
|
||||
@ -2090,7 +2093,7 @@ void
|
||||
qla24xx_config_rings(struct scsi_qla_host *vha)
|
||||
{
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
|
||||
device_reg_t *reg = ISP_QUE_REG(ha, 0);
|
||||
struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
|
||||
struct qla_msix_entry *msix;
|
||||
struct init_cb_24xx *icb;
|
||||
@ -2100,8 +2103,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
|
||||
|
||||
/* Setup ring parameters in initialization control block. */
|
||||
icb = (struct init_cb_24xx *)ha->init_cb;
|
||||
icb->request_q_outpointer = __constant_cpu_to_le16(0);
|
||||
icb->response_q_inpointer = __constant_cpu_to_le16(0);
|
||||
icb->request_q_outpointer = cpu_to_le16(0);
|
||||
icb->response_q_inpointer = cpu_to_le16(0);
|
||||
icb->request_q_length = cpu_to_le16(req->length);
|
||||
icb->response_q_length = cpu_to_le16(rsp->length);
|
||||
icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
|
||||
@ -2110,18 +2113,17 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
|
||||
icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
|
||||
|
||||
/* Setup ATIO queue dma pointers for target mode */
|
||||
icb->atio_q_inpointer = __constant_cpu_to_le16(0);
|
||||
icb->atio_q_inpointer = cpu_to_le16(0);
|
||||
icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
|
||||
icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
|
||||
icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
|
||||
|
||||
if (IS_SHADOW_REG_CAPABLE(ha))
|
||||
icb->firmware_options_2 |=
|
||||
__constant_cpu_to_le32(BIT_30|BIT_29);
|
||||
icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
|
||||
|
||||
if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
|
||||
icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
|
||||
icb->rid = __constant_cpu_to_le16(rid);
|
||||
icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
|
||||
icb->rid = cpu_to_le16(rid);
|
||||
if (ha->flags.msix_enabled) {
|
||||
msix = &ha->msix_entries[1];
|
||||
ql_dbg(ql_dbg_init, vha, 0x00fd,
|
||||
@ -2131,26 +2133,22 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
|
||||
}
|
||||
/* Use alternate PCI bus number */
|
||||
if (MSB(rid))
|
||||
icb->firmware_options_2 |=
|
||||
__constant_cpu_to_le32(BIT_19);
|
||||
icb->firmware_options_2 |= cpu_to_le32(BIT_19);
|
||||
/* Use alternate PCI devfn */
|
||||
if (LSB(rid))
|
||||
icb->firmware_options_2 |=
|
||||
__constant_cpu_to_le32(BIT_18);
|
||||
icb->firmware_options_2 |= cpu_to_le32(BIT_18);
|
||||
|
||||
/* Use Disable MSIX Handshake mode for capable adapters */
|
||||
if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
|
||||
(ha->flags.msix_enabled)) {
|
||||
icb->firmware_options_2 &=
|
||||
__constant_cpu_to_le32(~BIT_22);
|
||||
icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
|
||||
ha->flags.disable_msix_handshake = 1;
|
||||
ql_dbg(ql_dbg_init, vha, 0x00fe,
|
||||
"MSIX Handshake Disable Mode turned on.\n");
|
||||
} else {
|
||||
icb->firmware_options_2 |=
|
||||
__constant_cpu_to_le32(BIT_22);
|
||||
icb->firmware_options_2 |= cpu_to_le32(BIT_22);
|
||||
}
|
||||
icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
|
||||
icb->firmware_options_2 |= cpu_to_le32(BIT_23);
|
||||
|
||||
WRT_REG_DWORD(®->isp25mq.req_q_in, 0);
|
||||
WRT_REG_DWORD(®->isp25mq.req_q_out, 0);
|
||||
@ -2248,7 +2246,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
|
||||
}
|
||||
|
||||
if (IS_FWI2_CAPABLE(ha)) {
|
||||
mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
|
||||
mid_init_cb->options = cpu_to_le16(BIT_1);
|
||||
mid_init_cb->init_cb.execution_throttle =
|
||||
cpu_to_le16(ha->fw_xcb_count);
|
||||
/* D-Port Status */
|
||||
@ -2677,8 +2675,8 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
|
||||
nv->frame_payload_size = 1024;
|
||||
}
|
||||
|
||||
nv->max_iocb_allocation = __constant_cpu_to_le16(256);
|
||||
nv->execution_throttle = __constant_cpu_to_le16(16);
|
||||
nv->max_iocb_allocation = cpu_to_le16(256);
|
||||
nv->execution_throttle = cpu_to_le16(16);
|
||||
nv->retry_count = 8;
|
||||
nv->retry_delay = 1;
|
||||
|
||||
@ -2696,7 +2694,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
|
||||
nv->host_p[1] = BIT_2;
|
||||
nv->reset_delay = 5;
|
||||
nv->port_down_retry_count = 8;
|
||||
nv->max_luns_per_target = __constant_cpu_to_le16(8);
|
||||
nv->max_luns_per_target = cpu_to_le16(8);
|
||||
nv->link_down_timeout = 60;
|
||||
|
||||
rval = 1;
|
||||
@ -2824,7 +2822,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
|
||||
memcpy(vha->node_name, icb->node_name, WWN_SIZE);
|
||||
memcpy(vha->port_name, icb->port_name, WWN_SIZE);
|
||||
|
||||
icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
|
||||
icb->execution_throttle = cpu_to_le16(0xFFFF);
|
||||
|
||||
ha->retry_count = nv->retry_count;
|
||||
|
||||
@ -2876,10 +2874,10 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
|
||||
if (ql2xloginretrycount)
|
||||
ha->login_retry_count = ql2xloginretrycount;
|
||||
|
||||
icb->lun_enables = __constant_cpu_to_le16(0);
|
||||
icb->lun_enables = cpu_to_le16(0);
|
||||
icb->command_resource_count = 0;
|
||||
icb->immediate_notify_resource_count = 0;
|
||||
icb->timeout = __constant_cpu_to_le16(0);
|
||||
icb->timeout = cpu_to_le16(0);
|
||||
|
||||
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
|
||||
/* Enable RIO */
|
||||
@ -3958,12 +3956,10 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
|
||||
uint16_t *next_loopid)
|
||||
{
|
||||
int rval;
|
||||
int retry;
|
||||
uint8_t opts;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
|
||||
rval = QLA_SUCCESS;
|
||||
retry = 0;
|
||||
|
||||
if (IS_ALOGIO_CAPABLE(ha)) {
|
||||
if (fcport->flags & FCF_ASYNC_SENT)
|
||||
@ -5117,7 +5113,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
|
||||
/* Bad NVRAM data, set defaults parameters. */
|
||||
if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
|
||||
|| nv->id[3] != ' ' ||
|
||||
nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
|
||||
nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
|
||||
/* Reset NVRAM data. */
|
||||
ql_log(ql_log_warn, vha, 0x006b,
|
||||
"Inconsistent NVRAM detected: checksum=0x%x id=%c "
|
||||
@ -5130,12 +5126,12 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
|
||||
* Set default initialization control block.
|
||||
*/
|
||||
memset(nv, 0, ha->nvram_size);
|
||||
nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
|
||||
nv->version = __constant_cpu_to_le16(ICB_VERSION);
|
||||
nv->nvram_version = cpu_to_le16(ICB_VERSION);
|
||||
nv->version = cpu_to_le16(ICB_VERSION);
|
||||
nv->frame_payload_size = 2048;
|
||||
nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
|
||||
nv->exchange_count = __constant_cpu_to_le16(0);
|
||||
nv->hard_address = __constant_cpu_to_le16(124);
|
||||
nv->execution_throttle = cpu_to_le16(0xFFFF);
|
||||
nv->exchange_count = cpu_to_le16(0);
|
||||
nv->hard_address = cpu_to_le16(124);
|
||||
nv->port_name[0] = 0x21;
|
||||
nv->port_name[1] = 0x00 + ha->port_no + 1;
|
||||
nv->port_name[2] = 0x00;
|
||||
@ -5153,29 +5149,29 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
|
||||
nv->node_name[6] = 0x55;
|
||||
nv->node_name[7] = 0x86;
|
||||
qla24xx_nvram_wwn_from_ofw(vha, nv);
|
||||
nv->login_retry_count = __constant_cpu_to_le16(8);
|
||||
nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
|
||||
nv->login_timeout = __constant_cpu_to_le16(0);
|
||||
nv->login_retry_count = cpu_to_le16(8);
|
||||
nv->interrupt_delay_timer = cpu_to_le16(0);
|
||||
nv->login_timeout = cpu_to_le16(0);
|
||||
nv->firmware_options_1 =
|
||||
__constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
|
||||
nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
|
||||
nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
|
||||
nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
|
||||
nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
|
||||
nv->efi_parameters = __constant_cpu_to_le32(0);
|
||||
cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
|
||||
nv->firmware_options_2 = cpu_to_le32(2 << 4);
|
||||
nv->firmware_options_2 |= cpu_to_le32(BIT_12);
|
||||
nv->firmware_options_3 = cpu_to_le32(2 << 13);
|
||||
nv->host_p = cpu_to_le32(BIT_11|BIT_10);
|
||||
nv->efi_parameters = cpu_to_le32(0);
|
||||
nv->reset_delay = 5;
|
||||
nv->max_luns_per_target = __constant_cpu_to_le16(128);
|
||||
nv->port_down_retry_count = __constant_cpu_to_le16(30);
|
||||
nv->link_down_timeout = __constant_cpu_to_le16(30);
|
||||
nv->max_luns_per_target = cpu_to_le16(128);
|
||||
nv->port_down_retry_count = cpu_to_le16(30);
|
||||
nv->link_down_timeout = cpu_to_le16(30);
|
||||
|
||||
rval = 1;
|
||||
}
|
||||
|
||||
if (!qla_ini_mode_enabled(vha)) {
|
||||
/* Don't enable full login after initial LIP */
|
||||
nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
|
||||
nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
|
||||
/* Don't enable LIP full login for initiator */
|
||||
nv->host_p &= __constant_cpu_to_le32(~BIT_10);
|
||||
nv->host_p &= cpu_to_le32(~BIT_10);
|
||||
}
|
||||
|
||||
qlt_24xx_config_nvram_stage1(vha, nv);
|
||||
@ -5209,14 +5205,14 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
|
||||
|
||||
qlt_24xx_config_nvram_stage2(vha, icb);
|
||||
|
||||
if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
|
||||
if (nv->host_p & cpu_to_le32(BIT_15)) {
|
||||
/* Use alternate WWN? */
|
||||
memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
|
||||
memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
|
||||
}
|
||||
|
||||
/* Prepare nodename */
|
||||
if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
|
||||
if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
|
||||
/*
|
||||
* Firmware will apply the following mask if the nodename was
|
||||
* not provided.
|
||||
@ -5248,7 +5244,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
|
||||
memcpy(vha->node_name, icb->node_name, WWN_SIZE);
|
||||
memcpy(vha->port_name, icb->port_name, WWN_SIZE);
|
||||
|
||||
icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
|
||||
icb->execution_throttle = cpu_to_le16(0xFFFF);
|
||||
|
||||
ha->retry_count = le16_to_cpu(nv->login_retry_count);
|
||||
|
||||
@ -5256,7 +5252,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
|
||||
if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
|
||||
nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
|
||||
if (le16_to_cpu(nv->login_timeout) < 4)
|
||||
nv->login_timeout = __constant_cpu_to_le16(4);
|
||||
nv->login_timeout = cpu_to_le16(4);
|
||||
ha->login_timeout = le16_to_cpu(nv->login_timeout);
|
||||
icb->login_timeout = nv->login_timeout;
|
||||
|
||||
@ -5307,7 +5303,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
|
||||
ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
|
||||
le16_to_cpu(icb->interrupt_delay_timer): 2;
|
||||
}
|
||||
icb->firmware_options_2 &= __constant_cpu_to_le32(
|
||||
icb->firmware_options_2 &= cpu_to_le32(
|
||||
~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
|
||||
vha->flags.process_response_queue = 0;
|
||||
if (ha->zio_mode != QLA_ZIO_DISABLED) {
|
||||
@ -6063,7 +6059,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
|
||||
/* Bad NVRAM data, set defaults parameters. */
|
||||
if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
|
||||
|| nv->id[3] != ' ' ||
|
||||
nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
|
||||
nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
|
||||
/* Reset NVRAM data. */
|
||||
ql_log(ql_log_info, vha, 0x0073,
|
||||
"Inconsistent NVRAM detected: checksum=0x%x id=%c "
|
||||
@ -6077,11 +6073,11 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
|
||||
* Set default initialization control block.
|
||||
*/
|
||||
memset(nv, 0, ha->nvram_size);
|
||||
nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
|
||||
nv->version = __constant_cpu_to_le16(ICB_VERSION);
|
||||
nv->nvram_version = cpu_to_le16(ICB_VERSION);
|
||||
nv->version = cpu_to_le16(ICB_VERSION);
|
||||
nv->frame_payload_size = 2048;
|
||||
nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
|
||||
nv->exchange_count = __constant_cpu_to_le16(0);
|
||||
nv->execution_throttle = cpu_to_le16(0xFFFF);
|
||||
nv->exchange_count = cpu_to_le16(0);
|
||||
nv->port_name[0] = 0x21;
|
||||
nv->port_name[1] = 0x00 + ha->port_no + 1;
|
||||
nv->port_name[2] = 0x00;
|
||||
@ -6098,20 +6094,20 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
|
||||
nv->node_name[5] = 0x1c;
|
||||
nv->node_name[6] = 0x55;
|
||||
nv->node_name[7] = 0x86;
|
||||
nv->login_retry_count = __constant_cpu_to_le16(8);
|
||||
nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
|
||||
nv->login_timeout = __constant_cpu_to_le16(0);
|
||||
nv->login_retry_count = cpu_to_le16(8);
|
||||
nv->interrupt_delay_timer = cpu_to_le16(0);
|
||||
nv->login_timeout = cpu_to_le16(0);
|
||||
nv->firmware_options_1 =
|
||||
__constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
|
||||
nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
|
||||
nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
|
||||
nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
|
||||
nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
|
||||
nv->efi_parameters = __constant_cpu_to_le32(0);
|
||||
cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
|
||||
nv->firmware_options_2 = cpu_to_le32(2 << 4);
|
||||
nv->firmware_options_2 |= cpu_to_le32(BIT_12);
|
||||
nv->firmware_options_3 = cpu_to_le32(2 << 13);
|
||||
nv->host_p = cpu_to_le32(BIT_11|BIT_10);
|
||||
nv->efi_parameters = cpu_to_le32(0);
|
||||
nv->reset_delay = 5;
|
||||
nv->max_luns_per_target = __constant_cpu_to_le16(128);
|
||||
nv->port_down_retry_count = __constant_cpu_to_le16(30);
|
||||
nv->link_down_timeout = __constant_cpu_to_le16(180);
|
||||
nv->max_luns_per_target = cpu_to_le16(128);
|
||||
nv->port_down_retry_count = cpu_to_le16(30);
|
||||
nv->link_down_timeout = cpu_to_le16(180);
|
||||
nv->enode_mac[0] = 0x00;
|
||||
nv->enode_mac[1] = 0xC0;
|
||||
nv->enode_mac[2] = 0xDD;
|
||||
@ -6170,13 +6166,13 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
|
||||
qlt_81xx_config_nvram_stage2(vha, icb);
|
||||
|
||||
/* Use alternate WWN? */
|
||||
if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
|
||||
if (nv->host_p & cpu_to_le32(BIT_15)) {
|
||||
memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
|
||||
memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
|
||||
}
|
||||
|
||||
/* Prepare nodename */
|
||||
if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
|
||||
if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
|
||||
/*
|
||||
* Firmware will apply the following mask if the nodename was
|
||||
* not provided.
|
||||
@ -6205,7 +6201,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
|
||||
memcpy(vha->node_name, icb->node_name, WWN_SIZE);
|
||||
memcpy(vha->port_name, icb->port_name, WWN_SIZE);
|
||||
|
||||
icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
|
||||
icb->execution_throttle = cpu_to_le16(0xFFFF);
|
||||
|
||||
ha->retry_count = le16_to_cpu(nv->login_retry_count);
|
||||
|
||||
@ -6213,7 +6209,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
|
||||
if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
|
||||
nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
|
||||
if (le16_to_cpu(nv->login_timeout) < 4)
|
||||
nv->login_timeout = __constant_cpu_to_le16(4);
|
||||
nv->login_timeout = cpu_to_le16(4);
|
||||
ha->login_timeout = le16_to_cpu(nv->login_timeout);
|
||||
icb->login_timeout = nv->login_timeout;
|
||||
|
||||
@ -6259,7 +6255,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
|
||||
|
||||
/* if not running MSI-X we need handshaking on interrupts */
|
||||
if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
|
||||
icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
|
||||
icb->firmware_options_2 |= cpu_to_le32(BIT_22);
|
||||
|
||||
/* Enable ZIO. */
|
||||
if (!vha->flags.init_done) {
|
||||
@ -6268,7 +6264,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
|
||||
ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
|
||||
le16_to_cpu(icb->interrupt_delay_timer): 2;
|
||||
}
|
||||
icb->firmware_options_2 &= __constant_cpu_to_le32(
|
||||
icb->firmware_options_2 &= cpu_to_le32(
|
||||
~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
|
||||
vha->flags.process_response_queue = 0;
|
||||
if (ha->zio_mode != QLA_ZIO_DISABLED) {
|
||||
|
@ -108,8 +108,7 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
|
||||
cont_pkt = (cont_entry_t *)req->ring_ptr;
|
||||
|
||||
/* Load packet defaults. */
|
||||
*((uint32_t *)(&cont_pkt->entry_type)) =
|
||||
__constant_cpu_to_le32(CONTINUE_TYPE);
|
||||
*((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);
|
||||
|
||||
return (cont_pkt);
|
||||
}
|
||||
@ -138,8 +137,8 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
|
||||
|
||||
/* Load packet defaults. */
|
||||
*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
|
||||
__constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
|
||||
__constant_cpu_to_le32(CONTINUE_A64_TYPE);
|
||||
cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
|
||||
cpu_to_le32(CONTINUE_A64_TYPE);
|
||||
|
||||
return (cont_pkt);
|
||||
}
|
||||
@ -204,11 +203,11 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
|
||||
|
||||
/* Update entry type to indicate Command Type 2 IOCB */
|
||||
*((uint32_t *)(&cmd_pkt->entry_type)) =
|
||||
__constant_cpu_to_le32(COMMAND_TYPE);
|
||||
cpu_to_le32(COMMAND_TYPE);
|
||||
|
||||
/* No data transfer */
|
||||
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
|
||||
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
|
||||
cmd_pkt->byte_count = cpu_to_le32(0);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -261,12 +260,11 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
|
||||
cmd = GET_CMD_SP(sp);
|
||||
|
||||
/* Update entry type to indicate Command Type 3 IOCB */
|
||||
*((uint32_t *)(&cmd_pkt->entry_type)) =
|
||||
__constant_cpu_to_le32(COMMAND_A64_TYPE);
|
||||
*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);
|
||||
|
||||
/* No data transfer */
|
||||
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
|
||||
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
|
||||
cmd_pkt->byte_count = cpu_to_le32(0);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -310,7 +308,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
|
||||
int
|
||||
qla2x00_start_scsi(srb_t *sp)
|
||||
{
|
||||
int ret, nseg;
|
||||
int nseg;
|
||||
unsigned long flags;
|
||||
scsi_qla_host_t *vha;
|
||||
struct scsi_cmnd *cmd;
|
||||
@ -327,7 +325,6 @@ qla2x00_start_scsi(srb_t *sp)
|
||||
struct rsp_que *rsp;
|
||||
|
||||
/* Setup device pointers. */
|
||||
ret = 0;
|
||||
vha = sp->fcport->vha;
|
||||
ha = vha->hw;
|
||||
reg = &ha->iobase->isp;
|
||||
@ -403,7 +400,7 @@ qla2x00_start_scsi(srb_t *sp)
|
||||
/* Set target ID and LUN number*/
|
||||
SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
|
||||
cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
|
||||
cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
|
||||
cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
|
||||
|
||||
/* Load SCSI command packet. */
|
||||
memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
|
||||
@ -454,7 +451,7 @@ void
|
||||
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
|
||||
{
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
|
||||
device_reg_t *reg = ISP_QUE_REG(ha, req->id);
|
||||
|
||||
if (IS_P3P_TYPE(ha)) {
|
||||
qla82xx_start_iocbs(vha);
|
||||
@ -597,12 +594,11 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
|
||||
cmd = GET_CMD_SP(sp);
|
||||
|
||||
/* Update entry type to indicate Command Type 3 IOCB */
|
||||
*((uint32_t *)(&cmd_pkt->entry_type)) =
|
||||
__constant_cpu_to_le32(COMMAND_TYPE_6);
|
||||
*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);
|
||||
|
||||
/* No data transfer */
|
||||
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
|
||||
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
|
||||
cmd_pkt->byte_count = cpu_to_le32(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -611,13 +607,11 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
|
||||
|
||||
/* Set transfer direction */
|
||||
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
|
||||
cmd_pkt->control_flags =
|
||||
__constant_cpu_to_le16(CF_WRITE_DATA);
|
||||
cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
|
||||
vha->qla_stats.output_bytes += scsi_bufflen(cmd);
|
||||
vha->qla_stats.output_requests++;
|
||||
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
|
||||
cmd_pkt->control_flags =
|
||||
__constant_cpu_to_le16(CF_READ_DATA);
|
||||
cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
|
||||
vha->qla_stats.input_bytes += scsi_bufflen(cmd);
|
||||
vha->qla_stats.input_requests++;
|
||||
}
|
||||
@ -680,7 +674,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
|
||||
*
|
||||
* Returns the number of dsd list needed to store @dsds.
|
||||
*/
|
||||
inline uint16_t
|
||||
static inline uint16_t
|
||||
qla24xx_calc_dsd_lists(uint16_t dsds)
|
||||
{
|
||||
uint16_t dsd_lists = 0;
|
||||
@ -700,7 +694,7 @@ qla24xx_calc_dsd_lists(uint16_t dsds)
|
||||
* @cmd_pkt: Command type 3 IOCB
|
||||
* @tot_dsds: Total number of segments to transfer
|
||||
*/
|
||||
inline void
|
||||
static inline void
|
||||
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
|
||||
uint16_t tot_dsds)
|
||||
{
|
||||
@ -710,32 +704,27 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
|
||||
struct scsi_cmnd *cmd;
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
struct req_que *req;
|
||||
|
||||
cmd = GET_CMD_SP(sp);
|
||||
|
||||
/* Update entry type to indicate Command Type 3 IOCB */
|
||||
*((uint32_t *)(&cmd_pkt->entry_type)) =
|
||||
__constant_cpu_to_le32(COMMAND_TYPE_7);
|
||||
*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);
|
||||
|
||||
/* No data transfer */
|
||||
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
|
||||
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
|
||||
cmd_pkt->byte_count = cpu_to_le32(0);
|
||||
return;
|
||||
}
|
||||
|
||||
vha = sp->fcport->vha;
|
||||
req = vha->req;
|
||||
|
||||
/* Set transfer direction */
|
||||
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
|
||||
cmd_pkt->task_mgmt_flags =
|
||||
__constant_cpu_to_le16(TMF_WRITE_DATA);
|
||||
cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
|
||||
vha->qla_stats.output_bytes += scsi_bufflen(cmd);
|
||||
vha->qla_stats.output_requests++;
|
||||
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
|
||||
cmd_pkt->task_mgmt_flags =
|
||||
__constant_cpu_to_le16(TMF_READ_DATA);
|
||||
cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
|
||||
vha->qla_stats.input_bytes += scsi_bufflen(cmd);
|
||||
vha->qla_stats.input_requests++;
|
||||
}
|
||||
@ -809,7 +798,7 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
|
||||
* match LBA in CDB + N
|
||||
*/
|
||||
case SCSI_PROT_DIF_TYPE2:
|
||||
pkt->app_tag = __constant_cpu_to_le16(0);
|
||||
pkt->app_tag = cpu_to_le16(0);
|
||||
pkt->app_tag_mask[0] = 0x0;
|
||||
pkt->app_tag_mask[1] = 0x0;
|
||||
|
||||
@ -840,7 +829,7 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
|
||||
case SCSI_PROT_DIF_TYPE1:
|
||||
pkt->ref_tag = cpu_to_le32((uint32_t)
|
||||
(0xffffffff & scsi_get_lba(cmd)));
|
||||
pkt->app_tag = __constant_cpu_to_le16(0);
|
||||
pkt->app_tag = cpu_to_le16(0);
|
||||
pkt->app_tag_mask[0] = 0x0;
|
||||
pkt->app_tag_mask[1] = 0x0;
|
||||
|
||||
@ -933,11 +922,9 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
|
||||
dma_addr_t sle_dma;
|
||||
uint32_t sle_dma_len, tot_prot_dma_len = 0;
|
||||
struct scsi_cmnd *cmd;
|
||||
struct scsi_qla_host *vha;
|
||||
|
||||
memset(&sgx, 0, sizeof(struct qla2_sgx));
|
||||
if (sp) {
|
||||
vha = sp->fcport->vha;
|
||||
cmd = GET_CMD_SP(sp);
|
||||
prot_int = cmd->device->sector_size;
|
||||
|
||||
@ -947,7 +934,6 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
|
||||
|
||||
sg_prot = scsi_prot_sglist(cmd);
|
||||
} else if (tc) {
|
||||
vha = tc->vha;
|
||||
prot_int = tc->blk_sz;
|
||||
sgx.tot_bytes = tc->bufflen;
|
||||
sgx.cur_sg = tc->sg;
|
||||
@ -1047,15 +1033,12 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
|
||||
int i;
|
||||
uint16_t used_dsds = tot_dsds;
|
||||
struct scsi_cmnd *cmd;
|
||||
struct scsi_qla_host *vha;
|
||||
|
||||
if (sp) {
|
||||
cmd = GET_CMD_SP(sp);
|
||||
sgl = scsi_sglist(cmd);
|
||||
vha = sp->fcport->vha;
|
||||
} else if (tc) {
|
||||
sgl = tc->sg;
|
||||
vha = tc->vha;
|
||||
} else {
|
||||
BUG();
|
||||
return 1;
|
||||
@ -1231,7 +1214,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
||||
uint32_t *cur_dsd, *fcp_dl;
|
||||
scsi_qla_host_t *vha;
|
||||
struct scsi_cmnd *cmd;
|
||||
int sgc;
|
||||
uint32_t total_bytes = 0;
|
||||
uint32_t data_bytes;
|
||||
uint32_t dif_bytes;
|
||||
@ -1247,10 +1229,8 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
||||
|
||||
cmd = GET_CMD_SP(sp);
|
||||
|
||||
sgc = 0;
|
||||
/* Update entry type to indicate Command Type CRC_2 IOCB */
|
||||
*((uint32_t *)(&cmd_pkt->entry_type)) =
|
||||
__constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
|
||||
*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
|
||||
|
||||
vha = sp->fcport->vha;
|
||||
ha = vha->hw;
|
||||
@ -1258,7 +1238,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
||||
/* No data transfer */
|
||||
data_bytes = scsi_bufflen(cmd);
|
||||
if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
|
||||
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
|
||||
cmd_pkt->byte_count = cpu_to_le32(0);
|
||||
return QLA_SUCCESS;
|
||||
}
|
||||
|
||||
@ -1267,10 +1247,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
||||
/* Set transfer direction */
|
||||
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
|
||||
cmd_pkt->control_flags =
|
||||
__constant_cpu_to_le16(CF_WRITE_DATA);
|
||||
cpu_to_le16(CF_WRITE_DATA);
|
||||
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
|
||||
cmd_pkt->control_flags =
|
||||
__constant_cpu_to_le16(CF_READ_DATA);
|
||||
cpu_to_le16(CF_READ_DATA);
|
||||
}
|
||||
|
||||
if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
|
||||
@ -1392,7 +1372,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
||||
crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
|
||||
crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
|
||||
crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
|
||||
crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
|
||||
crc_ctx_pkt->guard_seed = cpu_to_le16(0);
|
||||
/* Fibre channel byte count */
|
||||
cmd_pkt->byte_count = cpu_to_le32(total_bytes);
|
||||
fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
|
||||
@ -1400,13 +1380,12 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
||||
*fcp_dl = htonl(total_bytes);
|
||||
|
||||
if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
|
||||
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
|
||||
cmd_pkt->byte_count = cpu_to_le32(0);
|
||||
return QLA_SUCCESS;
|
||||
}
|
||||
/* Walks data segments */
|
||||
|
||||
cmd_pkt->control_flags |=
|
||||
__constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
|
||||
cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
|
||||
|
||||
if (!bundling && tot_prot_dsds) {
|
||||
if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
|
||||
@ -1418,8 +1397,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
|
||||
|
||||
if (bundling && tot_prot_dsds) {
|
||||
/* Walks dif segments */
|
||||
cmd_pkt->control_flags |=
|
||||
__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
|
||||
cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
|
||||
cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
|
||||
if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
|
||||
tot_prot_dsds, NULL))
|
||||
@ -1442,7 +1420,7 @@ crc_queuing_error:
|
||||
int
|
||||
qla24xx_start_scsi(srb_t *sp)
|
||||
{
|
||||
int ret, nseg;
|
||||
int nseg;
|
||||
unsigned long flags;
|
||||
uint32_t *clr_ptr;
|
||||
uint32_t index;
|
||||
@ -1458,8 +1436,6 @@ qla24xx_start_scsi(srb_t *sp)
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
|
||||
/* Setup device pointers. */
|
||||
ret = 0;
|
||||
|
||||
qla25xx_set_que(sp, &rsp);
|
||||
req = vha->req;
|
||||
|
||||
@ -1753,7 +1729,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
|
||||
cmd_pkt->entry_count = (uint8_t)req_cnt;
|
||||
/* Specify response queue number where completion should happen */
|
||||
cmd_pkt->entry_status = (uint8_t) rsp->id;
|
||||
cmd_pkt->timeout = __constant_cpu_to_le16(0);
|
||||
cmd_pkt->timeout = cpu_to_le16(0);
|
||||
wmb();
|
||||
|
||||
/* Adjust ring index. */
|
||||
@ -1819,7 +1795,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
|
||||
{
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct req_que *req = ha->req_q_map[0];
|
||||
device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
|
||||
device_reg_t *reg = ISP_QUE_REG(ha, req->id);
|
||||
uint32_t index, handle;
|
||||
request_t *pkt;
|
||||
uint16_t cnt, req_cnt;
|
||||
@ -2044,10 +2020,10 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
|
||||
els_iocb->entry_status = 0;
|
||||
els_iocb->handle = sp->handle;
|
||||
els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
|
||||
els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
|
||||
els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
|
||||
els_iocb->vp_index = sp->fcport->vha->vp_idx;
|
||||
els_iocb->sof_type = EST_SOFI3;
|
||||
els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
|
||||
els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
|
||||
|
||||
els_iocb->opcode =
|
||||
sp->type == SRB_ELS_CMD_RPT ?
|
||||
@ -2091,7 +2067,6 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct fc_bsg_job *bsg_job = sp->u.bsg_job;
|
||||
int loop_iterartion = 0;
|
||||
int cont_iocb_prsnt = 0;
|
||||
int entry_count = 1;
|
||||
|
||||
memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
|
||||
@ -2099,13 +2074,13 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
|
||||
ct_iocb->entry_status = 0;
|
||||
ct_iocb->handle1 = sp->handle;
|
||||
SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
|
||||
ct_iocb->status = __constant_cpu_to_le16(0);
|
||||
ct_iocb->control_flags = __constant_cpu_to_le16(0);
|
||||
ct_iocb->status = cpu_to_le16(0);
|
||||
ct_iocb->control_flags = cpu_to_le16(0);
|
||||
ct_iocb->timeout = 0;
|
||||
ct_iocb->cmd_dsd_count =
|
||||
__constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
cpu_to_le16(bsg_job->request_payload.sg_cnt);
ct_iocb->total_dsd_count =
__constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
ct_iocb->req_bytecount =
cpu_to_le32(bsg_job->request_payload.payload_len);
ct_iocb->rsp_bytecount =
@@ -2142,7 +2117,6 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
vha->hw->req_q_map[0]);
cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
avail_dsds = 5;
cont_iocb_prsnt = 1;
entry_count++;
}

@@ -2170,7 +2144,6 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
struct qla_hw_data *ha = vha->hw;
struct fc_bsg_job *bsg_job = sp->u.bsg_job;
int loop_iterartion = 0;
int cont_iocb_prsnt = 0;
int entry_count = 1;

ct_iocb->entry_type = CT_IOCB_TYPE;
@@ -2180,13 +2153,13 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)

ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
ct_iocb->vp_index = sp->fcport->vha->vp_idx;
ct_iocb->comp_status = __constant_cpu_to_le16(0);
ct_iocb->comp_status = cpu_to_le16(0);

ct_iocb->cmd_dsd_count =
__constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
cpu_to_le16(bsg_job->request_payload.sg_cnt);
ct_iocb->timeout = 0;
ct_iocb->rsp_dsd_count =
__constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
cpu_to_le16(bsg_job->reply_payload.sg_cnt);
ct_iocb->rsp_byte_count =
cpu_to_le32(bsg_job->reply_payload.payload_len);
ct_iocb->cmd_byte_count =
@@ -2217,7 +2190,6 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
ha->req_q_map[0]);
cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
avail_dsds = 5;
cont_iocb_prsnt = 1;
entry_count++;
}

@@ -2240,7 +2212,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
int
qla82xx_start_scsi(srb_t *sp)
{
int ret, nseg;
int nseg;
unsigned long flags;
struct scsi_cmnd *cmd;
uint32_t *clr_ptr;
@@ -2260,7 +2232,6 @@ qla82xx_start_scsi(srb_t *sp)
struct rsp_que *rsp = NULL;

/* Setup device pointers. */
ret = 0;
reg = &ha->iobase->isp82;
cmd = GET_CMD_SP(sp);
req = vha->req;
@@ -2539,16 +2510,12 @@ sufficient_dsds:
/* write, read and verify logic */
dbval = dbval | (req->id << 8) | (req->ring_index << 16);
if (ql2xdbwr)
qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
else {
WRT_REG_DWORD(
(unsigned long __iomem *)ha->nxdb_wr_ptr,
dbval);
WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
wmb();
while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
WRT_REG_DWORD(
(unsigned long __iomem *)ha->nxdb_wr_ptr,
dbval);
while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
wmb();
}
}
@@ -2682,7 +2649,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,

/*Update entry type to indicate bidir command */
*((uint32_t *)(&cmd_pkt->entry_type)) =
__constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
cpu_to_le32(COMMAND_BIDIRECTIONAL);

/* Set the transfer direction, in this set both flags
* Also set the BD_WRAP_BACK flag, firmware will take care
@@ -2690,8 +2657,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
*/
cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
cmd_pkt->control_flags =
__constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
BD_WRAP_BACK);

req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;

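A minimal sketch of the conversion being applied in the hunks above, assuming a hypothetical example_iocb structure (not from the driver): the plain cpu_to_le16()/cpu_to_le32() helpers handle compile-time constants just as well as runtime values; the byte swap of a constant is folded by the compiler, so the __constant_ variants add nothing.

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical structure, for illustration only. */
struct example_iocb {
	__le16 comp_status;
	__le16 dsd_count;
	__le32 byte_count;
};

static void example_fill(struct example_iocb *iocb, u16 sg_cnt, u32 len)
{
	iocb->comp_status = cpu_to_le16(0);      /* constant: folded at compile time */
	iocb->dsd_count = cpu_to_le16(sg_cnt);   /* runtime value: same helper */
	iocb->byte_count = cpu_to_le32(len);
}
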
@@ -116,7 +116,7 @@ bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
/* Check for PCI disconnection */
if (reg == 0xffffffff) {
if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
!test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
!test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
@@ -560,6 +560,17 @@ qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
return ret;
}

static inline fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
fc_port_t *fcport;

list_for_each_entry(fcport, &vha->vp_fcports, list)
if (fcport->loop_id == loop_id)
return fcport;
return NULL;
}

/**
* qla2x00_async_event() - Process aynchronous events.
* @ha: SCSI driver HA context
@@ -575,7 +586,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
uint32_t rscn_entry, host_pid, tmp_pid;
uint32_t rscn_entry, host_pid;
unsigned long flags;
fc_port_t *fcport = NULL;

@@ -897,11 +908,29 @@ skip_rio:
(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
break;

/* Global event -- port logout or port unavailable. */
if (mb[1] == 0xffff && mb[2] == 0x7) {
if (mb[2] == 0x7) {
ql_dbg(ql_dbg_async, vha, 0x5010,
"Port unavailable %04x %04x %04x.\n",
"Port %s %04x %04x %04x.\n",
mb[1] == 0xffff ? "unavailable" : "logout",
mb[1], mb[2], mb[3]);

if (mb[1] == 0xffff)
goto global_port_update;

/* Port logout */
fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
if (!fcport)
break;
if (atomic_read(&fcport->state) != FCS_ONLINE)
break;
ql_dbg(ql_dbg_async, vha, 0x508a,
"Marking port lost loopid=%04x portid=%06x.\n",
fcport->loop_id, fcport->d_id.b24);
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
break;

global_port_update:
/* Port unavailable. */
ql_log(ql_log_warn, vha, 0x505e,
"Link is offline.\n");

@@ -998,7 +1027,6 @@ skip_rio:
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (atomic_read(&fcport->state) != FCS_ONLINE)
continue;
tmp_pid = fcport->d_id.b24;
if (fcport->d_id.b24 == rscn_entry) {
qla2x00_mark_device_lost(vha, fcport, 0, 0);
break;
@@ -1565,7 +1593,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
"Async-%s error - hdl=%x entry-status(%x).\n",
type, sp->handle, sts->entry_status);
iocb->u.tmf.data = QLA_FUNCTION_FAILED;
} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
ql_log(ql_log_warn, fcport->vha, 0x5039,
"Async-%s error - hdl=%x completion status(%x).\n",
type, sp->handle, sts->comp_status);
@@ -2045,14 +2073,18 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
}

/* Validate handle. */
if (handle < req->num_outstanding_cmds)
if (handle < req->num_outstanding_cmds) {
sp = req->outstanding_cmds[handle];
else
sp = NULL;

if (sp == NULL) {
if (!sp) {
ql_dbg(ql_dbg_io, vha, 0x3075,
"%s(%ld): Already returned command for status handle (0x%x).\n",
__func__, vha->host_no, sts->handle);
return;
}
} else {
ql_dbg(ql_dbg_io, vha, 0x3017,
"Invalid status handle (0x%x).\n", sts->handle);
"Invalid status handle, out of range (0x%x).\n",
sts->handle);

if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
if (IS_P3P_TYPE(ha))
@@ -2339,12 +2371,12 @@ out:
ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
"FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
"portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
"rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
"rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
comp_status, scsi_status, res, vha->host_no,
cp->device->id, cp->device->lun, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
cp->cmnd, scsi_bufflen(cp), rsp_info_len,
resid_len, fw_resid_len);
resid_len, fw_resid_len, sp, cp);

if (rsp->status_srb == NULL)
sp->done(ha, sp, res);
@@ -2441,13 +2473,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
}
fatal:
ql_log(ql_log_warn, vha, 0x5030,
"Error entry - invalid handle/queue.\n");

if (IS_P3P_TYPE(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
else
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
"Error entry - invalid handle/queue (%04x).\n", que);
}

/**

@@ -555,7 +555,9 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
if (IS_FWI2_CAPABLE(ha))
mcp->in_mb |= MBX_17|MBX_16|MBX_15;
if (IS_QLA27XX(ha))
mcp->in_mb |= MBX_21|MBX_20|MBX_19|MBX_18;
mcp->in_mb |= MBX_23 | MBX_22 | MBX_21 | MBX_20 | MBX_19 |
MBX_18 | MBX_14 | MBX_13 | MBX_11 | MBX_10 | MBX_9 | MBX_8;

mcp->flags = 0;
mcp->tov = MBX_TOV_SECONDS;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -571,6 +573,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
else
ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];

if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
ha->mpi_version[0] = mcp->mb[10] & 0xff;
ha->mpi_version[1] = mcp->mb[11] >> 8;
@@ -580,6 +583,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ha->phy_version[1] = mcp->mb[9] >> 8;
ha->phy_version[2] = mcp->mb[9] & 0xff;
}

if (IS_FWI2_CAPABLE(ha)) {
ha->fw_attributes_h = mcp->mb[15];
ha->fw_attributes_ext[0] = mcp->mb[16];
@@ -591,7 +595,14 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
"%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
__func__, mcp->mb[17], mcp->mb[16]);
}

if (IS_QLA27XX(ha)) {
ha->mpi_version[0] = mcp->mb[10] & 0xff;
ha->mpi_version[1] = mcp->mb[11] >> 8;
ha->mpi_version[2] = mcp->mb[11] & 0xff;
ha->pep_version[0] = mcp->mb[13] & 0xff;
ha->pep_version[1] = mcp->mb[14] >> 8;
ha->pep_version[2] = mcp->mb[14] & 0xff;
ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
}
@@ -1135,20 +1146,22 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
}
/* If FA-WWN supported */
if (mcp->mb[7] & BIT_14) {
vha->port_name[0] = MSB(mcp->mb[16]);
vha->port_name[1] = LSB(mcp->mb[16]);
vha->port_name[2] = MSB(mcp->mb[17]);
vha->port_name[3] = LSB(mcp->mb[17]);
vha->port_name[4] = MSB(mcp->mb[18]);
vha->port_name[5] = LSB(mcp->mb[18]);
vha->port_name[6] = MSB(mcp->mb[19]);
vha->port_name[7] = LSB(mcp->mb[19]);
fc_host_port_name(vha->host) =
wwn_to_u64(vha->port_name);
ql_dbg(ql_dbg_mbx, vha, 0x10ca,
"FA-WWN acquired %016llx\n",
wwn_to_u64(vha->port_name));
if (IS_FAWWN_CAPABLE(vha->hw)) {
if (mcp->mb[7] & BIT_14) {
vha->port_name[0] = MSB(mcp->mb[16]);
vha->port_name[1] = LSB(mcp->mb[16]);
vha->port_name[2] = MSB(mcp->mb[17]);
vha->port_name[3] = LSB(mcp->mb[17]);
vha->port_name[4] = MSB(mcp->mb[18]);
vha->port_name[5] = LSB(mcp->mb[18]);
vha->port_name[6] = MSB(mcp->mb[19]);
vha->port_name[7] = LSB(mcp->mb[19]);
fc_host_port_name(vha->host) =
wwn_to_u64(vha->port_name);
ql_dbg(ql_dbg_mbx, vha, 0x10ca,
"FA-WWN acquired %016llx\n",
wwn_to_u64(vha->port_name));
}
}
}

@ -1239,7 +1252,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
|
||||
"Entered %s.\n", __func__);
|
||||
|
||||
if (IS_P3P_TYPE(ha) && ql2xdbwr)
|
||||
qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
|
||||
qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
|
||||
(0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
|
||||
|
||||
if (ha->flags.npiv_supported)
|
||||
@ -1865,7 +1878,6 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
|
||||
uint32_t iop[2];
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct req_que *req;
|
||||
struct rsp_que *rsp;
|
||||
|
||||
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
|
||||
"Entered %s.\n", __func__);
|
||||
@ -1874,7 +1886,6 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
|
||||
req = ha->req_q_map[0];
|
||||
else
|
||||
req = vha->req;
|
||||
rsp = req->rsp;
|
||||
|
||||
lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
|
||||
if (lg == NULL) {
|
||||
@ -1888,11 +1899,11 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
|
||||
lg->entry_count = 1;
|
||||
lg->handle = MAKE_HANDLE(req->id, lg->handle);
|
||||
lg->nport_handle = cpu_to_le16(loop_id);
|
||||
lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
|
||||
lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
|
||||
if (opt & BIT_0)
|
||||
lg->control_flags |= __constant_cpu_to_le16(LCF_COND_PLOGI);
|
||||
lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
|
||||
if (opt & BIT_1)
|
||||
lg->control_flags |= __constant_cpu_to_le16(LCF_SKIP_PRLI);
|
||||
lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
|
||||
lg->port_id[0] = al_pa;
|
||||
lg->port_id[1] = area;
|
||||
lg->port_id[2] = domain;
|
||||
@ -1907,7 +1918,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
|
||||
"Failed to complete IOCB -- error status (%x).\n",
|
||||
lg->entry_status);
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
} else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
|
||||
} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
|
||||
iop[0] = le32_to_cpu(lg->io_parameter[0]);
|
||||
iop[1] = le32_to_cpu(lg->io_parameter[1]);
|
||||
|
||||
@ -1961,7 +1972,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
|
||||
mb[10] |= BIT_0; /* Class 2. */
|
||||
if (lg->io_parameter[9] || lg->io_parameter[10])
|
||||
mb[10] |= BIT_1; /* Class 3. */
|
||||
if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7))
|
||||
if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
|
||||
mb[10] |= BIT_7; /* Confirmed Completion
|
||||
* Allowed
|
||||
*/
|
||||
@ -2142,7 +2153,6 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
|
||||
dma_addr_t lg_dma;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct req_que *req;
|
||||
struct rsp_que *rsp;
|
||||
|
||||
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
|
||||
"Entered %s.\n", __func__);
|
||||
@ -2159,13 +2169,12 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
|
||||
req = ha->req_q_map[0];
|
||||
else
|
||||
req = vha->req;
|
||||
rsp = req->rsp;
|
||||
lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
|
||||
lg->entry_count = 1;
|
||||
lg->handle = MAKE_HANDLE(req->id, lg->handle);
|
||||
lg->nport_handle = cpu_to_le16(loop_id);
|
||||
lg->control_flags =
|
||||
__constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
|
||||
cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
|
||||
LCF_FREE_NPORT);
|
||||
lg->port_id[0] = al_pa;
|
||||
lg->port_id[1] = area;
|
||||
@ -2181,7 +2190,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
|
||||
"Failed to complete IOCB -- error status (%x).\n",
|
||||
lg->entry_status);
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
} else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
|
||||
} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
|
||||
ql_dbg(ql_dbg_mbx, vha, 0x1071,
|
||||
"Failed to complete IOCB -- completion status (%x) "
|
||||
"ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
|
||||
@ -2673,7 +2682,7 @@ qla24xx_abort_command(srb_t *sp)
|
||||
"Failed to complete IOCB -- error status (%x).\n",
|
||||
abt->entry_status);
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
} else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
|
||||
} else if (abt->nport_handle != cpu_to_le16(0)) {
|
||||
ql_dbg(ql_dbg_mbx, vha, 0x1090,
|
||||
"Failed to complete IOCB -- completion status (%x).\n",
|
||||
le16_to_cpu(abt->nport_handle));
|
||||
@ -2756,8 +2765,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
|
||||
"Failed to complete IOCB -- error status (%x).\n",
|
||||
sts->entry_status);
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
} else if (sts->comp_status !=
|
||||
__constant_cpu_to_le16(CS_COMPLETE)) {
|
||||
} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
|
||||
ql_dbg(ql_dbg_mbx, vha, 0x1096,
|
||||
"Failed to complete IOCB -- completion status (%x).\n",
|
||||
le16_to_cpu(sts->comp_status));
|
||||
@ -2853,7 +2861,8 @@ qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
|
||||
mbx_cmd_t mc;
|
||||
mbx_cmd_t *mcp = &mc;
|
||||
|
||||
if (!IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw))
|
||||
if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
|
||||
!IS_QLA27XX(vha->hw))
|
||||
return QLA_FUNCTION_FAILED;
|
||||
|
||||
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
|
||||
@ -2891,7 +2900,8 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
|
||||
mbx_cmd_t mc;
|
||||
mbx_cmd_t *mcp = &mc;
|
||||
|
||||
if (!IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw))
|
||||
if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
|
||||
!IS_QLA27XX(vha->hw))
|
||||
return QLA_FUNCTION_FAILED;
|
||||
|
||||
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
|
||||
@ -3483,7 +3493,7 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
|
||||
"Failed to complete IOCB -- error status (%x).\n",
|
||||
vpmod->comp_status);
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
} else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
|
||||
} else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
|
||||
ql_dbg(ql_dbg_mbx, vha, 0x10bf,
|
||||
"Failed to complete IOCB -- completion status (%x).\n",
|
||||
le16_to_cpu(vpmod->comp_status));
|
||||
@ -3542,7 +3552,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
|
||||
vce->entry_type = VP_CTRL_IOCB_TYPE;
|
||||
vce->entry_count = 1;
|
||||
vce->command = cpu_to_le16(cmd);
|
||||
vce->vp_count = __constant_cpu_to_le16(1);
|
||||
vce->vp_count = cpu_to_le16(1);
|
||||
|
||||
/* index map in firmware starts with 1; decrement index
|
||||
* this is ok as we never use index 0
|
||||
@ -3562,7 +3572,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
|
||||
"Failed to complete IOCB -- error status (%x).\n",
|
||||
vce->entry_status);
|
||||
rval = QLA_FUNCTION_FAILED;
|
||||
} else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
|
||||
} else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
|
||||
ql_dbg(ql_dbg_mbx, vha, 0x10c5,
|
||||
"Failed to complet IOCB -- completion status (%x).\n",
|
||||
le16_to_cpu(vce->comp_status));
|
||||
|
@ -371,7 +371,6 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
|
||||
void
|
||||
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
|
||||
{
|
||||
int ret;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
scsi_qla_host_t *vp;
|
||||
unsigned long flags = 0;
|
||||
@ -392,7 +391,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
|
||||
atomic_inc(&vp->vref_count);
|
||||
spin_unlock_irqrestore(&ha->vport_slock, flags);
|
||||
|
||||
ret = qla2x00_do_dpc_vp(vp);
|
||||
qla2x00_do_dpc_vp(vp);
|
||||
|
||||
spin_lock_irqsave(&ha->vport_slock, flags);
|
||||
atomic_dec(&vp->vref_count);
|
||||
|
@ -862,7 +862,7 @@ qlafx00_config_queues(struct scsi_qla_host *vha)
|
||||
dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);
|
||||
|
||||
req->length = ha->req_que_len;
|
||||
req->ring = (void *)ha->iobase + ha->req_que_off;
|
||||
req->ring = (void __force *)ha->iobase + ha->req_que_off;
|
||||
req->dma = bar2_hdl + ha->req_que_off;
|
||||
if ((!req->ring) || (req->length == 0)) {
|
||||
ql_log_pci(ql_log_info, ha->pdev, 0x012f,
|
||||
@ -877,7 +877,7 @@ qlafx00_config_queues(struct scsi_qla_host *vha)
|
||||
ha->req_que_off, (u64)req->dma);
|
||||
|
||||
rsp->length = ha->rsp_que_len;
|
||||
rsp->ring = (void *)ha->iobase + ha->rsp_que_off;
|
||||
rsp->ring = (void __force *)ha->iobase + ha->rsp_que_off;
|
||||
rsp->dma = bar2_hdl + ha->rsp_que_off;
|
||||
if ((!rsp->ring) || (rsp->length == 0)) {
|
||||
ql_log_pci(ql_log_info, ha->pdev, 0x0131,
|
||||
@ -1317,10 +1317,10 @@ int
|
||||
qlafx00_configure_devices(scsi_qla_host_t *vha)
|
||||
{
|
||||
int rval;
|
||||
unsigned long flags, save_flags;
|
||||
unsigned long flags;
|
||||
rval = QLA_SUCCESS;
|
||||
|
||||
save_flags = flags = vha->dpc_flags;
|
||||
flags = vha->dpc_flags;
|
||||
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2090,
|
||||
"Configure devices -- dpc flags =0x%lx\n", flags);
|
||||
@ -1425,7 +1425,7 @@ qlafx00_init_response_q_entries(struct rsp_que *rsp)
|
||||
pkt = rsp->ring_ptr;
|
||||
for (cnt = 0; cnt < rsp->length; cnt++) {
|
||||
pkt->signature = RESPONSE_PROCESSED;
|
||||
WRT_REG_DWORD((void __iomem *)&pkt->signature,
|
||||
WRT_REG_DWORD((void __force __iomem *)&pkt->signature,
|
||||
RESPONSE_PROCESSED);
|
||||
pkt++;
|
||||
}
|
||||
@ -2279,7 +2279,6 @@ qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
|
||||
struct sts_entry_fx00 *sts;
|
||||
__le16 comp_status;
|
||||
__le16 scsi_status;
|
||||
uint16_t ox_id;
|
||||
__le16 lscsi_status;
|
||||
int32_t resid;
|
||||
uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
|
||||
@ -2344,7 +2343,6 @@ qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
|
||||
|
||||
fcport = sp->fcport;
|
||||
|
||||
ox_id = 0;
|
||||
sense_len = par_sense_len = rsp_info_len = resid_len =
|
||||
fw_resid_len = 0;
|
||||
if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))
|
||||
@ -2528,12 +2526,12 @@ check_scsi_status:
|
||||
ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
|
||||
"FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
|
||||
"tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x "
|
||||
"rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, "
|
||||
"rsp_info=%p resid=0x%x fw_resid=0x%x sense_len=0x%x, "
|
||||
"par_sense_len=0x%x, rsp_info_len=0x%x\n",
|
||||
comp_status, scsi_status, res, vha->host_no,
|
||||
cp->device->id, cp->device->lun, fcport->tgt_id,
|
||||
lscsi_status, cp->cmnd, scsi_bufflen(cp),
|
||||
rsp_info_len, resid_len, fw_resid_len, sense_len,
|
||||
rsp_info, resid_len, fw_resid_len, sense_len,
|
||||
par_sense_len, rsp_info_len);
|
||||
|
||||
if (rsp->status_srb == NULL)
|
||||
@ -3009,7 +3007,7 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
|
||||
|
||||
/* No data transfer */
|
||||
if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
|
||||
lcmd_pkt->byte_count = __constant_cpu_to_le32(0);
|
||||
lcmd_pkt->byte_count = cpu_to_le32(0);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -3071,7 +3069,7 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
|
||||
int
|
||||
qlafx00_start_scsi(srb_t *sp)
|
||||
{
|
||||
int ret, nseg;
|
||||
int nseg;
|
||||
unsigned long flags;
|
||||
uint32_t index;
|
||||
uint32_t handle;
|
||||
@ -3088,8 +3086,6 @@ qlafx00_start_scsi(srb_t *sp)
|
||||
struct scsi_lun llun;
|
||||
|
||||
/* Setup device pointers. */
|
||||
ret = 0;
|
||||
|
||||
rsp = ha->rsp_q_map[0];
|
||||
req = vha->req;
|
||||
|
||||
|
@ -347,32 +347,31 @@ char *qdev_state(uint32_t dev_state)
|
||||
}
|
||||
|
||||
/*
|
||||
* In: 'off' is offset from CRB space in 128M pci map
|
||||
* Out: 'off' is 2M pci map addr
|
||||
* In: 'off_in' is offset from CRB space in 128M pci map
|
||||
* Out: 'off_out' is 2M pci map addr
|
||||
* side effect: lock crb window
|
||||
*/
|
||||
static void
|
||||
qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
|
||||
qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in,
|
||||
void __iomem **off_out)
|
||||
{
|
||||
u32 win_read;
|
||||
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
|
||||
|
||||
ha->crb_win = CRB_HI(*off);
|
||||
writel(ha->crb_win,
|
||||
(void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
|
||||
ha->crb_win = CRB_HI(off_in);
|
||||
writel(ha->crb_win, CRB_WINDOW_2M + ha->nx_pcibase);
|
||||
|
||||
/* Read back value to make sure write has gone through before trying
|
||||
* to use it.
|
||||
*/
|
||||
win_read = RD_REG_DWORD((void __iomem *)
|
||||
(CRB_WINDOW_2M + ha->nx_pcibase));
|
||||
win_read = RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
|
||||
if (win_read != ha->crb_win) {
|
||||
ql_dbg(ql_dbg_p3p, vha, 0xb000,
|
||||
"%s: Written crbwin (0x%x) "
|
||||
"!= Read crbwin (0x%x), off=0x%lx.\n",
|
||||
__func__, ha->crb_win, win_read, *off);
|
||||
__func__, ha->crb_win, win_read, off_in);
|
||||
}
|
||||
*off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
|
||||
*off_out = (off_in & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
|
||||
}
|
||||
|
||||
static inline unsigned long
|
||||
@ -417,29 +416,30 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
|
||||
}
|
||||
|
||||
static int
|
||||
qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
|
||||
qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
|
||||
void __iomem **off_out)
|
||||
{
|
||||
struct crb_128M_2M_sub_block_map *m;
|
||||
|
||||
if (*off >= QLA82XX_CRB_MAX)
|
||||
if (off_in >= QLA82XX_CRB_MAX)
|
||||
return -1;
|
||||
|
||||
if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
|
||||
*off = (*off - QLA82XX_PCI_CAMQM) +
|
||||
if (off_in >= QLA82XX_PCI_CAMQM && off_in < QLA82XX_PCI_CAMQM_2M_END) {
|
||||
*off_out = (off_in - QLA82XX_PCI_CAMQM) +
|
||||
QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (*off < QLA82XX_PCI_CRBSPACE)
|
||||
if (off_in < QLA82XX_PCI_CRBSPACE)
|
||||
return -1;
|
||||
|
||||
*off -= QLA82XX_PCI_CRBSPACE;
|
||||
*off_out = (void __iomem *)(off_in - QLA82XX_PCI_CRBSPACE);
|
||||
|
||||
/* Try direct map */
|
||||
m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
|
||||
m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)];
|
||||
|
||||
if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
|
||||
*off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
|
||||
if (m->valid && (m->start_128M <= off_in) && (m->end_128M > off_in)) {
|
||||
*off_out = off_in + m->start_2M - m->start_128M + ha->nx_pcibase;
|
||||
return 0;
|
||||
}
|
||||
/* Not in direct map, use crb window */
|
||||
@ -465,51 +465,61 @@ static int qla82xx_crb_win_lock(struct qla_hw_data *ha)
|
||||
}
|
||||
|
||||
int
|
||||
qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
|
||||
qla82xx_wr_32(struct qla_hw_data *ha, ulong off_in, u32 data)
|
||||
{
|
||||
void __iomem *off;
|
||||
unsigned long flags = 0;
|
||||
int rv;
|
||||
|
||||
rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
|
||||
rv = qla82xx_pci_get_crb_addr_2M(ha, off_in, &off);
|
||||
|
||||
BUG_ON(rv == -1);
|
||||
|
||||
if (rv == 1) {
|
||||
#ifndef __CHECKER__
|
||||
write_lock_irqsave(&ha->hw_lock, flags);
|
||||
#endif
|
||||
qla82xx_crb_win_lock(ha);
|
||||
qla82xx_pci_set_crbwindow_2M(ha, &off);
|
||||
qla82xx_pci_set_crbwindow_2M(ha, off_in, &off);
|
||||
}
|
||||
|
||||
writel(data, (void __iomem *)off);
|
||||
|
||||
if (rv == 1) {
|
||||
qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
|
||||
#ifndef __CHECKER__
|
||||
write_unlock_irqrestore(&ha->hw_lock, flags);
|
||||
#endif
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
|
||||
qla82xx_rd_32(struct qla_hw_data *ha, ulong off_in)
|
||||
{
|
||||
void __iomem *off;
|
||||
unsigned long flags = 0;
|
||||
int rv;
|
||||
u32 data;
|
||||
|
||||
rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
|
||||
rv = qla82xx_pci_get_crb_addr_2M(ha, off_in, &off);
|
||||
|
||||
BUG_ON(rv == -1);
|
||||
|
||||
if (rv == 1) {
|
||||
#ifndef __CHECKER__
|
||||
write_lock_irqsave(&ha->hw_lock, flags);
|
||||
#endif
|
||||
qla82xx_crb_win_lock(ha);
|
||||
qla82xx_pci_set_crbwindow_2M(ha, &off);
|
||||
qla82xx_pci_set_crbwindow_2M(ha, off_in, &off);
|
||||
}
|
||||
data = RD_REG_DWORD((void __iomem *)off);
|
||||
data = RD_REG_DWORD(off);
|
||||
|
||||
if (rv == 1) {
|
||||
qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
|
||||
#ifndef __CHECKER__
|
||||
write_unlock_irqrestore(&ha->hw_lock, flags);
|
||||
#endif
|
||||
}
|
||||
return data;
|
||||
}
|
||||
@ -547,9 +557,6 @@ void qla82xx_idc_unlock(struct qla_hw_data *ha)
|
||||
qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
|
||||
}
|
||||
|
||||
/* PCI Windowing for DDR regions. */
|
||||
#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
|
||||
(((addr) <= (high)) && ((addr) >= (low)))
|
||||
/*
|
||||
* check memory access boundary.
|
||||
* used by test agent. support ddr access only for now
|
||||
@ -558,9 +565,9 @@ static unsigned long
|
||||
qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
|
||||
unsigned long long addr, int size)
|
||||
{
|
||||
if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
|
||||
if (!addr_in_range(addr, QLA82XX_ADDR_DDR_NET,
|
||||
QLA82XX_ADDR_DDR_NET_MAX) ||
|
||||
!QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET,
|
||||
!addr_in_range(addr + size - 1, QLA82XX_ADDR_DDR_NET,
|
||||
QLA82XX_ADDR_DDR_NET_MAX) ||
|
||||
((size != 1) && (size != 2) && (size != 4) && (size != 8)))
|
||||
return 0;
|
||||
@ -577,7 +584,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
|
||||
u32 win_read;
|
||||
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
|
||||
|
||||
if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
|
||||
if (addr_in_range(addr, QLA82XX_ADDR_DDR_NET,
|
||||
QLA82XX_ADDR_DDR_NET_MAX)) {
|
||||
/* DDR network side */
|
||||
window = MN_WIN(addr);
|
||||
@ -592,7 +599,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
|
||||
__func__, window, win_read);
|
||||
}
|
||||
addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
|
||||
} else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
|
||||
} else if (addr_in_range(addr, QLA82XX_ADDR_OCM0,
|
||||
QLA82XX_ADDR_OCM0_MAX)) {
|
||||
unsigned int temp1;
|
||||
if ((addr & 0x00ff800) == 0xff800) {
|
||||
@ -615,7 +622,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
|
||||
}
|
||||
addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
|
||||
|
||||
} else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
|
||||
} else if (addr_in_range(addr, QLA82XX_ADDR_QDR_NET,
|
||||
QLA82XX_P3_ADDR_QDR_NET_MAX)) {
|
||||
/* QDR network side */
|
||||
window = MS_WIN(addr);
|
||||
@ -656,16 +663,16 @@ static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
|
||||
qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
|
||||
|
||||
/* DDR network side */
|
||||
if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
|
||||
if (addr_in_range(addr, QLA82XX_ADDR_DDR_NET,
|
||||
QLA82XX_ADDR_DDR_NET_MAX))
|
||||
BUG();
|
||||
else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
|
||||
else if (addr_in_range(addr, QLA82XX_ADDR_OCM0,
|
||||
QLA82XX_ADDR_OCM0_MAX))
|
||||
return 1;
|
||||
else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
|
||||
else if (addr_in_range(addr, QLA82XX_ADDR_OCM1,
|
||||
QLA82XX_ADDR_OCM1_MAX))
|
||||
return 1;
|
||||
else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
|
||||
else if (addr_in_range(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
|
||||
/* QDR network side */
|
||||
window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
|
||||
if (ha->qdr_sn_window == window)
|
||||
@ -922,20 +929,18 @@ qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
|
||||
{
|
||||
uint32_t off_value, rval = 0;
|
||||
|
||||
WRT_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase),
|
||||
(off & 0xFFFF0000));
|
||||
WRT_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000);
|
||||
|
||||
/* Read back value to make sure write has gone through */
|
||||
RD_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
|
||||
RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
|
||||
off_value = (off & 0x0000FFFF);
|
||||
|
||||
if (flag)
|
||||
WRT_REG_DWORD((void __iomem *)
|
||||
(off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
|
||||
data);
|
||||
WRT_REG_DWORD(off_value + CRB_INDIRECT_2M + ha->nx_pcibase,
|
||||
data);
|
||||
else
|
||||
rval = RD_REG_DWORD((void __iomem *)
|
||||
(off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
|
||||
rval = RD_REG_DWORD(off_value + CRB_INDIRECT_2M +
|
||||
ha->nx_pcibase);
|
||||
|
||||
return rval;
|
||||
}
|
||||
@ -1663,8 +1668,7 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
|
||||
}
|
||||
|
||||
len = pci_resource_len(ha->pdev, 0);
|
||||
ha->nx_pcibase =
|
||||
(unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
|
||||
ha->nx_pcibase = ioremap(pci_resource_start(ha->pdev, 0), len);
|
||||
if (!ha->nx_pcibase) {
|
||||
ql_log_pci(ql_log_fatal, ha->pdev, 0x000e,
|
||||
"Cannot remap pcibase MMIO, aborting.\n");
|
||||
@ -1673,17 +1677,13 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
|
||||
|
||||
/* Mapping of IO base pointer */
|
||||
if (IS_QLA8044(ha)) {
|
||||
ha->iobase =
|
||||
(device_reg_t *)((uint8_t *)ha->nx_pcibase);
|
||||
ha->iobase = ha->nx_pcibase;
|
||||
} else if (IS_QLA82XX(ha)) {
|
||||
ha->iobase =
|
||||
(device_reg_t *)((uint8_t *)ha->nx_pcibase +
|
||||
0xbc000 + (ha->pdev->devfn << 11));
|
||||
ha->iobase = ha->nx_pcibase + 0xbc000 + (ha->pdev->devfn << 11);
|
||||
}
|
||||
|
||||
if (!ql2xdbwr) {
|
||||
ha->nxdb_wr_ptr =
|
||||
(unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
|
||||
ha->nxdb_wr_ptr = ioremap((pci_resource_start(ha->pdev, 4) +
|
||||
(ha->pdev->devfn << 12)), 4);
|
||||
if (!ha->nxdb_wr_ptr) {
|
||||
ql_log_pci(ql_log_fatal, ha->pdev, 0x000f,
|
||||
@ -1694,10 +1694,10 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
|
||||
/* Mapping of IO base pointer,
|
||||
* door bell read and write pointer
|
||||
*/
|
||||
ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
|
||||
ha->nxdb_rd_ptr = ha->nx_pcibase + (512 * 1024) +
|
||||
(ha->pdev->devfn * 8);
|
||||
} else {
|
||||
ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ?
|
||||
ha->nxdb_wr_ptr = (void __iomem *)(ha->pdev->devfn == 6 ?
|
||||
QLA82XX_CAMRAM_DB1 :
|
||||
QLA82XX_CAMRAM_DB2);
|
||||
}
|
||||
@ -1707,12 +1707,12 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
|
||||
ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
|
||||
"nx_pci_base=%p iobase=%p "
|
||||
"max_req_queues=%d msix_count=%d.\n",
|
||||
(void *)ha->nx_pcibase, ha->iobase,
|
||||
ha->nx_pcibase, ha->iobase,
|
||||
ha->max_req_queues, ha->msix_count);
|
||||
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
|
||||
"nx_pci_base=%p iobase=%p "
|
||||
"max_req_queues=%d msix_count=%d.\n",
|
||||
(void *)ha->nx_pcibase, ha->iobase,
|
||||
ha->nx_pcibase, ha->iobase,
|
||||
ha->max_req_queues, ha->msix_count);
|
||||
return 0;
|
||||
|
||||
@ -1740,8 +1740,8 @@ qla82xx_pci_config(scsi_qla_host_t *vha)
|
||||
ret = pci_set_mwi(ha->pdev);
|
||||
ha->chip_revision = ha->pdev->revision;
|
||||
ql_dbg(ql_dbg_init, vha, 0x0043,
|
||||
"Chip revision:%d.\n",
|
||||
ha->chip_revision);
|
||||
"Chip revision:%d; pci_set_mwi() returned %d.\n",
|
||||
ha->chip_revision, ret);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1768,8 +1768,8 @@ void qla82xx_config_rings(struct scsi_qla_host *vha)
|
||||
|
||||
/* Setup ring parameters in initialization control block. */
|
||||
icb = (struct init_cb_81xx *)ha->init_cb;
|
||||
icb->request_q_outpointer = __constant_cpu_to_le16(0);
|
||||
icb->response_q_inpointer = __constant_cpu_to_le16(0);
|
||||
icb->request_q_outpointer = cpu_to_le16(0);
|
||||
icb->response_q_inpointer = cpu_to_le16(0);
|
||||
icb->request_q_length = cpu_to_le16(req->length);
|
||||
icb->response_q_length = cpu_to_le16(rsp->length);
|
||||
icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
|
||||
@ -1777,9 +1777,9 @@ void qla82xx_config_rings(struct scsi_qla_host *vha)
|
||||
icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
|
||||
icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
|
||||
|
||||
WRT_REG_DWORD((unsigned long __iomem *)®->req_q_out[0], 0);
|
||||
WRT_REG_DWORD((unsigned long __iomem *)®->rsp_q_in[0], 0);
|
||||
WRT_REG_DWORD((unsigned long __iomem *)®->rsp_q_out[0], 0);
|
||||
WRT_REG_DWORD(®->req_q_out[0], 0);
|
||||
WRT_REG_DWORD(®->rsp_q_in[0], 0);
|
||||
WRT_REG_DWORD(®->rsp_q_out[0], 0);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -2298,7 +2298,7 @@ void qla82xx_init_flags(struct qla_hw_data *ha)
|
||||
ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
|
||||
}
|
||||
|
||||
inline void
|
||||
static inline void
|
||||
qla82xx_set_idc_version(scsi_qla_host_t *vha)
|
||||
{
|
||||
int idc_ver;
|
||||
@ -2481,14 +2481,12 @@ try_blob_fw:
|
||||
ql_log(ql_log_info, vha, 0x00a5,
|
||||
"Firmware loaded successfully from binary blob.\n");
|
||||
return QLA_SUCCESS;
|
||||
} else {
|
||||
ql_log(ql_log_fatal, vha, 0x00a6,
|
||||
"Firmware load failed for binary blob.\n");
|
||||
blob->fw = NULL;
|
||||
blob = NULL;
|
||||
goto fw_load_failed;
|
||||
}
|
||||
return QLA_SUCCESS;
|
||||
|
||||
ql_log(ql_log_fatal, vha, 0x00a6,
|
||||
"Firmware load failed for binary blob.\n");
|
||||
blob->fw = NULL;
|
||||
blob = NULL;
|
||||
|
||||
fw_load_failed:
|
||||
return QLA_FUNCTION_FAILED;
|
||||
@ -2549,7 +2547,7 @@ qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
|
||||
"Do ROM fast read failed.\n");
|
||||
goto done_read;
|
||||
}
|
||||
dwptr[i] = __constant_cpu_to_le32(val);
|
||||
dwptr[i] = cpu_to_le32(val);
|
||||
}
|
||||
done_read:
|
||||
return dwptr;
|
||||
@ -2671,7 +2669,7 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
|
||||
{
|
||||
int ret;
|
||||
uint32_t liter;
|
||||
uint32_t sec_mask, rest_addr;
|
||||
uint32_t rest_addr;
|
||||
dma_addr_t optrom_dma;
|
||||
void *optrom = NULL;
|
||||
int page_mode = 0;
|
||||
@ -2693,7 +2691,6 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
|
||||
}
|
||||
|
||||
rest_addr = ha->fdt_block_size - 1;
|
||||
sec_mask = ~rest_addr;
|
||||
|
||||
ret = qla82xx_unprotect_flash(ha);
|
||||
if (ret) {
|
||||
@ -2789,7 +2786,6 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha)
|
||||
{
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct req_que *req = ha->req_q_map[0];
|
||||
struct device_reg_82xx __iomem *reg;
|
||||
uint32_t dbval;
|
||||
|
||||
/* Adjust ring index. */
|
||||
@ -2800,18 +2796,16 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha)
|
||||
} else
|
||||
req->ring_ptr++;
|
||||
|
||||
reg = &ha->iobase->isp82;
|
||||
dbval = 0x04 | (ha->portnum << 5);
|
||||
|
||||
dbval = dbval | (req->id << 8) | (req->ring_index << 16);
|
||||
if (ql2xdbwr)
|
||||
qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
|
||||
qla82xx_wr_32(ha, (unsigned long)ha->nxdb_wr_ptr, dbval);
|
||||
else {
|
||||
WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
|
||||
WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
|
||||
wmb();
|
||||
while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
|
||||
WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr,
|
||||
dbval);
|
||||
while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
|
||||
WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
|
||||
wmb();
|
||||
}
|
||||
}
|
||||
@ -3842,8 +3836,7 @@ qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
|
||||
loop_cnt = ocm_hdr->op_count;
|
||||
|
||||
for (i = 0; i < loop_cnt; i++) {
|
||||
r_value = RD_REG_DWORD((void __iomem *)
|
||||
(r_addr + ha->nx_pcibase));
|
||||
r_value = RD_REG_DWORD(r_addr + ha->nx_pcibase);
|
||||
*data_ptr++ = cpu_to_le32(r_value);
|
||||
r_addr += r_stride;
|
||||
}
|
||||
|
@ -462,12 +462,11 @@ qla8044_flash_lock(scsi_qla_host_t *vha)
|
||||
static void
|
||||
qla8044_flash_unlock(scsi_qla_host_t *vha)
|
||||
{
|
||||
int ret_val;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
|
||||
/* Reading FLASH_UNLOCK register unlocks the Flash */
|
||||
qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, 0xFF);
|
||||
ret_val = qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
|
||||
qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
|
||||
}
|
||||
|
||||
|
||||
@ -561,7 +560,7 @@ qla8044_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
|
||||
return buf;
|
||||
}
|
||||
|
||||
inline int
|
||||
static inline int
|
||||
qla8044_need_reset(struct scsi_qla_host *vha)
|
||||
{
|
||||
uint32_t drv_state, drv_active;
|
||||
@ -1130,9 +1129,9 @@ qla8044_ms_mem_write_128b(struct scsi_qla_host *vha,
|
||||
}
|
||||
|
||||
for (i = 0; i < count; i++, addr += 16) {
|
||||
if (!((QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_QDR_NET,
|
||||
if (!((addr_in_range(addr, QLA8044_ADDR_QDR_NET,
|
||||
QLA8044_ADDR_QDR_NET_MAX)) ||
|
||||
(QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_DDR_NET,
|
||||
(addr_in_range(addr, QLA8044_ADDR_DDR_NET,
|
||||
QLA8044_ADDR_DDR_NET_MAX)))) {
|
||||
ret_val = QLA_FUNCTION_FAILED;
|
||||
goto exit_ms_mem_write_unlock;
|
||||
@ -1605,7 +1604,7 @@ qla8044_set_idc_dontreset(struct scsi_qla_host *vha)
|
||||
qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
|
||||
}
|
||||
|
||||
inline void
|
||||
static inline void
|
||||
qla8044_set_rst_ready(struct scsi_qla_host *vha)
|
||||
{
|
||||
uint32_t drv_state;
|
||||
@ -2992,7 +2991,7 @@ qla8044_minidump_process_rddfe(struct scsi_qla_host *vha,
|
||||
uint32_t addr1, addr2, value, data, temp, wrVal;
|
||||
uint8_t stride, stride2;
|
||||
uint16_t count;
|
||||
uint32_t poll, mask, data_size, modify_mask;
|
||||
uint32_t poll, mask, modify_mask;
|
||||
uint32_t wait_count = 0;
|
||||
|
||||
uint32_t *data_ptr = *d_ptr;
|
||||
@ -3009,7 +3008,6 @@ qla8044_minidump_process_rddfe(struct scsi_qla_host *vha,
|
||||
poll = rddfe->poll;
|
||||
mask = rddfe->mask;
|
||||
modify_mask = rddfe->modify_mask;
|
||||
data_size = rddfe->data_size;
|
||||
|
||||
addr2 = addr1 + stride;
|
||||
|
||||
@ -3091,7 +3089,7 @@ qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha,
|
||||
uint8_t stride1, stride2;
|
||||
uint32_t addr3, addr4, addr5, addr6, addr7;
|
||||
uint16_t count, loop_cnt;
|
||||
uint32_t poll, mask;
|
||||
uint32_t mask;
|
||||
uint32_t *data_ptr = *d_ptr;
|
||||
|
||||
struct qla8044_minidump_entry_rdmdio *rdmdio;
|
||||
@ -3105,7 +3103,6 @@ qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha,
|
||||
stride2 = rdmdio->stride_2;
|
||||
count = rdmdio->count;
|
||||
|
||||
poll = rdmdio->poll;
|
||||
mask = rdmdio->mask;
|
||||
value2 = rdmdio->value_2;
|
||||
|
||||
@ -3164,7 +3161,7 @@ error:
|
||||
static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha,
|
||||
struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
|
||||
{
|
||||
uint32_t addr1, addr2, value1, value2, poll, mask, r_value;
|
||||
uint32_t addr1, addr2, value1, value2, poll, r_value;
|
||||
uint32_t wait_count = 0;
|
||||
struct qla8044_minidump_entry_pollwr *pollwr_hdr;
|
||||
|
||||
@ -3175,7 +3172,6 @@ static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha,
|
||||
value2 = pollwr_hdr->value_2;
|
||||
|
||||
poll = pollwr_hdr->poll;
|
||||
mask = pollwr_hdr->mask;
|
||||
|
||||
while (wait_count < poll) {
|
||||
qla8044_rd_reg_indirect(vha, addr1, &r_value);
|
||||
|
@ -58,8 +58,10 @@
|
||||
#define QLA8044_PCI_QDR_NET_MAX ((unsigned long)0x043fffff)
|
||||
|
||||
/* PCI Windowing for DDR regions. */
|
||||
#define QLA8044_ADDR_IN_RANGE(addr, low, high) \
|
||||
(((addr) <= (high)) && ((addr) >= (low)))
|
||||
static inline bool addr_in_range(u64 addr, u64 low, u64 high)
|
||||
{
|
||||
return addr <= high && addr >= low;
|
||||
}
|
||||
|
||||
/* Indirectly Mapped Registers */
|
||||
#define QLA8044_FLASH_SPI_STATUS 0x2808E010
|
||||
|
@ -656,7 +656,7 @@ qla2x00_sp_compl(void *data, void *ptr, int res)
|
||||
"SP reference-count to ZERO -- sp=%p cmd=%p.\n",
|
||||
sp, GET_CMD_SP(sp));
|
||||
if (ql2xextended_error_logging & ql_dbg_io)
|
||||
BUG();
|
||||
WARN_ON(atomic_read(&sp->ref_count) == 0);
|
||||
return;
|
||||
}
|
||||
if (!atomic_dec_and_test(&sp->ref_count))
|
||||
@ -958,8 +958,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
|
||||
}
|
||||
|
||||
ql_dbg(ql_dbg_taskm, vha, 0x8002,
|
||||
"Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p\n",
|
||||
vha->host_no, id, lun, sp, cmd);
|
||||
"Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
|
||||
vha->host_no, id, lun, sp, cmd, sp->handle);
|
||||
|
||||
/* Get a reference to the sp and drop the lock.*/
|
||||
sp_get(sp);
|
||||
@ -967,14 +967,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
rval = ha->isp_ops->abort_command(sp);
|
||||
if (rval) {
|
||||
if (rval == QLA_FUNCTION_PARAMETER_ERROR) {
|
||||
/*
|
||||
* Decrement the ref_count since we can't find the
|
||||
* command
|
||||
*/
|
||||
atomic_dec(&sp->ref_count);
|
||||
if (rval == QLA_FUNCTION_PARAMETER_ERROR)
|
||||
ret = SUCCESS;
|
||||
} else
|
||||
else
|
||||
ret = FAILED;
|
||||
|
||||
ql_dbg(ql_dbg_taskm, vha, 0x8003,
|
||||
@ -986,12 +981,6 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&ha->hardware_lock, flags);
|
||||
/*
|
||||
* Clear the slot in the oustanding_cmds array if we can't find the
|
||||
* command to reclaim the resources.
|
||||
*/
|
||||
if (rval == QLA_FUNCTION_PARAMETER_ERROR)
|
||||
vha->req->outstanding_cmds[sp->handle] = NULL;
|
||||
sp->done(ha, sp, 0);
|
||||
spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
||||
|
||||
@ -2219,6 +2208,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
|
||||
ha->device_type |= DT_IIDMA;
|
||||
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
|
||||
break;
|
||||
case PCI_DEVICE_ID_QLOGIC_ISP2261:
|
||||
ha->device_type |= DT_ISP2261;
|
||||
ha->device_type |= DT_ZIO_SUPPORTED;
|
||||
ha->device_type |= DT_FWI2;
|
||||
ha->device_type |= DT_IIDMA;
|
||||
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
|
||||
break;
|
||||
}
|
||||
|
||||
if (IS_QLA82XX(ha))
|
||||
@ -2296,7 +2292,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271) {
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
|
||||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261) {
|
||||
bars = pci_select_bars(pdev, IORESOURCE_MEM);
|
||||
mem_only = 1;
|
||||
ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
|
||||
@ -2974,7 +2971,6 @@ qla2x00_shutdown(struct pci_dev *pdev)
|
||||
static void
|
||||
qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
|
||||
{
|
||||
struct Scsi_Host *scsi_host;
|
||||
scsi_qla_host_t *vha;
|
||||
unsigned long flags;
|
||||
|
||||
@ -2985,7 +2981,7 @@ qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
|
||||
BUG_ON(base_vha->list.next == &ha->vp_list);
|
||||
/* This assumes first entry in ha->vp_list is always base vha */
|
||||
vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
|
||||
scsi_host = scsi_host_get(vha->host);
|
||||
scsi_host_get(vha->host);
|
||||
|
||||
spin_unlock_irqrestore(&ha->vport_slock, flags);
|
||||
mutex_unlock(&ha->vport_lock);
|
||||
@ -3275,9 +3271,10 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
|
||||
if (!do_login)
|
||||
return;
|
||||
|
||||
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
|
||||
|
||||
if (fcport->login_retry == 0) {
|
||||
fcport->login_retry = vha->hw->login_retry_count;
|
||||
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
|
||||
|
||||
ql_dbg(ql_dbg_disc, vha, 0x2067,
|
||||
"Port login retry %8phN, id = 0x%04x retry cnt=%d.\n",
|
||||
@ -4801,7 +4798,6 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
|
||||
static int
|
||||
qla2x00_do_dpc(void *data)
|
||||
{
|
||||
int rval;
|
||||
scsi_qla_host_t *base_vha;
|
||||
struct qla_hw_data *ha;
|
||||
|
||||
@ -5033,7 +5029,7 @@ loop_resync_check:
|
||||
if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
|
||||
&base_vha->dpc_flags))) {
|
||||
|
||||
rval = qla2x00_loop_resync(base_vha);
|
||||
qla2x00_loop_resync(base_vha);
|
||||
|
||||
clear_bit(LOOP_RESYNC_ACTIVE,
|
||||
&base_vha->dpc_flags);
|
||||
@ -5717,6 +5713,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) },
|
||||
{ 0 },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
|
||||
|
@ -316,7 +316,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
|
||||
|
||||
wprot_old = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base));
|
||||
stat = qla2x00_write_nvram_word_tmo(ha, ha->nvram_base,
|
||||
__constant_cpu_to_le16(0x1234), 100000);
|
||||
cpu_to_le16(0x1234), 100000);
|
||||
wprot = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base));
|
||||
if (stat != QLA_SUCCESS || wprot != 0x1234) {
|
||||
/* Write enable. */
|
||||
@ -691,9 +691,9 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
|
||||
region = (struct qla_flt_region *)&flt[1];
|
||||
ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
|
||||
flt_addr << 2, OPTROM_BURST_SIZE);
|
||||
if (*wptr == __constant_cpu_to_le16(0xffff))
|
||||
if (*wptr == cpu_to_le16(0xffff))
|
||||
goto no_flash_data;
|
||||
if (flt->version != __constant_cpu_to_le16(1)) {
|
||||
if (flt->version != cpu_to_le16(1)) {
|
||||
ql_log(ql_log_warn, vha, 0x0047,
|
||||
"Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
|
||||
le16_to_cpu(flt->version), le16_to_cpu(flt->length),
|
||||
@ -892,7 +892,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
|
||||
fdt = (struct qla_fdt_layout *)req->ring;
|
||||
ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
|
||||
ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
|
||||
if (*wptr == __constant_cpu_to_le16(0xffff))
|
||||
if (*wptr == cpu_to_le16(0xffff))
|
||||
goto no_flash_data;
|
||||
if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
|
||||
fdt->sig[3] != 'D')
|
||||
@ -991,7 +991,7 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
|
||||
ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
|
||||
QLA82XX_IDC_PARAM_ADDR , 8);
|
||||
|
||||
if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
|
||||
if (*wptr == cpu_to_le32(0xffffffff)) {
|
||||
ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
|
||||
ha->fcoe_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT;
|
||||
} else {
|
||||
@ -1051,9 +1051,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
|
||||
|
||||
ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
|
||||
ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
|
||||
if (hdr.version == __constant_cpu_to_le16(0xffff))
|
||||
if (hdr.version == cpu_to_le16(0xffff))
|
||||
return;
|
||||
if (hdr.version != __constant_cpu_to_le16(1)) {
|
||||
if (hdr.version != cpu_to_le16(1)) {
|
||||
ql_dbg(ql_dbg_user, vha, 0x7090,
|
||||
"Unsupported NPIV-Config "
|
||||
"detected: version=0x%x entries=0x%x checksum=0x%x.\n",
|
||||
|
@ -1141,7 +1141,7 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
|
||||
nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
|
||||
if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
|
||||
nack->u.isp24.flags = ntfy->u.isp24.flags &
|
||||
__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
|
||||
cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
|
||||
}
|
||||
nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
|
||||
nack->u.isp24.status = ntfy->u.isp24.status;
|
||||
@ -1199,7 +1199,7 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
|
||||
resp->sof_type = abts->sof_type;
|
||||
resp->exchange_address = abts->exchange_address;
|
||||
resp->fcp_hdr_le = abts->fcp_hdr_le;
|
||||
f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
|
||||
f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
|
||||
F_CTL_LAST_SEQ | F_CTL_END_SEQ |
|
||||
F_CTL_SEQ_INITIATIVE);
|
||||
p = (uint8_t *)&f_ctl;
|
||||
@ -1274,15 +1274,14 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
|
||||
ctio->entry_count = 1;
|
||||
ctio->nport_handle = entry->nport_handle;
|
||||
ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
|
||||
ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
|
||||
ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
|
||||
ctio->vp_index = vha->vp_idx;
|
||||
ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
|
||||
ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
|
||||
ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
|
||||
ctio->exchange_addr = entry->exchange_addr_to_abort;
|
||||
ctio->u.status1.flags =
|
||||
__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
|
||||
CTIO7_FLAGS_TERMINATE);
|
||||
ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
|
||||
CTIO7_FLAGS_TERMINATE);
|
||||
ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
|
||||
|
||||
/* Memory Barrier */
|
||||
@ -1522,20 +1521,19 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
|
||||
ctio->entry_count = 1;
|
||||
ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
|
||||
ctio->nport_handle = mcmd->sess->loop_id;
|
||||
ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
|
||||
ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
|
||||
ctio->vp_index = ha->vp_idx;
|
||||
ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
|
||||
ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
|
||||
ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
|
||||
ctio->exchange_addr = atio->u.isp24.exchange_addr;
|
||||
ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
|
||||
__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
|
||||
CTIO7_FLAGS_SEND_STATUS);
|
||||
cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
|
||||
temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
|
||||
ctio->u.status1.ox_id = cpu_to_le16(temp);
|
||||
ctio->u.status1.scsi_status =
|
||||
__constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
|
||||
ctio->u.status1.response_len = __constant_cpu_to_le16(8);
|
||||
cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
|
||||
ctio->u.status1.response_len = cpu_to_le16(8);
|
||||
ctio->u.status1.sense_data[0] = resp_code;
|
||||
|
||||
/* Memory Barrier */
|
||||
@ -1786,7 +1784,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
|
||||
|
||||
pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
|
||||
pkt->nport_handle = prm->cmd->loop_id;
|
||||
pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
|
||||
pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
|
||||
pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
|
||||
pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
|
||||
pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
|
||||
@ -2087,10 +2085,9 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
|
||||
{
|
||||
prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
|
||||
(uint32_t)sizeof(ctio->u.status1.sense_data));
|
||||
ctio->u.status0.flags |=
|
||||
__constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
|
||||
ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
|
||||
if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
|
||||
ctio->u.status0.flags |= __constant_cpu_to_le16(
|
||||
ctio->u.status0.flags |= cpu_to_le16(
|
||||
CTIO7_FLAGS_EXPLICIT_CONFORM |
|
||||
CTIO7_FLAGS_CONFORM_REQ);
|
||||
}
|
||||
@ -2107,17 +2104,17 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
|
||||
"non GOOD status\n");
|
||||
goto skip_explict_conf;
|
||||
}
|
||||
drivers/scsi/qla_target.c:

Throughout this file the __constant_cpu_to_le16()/__constant_cpu_to_le32()
wrappers are replaced by plain cpu_to_le16()/cpu_to_le32(), for example:

-		ctio->u.status1.flags |= __constant_cpu_to_le16(
+		ctio->u.status1.flags |= cpu_to_le16(
 				CTIO7_FLAGS_EXPLICIT_CONFORM |
 				CTIO7_FLAGS_CONFORM_REQ);
 	}
 skip_explict_conf:
 	ctio->u.status1.flags &=
-	    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+	    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
 	ctio->u.status1.flags |=
-	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+	    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
 	ctio->u.status1.scsi_status |=
-	    __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
+	    cpu_to_le16(SS_SENSE_LEN_VALID);
 	ctio->u.status1.sense_length =
 	    cpu_to_le16(prm->sense_buffer_len);
 	for (i = 0; i < prm->sense_buffer_len/4; i++)

The same mechanical conversion is applied in the hunks at
@@ -2137,9 +2134,9 @@ skip_explict_conf:, qlt_build_ctio_crc2_pkt(),
qlt_xmit_response(), qlt_rdy_to_xfer(), __qlt_send_term_exchange(),
qlt_term_ctio_exchange(), __qlt_send_busy(), qlt_response_pkt() and the
qlt_24xx/81xx_config_nvram_stage1()/stage2() helpers (exchange_count,
firmware_options_1/2, host_p and icb->firmware_options_1 updates).

The remaining hunks remove unused locals and quiet static checkers:

@@ -2261,7 +2258,6 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
 	uint32_t *cur_dsd;
-	int sgc;
 	uint32_t transfer_length = 0;
 	uint32_t data_bytes;
 	uint32_t dif_bytes;
@@ -2278,7 +2274,6 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
 	struct atio_from_isp *atio = &prm->cmd->atio;
 	uint16_t t16;

-	sgc = 0;
 	ha = vha->hw;

 	pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
@@ -2762,7 +2757,7 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
 	/* Update protection tag */
 	if (cmd->prot_sg_cnt) {
-		uint32_t i, j = 0, k = 0, num_ent;
+		uint32_t i, k = 0, num_ent;
 		struct scatterlist *sg, *sgl;
@@ -2775,7 +2770,6 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
 			k += num_ent;
 			continue;
 		}
-		j = blocks_done - k - 1;
 		k = blocks_done;
 		break;
 	}
@@ -3364,7 +3358,6 @@ qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
 	struct qla_hw_data *ha = vha->hw;
 	struct se_cmd *se_cmd;
-	const struct target_core_fabric_ops *tfo;
 	struct qla_tgt_cmd *cmd;
@@ -3382,7 +3375,6 @@ qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
 	se_cmd = &cmd->se_cmd;
-	tfo = se_cmd->se_tfo;
 	cmd->cmd_sent_to_fw = 0;

 	qlt_unmap_sg(vha, cmd);
@@ -3480,13 +3472,9 @@ skip_term:
 	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
 		cmd->cmd_flags |= BIT_12;
 	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
-		int rx_status = 0;
-
 		cmd->state = QLA_TGT_STATE_DATA_IN;

-		if (unlikely(status != CTIO_SUCCESS))
-			rx_status = -EIO;
-		else
+		if (status == CTIO_SUCCESS)
 			cmd->write_data_transferred = 1;

 		ha->tgt.tgt_ops->handle_data(cmd);
@@ -3928,12 +3916,11 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
 	struct qla_tgt *tgt;
 	struct qla_tgt_sess *sess;
 	uint32_t lun, unpacked_lun;
-	int lun_size, fn;
+	int fn;

 	tgt = vha->vha_tgt.qla_tgt;

 	lun = a->u.isp24.fcp_cmnd.lun;
-	lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
 	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
 	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
 	    a->u.isp24.fcp_hdr.s_id);
@@ -4578,16 +4565,20 @@ static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
 	struct qla_hw_data *ha = vha->hw;
 	unsigned long flags = 0;

+#ifndef __CHECKER__
 	if (!ha_locked)
 		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif

 	qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
 	    NOTIFY_ACK_SRR_FLAGS_REJECT,
 	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
 	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);

+#ifndef __CHECKER__
 	if (!ha_locked)
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#endif

 	kfree(imm);
 }
@@ -5659,7 +5650,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
 	uint8_t *s_id = NULL; /* to hide compiler warnings */
 	int rc;
 	uint32_t lun, unpacked_lun;
-	int lun_size, fn;
+	int fn;
 	void *iocb;
@@ -5691,7 +5682,6 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
 	iocb = a;
 	lun = a->u.isp24.fcp_cmnd.lun;
-	lun_size = sizeof(lun);
 	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
 	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
drivers/scsi/qla_tmpl.c:

@@ -137,39 +137,39 @@ qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
 }

 static inline void
-qla27xx_read8(void *window, void *buf, ulong *len)
+qla27xx_read8(void __iomem *window, void *buf, ulong *len)
 {
 	uint8_t value = ~0;

 	if (buf) {
-		value = RD_REG_BYTE((__iomem void *)window);
+		value = RD_REG_BYTE(window);
 	}
 	qla27xx_insert32(value, buf, len);
 }

 static inline void
-qla27xx_read16(void *window, void *buf, ulong *len)
+qla27xx_read16(void __iomem *window, void *buf, ulong *len)
 {
 	uint16_t value = ~0;

 	if (buf) {
-		value = RD_REG_WORD((__iomem void *)window);
+		value = RD_REG_WORD(window);
 	}
 	qla27xx_insert32(value, buf, len);
 }

 static inline void
-qla27xx_read32(void *window, void *buf, ulong *len)
+qla27xx_read32(void __iomem *window, void *buf, ulong *len)
 {
 	uint32_t value = ~0;

 	if (buf) {
-		value = RD_REG_DWORD((__iomem void *)window);
+		value = RD_REG_DWORD(window);
 	}
 	qla27xx_insert32(value, buf, len);
 }

-static inline void (*qla27xx_read_vector(uint width))(void *, void *, ulong *)
+static inline void (*qla27xx_read_vector(uint width))(void __iomem*, void *, ulong *)
 {
 	return
 	    (width == 1) ? qla27xx_read8 :
@@ -181,7 +181,7 @@ static inline void
 qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
 	uint offset, void *buf, ulong *len)
 {
-	void *window = (void *)reg + offset;
+	void __iomem *window = (void __iomem *)reg + offset;

 	qla27xx_read32(window, buf, len);
 }
@@ -202,8 +202,8 @@ qla27xx_read_window(__iomem struct device_reg_24xx *reg,
 	uint32_t addr, uint offset, uint count, uint width, void *buf,
 	ulong *len)
 {
-	void *window = (void *)reg + offset;
-	void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width);
+	void __iomem *window = (void __iomem *)reg + offset;
+	void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width);

 	qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
 	while (count--) {
@@ -805,9 +805,8 @@ static void
 qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
 {
 	uint8_t v[] = { 0, 0, 0, 0, 0, 0 };
-	int rval = 0;

-	rval = sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
+	sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
 	    v+0, v+1, v+2, v+3, v+4, v+5);

 	tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
@@ -940,8 +939,10 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
 {
 	ulong flags = 0;

+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+#endif

 	if (!vha->hw->fw_dump)
 		ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
@@ -954,6 +955,8 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
 	else
 		qla27xx_execute_fwdt_template(vha);

+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+#endif
 }
drivers/scsi/qla_version.h:

@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.07.00.18-k"
+#define QLA2XXX_VERSION      "8.07.00.26-k"

 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	7
drivers/scsi/tcm_qla2xxx.c:

@@ -420,6 +420,12 @@ static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)

 static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
 {
+	if (!(se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+		struct qla_tgt_cmd *cmd = container_of(se_cmd,
+				struct qla_tgt_cmd, se_cmd);
+		return cmd->state;
+	}
+
 	return 0;
 }
drivers/scsi/scsi_error.c:

@@ -420,6 +420,10 @@ static void scsi_report_sense(struct scsi_device *sdev,
 		evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED;
 		sdev_printk(KERN_WARNING, sdev,
 			    "Mode parameters changed");
+	} else if (sshdr->asc == 0x2a && sshdr->ascq == 0x06) {
+		evt_type = SDEV_EVT_ALUA_STATE_CHANGE_REPORTED;
+		sdev_printk(KERN_WARNING, sdev,
+			    "Asymmetric access state changed");
 	} else if (sshdr->asc == 0x2a && sshdr->ascq == 0x09) {
 		evt_type = SDEV_EVT_CAPACITY_CHANGE_REPORTED;
 		sdev_printk(KERN_WARNING, sdev,
@@ -1155,8 +1159,13 @@ int scsi_eh_get_sense(struct list_head *work_q,
 	struct Scsi_Host *shost;
 	int rtn;

+	/*
+	 * If SCSI_EH_ABORT_SCHEDULED has been set, it is timeout IO,
+	 * should not get sense.
+	 */
 	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
 		if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
+		    (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) ||
 		    SCSI_SENSE_VALID(scmd))
 			continue;
drivers/scsi/scsi_lib.c:

@@ -2423,7 +2423,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
 	unsigned char cmd[12];
 	int use_10_for_ms;
 	int header_length;
-	int result;
+	int result, retry_count = retries;
 	struct scsi_sense_hdr my_sshdr;

 	memset(data, 0, sizeof(*data));
@@ -2502,6 +2502,11 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
 			data->block_descriptor_length = buffer[3];
 		}
 		data->header_length = header_length;
+	} else if ((status_byte(result) == CHECK_CONDITION) &&
+		   scsi_sense_valid(sshdr) &&
+		   sshdr->sense_key == UNIT_ATTENTION && retry_count) {
+		retry_count--;
+		goto retry;
 	}

 	return result;
@@ -2707,6 +2712,9 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
 	case SDEV_EVT_LUN_CHANGE_REPORTED:
 		envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
 		break;
+	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
+		envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
+		break;
 	default:
 		/* do nothing */
 		break;
@@ -2810,6 +2818,7 @@ struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
 	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
 	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
 	case SDEV_EVT_LUN_CHANGE_REPORTED:
+	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
 	default:
 		/* do nothing */
 		break;
drivers/scsi/scsi_transport_iscsi.c:

@@ -2042,6 +2042,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
 	session->transport = transport;
 	session->creator = -1;
 	session->recovery_tmo = 120;
+	session->recovery_tmo_sysfs_override = false;
 	session->state = ISCSI_SESSION_FREE;
 	INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
 	INIT_LIST_HEAD(&session->sess_list);
@@ -2786,7 +2787,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
 	switch (ev->u.set_param.param) {
 	case ISCSI_PARAM_SESS_RECOVERY_TMO:
 		sscanf(data, "%d", &value);
-		session->recovery_tmo = value;
+		if (!session->recovery_tmo_sysfs_override)
+			session->recovery_tmo = value;
 		break;
 	default:
 		err = transport->set_param(conn, ev->u.set_param.param,
@@ -4049,13 +4051,15 @@ store_priv_session_##field(struct device *dev, \
 	if ((session->state == ISCSI_SESSION_FREE) ||			\
 	    (session->state == ISCSI_SESSION_FAILED))			\
 		return -EBUSY;						\
-	if (strncmp(buf, "off", 3) == 0)				\
+	if (strncmp(buf, "off", 3) == 0) {				\
 		session->field = -1;					\
-	else {								\
+		session->field##_sysfs_override = true;			\
+	} else {							\
 		val = simple_strtoul(buf, &cp, 0);			\
 		if (*cp != '\0' && *cp != '\n')				\
 			return -EINVAL;					\
 		session->field = val;					\
+		session->field##_sysfs_override = true;			\
 	}								\
 	return count;							\
 }
@@ -4066,6 +4070,7 @@ store_priv_session_##field(struct device *dev, \
 static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR,		\
 			show_priv_session_##field,			\
 			store_priv_session_##field)

 iscsi_priv_session_rw_attr(recovery_tmo, "%d");

 static struct attribute *iscsi_session_attrs[] = {
drivers/scsi/st.c:

@@ -85,6 +85,7 @@ static int debug_flag;

 static struct class st_sysfs_class;
 static const struct attribute_group *st_dev_groups[];
+static const struct attribute_group *st_drv_groups[];

 MODULE_AUTHOR("Kai Makisara");
 MODULE_DESCRIPTION("SCSI tape (st) driver");
@@ -198,15 +199,13 @@ static int sgl_unmap_user_pages(struct st_buffer *, const unsigned int, int);
 static int st_probe(struct device *);
 static int st_remove(struct device *);

-static int do_create_sysfs_files(void);
-static void do_remove_sysfs_files(void);
-
 static struct scsi_driver st_template = {
 	.gendrv = {
 		.name		= "st",
 		.owner		= THIS_MODULE,
 		.probe		= st_probe,
 		.remove		= st_remove,
+		.groups		= st_drv_groups,
 	},
 };
@@ -4404,14 +4403,8 @@ static int __init init_st(void)
 	if (err)
 		goto err_chrdev;

-	err = do_create_sysfs_files();
-	if (err)
-		goto err_scsidrv;
-
 	return 0;

-err_scsidrv:
-	scsi_unregister_driver(&st_template.gendrv);
 err_chrdev:
 	unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
 				 ST_MAX_TAPE_ENTRIES);
@@ -4422,11 +4415,11 @@ err_class:

 static void __exit exit_st(void)
 {
-	do_remove_sysfs_files();
 	scsi_unregister_driver(&st_template.gendrv);
 	unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
 				 ST_MAX_TAPE_ENTRIES);
 	class_unregister(&st_sysfs_class);
 	idr_destroy(&st_index_idr);
 	printk(KERN_INFO "st: Unloaded.\n");
 }
@@ -4435,68 +4428,38 @@ module_exit(exit_st);

 /* The sysfs driver interface. Read-only at the moment */
-static ssize_t st_try_direct_io_show(struct device_driver *ddp, char *buf)
+static ssize_t try_direct_io_show(struct device_driver *ddp, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d\n", try_direct_io);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", try_direct_io);
 }
-static DRIVER_ATTR(try_direct_io, S_IRUGO, st_try_direct_io_show, NULL);
+static DRIVER_ATTR_RO(try_direct_io);

-static ssize_t st_fixed_buffer_size_show(struct device_driver *ddp, char *buf)
+static ssize_t fixed_buffer_size_show(struct device_driver *ddp, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d\n", st_fixed_buffer_size);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", st_fixed_buffer_size);
 }
-static DRIVER_ATTR(fixed_buffer_size, S_IRUGO, st_fixed_buffer_size_show, NULL);
+static DRIVER_ATTR_RO(fixed_buffer_size);

-static ssize_t st_max_sg_segs_show(struct device_driver *ddp, char *buf)
+static ssize_t max_sg_segs_show(struct device_driver *ddp, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d\n", st_max_sg_segs);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", st_max_sg_segs);
 }
-static DRIVER_ATTR(max_sg_segs, S_IRUGO, st_max_sg_segs_show, NULL);
+static DRIVER_ATTR_RO(max_sg_segs);

-static ssize_t st_version_show(struct device_driver *ddd, char *buf)
+static ssize_t version_show(struct device_driver *ddd, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "[%s]\n", verstr);
+	return scnprintf(buf, PAGE_SIZE, "[%s]\n", verstr);
 }
-static DRIVER_ATTR(version, S_IRUGO, st_version_show, NULL);
+static DRIVER_ATTR_RO(version);

The hand-rolled do_create_sysfs_files() and do_remove_sysfs_files() helpers
(which called driver_create_file()/driver_remove_file() for the four
attributes above, with the matching error-unwind labels) are deleted and the
attributes are collected into a driver attribute group instead:

+static struct attribute *st_drv_attrs[] = {
+	&driver_attr_try_direct_io.attr,
+	&driver_attr_fixed_buffer_size.attr,
+	&driver_attr_max_sg_segs.attr,
+	&driver_attr_version.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(st_drv);

 /* The sysfs simple class interface */
 static ssize_t
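An aside on the conversion above (not part of the patch set): DRIVER_ATTR_RO(name) expects a callback named name_show() and emits the struct driver_attribute driver_attr_name that the attribute array references, which is why the st_ prefixes are dropped and the explicit driver_create_file()/driver_remove_file() calls become unnecessary once .groups points at the generated group. A minimal sketch of the same kernel-side pattern, using a hypothetical "example" attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Sketch only: how a read-only driver attribute is wired up. */
static ssize_t example_show(struct device_driver *ddp, char *buf)
{
	/* scnprintf() returns the number of bytes actually written */
	return scnprintf(buf, PAGE_SIZE, "%d\n", 42);
}
static DRIVER_ATTR_RO(example);		/* defines driver_attr_example */

static struct attribute *example_drv_attrs[] = {
	&driver_attr_example.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example_drv);		/* defines example_drv_groups[] for .groups */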
drivers/scsi/storvsc_drv.c:

@@ -56,15 +56,18 @@
  *	V1 RC > 2008/1/31:	2.0
  *	Win7:			4.2
  *	Win8:			5.1
+ *	Win8.1:			6.0
+ *	Win10:			6.2
  */

 #define VMSTOR_PROTO_VERSION(MAJOR_, MINOR_)	((((MAJOR_) & 0xff) << 8) | \
						(((MINOR_) & 0xff)))

-#define VMSTOR_WIN7_MAJOR 4
-#define VMSTOR_WIN7_MINOR 2
-
-#define VMSTOR_WIN8_MAJOR 5
-#define VMSTOR_WIN8_MINOR 1
+#define VMSTOR_PROTO_VERSION_WIN6	VMSTOR_PROTO_VERSION(2, 0)
+#define VMSTOR_PROTO_VERSION_WIN7	VMSTOR_PROTO_VERSION(4, 2)
+#define VMSTOR_PROTO_VERSION_WIN8	VMSTOR_PROTO_VERSION(5, 1)
+#define VMSTOR_PROTO_VERSION_WIN8_1	VMSTOR_PROTO_VERSION(6, 0)
+#define VMSTOR_PROTO_VERSION_WIN10	VMSTOR_PROTO_VERSION(6, 2)

 /* Packet structure describing virtual storage requests. */
 enum vstor_packet_operation {
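A quick illustration of the version packing above (an aside, not part of the patch): VMSTOR_PROTO_VERSION() puts the major revision in the high byte and the minor in the low byte, so protocol versions compare numerically in preference order. A tiny standalone check:

/* Illustration only: storage protocol versions pack major/minor into
 * one 16-bit value, so preference ordering is a plain numeric compare. */
#include <stdio.h>

#define VMSTOR_PROTO_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \
					      (((MINOR_) & 0xff)))

int main(void)
{
	/* Win10 (6.2) > Win8.1 (6.0) > Win8 (5.1) > Win7 (4.2) > Win6 (2.0) */
	printf("%#x %#x %#x\n",
	       VMSTOR_PROTO_VERSION(6, 2),	/* 0x602 */
	       VMSTOR_PROTO_VERSION(5, 1),	/* 0x501 */
	       VMSTOR_PROTO_VERSION(2, 0));	/* 0x200 */
	return 0;
}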
@@ -148,21 +151,18 @@ struct hv_fc_wwn_packet {

 /*
  * Sense buffer size changed in win8; have a run-time
- * variable to track the size we should use.
+ * variable to track the size we should use.  This value will
+ * likely change during protocol negotiation but it is valid
+ * to start by assuming pre-Win8.
  */
-static int sense_buffer_size;
+static int sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;

 /*
- * The size of the vmscsi_request has changed in win8. The
- * additional size is because of new elements added to the
- * structure. These elements are valid only when we are talking
- * to a win8 host.
- * Track the correction to size we need to apply.
- */
-static int vmscsi_size_delta;
-static int vmstor_current_major;
-static int vmstor_current_minor;
+ * The storage protocol version is determined during the
+ * initial exchange with the host.  It will indicate which
+ * storage functionality is available in the host.
+ */
+static int vmstor_proto_version;

 struct vmscsi_win8_extension {
 	/*
@@ -206,6 +206,56 @@ struct vmscsi_request {
 } __attribute((packed));


+/*
+ * The size of the vmscsi_request has changed in win8. The
+ * additional size is because of new elements added to the
+ * structure. These elements are valid only when we are talking
+ * to a win8 host.
+ * Track the correction to size we need to apply. This value
+ * will likely change during protocol negotiation but it is
+ * valid to start by assuming pre-Win8.
+ */
+static int vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
+
+/*
+ * The list of storage protocols in order of preference.
+ */
+struct vmstor_protocol {
+	int protocol_version;
+	int sense_buffer_size;
+	int vmscsi_size_delta;
+};
+
+static const struct vmstor_protocol vmstor_protocols[] = {
+	{
+		VMSTOR_PROTO_VERSION_WIN10,
+		POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
+		0
+	},
+	{
+		VMSTOR_PROTO_VERSION_WIN8_1,
+		POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
+		0
+	},
+	{
+		VMSTOR_PROTO_VERSION_WIN8,
+		POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
+		0
+	},
+	{
+		VMSTOR_PROTO_VERSION_WIN7,
+		PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
+		sizeof(struct vmscsi_win8_extension),
+	},
+	{
+		VMSTOR_PROTO_VERSION_WIN6,
+		PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
+		sizeof(struct vmscsi_win8_extension),
+	}
+};
+
+
 /*
  * This structure is sent during the intialization phase to get the different
  * properties of the channel.
@@ -426,7 +476,6 @@ static void storvsc_host_scan(struct work_struct *work)
 	struct storvsc_scan_work *wrk;
 	struct Scsi_Host *host;
 	struct scsi_device *sdev;
-	unsigned long flags;

 	wrk = container_of(work, struct storvsc_scan_work, work);
 	host = wrk->host;
@@ -443,14 +492,8 @@ static void storvsc_host_scan(struct work_struct *work)
 	 * may have been removed this way.
 	 */
 	mutex_lock(&host->scan_mutex);
-	spin_lock_irqsave(host->host_lock, flags);
-	list_for_each_entry(sdev, &host->__devices, siblings) {
-		spin_unlock_irqrestore(host->host_lock, flags);
+	shost_for_each_device(sdev, host)
 		scsi_test_unit_ready(sdev, 1, 1, NULL);
-		spin_lock_irqsave(host->host_lock, flags);
-		continue;
-	}
-	spin_unlock_irqrestore(host->host_lock, flags);
 	mutex_unlock(&host->scan_mutex);
 	/*
 	 * Now scan the host to discover LUNs that may have been added.
@@ -481,18 +524,6 @@ done:
 	kfree(wrk);
 }

-/*
- * Major/minor macros.  Minor version is in LSB, meaning that earlier flat
- * version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1).
- */
-static inline u16 storvsc_get_version(u8 major, u8 minor)
-{
-	u16 version;
-
-	version = ((major << 8) | minor);
-	return version;
-}
-
 /*
  * We can get incoming messages from the host that are not in response to
@@ -885,7 +916,7 @@ static int storvsc_channel_init(struct hv_device *device)
 	struct storvsc_device *stor_device;
 	struct storvsc_cmd_request *request;
 	struct vstor_packet *vstor_packet;
-	int ret, t;
+	int ret, t, i;
 	int max_chns;
 	bool process_sub_channels = false;
@@ -921,41 +952,65 @@ static int storvsc_channel_init(struct hv_device *device)
 	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
-	    vstor_packet->status != 0)
+	    vstor_packet->status != 0) {
+		ret = -EINVAL;
 		goto cleanup;
+	}

-	/* reuse the packet for version range supported */
-	memset(vstor_packet, 0, sizeof(struct vstor_packet));
-	vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
-	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
-
-	vstor_packet->version.major_minor =
-		storvsc_get_version(vmstor_current_major, vmstor_current_minor);
-
-	/*
-	 * The revision number is only used in Windows; set it to 0.
-	 */
-	vstor_packet->version.revision = 0;
-
-	ret = vmbus_sendpacket(device->channel, vstor_packet,
-			       (sizeof(struct vstor_packet) -
-				vmscsi_size_delta),
-			       (unsigned long)request,
-			       VM_PKT_DATA_INBAND,
-			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
-	if (ret != 0)
-		goto cleanup;
-
-	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
-	if (t == 0) {
-		ret = -ETIMEDOUT;
-		goto cleanup;
-	}
-
-	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
-	    vstor_packet->status != 0)
-		goto cleanup;
+	for (i = 0; i < ARRAY_SIZE(vmstor_protocols); i++) {
+		/* reuse the packet for version range supported */
+		memset(vstor_packet, 0, sizeof(struct vstor_packet));
+		vstor_packet->operation =
+			VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
+		vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+
+		vstor_packet->version.major_minor =
+			vmstor_protocols[i].protocol_version;
+
+		/*
+		 * The revision number is only used in Windows; set it to 0.
+		 */
+		vstor_packet->version.revision = 0;
+
+		ret = vmbus_sendpacket(device->channel, vstor_packet,
			       (sizeof(struct vstor_packet) -
				vmscsi_size_delta),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+		if (ret != 0)
+			goto cleanup;
+
+		t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+		if (t == 0) {
+			ret = -ETIMEDOUT;
+			goto cleanup;
+		}
+
+		if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO) {
+			ret = -EINVAL;
+			goto cleanup;
+		}
+
+		if (vstor_packet->status == 0) {
+			vmstor_proto_version =
+				vmstor_protocols[i].protocol_version;
+
+			sense_buffer_size =
+				vmstor_protocols[i].sense_buffer_size;
+
+			vmscsi_size_delta =
+				vmstor_protocols[i].vmscsi_size_delta;
+
+			break;
+		}
+	}
+
+	if (vstor_packet->status != 0) {
+		ret = -EINVAL;
+		goto cleanup;
+	}

 	memset(vstor_packet, 0, sizeof(struct vstor_packet));
@@ -979,8 +1034,10 @@ static int storvsc_channel_init(struct hv_device *device)
 	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
-	    vstor_packet->status != 0)
+	    vstor_packet->status != 0) {
+		ret = -EINVAL;
 		goto cleanup;
+	}

 	/*
 	 * Check to see if multi-channel support is there.
@@ -988,8 +1045,7 @@ static int storvsc_channel_init(struct hv_device *device)
 	 * support multi-channel.
 	 */
 	max_chns = vstor_packet->storage_channel_properties.max_channel_cnt;
-	if ((vmbus_proto_version != VERSION_WIN7) &&
-	    (vmbus_proto_version != VERSION_WS2008)) {
+	if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN8) {
 		if (vstor_packet->storage_channel_properties.flags &
 		    STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
 			process_sub_channels = true;
@@ -1018,8 +1074,10 @@ static int storvsc_channel_init(struct hv_device *device)
 	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
-	    vstor_packet->status != 0)
+	    vstor_packet->status != 0) {
+		ret = -EINVAL;
 		goto cleanup;
+	}

 	if (process_sub_channels)
 		handle_multichannel_storage(device, max_chns);
@@ -1428,15 +1486,19 @@ static int storvsc_device_configure(struct scsi_device *sdevice)

 	/*
 	 * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
-	 * if the device is a MSFT virtual device.
+	 * if the device is a MSFT virtual device.  If the host is
+	 * WIN10 or newer, allow write_same.
 	 */
 	if (!strncmp(sdevice->vendor, "Msft", 4)) {
-		switch (vmbus_proto_version) {
-		case VERSION_WIN8:
-		case VERSION_WIN8_1:
+		switch (vmstor_proto_version) {
+		case VMSTOR_PROTO_VERSION_WIN8:
+		case VMSTOR_PROTO_VERSION_WIN8_1:
 			sdevice->scsi_level = SCSI_SPC_3;
 			break;
 		}
+
+		if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN10)
+			sdevice->no_write_same = 0;
 	}

 	return 0;
@@ -1563,7 +1625,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	u32 payload_sz;
 	u32 length;

-	if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
+	if (vmstor_proto_version <= VMSTOR_PROTO_VERSION_WIN8) {
 		/*
 		 * On legacy hosts filter unimplemented commands.
 		 * Future hosts are expected to correctly handle
@@ -1598,10 +1660,18 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 		vm_srb->data_in = READ_TYPE;
 		vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
 		break;
-	default:
+	case DMA_NONE:
 		vm_srb->data_in = UNKNOWN_TYPE;
 		vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
 		break;
+	default:
+		/*
+		 * This is DMA_BIDIRECTIONAL or something else we are never
+		 * supposed to see here.
+		 */
+		WARN(1, "Unexpected data direction: %d\n",
+		     scmnd->sc_data_direction);
+		return -EINVAL;
 	}

@@ -1758,22 +1828,11 @@ static int storvsc_probe(struct hv_device *device,
 	 * set state to properly communicate with the host.
 	 */

-	switch (vmbus_proto_version) {
-	case VERSION_WS2008:
-	case VERSION_WIN7:
-		sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
-		vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
-		vmstor_current_major = VMSTOR_WIN7_MAJOR;
-		vmstor_current_minor = VMSTOR_WIN7_MINOR;
+	if (vmbus_proto_version < VERSION_WIN8) {
 		max_luns_per_target = STORVSC_IDE_MAX_LUNS_PER_TARGET;
 		max_targets = STORVSC_IDE_MAX_TARGETS;
 		max_channels = STORVSC_IDE_MAX_CHANNELS;
-		break;
-	default:
-		sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
-		vmscsi_size_delta = 0;
-		vmstor_current_major = VMSTOR_WIN8_MAJOR;
-		vmstor_current_minor = VMSTOR_WIN8_MINOR;
+	} else {
 		max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET;
 		max_targets = STORVSC_MAX_TARGETS;
 		max_channels = STORVSC_MAX_CHANNELS;
@@ -1783,7 +1842,6 @@ static int storvsc_probe(struct hv_device *device,
 		 * VCPUs in the guest.
 		 */
 		max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel);
-		break;
 	}

 	scsi_driver.can_queue = (max_outstanding_req_per_channel *
include/scsi/scsi_device.h:

@@ -57,9 +57,10 @@ enum scsi_device_event {
 	SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED,	/* 38 07  UA reported */
 	SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED,	/* 2A 01  UA reported */
 	SDEV_EVT_LUN_CHANGE_REPORTED,			/* 3F 0E  UA reported */
+	SDEV_EVT_ALUA_STATE_CHANGE_REPORTED,		/* 2A 06  UA reported */

 	SDEV_EVT_FIRST		= SDEV_EVT_MEDIA_CHANGE,
-	SDEV_EVT_LAST		= SDEV_EVT_LUN_CHANGE_REPORTED,
+	SDEV_EVT_LAST		= SDEV_EVT_ALUA_STATE_CHANGE_REPORTED,

 	SDEV_EVT_MAXBITS	= SDEV_EVT_LAST + 1
 };
include/scsi/scsi_transport_iscsi.h:

@@ -241,6 +241,7 @@ struct iscsi_cls_session {

 	/* recovery fields */
 	int recovery_tmo;
+	bool recovery_tmo_sysfs_override;
 	struct delayed_work recovery_work;

 	unsigned int target_id;
include/uapi/scsi/Kbuild:

@@ -3,3 +3,4 @@ header-y += fc/
 header-y += scsi_bsg_fc.h
 header-y += scsi_netlink.h
 header-y += scsi_netlink_fc.h
+header-y += cxlflash_ioctl.h
include/uapi/scsi/cxlflash_ioctl.h (new file, 174 lines):

@@ -0,0 +1,174 @@
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _CXLFLASH_IOCTL_H
#define _CXLFLASH_IOCTL_H

#include <linux/types.h>

/*
 * Structure and flag definitions CXL Flash superpipe ioctls
 */

#define DK_CXLFLASH_VERSION_0	0

struct dk_cxlflash_hdr {
	__u16 version;			/* Version data */
	__u16 rsvd[3];			/* Reserved for future use */
	__u64 flags;			/* Input flags */
	__u64 return_flags;		/* Returned flags */
};

/*
 * Notes:
 * -----
 * The 'context_id' field of all ioctl structures contains the context
 * identifier for a context in the lower 32-bits (upper 32-bits are not
 * to be used when identifying a context to the AFU). That said, the value
 * in its entirety (all 64-bits) is to be treated as an opaque cookie and
 * should be presented as such when issuing ioctls.
 *
 * For DK_CXLFLASH_ATTACH ioctl, user specifies read/write access
 * permissions via the O_RDONLY, O_WRONLY, and O_RDWR flags defined in
 * the fcntl.h header file.
 */
#define DK_CXLFLASH_ATTACH_REUSE_CONTEXT	0x8000000000000000ULL

struct dk_cxlflash_attach {
	struct dk_cxlflash_hdr hdr;	/* Common fields */
	__u64 num_interrupts;		/* Requested number of interrupts */
	__u64 context_id;		/* Returned context */
	__u64 mmio_size;		/* Returned size of MMIO area */
	__u64 block_size;		/* Returned block size, in bytes */
	__u64 adap_fd;			/* Returned adapter file descriptor */
	__u64 last_lba;			/* Returned last LBA on the device */
	__u64 max_xfer;			/* Returned max transfer size, blocks */
	__u64 reserved[8];		/* Reserved for future use */
};

struct dk_cxlflash_detach {
	struct dk_cxlflash_hdr hdr;	/* Common fields */
	__u64 context_id;		/* Context to detach */
	__u64 reserved[8];		/* Reserved for future use */
};

struct dk_cxlflash_udirect {
	struct dk_cxlflash_hdr hdr;	/* Common fields */
	__u64 context_id;		/* Context to own physical resources */
	__u64 rsrc_handle;		/* Returned resource handle */
	__u64 last_lba;			/* Returned last LBA on the device */
	__u64 reserved[8];		/* Reserved for future use */
};

#define DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME	0x8000000000000000ULL

struct dk_cxlflash_uvirtual {
	struct dk_cxlflash_hdr hdr;	/* Common fields */
	__u64 context_id;		/* Context to own virtual resources */
	__u64 lun_size;			/* Requested size, in 4K blocks */
	__u64 rsrc_handle;		/* Returned resource handle */
	__u64 last_lba;			/* Returned last LBA of LUN */
	__u64 reserved[8];		/* Reserved for future use */
};

struct dk_cxlflash_release {
	struct dk_cxlflash_hdr hdr;	/* Common fields */
	__u64 context_id;		/* Context owning resources */
	__u64 rsrc_handle;		/* Resource handle to release */
	__u64 reserved[8];		/* Reserved for future use */
};

struct dk_cxlflash_resize {
	struct dk_cxlflash_hdr hdr;	/* Common fields */
	__u64 context_id;		/* Context owning resources */
	__u64 rsrc_handle;		/* Resource handle of LUN to resize */
	__u64 req_size;			/* New requested size, in 4K blocks */
	__u64 last_lba;			/* Returned last LBA of LUN */
	__u64 reserved[8];		/* Reserved for future use */
};

struct dk_cxlflash_clone {
	struct dk_cxlflash_hdr hdr;	/* Common fields */
	__u64 context_id_src;		/* Context to clone from */
	__u64 context_id_dst;		/* Context to clone to */
	__u64 adap_fd_src;		/* Source context adapter fd */
	__u64 reserved[8];		/* Reserved for future use */
};

#define DK_CXLFLASH_VERIFY_SENSE_LEN	18
#define DK_CXLFLASH_VERIFY_HINT_SENSE	0x8000000000000000ULL

struct dk_cxlflash_verify {
	struct dk_cxlflash_hdr hdr;	/* Common fields */
	__u64 context_id;		/* Context owning resources to verify */
	__u64 rsrc_handle;		/* Resource handle of LUN */
	__u64 hint;			/* Reasons for verify */
	__u64 last_lba;			/* Returned last LBA of device */
	__u8 sense_data[DK_CXLFLASH_VERIFY_SENSE_LEN]; /* SCSI sense data */
	__u8 pad[6];			/* Pad to next 8-byte boundary */
	__u64 reserved[8];		/* Reserved for future use */
};

#define DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET	0x8000000000000000ULL

struct dk_cxlflash_recover_afu {
	struct dk_cxlflash_hdr hdr;	/* Common fields */
	__u64 reason;			/* Reason for recovery request */
	__u64 context_id;		/* Context to recover / updated ID */
	__u64 mmio_size;		/* Returned size of MMIO area */
	__u64 adap_fd;			/* Returned adapter file descriptor */
	__u64 reserved[8];		/* Reserved for future use */
};

#define DK_CXLFLASH_MANAGE_LUN_WWID_LEN			16
#define DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE		0x8000000000000000ULL
#define DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE	0x4000000000000000ULL
#define DK_CXLFLASH_MANAGE_LUN_ALL_PORTS_ACCESSIBLE	0x2000000000000000ULL

struct dk_cxlflash_manage_lun {
	struct dk_cxlflash_hdr hdr;	/* Common fields */
	__u8 wwid[DK_CXLFLASH_MANAGE_LUN_WWID_LEN];	/* Page83 WWID, NAA-6 */
	__u64 reserved[8];		/* Rsvd, future use */
};

union cxlflash_ioctls {
	struct dk_cxlflash_attach attach;
	struct dk_cxlflash_detach detach;
	struct dk_cxlflash_udirect udirect;
	struct dk_cxlflash_uvirtual uvirtual;
	struct dk_cxlflash_release release;
	struct dk_cxlflash_resize resize;
	struct dk_cxlflash_clone clone;
	struct dk_cxlflash_verify verify;
	struct dk_cxlflash_recover_afu recover_afu;
	struct dk_cxlflash_manage_lun manage_lun;
};

#define MAX_CXLFLASH_IOCTL_SZ	(sizeof(union cxlflash_ioctls))

#define CXL_MAGIC 0xCA
#define CXL_IOWR(_n, _s)	_IOWR(CXL_MAGIC, _n, struct _s)

#define DK_CXLFLASH_ATTACH		CXL_IOWR(0x80, dk_cxlflash_attach)
#define DK_CXLFLASH_USER_DIRECT		CXL_IOWR(0x81, dk_cxlflash_udirect)
#define DK_CXLFLASH_RELEASE		CXL_IOWR(0x82, dk_cxlflash_release)
#define DK_CXLFLASH_DETACH		CXL_IOWR(0x83, dk_cxlflash_detach)
#define DK_CXLFLASH_VERIFY		CXL_IOWR(0x84, dk_cxlflash_verify)
#define DK_CXLFLASH_RECOVER_AFU		CXL_IOWR(0x85, dk_cxlflash_recover_afu)
#define DK_CXLFLASH_MANAGE_LUN		CXL_IOWR(0x86, dk_cxlflash_manage_lun)
#define DK_CXLFLASH_USER_VIRTUAL	CXL_IOWR(0x87, dk_cxlflash_uvirtual)
#define DK_CXLFLASH_VLUN_RESIZE		CXL_IOWR(0x88, dk_cxlflash_resize)
#define DK_CXLFLASH_VLUN_CLONE		CXL_IOWR(0x89, dk_cxlflash_clone)

#endif /* ifndef _CXLFLASH_IOCTL_H */
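The superpipe ioctls declared above are issued against the cxlflash SCSI disk node. The following is a minimal, illustrative userspace sketch, not part of the patch set: the device path and the requested interrupt count are placeholders, and real code would check the returned flags and adapter fd. It only exercises structures and ioctl numbers defined in the header shown above:

/* Illustration only: attach to a cxlflash context, then detach again. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/cxlflash_ioctl.h>

int main(void)
{
	struct dk_cxlflash_attach attach;
	struct dk_cxlflash_detach detach;
	int fd = open("/dev/sdc", O_RDWR);	/* hypothetical cxlflash LUN */

	if (fd < 0)
		return 1;

	memset(&attach, 0, sizeof(attach));
	attach.hdr.version = DK_CXLFLASH_VERSION_0;
	attach.num_interrupts = 4;		/* example value */

	if (ioctl(fd, DK_CXLFLASH_ATTACH, &attach) == 0) {
		/* context_id is an opaque cookie passed to later ioctls */
		printf("context %#llx, block size %llu, last LBA %llu\n",
		       (unsigned long long)attach.context_id,
		       (unsigned long long)attach.block_size,
		       (unsigned long long)attach.last_lba);

		memset(&detach, 0, sizeof(detach));
		detach.hdr.version = DK_CXLFLASH_VERSION_0;
		detach.context_id = attach.context_id;
		ioctl(fd, DK_CXLFLASH_DETACH, &detach);
	}

	close(fd);
	return 0;
}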