mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-11-19 08:05:27 +08:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (200 commits) [SCSI] usbstorage: use last_sector_bug flag universally [SCSI] libsas: abstract STP task status into a function [SCSI] ultrastor: clean up inline asm warnings [SCSI] aic7xxx: fix firmware build [SCSI] aacraid: fib context lock for management ioctls [SCSI] ch: remove forward declarations [SCSI] ch: fix device minor number management bug [SCSI] ch: handle class_device_create failure properly [SCSI] NCR5380: fix section mismatch [SCSI] sg: fix /proc/scsi/sg/devices when no SCSI devices [SCSI] IB/iSER: add logical unit reset support [SCSI] don't use __GFP_DMA for sense buffers if not required [SCSI] use dynamically allocated sense buffer [SCSI] scsi.h: add macro for enclosure bit of inquiry data [SCSI] sd: add fix for devices with last sector access problems [SCSI] fix pcmcia compile problem [SCSI] aacraid: add Voodoo Lite class of cards. [SCSI] aacraid: add new driver features flags [SCSI] qla2xxx: Update version number to 8.02.00-k7. [SCSI] qla2xxx: Issue correct MBC_INITIALIZE_FIRMWARE command. ...
This commit is contained in:
commit
9b73e76f3c
@ -11,7 +11,7 @@ DOCBOOKS := wanbook.xml z8530book.xml mcabook.xml videobook.xml \
|
||||
procfs-guide.xml writing_usb_driver.xml \
|
||||
kernel-api.xml filesystems.xml lsm.xml usb.xml \
|
||||
gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
|
||||
genericirq.xml s390-drivers.xml uio-howto.xml
|
||||
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml
|
||||
|
||||
###
|
||||
# The build process is as follows (targets):
|
||||
|
409
Documentation/DocBook/scsi.tmpl
Normal file
409
Documentation/DocBook/scsi.tmpl
Normal file
@ -0,0 +1,409 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
|
||||
"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
|
||||
|
||||
<book id="scsimid">
|
||||
<bookinfo>
|
||||
<title>SCSI Interfaces Guide</title>
|
||||
|
||||
<authorgroup>
|
||||
<author>
|
||||
<firstname>James</firstname>
|
||||
<surname>Bottomley</surname>
|
||||
<affiliation>
|
||||
<address>
|
||||
<email>James.Bottomley@steeleye.com</email>
|
||||
</address>
|
||||
</affiliation>
|
||||
</author>
|
||||
|
||||
<author>
|
||||
<firstname>Rob</firstname>
|
||||
<surname>Landley</surname>
|
||||
<affiliation>
|
||||
<address>
|
||||
<email>rob@landley.net</email>
|
||||
</address>
|
||||
</affiliation>
|
||||
</author>
|
||||
|
||||
</authorgroup>
|
||||
|
||||
<copyright>
|
||||
<year>2007</year>
|
||||
<holder>Linux Foundation</holder>
|
||||
</copyright>
|
||||
|
||||
<legalnotice>
|
||||
<para>
|
||||
This documentation is free software; you can redistribute
|
||||
it and/or modify it under the terms of the GNU General Public
|
||||
License version 2.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
This program is distributed in the hope that it will be
|
||||
useful, but WITHOUT ANY WARRANTY; without even the implied
|
||||
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
For more details see the file COPYING in the source
|
||||
distribution of Linux.
|
||||
</para>
|
||||
</legalnotice>
|
||||
</bookinfo>
|
||||
|
||||
<toc></toc>
|
||||
|
||||
<chapter id="intro">
|
||||
<title>Introduction</title>
|
||||
<sect1 id="protocol_vs_bus">
|
||||
<title>Protocol vs bus</title>
|
||||
<para>
|
||||
Once upon a time, the Small Computer Systems Interface defined both
|
||||
a parallel I/O bus and a data protocol to connect a wide variety of
|
||||
peripherals (disk drives, tape drives, modems, printers, scanners,
|
||||
optical drives, test equipment, and medical devices) to a host
|
||||
computer.
|
||||
</para>
|
||||
<para>
|
||||
Although the old parallel (fast/wide/ultra) SCSI bus has largely
|
||||
fallen out of use, the SCSI command set is more widely used than ever
|
||||
to communicate with devices over a number of different busses.
|
||||
</para>
|
||||
<para>
|
||||
The <ulink url='http://www.t10.org/scsi-3.htm'>SCSI protocol</ulink>
|
||||
is a big-endian peer-to-peer packet based protocol. SCSI commands
|
||||
are 6, 10, 12, or 16 bytes long, often followed by an associated data
|
||||
payload.
|
||||
</para>
|
||||
<para>
|
||||
SCSI commands can be transported over just about any kind of bus, and
|
||||
are the default protocol for storage devices attached to USB, SATA,
|
||||
SAS, Fibre Channel, FireWire, and ATAPI devices. SCSI packets are
|
||||
also commonly exchanged over Infiniband,
|
||||
<ulink url='http://i2o.shadowconnect.com/faq.php'>I20</ulink>, TCP/IP
|
||||
(<ulink url='http://en.wikipedia.org/wiki/ISCSI'>iSCSI</ulink>), even
|
||||
<ulink url='http://cyberelk.net/tim/parport/parscsi.html'>Parallel
|
||||
ports</ulink>.
|
||||
</para>
|
||||
</sect1>
|
||||
<sect1 id="subsystem_design">
|
||||
<title>Design of the Linux SCSI subsystem</title>
|
||||
<para>
|
||||
The SCSI subsystem uses a three layer design, with upper, mid, and low
|
||||
layers. Every operation involving the SCSI subsystem (such as reading
|
||||
a sector from a disk) uses one driver at each of the 3 levels: one
|
||||
upper layer driver, one lower layer driver, and the SCSI midlayer.
|
||||
</para>
|
||||
<para>
|
||||
The SCSI upper layer provides the interface between userspace and the
|
||||
kernel, in the form of block and char device nodes for I/O and
|
||||
ioctl(). The SCSI lower layer contains drivers for specific hardware
|
||||
devices.
|
||||
</para>
|
||||
<para>
|
||||
In between is the SCSI mid-layer, analogous to a network routing
|
||||
layer such as the IPv4 stack. The SCSI mid-layer routes a packet
|
||||
based data protocol between the upper layer's /dev nodes and the
|
||||
corresponding devices in the lower layer. It manages command queues,
|
||||
provides error handling and power management functions, and responds
|
||||
to ioctl() requests.
|
||||
</para>
|
||||
</sect1>
|
||||
</chapter>
|
||||
|
||||
<chapter id="upper_layer">
|
||||
<title>SCSI upper layer</title>
|
||||
<para>
|
||||
The upper layer supports the user-kernel interface by providing
|
||||
device nodes.
|
||||
</para>
|
||||
<sect1 id="sd">
|
||||
<title>sd (SCSI Disk)</title>
|
||||
<para>sd (sd_mod.o)</para>
|
||||
<!-- !Idrivers/scsi/sd.c -->
|
||||
</sect1>
|
||||
<sect1 id="sr">
|
||||
<title>sr (SCSI CD-ROM)</title>
|
||||
<para>sr (sr_mod.o)</para>
|
||||
</sect1>
|
||||
<sect1 id="st">
|
||||
<title>st (SCSI Tape)</title>
|
||||
<para>st (st.o)</para>
|
||||
</sect1>
|
||||
<sect1 id="sg">
|
||||
<title>sg (SCSI Generic)</title>
|
||||
<para>sg (sg.o)</para>
|
||||
</sect1>
|
||||
<sect1 id="ch">
|
||||
<title>ch (SCSI Media Changer)</title>
|
||||
<para>ch (ch.c)</para>
|
||||
</sect1>
|
||||
</chapter>
|
||||
|
||||
<chapter id="mid_layer">
|
||||
<title>SCSI mid layer</title>
|
||||
|
||||
<sect1 id="midlayer_implementation">
|
||||
<title>SCSI midlayer implementation</title>
|
||||
<sect2 id="scsi_device.h">
|
||||
<title>include/scsi/scsi_device.h</title>
|
||||
<para>
|
||||
</para>
|
||||
!Iinclude/scsi/scsi_device.h
|
||||
</sect2>
|
||||
|
||||
<sect2 id="scsi.c">
|
||||
<title>drivers/scsi/scsi.c</title>
|
||||
<para>Main file for the SCSI midlayer.</para>
|
||||
!Edrivers/scsi/scsi.c
|
||||
</sect2>
|
||||
<sect2 id="scsicam.c">
|
||||
<title>drivers/scsi/scsicam.c</title>
|
||||
<para>
|
||||
<ulink url='http://www.t10.org/ftp/t10/drafts/cam/cam-r12b.pdf'>SCSI
|
||||
Common Access Method</ulink> support functions, for use with
|
||||
HDIO_GETGEO, etc.
|
||||
</para>
|
||||
!Edrivers/scsi/scsicam.c
|
||||
</sect2>
|
||||
<sect2 id="scsi_error.c">
|
||||
<title>drivers/scsi/scsi_error.c</title>
|
||||
<para>Common SCSI error/timeout handling routines.</para>
|
||||
!Edrivers/scsi/scsi_error.c
|
||||
</sect2>
|
||||
<sect2 id="scsi_devinfo.c">
|
||||
<title>drivers/scsi/scsi_devinfo.c</title>
|
||||
<para>
|
||||
Manage scsi_dev_info_list, which tracks blacklisted and whitelisted
|
||||
devices.
|
||||
</para>
|
||||
!Idrivers/scsi/scsi_devinfo.c
|
||||
</sect2>
|
||||
<sect2 id="scsi_ioctl.c">
|
||||
<title>drivers/scsi/scsi_ioctl.c</title>
|
||||
<para>
|
||||
Handle ioctl() calls for SCSI devices.
|
||||
</para>
|
||||
!Edrivers/scsi/scsi_ioctl.c
|
||||
</sect2>
|
||||
<sect2 id="scsi_lib.c">
|
||||
<title>drivers/scsi/scsi_lib.c</title>
|
||||
<para>
|
||||
SCSI queuing library.
|
||||
</para>
|
||||
!Edrivers/scsi/scsi_lib.c
|
||||
</sect2>
|
||||
<sect2 id="scsi_lib_dma.c">
|
||||
<title>drivers/scsi/scsi_lib_dma.c</title>
|
||||
<para>
|
||||
SCSI library functions depending on DMA
|
||||
(map and unmap scatter-gather lists).
|
||||
</para>
|
||||
!Edrivers/scsi/scsi_lib_dma.c
|
||||
</sect2>
|
||||
<sect2 id="scsi_module.c">
|
||||
<title>drivers/scsi/scsi_module.c</title>
|
||||
<para>
|
||||
The file drivers/scsi/scsi_module.c contains legacy support for
|
||||
old-style host templates. It should never be used by any new driver.
|
||||
</para>
|
||||
</sect2>
|
||||
<sect2 id="scsi_proc.c">
|
||||
<title>drivers/scsi/scsi_proc.c</title>
|
||||
<para>
|
||||
The functions in this file provide an interface between
|
||||
the PROC file system and the SCSI device drivers
|
||||
It is mainly used for debugging, statistics and to pass
|
||||
information directly to the lowlevel driver.
|
||||
|
||||
I.E. plumbing to manage /proc/scsi/*
|
||||
</para>
|
||||
!Idrivers/scsi/scsi_proc.c
|
||||
</sect2>
|
||||
<sect2 id="scsi_netlink.c">
|
||||
<title>drivers/scsi/scsi_netlink.c</title>
|
||||
<para>
|
||||
Infrastructure to provide async events from transports to userspace
|
||||
via netlink, using a single NETLINK_SCSITRANSPORT protocol for all
|
||||
transports.
|
||||
|
||||
See <ulink url='http://marc.info/?l=linux-scsi&m=115507374832500&w=2'>the
|
||||
original patch submission</ulink> for more details.
|
||||
</para>
|
||||
!Idrivers/scsi/scsi_netlink.c
|
||||
</sect2>
|
||||
<sect2 id="scsi_scan.c">
|
||||
<title>drivers/scsi/scsi_scan.c</title>
|
||||
<para>
|
||||
Scan a host to determine which (if any) devices are attached.
|
||||
|
||||
The general scanning/probing algorithm is as follows, exceptions are
|
||||
made to it depending on device specific flags, compilation options,
|
||||
and global variable (boot or module load time) settings.
|
||||
|
||||
A specific LUN is scanned via an INQUIRY command; if the LUN has a
|
||||
device attached, a scsi_device is allocated and setup for it.
|
||||
|
||||
For every id of every channel on the given host, start by scanning
|
||||
LUN 0. Skip hosts that don't respond at all to a scan of LUN 0.
|
||||
Otherwise, if LUN 0 has a device attached, allocate and setup a
|
||||
scsi_device for it. If target is SCSI-3 or up, issue a REPORT LUN,
|
||||
and scan all of the LUNs returned by the REPORT LUN; else,
|
||||
sequentially scan LUNs up until some maximum is reached, or a LUN is
|
||||
seen that cannot have a device attached to it.
|
||||
</para>
|
||||
!Idrivers/scsi/scsi_scan.c
|
||||
</sect2>
|
||||
<sect2 id="scsi_sysctl.c">
|
||||
<title>drivers/scsi/scsi_sysctl.c</title>
|
||||
<para>
|
||||
Set up the sysctl entry: "/dev/scsi/logging_level"
|
||||
(DEV_SCSI_LOGGING_LEVEL) which sets/returns scsi_logging_level.
|
||||
</para>
|
||||
</sect2>
|
||||
<sect2 id="scsi_sysfs.c">
|
||||
<title>drivers/scsi/scsi_sysfs.c</title>
|
||||
<para>
|
||||
SCSI sysfs interface routines.
|
||||
</para>
|
||||
!Edrivers/scsi/scsi_sysfs.c
|
||||
</sect2>
|
||||
<sect2 id="hosts.c">
|
||||
<title>drivers/scsi/hosts.c</title>
|
||||
<para>
|
||||
mid to lowlevel SCSI driver interface
|
||||
</para>
|
||||
!Edrivers/scsi/hosts.c
|
||||
</sect2>
|
||||
<sect2 id="constants.c">
|
||||
<title>drivers/scsi/constants.c</title>
|
||||
<para>
|
||||
mid to lowlevel SCSI driver interface
|
||||
</para>
|
||||
!Edrivers/scsi/constants.c
|
||||
</sect2>
|
||||
</sect1>
|
||||
|
||||
<sect1 id="Transport_classes">
|
||||
<title>Transport classes</title>
|
||||
<para>
|
||||
Transport classes are service libraries for drivers in the SCSI
|
||||
lower layer, which expose transport attributes in sysfs.
|
||||
</para>
|
||||
<sect2 id="Fibre_Channel_transport">
|
||||
<title>Fibre Channel transport</title>
|
||||
<para>
|
||||
The file drivers/scsi/scsi_transport_fc.c defines transport attributes
|
||||
for Fibre Channel.
|
||||
</para>
|
||||
!Edrivers/scsi/scsi_transport_fc.c
|
||||
</sect2>
|
||||
<sect2 id="iSCSI_transport">
|
||||
<title>iSCSI transport class</title>
|
||||
<para>
|
||||
The file drivers/scsi/scsi_transport_iscsi.c defines transport
|
||||
attributes for the iSCSI class, which sends SCSI packets over TCP/IP
|
||||
connections.
|
||||
</para>
|
||||
!Edrivers/scsi/scsi_transport_iscsi.c
|
||||
</sect2>
|
||||
<sect2 id="SAS_transport">
|
||||
<title>Serial Attached SCSI (SAS) transport class</title>
|
||||
<para>
|
||||
The file drivers/scsi/scsi_transport_sas.c defines transport
|
||||
attributes for Serial Attached SCSI, a variant of SATA aimed at
|
||||
large high-end systems.
|
||||
</para>
|
||||
<para>
|
||||
The SAS transport class contains common code to deal with SAS HBAs,
|
||||
an approximated representation of SAS topologies in the driver model,
|
||||
and various sysfs attributes to expose these topologies and management
|
||||
interfaces to userspace.
|
||||
</para>
|
||||
<para>
|
||||
In addition to the basic SCSI core objects this transport class
|
||||
introduces two additional intermediate objects: The SAS PHY
|
||||
as represented by struct sas_phy defines an "outgoing" PHY on
|
||||
a SAS HBA or Expander, and the SAS remote PHY represented by
|
||||
struct sas_rphy defines an "incoming" PHY on a SAS Expander or
|
||||
end device. Note that this is purely a software concept, the
|
||||
underlying hardware for a PHY and a remote PHY is exactly
|
||||
the same.
|
||||
</para>
|
||||
<para>
|
||||
There is no concept of a SAS port in this code, users can see
|
||||
what PHYs form a wide port based on the port_identifier attribute,
|
||||
which is the same for all PHYs in a port.
|
||||
</para>
|
||||
!Edrivers/scsi/scsi_transport_sas.c
|
||||
</sect2>
|
||||
<sect2 id="SATA_transport">
|
||||
<title>SATA transport class</title>
|
||||
<para>
|
||||
The SATA transport is handled by libata, which has its own book of
|
||||
documentation in this directory.
|
||||
</para>
|
||||
</sect2>
|
||||
<sect2 id="SPI_transport">
|
||||
<title>Parallel SCSI (SPI) transport class</title>
|
||||
<para>
|
||||
The file drivers/scsi/scsi_transport_spi.c defines transport
|
||||
attributes for traditional (fast/wide/ultra) SCSI busses.
|
||||
</para>
|
||||
!Edrivers/scsi/scsi_transport_spi.c
|
||||
</sect2>
|
||||
<sect2 id="SRP_transport">
|
||||
<title>SCSI RDMA (SRP) transport class</title>
|
||||
<para>
|
||||
The file drivers/scsi/scsi_transport_srp.c defines transport
|
||||
attributes for SCSI over Remote Direct Memory Access.
|
||||
</para>
|
||||
!Edrivers/scsi/scsi_transport_srp.c
|
||||
</sect2>
|
||||
</sect1>
|
||||
|
||||
</chapter>
|
||||
|
||||
<chapter id="lower_layer">
|
||||
<title>SCSI lower layer</title>
|
||||
<sect1 id="hba_drivers">
|
||||
<title>Host Bus Adapter transport types</title>
|
||||
<para>
|
||||
Many modern device controllers use the SCSI command set as a protocol to
|
||||
communicate with their devices through many different types of physical
|
||||
connections.
|
||||
</para>
|
||||
<para>
|
||||
In SCSI language a bus capable of carrying SCSI commands is
|
||||
called a "transport", and a controller connecting to such a bus is
|
||||
called a "host bus adapter" (HBA).
|
||||
</para>
|
||||
<sect2 id="scsi_debug.c">
|
||||
<title>Debug transport</title>
|
||||
<para>
|
||||
The file drivers/scsi/scsi_debug.c simulates a host adapter with a
|
||||
variable number of disks (or disk like devices) attached, sharing a
|
||||
common amount of RAM. Does a lot of checking to make sure that we are
|
||||
not getting blocks mixed up, and panics the kernel if anything out of
|
||||
the ordinary is seen.
|
||||
</para>
|
||||
<para>
|
||||
To be more realistic, the simulated devices have the transport
|
||||
attributes of SAS disks.
|
||||
</para>
|
||||
<para>
|
||||
For documentation see
|
||||
<ulink url='http://www.torque.net/sg/sdebug26.html'>http://www.torque.net/sg/sdebug26.html</ulink>
|
||||
</para>
|
||||
<!-- !Edrivers/scsi/scsi_debug.c -->
|
||||
</sect2>
|
||||
<sect2 id="todo">
|
||||
<title>todo</title>
|
||||
<para>Parallel (fast/wide/ultra) SCSI, USB, SATA,
|
||||
SAS, Fibre Channel, FireWire, ATAPI devices, Infiniband,
|
||||
I20, iSCSI, Parallel ports, netlink...
|
||||
</para>
|
||||
</sect2>
|
||||
</sect1>
|
||||
</chapter>
|
||||
</book>
|
@ -46,8 +46,6 @@
|
||||
.mailmap
|
||||
.mm
|
||||
53c700_d.h
|
||||
53c7xx_d.h
|
||||
53c7xx_u.h
|
||||
53c8xx_d.h*
|
||||
BitKeeper
|
||||
COPYING
|
||||
|
@ -1598,7 +1598,13 @@ and is between 256 and 4096 characters. It is defined in the file
|
||||
Format: <vendor>:<model>:<flags>
|
||||
(flags are integer value)
|
||||
|
||||
scsi_logging= [SCSI]
|
||||
scsi_logging_level= [SCSI] a bit mask of logging levels
|
||||
See drivers/scsi/scsi_logging.h for bits. Also
|
||||
settable via sysctl at dev.scsi.logging_level
|
||||
(/proc/sys/dev/scsi/logging_level).
|
||||
There is also a nice 'scsi_logging_level' script in the
|
||||
S390-tools package, available for download at
|
||||
http://www-128.ibm.com/developerworks/linux/linux390/s390-tools-1.5.4.html
|
||||
|
||||
scsi_mod.scan= [SCSI] sync (default) scans SCSI busses as they are
|
||||
discovered. async scans them in kernel threads,
|
||||
|
@ -867,66 +867,6 @@ controller and should be autodetected by the driver. An example is the
|
||||
24 bit region which is specified by a mask of 0x00fffffe.
|
||||
|
||||
|
||||
5.5) 53c7xx=
|
||||
------------
|
||||
|
||||
Syntax: 53c7xx=<sub-options...>
|
||||
|
||||
These options affect the A4000T, A4091, WarpEngine, Blizzard 603e+,
|
||||
and GForce 040/060 SCSI controllers on the Amiga, as well as the
|
||||
builtin MVME 16x SCSI controller.
|
||||
|
||||
The <sub-options> is a comma-separated list of the sub-options listed
|
||||
below.
|
||||
|
||||
5.5.1) nosync
|
||||
-------------
|
||||
|
||||
Syntax: nosync:0
|
||||
|
||||
Disables sync negotiation for all devices. Any value after the
|
||||
colon is acceptable (and has the same effect).
|
||||
|
||||
5.5.2) noasync
|
||||
--------------
|
||||
|
||||
[OBSOLETE, REMOVED]
|
||||
|
||||
5.5.3) nodisconnect
|
||||
-------------------
|
||||
|
||||
Syntax: nodisconnect:0
|
||||
|
||||
Disables SCSI disconnects. Any value after the colon is acceptable
|
||||
(and has the same effect).
|
||||
|
||||
5.5.4) validids
|
||||
---------------
|
||||
|
||||
Syntax: validids:0xNN
|
||||
|
||||
Specify which SCSI ids the driver should pay attention to. This is
|
||||
a bitmask (i.e. to only pay attention to ID#4, you'd use 0x10).
|
||||
Default is 0x7f (devices 0-6).
|
||||
|
||||
5.5.5) opthi
|
||||
5.5.6) optlo
|
||||
------------
|
||||
|
||||
Syntax: opthi:M,optlo:N
|
||||
|
||||
Specify options for "hostdata->options". The acceptable definitions
|
||||
are listed in drivers/scsi/53c7xx.h; the 32 high bits should be in
|
||||
opthi and the 32 low bits in optlo. They must be specified in the
|
||||
order opthi=M,optlo=N.
|
||||
|
||||
5.5.7) next
|
||||
-----------
|
||||
|
||||
No argument. Used to separate blocks of keywords when there's more
|
||||
than one 53c7xx host adapter in the system.
|
||||
|
||||
|
||||
/* Local Variables: */
|
||||
/* mode: text */
|
||||
/* End: */
|
||||
|
@ -64,8 +64,6 @@ lpfc.txt
|
||||
- LPFC driver release notes
|
||||
megaraid.txt
|
||||
- Common Management Module, shared code handling ioctls for LSI drivers
|
||||
ncr53c7xx.txt
|
||||
- info on driver for NCR53c7xx based adapters
|
||||
ncr53c8xx.txt
|
||||
- info on driver for NCR53c8xx based adapters
|
||||
osst.txt
|
||||
|
@ -1,3 +1,162 @@
|
||||
1 Release Date : Thur. Nov. 07 16:30:43 PST 2007 -
|
||||
(emaild-id:megaraidlinux@lsi.com)
|
||||
Sumant Patro
|
||||
Bo Yang
|
||||
|
||||
2 Current Version : 00.00.03.16
|
||||
3 Older Version : 00.00.03.15
|
||||
|
||||
1. Increased MFI_POLL_TIMEOUT_SECS to 60 seconds from 10. FW may take
|
||||
a max of 60 seconds to respond to the INIT cmd.
|
||||
|
||||
1 Release Date : Fri. Sep. 07 16:30:43 PST 2007 -
|
||||
(emaild-id:megaraidlinux@lsi.com)
|
||||
Sumant Patro
|
||||
Bo Yang
|
||||
|
||||
2 Current Version : 00.00.03.15
|
||||
3 Older Version : 00.00.03.14
|
||||
|
||||
1. Added module parameter "poll_mode_io" to support for "polling"
|
||||
(reduced interrupt operation). In this mode, IO completion
|
||||
interrupts are delayed. At the end of initiating IOs, the
|
||||
driver schedules for cmd completion if there are pending cmds
|
||||
to be completed. A timer-based interrupt has also been added
|
||||
to prevent IO completion processing from being delayed
|
||||
indefinitely in the case that no new IOs are initiated.
|
||||
|
||||
1 Release Date : Fri. Sep. 07 16:30:43 PST 2007 -
|
||||
(emaild-id:megaraidlinux@lsi.com)
|
||||
Sumant Patro
|
||||
Bo Yang
|
||||
|
||||
2 Current Version : 00.00.03.14
|
||||
3 Older Version : 00.00.03.13
|
||||
|
||||
1. Setting the max_sectors_per_req based on max SGL supported by the
|
||||
FW. Prior versions calculated this value from controller info
|
||||
(max_sectors_1, max_sectors_2). For certain controllers/FW,
|
||||
this was resulting in a value greater than max SGL supported
|
||||
by the FW. Issue was first reported by users running LUKS+XFS
|
||||
with megaraid_sas. Thanks to RB for providing the logs and
|
||||
duplication steps that helped to get to the root cause of the
|
||||
issue. 2. Increased MFI_POLL_TIMEOUT_SECS to 60 seconds from
|
||||
10. FW may take a max of 60 seconds to respond to the INIT
|
||||
cmd.
|
||||
|
||||
1 Release Date : Fri. June. 15 16:30:43 PST 2007 -
|
||||
(emaild-id:megaraidlinux@lsi.com)
|
||||
Sumant Patro
|
||||
Bo Yang
|
||||
|
||||
2 Current Version : 00.00.03.13
|
||||
3 Older Version : 00.00.03.12
|
||||
|
||||
1. Added the megasas_reset_timer routine to intercept cmd timeout and throttle io.
|
||||
|
||||
On Fri, 2007-03-16 at 16:44 -0600, James Bottomley wrote:
|
||||
It looks like megaraid_sas at least needs this to throttle its commands
|
||||
> as they begin to time out. The code keeps the existing transport
|
||||
> template use of eh_timed_out (and allows the transport to override the
|
||||
> host if they both have this callback).
|
||||
>
|
||||
> James
|
||||
|
||||
1 Release Date : Sat May. 12 16:30:43 PST 2007 -
|
||||
(emaild-id:megaraidlinux@lsi.com)
|
||||
Sumant Patro
|
||||
Bo Yang
|
||||
|
||||
2 Current Version : 00.00.03.12
|
||||
3 Older Version : 00.00.03.11
|
||||
|
||||
1. When MegaSAS driver receives reset call from OS, driver waits in reset
|
||||
routine for max 3 minutes for all pending command completion. Now driver will
|
||||
call completion routine every 5 seconds from the reset routine instead of
|
||||
waiting for depending on cmd completion from isr path.
|
||||
|
||||
1 Release Date : Mon Apr. 30 10:25:52 PST 2007 -
|
||||
(emaild-id:megaraidlinux@lsi.com)
|
||||
Sumant Patro
|
||||
Bo Yang
|
||||
|
||||
2 Current Version : 00.00.03.11
|
||||
3 Older Version : 00.00.03.09
|
||||
|
||||
1. Memory Manager for IOCTL removed for 2.6 kernels.
|
||||
pci_alloc_consistent replaced by dma_alloc_coherent. With this
|
||||
change there is no need of memory manager in the driver code
|
||||
|
||||
On Wed, 2007-02-07 at 13:30 -0800, Andrew Morton wrote:
|
||||
> I suspect all this horror is due to stupidity in the DMA API.
|
||||
>
|
||||
> pci_alloc_consistent() just goes and assumes GFP_ATOMIC, whereas
|
||||
> the caller (megasas_mgmt_fw_ioctl) would have been perfectly happy
|
||||
> to use GFP_KERNEL.
|
||||
>
|
||||
> I bet this fixes it
|
||||
|
||||
It does, but the DMA API was expanded to cope with this exact case, so
|
||||
use dma_alloc_coherent() directly in the megaraid code instead. The dev
|
||||
is just &pci_dev->dev.
|
||||
|
||||
James <James.Bottomley@SteelEye.com>
|
||||
|
||||
3. SYNCHRONIZE_CACHE is not supported by FW and thus blocked by driver.
|
||||
4. Hibernation support added
|
||||
5. Performing diskdump while running IO in RHEL 4 was failing. Fixed.
|
||||
|
||||
1 Release Date : Fri Feb. 09 14:36:28 PST 2007 -
|
||||
(emaild-id:megaraidlinux@lsi.com)
|
||||
Sumant Patro
|
||||
Bo Yang
|
||||
|
||||
2 Current Version : 00.00.03.09
|
||||
3 Older Version : 00.00.03.08
|
||||
|
||||
i. Under heavy IO mid-layer prints "DRIVER_TIMEOUT" errors
|
||||
|
||||
The driver now waits for 10 seconds to elapse instead of 5 (as in
|
||||
previous release) to resume IO.
|
||||
|
||||
1 Release Date : Mon Feb. 05 11:35:24 PST 2007 -
|
||||
(emaild-id:megaraidlinux@lsi.com)
|
||||
Sumant Patro
|
||||
Bo Yang
|
||||
2 Current Version : 00.00.03.08
|
||||
3 Older Version : 00.00.03.07
|
||||
|
||||
i. Under heavy IO mid-layer prints "DRIVER_TIMEOUT" errors
|
||||
|
||||
Fix: The driver is now throttling IO.
|
||||
Checks added in megasas_queue_command to know if FW is able to
|
||||
process commands within timeout period. If number of retries
|
||||
is 2 or greater,the driver stops sending cmd to FW temporarily. IO is
|
||||
resumed if pending cmd count reduces to 16 or 5 seconds has elapsed
|
||||
from the time cmds were last sent to FW.
|
||||
|
||||
ii. FW enables WCE bit in Mode Sense cmd for drives that are configured
|
||||
as WriteBack. The OS may send "SYNCHRONIZE_CACHE" cmd when Logical
|
||||
Disks are exposed with WCE=1. User is advised to enable Write Back
|
||||
mode only when the controller has battery backup. At this time
|
||||
Synchronize cache is not supported by the FW. Driver will short-cycle
|
||||
the cmd and return success without sending down to FW.
|
||||
|
||||
1 Release Date : Sun Jan. 14 11:21:32 PDT 2007 -
|
||||
Sumant Patro <Sumant.Patro@lsil.com>/Bo Yang
|
||||
2 Current Version : 00.00.03.07
|
||||
3 Older Version : 00.00.03.06
|
||||
|
||||
i. bios_param entry added in scsi_host_template that returns disk geometry
|
||||
information.
|
||||
|
||||
1 Release Date : Fri Oct 20 11:21:32 PDT 2006 - Sumant Patro <Sumant.Patro@lsil.com>/Bo Yang
|
||||
2 Current Version : 00.00.03.06
|
||||
3 Older Version : 00.00.03.05
|
||||
|
||||
1. Added new memory management module to support the IOCTL memory allocation. For IOCTL we try to allocate from the memory pool created during driver initialization. If mem pool is empty then we allocate at run time.
|
||||
2. Added check in megasas_queue_command and dpc/isr routine to see if we have already declared adapter dead
|
||||
(hw_crit_error=1). If hw_crit_error==1, now we do not accept any processing of pending cmds/accept any cmd from OS
|
||||
|
||||
1 Release Date : Mon Oct 02 11:21:32 PDT 2006 - Sumant Patro <Sumant.Patro@lsil.com>
|
||||
2 Current Version : 00.00.03.05
|
||||
|
@ -56,6 +56,10 @@ Supported Cards/Chipsets
|
||||
9005:0285:9005:02d1 Adaptec 5405 (Voodoo40)
|
||||
9005:0285:15d9:02d2 SMC AOC-USAS-S8i-LP
|
||||
9005:0285:15d9:02d3 SMC AOC-USAS-S8iR-LP
|
||||
9005:0285:9005:02d4 Adaptec 2045 (Voodoo04 Lite)
|
||||
9005:0285:9005:02d5 Adaptec 2405 (Voodoo40 Lite)
|
||||
9005:0285:9005:02d6 Adaptec 2445 (Voodoo44 Lite)
|
||||
9005:0285:9005:02d7 Adaptec 2805 (Voodoo80 Lite)
|
||||
1011:0046:9005:0364 Adaptec 5400S (Mustang)
|
||||
9005:0287:9005:0800 Adaptec Themisto (Jupiter)
|
||||
9005:0200:9005:0200 Adaptec Themisto (Jupiter)
|
||||
|
@ -1,9 +1,9 @@
|
||||
HIGHPOINT ROCKETRAID 3xxx RAID DRIVER (hptiop)
|
||||
HIGHPOINT ROCKETRAID 3xxx/4xxx ADAPTER DRIVER (hptiop)
|
||||
|
||||
Controller Register Map
|
||||
-------------------------
|
||||
|
||||
The controller IOP is accessed via PCI BAR0.
|
||||
For Intel IOP based adapters, the controller IOP is accessed via PCI BAR0:
|
||||
|
||||
BAR0 offset Register
|
||||
0x10 Inbound Message Register 0
|
||||
@ -18,6 +18,24 @@ The controller IOP is accessed via PCI BAR0.
|
||||
0x40 Inbound Queue Port
|
||||
0x44 Outbound Queue Port
|
||||
|
||||
For Marvell IOP based adapters, the IOP is accessed via PCI BAR0 and BAR1:
|
||||
|
||||
BAR0 offset Register
|
||||
0x20400 Inbound Doorbell Register
|
||||
0x20404 Inbound Interrupt Mask Register
|
||||
0x20408 Outbound Doorbell Register
|
||||
0x2040C Outbound Interrupt Mask Register
|
||||
|
||||
BAR1 offset Register
|
||||
0x0 Inbound Queue Head Pointer
|
||||
0x4 Inbound Queue Tail Pointer
|
||||
0x8 Outbound Queue Head Pointer
|
||||
0xC Outbound Queue Tail Pointer
|
||||
0x10 Inbound Message Register
|
||||
0x14 Outbound Message Register
|
||||
0x40-0x1040 Inbound Queue
|
||||
0x1040-0x2040 Outbound Queue
|
||||
|
||||
|
||||
I/O Request Workflow
|
||||
----------------------
|
||||
@ -73,15 +91,9 @@ The driver exposes following sysfs attributes:
|
||||
driver-version R driver version string
|
||||
firmware-version R firmware version string
|
||||
|
||||
The driver registers char device "hptiop" to communicate with HighPoint RAID
|
||||
management software. Its ioctl routine acts as a general binary interface
|
||||
between the IOP firmware and HighPoint RAID management software. New management
|
||||
functions can be implemented in application/firmware without modification
|
||||
in driver code.
|
||||
|
||||
|
||||
-----------------------------------------------------------------------------
|
||||
Copyright (C) 2006 HighPoint Technologies, Inc. All Rights Reserved.
|
||||
Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
|
||||
|
||||
This file is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
|
@ -1,40 +0,0 @@
|
||||
README for WarpEngine/A4000T/A4091 SCSI kernels.
|
||||
|
||||
Use the following options to disable options in the SCSI driver.
|
||||
|
||||
Using amiboot for example.....
|
||||
|
||||
To disable Synchronous Negotiation....
|
||||
|
||||
amiboot -k kernel 53c7xx=nosync:0
|
||||
|
||||
To disable Disconnection....
|
||||
|
||||
amiboot -k kernel 53c7xx=nodisconnect:0
|
||||
|
||||
To disable certain SCSI devices...
|
||||
|
||||
amiboot -k kernel 53c7xx=validids:0x3F
|
||||
|
||||
this allows only device ID's 0,1,2,3,4 and 5 for linux to handle.
|
||||
(this is a bitmasked field - i.e. each bit represents a SCSI ID)
|
||||
|
||||
These commands work on a per controller basis and use the option 'next' to
|
||||
move to the next controller in the system.
|
||||
|
||||
e.g.
|
||||
amiboot -k kernel 53c7xx=nodisconnect:0,next,nosync:0
|
||||
|
||||
this uses No Disconnection on the first controller and Asynchronous
|
||||
SCSI on the second controller.
|
||||
|
||||
Known Issues:
|
||||
|
||||
Two devices are known not to function with the default settings of using
|
||||
synchronous SCSI. These are the Archive Viper 150 Tape Drive and the
|
||||
SyQuest SQ555 removable hard drive. When using these devices on a controller
|
||||
use the 'nosync:0' option.
|
||||
|
||||
Please try these options and post any problems/successes to me.
|
||||
|
||||
Alan Hourihane <alanh@fairlite.demon.co.uk>
|
@ -3269,8 +3269,10 @@ W: http://www.ibm.com/developerworks/linux/linux390/
|
||||
S: Supported
|
||||
|
||||
S390 ZFCP DRIVER
|
||||
P: Swen Schillig
|
||||
M: swen@vnet.ibm.com
|
||||
P: Christof Schmitt
|
||||
M: christof.schmitt@de.ibm.com
|
||||
P: Martin Peschke
|
||||
M: mp3@de.ibm.com
|
||||
M: linux390@de.ibm.com
|
||||
L: linux-s390@vger.kernel.org
|
||||
W: http://www.ibm.com/developerworks/linux/linux390/
|
||||
|
14
block/bsg.c
14
block/bsg.c
@ -445,6 +445,15 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
|
||||
else
|
||||
hdr->dout_resid = rq->data_len;
|
||||
|
||||
/*
|
||||
* If the request generated a negative error number, return it
|
||||
* (providing we aren't already returning an error); if it's
|
||||
* just a protocol response (i.e. non negative), that gets
|
||||
* processed above.
|
||||
*/
|
||||
if (!ret && rq->errors < 0)
|
||||
ret = rq->errors;
|
||||
|
||||
blk_rq_unmap_user(bio);
|
||||
blk_put_request(rq);
|
||||
|
||||
@ -837,6 +846,7 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
struct bsg_device *bd = file->private_data;
|
||||
int __user *uarg = (int __user *) arg;
|
||||
int ret;
|
||||
|
||||
switch (cmd) {
|
||||
/*
|
||||
@ -889,12 +899,12 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
if (rq->next_rq)
|
||||
bidi_bio = rq->next_rq->bio;
|
||||
blk_execute_rq(bd->queue, NULL, rq, 0);
|
||||
blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
|
||||
ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
|
||||
|
||||
if (copy_to_user(uarg, &hdr, sizeof(hdr)))
|
||||
return -EFAULT;
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
/*
|
||||
* block device ioctls
|
||||
|
@ -759,6 +759,30 @@ void blk_queue_dma_alignment(struct request_queue *q, int mask)
|
||||
|
||||
EXPORT_SYMBOL(blk_queue_dma_alignment);
|
||||
|
||||
/**
|
||||
* blk_queue_update_dma_alignment - update dma length and memory alignment
|
||||
* @q: the request queue for the device
|
||||
* @mask: alignment mask
|
||||
*
|
||||
* description:
|
||||
* update required memory and length aligment for direct dma transactions.
|
||||
* If the requested alignment is larger than the current alignment, then
|
||||
* the current queue alignment is updated to the new value, otherwise it
|
||||
* is left alone. The design of this is to allow multiple objects
|
||||
* (driver, device, transport etc) to set their respective
|
||||
* alignments without having them interfere.
|
||||
*
|
||||
**/
|
||||
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
|
||||
{
|
||||
BUG_ON(mask > PAGE_SIZE);
|
||||
|
||||
if (mask > q->dma_alignment)
|
||||
q->dma_alignment = mask;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
|
||||
|
||||
/**
|
||||
* blk_queue_find_tag - find a request by its tag and queue
|
||||
* @q: The request queue for the device
|
||||
|
@ -839,7 +839,14 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
|
||||
if (dev->class == ATA_DEV_ATAPI) {
|
||||
struct request_queue *q = sdev->request_queue;
|
||||
blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
|
||||
}
|
||||
|
||||
/* set the min alignment */
|
||||
blk_queue_update_dma_alignment(sdev->request_queue,
|
||||
ATA_DMA_PAD_SZ - 1);
|
||||
} else
|
||||
/* ATA devices must be sector aligned */
|
||||
blk_queue_update_dma_alignment(sdev->request_queue,
|
||||
ATA_SECT_SIZE - 1);
|
||||
|
||||
if (dev->class == ATA_DEV_ATA)
|
||||
sdev->manage_start_stop = 1;
|
||||
@ -878,7 +885,7 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
|
||||
if (dev)
|
||||
ata_scsi_dev_config(sdev, dev);
|
||||
|
||||
return 0; /* scsi layer doesn't check return value, sigh */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -320,9 +320,14 @@ attribute_container_add_attrs(struct class_device *classdev)
|
||||
struct class_device_attribute **attrs = cont->attrs;
|
||||
int i, error;
|
||||
|
||||
if (!attrs)
|
||||
BUG_ON(attrs && cont->grp);
|
||||
|
||||
if (!attrs && !cont->grp)
|
||||
return 0;
|
||||
|
||||
if (cont->grp)
|
||||
return sysfs_create_group(&classdev->kobj, cont->grp);
|
||||
|
||||
for (i = 0; attrs[i]; i++) {
|
||||
error = class_device_create_file(classdev, attrs[i]);
|
||||
if (error)
|
||||
@ -378,9 +383,14 @@ attribute_container_remove_attrs(struct class_device *classdev)
|
||||
struct class_device_attribute **attrs = cont->attrs;
|
||||
int i;
|
||||
|
||||
if (!attrs)
|
||||
if (!attrs && !cont->grp)
|
||||
return;
|
||||
|
||||
if (cont->grp) {
|
||||
sysfs_remove_group(&classdev->kobj, cont->grp);
|
||||
return ;
|
||||
}
|
||||
|
||||
for (i = 0; attrs[i]; i++)
|
||||
class_device_remove_file(classdev, attrs[i]);
|
||||
}
|
||||
|
@ -1238,6 +1238,12 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
|
||||
|
||||
sdev->allow_restart = 1;
|
||||
|
||||
/*
|
||||
* Update the dma alignment (minimum alignment requirements for
|
||||
* start and end of DMA transfers) to be a sector
|
||||
*/
|
||||
blk_queue_update_dma_alignment(sdev->request_queue, 511);
|
||||
|
||||
if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
|
||||
sdev->inquiry_len = 36;
|
||||
|
||||
|
@ -1963,6 +1963,12 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
|
||||
lu->sdev = sdev;
|
||||
sdev->allow_restart = 1;
|
||||
|
||||
/*
|
||||
* Update the dma alignment (minimum alignment requirements for
|
||||
* start and end of DMA transfers) to be a sector
|
||||
*/
|
||||
blk_queue_update_dma_alignment(sdev->request_queue, 511);
|
||||
|
||||
if (lu->workarounds & SBP2_WORKAROUND_INQUIRY_36)
|
||||
sdev->inquiry_len = 36;
|
||||
return 0;
|
||||
|
@ -129,7 +129,7 @@ error:
|
||||
* iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
|
||||
*
|
||||
**/
|
||||
static void
|
||||
static int
|
||||
iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
|
||||
{
|
||||
struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
|
||||
@ -138,6 +138,7 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
|
||||
iser_ctask->command_sent = 0;
|
||||
iser_ctask->iser_conn = iser_conn;
|
||||
iser_ctask_rdma_init(iser_ctask);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -220,12 +221,6 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
|
||||
debug_scsi("ctask deq [cid %d itt 0x%x]\n",
|
||||
conn->id, ctask->itt);
|
||||
|
||||
/*
|
||||
* serialize with TMF AbortTask
|
||||
*/
|
||||
if (ctask->mtask)
|
||||
return error;
|
||||
|
||||
/* Send the cmd PDU */
|
||||
if (!iser_ctask->command_sent) {
|
||||
error = iser_send_command(conn, ctask);
|
||||
@ -406,6 +401,7 @@ iscsi_iser_session_create(struct iscsi_transport *iscsit,
|
||||
ctask = session->cmds[i];
|
||||
iser_ctask = ctask->dd_data;
|
||||
ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
|
||||
ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
|
||||
}
|
||||
|
||||
for (i = 0; i < session->mgmtpool_max; i++) {
|
||||
@ -557,6 +553,7 @@ static struct scsi_host_template iscsi_iser_sht = {
|
||||
.max_sectors = 1024,
|
||||
.cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
|
||||
.eh_abort_handler = iscsi_eh_abort,
|
||||
.eh_device_reset_handler= iscsi_eh_device_reset,
|
||||
.eh_host_reset_handler = iscsi_eh_host_reset,
|
||||
.use_clustering = DISABLE_CLUSTERING,
|
||||
.proc_name = "iscsi_iser",
|
||||
@ -583,7 +580,9 @@ static struct iscsi_transport iscsi_iser_transport = {
|
||||
ISCSI_PERSISTENT_ADDRESS |
|
||||
ISCSI_TARGET_NAME | ISCSI_TPGT |
|
||||
ISCSI_USERNAME | ISCSI_PASSWORD |
|
||||
ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN,
|
||||
ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
|
||||
ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
|
||||
ISCSI_PING_TMO | ISCSI_RECV_TMO,
|
||||
.host_param_mask = ISCSI_HOST_HWADDRESS |
|
||||
ISCSI_HOST_NETDEV_NAME |
|
||||
ISCSI_HOST_INITIATOR_NAME,
|
||||
|
@ -621,9 +621,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
|
||||
struct iscsi_session *session = conn->session;
|
||||
|
||||
spin_lock(&conn->session->lock);
|
||||
list_del(&mtask->running);
|
||||
__kfifo_put(session->mgmtpool.queue, (void*)&mtask,
|
||||
sizeof(void*));
|
||||
iscsi_free_mgmt_task(conn, mtask);
|
||||
spin_unlock(&session->lock);
|
||||
}
|
||||
}
|
||||
|
@ -2056,7 +2056,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
|
||||
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
|
||||
"mpt_upload: alt_%s has cached_fw=%p \n",
|
||||
ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw));
|
||||
ioc->alt_ioc->cached_fw = NULL;
|
||||
ioc->cached_fw = NULL;
|
||||
}
|
||||
} else {
|
||||
printk(MYIOC_s_WARN_FMT
|
||||
@ -2262,10 +2262,12 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
|
||||
int ret;
|
||||
|
||||
if (ioc->cached_fw != NULL) {
|
||||
ddlprintk(ioc, printk(MYIOC_s_INFO_FMT
|
||||
"mpt_adapter_disable: Pushing FW onto adapter\n", ioc->name));
|
||||
if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)ioc->cached_fw, NO_SLEEP)) < 0) {
|
||||
printk(MYIOC_s_WARN_FMT "firmware downloadboot failure (%d)!\n",
|
||||
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: Pushing FW onto "
|
||||
"adapter\n", __FUNCTION__, ioc->name));
|
||||
if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)
|
||||
ioc->cached_fw, CAN_SLEEP)) < 0) {
|
||||
printk(MYIOC_s_WARN_FMT
|
||||
": firmware downloadboot failure (%d)!\n",
|
||||
ioc->name, ret);
|
||||
}
|
||||
}
|
||||
@ -2303,13 +2305,7 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
|
||||
ioc->alloc_total -= sz;
|
||||
}
|
||||
|
||||
if (ioc->cached_fw != NULL) {
|
||||
sz = ioc->facts.FWImageSize;
|
||||
pci_free_consistent(ioc->pcidev, sz,
|
||||
ioc->cached_fw, ioc->cached_fw_dma);
|
||||
ioc->cached_fw = NULL;
|
||||
ioc->alloc_total -= sz;
|
||||
}
|
||||
mpt_free_fw_memory(ioc);
|
||||
|
||||
kfree(ioc->spi_data.nvram);
|
||||
mpt_inactive_raid_list_free(ioc);
|
||||
@ -3047,44 +3043,62 @@ SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
|
||||
*
|
||||
* If memory has already been allocated, the same (cached) value
|
||||
* is returned.
|
||||
*/
|
||||
void
|
||||
*
|
||||
* Return 0 if successfull, or non-zero for failure
|
||||
**/
|
||||
int
|
||||
mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size)
|
||||
{
|
||||
if (ioc->cached_fw)
|
||||
return; /* use already allocated memory */
|
||||
if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
|
||||
int rc;
|
||||
|
||||
if (ioc->cached_fw) {
|
||||
rc = 0; /* use already allocated memory */
|
||||
goto out;
|
||||
}
|
||||
else if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
|
||||
ioc->cached_fw = ioc->alt_ioc->cached_fw; /* use alt_ioc's memory */
|
||||
ioc->cached_fw_dma = ioc->alt_ioc->cached_fw_dma;
|
||||
ioc->alloc_total += size;
|
||||
ioc->alt_ioc->alloc_total -= size;
|
||||
} else {
|
||||
if ( (ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma) ) )
|
||||
ioc->alloc_total += size;
|
||||
rc = 0;
|
||||
goto out;
|
||||
}
|
||||
ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma);
|
||||
if (!ioc->cached_fw) {
|
||||
printk(MYIOC_s_ERR_FMT "Unable to allocate memory for the cached firmware image!\n",
|
||||
ioc->name);
|
||||
rc = -1;
|
||||
} else {
|
||||
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Image @ %p[%p], sz=%d[%x] bytes\n",
|
||||
ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, size, size));
|
||||
ioc->alloc_total += size;
|
||||
rc = 0;
|
||||
}
|
||||
out:
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* mpt_free_fw_memory - free firmware memory
|
||||
* @ioc: Pointer to MPT_ADAPTER structure
|
||||
*
|
||||
* If alt_img is NULL, delete from ioc structure.
|
||||
* Else, delete a secondary image in same format.
|
||||
*/
|
||||
**/
|
||||
void
|
||||
mpt_free_fw_memory(MPT_ADAPTER *ioc)
|
||||
{
|
||||
int sz;
|
||||
|
||||
if (!ioc->cached_fw)
|
||||
return;
|
||||
|
||||
sz = ioc->facts.FWImageSize;
|
||||
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n",
|
||||
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n",
|
||||
ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
|
||||
pci_free_consistent(ioc->pcidev, sz, ioc->cached_fw, ioc->cached_fw_dma);
|
||||
ioc->alloc_total -= sz;
|
||||
ioc->cached_fw = NULL;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
|
||||
/**
|
||||
* mpt_do_upload - Construct and Send FWUpload request to MPT adapter port.
|
||||
@ -3116,17 +3130,12 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
|
||||
if ((sz = ioc->facts.FWImageSize) == 0)
|
||||
return 0;
|
||||
|
||||
mpt_alloc_fw_memory(ioc, sz);
|
||||
if (mpt_alloc_fw_memory(ioc, ioc->facts.FWImageSize) != 0)
|
||||
return -ENOMEM;
|
||||
|
||||
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": FW Image @ %p[%p], sz=%d[%x] bytes\n",
|
||||
ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
|
||||
|
||||
if (ioc->cached_fw == NULL) {
|
||||
/* Major Failure.
|
||||
*/
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
prequest = (sleepFlag == NO_SLEEP) ? kzalloc(ioc->req_sz, GFP_ATOMIC) :
|
||||
kzalloc(ioc->req_sz, GFP_KERNEL);
|
||||
if (!prequest) {
|
||||
@ -3498,12 +3507,12 @@ KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag)
|
||||
static int
|
||||
mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
|
||||
{
|
||||
MPT_ADAPTER *iocp=NULL;
|
||||
u32 diag0val;
|
||||
u32 doorbell;
|
||||
int hard_reset_done = 0;
|
||||
int count = 0;
|
||||
u32 diag1val = 0;
|
||||
MpiFwHeader_t *cached_fw; /* Pointer to FW */
|
||||
|
||||
/* Clear any existing interrupts */
|
||||
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
|
||||
@ -3635,22 +3644,24 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
|
||||
}
|
||||
|
||||
if (ioc->cached_fw)
|
||||
iocp = ioc;
|
||||
cached_fw = (MpiFwHeader_t *)ioc->cached_fw;
|
||||
else if (ioc->alt_ioc && ioc->alt_ioc->cached_fw)
|
||||
iocp = ioc->alt_ioc;
|
||||
if (iocp) {
|
||||
cached_fw = (MpiFwHeader_t *)ioc->alt_ioc->cached_fw;
|
||||
else
|
||||
cached_fw = NULL;
|
||||
if (cached_fw) {
|
||||
/* If the DownloadBoot operation fails, the
|
||||
* IOC will be left unusable. This is a fatal error
|
||||
* case. _diag_reset will return < 0
|
||||
*/
|
||||
for (count = 0; count < 30; count ++) {
|
||||
diag0val = CHIPREG_READ32(&iocp->chip->Diagnostic);
|
||||
diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
|
||||
if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) {
|
||||
break;
|
||||
}
|
||||
|
||||
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "cached_fw: diag0val=%x count=%d\n",
|
||||
iocp->name, diag0val, count));
|
||||
ioc->name, diag0val, count));
|
||||
/* wait 1 sec */
|
||||
if (sleepFlag == CAN_SLEEP) {
|
||||
msleep (1000);
|
||||
@ -3658,8 +3669,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
|
||||
mdelay (1000);
|
||||
}
|
||||
}
|
||||
if ((count = mpt_downloadboot(ioc,
|
||||
(MpiFwHeader_t *)iocp->cached_fw, sleepFlag)) < 0) {
|
||||
if ((count = mpt_downloadboot(ioc, cached_fw, sleepFlag)) < 0) {
|
||||
printk(MYIOC_s_WARN_FMT
|
||||
"firmware downloadboot failure (%d)!\n", ioc->name, count);
|
||||
}
|
||||
|
@ -907,7 +907,7 @@ extern u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked);
|
||||
extern void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan);
|
||||
extern int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
|
||||
extern int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg);
|
||||
extern void mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
|
||||
extern int mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
|
||||
extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
|
||||
extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
|
||||
extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
|
||||
|
@ -1343,6 +1343,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
|
||||
smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
|
||||
memcpy(req->sense, smprep, sizeof(*smprep));
|
||||
req->sense_len = sizeof(*smprep);
|
||||
req->data_len = 0;
|
||||
rsp->data_len -= smprep->ResponseDataLength;
|
||||
} else {
|
||||
printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n",
|
||||
ioc->name, __FUNCTION__);
|
||||
|
@ -111,7 +111,7 @@ int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state);
|
||||
int mptscsih_resume(struct pci_dev *pdev);
|
||||
#endif
|
||||
|
||||
#define SNS_LEN(scp) sizeof((scp)->sense_buffer)
|
||||
#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE
|
||||
|
||||
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
|
||||
/**
|
||||
|
@ -370,7 +370,7 @@ static int i2o_scsi_reply(struct i2o_controller *c, u32 m,
|
||||
*/
|
||||
if (cmd->result)
|
||||
memcpy(cmd->sense_buffer, &msg->body[3],
|
||||
min(sizeof(cmd->sense_buffer), (size_t) 40));
|
||||
min(SCSI_SENSE_BUFFERSIZE, 40));
|
||||
|
||||
/* only output error code if AdapterStatus is not HBA_SUCCESS */
|
||||
if ((error >> 8) & 0xff)
|
||||
|
@ -844,8 +844,6 @@ zfcp_unit_enqueue(struct zfcp_port *port, fcp_lun_t fcp_lun)
|
||||
unit->sysfs_device.release = zfcp_sysfs_unit_release;
|
||||
dev_set_drvdata(&unit->sysfs_device, unit);
|
||||
|
||||
init_waitqueue_head(&unit->scsi_scan_wq);
|
||||
|
||||
/* mark unit unusable as long as sysfs registration is not complete */
|
||||
atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
|
||||
|
||||
|
@ -123,6 +123,9 @@ zfcp_ccw_remove(struct ccw_device *ccw_device)
|
||||
|
||||
list_for_each_entry_safe(port, p, &adapter->port_remove_lh, list) {
|
||||
list_for_each_entry_safe(unit, u, &port->unit_remove_lh, list) {
|
||||
if (atomic_test_mask(ZFCP_STATUS_UNIT_REGISTERED,
|
||||
&unit->status))
|
||||
scsi_remove_device(unit->device);
|
||||
zfcp_unit_dequeue(unit);
|
||||
}
|
||||
zfcp_port_dequeue(port);
|
||||
|
@ -161,12 +161,6 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
|
||||
(fsf_req->fsf_command == FSF_QTCB_OPEN_LUN)) {
|
||||
strncpy(rec->tag2, "open", ZFCP_DBF_TAG_SIZE);
|
||||
level = 4;
|
||||
} else if ((prot_status_qual->doubleword[0] != 0) ||
|
||||
(prot_status_qual->doubleword[1] != 0) ||
|
||||
(fsf_status_qual->doubleword[0] != 0) ||
|
||||
(fsf_status_qual->doubleword[1] != 0)) {
|
||||
strncpy(rec->tag2, "qual", ZFCP_DBF_TAG_SIZE);
|
||||
level = 3;
|
||||
} else {
|
||||
strncpy(rec->tag2, "norm", ZFCP_DBF_TAG_SIZE);
|
||||
level = 6;
|
||||
|
@ -118,7 +118,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
|
||||
|
||||
#define ZFCP_SBAL_TIMEOUT (5*HZ)
|
||||
|
||||
#define ZFCP_TYPE2_RECOVERY_TIME (8*HZ)
|
||||
#define ZFCP_TYPE2_RECOVERY_TIME 8 /* seconds */
|
||||
|
||||
/* queue polling (values in microseconds) */
|
||||
#define ZFCP_MAX_INPUT_THRESHOLD 5000 /* FIXME: tune */
|
||||
@ -139,7 +139,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
|
||||
#define ZFCP_STATUS_READS_RECOM FSF_STATUS_READS_RECOM
|
||||
|
||||
/* Do 1st retry in 1 second, then double the timeout for each following retry */
|
||||
#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100
|
||||
#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 1
|
||||
#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7
|
||||
|
||||
/* timeout value for "default timer" for fsf requests */
|
||||
@ -983,10 +983,6 @@ struct zfcp_unit {
|
||||
struct scsi_device *device; /* scsi device struct pointer */
|
||||
struct zfcp_erp_action erp_action; /* pending error recovery */
|
||||
atomic_t erp_counter;
|
||||
wait_queue_head_t scsi_scan_wq; /* can be used to wait until
|
||||
all scsi_scan_target
|
||||
requests have been
|
||||
completed. */
|
||||
};
|
||||
|
||||
/* FSF request */
|
||||
@ -1127,6 +1123,20 @@ zfcp_reqlist_find(struct zfcp_adapter *adapter, unsigned long req_id)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct zfcp_fsf_req *
|
||||
zfcp_reqlist_find_safe(struct zfcp_adapter *adapter, struct zfcp_fsf_req *req)
|
||||
{
|
||||
struct zfcp_fsf_req *request;
|
||||
unsigned int idx;
|
||||
|
||||
for (idx = 0; idx < REQUEST_LIST_SIZE; idx++) {
|
||||
list_for_each_entry(request, &adapter->req_list[idx], list)
|
||||
if (request == req)
|
||||
return request;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* functions needed for reference/usage counting
|
||||
*/
|
||||
|
@ -131,7 +131,7 @@ static void zfcp_close_qdio(struct zfcp_adapter *adapter)
|
||||
debug_text_event(adapter->erp_dbf, 3, "qdio_down2a");
|
||||
while (qdio_shutdown(adapter->ccw_device,
|
||||
QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
|
||||
msleep(1000);
|
||||
ssleep(1);
|
||||
debug_text_event(adapter->erp_dbf, 3, "qdio_down2b");
|
||||
|
||||
/* cleanup used outbound sbals */
|
||||
@ -456,7 +456,7 @@ zfcp_test_link(struct zfcp_port *port)
|
||||
|
||||
zfcp_port_get(port);
|
||||
retval = zfcp_erp_adisc(port);
|
||||
if (retval != 0) {
|
||||
if (retval != 0 && retval != -EBUSY) {
|
||||
zfcp_port_put(port);
|
||||
ZFCP_LOG_NORMAL("reopen needed for port 0x%016Lx "
|
||||
"on adapter %s\n ", port->wwpn,
|
||||
@ -846,7 +846,8 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
|
||||
if (erp_action->fsf_req) {
|
||||
/* take lock to ensure that request is not deleted meanwhile */
|
||||
spin_lock(&adapter->req_list_lock);
|
||||
if (zfcp_reqlist_find(adapter, erp_action->fsf_req->req_id)) {
|
||||
if (zfcp_reqlist_find_safe(adapter, erp_action->fsf_req) &&
|
||||
erp_action->fsf_req->erp_action == erp_action) {
|
||||
/* fsf_req still exists */
|
||||
debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
|
||||
debug_event(adapter->erp_dbf, 3, &erp_action->fsf_req,
|
||||
@ -1609,7 +1610,6 @@ static void zfcp_erp_scsi_scan(struct work_struct *work)
|
||||
scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
|
||||
unit->scsi_lun, 0);
|
||||
atomic_clear_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING, &unit->status);
|
||||
wake_up(&unit->scsi_scan_wq);
|
||||
zfcp_unit_put(unit);
|
||||
kfree(p);
|
||||
}
|
||||
@ -1900,7 +1900,7 @@ zfcp_erp_adapter_strategy(struct zfcp_erp_action *erp_action)
|
||||
ZFCP_LOG_INFO("Waiting to allow the adapter %s "
|
||||
"to recover itself\n",
|
||||
zfcp_get_busid_by_adapter(adapter));
|
||||
msleep(jiffies_to_msecs(ZFCP_TYPE2_RECOVERY_TIME));
|
||||
ssleep(ZFCP_TYPE2_RECOVERY_TIME);
|
||||
}
|
||||
|
||||
return retval;
|
||||
@ -2080,7 +2080,7 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
|
||||
debug_text_event(adapter->erp_dbf, 3, "qdio_down1a");
|
||||
while (qdio_shutdown(adapter->ccw_device,
|
||||
QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
|
||||
msleep(1000);
|
||||
ssleep(1);
|
||||
debug_text_event(adapter->erp_dbf, 3, "qdio_down1b");
|
||||
|
||||
failed_qdio_establish:
|
||||
@ -2165,7 +2165,7 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
|
||||
ZFCP_LOG_DEBUG("host connection still initialising... "
|
||||
"waiting and retrying...\n");
|
||||
/* sleep a little bit before retry */
|
||||
msleep(jiffies_to_msecs(sleep));
|
||||
ssleep(sleep);
|
||||
sleep *= 2;
|
||||
}
|
||||
|
||||
|
@ -1116,6 +1116,10 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
|
||||
&unit->status)))
|
||||
goto unit_blocked;
|
||||
|
||||
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
|
||||
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
|
||||
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
|
||||
@ -1131,22 +1135,13 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
|
||||
|
||||
zfcp_fsf_start_timer(fsf_req, ZFCP_SCSI_ER_TIMEOUT);
|
||||
retval = zfcp_fsf_req_send(fsf_req);
|
||||
if (retval) {
|
||||
ZFCP_LOG_INFO("error: Failed to send abort command request "
|
||||
"on adapter %s, port 0x%016Lx, unit 0x%016Lx\n",
|
||||
zfcp_get_busid_by_adapter(adapter),
|
||||
unit->port->wwpn, unit->fcp_lun);
|
||||
if (!retval)
|
||||
goto out;
|
||||
|
||||
unit_blocked:
|
||||
zfcp_fsf_req_free(fsf_req);
|
||||
fsf_req = NULL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ZFCP_LOG_DEBUG("Abort FCP Command request initiated "
|
||||
"(adapter%s, port d_id=0x%06x, "
|
||||
"unit x%016Lx, old_req_id=0x%lx)\n",
|
||||
zfcp_get_busid_by_adapter(adapter),
|
||||
unit->port->d_id,
|
||||
unit->fcp_lun, old_req_id);
|
||||
out:
|
||||
write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
|
||||
return fsf_req;
|
||||
@ -1164,8 +1159,8 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
|
||||
{
|
||||
int retval = -EINVAL;
|
||||
struct zfcp_unit *unit;
|
||||
unsigned char status_qual =
|
||||
new_fsf_req->qtcb->header.fsf_status_qual.word[0];
|
||||
union fsf_status_qual *fsf_stat_qual =
|
||||
&new_fsf_req->qtcb->header.fsf_status_qual;
|
||||
|
||||
if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
|
||||
/* do not set ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED */
|
||||
@ -1178,7 +1173,7 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
|
||||
switch (new_fsf_req->qtcb->header.fsf_status) {
|
||||
|
||||
case FSF_PORT_HANDLE_NOT_VALID:
|
||||
if (status_qual >> 4 != status_qual % 0xf) {
|
||||
if (fsf_stat_qual->word[0] != fsf_stat_qual->word[1]) {
|
||||
debug_text_event(new_fsf_req->adapter->erp_dbf, 3,
|
||||
"fsf_s_phand_nv0");
|
||||
/*
|
||||
@ -1207,8 +1202,7 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
|
||||
break;
|
||||
|
||||
case FSF_LUN_HANDLE_NOT_VALID:
|
||||
if (status_qual >> 4 != status_qual % 0xf) {
|
||||
/* 2 */
|
||||
if (fsf_stat_qual->word[0] != fsf_stat_qual->word[1]) {
|
||||
debug_text_event(new_fsf_req->adapter->erp_dbf, 3,
|
||||
"fsf_s_lhand_nv0");
|
||||
/*
|
||||
@ -1674,6 +1668,12 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
|
||||
goto failed_req;
|
||||
}
|
||||
|
||||
if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
|
||||
&els->port->status))) {
|
||||
ret = -EBUSY;
|
||||
goto port_blocked;
|
||||
}
|
||||
|
||||
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
|
||||
if (zfcp_use_one_sbal(els->req, els->req_count,
|
||||
els->resp, els->resp_count)){
|
||||
@ -1755,6 +1755,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
|
||||
"0x%06x)\n", zfcp_get_busid_by_adapter(adapter), d_id);
|
||||
goto out;
|
||||
|
||||
port_blocked:
|
||||
failed_send:
|
||||
zfcp_fsf_req_free(fsf_req);
|
||||
|
||||
@ -3592,6 +3593,12 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
|
||||
goto failed_req_create;
|
||||
}
|
||||
|
||||
if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
|
||||
&unit->status))) {
|
||||
retval = -EBUSY;
|
||||
goto unit_blocked;
|
||||
}
|
||||
|
||||
zfcp_unit_get(unit);
|
||||
fsf_req->unit = unit;
|
||||
|
||||
@ -3732,6 +3739,7 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
|
||||
send_failed:
|
||||
no_fit:
|
||||
failed_scsi_cmnd:
|
||||
unit_blocked:
|
||||
zfcp_unit_put(unit);
|
||||
zfcp_fsf_req_free(fsf_req);
|
||||
fsf_req = NULL;
|
||||
@ -3766,6 +3774,10 @@ zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (unlikely(!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED,
|
||||
&unit->status)))
|
||||
goto unit_blocked;
|
||||
|
||||
/*
|
||||
* Used to decide on proper handler in the return path,
|
||||
* could be either zfcp_fsf_send_fcp_command_task_handler or
|
||||
@ -3799,25 +3811,13 @@ zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter,
|
||||
|
||||
zfcp_fsf_start_timer(fsf_req, ZFCP_SCSI_ER_TIMEOUT);
|
||||
retval = zfcp_fsf_req_send(fsf_req);
|
||||
if (retval) {
|
||||
ZFCP_LOG_INFO("error: Could not send an FCP-command (task "
|
||||
"management) on adapter %s, port 0x%016Lx for "
|
||||
"unit LUN 0x%016Lx\n",
|
||||
zfcp_get_busid_by_adapter(adapter),
|
||||
unit->port->wwpn,
|
||||
unit->fcp_lun);
|
||||
if (!retval)
|
||||
goto out;
|
||||
|
||||
unit_blocked:
|
||||
zfcp_fsf_req_free(fsf_req);
|
||||
fsf_req = NULL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ZFCP_LOG_TRACE("Send FCP Command (task management function) initiated "
|
||||
"(adapter %s, port 0x%016Lx, unit 0x%016Lx, "
|
||||
"tm_flags=0x%x)\n",
|
||||
zfcp_get_busid_by_adapter(adapter),
|
||||
unit->port->wwpn,
|
||||
unit->fcp_lun,
|
||||
tm_flags);
|
||||
out:
|
||||
write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
|
||||
return fsf_req;
|
||||
|
@ -51,7 +51,6 @@ struct zfcp_data zfcp_data = {
|
||||
.queuecommand = zfcp_scsi_queuecommand,
|
||||
.eh_abort_handler = zfcp_scsi_eh_abort_handler,
|
||||
.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
|
||||
.eh_bus_reset_handler = zfcp_scsi_eh_host_reset_handler,
|
||||
.eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
|
||||
.can_queue = 4096,
|
||||
.this_id = -1,
|
||||
@ -181,9 +180,6 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
|
||||
|
||||
if (unit) {
|
||||
zfcp_erp_wait(unit->port->adapter);
|
||||
wait_event(unit->scsi_scan_wq,
|
||||
atomic_test_mask(ZFCP_STATUS_UNIT_SCSI_WORK_PENDING,
|
||||
&unit->status) == 0);
|
||||
atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status);
|
||||
sdpnt->hostdata = NULL;
|
||||
unit->device = NULL;
|
||||
@ -262,8 +258,9 @@ zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (unlikely(
|
||||
!atomic_test_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status))) {
|
||||
tmp = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, use_timer,
|
||||
ZFCP_REQ_AUTO_CLEANUP);
|
||||
if (unlikely(tmp == -EBUSY)) {
|
||||
ZFCP_LOG_DEBUG("adapter %s not ready or unit 0x%016Lx "
|
||||
"on port 0x%016Lx in recovery\n",
|
||||
zfcp_get_busid_by_unit(unit),
|
||||
@ -272,9 +269,6 @@ zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit,
|
||||
goto out;
|
||||
}
|
||||
|
||||
tmp = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, use_timer,
|
||||
ZFCP_REQ_AUTO_CLEANUP);
|
||||
|
||||
if (unlikely(tmp < 0)) {
|
||||
ZFCP_LOG_DEBUG("error: initiation of Send FCP Cmnd failed\n");
|
||||
retval = SCSI_MLQUEUE_HOST_BUSY;
|
||||
@ -459,7 +453,9 @@ zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
|
||||
retval = SUCCESS;
|
||||
goto out;
|
||||
}
|
||||
ZFCP_LOG_NORMAL("resetting unit 0x%016Lx\n", unit->fcp_lun);
|
||||
ZFCP_LOG_NORMAL("resetting unit 0x%016Lx on port 0x%016Lx, adapter %s\n",
|
||||
unit->fcp_lun, unit->port->wwpn,
|
||||
zfcp_get_busid_by_adapter(unit->port->adapter));
|
||||
|
||||
/*
|
||||
* If we do not know whether the unit supports 'logical unit reset'
|
||||
@ -542,7 +538,7 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
|
||||
}
|
||||
|
||||
/**
|
||||
* zfcp_scsi_eh_host_reset_handler - handler for host and bus reset
|
||||
* zfcp_scsi_eh_host_reset_handler - handler for host reset
|
||||
*/
|
||||
static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
|
||||
{
|
||||
@ -552,8 +548,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
|
||||
unit = (struct zfcp_unit*) scpnt->device->hostdata;
|
||||
adapter = unit->port->adapter;
|
||||
|
||||
ZFCP_LOG_NORMAL("host/bus reset because of problems with "
|
||||
"unit 0x%016Lx\n", unit->fcp_lun);
|
||||
ZFCP_LOG_NORMAL("host reset because of problems with "
|
||||
"unit 0x%016Lx on port 0x%016Lx, adapter %s\n",
|
||||
unit->fcp_lun, unit->port->wwpn,
|
||||
zfcp_get_busid_by_adapter(unit->port->adapter));
|
||||
|
||||
zfcp_erp_adapter_reopen(adapter, 0);
|
||||
zfcp_erp_wait(adapter);
|
||||
|
2
drivers/scsi/.gitignore
vendored
2
drivers/scsi/.gitignore
vendored
@ -1,3 +1 @@
|
||||
53c700_d.h
|
||||
53c7xx_d.h
|
||||
53c7xx_u.h
|
||||
|
@ -2010,6 +2010,7 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
|
||||
}
|
||||
|
||||
pci_set_master(pdev);
|
||||
pci_try_set_mwi(pdev);
|
||||
|
||||
if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)
|
||||
|| pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
|
||||
|
@ -608,7 +608,8 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
|
||||
scsi_print_sense("53c700", SCp);
|
||||
|
||||
#endif
|
||||
dma_unmap_single(hostdata->dev, slot->dma_handle, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
|
||||
dma_unmap_single(hostdata->dev, slot->dma_handle,
|
||||
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
|
||||
/* restore the old result if the request sense was
|
||||
* successful */
|
||||
if (result == 0)
|
||||
@ -1010,7 +1011,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
|
||||
cmnd[1] = (SCp->device->lun & 0x7) << 5;
|
||||
cmnd[2] = 0;
|
||||
cmnd[3] = 0;
|
||||
cmnd[4] = sizeof(SCp->sense_buffer);
|
||||
cmnd[4] = SCSI_SENSE_BUFFERSIZE;
|
||||
cmnd[5] = 0;
|
||||
/* Here's a quiet hack: the
|
||||
* REQUEST_SENSE command is six bytes,
|
||||
@ -1024,14 +1025,14 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
|
||||
SCp->cmd_len = 6; /* command length for
|
||||
* REQUEST_SENSE */
|
||||
slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
|
||||
slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
|
||||
slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));
|
||||
slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
|
||||
slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | SCSI_SENSE_BUFFERSIZE);
|
||||
slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
|
||||
slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
|
||||
slot->SG[1].pAddr = 0;
|
||||
slot->resume_offset = hostdata->pScript;
|
||||
dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
|
||||
dma_cache_sync(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
|
||||
dma_cache_sync(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
|
||||
|
||||
/* queue the command for reissue */
|
||||
slot->state = NCR_700_SLOT_QUEUED;
|
||||
|
@ -2947,7 +2947,7 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou
|
||||
}
|
||||
}
|
||||
memcpy(CCB->CDB, CDB, CDB_Length);
|
||||
CCB->SenseDataLength = sizeof(Command->sense_buffer);
|
||||
CCB->SenseDataLength = SCSI_SENSE_BUFFERSIZE;
|
||||
CCB->SenseDataPointer = pci_map_single(HostAdapter->PCI_Device, Command->sense_buffer, CCB->SenseDataLength, PCI_DMA_FROMDEVICE);
|
||||
CCB->Command = Command;
|
||||
Command->scsi_done = CompletionRoutine;
|
||||
|
@ -341,7 +341,7 @@ config ISCSI_TCP
|
||||
The userspace component needed to initialize the driver, documentation,
|
||||
and sample configuration files can be found here:
|
||||
|
||||
http://linux-iscsi.sf.net
|
||||
http://open-iscsi.org
|
||||
|
||||
config SGIWD93_SCSI
|
||||
tristate "SGI WD93C93 SCSI Driver"
|
||||
@ -573,10 +573,10 @@ config SCSI_ARCMSR_AER
|
||||
source "drivers/scsi/megaraid/Kconfig.megaraid"
|
||||
|
||||
config SCSI_HPTIOP
|
||||
tristate "HighPoint RocketRAID 3xxx Controller support"
|
||||
tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
|
||||
depends on SCSI && PCI
|
||||
help
|
||||
This option enables support for HighPoint RocketRAID 3xxx
|
||||
This option enables support for HighPoint RocketRAID 3xxx/4xxx
|
||||
controllers.
|
||||
|
||||
To compile this driver as a module, choose M here; the module
|
||||
@ -1288,17 +1288,6 @@ config SCSI_PAS16
|
||||
To compile this driver as a module, choose M here: the
|
||||
module will be called pas16.
|
||||
|
||||
config SCSI_PSI240I
|
||||
tristate "PSI240i support"
|
||||
depends on ISA && SCSI
|
||||
help
|
||||
This is support for the PSI240i EIDE interface card which acts as a
|
||||
SCSI host adapter. Please read the SCSI-HOWTO, available from
|
||||
<http://www.tldp.org/docs.html#howto>.
|
||||
|
||||
To compile this driver as a module, choose M here: the
|
||||
module will be called psi240i.
|
||||
|
||||
config SCSI_QLOGIC_FAS
|
||||
tristate "Qlogic FAS SCSI support"
|
||||
depends on ISA && SCSI
|
||||
@ -1359,21 +1348,6 @@ config SCSI_LPFC
|
||||
This lpfc driver supports the Emulex LightPulse
|
||||
Family of Fibre Channel PCI host adapters.
|
||||
|
||||
config SCSI_SEAGATE
|
||||
tristate "Seagate ST-02 and Future Domain TMC-8xx SCSI support"
|
||||
depends on X86 && ISA && SCSI
|
||||
select CHECK_SIGNATURE
|
||||
---help---
|
||||
These are 8-bit SCSI controllers; the ST-01 is also supported by
|
||||
this driver. It is explained in section 3.9 of the SCSI-HOWTO,
|
||||
available from <http://www.tldp.org/docs.html#howto>. If it
|
||||
doesn't work out of the box, you may have to change some macros at
|
||||
compiletime, which are described in <file:drivers/scsi/seagate.c>.
|
||||
|
||||
To compile this driver as a module, choose M here: the
|
||||
module will be called seagate.
|
||||
|
||||
# definitely looks not 64bit safe:
|
||||
config SCSI_SIM710
|
||||
tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
|
||||
depends on (EISA || MCA) && SCSI
|
||||
|
@ -16,9 +16,8 @@
|
||||
|
||||
CFLAGS_aha152x.o = -DAHA152X_STAT -DAUTOCONF
|
||||
CFLAGS_gdth.o = # -DDEBUG_GDTH=2 -D__SERIAL__ -D__COM2__ -DGDTH_STATISTICS
|
||||
CFLAGS_seagate.o = -DARBITRATE -DPARITY -DSEAGATE_USE_ASM
|
||||
|
||||
subdir-$(CONFIG_PCMCIA) += pcmcia
|
||||
obj-$(CONFIG_PCMCIA) += pcmcia/
|
||||
|
||||
obj-$(CONFIG_SCSI) += scsi_mod.o
|
||||
obj-$(CONFIG_SCSI_TGT) += scsi_tgt.o
|
||||
@ -59,7 +58,6 @@ obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o
|
||||
obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o
|
||||
obj-$(CONFIG_SCSI_SIM710) += 53c700.o sim710.o
|
||||
obj-$(CONFIG_SCSI_ADVANSYS) += advansys.o
|
||||
obj-$(CONFIG_SCSI_PSI240I) += psi240i.o
|
||||
obj-$(CONFIG_SCSI_BUSLOGIC) += BusLogic.o
|
||||
obj-$(CONFIG_SCSI_DPT_I2O) += dpt_i2o.o
|
||||
obj-$(CONFIG_SCSI_U14_34F) += u14-34f.o
|
||||
@ -90,7 +88,6 @@ obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
|
||||
obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/
|
||||
obj-$(CONFIG_SCSI_LPFC) += lpfc/
|
||||
obj-$(CONFIG_SCSI_PAS16) += pas16.o
|
||||
obj-$(CONFIG_SCSI_SEAGATE) += seagate.o
|
||||
obj-$(CONFIG_SCSI_T128) += t128.o
|
||||
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
|
||||
obj-$(CONFIG_SCSI_DTC3280) += dtc.o
|
||||
|
@ -295,16 +295,16 @@ static __inline__ void initialize_SCp(Scsi_Cmnd * cmd)
|
||||
* various queues are valid.
|
||||
*/
|
||||
|
||||
if (cmd->use_sg) {
|
||||
cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
|
||||
cmd->SCp.buffers_residual = cmd->use_sg - 1;
|
||||
if (scsi_bufflen(cmd)) {
|
||||
cmd->SCp.buffer = scsi_sglist(cmd);
|
||||
cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
|
||||
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
|
||||
cmd->SCp.this_residual = cmd->SCp.buffer->length;
|
||||
} else {
|
||||
cmd->SCp.buffer = NULL;
|
||||
cmd->SCp.buffers_residual = 0;
|
||||
cmd->SCp.ptr = (char *) cmd->request_buffer;
|
||||
cmd->SCp.this_residual = cmd->request_bufflen;
|
||||
cmd->SCp.ptr = NULL;
|
||||
cmd->SCp.this_residual = 0;
|
||||
}
|
||||
}
|
||||
|
||||
@ -932,7 +932,7 @@ static int __devinit NCR5380_init(struct Scsi_Host *instance, int flags)
|
||||
* @instance: adapter to remove
|
||||
*/
|
||||
|
||||
static void __devexit NCR5380_exit(struct Scsi_Host *instance)
|
||||
static void NCR5380_exit(struct Scsi_Host *instance)
|
||||
{
|
||||
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
|
||||
|
||||
@ -975,14 +975,14 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
|
||||
case WRITE_6:
|
||||
case WRITE_10:
|
||||
hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase);
|
||||
hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen;
|
||||
hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);
|
||||
hostdata->pendingw++;
|
||||
break;
|
||||
case READ:
|
||||
case READ_6:
|
||||
case READ_10:
|
||||
hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase);
|
||||
hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen;
|
||||
hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);
|
||||
hostdata->pendingr++;
|
||||
break;
|
||||
}
|
||||
@ -1157,16 +1157,17 @@ static void NCR5380_main(struct work_struct *work)
|
||||
* Locks: takes the needed instance locks
|
||||
*/
|
||||
|
||||
static irqreturn_t NCR5380_intr(int irq, void *dev_id)
|
||||
static irqreturn_t NCR5380_intr(int dummy, void *dev_id)
|
||||
{
|
||||
NCR5380_local_declare();
|
||||
struct Scsi_Host *instance = (struct Scsi_Host *)dev_id;
|
||||
struct Scsi_Host *instance = dev_id;
|
||||
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
|
||||
int done;
|
||||
unsigned char basr;
|
||||
unsigned long flags;
|
||||
|
||||
dprintk(NDEBUG_INTR, ("scsi : NCR5380 irq %d triggered\n", irq));
|
||||
dprintk(NDEBUG_INTR, ("scsi : NCR5380 irq %d triggered\n",
|
||||
instance->irq));
|
||||
|
||||
do {
|
||||
done = 1;
|
||||
|
@ -74,17 +74,8 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
|
||||
|
||||
if (!dir_in) {
|
||||
/* copy to bounce buffer for a write */
|
||||
if (cmd->use_sg)
|
||||
#if 0
|
||||
panic ("scsi%ddma: incomplete s/g support",
|
||||
instance->host_no);
|
||||
#else
|
||||
memcpy (HDATA(instance)->dma_bounce_buffer,
|
||||
cmd->SCp.ptr, cmd->SCp.this_residual);
|
||||
#endif
|
||||
else
|
||||
memcpy (HDATA(instance)->dma_bounce_buffer,
|
||||
cmd->request_buffer, cmd->request_bufflen);
|
||||
}
|
||||
}
|
||||
|
||||
@ -144,11 +135,6 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
|
||||
|
||||
/* copy from a bounce buffer, if necessary */
|
||||
if (status && HDATA(instance)->dma_bounce_buffer) {
|
||||
if (SCpnt && SCpnt->use_sg) {
|
||||
#if 0
|
||||
panic ("scsi%d: incomplete s/g support",
|
||||
instance->host_no);
|
||||
#else
|
||||
if( HDATA(instance)->dma_dir )
|
||||
memcpy (SCpnt->SCp.ptr,
|
||||
HDATA(instance)->dma_bounce_buffer,
|
||||
@ -156,18 +142,6 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
|
||||
kfree (HDATA(instance)->dma_bounce_buffer);
|
||||
HDATA(instance)->dma_bounce_buffer = NULL;
|
||||
HDATA(instance)->dma_bounce_len = 0;
|
||||
|
||||
#endif
|
||||
} else {
|
||||
if (HDATA(instance)->dma_dir && SCpnt)
|
||||
memcpy (SCpnt->request_buffer,
|
||||
HDATA(instance)->dma_bounce_buffer,
|
||||
SCpnt->request_bufflen);
|
||||
|
||||
kfree (HDATA(instance)->dma_bounce_buffer);
|
||||
HDATA(instance)->dma_bounce_buffer = NULL;
|
||||
HDATA(instance)->dma_bounce_len = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -70,12 +70,8 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
|
||||
|
||||
if (!dir_in) {
|
||||
/* copy to bounce buffer for a write */
|
||||
if (cmd->use_sg) {
|
||||
memcpy (HDATA(a3000_host)->dma_bounce_buffer,
|
||||
cmd->SCp.ptr, cmd->SCp.this_residual);
|
||||
} else
|
||||
memcpy (HDATA(a3000_host)->dma_bounce_buffer,
|
||||
cmd->request_buffer, cmd->request_bufflen);
|
||||
}
|
||||
|
||||
addr = virt_to_bus(HDATA(a3000_host)->dma_bounce_buffer);
|
||||
@ -146,7 +142,7 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
|
||||
|
||||
/* copy from a bounce buffer, if necessary */
|
||||
if (status && HDATA(instance)->dma_bounce_buffer) {
|
||||
if (SCpnt && SCpnt->use_sg) {
|
||||
if (SCpnt) {
|
||||
if (HDATA(instance)->dma_dir && SCpnt)
|
||||
memcpy (SCpnt->SCp.ptr,
|
||||
HDATA(instance)->dma_bounce_buffer,
|
||||
@ -155,11 +151,6 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
|
||||
HDATA(instance)->dma_bounce_buffer = NULL;
|
||||
HDATA(instance)->dma_bounce_len = 0;
|
||||
} else {
|
||||
if (HDATA(instance)->dma_dir && SCpnt)
|
||||
memcpy (SCpnt->request_buffer,
|
||||
HDATA(instance)->dma_bounce_buffer,
|
||||
SCpnt->request_bufflen);
|
||||
|
||||
kfree (HDATA(instance)->dma_bounce_buffer);
|
||||
HDATA(instance)->dma_bounce_buffer = NULL;
|
||||
HDATA(instance)->dma_bounce_len = 0;
|
||||
|
@ -31,9 +31,9 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <asm/semaphore.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <linux/highmem.h> /* For flush_kernel_dcache_page */
|
||||
|
||||
#include <scsi/scsi.h>
|
||||
#include <scsi/scsi_cmnd.h>
|
||||
@ -144,6 +144,7 @@ static char *aac_get_status_string(u32 status);
|
||||
*/
|
||||
|
||||
static int nondasd = -1;
|
||||
static int aac_cache = 0;
|
||||
static int dacmode = -1;
|
||||
|
||||
int aac_commit = -1;
|
||||
@ -152,6 +153,8 @@ int aif_timeout = 120;
|
||||
|
||||
module_param(nondasd, int, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
|
||||
module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n\tbit 0 - Disable FUA in WRITE SCSI commands\n\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n\tbit 2 - Disable only if Battery not protecting Cache");
|
||||
module_param(dacmode, int, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC. 0=off, 1=on");
|
||||
module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR);
|
||||
@ -179,7 +182,7 @@ MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health che
|
||||
|
||||
int aac_check_reset = 1;
|
||||
module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR);
|
||||
MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the adapter.");
|
||||
MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the adapter. a value of -1 forces the reset to adapters programmed to ignore it.");
|
||||
|
||||
int expose_physicals = -1;
|
||||
module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
|
||||
@ -193,7 +196,7 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
|
||||
struct fib *fibptr) {
|
||||
struct scsi_device *device;
|
||||
|
||||
if (unlikely(!scsicmd || !scsicmd->scsi_done )) {
|
||||
if (unlikely(!scsicmd || !scsicmd->scsi_done)) {
|
||||
dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
|
||||
aac_fib_complete(fibptr);
|
||||
aac_fib_free(fibptr);
|
||||
@ -240,7 +243,7 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag)
|
||||
FsaNormal,
|
||||
1, 1,
|
||||
NULL, NULL);
|
||||
if (status < 0 ) {
|
||||
if (status < 0) {
|
||||
printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n");
|
||||
} else {
|
||||
struct aac_get_config_status_resp *reply
|
||||
@ -363,6 +366,7 @@ static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigne
|
||||
if (buf && transfer_len > 0)
|
||||
memcpy(buf + offset, data, transfer_len);
|
||||
|
||||
flush_kernel_dcache_page(kmap_atomic_to_page(buf - sg->offset));
|
||||
kunmap_atomic(buf - sg->offset, KM_IRQ0);
|
||||
|
||||
}
|
||||
@ -435,7 +439,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
|
||||
sizeof (struct aac_get_name),
|
||||
FsaNormal,
|
||||
0, 1,
|
||||
(fib_callback) get_container_name_callback,
|
||||
(fib_callback)get_container_name_callback,
|
||||
(void *) scsicmd);
|
||||
|
||||
/*
|
||||
@ -659,7 +663,7 @@ struct scsi_inq {
|
||||
static void inqstrcpy(char *a, char *b)
|
||||
{
|
||||
|
||||
while(*a != (char)0)
|
||||
while (*a != (char)0)
|
||||
*b++ = *a++;
|
||||
}
|
||||
|
||||
@ -687,7 +691,12 @@ static char *container_types[] = {
|
||||
"Unknown"
|
||||
};
|
||||
|
||||
|
||||
char * get_container_type(unsigned tindex)
|
||||
{
|
||||
if (tindex >= ARRAY_SIZE(container_types))
|
||||
tindex = ARRAY_SIZE(container_types) - 1;
|
||||
return container_types[tindex];
|
||||
}
|
||||
|
||||
/* Function: setinqstr
|
||||
*
|
||||
@ -707,7 +716,11 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
|
||||
|
||||
if (dev->supplement_adapter_info.AdapterTypeText[0]) {
|
||||
char * cp = dev->supplement_adapter_info.AdapterTypeText;
|
||||
int c = sizeof(str->vid);
|
||||
int c;
|
||||
if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
|
||||
inqstrcpy("SMC", str->vid);
|
||||
else {
|
||||
c = sizeof(str->vid);
|
||||
while (*cp && *cp != ' ' && --c)
|
||||
++cp;
|
||||
c = *cp;
|
||||
@ -717,6 +730,7 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
|
||||
*cp = c;
|
||||
while (*cp && *cp != ' ')
|
||||
++cp;
|
||||
}
|
||||
while (*cp == ' ')
|
||||
++cp;
|
||||
/* last six chars reserved for vol type */
|
||||
@ -898,9 +912,8 @@ static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
|
||||
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
|
||||
0, 0);
|
||||
memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
|
||||
(sizeof(dev->fsa_dev[cid].sense_data) > sizeof(cmd->sense_buffer))
|
||||
? sizeof(cmd->sense_buffer)
|
||||
: sizeof(dev->fsa_dev[cid].sense_data));
|
||||
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
|
||||
SCSI_SENSE_BUFFERSIZE));
|
||||
cmd->scsi_done(cmd);
|
||||
return 1;
|
||||
}
|
||||
@ -981,7 +994,7 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
|
||||
aac_fib_init(fib);
|
||||
readcmd = (struct aac_read *) fib_data(fib);
|
||||
readcmd->command = cpu_to_le32(VM_CtBlockRead);
|
||||
readcmd->cid = cpu_to_le16(scmd_id(cmd));
|
||||
readcmd->cid = cpu_to_le32(scmd_id(cmd));
|
||||
readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
|
||||
readcmd->count = cpu_to_le32(count * 512);
|
||||
|
||||
@ -1013,7 +1026,8 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
|
||||
writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
|
||||
writecmd->count = cpu_to_le32(count<<9);
|
||||
writecmd->cid = cpu_to_le16(scmd_id(cmd));
|
||||
writecmd->flags = fua ?
|
||||
writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
|
||||
(((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
|
||||
cpu_to_le16(IO_TYPE_WRITE|IO_SUREWRITE) :
|
||||
cpu_to_le16(IO_TYPE_WRITE);
|
||||
writecmd->bpTotal = 0;
|
||||
@ -1072,7 +1086,7 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
|
||||
aac_fib_init(fib);
|
||||
writecmd = (struct aac_write *) fib_data(fib);
|
||||
writecmd->command = cpu_to_le32(VM_CtBlockWrite);
|
||||
writecmd->cid = cpu_to_le16(scmd_id(cmd));
|
||||
writecmd->cid = cpu_to_le32(scmd_id(cmd));
|
||||
writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
|
||||
writecmd->count = cpu_to_le32(count * 512);
|
||||
writecmd->sg.count = cpu_to_le32(1);
|
||||
@ -1190,6 +1204,15 @@ static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
|
||||
(fib_callback) aac_srb_callback, (void *) cmd);
|
||||
}
|
||||
|
||||
static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd)
|
||||
{
|
||||
if ((sizeof(dma_addr_t) > 4) &&
|
||||
(num_physpages > (0xFFFFFFFFULL >> PAGE_SHIFT)) &&
|
||||
(fib->dev->adapter_info.options & AAC_OPT_SGMAP_HOST64))
|
||||
return FAILED;
|
||||
return aac_scsi_32(fib, cmd);
|
||||
}
|
||||
|
||||
int aac_get_adapter_info(struct aac_dev* dev)
|
||||
{
|
||||
struct fib* fibptr;
|
||||
@ -1222,24 +1245,24 @@ int aac_get_adapter_info(struct aac_dev* dev)
|
||||
memcpy(&dev->adapter_info, info, sizeof(*info));
|
||||
|
||||
if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
|
||||
struct aac_supplement_adapter_info * info;
|
||||
struct aac_supplement_adapter_info * sinfo;
|
||||
|
||||
aac_fib_init(fibptr);
|
||||
|
||||
info = (struct aac_supplement_adapter_info *) fib_data(fibptr);
|
||||
sinfo = (struct aac_supplement_adapter_info *) fib_data(fibptr);
|
||||
|
||||
memset(info,0,sizeof(*info));
|
||||
memset(sinfo,0,sizeof(*sinfo));
|
||||
|
||||
rcode = aac_fib_send(RequestSupplementAdapterInfo,
|
||||
fibptr,
|
||||
sizeof(*info),
|
||||
sizeof(*sinfo),
|
||||
FsaNormal,
|
||||
1, 1,
|
||||
NULL,
|
||||
NULL);
|
||||
|
||||
if (rcode >= 0)
|
||||
memcpy(&dev->supplement_adapter_info, info, sizeof(*info));
|
||||
memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo));
|
||||
}
|
||||
|
||||
|
||||
@ -1267,6 +1290,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
|
||||
1, 1,
|
||||
NULL, NULL);
|
||||
|
||||
/* reasoned default */
|
||||
dev->maximum_num_physicals = 16;
|
||||
if (rcode >= 0 && le32_to_cpu(bus_info->Status) == ST_OK) {
|
||||
dev->maximum_num_physicals = le32_to_cpu(bus_info->TargetsPerBus);
|
||||
dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
|
||||
@ -1305,19 +1330,21 @@ int aac_get_adapter_info(struct aac_dev* dev)
|
||||
(int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid),
|
||||
dev->supplement_adapter_info.VpdInfo.Tsid);
|
||||
}
|
||||
if (!aac_check_reset ||
|
||||
if (!aac_check_reset || ((aac_check_reset != 1) &&
|
||||
(dev->supplement_adapter_info.SupportedOptions2 &
|
||||
le32_to_cpu(AAC_OPTION_IGNORE_RESET))) {
|
||||
AAC_OPTION_IGNORE_RESET))) {
|
||||
printk(KERN_INFO "%s%d: Reset Adapter Ignored\n",
|
||||
dev->name, dev->id);
|
||||
}
|
||||
}
|
||||
|
||||
dev->cache_protected = 0;
|
||||
dev->jbod = ((dev->supplement_adapter_info.FeatureBits &
|
||||
AAC_FEATURE_JBOD) != 0);
|
||||
dev->nondasd_support = 0;
|
||||
dev->raid_scsi_mode = 0;
|
||||
if(dev->adapter_info.options & AAC_OPT_NONDASD){
|
||||
if(dev->adapter_info.options & AAC_OPT_NONDASD)
|
||||
dev->nondasd_support = 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the firmware supports ROMB RAID/SCSI mode and we are currently
|
||||
@ -1339,10 +1366,9 @@ int aac_get_adapter_info(struct aac_dev* dev)
|
||||
printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",
|
||||
dev->name, dev->id);
|
||||
|
||||
if(nondasd != -1) {
|
||||
if (nondasd != -1)
|
||||
dev->nondasd_support = (nondasd!=0);
|
||||
}
|
||||
if(dev->nondasd_support != 0){
|
||||
if(dev->nondasd_support != 0) {
|
||||
printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
|
||||
}
|
||||
|
||||
@ -1376,7 +1402,9 @@ int aac_get_adapter_info(struct aac_dev* dev)
|
||||
* interface.
|
||||
*/
|
||||
dev->a_ops.adapter_scsi = (dev->dac_support)
|
||||
? aac_scsi_64
|
||||
? ((aac_get_driver_ident(dev->cardtype)->quirks & AAC_QUIRK_SCSI_32)
|
||||
? aac_scsi_32_64
|
||||
: aac_scsi_64)
|
||||
: aac_scsi_32;
|
||||
if (dev->raw_io_interface) {
|
||||
dev->a_ops.adapter_bounds = (dev->raw_io_64)
|
||||
@ -1498,9 +1526,8 @@ static void io_callback(void *context, struct fib * fibptr)
|
||||
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
|
||||
0, 0);
|
||||
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
|
||||
(sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
|
||||
? sizeof(scsicmd->sense_buffer)
|
||||
: sizeof(dev->fsa_dev[cid].sense_data));
|
||||
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
|
||||
SCSI_SENSE_BUFFERSIZE));
|
||||
}
|
||||
aac_fib_complete(fibptr);
|
||||
aac_fib_free(fibptr);
|
||||
@ -1712,8 +1739,8 @@ static void synchronize_callback(void *context, struct fib *fibptr)
|
||||
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
|
||||
0, 0);
|
||||
memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
|
||||
min(sizeof(dev->fsa_dev[cid].sense_data),
|
||||
sizeof(cmd->sense_buffer)));
|
||||
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
|
||||
SCSI_SENSE_BUFFERSIZE));
|
||||
}
|
||||
|
||||
aac_fib_complete(fibptr);
|
||||
@ -1798,7 +1825,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
|
||||
if (active)
|
||||
return SCSI_MLQUEUE_DEVICE_BUSY;
|
||||
|
||||
aac = (struct aac_dev *)scsicmd->device->host->hostdata;
|
||||
aac = (struct aac_dev *)sdev->host->hostdata;
|
||||
if (aac->in_reset)
|
||||
return SCSI_MLQUEUE_HOST_BUSY;
|
||||
|
||||
@ -1898,7 +1925,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
}
|
||||
}
|
||||
} else { /* check for physical non-dasd devices */
|
||||
if ((dev->nondasd_support == 1) || expose_physicals) {
|
||||
if (dev->nondasd_support || expose_physicals ||
|
||||
dev->jbod) {
|
||||
if (dev->in_reset)
|
||||
return -1;
|
||||
return aac_send_srb_fib(scsicmd);
|
||||
@ -1922,9 +1950,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
SENCODE_INVALID_COMMAND,
|
||||
ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
|
||||
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
|
||||
(sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
|
||||
? sizeof(scsicmd->sense_buffer)
|
||||
: sizeof(dev->fsa_dev[cid].sense_data));
|
||||
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
|
||||
SCSI_SENSE_BUFFERSIZE));
|
||||
scsicmd->scsi_done(scsicmd);
|
||||
return 0;
|
||||
}
|
||||
@ -1939,7 +1966,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid));
|
||||
memset(&inq_data, 0, sizeof (struct inquiry_data));
|
||||
|
||||
if (scsicmd->cmnd[1] & 0x1 ) {
|
||||
if (scsicmd->cmnd[1] & 0x1) {
|
||||
char *arr = (char *)&inq_data;
|
||||
|
||||
/* EVPD bit set */
|
||||
@ -1974,10 +2001,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
ASENCODE_NO_SENSE, 0, 7, 2, 0);
|
||||
memcpy(scsicmd->sense_buffer,
|
||||
&dev->fsa_dev[cid].sense_data,
|
||||
(sizeof(dev->fsa_dev[cid].sense_data) >
|
||||
sizeof(scsicmd->sense_buffer))
|
||||
? sizeof(scsicmd->sense_buffer)
|
||||
: sizeof(dev->fsa_dev[cid].sense_data));
|
||||
min_t(size_t,
|
||||
sizeof(dev->fsa_dev[cid].sense_data),
|
||||
SCSI_SENSE_BUFFERSIZE));
|
||||
}
|
||||
scsicmd->scsi_done(scsicmd);
|
||||
return 0;
|
||||
@ -2092,7 +2118,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
mode_buf[2] = 0; /* Device-specific param,
|
||||
bit 8: 0/1 = write enabled/protected
|
||||
bit 4: 0/1 = FUA enabled */
|
||||
if (dev->raw_io_interface)
|
||||
if (dev->raw_io_interface && ((aac_cache & 5) != 1))
|
||||
mode_buf[2] = 0x10;
|
||||
mode_buf[3] = 0; /* Block descriptor length */
|
||||
if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
|
||||
@ -2100,7 +2126,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
mode_buf[0] = 6;
|
||||
mode_buf[4] = 8;
|
||||
mode_buf[5] = 1;
|
||||
mode_buf[6] = 0x04; /* WCE */
|
||||
mode_buf[6] = ((aac_cache & 6) == 2)
|
||||
? 0 : 0x04; /* WCE */
|
||||
mode_buf_length = 7;
|
||||
if (mode_buf_length > scsicmd->cmnd[4])
|
||||
mode_buf_length = scsicmd->cmnd[4];
|
||||
@ -2123,7 +2150,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
mode_buf[3] = 0; /* Device-specific param,
|
||||
bit 8: 0/1 = write enabled/protected
|
||||
bit 4: 0/1 = FUA enabled */
|
||||
if (dev->raw_io_interface)
|
||||
if (dev->raw_io_interface && ((aac_cache & 5) != 1))
|
||||
mode_buf[3] = 0x10;
|
||||
mode_buf[4] = 0; /* reserved */
|
||||
mode_buf[5] = 0; /* reserved */
|
||||
@ -2134,7 +2161,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
mode_buf[1] = 9;
|
||||
mode_buf[8] = 8;
|
||||
mode_buf[9] = 1;
|
||||
mode_buf[10] = 0x04; /* WCE */
|
||||
mode_buf[10] = ((aac_cache & 6) == 2)
|
||||
? 0 : 0x04; /* WCE */
|
||||
mode_buf_length = 11;
|
||||
if (mode_buf_length > scsicmd->cmnd[8])
|
||||
mode_buf_length = scsicmd->cmnd[8];
|
||||
@ -2210,9 +2238,16 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
return aac_write(scsicmd);
|
||||
|
||||
case SYNCHRONIZE_CACHE:
|
||||
if (((aac_cache & 6) == 6) && dev->cache_protected) {
|
||||
scsicmd->result = DID_OK << 16 |
|
||||
COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
|
||||
scsicmd->scsi_done(scsicmd);
|
||||
return 0;
|
||||
}
|
||||
/* Issue FIB to tell Firmware to flush it's cache */
|
||||
if ((aac_cache & 6) != 2)
|
||||
return aac_synchronize(scsicmd);
|
||||
|
||||
/* FALLTHRU */
|
||||
default:
|
||||
/*
|
||||
* Unhandled commands
|
||||
@ -2223,9 +2258,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
|
||||
ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
|
||||
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
|
||||
(sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
|
||||
? sizeof(scsicmd->sense_buffer)
|
||||
: sizeof(dev->fsa_dev[cid].sense_data));
|
||||
min_t(size_t,
|
||||
sizeof(dev->fsa_dev[cid].sense_data),
|
||||
SCSI_SENSE_BUFFERSIZE));
|
||||
scsicmd->scsi_done(scsicmd);
|
||||
return 0;
|
||||
}
|
||||
@ -2385,10 +2420,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
|
||||
if (le32_to_cpu(srbreply->status) != ST_OK){
|
||||
int len;
|
||||
printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
|
||||
len = (le32_to_cpu(srbreply->sense_data_size) >
|
||||
sizeof(scsicmd->sense_buffer)) ?
|
||||
sizeof(scsicmd->sense_buffer) :
|
||||
le32_to_cpu(srbreply->sense_data_size);
|
||||
len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
|
||||
SCSI_SENSE_BUFFERSIZE);
|
||||
scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
|
||||
memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
|
||||
}
|
||||
@ -2412,7 +2445,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
|
||||
case WRITE_12:
|
||||
case READ_16:
|
||||
case WRITE_16:
|
||||
if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
|
||||
if (le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow) {
|
||||
printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
|
||||
} else {
|
||||
printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
|
||||
@ -2488,19 +2521,16 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
|
||||
scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
|
||||
break;
|
||||
}
|
||||
if (le32_to_cpu(srbreply->scsi_status) == 0x02 ){ // Check Condition
|
||||
if (le32_to_cpu(srbreply->scsi_status) == SAM_STAT_CHECK_CONDITION) {
|
||||
int len;
|
||||
scsicmd->result |= SAM_STAT_CHECK_CONDITION;
|
||||
len = (le32_to_cpu(srbreply->sense_data_size) >
|
||||
sizeof(scsicmd->sense_buffer)) ?
|
||||
sizeof(scsicmd->sense_buffer) :
|
||||
le32_to_cpu(srbreply->sense_data_size);
|
||||
len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
|
||||
SCSI_SENSE_BUFFERSIZE);
|
||||
#ifdef AAC_DETAILED_STATUS_INFO
|
||||
printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
|
||||
le32_to_cpu(srbreply->status), len);
|
||||
#endif
|
||||
memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
|
||||
|
||||
}
|
||||
/*
|
||||
* OR in the scsi status (already shifted up a bit)
|
||||
|
@ -1,4 +1,4 @@
|
||||
#if (!defined(dprintk))
|
||||
#ifndef dprintk
|
||||
# define dprintk(x)
|
||||
#endif
|
||||
/* eg: if (nblank(dprintk(x))) */
|
||||
@ -12,7 +12,7 @@
|
||||
*----------------------------------------------------------------------------*/
|
||||
|
||||
#ifndef AAC_DRIVER_BUILD
|
||||
# define AAC_DRIVER_BUILD 2449
|
||||
# define AAC_DRIVER_BUILD 2455
|
||||
# define AAC_DRIVER_BRANCH "-ms"
|
||||
#endif
|
||||
#define MAXIMUM_NUM_CONTAINERS 32
|
||||
@ -349,10 +349,6 @@ struct hw_fib {
|
||||
#define ContainerCommand 500
|
||||
#define ContainerCommand64 501
|
||||
#define ContainerRawIo 502
|
||||
/*
|
||||
* Cluster Commands
|
||||
*/
|
||||
#define ClusterCommand 550
|
||||
/*
|
||||
* Scsi Port commands (scsi passthrough)
|
||||
*/
|
||||
@ -520,6 +516,12 @@ struct aac_driver_ident
|
||||
*/
|
||||
#define AAC_QUIRK_17SG 0x0010
|
||||
|
||||
/*
|
||||
* Some adapter firmware does not support 64 bit scsi passthrough
|
||||
* commands.
|
||||
*/
|
||||
#define AAC_QUIRK_SCSI_32 0x0020
|
||||
|
||||
/*
|
||||
* The adapter interface specs all queues to be located in the same
|
||||
* physically contigous block. The host structure that defines the
|
||||
@ -863,9 +865,10 @@ struct aac_supplement_adapter_info
|
||||
__le32 SupportedOptions2;
|
||||
__le32 ReservedGrowth[1];
|
||||
};
|
||||
#define AAC_FEATURE_FALCON 0x00000010
|
||||
#define AAC_OPTION_MU_RESET 0x00000001
|
||||
#define AAC_OPTION_IGNORE_RESET 0x00000002
|
||||
#define AAC_FEATURE_FALCON cpu_to_le32(0x00000010)
|
||||
#define AAC_FEATURE_JBOD cpu_to_le32(0x08000000)
|
||||
#define AAC_OPTION_MU_RESET cpu_to_le32(0x00000001)
|
||||
#define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002)
|
||||
#define AAC_SIS_VERSION_V3 3
|
||||
#define AAC_SIS_SLOT_UNKNOWN 0xFF
|
||||
|
||||
@ -988,7 +991,7 @@ struct aac_dev
|
||||
/*
|
||||
* The following is the device specific extension.
|
||||
*/
|
||||
#if (!defined(AAC_MIN_FOOTPRINT_SIZE))
|
||||
#ifndef AAC_MIN_FOOTPRINT_SIZE
|
||||
# define AAC_MIN_FOOTPRINT_SIZE 8192
|
||||
#endif
|
||||
union
|
||||
@ -1010,6 +1013,8 @@ struct aac_dev
|
||||
* lets break them out so we don't have to do an AND to check them
|
||||
*/
|
||||
u8 nondasd_support;
|
||||
u8 jbod;
|
||||
u8 cache_protected;
|
||||
u8 dac_support;
|
||||
u8 raid_scsi_mode;
|
||||
u8 comm_interface;
|
||||
@ -1066,6 +1071,7 @@ struct aac_dev
|
||||
(dev)->a_ops.adapter_comm(dev, comm)
|
||||
|
||||
#define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001)
|
||||
#define FIB_CONTEXT_FLAG (0x00000002)
|
||||
|
||||
/*
|
||||
* Define the command values
|
||||
@ -1764,11 +1770,17 @@ extern struct aac_common aac_config;
|
||||
#define AifEnConfigChange 3 /* Adapter configuration change */
|
||||
#define AifEnContainerChange 4 /* Container configuration change */
|
||||
#define AifEnDeviceFailure 5 /* SCSI device failed */
|
||||
#define AifEnEnclosureManagement 13 /* EM_DRIVE_* */
|
||||
#define EM_DRIVE_INSERTION 31
|
||||
#define EM_DRIVE_REMOVAL 32
|
||||
#define AifEnBatteryEvent 14 /* Change in Battery State */
|
||||
#define AifEnAddContainer 15 /* A new array was created */
|
||||
#define AifEnDeleteContainer 16 /* A container was deleted */
|
||||
#define AifEnExpEvent 23 /* Firmware Event Log */
|
||||
#define AifExeFirmwarePanic 3 /* Firmware Event Panic */
|
||||
#define AifHighPriority 3 /* Highest Priority Event */
|
||||
#define AifEnAddJBOD 30 /* JBOD created */
|
||||
#define AifEnDeleteJBOD 31 /* JBOD deleted */
|
||||
|
||||
#define AifCmdJobProgress 2 /* Progress report */
|
||||
#define AifJobCtrZero 101 /* Array Zero progress */
|
||||
@ -1861,6 +1873,7 @@ int aac_probe_container(struct aac_dev *dev, int cid);
|
||||
int _aac_rx_init(struct aac_dev *dev);
|
||||
int aac_rx_select_comm(struct aac_dev *dev, int comm);
|
||||
int aac_rx_deliver_producer(struct fib * fib);
|
||||
char * get_container_type(unsigned type);
|
||||
extern int numacb;
|
||||
extern int acbsize;
|
||||
extern char aac_driver_version[];
|
||||
|
@ -243,6 +243,7 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
|
||||
* Search the list of AdapterFibContext addresses on the adapter
|
||||
* to be sure this is a valid address
|
||||
*/
|
||||
spin_lock_irqsave(&dev->fib_lock, flags);
|
||||
entry = dev->fib_list.next;
|
||||
fibctx = NULL;
|
||||
|
||||
@ -258,24 +259,24 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
|
||||
fibctx = NULL;
|
||||
}
|
||||
if (!fibctx) {
|
||||
spin_unlock_irqrestore(&dev->fib_lock, flags);
|
||||
dprintk ((KERN_INFO "Fib Context not found\n"));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
|
||||
(fibctx->size != sizeof(struct aac_fib_context))) {
|
||||
spin_unlock_irqrestore(&dev->fib_lock, flags);
|
||||
dprintk ((KERN_INFO "Fib Context corrupt?\n"));
|
||||
return -EINVAL;
|
||||
}
|
||||
status = 0;
|
||||
spin_lock_irqsave(&dev->fib_lock, flags);
|
||||
/*
|
||||
* If there are no fibs to send back, then either wait or return
|
||||
* -EAGAIN
|
||||
*/
|
||||
return_fib:
|
||||
if (!list_empty(&fibctx->fib_list)) {
|
||||
struct list_head * entry;
|
||||
/*
|
||||
* Pull the next fib from the fibs
|
||||
*/
|
||||
@ -327,7 +328,9 @@ return_fib:
|
||||
int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
|
||||
{
|
||||
struct fib *fib;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev->fib_lock, flags);
|
||||
/*
|
||||
* First free any FIBs that have not been consumed.
|
||||
*/
|
||||
@ -350,6 +353,7 @@ int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
|
||||
* Remove the Context from the AdapterFibContext List
|
||||
*/
|
||||
list_del(&fibctx->next);
|
||||
spin_unlock_irqrestore(&dev->fib_lock, flags);
|
||||
/*
|
||||
* Invalidate context
|
||||
*/
|
||||
@ -431,7 +435,7 @@ static int check_revision(struct aac_dev *dev, void __user *arg)
|
||||
version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
|
||||
version += simple_strtol(driver_version + 1, NULL, 10);
|
||||
response.version = cpu_to_le32(version);
|
||||
# if (defined(AAC_DRIVER_BUILD))
|
||||
# ifdef AAC_DRIVER_BUILD
|
||||
response.build = cpu_to_le32(AAC_DRIVER_BUILD);
|
||||
# else
|
||||
response.build = cpu_to_le32(9999);
|
||||
@ -582,7 +586,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
|
||||
void* p;
|
||||
/* Does this really need to be GFP_DMA? */
|
||||
p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
|
||||
if(p == 0) {
|
||||
if(!p) {
|
||||
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
|
||||
upsg->sg[i].count,i,upsg->count));
|
||||
rcode = -ENOMEM;
|
||||
@ -594,7 +598,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
|
||||
sg_list[i] = p; // save so we can clean up later
|
||||
sg_indx = i;
|
||||
|
||||
if( flags & SRB_DataOut ){
|
||||
if (flags & SRB_DataOut) {
|
||||
if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
|
||||
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
|
||||
rcode = -EFAULT;
|
||||
@ -626,7 +630,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
|
||||
void* p;
|
||||
/* Does this really need to be GFP_DMA? */
|
||||
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
|
||||
if(p == 0) {
|
||||
if(!p) {
|
||||
kfree (usg);
|
||||
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
|
||||
usg->sg[i].count,i,usg->count));
|
||||
@ -637,7 +641,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
|
||||
sg_list[i] = p; // save so we can clean up later
|
||||
sg_indx = i;
|
||||
|
||||
if( flags & SRB_DataOut ){
|
||||
if (flags & SRB_DataOut) {
|
||||
if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
|
||||
kfree (usg);
|
||||
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
|
||||
@ -668,7 +672,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
|
||||
void* p;
|
||||
/* Does this really need to be GFP_DMA? */
|
||||
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
|
||||
if(p == 0) {
|
||||
if(!p) {
|
||||
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
|
||||
usg->sg[i].count,i,usg->count));
|
||||
rcode = -ENOMEM;
|
||||
@ -680,7 +684,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
|
||||
sg_list[i] = p; // save so we can clean up later
|
||||
sg_indx = i;
|
||||
|
||||
if( flags & SRB_DataOut ){
|
||||
if (flags & SRB_DataOut) {
|
||||
if(copy_from_user(p,sg_user[i],usg->sg[i].count)){
|
||||
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
|
||||
rcode = -EFAULT;
|
||||
@ -698,7 +702,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
|
||||
dma_addr_t addr;
|
||||
void* p;
|
||||
p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
|
||||
if(p == 0) {
|
||||
if (!p) {
|
||||
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
|
||||
upsg->sg[i].count, i, upsg->count));
|
||||
rcode = -ENOMEM;
|
||||
@ -708,7 +712,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
|
||||
sg_list[i] = p; // save so we can clean up later
|
||||
sg_indx = i;
|
||||
|
||||
if( flags & SRB_DataOut ){
|
||||
if (flags & SRB_DataOut) {
|
||||
if(copy_from_user(p, sg_user[i],
|
||||
upsg->sg[i].count)) {
|
||||
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
|
||||
@ -739,7 +743,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
if( flags & SRB_DataIn ) {
|
||||
if (flags & SRB_DataIn) {
|
||||
for(i = 0 ; i <= sg_indx; i++){
|
||||
byte_count = le32_to_cpu(
|
||||
(dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)
|
||||
|
@ -301,10 +301,10 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
|
||||
if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
|
||||
0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
|
||||
(status[0] == 0x00000001)) {
|
||||
if (status[1] & AAC_OPT_NEW_COMM_64)
|
||||
if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
|
||||
dev->raw_io_64 = 1;
|
||||
if (dev->a_ops.adapter_comm &&
|
||||
(status[1] & AAC_OPT_NEW_COMM))
|
||||
(status[1] & le32_to_cpu(AAC_OPT_NEW_COMM)))
|
||||
dev->comm_interface = AAC_COMM_MESSAGE;
|
||||
if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
|
||||
(status[2] > dev->base_size)) {
|
||||
|
@ -116,7 +116,9 @@ int aac_fib_setup(struct aac_dev * dev)
|
||||
/*
|
||||
* Initialise the fibs
|
||||
*/
|
||||
for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
|
||||
for (i = 0, fibptr = &dev->fibs[i];
|
||||
i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
|
||||
i++, fibptr++)
|
||||
{
|
||||
fibptr->dev = dev;
|
||||
fibptr->hw_fib_va = hw_fib;
|
||||
@ -171,6 +173,7 @@ struct fib *aac_fib_alloc(struct aac_dev *dev)
|
||||
* each I/O
|
||||
*/
|
||||
fibptr->hw_fib_va->header.XferState = 0;
|
||||
fibptr->flags = 0;
|
||||
fibptr->callback = NULL;
|
||||
fibptr->callback_data = NULL;
|
||||
|
||||
@ -290,7 +293,8 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
|
||||
*index = 0; /* Wrap to front of the Producer Queue. */
|
||||
}
|
||||
|
||||
if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
|
||||
/* Queue is full */
|
||||
if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
|
||||
printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
|
||||
qid, q->numpending);
|
||||
return 0;
|
||||
@ -323,8 +327,7 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
|
||||
|
||||
if (qid == AdapNormCmdQueue) {
|
||||
/* if no entries wait for some if caller wants to */
|
||||
while (!aac_get_entry(dev, qid, &entry, index, nonotify))
|
||||
{
|
||||
while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
|
||||
printk(KERN_ERR "GetEntries failed\n");
|
||||
}
|
||||
/*
|
||||
@ -333,8 +336,7 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
|
||||
entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
|
||||
map = 1;
|
||||
} else {
|
||||
while(!aac_get_entry(dev, qid, &entry, index, nonotify))
|
||||
{
|
||||
while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
|
||||
/* if no entries wait for some if caller wants to */
|
||||
}
|
||||
/*
|
||||
@ -402,6 +404,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
|
||||
* will have a debug mode where the adapter can notify the host
|
||||
* it had a problem and the host can log that fact.
|
||||
*/
|
||||
fibptr->flags = 0;
|
||||
if (wait && !reply) {
|
||||
return -EINVAL;
|
||||
} else if (!wait && reply) {
|
||||
@ -450,10 +453,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
|
||||
if (!wait) {
|
||||
fibptr->callback = callback;
|
||||
fibptr->callback_data = callback_data;
|
||||
fibptr->flags = FIB_CONTEXT_FLAG;
|
||||
}
|
||||
|
||||
fibptr->done = 0;
|
||||
fibptr->flags = 0;
|
||||
|
||||
FIB_COUNTER_INCREMENT(aac_config.FibsSent);
|
||||
|
||||
@ -635,7 +638,7 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
|
||||
/*
|
||||
* If we plan to do anything check the structure type first.
|
||||
*/
|
||||
if ( hw_fib->header.StructType != FIB_MAGIC ) {
|
||||
if (hw_fib->header.StructType != FIB_MAGIC) {
|
||||
if (dev->comm_interface == AAC_COMM_MESSAGE)
|
||||
kfree (hw_fib);
|
||||
return -EINVAL;
|
||||
@ -667,10 +670,9 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
|
||||
if (!(nointr & (int)aac_config.irq_mod))
|
||||
aac_adapter_notify(dev, AdapNormRespQueue);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n");
|
||||
} else {
|
||||
printk(KERN_WARNING "aac_fib_adapter_complete: "
|
||||
"Unknown xferstate detected.\n");
|
||||
BUG();
|
||||
}
|
||||
return 0;
|
||||
@ -773,20 +775,20 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
|
||||
{
|
||||
struct hw_fib * hw_fib = fibptr->hw_fib_va;
|
||||
struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
|
||||
u32 container;
|
||||
u32 channel, id, lun, container;
|
||||
struct scsi_device *device;
|
||||
enum {
|
||||
NOTHING,
|
||||
DELETE,
|
||||
ADD,
|
||||
CHANGE
|
||||
} device_config_needed;
|
||||
} device_config_needed = NOTHING;
|
||||
|
||||
/* Sniff for container changes */
|
||||
|
||||
if (!dev || !dev->fsa_dev)
|
||||
return;
|
||||
container = (u32)-1;
|
||||
container = channel = id = lun = (u32)-1;
|
||||
|
||||
/*
|
||||
* We have set this up to try and minimize the number of
|
||||
@ -796,13 +798,13 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
|
||||
*/
|
||||
switch (le32_to_cpu(aifcmd->command)) {
|
||||
case AifCmdDriverNotify:
|
||||
switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
|
||||
switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
|
||||
/*
|
||||
* Morph or Expand complete
|
||||
*/
|
||||
case AifDenMorphComplete:
|
||||
case AifDenVolumeExtendComplete:
|
||||
container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
|
||||
container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
|
||||
if (container >= dev->maximum_num_containers)
|
||||
break;
|
||||
|
||||
@ -835,25 +837,29 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
|
||||
if (container >= dev->maximum_num_containers)
|
||||
break;
|
||||
if ((dev->fsa_dev[container].config_waiting_on ==
|
||||
le32_to_cpu(*(u32 *)aifcmd->data)) &&
|
||||
le32_to_cpu(*(__le32 *)aifcmd->data)) &&
|
||||
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
|
||||
dev->fsa_dev[container].config_waiting_on = 0;
|
||||
} else for (container = 0;
|
||||
container < dev->maximum_num_containers; ++container) {
|
||||
if ((dev->fsa_dev[container].config_waiting_on ==
|
||||
le32_to_cpu(*(u32 *)aifcmd->data)) &&
|
||||
le32_to_cpu(*(__le32 *)aifcmd->data)) &&
|
||||
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
|
||||
dev->fsa_dev[container].config_waiting_on = 0;
|
||||
}
|
||||
break;
|
||||
|
||||
case AifCmdEventNotify:
|
||||
switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
|
||||
switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
|
||||
case AifEnBatteryEvent:
|
||||
dev->cache_protected =
|
||||
(((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
|
||||
break;
|
||||
/*
|
||||
* Add an Array.
|
||||
*/
|
||||
case AifEnAddContainer:
|
||||
container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
|
||||
container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
|
||||
if (container >= dev->maximum_num_containers)
|
||||
break;
|
||||
dev->fsa_dev[container].config_needed = ADD;
|
||||
@ -866,7 +872,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
|
||||
* Delete an Array.
|
||||
*/
|
||||
case AifEnDeleteContainer:
|
||||
container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
|
||||
container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
|
||||
if (container >= dev->maximum_num_containers)
|
||||
break;
|
||||
dev->fsa_dev[container].config_needed = DELETE;
|
||||
@ -880,7 +886,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
|
||||
* waiting on something else, setup to wait on a Config Change.
|
||||
*/
|
||||
case AifEnContainerChange:
|
||||
container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
|
||||
container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
|
||||
if (container >= dev->maximum_num_containers)
|
||||
break;
|
||||
if (dev->fsa_dev[container].config_waiting_on &&
|
||||
@ -895,6 +901,60 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
|
||||
case AifEnConfigChange:
|
||||
break;
|
||||
|
||||
case AifEnAddJBOD:
|
||||
case AifEnDeleteJBOD:
|
||||
container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
|
||||
if ((container >> 28))
|
||||
break;
|
||||
channel = (container >> 24) & 0xF;
|
||||
if (channel >= dev->maximum_num_channels)
|
||||
break;
|
||||
id = container & 0xFFFF;
|
||||
if (id >= dev->maximum_num_physicals)
|
||||
break;
|
||||
lun = (container >> 16) & 0xFF;
|
||||
channel = aac_phys_to_logical(channel);
|
||||
device_config_needed =
|
||||
(((__le32 *)aifcmd->data)[0] ==
|
||||
cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
|
||||
break;
|
||||
|
||||
case AifEnEnclosureManagement:
|
||||
/*
|
||||
* If in JBOD mode, automatic exposure of new
|
||||
* physical target to be suppressed until configured.
|
||||
*/
|
||||
if (dev->jbod)
|
||||
break;
|
||||
switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
|
||||
case EM_DRIVE_INSERTION:
|
||||
case EM_DRIVE_REMOVAL:
|
||||
container = le32_to_cpu(
|
||||
((__le32 *)aifcmd->data)[2]);
|
||||
if ((container >> 28))
|
||||
break;
|
||||
channel = (container >> 24) & 0xF;
|
||||
if (channel >= dev->maximum_num_channels)
|
||||
break;
|
||||
id = container & 0xFFFF;
|
||||
lun = (container >> 16) & 0xFF;
|
||||
if (id >= dev->maximum_num_physicals) {
|
||||
/* legacy dev_t ? */
|
||||
if ((0x2000 <= id) || lun || channel ||
|
||||
((channel = (id >> 7) & 0x3F) >=
|
||||
dev->maximum_num_channels))
|
||||
break;
|
||||
lun = (id >> 4) & 7;
|
||||
id &= 0xF;
|
||||
}
|
||||
channel = aac_phys_to_logical(channel);
|
||||
device_config_needed =
|
||||
(((__le32 *)aifcmd->data)[3]
|
||||
== cpu_to_le32(EM_DRIVE_INSERTION)) ?
|
||||
ADD : DELETE;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -905,13 +965,13 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
|
||||
if (container >= dev->maximum_num_containers)
|
||||
break;
|
||||
if ((dev->fsa_dev[container].config_waiting_on ==
|
||||
le32_to_cpu(*(u32 *)aifcmd->data)) &&
|
||||
le32_to_cpu(*(__le32 *)aifcmd->data)) &&
|
||||
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
|
||||
dev->fsa_dev[container].config_waiting_on = 0;
|
||||
} else for (container = 0;
|
||||
container < dev->maximum_num_containers; ++container) {
|
||||
if ((dev->fsa_dev[container].config_waiting_on ==
|
||||
le32_to_cpu(*(u32 *)aifcmd->data)) &&
|
||||
le32_to_cpu(*(__le32 *)aifcmd->data)) &&
|
||||
time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
|
||||
dev->fsa_dev[container].config_waiting_on = 0;
|
||||
}
|
||||
@ -926,9 +986,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
|
||||
* wait for a container change.
|
||||
*/
|
||||
|
||||
if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
|
||||
&& ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
|
||||
|| (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
|
||||
if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
|
||||
(((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
|
||||
((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
|
||||
for (container = 0;
|
||||
container < dev->maximum_num_containers;
|
||||
++container) {
|
||||
@ -943,9 +1003,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
|
||||
jiffies;
|
||||
}
|
||||
}
|
||||
if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
|
||||
&& (((u32 *)aifcmd->data)[6] == 0)
|
||||
&& (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
|
||||
if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
|
||||
((__le32 *)aifcmd->data)[6] == 0 &&
|
||||
((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
|
||||
for (container = 0;
|
||||
container < dev->maximum_num_containers;
|
||||
++container) {
|
||||
@ -963,7 +1023,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
|
||||
break;
|
||||
}
|
||||
|
||||
device_config_needed = NOTHING;
|
||||
if (device_config_needed == NOTHING)
|
||||
for (container = 0; container < dev->maximum_num_containers;
|
||||
++container) {
|
||||
if ((dev->fsa_dev[container].config_waiting_on == 0) &&
|
||||
@ -972,6 +1032,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
|
||||
device_config_needed =
|
||||
dev->fsa_dev[container].config_needed;
|
||||
dev->fsa_dev[container].config_needed = NOTHING;
|
||||
channel = CONTAINER_TO_CHANNEL(container);
|
||||
id = CONTAINER_TO_ID(container);
|
||||
lun = CONTAINER_TO_LUN(container);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -995,34 +1058,56 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
|
||||
/*
|
||||
* force reload of disk info via aac_probe_container
|
||||
*/
|
||||
if ((device_config_needed == CHANGE)
|
||||
&& (dev->fsa_dev[container].valid == 1))
|
||||
if ((channel == CONTAINER_CHANNEL) &&
|
||||
(device_config_needed != NOTHING)) {
|
||||
if (dev->fsa_dev[container].valid == 1)
|
||||
dev->fsa_dev[container].valid = 2;
|
||||
if ((device_config_needed == CHANGE) ||
|
||||
(device_config_needed == ADD))
|
||||
aac_probe_container(dev, container);
|
||||
device = scsi_device_lookup(dev->scsi_host_ptr,
|
||||
CONTAINER_TO_CHANNEL(container),
|
||||
CONTAINER_TO_ID(container),
|
||||
CONTAINER_TO_LUN(container));
|
||||
}
|
||||
device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
|
||||
if (device) {
|
||||
switch (device_config_needed) {
|
||||
case DELETE:
|
||||
if (scsi_device_online(device)) {
|
||||
scsi_device_set_state(device, SDEV_OFFLINE);
|
||||
sdev_printk(KERN_INFO, device,
|
||||
"Device offlined - %s\n",
|
||||
(channel == CONTAINER_CHANNEL) ?
|
||||
"array deleted" :
|
||||
"enclosure services event");
|
||||
}
|
||||
break;
|
||||
case ADD:
|
||||
if (!scsi_device_online(device)) {
|
||||
sdev_printk(KERN_INFO, device,
|
||||
"Device online - %s\n",
|
||||
(channel == CONTAINER_CHANNEL) ?
|
||||
"array created" :
|
||||
"enclosure services event");
|
||||
scsi_device_set_state(device, SDEV_RUNNING);
|
||||
}
|
||||
/* FALLTHRU */
|
||||
case CHANGE:
|
||||
if ((channel == CONTAINER_CHANNEL)
|
||||
&& (!dev->fsa_dev[container].valid)) {
|
||||
if (!scsi_device_online(device))
|
||||
break;
|
||||
scsi_device_set_state(device, SDEV_OFFLINE);
|
||||
sdev_printk(KERN_INFO, device,
|
||||
"Device offlined - %s\n",
|
||||
"array failed");
|
||||
break;
|
||||
}
|
||||
scsi_rescan_device(&device->sdev_gendev);
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
scsi_device_put(device);
|
||||
device_config_needed = NOTHING;
|
||||
}
|
||||
if (device_config_needed == ADD) {
|
||||
scsi_add_device(dev->scsi_host_ptr,
|
||||
CONTAINER_TO_CHANNEL(container),
|
||||
CONTAINER_TO_ID(container),
|
||||
CONTAINER_TO_LUN(container));
|
||||
}
|
||||
|
||||
if (device_config_needed == ADD)
|
||||
scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
|
||||
}
|
||||
|
||||
static int _aac_reset_adapter(struct aac_dev *aac, int forced)
|
||||
@ -1099,7 +1184,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
|
||||
free_irq(aac->pdev->irq, aac);
|
||||
kfree(aac->fsa_dev);
|
||||
aac->fsa_dev = NULL;
|
||||
if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) {
|
||||
quirks = aac_get_driver_ident(index)->quirks;
|
||||
if (quirks & AAC_QUIRK_31BIT) {
|
||||
if (((retval = pci_set_dma_mask(aac->pdev, DMA_31BIT_MASK))) ||
|
||||
((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_31BIT_MASK))))
|
||||
goto out;
|
||||
@ -1110,7 +1196,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
|
||||
}
|
||||
if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
|
||||
goto out;
|
||||
if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT)
|
||||
if (quirks & AAC_QUIRK_31BIT)
|
||||
if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK)))
|
||||
goto out;
|
||||
if (jafo) {
|
||||
@ -1121,7 +1207,6 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
|
||||
}
|
||||
}
|
||||
(void)aac_get_adapter_info(aac);
|
||||
quirks = aac_get_driver_ident(index)->quirks;
|
||||
if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
|
||||
host->sg_tablesize = 34;
|
||||
host->max_sectors = (host->sg_tablesize * 8) + 112;
|
||||
@ -1217,12 +1302,13 @@ int aac_reset_adapter(struct aac_dev * aac, int forced)
|
||||
}
|
||||
|
||||
/* Quiesce build, flush cache, write through mode */
|
||||
if (forced < 2)
|
||||
aac_send_shutdown(aac);
|
||||
spin_lock_irqsave(host->host_lock, flagv);
|
||||
retval = _aac_reset_adapter(aac, forced);
|
||||
retval = _aac_reset_adapter(aac, forced ? forced : ((aac_check_reset != 0) && (aac_check_reset != 1)));
|
||||
spin_unlock_irqrestore(host->host_lock, flagv);
|
||||
|
||||
if (retval == -ENODEV) {
|
||||
if ((forced < 2) && (retval == -ENODEV)) {
|
||||
/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
|
||||
struct fib * fibctx = aac_fib_alloc(aac);
|
||||
if (fibctx) {
|
||||
@ -1339,10 +1425,10 @@ int aac_check_health(struct aac_dev * aac)
|
||||
aif = (struct aac_aifcmd *)hw_fib->data;
|
||||
aif->command = cpu_to_le32(AifCmdEventNotify);
|
||||
aif->seqnum = cpu_to_le32(0xFFFFFFFF);
|
||||
aif->data[0] = AifEnExpEvent;
|
||||
aif->data[1] = AifExeFirmwarePanic;
|
||||
aif->data[2] = AifHighPriority;
|
||||
aif->data[3] = BlinkLED;
|
||||
((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
|
||||
((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
|
||||
((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
|
||||
((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);
|
||||
|
||||
/*
|
||||
* Put the FIB onto the
|
||||
@ -1372,14 +1458,14 @@ int aac_check_health(struct aac_dev * aac)
|
||||
|
||||
printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
|
||||
|
||||
if (!aac_check_reset ||
|
||||
if (!aac_check_reset || ((aac_check_reset != 1) &&
|
||||
(aac->supplement_adapter_info.SupportedOptions2 &
|
||||
le32_to_cpu(AAC_OPTION_IGNORE_RESET)))
|
||||
AAC_OPTION_IGNORE_RESET)))
|
||||
goto out;
|
||||
host = aac->scsi_host_ptr;
|
||||
if (aac->thread->pid != current->pid)
|
||||
spin_lock_irqsave(host->host_lock, flagv);
|
||||
BlinkLED = _aac_reset_adapter(aac, 0);
|
||||
BlinkLED = _aac_reset_adapter(aac, aac_check_reset != 1);
|
||||
if (aac->thread->pid != current->pid)
|
||||
spin_unlock_irqrestore(host->host_lock, flagv);
|
||||
return BlinkLED;
|
||||
@ -1425,8 +1511,7 @@ int aac_command_thread(void *data)
|
||||
add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
dprintk ((KERN_INFO "aac_command_thread start\n"));
|
||||
while(1)
|
||||
{
|
||||
while (1) {
|
||||
spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
|
||||
while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
|
||||
struct list_head *entry;
|
||||
@ -1448,7 +1533,7 @@ int aac_command_thread(void *data)
|
||||
hw_fib = fib->hw_fib_va;
|
||||
memset(fib, 0, sizeof(struct fib));
|
||||
fib->type = FSAFS_NTC_FIB_CONTEXT;
|
||||
fib->size = sizeof( struct fib );
|
||||
fib->size = sizeof(struct fib);
|
||||
fib->hw_fib_va = hw_fib;
|
||||
fib->data = hw_fib->data;
|
||||
fib->dev = dev;
|
||||
@ -1462,7 +1547,6 @@ int aac_command_thread(void *data)
|
||||
*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
|
||||
aac_fib_adapter_complete(fib, (u16)sizeof(u32));
|
||||
} else {
|
||||
struct list_head *entry;
|
||||
/* The u32 here is important and intended. We are using
|
||||
32bit wrapping time to fit the adapter field */
|
||||
|
||||
@ -1655,11 +1739,11 @@ int aac_command_thread(void *data)
|
||||
struct fib *fibptr;
|
||||
|
||||
if ((fibptr = aac_fib_alloc(dev))) {
|
||||
u32 * info;
|
||||
__le32 *info;
|
||||
|
||||
aac_fib_init(fibptr);
|
||||
|
||||
info = (u32 *) fib_data(fibptr);
|
||||
info = (__le32 *) fib_data(fibptr);
|
||||
if (now.tv_usec > 500000)
|
||||
++now.tv_sec;
|
||||
|
||||
|
@ -120,6 +120,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
|
||||
* NOTE: we cannot touch the fib after this
|
||||
* call, because it may have been deallocated.
|
||||
*/
|
||||
fib->flags = 0;
|
||||
fib->callback(fib->callback_data, fib);
|
||||
} else {
|
||||
unsigned long flagv;
|
||||
@ -229,11 +230,9 @@ unsigned int aac_command_normal(struct aac_queue *q)
|
||||
* all QE there are and wake up all the waiters before exiting.
|
||||
*/
|
||||
|
||||
unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
|
||||
unsigned int aac_intr_normal(struct aac_dev * dev, u32 index)
|
||||
{
|
||||
u32 index = le32_to_cpu(Index);
|
||||
|
||||
dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, Index));
|
||||
dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
|
||||
if ((index & 0x00000002L)) {
|
||||
struct hw_fib * hw_fib;
|
||||
struct fib * fib;
|
||||
@ -301,7 +300,7 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
|
||||
|
||||
if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
|
||||
{
|
||||
u32 *pstatus = (u32 *)hwfib->data;
|
||||
__le32 *pstatus = (__le32 *)hwfib->data;
|
||||
if (*pstatus & cpu_to_le32(0xffff0000))
|
||||
*pstatus = cpu_to_le32(ST_OK);
|
||||
}
|
||||
@ -315,6 +314,7 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
|
||||
* NOTE: we cannot touch the fib after this
|
||||
* call, because it may have been deallocated.
|
||||
*/
|
||||
fib->flags = 0;
|
||||
fib->callback(fib->callback_data, fib);
|
||||
} else {
|
||||
unsigned long flagv;
|
||||
|
@ -164,22 +164,22 @@ MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
|
||||
* for the card. At that time we can remove the channels from here
|
||||
*/
|
||||
static struct aac_driver_ident aac_drivers[] = {
|
||||
{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 2/Si (Iguana/PERC2Si) */
|
||||
{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Opal/PERC3Di) */
|
||||
{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Si (SlimFast/PERC3Si */
|
||||
{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
|
||||
{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Viper/PERC3DiV) */
|
||||
{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Lexus/PERC3DiL) */
|
||||
{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
|
||||
{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Dagger/PERC3DiD) */
|
||||
{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Boxster/PERC3DiB) */
|
||||
{ aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* catapult */
|
||||
{ aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* tomcat */
|
||||
{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */
|
||||
{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */
|
||||
{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan-2m) */
|
||||
{ aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend S220 (Legend Crusader) */
|
||||
{ aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend S230 (Legend Vulcan) */
|
||||
{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 2/Si (Iguana/PERC2Si) */
|
||||
{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Opal/PERC3Di) */
|
||||
{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Si (SlimFast/PERC3Si */
|
||||
{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
|
||||
{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Viper/PERC3DiV) */
|
||||
{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Lexus/PERC3DiL) */
|
||||
{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
|
||||
{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Dagger/PERC3DiD) */
|
||||
{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */
|
||||
{ aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */
|
||||
{ aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */
|
||||
{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2120S (Crusader) */
|
||||
{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan) */
|
||||
{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */
|
||||
{ aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */
|
||||
{ aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */
|
||||
|
||||
{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3230S ", 2 }, /* Adaptec 3230S (Harrier) */
|
||||
{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3240S ", 2 }, /* Adaptec 3240S (Tornado) */
|
||||
@ -224,8 +224,8 @@ static struct aac_driver_ident aac_drivers[] = {
|
||||
{ aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_34SG }, /* Dell PERC2/QC */
|
||||
{ aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */
|
||||
|
||||
{ aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell Catchall */
|
||||
{ aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend Catchall */
|
||||
{ aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Dell Catchall */
|
||||
{ aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */
|
||||
{ aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
|
||||
{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
|
||||
{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec NEMER/ARK Catch All */
|
||||
@ -401,30 +401,44 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
|
||||
|
||||
static int aac_slave_configure(struct scsi_device *sdev)
|
||||
{
|
||||
struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
|
||||
if ((sdev->type == TYPE_DISK) &&
|
||||
(sdev_channel(sdev) != CONTAINER_CHANNEL)) {
|
||||
(sdev_channel(sdev) != CONTAINER_CHANNEL) &&
|
||||
(!aac->jbod || sdev->inq_periph_qual) &&
|
||||
(!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {
|
||||
if (expose_physicals == 0)
|
||||
return -ENXIO;
|
||||
if (expose_physicals < 0) {
|
||||
struct aac_dev *aac =
|
||||
(struct aac_dev *)sdev->host->hostdata;
|
||||
if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
|
||||
if (expose_physicals < 0)
|
||||
sdev->no_uld_attach = 1;
|
||||
}
|
||||
}
|
||||
if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
|
||||
(sdev_channel(sdev) == CONTAINER_CHANNEL)) {
|
||||
(!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) &&
|
||||
!sdev->no_uld_attach) {
|
||||
struct scsi_device * dev;
|
||||
struct Scsi_Host *host = sdev->host;
|
||||
unsigned num_lsu = 0;
|
||||
unsigned num_one = 0;
|
||||
unsigned depth;
|
||||
unsigned cid;
|
||||
|
||||
/*
|
||||
* Firmware has an individual device recovery time typically
|
||||
* of 35 seconds, give us a margin.
|
||||
*/
|
||||
if (sdev->timeout < (45 * HZ))
|
||||
sdev->timeout = 45 * HZ;
|
||||
for (cid = 0; cid < aac->maximum_num_containers; ++cid)
|
||||
if (aac->fsa_dev[cid].valid)
|
||||
++num_lsu;
|
||||
__shost_for_each_device(dev, host) {
|
||||
if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
|
||||
(sdev_channel(dev) == CONTAINER_CHANNEL))
|
||||
(!aac->raid_scsi_mode ||
|
||||
(sdev_channel(sdev) != 2)) &&
|
||||
!dev->no_uld_attach) {
|
||||
if ((sdev_channel(dev) != CONTAINER_CHANNEL)
|
||||
|| !aac->fsa_dev[sdev_id(dev)].valid)
|
||||
++num_lsu;
|
||||
else
|
||||
} else
|
||||
++num_one;
|
||||
}
|
||||
if (num_lsu == 0)
|
||||
@ -481,9 +495,35 @@ static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
|
||||
return sdev->queue_depth;
|
||||
}
|
||||
|
||||
static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct scsi_device * sdev = to_scsi_device(dev);
|
||||
if (sdev_channel(sdev) != CONTAINER_CHANNEL)
|
||||
return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach
|
||||
? "Hidden\n" : "JBOD");
|
||||
return snprintf(buf, PAGE_SIZE, "%s\n",
|
||||
get_container_type(((struct aac_dev *)(sdev->host->hostdata))
|
||||
->fsa_dev[sdev_id(sdev)].type));
|
||||
}
|
||||
|
||||
static struct device_attribute aac_raid_level_attr = {
|
||||
.attr = {
|
||||
.name = "level",
|
||||
.mode = S_IRUGO,
|
||||
},
|
||||
.show = aac_show_raid_level
|
||||
};
|
||||
|
||||
static struct device_attribute *aac_dev_attrs[] = {
|
||||
&aac_raid_level_attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
|
||||
{
|
||||
struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
|
||||
if (!capable(CAP_SYS_RAWIO))
|
||||
return -EPERM;
|
||||
return aac_do_ioctl(dev, cmd, arg);
|
||||
}
|
||||
|
||||
@ -506,17 +546,33 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
|
||||
break;
|
||||
case INQUIRY:
|
||||
case READ_CAPACITY:
|
||||
case TEST_UNIT_READY:
|
||||
/* Mark associated FIB to not complete, eh handler does this */
|
||||
for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
|
||||
struct fib * fib = &aac->fibs[count];
|
||||
if (fib->hw_fib_va->header.XferState &&
|
||||
(fib->flags & FIB_CONTEXT_FLAG) &&
|
||||
(fib->callback_data == cmd)) {
|
||||
fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
|
||||
cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
|
||||
ret = SUCCESS;
|
||||
}
|
||||
}
|
||||
break;
|
||||
case TEST_UNIT_READY:
|
||||
/* Mark associated FIB to not complete, eh handler does this */
|
||||
for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
|
||||
struct scsi_cmnd * command;
|
||||
struct fib * fib = &aac->fibs[count];
|
||||
if ((fib->hw_fib_va->header.XferState & cpu_to_le32(Async | NoResponseExpected)) &&
|
||||
(fib->flags & FIB_CONTEXT_FLAG) &&
|
||||
((command = fib->callback_data)) &&
|
||||
(command->device == cmd->device)) {
|
||||
fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
|
||||
command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
|
||||
if (command == cmd)
|
||||
ret = SUCCESS;
|
||||
}
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
@ -539,6 +595,7 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
|
||||
for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
|
||||
struct fib * fib = &aac->fibs[count];
|
||||
if (fib->hw_fib_va->header.XferState &&
|
||||
(fib->flags & FIB_CONTEXT_FLAG) &&
|
||||
(fib->callback_data == cmd)) {
|
||||
fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
|
||||
cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
|
||||
@ -584,8 +641,11 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
|
||||
* support a register, instead of a commanded, reset.
|
||||
*/
|
||||
if ((aac->supplement_adapter_info.SupportedOptions2 &
|
||||
le32_to_cpu(AAC_OPTION_MU_RESET|AAC_OPTION_IGNORE_RESET)) ==
|
||||
le32_to_cpu(AAC_OPTION_MU_RESET))
|
||||
AAC_OPTION_MU_RESET) &&
|
||||
aac_check_reset &&
|
||||
((aac_check_reset != 1) ||
|
||||
(aac->supplement_adapter_info.SupportedOptions2 &
|
||||
AAC_OPTION_IGNORE_RESET)))
|
||||
aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */
|
||||
return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */
|
||||
}
|
||||
@ -735,6 +795,25 @@ static ssize_t aac_show_vendor(struct class_device *class_dev,
|
||||
return len;
|
||||
}
|
||||
|
||||
static ssize_t aac_show_flags(struct class_device *class_dev, char *buf)
|
||||
{
|
||||
int len = 0;
|
||||
struct aac_dev *dev = (struct aac_dev*)class_to_shost(class_dev)->hostdata;
|
||||
|
||||
if (nblank(dprintk(x)))
|
||||
len = snprintf(buf, PAGE_SIZE, "dprintk\n");
|
||||
#ifdef AAC_DETAILED_STATUS_INFO
|
||||
len += snprintf(buf + len, PAGE_SIZE - len,
|
||||
"AAC_DETAILED_STATUS_INFO\n");
|
||||
#endif
|
||||
if (dev->raw_io_interface && dev->raw_io_64)
|
||||
len += snprintf(buf + len, PAGE_SIZE - len,
|
||||
"SAI_READ_CAPACITY_16\n");
|
||||
if (dev->jbod)
|
||||
len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n");
|
||||
return len;
|
||||
}
|
||||
|
||||
static ssize_t aac_show_kernel_version(struct class_device *class_dev,
|
||||
char *buf)
|
||||
{
|
||||
@ -844,6 +923,13 @@ static struct class_device_attribute aac_vendor = {
|
||||
},
|
||||
.show = aac_show_vendor,
|
||||
};
|
||||
static struct class_device_attribute aac_flags = {
|
||||
.attr = {
|
||||
.name = "flags",
|
||||
.mode = S_IRUGO,
|
||||
},
|
||||
.show = aac_show_flags,
|
||||
};
|
||||
static struct class_device_attribute aac_kernel_version = {
|
||||
.attr = {
|
||||
.name = "hba_kernel_version",
|
||||
@ -898,6 +984,7 @@ static struct class_device_attribute aac_reset = {
|
||||
static struct class_device_attribute *aac_attrs[] = {
|
||||
&aac_model,
|
||||
&aac_vendor,
|
||||
&aac_flags,
|
||||
&aac_kernel_version,
|
||||
&aac_monitor_version,
|
||||
&aac_bios_version,
|
||||
@ -932,6 +1019,7 @@ static struct scsi_host_template aac_driver_template = {
|
||||
.shost_attrs = aac_attrs,
|
||||
.slave_configure = aac_slave_configure,
|
||||
.change_queue_depth = aac_change_queue_depth,
|
||||
.sdev_attrs = aac_dev_attrs,
|
||||
.eh_abort_handler = aac_eh_abort,
|
||||
.eh_host_reset_handler = aac_eh_reset,
|
||||
.can_queue = AAC_NUM_IO_FIB,
|
||||
@ -1076,7 +1164,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
|
||||
* all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
|
||||
* physical channels are address by their actual physical number+1
|
||||
*/
|
||||
if ((aac->nondasd_support == 1) || expose_physicals)
|
||||
if (aac->nondasd_support || expose_physicals || aac->jbod)
|
||||
shost->max_channel = aac->maximum_num_channels;
|
||||
else
|
||||
shost->max_channel = 0;
|
||||
|
@ -465,7 +465,7 @@ static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
|
||||
u32 var;
|
||||
|
||||
if (!(dev->supplement_adapter_info.SupportedOptions2 &
|
||||
le32_to_cpu(AAC_OPTION_MU_RESET)) || (bled >= 0) || (bled == -2)) {
|
||||
AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) {
|
||||
if (bled)
|
||||
printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
|
||||
dev->name, dev->id, bled);
|
||||
@ -549,7 +549,9 @@ int _aac_rx_init(struct aac_dev *dev)
|
||||
dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
|
||||
if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
|
||||
!aac_rx_restart_adapter(dev, 0))
|
||||
++restart;
|
||||
/* Make sure the Hardware FIFO is empty */
|
||||
while ((++restart < 512) &&
|
||||
(rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
|
||||
/*
|
||||
* Check to see if the board panic'd while booting.
|
||||
*/
|
||||
|
@ -8233,7 +8233,7 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
|
||||
if (scsiqp->scsi_status == SAM_STAT_CHECK_CONDITION) {
|
||||
ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
|
||||
ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
|
||||
sizeof(scp->sense_buffer));
|
||||
SCSI_SENSE_BUFFERSIZE);
|
||||
/*
|
||||
* Note: The 'status_byte()' macro used by
|
||||
* target drivers defined in scsi.h shifts the
|
||||
@ -9136,7 +9136,7 @@ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
|
||||
BUG_ON(asc_dvc_varp != &boardp->dvc_var.asc_dvc_var);
|
||||
|
||||
dma_unmap_single(boardp->dev, scp->SCp.dma_handle,
|
||||
sizeof(scp->sense_buffer), DMA_FROM_DEVICE);
|
||||
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
|
||||
/*
|
||||
* 'qdonep' contains the command's ending status.
|
||||
*/
|
||||
@ -9166,7 +9166,7 @@ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
|
||||
if (qdonep->d3.scsi_stat == SAM_STAT_CHECK_CONDITION) {
|
||||
ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
|
||||
ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
|
||||
sizeof(scp->sense_buffer));
|
||||
SCSI_SENSE_BUFFERSIZE);
|
||||
/*
|
||||
* Note: The 'status_byte()' macro used by
|
||||
* target drivers defined in scsi.h shifts the
|
||||
@ -9881,9 +9881,9 @@ static __le32 advansys_get_sense_buffer_dma(struct scsi_cmnd *scp)
|
||||
{
|
||||
struct asc_board *board = shost_priv(scp->device->host);
|
||||
scp->SCp.dma_handle = dma_map_single(board->dev, scp->sense_buffer,
|
||||
sizeof(scp->sense_buffer), DMA_FROM_DEVICE);
|
||||
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
|
||||
dma_cache_sync(board->dev, scp->sense_buffer,
|
||||
sizeof(scp->sense_buffer), DMA_FROM_DEVICE);
|
||||
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
|
||||
return cpu_to_le32(scp->SCp.dma_handle);
|
||||
}
|
||||
|
||||
@ -9914,7 +9914,7 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
|
||||
asc_scsi_q->q2.target_ix =
|
||||
ASC_TIDLUN_TO_IX(scp->device->id, scp->device->lun);
|
||||
asc_scsi_q->q1.sense_addr = advansys_get_sense_buffer_dma(scp);
|
||||
asc_scsi_q->q1.sense_len = sizeof(scp->sense_buffer);
|
||||
asc_scsi_q->q1.sense_len = SCSI_SENSE_BUFFERSIZE;
|
||||
|
||||
/*
|
||||
* If there are any outstanding requests for the current target,
|
||||
@ -10173,7 +10173,7 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
|
||||
scsiqp->target_lun = scp->device->lun;
|
||||
|
||||
scsiqp->sense_addr = cpu_to_le32(virt_to_bus(&scp->sense_buffer[0]));
|
||||
scsiqp->sense_len = sizeof(scp->sense_buffer);
|
||||
scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE;
|
||||
|
||||
/* Build ADV_SCSI_REQ_Q */
|
||||
|
||||
|
@ -260,6 +260,7 @@
|
||||
#include <scsi/scsi_dbg.h>
|
||||
#include <scsi/scsi_host.h>
|
||||
#include <scsi/scsi_transport_spi.h>
|
||||
#include <scsi/scsi_eh.h>
|
||||
#include "aha152x.h"
|
||||
|
||||
static LIST_HEAD(aha152x_host_list);
|
||||
@ -558,9 +559,7 @@ struct aha152x_hostdata {
|
||||
struct aha152x_scdata {
|
||||
Scsi_Cmnd *next; /* next sc in queue */
|
||||
struct completion *done;/* semaphore to block on */
|
||||
unsigned char aha_orig_cmd_len;
|
||||
unsigned char aha_orig_cmnd[MAX_COMMAND_SIZE];
|
||||
int aha_orig_resid;
|
||||
struct scsi_eh_save ses;
|
||||
};
|
||||
|
||||
/* access macros for hostdata */
|
||||
@ -1017,16 +1016,10 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
|
||||
SCp.buffers_residual : left buffers in list
|
||||
SCp.phase : current state of the command */
|
||||
|
||||
if ((phase & (check_condition|resetting)) || !scsi_sglist(SCpnt)) {
|
||||
if (phase & check_condition) {
|
||||
SCpnt->SCp.ptr = SCpnt->sense_buffer;
|
||||
SCpnt->SCp.this_residual = sizeof(SCpnt->sense_buffer);
|
||||
scsi_set_resid(SCpnt, sizeof(SCpnt->sense_buffer));
|
||||
} else {
|
||||
if ((phase & resetting) || !scsi_sglist(SCpnt)) {
|
||||
SCpnt->SCp.ptr = NULL;
|
||||
SCpnt->SCp.this_residual = 0;
|
||||
scsi_set_resid(SCpnt, 0);
|
||||
}
|
||||
SCpnt->SCp.buffer = NULL;
|
||||
SCpnt->SCp.buffers_residual = 0;
|
||||
} else {
|
||||
@ -1561,10 +1554,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
|
||||
}
|
||||
#endif
|
||||
|
||||
/* restore old command */
|
||||
memcpy(cmd->cmnd, sc->aha_orig_cmnd, sizeof(cmd->cmnd));
|
||||
cmd->cmd_len = sc->aha_orig_cmd_len;
|
||||
scsi_set_resid(cmd, sc->aha_orig_resid);
|
||||
scsi_eh_restore_cmnd(cmd, &sc->ses);
|
||||
|
||||
cmd->SCp.Status = SAM_STAT_CHECK_CONDITION;
|
||||
|
||||
@ -1587,22 +1577,10 @@ static void busfree_run(struct Scsi_Host *shpnt)
|
||||
DPRINTK(debug_eh, ERR_LEAD "requesting sense\n", CMDINFO(ptr));
|
||||
#endif
|
||||
|
||||
/* save old command */
|
||||
sc = SCDATA(ptr);
|
||||
/* It was allocated in aha152x_internal_queue? */
|
||||
BUG_ON(!sc);
|
||||
memcpy(sc->aha_orig_cmnd, ptr->cmnd,
|
||||
sizeof(ptr->cmnd));
|
||||
sc->aha_orig_cmd_len = ptr->cmd_len;
|
||||
sc->aha_orig_resid = scsi_get_resid(ptr);
|
||||
|
||||
ptr->cmnd[0] = REQUEST_SENSE;
|
||||
ptr->cmnd[1] = 0;
|
||||
ptr->cmnd[2] = 0;
|
||||
ptr->cmnd[3] = 0;
|
||||
ptr->cmnd[4] = sizeof(ptr->sense_buffer);
|
||||
ptr->cmnd[5] = 0;
|
||||
ptr->cmd_len = 6;
|
||||
scsi_eh_prep_cmnd(ptr, &sc->ses, NULL, 0, ~0);
|
||||
|
||||
DO_UNLOCK(flags);
|
||||
aha152x_internal_queue(ptr, NULL, check_condition, ptr->scsi_done);
|
||||
|
@ -51,15 +51,6 @@
|
||||
#define SCSI_BUF_PA(address) isa_virt_to_bus(address)
|
||||
#define SCSI_SG_PA(sgent) (isa_page_to_bus(sg_page((sgent))) + (sgent)->offset)
|
||||
|
||||
static void BAD_DMA(void *address, unsigned int length)
|
||||
{
|
||||
printk(KERN_CRIT "buf vaddress %p paddress 0x%lx length %d\n",
|
||||
address,
|
||||
SCSI_BUF_PA(address),
|
||||
length);
|
||||
panic("Buffer at physical address > 16Mb used for aha1542");
|
||||
}
|
||||
|
||||
static void BAD_SG_DMA(Scsi_Cmnd * SCpnt,
|
||||
struct scatterlist *sgp,
|
||||
int nseg,
|
||||
@ -545,7 +536,7 @@ static void aha1542_intr_handle(struct Scsi_Host *shost, void *dev_id)
|
||||
we will still have it in the cdb when we come back */
|
||||
if (ccb[mbo].tarstat == 2)
|
||||
memcpy(SCtmp->sense_buffer, &ccb[mbo].cdb[ccb[mbo].cdblen],
|
||||
sizeof(SCtmp->sense_buffer));
|
||||
SCSI_SENSE_BUFFERSIZE);
|
||||
|
||||
|
||||
/* is there mail :-) */
|
||||
@ -597,8 +588,7 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
|
||||
unchar target = SCpnt->device->id;
|
||||
unchar lun = SCpnt->device->lun;
|
||||
unsigned long flags;
|
||||
void *buff = SCpnt->request_buffer;
|
||||
int bufflen = SCpnt->request_bufflen;
|
||||
int bufflen = scsi_bufflen(SCpnt);
|
||||
int mbo;
|
||||
struct mailbox *mb;
|
||||
struct ccb *ccb;
|
||||
@ -619,7 +609,7 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
|
||||
#if 0
|
||||
/* scsi_request_sense() provides a buffer of size 256,
|
||||
so there is no reason to expect equality */
|
||||
if (bufflen != sizeof(SCpnt->sense_buffer))
|
||||
if (bufflen != SCSI_SENSE_BUFFERSIZE)
|
||||
printk(KERN_CRIT "aha1542: Wrong buffer length supplied "
|
||||
"for request sense (%d)\n", bufflen);
|
||||
#endif
|
||||
@ -689,42 +679,29 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
|
||||
|
||||
memcpy(ccb[mbo].cdb, cmd, ccb[mbo].cdblen);
|
||||
|
||||
if (SCpnt->use_sg) {
|
||||
if (bufflen) {
|
||||
struct scatterlist *sg;
|
||||
struct chain *cptr;
|
||||
#ifdef DEBUG
|
||||
unsigned char *ptr;
|
||||
#endif
|
||||
int i;
|
||||
int i, sg_count = scsi_sg_count(SCpnt);
|
||||
ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */
|
||||
SCpnt->host_scribble = kmalloc(512, GFP_KERNEL | GFP_DMA);
|
||||
SCpnt->host_scribble = kmalloc(sizeof(*cptr)*sg_count,
|
||||
GFP_KERNEL | GFP_DMA);
|
||||
cptr = (struct chain *) SCpnt->host_scribble;
|
||||
if (cptr == NULL) {
|
||||
/* free the claimed mailbox slot */
|
||||
HOSTDATA(SCpnt->device->host)->SCint[mbo] = NULL;
|
||||
return SCSI_MLQUEUE_HOST_BUSY;
|
||||
}
|
||||
scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) {
|
||||
if (sg->length == 0 || SCpnt->use_sg > 16 ||
|
||||
(((int) sg->offset) & 1) || (sg->length & 1)) {
|
||||
unsigned char *ptr;
|
||||
printk(KERN_CRIT "Bad segment list supplied to aha1542.c (%d, %d)\n", SCpnt->use_sg, i);
|
||||
scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) {
|
||||
printk(KERN_CRIT "%d: %p %d\n", i,
|
||||
sg_virt(sg), sg->length);
|
||||
};
|
||||
printk(KERN_CRIT "cptr %x: ", (unsigned int) cptr);
|
||||
ptr = (unsigned char *) &cptr[i];
|
||||
for (i = 0; i < 18; i++)
|
||||
printk("%02x ", ptr[i]);
|
||||
panic("Foooooooood fight!");
|
||||
};
|
||||
scsi_for_each_sg(SCpnt, sg, sg_count, i) {
|
||||
any2scsi(cptr[i].dataptr, SCSI_SG_PA(sg));
|
||||
if (SCSI_SG_PA(sg) + sg->length - 1 > ISA_DMA_THRESHOLD)
|
||||
BAD_SG_DMA(SCpnt, sg, SCpnt->use_sg, i);
|
||||
BAD_SG_DMA(SCpnt, scsi_sglist(SCpnt), sg_count, i);
|
||||
any2scsi(cptr[i].datalen, sg->length);
|
||||
};
|
||||
any2scsi(ccb[mbo].datalen, SCpnt->use_sg * sizeof(struct chain));
|
||||
any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain));
|
||||
any2scsi(ccb[mbo].dataptr, SCSI_BUF_PA(cptr));
|
||||
#ifdef DEBUG
|
||||
printk("cptr %x: ", cptr);
|
||||
@ -735,10 +712,8 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
|
||||
} else {
|
||||
ccb[mbo].op = 0; /* SCSI Initiator Command */
|
||||
SCpnt->host_scribble = NULL;
|
||||
any2scsi(ccb[mbo].datalen, bufflen);
|
||||
if (buff && SCSI_BUF_PA(buff + bufflen - 1) > ISA_DMA_THRESHOLD)
|
||||
BAD_DMA(buff, bufflen);
|
||||
any2scsi(ccb[mbo].dataptr, SCSI_BUF_PA(buff));
|
||||
any2scsi(ccb[mbo].datalen, 0);
|
||||
any2scsi(ccb[mbo].dataptr, 0);
|
||||
};
|
||||
ccb[mbo].idlun = (target & 7) << 5 | direction | (lun & 7); /*SCSI Target Id */
|
||||
ccb[mbo].rsalen = 16;
|
||||
|
@ -286,7 +286,7 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
|
||||
cdb when we come back */
|
||||
if ( (adapstat & G2INTST_MASK) == G2INTST_CCBERROR ) {
|
||||
memcpy(SCtmp->sense_buffer, ecbptr->sense,
|
||||
sizeof(SCtmp->sense_buffer));
|
||||
SCSI_SENSE_BUFFERSIZE);
|
||||
errstatus = aha1740_makecode(ecbptr->sense,ecbptr->status);
|
||||
} else
|
||||
errstatus = 0;
|
||||
|
@ -33,11 +33,10 @@ aic79xx-y += aic79xx_osm.o \
|
||||
aic79xx_proc.o \
|
||||
aic79xx_osm_pci.o
|
||||
|
||||
EXTRA_CFLAGS += -Idrivers/scsi
|
||||
ccflags-y += -Idrivers/scsi
|
||||
ifdef WARNINGS_BECOME_ERRORS
|
||||
EXTRA_CFLAGS += -Werror
|
||||
ccflags-y += -Werror
|
||||
endif
|
||||
#EXTRA_CFLAGS += -g
|
||||
|
||||
# Files generated that shall be removed upon make clean
|
||||
clean-files := aic7xxx_seq.h aic7xxx_reg.h aic7xxx_reg_print.c
|
||||
@ -46,53 +45,45 @@ clean-files += aic79xx_seq.h aic79xx_reg.h aic79xx_reg_print.c
|
||||
# Dependencies for generated files need to be listed explicitly
|
||||
|
||||
$(obj)/aic7xxx_core.o: $(obj)/aic7xxx_seq.h
|
||||
$(obj)/aic7xxx_core.o: $(obj)/aic7xxx_reg.h
|
||||
$(obj)/aic79xx_core.o: $(obj)/aic79xx_seq.h
|
||||
$(obj)/aic79xx_reg_print.c: $(src)/aic79xx_reg_print.c_shipped
|
||||
$(obj)/aic7xxx_reg_print.c: $(src)/aic7xxx_reg_print.c_shipped
|
||||
$(obj)/aic79xx_core.o: $(obj)/aic79xx_reg.h
|
||||
|
||||
$(addprefix $(obj)/,$(aic7xxx-y)): $(obj)/aic7xxx_reg.h
|
||||
$(addprefix $(obj)/,$(aic79xx-y)): $(obj)/aic79xx_reg.h
|
||||
$(addprefix $(obj)/,$(aic7xxx-y)): $(obj)/aic7xxx_seq.h
|
||||
$(addprefix $(obj)/,$(aic79xx-y)): $(obj)/aic79xx_seq.h
|
||||
|
||||
aic7xxx-gen-$(CONFIG_AIC7XXX_BUILD_FIRMWARE) := $(obj)/aic7xxx_seq.h \
|
||||
$(obj)/aic7xxx_reg.h
|
||||
aic7xxx-gen-$(CONFIG_AIC7XXX_BUILD_FIRMWARE) := $(obj)/aic7xxx_reg.h
|
||||
aic7xxx-gen-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) += $(obj)/aic7xxx_reg_print.c
|
||||
|
||||
aicasm-7xxx-opts-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) := \
|
||||
-p $(obj)/aic7xxx_reg_print.c -i aic7xxx_osm.h
|
||||
|
||||
ifeq ($(CONFIG_AIC7XXX_BUILD_FIRMWARE),y)
|
||||
# Create a dependency chain in generated files
|
||||
# to avoid concurrent invocations of the single
|
||||
# rule that builds them all.
|
||||
aic7xxx_seq.h: aic7xxx_reg.h
|
||||
ifeq ($(CONFIG_AIC7XXX_REG_PRETTY_PRINT),y)
|
||||
aic7xxx_reg.h: aic7xxx_reg_print.c
|
||||
endif
|
||||
$(aic7xxx-gen-y): $(src)/aic7xxx.seq $(src)/aic7xxx.reg $(obj)/aicasm/aicasm
|
||||
$(obj)/aic7xxx_seq.h: $(src)/aic7xxx.seq $(src)/aic7xxx.reg $(obj)/aicasm/aicasm
|
||||
$(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic7xxx_reg.h \
|
||||
$(aicasm-7xxx-opts-y) -o $(obj)/aic7xxx_seq.h \
|
||||
$(src)/aic7xxx.seq
|
||||
|
||||
$(aic7xxx-gen-y): $(obj)/aic7xxx_seq.h
|
||||
else
|
||||
$(obj)/aic7xxx_reg_print.c: $(src)/aic7xxx_reg_print.c_shipped
|
||||
endif
|
||||
|
||||
aic79xx-gen-$(CONFIG_AIC79XX_BUILD_FIRMWARE) := $(obj)/aic79xx_seq.h \
|
||||
$(obj)/aic79xx_reg.h
|
||||
aic79xx-gen-$(CONFIG_AIC79XX_BUILD_FIRMWARE) := $(obj)/aic79xx_reg.h
|
||||
aic79xx-gen-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) += $(obj)/aic79xx_reg_print.c
|
||||
|
||||
aicasm-79xx-opts-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) := \
|
||||
-p $(obj)/aic79xx_reg_print.c -i aic79xx_osm.h
|
||||
|
||||
ifeq ($(CONFIG_AIC79XX_BUILD_FIRMWARE),y)
|
||||
# Create a dependency chain in generated files
|
||||
# to avoid concurrent invocations of the single
|
||||
# rule that builds them all.
|
||||
aic79xx_seq.h: aic79xx_reg.h
|
||||
ifeq ($(CONFIG_AIC79XX_REG_PRETTY_PRINT),y)
|
||||
aic79xx_reg.h: aic79xx_reg_print.c
|
||||
endif
|
||||
$(aic79xx-gen-y): $(src)/aic79xx.seq $(src)/aic79xx.reg $(obj)/aicasm/aicasm
|
||||
$(obj)/aic79xx_seq.h: $(src)/aic79xx.seq $(src)/aic79xx.reg $(obj)/aicasm/aicasm
|
||||
$(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic79xx_reg.h \
|
||||
$(aicasm-79xx-opts-y) -o $(obj)/aic79xx_seq.h \
|
||||
$(src)/aic79xx.seq
|
||||
|
||||
$(aic79xx-gen-y): $(obj)/aic79xx_seq.h
|
||||
else
|
||||
$(obj)/aic79xx_reg_print.c: $(src)/aic79xx_reg_print.c_shipped
|
||||
endif
|
||||
|
||||
$(obj)/aicasm/aicasm: $(src)/aicasm/*.[chyl]
|
||||
|
@ -1784,7 +1784,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
|
||||
if (scb->flags & SCB_SENSE) {
|
||||
sense_size = min(sizeof(struct scsi_sense_data)
|
||||
- ahd_get_sense_residual(scb),
|
||||
(u_long)sizeof(cmd->sense_buffer));
|
||||
(u_long)SCSI_SENSE_BUFFERSIZE);
|
||||
sense_offset = 0;
|
||||
} else {
|
||||
/*
|
||||
@ -1795,11 +1795,11 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
|
||||
scb->sense_data;
|
||||
sense_size = min_t(size_t,
|
||||
scsi_4btoul(siu->sense_length),
|
||||
sizeof(cmd->sense_buffer));
|
||||
SCSI_SENSE_BUFFERSIZE);
|
||||
sense_offset = SIU_SENSE_OFFSET(siu);
|
||||
}
|
||||
|
||||
memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
|
||||
memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
|
||||
memcpy(cmd->sense_buffer,
|
||||
ahd_get_sense_buf(ahd, scb)
|
||||
+ sense_offset, sense_size);
|
||||
|
@ -1801,12 +1801,12 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
|
||||
|
||||
sense_size = min(sizeof(struct scsi_sense_data)
|
||||
- ahc_get_sense_residual(scb),
|
||||
(u_long)sizeof(cmd->sense_buffer));
|
||||
(u_long)SCSI_SENSE_BUFFERSIZE);
|
||||
memcpy(cmd->sense_buffer,
|
||||
ahc_get_sense_buf(ahc, scb), sense_size);
|
||||
if (sense_size < sizeof(cmd->sense_buffer))
|
||||
if (sense_size < SCSI_SENSE_BUFFERSIZE)
|
||||
memset(&cmd->sense_buffer[sense_size], 0,
|
||||
sizeof(cmd->sense_buffer) - sense_size);
|
||||
SCSI_SENSE_BUFFERSIZE - sense_size);
|
||||
cmd->result |= (DRIVER_SENSE << 24);
|
||||
#ifdef AHC_DEBUG
|
||||
if (ahc_debug & AHC_SHOW_SENSE) {
|
||||
|
@ -2696,7 +2696,7 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
|
||||
{
|
||||
pci_unmap_single(p->pdev,
|
||||
le32_to_cpu(scb->sg_list[0].address),
|
||||
sizeof(cmd->sense_buffer),
|
||||
SCSI_SENSE_BUFFERSIZE,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
}
|
||||
if (scb->flags & SCB_RECOVERY_SCB)
|
||||
@ -4267,13 +4267,13 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
|
||||
sizeof(generic_sense));
|
||||
|
||||
scb->sense_cmd[1] = (cmd->device->lun << 5);
|
||||
scb->sense_cmd[4] = sizeof(cmd->sense_buffer);
|
||||
scb->sense_cmd[4] = SCSI_SENSE_BUFFERSIZE;
|
||||
|
||||
scb->sg_list[0].length =
|
||||
cpu_to_le32(sizeof(cmd->sense_buffer));
|
||||
cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
|
||||
scb->sg_list[0].address =
|
||||
cpu_to_le32(pci_map_single(p->pdev, cmd->sense_buffer,
|
||||
sizeof(cmd->sense_buffer),
|
||||
SCSI_SENSE_BUFFERSIZE,
|
||||
PCI_DMA_FROMDEVICE));
|
||||
|
||||
/*
|
||||
@ -4296,7 +4296,7 @@ aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat)
|
||||
hscb->residual_data_count[2] = 0;
|
||||
|
||||
scb->sg_count = hscb->SG_segment_count = 1;
|
||||
scb->sg_length = sizeof(cmd->sense_buffer);
|
||||
scb->sg_length = SCSI_SENSE_BUFFERSIZE;
|
||||
scb->tag_action = 0;
|
||||
scb->flags |= SCB_SENSE;
|
||||
/*
|
||||
@ -10293,7 +10293,6 @@ static int aic7xxx_queue(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
|
||||
aic7xxx_position(cmd) = scb->hscb->tag;
|
||||
cmd->scsi_done = fn;
|
||||
cmd->result = DID_OK;
|
||||
memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
|
||||
aic7xxx_error(cmd) = DID_OK;
|
||||
aic7xxx_status(cmd) = 0;
|
||||
cmd->host_scribble = NULL;
|
||||
|
@ -165,7 +165,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
|
||||
if (dev->port->oob_mode != SATA_OOB_MODE) {
|
||||
flags |= OPEN_REQUIRED;
|
||||
if ((dev->dev_type == SATA_DEV) ||
|
||||
(dev->tproto & SAS_PROTO_STP)) {
|
||||
(dev->tproto & SAS_PROTOCOL_STP)) {
|
||||
struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
|
||||
if (rps_resp->frame_type == SMP_RESPONSE &&
|
||||
rps_resp->function == SMP_REPORT_PHY_SATA &&
|
||||
@ -193,7 +193,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
|
||||
asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags);
|
||||
|
||||
flags = 0;
|
||||
if (dev->tproto & SAS_PROTO_STP)
|
||||
if (dev->tproto & SAS_PROTOCOL_STP)
|
||||
flags |= STP_CL_POL_NO_TX;
|
||||
asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS2, flags);
|
||||
|
||||
@ -201,7 +201,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
|
||||
asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF);
|
||||
asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
|
||||
|
||||
if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTO_STP)) {
|
||||
if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
|
||||
i = asd_init_sata(dev);
|
||||
if (i < 0) {
|
||||
asd_free_ddb(asd_ha, ddb);
|
||||
|
@ -903,11 +903,11 @@ void asd_dump_frame_rcvd(struct asd_phy *phy,
|
||||
int i;
|
||||
|
||||
switch ((dl->status_block[1] & 0x70) >> 3) {
|
||||
case SAS_PROTO_STP:
|
||||
case SAS_PROTOCOL_STP:
|
||||
ASD_DPRINTK("STP proto device-to-host FIS:\n");
|
||||
break;
|
||||
default:
|
||||
case SAS_PROTO_SSP:
|
||||
case SAS_PROTOCOL_SSP:
|
||||
ASD_DPRINTK("SAS proto IDENTIFY:\n");
|
||||
break;
|
||||
}
|
||||
|
@ -91,7 +91,7 @@ static int asd_init_phy(struct asd_phy *phy)
|
||||
|
||||
sas_phy->enabled = 1;
|
||||
sas_phy->class = SAS;
|
||||
sas_phy->iproto = SAS_PROTO_ALL;
|
||||
sas_phy->iproto = SAS_PROTOCOL_ALL;
|
||||
sas_phy->tproto = 0;
|
||||
sas_phy->type = PHY_TYPE_PHYSICAL;
|
||||
sas_phy->role = PHY_ROLE_INITIATOR;
|
||||
|
@ -72,6 +72,7 @@ struct flash_struct {
|
||||
u8 manuf;
|
||||
u8 dev_id;
|
||||
u8 sec_prot;
|
||||
u8 method;
|
||||
|
||||
u32 dir_offs;
|
||||
};
|
||||
@ -216,6 +217,8 @@ struct asd_ha_struct {
|
||||
struct dma_pool *scb_pool;
|
||||
|
||||
struct asd_seq_data seq; /* sequencer related */
|
||||
u32 bios_status;
|
||||
const struct firmware *bios_image;
|
||||
};
|
||||
|
||||
/* ---------- Common macros ---------- */
|
||||
|
@ -29,6 +29,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/firmware.h>
|
||||
|
||||
#include <scsi/scsi_host.h>
|
||||
|
||||
@ -36,6 +37,7 @@
|
||||
#include "aic94xx_reg.h"
|
||||
#include "aic94xx_hwi.h"
|
||||
#include "aic94xx_seq.h"
|
||||
#include "aic94xx_sds.h"
|
||||
|
||||
/* The format is "version.release.patchlevel" */
|
||||
#define ASD_DRIVER_VERSION "1.0.3"
|
||||
@ -134,7 +136,7 @@ Err:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void __devexit asd_unmap_memio(struct asd_ha_struct *asd_ha)
|
||||
static void asd_unmap_memio(struct asd_ha_struct *asd_ha)
|
||||
{
|
||||
struct asd_ha_addrspace *io_handle;
|
||||
|
||||
@ -171,7 +173,7 @@ static int __devinit asd_map_ioport(struct asd_ha_struct *asd_ha)
|
||||
return err;
|
||||
}
|
||||
|
||||
static void __devexit asd_unmap_ioport(struct asd_ha_struct *asd_ha)
|
||||
static void asd_unmap_ioport(struct asd_ha_struct *asd_ha)
|
||||
{
|
||||
pci_release_region(asd_ha->pcidev, PCI_IOBAR_OFFSET);
|
||||
}
|
||||
@ -208,7 +210,7 @@ Err:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void __devexit asd_unmap_ha(struct asd_ha_struct *asd_ha)
|
||||
static void asd_unmap_ha(struct asd_ha_struct *asd_ha)
|
||||
{
|
||||
if (asd_ha->iospace)
|
||||
asd_unmap_ioport(asd_ha);
|
||||
@ -313,6 +315,181 @@ static ssize_t asd_show_dev_pcba_sn(struct device *dev,
|
||||
}
|
||||
static DEVICE_ATTR(pcba_sn, S_IRUGO, asd_show_dev_pcba_sn, NULL);
|
||||
|
||||
#define FLASH_CMD_NONE 0x00
|
||||
#define FLASH_CMD_UPDATE 0x01
|
||||
#define FLASH_CMD_VERIFY 0x02
|
||||
|
||||
struct flash_command {
|
||||
u8 command[8];
|
||||
int code;
|
||||
};
|
||||
|
||||
static struct flash_command flash_command_table[] =
|
||||
{
|
||||
{"verify", FLASH_CMD_VERIFY},
|
||||
{"update", FLASH_CMD_UPDATE},
|
||||
{"", FLASH_CMD_NONE} /* Last entry should be NULL. */
|
||||
};
|
||||
|
||||
struct error_bios {
|
||||
char *reason;
|
||||
int err_code;
|
||||
};
|
||||
|
||||
static struct error_bios flash_error_table[] =
|
||||
{
|
||||
{"Failed to open bios image file", FAIL_OPEN_BIOS_FILE},
|
||||
{"PCI ID mismatch", FAIL_CHECK_PCI_ID},
|
||||
{"Checksum mismatch", FAIL_CHECK_SUM},
|
||||
{"Unknown Error", FAIL_UNKNOWN},
|
||||
{"Failed to verify.", FAIL_VERIFY},
|
||||
{"Failed to reset flash chip.", FAIL_RESET_FLASH},
|
||||
{"Failed to find flash chip type.", FAIL_FIND_FLASH_ID},
|
||||
{"Failed to erash flash chip.", FAIL_ERASE_FLASH},
|
||||
{"Failed to program flash chip.", FAIL_WRITE_FLASH},
|
||||
{"Flash in progress", FLASH_IN_PROGRESS},
|
||||
{"Image file size Error", FAIL_FILE_SIZE},
|
||||
{"Input parameter error", FAIL_PARAMETERS},
|
||||
{"Out of memory", FAIL_OUT_MEMORY},
|
||||
{"OK", 0} /* Last entry err_code = 0. */
|
||||
};
|
||||
|
||||
static ssize_t asd_store_update_bios(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
|
||||
char *cmd_ptr, *filename_ptr;
|
||||
struct bios_file_header header, *hdr_ptr;
|
||||
int res, i;
|
||||
u32 csum = 0;
|
||||
int flash_command = FLASH_CMD_NONE;
|
||||
int err = 0;
|
||||
|
||||
cmd_ptr = kzalloc(count*2, GFP_KERNEL);
|
||||
|
||||
if (!cmd_ptr) {
|
||||
err = FAIL_OUT_MEMORY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
filename_ptr = cmd_ptr + count;
|
||||
res = sscanf(buf, "%s %s", cmd_ptr, filename_ptr);
|
||||
if (res != 2) {
|
||||
err = FAIL_PARAMETERS;
|
||||
goto out1;
|
||||
}
|
||||
|
||||
for (i = 0; flash_command_table[i].code != FLASH_CMD_NONE; i++) {
|
||||
if (!memcmp(flash_command_table[i].command,
|
||||
cmd_ptr, strlen(cmd_ptr))) {
|
||||
flash_command = flash_command_table[i].code;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (flash_command == FLASH_CMD_NONE) {
|
||||
err = FAIL_PARAMETERS;
|
||||
goto out1;
|
||||
}
|
||||
|
||||
if (asd_ha->bios_status == FLASH_IN_PROGRESS) {
|
||||
err = FLASH_IN_PROGRESS;
|
||||
goto out1;
|
||||
}
|
||||
err = request_firmware(&asd_ha->bios_image,
|
||||
filename_ptr,
|
||||
&asd_ha->pcidev->dev);
|
||||
if (err) {
|
||||
asd_printk("Failed to load bios image file %s, error %d\n",
|
||||
filename_ptr, err);
|
||||
err = FAIL_OPEN_BIOS_FILE;
|
||||
goto out1;
|
||||
}
|
||||
|
||||
hdr_ptr = (struct bios_file_header *)asd_ha->bios_image->data;
|
||||
|
||||
if ((hdr_ptr->contrl_id.vendor != asd_ha->pcidev->vendor ||
|
||||
hdr_ptr->contrl_id.device != asd_ha->pcidev->device) &&
|
||||
(hdr_ptr->contrl_id.sub_vendor != asd_ha->pcidev->vendor ||
|
||||
hdr_ptr->contrl_id.sub_device != asd_ha->pcidev->device)) {
|
||||
|
||||
ASD_DPRINTK("The PCI vendor or device id does not match\n");
|
||||
ASD_DPRINTK("vendor=%x dev=%x sub_vendor=%x sub_dev=%x"
|
||||
" pci vendor=%x pci dev=%x\n",
|
||||
hdr_ptr->contrl_id.vendor,
|
||||
hdr_ptr->contrl_id.device,
|
||||
hdr_ptr->contrl_id.sub_vendor,
|
||||
hdr_ptr->contrl_id.sub_device,
|
||||
asd_ha->pcidev->vendor,
|
||||
asd_ha->pcidev->device);
|
||||
err = FAIL_CHECK_PCI_ID;
|
||||
goto out2;
|
||||
}
|
||||
|
||||
if (hdr_ptr->filelen != asd_ha->bios_image->size) {
|
||||
err = FAIL_FILE_SIZE;
|
||||
goto out2;
|
||||
}
|
||||
|
||||
/* calculate checksum */
|
||||
for (i = 0; i < hdr_ptr->filelen; i++)
|
||||
csum += asd_ha->bios_image->data[i];
|
||||
|
||||
if ((csum & 0x0000ffff) != hdr_ptr->checksum) {
|
||||
ASD_DPRINTK("BIOS file checksum mismatch\n");
|
||||
err = FAIL_CHECK_SUM;
|
||||
goto out2;
|
||||
}
|
||||
if (flash_command == FLASH_CMD_UPDATE) {
|
||||
asd_ha->bios_status = FLASH_IN_PROGRESS;
|
||||
err = asd_write_flash_seg(asd_ha,
|
||||
&asd_ha->bios_image->data[sizeof(*hdr_ptr)],
|
||||
0, hdr_ptr->filelen-sizeof(*hdr_ptr));
|
||||
if (!err)
|
||||
err = asd_verify_flash_seg(asd_ha,
|
||||
&asd_ha->bios_image->data[sizeof(*hdr_ptr)],
|
||||
0, hdr_ptr->filelen-sizeof(*hdr_ptr));
|
||||
} else {
|
||||
asd_ha->bios_status = FLASH_IN_PROGRESS;
|
||||
err = asd_verify_flash_seg(asd_ha,
|
||||
&asd_ha->bios_image->data[sizeof(header)],
|
||||
0, hdr_ptr->filelen-sizeof(header));
|
||||
}
|
||||
|
||||
out2:
|
||||
release_firmware(asd_ha->bios_image);
|
||||
out1:
|
||||
kfree(cmd_ptr);
|
||||
out:
|
||||
asd_ha->bios_status = err;
|
||||
|
||||
if (!err)
|
||||
return count;
|
||||
else
|
||||
return -err;
|
||||
}
|
||||
|
||||
static ssize_t asd_show_update_bios(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
int i;
|
||||
struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
|
||||
|
||||
for (i = 0; flash_error_table[i].err_code != 0; i++) {
|
||||
if (flash_error_table[i].err_code == asd_ha->bios_status)
|
||||
break;
|
||||
}
|
||||
if (asd_ha->bios_status != FLASH_IN_PROGRESS)
|
||||
asd_ha->bios_status = FLASH_OK;
|
||||
|
||||
return snprintf(buf, PAGE_SIZE, "status=%x %s\n",
|
||||
flash_error_table[i].err_code,
|
||||
flash_error_table[i].reason);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
|
||||
asd_show_update_bios, asd_store_update_bios);
|
||||
|
||||
static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
|
||||
{
|
||||
int err;
|
||||
@ -328,9 +505,14 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
|
||||
err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
|
||||
if (err)
|
||||
goto err_biosb;
|
||||
err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
|
||||
if (err)
|
||||
goto err_update_bios;
|
||||
|
||||
return 0;
|
||||
|
||||
err_update_bios:
|
||||
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
|
||||
err_biosb:
|
||||
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
|
||||
err_rev:
|
||||
@ -343,6 +525,7 @@ static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
|
||||
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
|
||||
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
|
||||
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
|
||||
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
|
||||
}
|
||||
|
||||
/* The first entry, 0, is used for dynamic ids, the rest for devices
|
||||
@ -589,6 +772,7 @@ static int __devinit asd_pci_probe(struct pci_dev *dev,
|
||||
asd_ha->sas_ha.dev = &asd_ha->pcidev->dev;
|
||||
asd_ha->sas_ha.lldd_ha = asd_ha;
|
||||
|
||||
asd_ha->bios_status = FLASH_OK;
|
||||
asd_ha->name = asd_dev->name;
|
||||
asd_printk("found %s, device %s\n", asd_ha->name, pci_name(dev));
|
||||
|
||||
|
@ -788,12 +788,12 @@ void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc)
|
||||
|
||||
/* initiator port settings are in the hi nibble */
|
||||
if (phy->sas_phy.role == PHY_ROLE_INITIATOR)
|
||||
control_phy->port_type = SAS_PROTO_ALL << 4;
|
||||
control_phy->port_type = SAS_PROTOCOL_ALL << 4;
|
||||
else if (phy->sas_phy.role == PHY_ROLE_TARGET)
|
||||
control_phy->port_type = SAS_PROTO_ALL;
|
||||
control_phy->port_type = SAS_PROTOCOL_ALL;
|
||||
else
|
||||
control_phy->port_type =
|
||||
(SAS_PROTO_ALL << 4) | SAS_PROTO_ALL;
|
||||
(SAS_PROTOCOL_ALL << 4) | SAS_PROTOCOL_ALL;
|
||||
|
||||
/* link reset retries, this should be nominal */
|
||||
control_phy->link_reset_retries = 10;
|
||||
|
@ -30,6 +30,7 @@
|
||||
|
||||
#include "aic94xx.h"
|
||||
#include "aic94xx_reg.h"
|
||||
#include "aic94xx_sds.h"
|
||||
|
||||
/* ---------- OCM stuff ---------- */
|
||||
|
||||
@ -1083,3 +1084,391 @@ out:
|
||||
kfree(flash_dir);
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* asd_verify_flash_seg - verify data with flash memory
|
||||
* @asd_ha: pointer to the host adapter structure
|
||||
* @src: pointer to the source data to be verified
|
||||
* @dest_offset: offset from flash memory
|
||||
* @bytes_to_verify: total bytes to verify
|
||||
*/
|
||||
int asd_verify_flash_seg(struct asd_ha_struct *asd_ha,
|
||||
void *src, u32 dest_offset, u32 bytes_to_verify)
|
||||
{
|
||||
u8 *src_buf;
|
||||
u8 flash_char;
|
||||
int err;
|
||||
u32 nv_offset, reg, i;
|
||||
|
||||
reg = asd_ha->hw_prof.flash.bar;
|
||||
src_buf = NULL;
|
||||
|
||||
err = FLASH_OK;
|
||||
nv_offset = dest_offset;
|
||||
src_buf = (u8 *)src;
|
||||
for (i = 0; i < bytes_to_verify; i++) {
|
||||
flash_char = asd_read_reg_byte(asd_ha, reg + nv_offset + i);
|
||||
if (flash_char != src_buf[i]) {
|
||||
err = FAIL_VERIFY;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* asd_write_flash_seg - write data into flash memory
|
||||
* @asd_ha: pointer to the host adapter structure
|
||||
* @src: pointer to the source data to be written
|
||||
* @dest_offset: offset from flash memory
|
||||
* @bytes_to_write: total bytes to write
|
||||
*/
|
||||
int asd_write_flash_seg(struct asd_ha_struct *asd_ha,
|
||||
void *src, u32 dest_offset, u32 bytes_to_write)
|
||||
{
|
||||
u8 *src_buf;
|
||||
u32 nv_offset, reg, i;
|
||||
int err;
|
||||
|
||||
reg = asd_ha->hw_prof.flash.bar;
|
||||
src_buf = NULL;
|
||||
|
||||
err = asd_check_flash_type(asd_ha);
|
||||
if (err) {
|
||||
ASD_DPRINTK("couldn't find the type of flash. err=%d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
nv_offset = dest_offset;
|
||||
err = asd_erase_nv_sector(asd_ha, nv_offset, bytes_to_write);
|
||||
if (err) {
|
||||
ASD_DPRINTK("Erase failed at offset:0x%x\n",
|
||||
nv_offset);
|
||||
return err;
|
||||
}
|
||||
|
||||
err = asd_reset_flash(asd_ha);
|
||||
if (err) {
|
||||
ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
src_buf = (u8 *)src;
|
||||
for (i = 0; i < bytes_to_write; i++) {
|
||||
/* Setup program command sequence */
|
||||
switch (asd_ha->hw_prof.flash.method) {
|
||||
case FLASH_METHOD_A:
|
||||
{
|
||||
asd_write_reg_byte(asd_ha,
|
||||
(reg + 0xAAA), 0xAA);
|
||||
asd_write_reg_byte(asd_ha,
|
||||
(reg + 0x555), 0x55);
|
||||
asd_write_reg_byte(asd_ha,
|
||||
(reg + 0xAAA), 0xA0);
|
||||
asd_write_reg_byte(asd_ha,
|
||||
(reg + nv_offset + i),
|
||||
(*(src_buf + i)));
|
||||
break;
|
||||
}
|
||||
case FLASH_METHOD_B:
|
||||
{
|
||||
asd_write_reg_byte(asd_ha,
|
||||
(reg + 0x555), 0xAA);
|
||||
asd_write_reg_byte(asd_ha,
|
||||
(reg + 0x2AA), 0x55);
|
||||
asd_write_reg_byte(asd_ha,
|
||||
(reg + 0x555), 0xA0);
|
||||
asd_write_reg_byte(asd_ha,
|
||||
(reg + nv_offset + i),
|
||||
(*(src_buf + i)));
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
if (asd_chk_write_status(asd_ha,
|
||||
(nv_offset + i), 0) != 0) {
|
||||
ASD_DPRINTK("aicx: Write failed at offset:0x%x\n",
|
||||
reg + nv_offset + i);
|
||||
return FAIL_WRITE_FLASH;
|
||||
}
|
||||
}
|
||||
|
||||
err = asd_reset_flash(asd_ha);
|
||||
if (err) {
|
||||
ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
|
||||
return err;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int asd_chk_write_status(struct asd_ha_struct *asd_ha,
|
||||
u32 sector_addr, u8 erase_flag)
|
||||
{
|
||||
u32 reg;
|
||||
u32 loop_cnt;
|
||||
u8 nv_data1, nv_data2;
|
||||
u8 toggle_bit1;
|
||||
|
||||
/*
|
||||
* Read from DQ2 requires sector address
|
||||
* while it's dont care for DQ6
|
||||
*/
|
||||
reg = asd_ha->hw_prof.flash.bar;
|
||||
|
||||
for (loop_cnt = 0; loop_cnt < 50000; loop_cnt++) {
|
||||
nv_data1 = asd_read_reg_byte(asd_ha, reg);
|
||||
nv_data2 = asd_read_reg_byte(asd_ha, reg);
|
||||
|
||||
toggle_bit1 = ((nv_data1 & FLASH_STATUS_BIT_MASK_DQ6)
|
||||
^ (nv_data2 & FLASH_STATUS_BIT_MASK_DQ6));
|
||||
|
||||
if (toggle_bit1 == 0) {
|
||||
return 0;
|
||||
} else {
|
||||
if (nv_data2 & FLASH_STATUS_BIT_MASK_DQ5) {
|
||||
nv_data1 = asd_read_reg_byte(asd_ha,
|
||||
reg);
|
||||
nv_data2 = asd_read_reg_byte(asd_ha,
|
||||
reg);
|
||||
toggle_bit1 =
|
||||
((nv_data1 & FLASH_STATUS_BIT_MASK_DQ6)
|
||||
^ (nv_data2 & FLASH_STATUS_BIT_MASK_DQ6));
|
||||
|
||||
if (toggle_bit1 == 0)
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* ERASE is a sector-by-sector operation and requires
|
||||
* more time to finish while WRITE is byte-byte-byte
|
||||
* operation and takes lesser time to finish.
|
||||
*
|
||||
* For some strange reason a reduced ERASE delay gives different
|
||||
* behaviour across different spirit boards. Hence we set
|
||||
* a optimum balance of 50mus for ERASE which works well
|
||||
* across all boards.
|
||||
*/
|
||||
if (erase_flag) {
|
||||
udelay(FLASH_STATUS_ERASE_DELAY_COUNT);
|
||||
} else {
|
||||
udelay(FLASH_STATUS_WRITE_DELAY_COUNT);
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
/**
|
||||
* asd_hwi_erase_nv_sector - Erase the flash memory sectors.
|
||||
* @asd_ha: pointer to the host adapter structure
|
||||
* @flash_addr: pointer to offset from flash memory
|
||||
* @size: total bytes to erase.
|
||||
*/
|
||||
int asd_erase_nv_sector(struct asd_ha_struct *asd_ha, u32 flash_addr, u32 size)
|
||||
{
|
||||
u32 reg;
|
||||
u32 sector_addr;
|
||||
|
||||
reg = asd_ha->hw_prof.flash.bar;
|
||||
|
||||
/* sector staring address */
|
||||
sector_addr = flash_addr & FLASH_SECTOR_SIZE_MASK;
|
||||
|
||||
/*
|
||||
* Erasing an flash sector needs to be done in six consecutive
|
||||
* write cyles.
|
||||
*/
|
||||
while (sector_addr < flash_addr+size) {
|
||||
switch (asd_ha->hw_prof.flash.method) {
|
||||
case FLASH_METHOD_A:
|
||||
asd_write_reg_byte(asd_ha, (reg + 0xAAA), 0xAA);
|
||||
asd_write_reg_byte(asd_ha, (reg + 0x555), 0x55);
|
||||
asd_write_reg_byte(asd_ha, (reg + 0xAAA), 0x80);
|
||||
asd_write_reg_byte(asd_ha, (reg + 0xAAA), 0xAA);
|
||||
asd_write_reg_byte(asd_ha, (reg + 0x555), 0x55);
|
||||
asd_write_reg_byte(asd_ha, (reg + sector_addr), 0x30);
|
||||
break;
|
||||
case FLASH_METHOD_B:
|
||||
asd_write_reg_byte(asd_ha, (reg + 0x555), 0xAA);
|
||||
asd_write_reg_byte(asd_ha, (reg + 0x2AA), 0x55);
|
||||
asd_write_reg_byte(asd_ha, (reg + 0x555), 0x80);
|
||||
asd_write_reg_byte(asd_ha, (reg + 0x555), 0xAA);
|
||||
asd_write_reg_byte(asd_ha, (reg + 0x2AA), 0x55);
|
||||
asd_write_reg_byte(asd_ha, (reg + sector_addr), 0x30);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (asd_chk_write_status(asd_ha, sector_addr, 1) != 0)
|
||||
return FAIL_ERASE_FLASH;
|
||||
|
||||
sector_addr += FLASH_SECTOR_SIZE;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int asd_check_flash_type(struct asd_ha_struct *asd_ha)
|
||||
{
|
||||
u8 manuf_id;
|
||||
u8 dev_id;
|
||||
u8 sec_prot;
|
||||
u32 inc;
|
||||
u32 reg;
|
||||
int err;
|
||||
|
||||
/* get Flash memory base address */
|
||||
reg = asd_ha->hw_prof.flash.bar;
|
||||
|
||||
/* Determine flash info */
|
||||
err = asd_reset_flash(asd_ha);
|
||||
if (err) {
|
||||
ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
asd_ha->hw_prof.flash.method = FLASH_METHOD_UNKNOWN;
|
||||
asd_ha->hw_prof.flash.manuf = FLASH_MANUF_ID_UNKNOWN;
|
||||
asd_ha->hw_prof.flash.dev_id = FLASH_DEV_ID_UNKNOWN;
|
||||
|
||||
/* Get flash info. This would most likely be AMD Am29LV family flash.
|
||||
* First try the sequence for word mode. It is the same as for
|
||||
* 008B (byte mode only), 160B (word mode) and 800D (word mode).
|
||||
*/
|
||||
inc = asd_ha->hw_prof.flash.wide ? 2 : 1;
|
||||
asd_write_reg_byte(asd_ha, reg + 0xAAA, 0xAA);
|
||||
asd_write_reg_byte(asd_ha, reg + 0x555, 0x55);
|
||||
asd_write_reg_byte(asd_ha, reg + 0xAAA, 0x90);
|
||||
manuf_id = asd_read_reg_byte(asd_ha, reg);
|
||||
dev_id = asd_read_reg_byte(asd_ha, reg + inc);
|
||||
sec_prot = asd_read_reg_byte(asd_ha, reg + inc + inc);
|
||||
/* Get out of autoselect mode. */
|
||||
err = asd_reset_flash(asd_ha);
|
||||
if (err) {
|
||||
ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
|
||||
return err;
|
||||
}
|
||||
ASD_DPRINTK("Flash MethodA manuf_id(0x%x) dev_id(0x%x) "
|
||||
"sec_prot(0x%x)\n", manuf_id, dev_id, sec_prot);
|
||||
err = asd_reset_flash(asd_ha);
|
||||
if (err != 0)
|
||||
return err;
|
||||
|
||||
switch (manuf_id) {
|
||||
case FLASH_MANUF_ID_AMD:
|
||||
switch (sec_prot) {
|
||||
case FLASH_DEV_ID_AM29LV800DT:
|
||||
case FLASH_DEV_ID_AM29LV640MT:
|
||||
case FLASH_DEV_ID_AM29F800B:
|
||||
asd_ha->hw_prof.flash.method = FLASH_METHOD_A;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case FLASH_MANUF_ID_ST:
|
||||
switch (sec_prot) {
|
||||
case FLASH_DEV_ID_STM29W800DT:
|
||||
case FLASH_DEV_ID_STM29LV640:
|
||||
asd_ha->hw_prof.flash.method = FLASH_METHOD_A;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case FLASH_MANUF_ID_FUJITSU:
|
||||
switch (sec_prot) {
|
||||
case FLASH_DEV_ID_MBM29LV800TE:
|
||||
case FLASH_DEV_ID_MBM29DL800TA:
|
||||
asd_ha->hw_prof.flash.method = FLASH_METHOD_A;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case FLASH_MANUF_ID_MACRONIX:
|
||||
switch (sec_prot) {
|
||||
case FLASH_DEV_ID_MX29LV800BT:
|
||||
asd_ha->hw_prof.flash.method = FLASH_METHOD_A;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if (asd_ha->hw_prof.flash.method == FLASH_METHOD_UNKNOWN) {
|
||||
err = asd_reset_flash(asd_ha);
|
||||
if (err) {
|
||||
ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Issue Unlock sequence for AM29LV008BT */
|
||||
asd_write_reg_byte(asd_ha, (reg + 0x555), 0xAA);
|
||||
asd_write_reg_byte(asd_ha, (reg + 0x2AA), 0x55);
|
||||
asd_write_reg_byte(asd_ha, (reg + 0x555), 0x90);
|
||||
manuf_id = asd_read_reg_byte(asd_ha, reg);
|
||||
dev_id = asd_read_reg_byte(asd_ha, reg + inc);
|
||||
sec_prot = asd_read_reg_byte(asd_ha, reg + inc + inc);
|
||||
|
||||
ASD_DPRINTK("Flash MethodB manuf_id(0x%x) dev_id(0x%x) sec_prot"
|
||||
"(0x%x)\n", manuf_id, dev_id, sec_prot);
|
||||
|
||||
err = asd_reset_flash(asd_ha);
|
||||
if (err != 0) {
|
||||
ASD_DPRINTK("couldn't reset flash. err=%d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
switch (manuf_id) {
|
||||
case FLASH_MANUF_ID_AMD:
|
||||
switch (dev_id) {
|
||||
case FLASH_DEV_ID_AM29LV008BT:
|
||||
asd_ha->hw_prof.flash.method = FLASH_METHOD_B;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case FLASH_MANUF_ID_ST:
|
||||
switch (dev_id) {
|
||||
case FLASH_DEV_ID_STM29008:
|
||||
asd_ha->hw_prof.flash.method = FLASH_METHOD_B;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case FLASH_MANUF_ID_FUJITSU:
|
||||
switch (dev_id) {
|
||||
case FLASH_DEV_ID_MBM29LV008TA:
|
||||
asd_ha->hw_prof.flash.method = FLASH_METHOD_B;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case FLASH_MANUF_ID_INTEL:
|
||||
switch (dev_id) {
|
||||
case FLASH_DEV_ID_I28LV00TAT:
|
||||
asd_ha->hw_prof.flash.method = FLASH_METHOD_B;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case FLASH_MANUF_ID_MACRONIX:
|
||||
switch (dev_id) {
|
||||
case FLASH_DEV_ID_I28LV00TAT:
|
||||
asd_ha->hw_prof.flash.method = FLASH_METHOD_B;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
return FAIL_FIND_FLASH_ID;
|
||||
}
|
||||
}
|
||||
|
||||
if (asd_ha->hw_prof.flash.method == FLASH_METHOD_UNKNOWN)
|
||||
return FAIL_FIND_FLASH_ID;
|
||||
|
||||
asd_ha->hw_prof.flash.manuf = manuf_id;
|
||||
asd_ha->hw_prof.flash.dev_id = dev_id;
|
||||
asd_ha->hw_prof.flash.sec_prot = sec_prot;
|
||||
return 0;
|
||||
}
|
||||
|
121
drivers/scsi/aic94xx/aic94xx_sds.h
Normal file
121
drivers/scsi/aic94xx/aic94xx_sds.h
Normal file
@ -0,0 +1,121 @@
|
||||
/*
|
||||
* Aic94xx SAS/SATA driver hardware interface header file.
|
||||
*
|
||||
* Copyright (C) 2005 Adaptec, Inc. All rights reserved.
|
||||
* Copyright (C) 2005 Gilbert Wu <gilbert_wu@adaptec.com>
|
||||
*
|
||||
* This file is licensed under GPLv2.
|
||||
*
|
||||
* This file is part of the aic94xx driver.
|
||||
*
|
||||
* The aic94xx driver is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation; version 2 of the
|
||||
* License.
|
||||
*
|
||||
* The aic94xx driver is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with the aic94xx driver; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*
|
||||
*/
|
||||
#ifndef _AIC94XX_SDS_H_
|
||||
#define _AIC94XX_SDS_H_
|
||||
|
||||
enum {
|
||||
FLASH_METHOD_UNKNOWN,
|
||||
FLASH_METHOD_A,
|
||||
FLASH_METHOD_B
|
||||
};
|
||||
|
||||
#define FLASH_MANUF_ID_AMD 0x01
|
||||
#define FLASH_MANUF_ID_ST 0x20
|
||||
#define FLASH_MANUF_ID_FUJITSU 0x04
|
||||
#define FLASH_MANUF_ID_MACRONIX 0xC2
|
||||
#define FLASH_MANUF_ID_INTEL 0x89
|
||||
#define FLASH_MANUF_ID_UNKNOWN 0xFF
|
||||
|
||||
#define FLASH_DEV_ID_AM29LV008BT 0x3E
|
||||
#define FLASH_DEV_ID_AM29LV800DT 0xDA
|
||||
#define FLASH_DEV_ID_STM29W800DT 0xD7
|
||||
#define FLASH_DEV_ID_STM29LV640 0xDE
|
||||
#define FLASH_DEV_ID_STM29008 0xEA
|
||||
#define FLASH_DEV_ID_MBM29LV800TE 0xDA
|
||||
#define FLASH_DEV_ID_MBM29DL800TA 0x4A
|
||||
#define FLASH_DEV_ID_MBM29LV008TA 0x3E
|
||||
#define FLASH_DEV_ID_AM29LV640MT 0x7E
|
||||
#define FLASH_DEV_ID_AM29F800B 0xD6
|
||||
#define FLASH_DEV_ID_MX29LV800BT 0xDA
|
||||
#define FLASH_DEV_ID_MX29LV008CT 0xDA
|
||||
#define FLASH_DEV_ID_I28LV00TAT 0x3E
|
||||
#define FLASH_DEV_ID_UNKNOWN 0xFF
|
||||
|
||||
/* status bit mask values */
|
||||
#define FLASH_STATUS_BIT_MASK_DQ6 0x40
|
||||
#define FLASH_STATUS_BIT_MASK_DQ5 0x20
|
||||
#define FLASH_STATUS_BIT_MASK_DQ2 0x04
|
||||
|
||||
/* minimum value in micro seconds needed for checking status */
|
||||
#define FLASH_STATUS_ERASE_DELAY_COUNT 50
|
||||
#define FLASH_STATUS_WRITE_DELAY_COUNT 25
|
||||
|
||||
#define FLASH_SECTOR_SIZE 0x010000
|
||||
#define FLASH_SECTOR_SIZE_MASK 0xffff0000
|
||||
|
||||
#define FLASH_OK 0x000000
|
||||
#define FAIL_OPEN_BIOS_FILE 0x000100
|
||||
#define FAIL_CHECK_PCI_ID 0x000200
|
||||
#define FAIL_CHECK_SUM 0x000300
|
||||
#define FAIL_UNKNOWN 0x000400
|
||||
#define FAIL_VERIFY 0x000500
|
||||
#define FAIL_RESET_FLASH 0x000600
|
||||
#define FAIL_FIND_FLASH_ID 0x000700
|
||||
#define FAIL_ERASE_FLASH 0x000800
|
||||
#define FAIL_WRITE_FLASH 0x000900
|
||||
#define FAIL_FILE_SIZE 0x000a00
|
||||
#define FAIL_PARAMETERS 0x000b00
|
||||
#define FAIL_OUT_MEMORY 0x000c00
|
||||
#define FLASH_IN_PROGRESS 0x001000
|
||||
|
||||
struct controller_id {
|
||||
u32 vendor; /* PCI Vendor ID */
|
||||
u32 device; /* PCI Device ID */
|
||||
u32 sub_vendor; /* PCI Subvendor ID */
|
||||
u32 sub_device; /* PCI Subdevice ID */
|
||||
};
|
||||
|
||||
struct image_info {
|
||||
u32 ImageId; /* Identifies the image */
|
||||
u32 ImageOffset; /* Offset the beginning of the file */
|
||||
u32 ImageLength; /* length of the image */
|
||||
u32 ImageChecksum; /* Image checksum */
|
||||
u32 ImageVersion; /* Version of the image, could be build number */
|
||||
};
|
||||
|
||||
struct bios_file_header {
|
||||
u8 signature[32]; /* Signature/Cookie to identify the file */
|
||||
u32 checksum; /*Entire file checksum with this field zero */
|
||||
u32 antidote; /* Entire file checksum with this field 0xFFFFFFFF */
|
||||
struct controller_id contrl_id; /*PCI id to identify the controller */
|
||||
u32 filelen; /*Length of the entire file*/
|
||||
u32 chunk_num; /*The chunk/part number for multiple Image files */
|
||||
u32 total_chunks; /*Total number of chunks/parts in the image file */
|
||||
u32 num_images; /* Number of images in the file */
|
||||
u32 build_num; /* Build number of this image */
|
||||
struct image_info image_header;
|
||||
};
|
||||
|
||||
int asd_verify_flash_seg(struct asd_ha_struct *asd_ha,
|
||||
void *src, u32 dest_offset, u32 bytes_to_verify);
|
||||
int asd_write_flash_seg(struct asd_ha_struct *asd_ha,
|
||||
void *src, u32 dest_offset, u32 bytes_to_write);
|
||||
int asd_chk_write_status(struct asd_ha_struct *asd_ha,
|
||||
u32 sector_addr, u8 erase_flag);
|
||||
int asd_check_flash_type(struct asd_ha_struct *asd_ha);
|
||||
int asd_erase_nv_sector(struct asd_ha_struct *asd_ha,
|
||||
u32 flash_addr, u32 size);
|
||||
#endif
|
@ -187,29 +187,13 @@ static void asd_get_response_tasklet(struct asd_ascb *ascb,
|
||||
ts->buf_valid_size = 0;
|
||||
edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
|
||||
r = edb->vaddr;
|
||||
if (task->task_proto == SAS_PROTO_SSP) {
|
||||
if (task->task_proto == SAS_PROTOCOL_SSP) {
|
||||
struct ssp_response_iu *iu =
|
||||
r + 16 + sizeof(struct ssp_frame_hdr);
|
||||
|
||||
ts->residual = le32_to_cpu(*(__le32 *)r);
|
||||
ts->resp = SAS_TASK_COMPLETE;
|
||||
if (iu->datapres == 0)
|
||||
ts->stat = iu->status;
|
||||
else if (iu->datapres == 1)
|
||||
ts->stat = iu->resp_data[3];
|
||||
else if (iu->datapres == 2) {
|
||||
ts->stat = SAM_CHECK_COND;
|
||||
ts->buf_valid_size = min((u32) SAS_STATUS_BUF_SIZE,
|
||||
be32_to_cpu(iu->sense_data_len));
|
||||
memcpy(ts->buf, iu->sense_data, ts->buf_valid_size);
|
||||
if (iu->status != SAM_CHECK_COND) {
|
||||
ASD_DPRINTK("device %llx sent sense data, but "
|
||||
"stat(0x%x) is not CHECK_CONDITION"
|
||||
"\n",
|
||||
SAS_ADDR(task->dev->sas_addr),
|
||||
iu->status);
|
||||
}
|
||||
}
|
||||
|
||||
sas_ssp_task_response(&asd_ha->pcidev->dev, task, iu);
|
||||
} else {
|
||||
struct ata_task_resp *resp = (void *) &ts->buf[0];
|
||||
|
||||
@ -341,14 +325,14 @@ Again:
|
||||
}
|
||||
|
||||
switch (task->task_proto) {
|
||||
case SATA_PROTO:
|
||||
case SAS_PROTO_STP:
|
||||
case SAS_PROTOCOL_SATA:
|
||||
case SAS_PROTOCOL_STP:
|
||||
asd_unbuild_ata_ascb(ascb);
|
||||
break;
|
||||
case SAS_PROTO_SMP:
|
||||
case SAS_PROTOCOL_SMP:
|
||||
asd_unbuild_smp_ascb(ascb);
|
||||
break;
|
||||
case SAS_PROTO_SSP:
|
||||
case SAS_PROTOCOL_SSP:
|
||||
asd_unbuild_ssp_ascb(ascb);
|
||||
default:
|
||||
break;
|
||||
@ -586,17 +570,17 @@ int asd_execute_task(struct sas_task *task, const int num,
|
||||
list_for_each_entry(a, &alist, list) {
|
||||
t = a->uldd_task;
|
||||
a->uldd_timer = 1;
|
||||
if (t->task_proto & SAS_PROTO_STP)
|
||||
t->task_proto = SAS_PROTO_STP;
|
||||
if (t->task_proto & SAS_PROTOCOL_STP)
|
||||
t->task_proto = SAS_PROTOCOL_STP;
|
||||
switch (t->task_proto) {
|
||||
case SATA_PROTO:
|
||||
case SAS_PROTO_STP:
|
||||
case SAS_PROTOCOL_SATA:
|
||||
case SAS_PROTOCOL_STP:
|
||||
res = asd_build_ata_ascb(a, t, gfp_flags);
|
||||
break;
|
||||
case SAS_PROTO_SMP:
|
||||
case SAS_PROTOCOL_SMP:
|
||||
res = asd_build_smp_ascb(a, t, gfp_flags);
|
||||
break;
|
||||
case SAS_PROTO_SSP:
|
||||
case SAS_PROTOCOL_SSP:
|
||||
res = asd_build_ssp_ascb(a, t, gfp_flags);
|
||||
break;
|
||||
default:
|
||||
@ -633,14 +617,14 @@ out_err_unmap:
|
||||
t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
|
||||
spin_unlock_irqrestore(&t->task_state_lock, flags);
|
||||
switch (t->task_proto) {
|
||||
case SATA_PROTO:
|
||||
case SAS_PROTO_STP:
|
||||
case SAS_PROTOCOL_SATA:
|
||||
case SAS_PROTOCOL_STP:
|
||||
asd_unbuild_ata_ascb(a);
|
||||
break;
|
||||
case SAS_PROTO_SMP:
|
||||
case SAS_PROTOCOL_SMP:
|
||||
asd_unbuild_smp_ascb(a);
|
||||
break;
|
||||
case SAS_PROTO_SSP:
|
||||
case SAS_PROTOCOL_SSP:
|
||||
asd_unbuild_ssp_ascb(a);
|
||||
default:
|
||||
break;
|
||||
|
@ -372,21 +372,21 @@ int asd_abort_task(struct sas_task *task)
|
||||
scb->header.opcode = ABORT_TASK;
|
||||
|
||||
switch (task->task_proto) {
|
||||
case SATA_PROTO:
|
||||
case SAS_PROTO_STP:
|
||||
case SAS_PROTOCOL_SATA:
|
||||
case SAS_PROTOCOL_STP:
|
||||
scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
|
||||
break;
|
||||
case SAS_PROTO_SSP:
|
||||
case SAS_PROTOCOL_SSP:
|
||||
scb->abort_task.proto_conn_rate = (1 << 4); /* SSP */
|
||||
scb->abort_task.proto_conn_rate |= task->dev->linkrate;
|
||||
break;
|
||||
case SAS_PROTO_SMP:
|
||||
case SAS_PROTOCOL_SMP:
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (task->task_proto == SAS_PROTO_SSP) {
|
||||
if (task->task_proto == SAS_PROTOCOL_SSP) {
|
||||
scb->abort_task.ssp_frame.frame_type = SSP_TASK;
|
||||
memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
|
||||
task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
|
||||
@ -512,7 +512,7 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
|
||||
int res = 1;
|
||||
struct scb *scb;
|
||||
|
||||
if (!(dev->tproto & SAS_PROTO_SSP))
|
||||
if (!(dev->tproto & SAS_PROTOCOL_SSP))
|
||||
return TMF_RESP_FUNC_ESUPP;
|
||||
|
||||
ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
|
||||
|
@ -634,9 +634,9 @@ static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
|
||||
pcmd->result = DID_OK << 16;
|
||||
if (sensebuffer) {
|
||||
int sense_data_length =
|
||||
sizeof(struct SENSE_DATA) < sizeof(pcmd->sense_buffer)
|
||||
? sizeof(struct SENSE_DATA) : sizeof(pcmd->sense_buffer);
|
||||
memset(sensebuffer, 0, sizeof(pcmd->sense_buffer));
|
||||
sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
|
||||
? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
|
||||
memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
|
||||
memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
|
||||
sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
|
||||
sensebuffer->Valid = 1;
|
||||
|
@ -511,9 +511,9 @@ static inline void initialize_SCp(Scsi_Cmnd *cmd)
|
||||
* various queues are valid.
|
||||
*/
|
||||
|
||||
if (cmd->use_sg) {
|
||||
cmd->SCp.buffer = (struct scatterlist *)cmd->request_buffer;
|
||||
cmd->SCp.buffers_residual = cmd->use_sg - 1;
|
||||
if (scsi_bufflen(cmd)) {
|
||||
cmd->SCp.buffer = scsi_sglist(cmd);
|
||||
cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
|
||||
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
|
||||
cmd->SCp.this_residual = cmd->SCp.buffer->length;
|
||||
/* ++roman: Try to merge some scatter-buffers if they are at
|
||||
@ -523,8 +523,8 @@ static inline void initialize_SCp(Scsi_Cmnd *cmd)
|
||||
} else {
|
||||
cmd->SCp.buffer = NULL;
|
||||
cmd->SCp.buffers_residual = 0;
|
||||
cmd->SCp.ptr = (char *)cmd->request_buffer;
|
||||
cmd->SCp.this_residual = cmd->request_bufflen;
|
||||
cmd->SCp.ptr = NULL;
|
||||
cmd->SCp.this_residual = 0;
|
||||
}
|
||||
}
|
||||
|
||||
@ -936,21 +936,21 @@ static int NCR5380_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
|
||||
}
|
||||
# endif
|
||||
# ifdef NCR5380_STAT_LIMIT
|
||||
if (cmd->request_bufflen > NCR5380_STAT_LIMIT)
|
||||
if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
|
||||
# endif
|
||||
switch (cmd->cmnd[0]) {
|
||||
case WRITE:
|
||||
case WRITE_6:
|
||||
case WRITE_10:
|
||||
hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase);
|
||||
hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen;
|
||||
hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);
|
||||
hostdata->pendingw++;
|
||||
break;
|
||||
case READ:
|
||||
case READ_6:
|
||||
case READ_10:
|
||||
hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase);
|
||||
hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen;
|
||||
hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);
|
||||
hostdata->pendingr++;
|
||||
break;
|
||||
}
|
||||
@ -1352,21 +1352,21 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
|
||||
static void collect_stats(struct NCR5380_hostdata* hostdata, Scsi_Cmnd *cmd)
|
||||
{
|
||||
# ifdef NCR5380_STAT_LIMIT
|
||||
if (cmd->request_bufflen > NCR5380_STAT_LIMIT)
|
||||
if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
|
||||
# endif
|
||||
switch (cmd->cmnd[0]) {
|
||||
case WRITE:
|
||||
case WRITE_6:
|
||||
case WRITE_10:
|
||||
hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase);
|
||||
/*hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen;*/
|
||||
/*hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);*/
|
||||
hostdata->pendingw--;
|
||||
break;
|
||||
case READ:
|
||||
case READ_6:
|
||||
case READ_10:
|
||||
hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase);
|
||||
/*hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen;*/
|
||||
/*hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);*/
|
||||
hostdata->pendingr--;
|
||||
break;
|
||||
}
|
||||
@ -1868,7 +1868,7 @@ static int do_abort(struct Scsi_Host *host)
|
||||
* the target sees, so we just handshake.
|
||||
*/
|
||||
|
||||
while (!(tmp = NCR5380_read(STATUS_REG)) & SR_REQ)
|
||||
while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ))
|
||||
;
|
||||
|
||||
NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
|
||||
|
@ -471,18 +471,8 @@ go_42:
|
||||
/*
|
||||
* Complete the command
|
||||
*/
|
||||
if (workreq->use_sg) {
|
||||
pci_unmap_sg(dev->pdev,
|
||||
(struct scatterlist *)workreq->request_buffer,
|
||||
workreq->use_sg,
|
||||
workreq->sc_data_direction);
|
||||
} else if (workreq->request_bufflen &&
|
||||
workreq->sc_data_direction != DMA_NONE) {
|
||||
pci_unmap_single(dev->pdev,
|
||||
workreq->SCp.dma_handle,
|
||||
workreq->request_bufflen,
|
||||
workreq->sc_data_direction);
|
||||
}
|
||||
scsi_dma_unmap(workreq);
|
||||
|
||||
spin_lock_irqsave(dev->host->host_lock, flags);
|
||||
(*workreq->scsi_done) (workreq);
|
||||
#ifdef ED_DBGP
|
||||
@ -624,7 +614,7 @@ static int atp870u_queuecommand(struct scsi_cmnd * req_p,
|
||||
|
||||
c = scmd_channel(req_p);
|
||||
req_p->sense_buffer[0]=0;
|
||||
req_p->resid = 0;
|
||||
scsi_set_resid(req_p, 0);
|
||||
if (scmd_channel(req_p) > 1) {
|
||||
req_p->result = 0x00040000;
|
||||
done(req_p);
|
||||
@ -722,7 +712,6 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
|
||||
unsigned short int tmpcip, w;
|
||||
unsigned long l, bttl = 0;
|
||||
unsigned int workport;
|
||||
struct scatterlist *sgpnt;
|
||||
unsigned long sg_count;
|
||||
|
||||
if (dev->in_snd[c] != 0) {
|
||||
@ -793,6 +782,8 @@ oktosend:
|
||||
}
|
||||
printk("\n");
|
||||
#endif
|
||||
l = scsi_bufflen(workreq);
|
||||
|
||||
if (dev->dev_id == ATP885_DEVID) {
|
||||
j = inb(dev->baseport + 0x29) & 0xfe;
|
||||
outb(j, dev->baseport + 0x29);
|
||||
@ -800,12 +791,11 @@ oktosend:
|
||||
}
|
||||
|
||||
if (workreq->cmnd[0] == READ_CAPACITY) {
|
||||
if (workreq->request_bufflen > 8) {
|
||||
workreq->request_bufflen = 0x08;
|
||||
}
|
||||
if (l > 8)
|
||||
l = 8;
|
||||
}
|
||||
if (workreq->cmnd[0] == 0x00) {
|
||||
workreq->request_bufflen = 0;
|
||||
l = 0;
|
||||
}
|
||||
|
||||
tmport = workport + 0x1b;
|
||||
@ -852,40 +842,8 @@ oktosend:
|
||||
#ifdef ED_DBGP
|
||||
printk("dev->id[%d][%d].devsp = %2x\n",c,target_id,dev->id[c][target_id].devsp);
|
||||
#endif
|
||||
/*
|
||||
* Figure out the transfer size
|
||||
*/
|
||||
if (workreq->use_sg) {
|
||||
#ifdef ED_DBGP
|
||||
printk("Using SGL\n");
|
||||
#endif
|
||||
l = 0;
|
||||
|
||||
sgpnt = (struct scatterlist *) workreq->request_buffer;
|
||||
sg_count = pci_map_sg(dev->pdev, sgpnt, workreq->use_sg,
|
||||
workreq->sc_data_direction);
|
||||
|
||||
for (i = 0; i < workreq->use_sg; i++) {
|
||||
if (sgpnt[i].length == 0 || workreq->use_sg > ATP870U_SCATTER) {
|
||||
panic("Foooooooood fight!");
|
||||
}
|
||||
l += sgpnt[i].length;
|
||||
}
|
||||
#ifdef ED_DBGP
|
||||
printk( "send_s870: workreq->use_sg %d, sg_count %d l %8ld\n", workreq->use_sg, sg_count, l);
|
||||
#endif
|
||||
} else if(workreq->request_bufflen && workreq->sc_data_direction != PCI_DMA_NONE) {
|
||||
#ifdef ED_DBGP
|
||||
printk("Not using SGL\n");
|
||||
#endif
|
||||
workreq->SCp.dma_handle = pci_map_single(dev->pdev, workreq->request_buffer,
|
||||
workreq->request_bufflen,
|
||||
workreq->sc_data_direction);
|
||||
l = workreq->request_bufflen;
|
||||
#ifdef ED_DBGP
|
||||
printk( "send_s870: workreq->use_sg %d, l %8ld\n", workreq->use_sg, l);
|
||||
#endif
|
||||
} else l = 0;
|
||||
sg_count = scsi_dma_map(workreq);
|
||||
/*
|
||||
* Write transfer size
|
||||
*/
|
||||
@ -938,12 +896,12 @@ oktosend:
|
||||
* a linear chain.
|
||||
*/
|
||||
|
||||
if (workreq->use_sg) {
|
||||
sgpnt = (struct scatterlist *) workreq->request_buffer;
|
||||
if (l) {
|
||||
struct scatterlist *sgpnt;
|
||||
i = 0;
|
||||
for (j = 0; j < workreq->use_sg; j++) {
|
||||
bttl = sg_dma_address(&sgpnt[j]);
|
||||
l=sg_dma_len(&sgpnt[j]);
|
||||
scsi_for_each_sg(workreq, sgpnt, sg_count, j) {
|
||||
bttl = sg_dma_address(sgpnt);
|
||||
l=sg_dma_len(sgpnt);
|
||||
#ifdef ED_DBGP
|
||||
printk("1. bttl %x, l %x\n",bttl, l);
|
||||
#endif
|
||||
@ -965,32 +923,6 @@ oktosend:
|
||||
printk("prd %4x %4x %4x %4x\n",(((unsigned short int *)prd)[0]),(((unsigned short int *)prd)[1]),(((unsigned short int *)prd)[2]),(((unsigned short int *)prd)[3]));
|
||||
printk("2. bttl %x, l %x\n",bttl, l);
|
||||
#endif
|
||||
} else {
|
||||
/*
|
||||
* For a linear request write a chain of blocks
|
||||
*/
|
||||
bttl = workreq->SCp.dma_handle;
|
||||
l = workreq->request_bufflen;
|
||||
i = 0;
|
||||
#ifdef ED_DBGP
|
||||
printk("3. bttl %x, l %x\n",bttl, l);
|
||||
#endif
|
||||
while (l > 0x10000) {
|
||||
(((u16 *) (prd))[i + 3]) = 0x0000;
|
||||
(((u16 *) (prd))[i + 2]) = 0x0000;
|
||||
(((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl);
|
||||
l -= 0x10000;
|
||||
bttl += 0x10000;
|
||||
i += 0x04;
|
||||
}
|
||||
(((u16 *) (prd))[i + 3]) = cpu_to_le16(0x8000);
|
||||
(((u16 *) (prd))[i + 2]) = cpu_to_le16(l);
|
||||
(((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl);
|
||||
#ifdef ED_DBGP
|
||||
printk("prd %4x %4x %4x %4x\n",(((unsigned short int *)prd)[0]),(((unsigned short int *)prd)[1]),(((unsigned short int *)prd)[2]),(((unsigned short int *)prd)[3]));
|
||||
printk("4. bttl %x, l %x\n",bttl, l);
|
||||
#endif
|
||||
|
||||
}
|
||||
tmpcip += 4;
|
||||
#ifdef ED_DBGP
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include <linux/compat.h>
|
||||
#include <linux/chio.h> /* here are all the ioctls */
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/idr.h>
|
||||
|
||||
#include <scsi/scsi.h>
|
||||
#include <scsi/scsi_cmnd.h>
|
||||
@ -33,6 +34,7 @@
|
||||
|
||||
#define CH_DT_MAX 16
|
||||
#define CH_TYPES 8
|
||||
#define CH_MAX_DEVS 128
|
||||
|
||||
MODULE_DESCRIPTION("device driver for scsi media changer devices");
|
||||
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org>");
|
||||
@ -88,17 +90,6 @@ static const char * vendor_labels[CH_TYPES-4] = {
|
||||
|
||||
#define MAX_RETRIES 1
|
||||
|
||||
static int ch_probe(struct device *);
|
||||
static int ch_remove(struct device *);
|
||||
static int ch_open(struct inode * inode, struct file * filp);
|
||||
static int ch_release(struct inode * inode, struct file * filp);
|
||||
static int ch_ioctl(struct inode * inode, struct file * filp,
|
||||
unsigned int cmd, unsigned long arg);
|
||||
#ifdef CONFIG_COMPAT
|
||||
static long ch_ioctl_compat(struct file * filp,
|
||||
unsigned int cmd, unsigned long arg);
|
||||
#endif
|
||||
|
||||
static struct class * ch_sysfs_class;
|
||||
|
||||
typedef struct {
|
||||
@ -114,30 +105,8 @@ typedef struct {
|
||||
struct mutex lock;
|
||||
} scsi_changer;
|
||||
|
||||
static LIST_HEAD(ch_devlist);
|
||||
static DEFINE_SPINLOCK(ch_devlist_lock);
|
||||
static int ch_devcount;
|
||||
|
||||
static struct scsi_driver ch_template =
|
||||
{
|
||||
.owner = THIS_MODULE,
|
||||
.gendrv = {
|
||||
.name = "ch",
|
||||
.probe = ch_probe,
|
||||
.remove = ch_remove,
|
||||
},
|
||||
};
|
||||
|
||||
static const struct file_operations changer_fops =
|
||||
{
|
||||
.owner = THIS_MODULE,
|
||||
.open = ch_open,
|
||||
.release = ch_release,
|
||||
.ioctl = ch_ioctl,
|
||||
#ifdef CONFIG_COMPAT
|
||||
.compat_ioctl = ch_ioctl_compat,
|
||||
#endif
|
||||
};
|
||||
static DEFINE_IDR(ch_index_idr);
|
||||
static DEFINE_SPINLOCK(ch_index_lock);
|
||||
|
||||
static const struct {
|
||||
unsigned char sense;
|
||||
@ -599,20 +568,17 @@ ch_release(struct inode *inode, struct file *file)
|
||||
static int
|
||||
ch_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
scsi_changer *tmp, *ch;
|
||||
scsi_changer *ch;
|
||||
int minor = iminor(inode);
|
||||
|
||||
spin_lock(&ch_devlist_lock);
|
||||
ch = NULL;
|
||||
list_for_each_entry(tmp,&ch_devlist,list) {
|
||||
if (tmp->minor == minor)
|
||||
ch = tmp;
|
||||
}
|
||||
spin_lock(&ch_index_lock);
|
||||
ch = idr_find(&ch_index_idr, minor);
|
||||
|
||||
if (NULL == ch || scsi_device_get(ch->device)) {
|
||||
spin_unlock(&ch_devlist_lock);
|
||||
spin_unlock(&ch_index_lock);
|
||||
return -ENXIO;
|
||||
}
|
||||
spin_unlock(&ch_devlist_lock);
|
||||
spin_unlock(&ch_index_lock);
|
||||
|
||||
file->private_data = ch;
|
||||
return 0;
|
||||
@ -626,7 +592,7 @@ ch_checkrange(scsi_changer *ch, unsigned int type, unsigned int unit)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ch_ioctl(struct inode * inode, struct file * file,
|
||||
static long ch_ioctl(struct file *file,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
scsi_changer *ch = file->private_data;
|
||||
@ -887,8 +853,7 @@ static long ch_ioctl_compat(struct file * file,
|
||||
case CHIOINITELEM:
|
||||
case CHIOSVOLTAG:
|
||||
/* compatible */
|
||||
return ch_ioctl(NULL /* inode, unused */,
|
||||
file, cmd, arg);
|
||||
return ch_ioctl(file, cmd, arg);
|
||||
case CHIOGSTATUS32:
|
||||
{
|
||||
struct changer_element_status32 ces32;
|
||||
@ -915,6 +880,8 @@ static long ch_ioctl_compat(struct file * file,
|
||||
static int ch_probe(struct device *dev)
|
||||
{
|
||||
struct scsi_device *sd = to_scsi_device(dev);
|
||||
struct class_device *class_dev;
|
||||
int minor, ret = -ENOMEM;
|
||||
scsi_changer *ch;
|
||||
|
||||
if (sd->type != TYPE_MEDIUM_CHANGER)
|
||||
@ -924,50 +891,85 @@ static int ch_probe(struct device *dev)
|
||||
if (NULL == ch)
|
||||
return -ENOMEM;
|
||||
|
||||
ch->minor = ch_devcount;
|
||||
if (!idr_pre_get(&ch_index_idr, GFP_KERNEL))
|
||||
goto free_ch;
|
||||
|
||||
spin_lock(&ch_index_lock);
|
||||
ret = idr_get_new(&ch_index_idr, ch, &minor);
|
||||
spin_unlock(&ch_index_lock);
|
||||
|
||||
if (ret)
|
||||
goto free_ch;
|
||||
|
||||
if (minor > CH_MAX_DEVS) {
|
||||
ret = -ENODEV;
|
||||
goto remove_idr;
|
||||
}
|
||||
|
||||
ch->minor = minor;
|
||||
sprintf(ch->name,"ch%d",ch->minor);
|
||||
|
||||
class_dev = class_device_create(ch_sysfs_class, NULL,
|
||||
MKDEV(SCSI_CHANGER_MAJOR, ch->minor),
|
||||
dev, "s%s", ch->name);
|
||||
if (IS_ERR(class_dev)) {
|
||||
printk(KERN_WARNING "ch%d: class_device_create failed\n",
|
||||
ch->minor);
|
||||
ret = PTR_ERR(class_dev);
|
||||
goto remove_idr;
|
||||
}
|
||||
|
||||
mutex_init(&ch->lock);
|
||||
ch->device = sd;
|
||||
ch_readconfig(ch);
|
||||
if (init)
|
||||
ch_init_elem(ch);
|
||||
|
||||
class_device_create(ch_sysfs_class, NULL,
|
||||
MKDEV(SCSI_CHANGER_MAJOR,ch->minor),
|
||||
dev, "s%s", ch->name);
|
||||
|
||||
dev_set_drvdata(dev, ch);
|
||||
sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name);
|
||||
|
||||
spin_lock(&ch_devlist_lock);
|
||||
list_add_tail(&ch->list,&ch_devlist);
|
||||
ch_devcount++;
|
||||
spin_unlock(&ch_devlist_lock);
|
||||
return 0;
|
||||
remove_idr:
|
||||
idr_remove(&ch_index_idr, minor);
|
||||
free_ch:
|
||||
kfree(ch);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ch_remove(struct device *dev)
|
||||
{
|
||||
struct scsi_device *sd = to_scsi_device(dev);
|
||||
scsi_changer *tmp, *ch;
|
||||
scsi_changer *ch = dev_get_drvdata(dev);
|
||||
|
||||
spin_lock(&ch_devlist_lock);
|
||||
ch = NULL;
|
||||
list_for_each_entry(tmp,&ch_devlist,list) {
|
||||
if (tmp->device == sd)
|
||||
ch = tmp;
|
||||
}
|
||||
BUG_ON(NULL == ch);
|
||||
list_del(&ch->list);
|
||||
spin_unlock(&ch_devlist_lock);
|
||||
spin_lock(&ch_index_lock);
|
||||
idr_remove(&ch_index_idr, ch->minor);
|
||||
spin_unlock(&ch_index_lock);
|
||||
|
||||
class_device_destroy(ch_sysfs_class,
|
||||
MKDEV(SCSI_CHANGER_MAJOR,ch->minor));
|
||||
kfree(ch->dt);
|
||||
kfree(ch);
|
||||
ch_devcount--;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct scsi_driver ch_template = {
|
||||
.owner = THIS_MODULE,
|
||||
.gendrv = {
|
||||
.name = "ch",
|
||||
.probe = ch_probe,
|
||||
.remove = ch_remove,
|
||||
},
|
||||
};
|
||||
|
||||
static const struct file_operations changer_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = ch_open,
|
||||
.release = ch_release,
|
||||
.unlocked_ioctl = ch_ioctl,
|
||||
#ifdef CONFIG_COMPAT
|
||||
.compat_ioctl = ch_ioctl_compat,
|
||||
#endif
|
||||
};
|
||||
|
||||
static int __init init_ch_module(void)
|
||||
{
|
||||
int rc;
|
||||
@ -1001,6 +1003,7 @@ static void __exit exit_ch_module(void)
|
||||
scsi_unregister_driver(&ch_template.gendrv);
|
||||
unregister_chrdev(SCSI_CHANGER_MAJOR, "ch");
|
||||
class_destroy(ch_sysfs_class);
|
||||
idr_destroy(&ch_index_idr);
|
||||
}
|
||||
|
||||
module_init(init_ch_module);
|
||||
|
@ -362,7 +362,6 @@ void scsi_print_command(struct scsi_cmnd *cmd)
|
||||
EXPORT_SYMBOL(scsi_print_command);
|
||||
|
||||
/**
|
||||
*
|
||||
* scsi_print_status - print scsi status description
|
||||
* @scsi_status: scsi status value
|
||||
*
|
||||
@ -1369,7 +1368,7 @@ EXPORT_SYMBOL(scsi_print_sense);
|
||||
static const char * const hostbyte_table[]={
|
||||
"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
|
||||
"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
|
||||
"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY"};
|
||||
"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE"};
|
||||
#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table)
|
||||
|
||||
static const char * const driverbyte_table[]={
|
||||
|
@ -1629,8 +1629,7 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
|
||||
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
|
||||
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
|
||||
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
|
||||
DC395x_write8(acb, TRM_S1040_SCSI_FIFO,
|
||||
sizeof(srb->cmd->sense_buffer));
|
||||
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
|
||||
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
|
||||
} else {
|
||||
ptr = (u8 *)srb->cmd->cmnd;
|
||||
@ -1915,8 +1914,7 @@ static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
|
||||
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
|
||||
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
|
||||
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
|
||||
DC395x_write8(acb, TRM_S1040_SCSI_FIFO,
|
||||
sizeof(srb->cmd->sense_buffer));
|
||||
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
|
||||
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
|
||||
}
|
||||
srb->state |= SRB_COMMAND;
|
||||
@ -3685,7 +3683,7 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
|
||||
srb->target_status = 0;
|
||||
|
||||
/* KG: Can this prevent crap sense data ? */
|
||||
memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
|
||||
memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
|
||||
|
||||
/* Save some data */
|
||||
srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address =
|
||||
@ -3694,15 +3692,15 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
|
||||
srb->segment_x[0].length;
|
||||
srb->xferred = srb->total_xfer_length;
|
||||
/* srb->segment_x : a one entry of S/G list table */
|
||||
srb->total_xfer_length = sizeof(cmd->sense_buffer);
|
||||
srb->segment_x[0].length = sizeof(cmd->sense_buffer);
|
||||
srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
|
||||
srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
|
||||
/* Map sense buffer */
|
||||
srb->segment_x[0].address =
|
||||
pci_map_single(acb->dev, cmd->sense_buffer,
|
||||
sizeof(cmd->sense_buffer), PCI_DMA_FROMDEVICE);
|
||||
SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
|
||||
dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
|
||||
cmd->sense_buffer, srb->segment_x[0].address,
|
||||
sizeof(cmd->sense_buffer));
|
||||
SCSI_SENSE_BUFFERSIZE);
|
||||
srb->sg_count = 1;
|
||||
srb->sg_index = 0;
|
||||
|
||||
|
@ -2296,9 +2296,8 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
|
||||
|
||||
// copy over the request sense data if it was a check
|
||||
// condition status
|
||||
if(dev_status == 0x02 /*CHECK_CONDITION*/) {
|
||||
u32 len = sizeof(cmd->sense_buffer);
|
||||
len = (len > 40) ? 40 : len;
|
||||
if (dev_status == SAM_STAT_CHECK_CONDITION) {
|
||||
u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
|
||||
// Copy over the sense data
|
||||
memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
|
||||
if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
|
||||
|
@ -1623,9 +1623,9 @@ static void map_dma(unsigned int i, struct hostdata *ha)
|
||||
if (SCpnt->sense_buffer)
|
||||
cpp->sense_addr =
|
||||
H2DEV(pci_map_single(ha->pdev, SCpnt->sense_buffer,
|
||||
sizeof SCpnt->sense_buffer, PCI_DMA_FROMDEVICE));
|
||||
SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE));
|
||||
|
||||
cpp->sense_len = sizeof SCpnt->sense_buffer;
|
||||
cpp->sense_len = SCSI_SENSE_BUFFERSIZE;
|
||||
|
||||
count = scsi_dma_map(SCpnt);
|
||||
BUG_ON(count < 0);
|
||||
|
@ -369,7 +369,6 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
|
||||
cp = &hd->ccb[y];
|
||||
|
||||
memset(cp, 0, sizeof(struct eata_ccb));
|
||||
memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
|
||||
|
||||
cp->status = USED; /* claim free slot */
|
||||
|
||||
@ -385,7 +384,7 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
|
||||
cp->DataIn = 0; /* Input mode */
|
||||
|
||||
cp->Interpret = (cmd->device->id == hd->hostid);
|
||||
cp->cp_datalen = cpu_to_be32(cmd->request_bufflen);
|
||||
cp->cp_datalen = cpu_to_be32(scsi_bufflen(cmd));
|
||||
cp->Auto_Req_Sen = 0;
|
||||
cp->cp_reqDMA = 0;
|
||||
cp->reqlen = 0;
|
||||
@ -402,14 +401,14 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
|
||||
cp->cmd = cmd;
|
||||
cmd->host_scribble = (char *) &hd->ccb[y];
|
||||
|
||||
if (cmd->use_sg == 0) {
|
||||
if (!scsi_bufflen(cmd)) {
|
||||
cmd->SCp.buffers_residual = 1;
|
||||
cmd->SCp.ptr = cmd->request_buffer;
|
||||
cmd->SCp.this_residual = cmd->request_bufflen;
|
||||
cmd->SCp.ptr = NULL;
|
||||
cmd->SCp.this_residual = 0;
|
||||
cmd->SCp.buffer = NULL;
|
||||
} else {
|
||||
cmd->SCp.buffer = cmd->request_buffer;
|
||||
cmd->SCp.buffers_residual = cmd->use_sg;
|
||||
cmd->SCp.buffer = scsi_sglist(cmd);
|
||||
cmd->SCp.buffers_residual = scsi_sg_count(cmd);
|
||||
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
|
||||
cmd->SCp.this_residual = cmd->SCp.buffer->length;
|
||||
}
|
||||
|
@ -1017,24 +1017,6 @@ static irqreturn_t fd_mcs_intr(int irq, void *dev_id)
|
||||
printk(" ** IN DONE %d ** ", current_SC->SCp.have_data_in);
|
||||
#endif
|
||||
|
||||
#if ERRORS_ONLY
|
||||
if (current_SC->cmnd[0] == REQUEST_SENSE && !current_SC->SCp.Status) {
|
||||
if ((unsigned char) (*((char *) current_SC->request_buffer + 2)) & 0x0f) {
|
||||
unsigned char key;
|
||||
unsigned char code;
|
||||
unsigned char qualifier;
|
||||
|
||||
key = (unsigned char) (*((char *) current_SC->request_buffer + 2)) & 0x0f;
|
||||
code = (unsigned char) (*((char *) current_SC->request_buffer + 12));
|
||||
qualifier = (unsigned char) (*((char *) current_SC->request_buffer + 13));
|
||||
|
||||
if (key != UNIT_ATTENTION && !(key == NOT_READY && code == 0x04 && (!qualifier || qualifier == 0x02 || qualifier == 0x01))
|
||||
&& !(key == ILLEGAL_REQUEST && (code == 0x25 || code == 0x24 || !code)))
|
||||
|
||||
printk("fd_mcs: REQUEST SENSE " "Key = %x, Code = %x, Qualifier = %x\n", key, code, qualifier);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
#if EVERY_ACCESS
|
||||
printk("BEFORE MY_DONE. . .");
|
||||
#endif
|
||||
@ -1097,7 +1079,9 @@ static int fd_mcs_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
|
||||
panic("fd_mcs: fd_mcs_queue() NOT REENTRANT!\n");
|
||||
}
|
||||
#if EVERY_ACCESS
|
||||
printk("queue: target = %d cmnd = 0x%02x pieces = %d size = %u\n", SCpnt->target, *(unsigned char *) SCpnt->cmnd, SCpnt->use_sg, SCpnt->request_bufflen);
|
||||
printk("queue: target = %d cmnd = 0x%02x pieces = %d size = %u\n",
|
||||
SCpnt->target, *(unsigned char *) SCpnt->cmnd,
|
||||
scsi_sg_count(SCpnt), scsi_bufflen(SCpnt));
|
||||
#endif
|
||||
|
||||
fd_mcs_make_bus_idle(shpnt);
|
||||
@ -1107,14 +1091,14 @@ static int fd_mcs_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
|
||||
|
||||
/* Initialize static data */
|
||||
|
||||
if (current_SC->use_sg) {
|
||||
current_SC->SCp.buffer = (struct scatterlist *) current_SC->request_buffer;
|
||||
if (scsi_bufflen(current_SC)) {
|
||||
current_SC->SCp.buffer = scsi_sglist(current_SC);
|
||||
current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
|
||||
current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
|
||||
current_SC->SCp.buffers_residual = current_SC->use_sg - 1;
|
||||
current_SC->SCp.buffers_residual = scsi_sg_count(current_SC) - 1;
|
||||
} else {
|
||||
current_SC->SCp.ptr = (char *) current_SC->request_buffer;
|
||||
current_SC->SCp.this_residual = current_SC->request_bufflen;
|
||||
current_SC->SCp.ptr = NULL;
|
||||
current_SC->SCp.this_residual = 0;
|
||||
current_SC->SCp.buffer = NULL;
|
||||
current_SC->SCp.buffers_residual = 0;
|
||||
}
|
||||
@ -1166,7 +1150,9 @@ static void fd_mcs_print_info(Scsi_Cmnd * SCpnt)
|
||||
break;
|
||||
}
|
||||
|
||||
printk("(%d), target = %d cmnd = 0x%02x pieces = %d size = %u\n", SCpnt->SCp.phase, SCpnt->device->id, *(unsigned char *) SCpnt->cmnd, SCpnt->use_sg, SCpnt->request_bufflen);
|
||||
printk("(%d), target = %d cmnd = 0x%02x pieces = %d size = %u\n",
|
||||
SCpnt->SCp.phase, SCpnt->device->id, *(unsigned char *) SCpnt->cmnd,
|
||||
scsi_sg_count(SCpnt), scsi_bufflen(SCpnt));
|
||||
printk("sent_command = %d, have_data_in = %d, timeout = %d\n", SCpnt->SCp.sent_command, SCpnt->SCp.have_data_in, SCpnt->timeout);
|
||||
#if DEBUG_RACE
|
||||
printk("in_interrupt_flag = %d\n", in_interrupt_flag);
|
||||
|
@ -141,7 +141,7 @@
|
||||
static void gdth_delay(int milliseconds);
|
||||
static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs);
|
||||
static irqreturn_t gdth_interrupt(int irq, void *dev_id);
|
||||
static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, int irq,
|
||||
static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
|
||||
int gdth_from_wait, int* pIndex);
|
||||
static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
|
||||
Scsi_Cmnd *scp);
|
||||
@ -165,7 +165,6 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
|
||||
static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive);
|
||||
|
||||
static void gdth_enable_int(gdth_ha_str *ha);
|
||||
static unchar gdth_get_status(gdth_ha_str *ha, int irq);
|
||||
static int gdth_test_busy(gdth_ha_str *ha);
|
||||
static int gdth_get_cmd_index(gdth_ha_str *ha);
|
||||
static void gdth_release_event(gdth_ha_str *ha);
|
||||
@ -1334,14 +1333,12 @@ static void __init gdth_enable_int(gdth_ha_str *ha)
|
||||
}
|
||||
|
||||
/* return IStatus if interrupt was from this card else 0 */
|
||||
static unchar gdth_get_status(gdth_ha_str *ha, int irq)
|
||||
static unchar gdth_get_status(gdth_ha_str *ha)
|
||||
{
|
||||
unchar IStatus = 0;
|
||||
|
||||
TRACE(("gdth_get_status() irq %d ctr_count %d\n", irq, gdth_ctr_count));
|
||||
TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count));
|
||||
|
||||
if (ha->irq != (unchar)irq) /* check IRQ */
|
||||
return false;
|
||||
if (ha->type == GDT_EISA)
|
||||
IStatus = inb((ushort)ha->bmic + EDOORREG);
|
||||
else if (ha->type == GDT_ISA)
|
||||
@ -1523,7 +1520,7 @@ static int gdth_wait(gdth_ha_str *ha, int index, ulong32 time)
|
||||
return 1; /* no wait required */
|
||||
|
||||
do {
|
||||
__gdth_interrupt(ha, (int)ha->irq, true, &wait_index);
|
||||
__gdth_interrupt(ha, true, &wait_index);
|
||||
if (wait_index == index) {
|
||||
answer_found = TRUE;
|
||||
break;
|
||||
@ -3036,7 +3033,7 @@ static void gdth_clear_events(void)
|
||||
|
||||
/* SCSI interface functions */
|
||||
|
||||
static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, int irq,
|
||||
static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
|
||||
int gdth_from_wait, int* pIndex)
|
||||
{
|
||||
gdt6m_dpram_str __iomem *dp6m_ptr = NULL;
|
||||
@ -3054,7 +3051,7 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, int irq,
|
||||
int act_int_coal = 0;
|
||||
#endif
|
||||
|
||||
TRACE(("gdth_interrupt() IRQ %d\n",irq));
|
||||
TRACE(("gdth_interrupt() IRQ %d\n", ha->irq));
|
||||
|
||||
/* if polling and not from gdth_wait() -> return */
|
||||
if (gdth_polling) {
|
||||
@ -3067,7 +3064,8 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, int irq,
|
||||
spin_lock_irqsave(&ha->smp_lock, flags);
|
||||
|
||||
/* search controller */
|
||||
if (0 == (IStatus = gdth_get_status(ha, irq))) {
|
||||
IStatus = gdth_get_status(ha);
|
||||
if (IStatus == 0) {
|
||||
/* spurious interrupt */
|
||||
if (!gdth_polling)
|
||||
spin_unlock_irqrestore(&ha->smp_lock, flags);
|
||||
@ -3294,9 +3292,9 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, int irq,
|
||||
|
||||
static irqreturn_t gdth_interrupt(int irq, void *dev_id)
|
||||
{
|
||||
gdth_ha_str *ha = (gdth_ha_str *)dev_id;
|
||||
gdth_ha_str *ha = dev_id;
|
||||
|
||||
return __gdth_interrupt(ha, irq, false, NULL);
|
||||
return __gdth_interrupt(ha, false, NULL);
|
||||
}
|
||||
|
||||
static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
|
||||
|
@ -54,8 +54,7 @@ static struct class shost_class = {
|
||||
};
|
||||
|
||||
/**
|
||||
* scsi_host_set_state - Take the given host through the host
|
||||
* state model.
|
||||
* scsi_host_set_state - Take the given host through the host state model.
|
||||
* @shost: scsi host to change the state of.
|
||||
* @state: state to change to.
|
||||
*
|
||||
@ -440,7 +439,6 @@ static int __scsi_host_match(struct class_device *cdev, void *data)
|
||||
|
||||
/**
|
||||
* scsi_host_lookup - get a reference to a Scsi_Host by host no
|
||||
*
|
||||
* @hostnum: host number to locate
|
||||
*
|
||||
* Return value:
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* HighPoint RR3xxx controller driver for Linux
|
||||
* HighPoint RR3xxx/4xxx controller driver for Linux
|
||||
* Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
@ -38,80 +38,84 @@
|
||||
#include "hptiop.h"
|
||||
|
||||
MODULE_AUTHOR("HighPoint Technologies, Inc.");
|
||||
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx SATA Controller Driver");
|
||||
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");
|
||||
|
||||
static char driver_name[] = "hptiop";
|
||||
static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
|
||||
static const char driver_ver[] = "v1.2 (070830)";
|
||||
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
|
||||
static const char driver_ver[] = "v1.3 (071203)";
|
||||
|
||||
static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
|
||||
static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
|
||||
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
|
||||
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
|
||||
struct hpt_iop_request_scsi_command *req);
|
||||
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
|
||||
static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
|
||||
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
|
||||
|
||||
static inline void hptiop_pci_posting_flush(struct hpt_iopmu __iomem *iop)
|
||||
{
|
||||
readl(&iop->outbound_intstatus);
|
||||
}
|
||||
|
||||
static int iop_wait_ready(struct hpt_iopmu __iomem *iop, u32 millisec)
|
||||
static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
|
||||
{
|
||||
u32 req = 0;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < millisec; i++) {
|
||||
req = readl(&iop->inbound_queue);
|
||||
req = readl(&hba->u.itl.iop->inbound_queue);
|
||||
if (req != IOPMU_QUEUE_EMPTY)
|
||||
break;
|
||||
msleep(1);
|
||||
}
|
||||
|
||||
if (req != IOPMU_QUEUE_EMPTY) {
|
||||
writel(req, &iop->outbound_queue);
|
||||
hptiop_pci_posting_flush(iop);
|
||||
writel(req, &hba->u.itl.iop->outbound_queue);
|
||||
readl(&hba->u.itl.iop->outbound_intstatus);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static void hptiop_request_callback(struct hptiop_hba *hba, u32 tag)
|
||||
static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
|
||||
{
|
||||
if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
|
||||
return hptiop_host_request_callback(hba,
|
||||
tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
|
||||
else
|
||||
return hptiop_iop_request_callback(hba, tag);
|
||||
return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
|
||||
}
|
||||
|
||||
static inline void hptiop_drain_outbound_queue(struct hptiop_hba *hba)
|
||||
static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
|
||||
{
|
||||
if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
|
||||
hptiop_host_request_callback_itl(hba,
|
||||
tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
|
||||
else
|
||||
hptiop_iop_request_callback_itl(hba, tag);
|
||||
}
|
||||
|
||||
static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
|
||||
{
|
||||
u32 req;
|
||||
|
||||
while ((req = readl(&hba->iop->outbound_queue)) != IOPMU_QUEUE_EMPTY) {
|
||||
while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
|
||||
IOPMU_QUEUE_EMPTY) {
|
||||
|
||||
if (req & IOPMU_QUEUE_MASK_HOST_BITS)
|
||||
hptiop_request_callback(hba, req);
|
||||
hptiop_request_callback_itl(hba, req);
|
||||
else {
|
||||
struct hpt_iop_request_header __iomem * p;
|
||||
|
||||
p = (struct hpt_iop_request_header __iomem *)
|
||||
((char __iomem *)hba->iop + req);
|
||||
((char __iomem *)hba->u.itl.iop + req);
|
||||
|
||||
if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
|
||||
if (readl(&p->context))
|
||||
hptiop_request_callback(hba, req);
|
||||
hptiop_request_callback_itl(hba, req);
|
||||
else
|
||||
writel(1, &p->context);
|
||||
}
|
||||
else
|
||||
hptiop_request_callback(hba, req);
|
||||
hptiop_request_callback_itl(hba, req);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int __iop_intr(struct hptiop_hba *hba)
|
||||
static int iop_intr_itl(struct hptiop_hba *hba)
|
||||
{
|
||||
struct hpt_iopmu __iomem *iop = hba->iop;
|
||||
struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
|
||||
u32 status;
|
||||
int ret = 0;
|
||||
|
||||
@ -119,6 +123,7 @@ static int __iop_intr(struct hptiop_hba *hba)
|
||||
|
||||
if (status & IOPMU_OUTBOUND_INT_MSG0) {
|
||||
u32 msg = readl(&iop->outbound_msgaddr0);
|
||||
|
||||
dprintk("received outbound msg %x\n", msg);
|
||||
writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
|
||||
hptiop_message_callback(hba, msg);
|
||||
@ -126,31 +131,115 @@ static int __iop_intr(struct hptiop_hba *hba)
|
||||
}
|
||||
|
||||
if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
|
||||
hptiop_drain_outbound_queue(hba);
|
||||
hptiop_drain_outbound_queue_itl(hba);
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int iop_send_sync_request(struct hptiop_hba *hba,
|
||||
static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
|
||||
{
|
||||
u32 outbound_tail = readl(&mu->outbound_tail);
|
||||
u32 outbound_head = readl(&mu->outbound_head);
|
||||
|
||||
if (outbound_tail != outbound_head) {
|
||||
u64 p;
|
||||
|
||||
memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
|
||||
outbound_tail++;
|
||||
|
||||
if (outbound_tail == MVIOP_QUEUE_LEN)
|
||||
outbound_tail = 0;
|
||||
writel(outbound_tail, &mu->outbound_tail);
|
||||
return p;
|
||||
} else
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
|
||||
{
|
||||
u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
|
||||
u32 head = inbound_head + 1;
|
||||
|
||||
if (head == MVIOP_QUEUE_LEN)
|
||||
head = 0;
|
||||
|
||||
memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
|
||||
writel(head, &hba->u.mv.mu->inbound_head);
|
||||
writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
|
||||
&hba->u.mv.regs->inbound_doorbell);
|
||||
}
|
||||
|
||||
static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
|
||||
{
|
||||
u32 req_type = (tag >> 5) & 0x7;
|
||||
struct hpt_iop_request_scsi_command *req;
|
||||
|
||||
dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);
|
||||
|
||||
BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);
|
||||
|
||||
switch (req_type) {
|
||||
case IOP_REQUEST_TYPE_GET_CONFIG:
|
||||
case IOP_REQUEST_TYPE_SET_CONFIG:
|
||||
hba->msg_done = 1;
|
||||
break;
|
||||
|
||||
case IOP_REQUEST_TYPE_SCSI_COMMAND:
|
||||
req = hba->reqs[tag >> 8].req_virt;
|
||||
if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
|
||||
req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
|
||||
|
||||
hptiop_finish_scsi_req(hba, tag>>8, req);
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static int iop_intr_mv(struct hptiop_hba *hba)
|
||||
{
|
||||
u32 status;
|
||||
int ret = 0;
|
||||
|
||||
status = readl(&hba->u.mv.regs->outbound_doorbell);
|
||||
writel(~status, &hba->u.mv.regs->outbound_doorbell);
|
||||
|
||||
if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
|
||||
u32 msg;
|
||||
msg = readl(&hba->u.mv.mu->outbound_msg);
|
||||
dprintk("received outbound msg %x\n", msg);
|
||||
hptiop_message_callback(hba, msg);
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
|
||||
u64 tag;
|
||||
|
||||
while ((tag = mv_outbound_read(hba->u.mv.mu)))
|
||||
hptiop_request_callback_mv(hba, tag);
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int iop_send_sync_request_itl(struct hptiop_hba *hba,
|
||||
void __iomem *_req, u32 millisec)
|
||||
{
|
||||
struct hpt_iop_request_header __iomem *req = _req;
|
||||
u32 i;
|
||||
|
||||
writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST,
|
||||
&req->flags);
|
||||
|
||||
writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
|
||||
writel(0, &req->context);
|
||||
|
||||
writel((unsigned long)req - (unsigned long)hba->iop,
|
||||
&hba->iop->inbound_queue);
|
||||
|
||||
hptiop_pci_posting_flush(hba->iop);
|
||||
writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
|
||||
&hba->u.itl.iop->inbound_queue);
|
||||
readl(&hba->u.itl.iop->outbound_intstatus);
|
||||
|
||||
for (i = 0; i < millisec; i++) {
|
||||
__iop_intr(hba);
|
||||
iop_intr_itl(hba);
|
||||
if (readl(&req->context))
|
||||
return 0;
|
||||
msleep(1);
|
||||
@ -159,19 +248,49 @@ static int iop_send_sync_request(struct hptiop_hba *hba,
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int iop_send_sync_request_mv(struct hptiop_hba *hba,
|
||||
u32 size_bits, u32 millisec)
|
||||
{
|
||||
struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
|
||||
u32 i;
|
||||
|
||||
hba->msg_done = 0;
|
||||
reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
|
||||
mv_inbound_write(hba->u.mv.internal_req_phy |
|
||||
MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);
|
||||
|
||||
for (i = 0; i < millisec; i++) {
|
||||
iop_intr_mv(hba);
|
||||
if (hba->msg_done)
|
||||
return 0;
|
||||
msleep(1);
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
|
||||
{
|
||||
writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
|
||||
readl(&hba->u.itl.iop->outbound_intstatus);
|
||||
}
|
||||
|
||||
static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
|
||||
{
|
||||
writel(msg, &hba->u.mv.mu->inbound_msg);
|
||||
writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
|
||||
readl(&hba->u.mv.regs->inbound_doorbell);
|
||||
}
|
||||
|
||||
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
|
||||
{
|
||||
u32 i;
|
||||
|
||||
hba->msg_done = 0;
|
||||
|
||||
writel(msg, &hba->iop->inbound_msgaddr0);
|
||||
|
||||
hptiop_pci_posting_flush(hba->iop);
|
||||
hba->ops->post_msg(hba, msg);
|
||||
|
||||
for (i = 0; i < millisec; i++) {
|
||||
spin_lock_irq(hba->host->host_lock);
|
||||
__iop_intr(hba);
|
||||
hba->ops->iop_intr(hba);
|
||||
spin_unlock_irq(hba->host->host_lock);
|
||||
if (hba->msg_done)
|
||||
break;
|
||||
@ -181,46 +300,67 @@ static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
|
||||
return hba->msg_done? 0 : -1;
|
||||
}
|
||||
|
||||
static int iop_get_config(struct hptiop_hba *hba,
|
||||
static int iop_get_config_itl(struct hptiop_hba *hba,
|
||||
struct hpt_iop_request_get_config *config)
|
||||
{
|
||||
u32 req32;
|
||||
struct hpt_iop_request_get_config __iomem *req;
|
||||
|
||||
req32 = readl(&hba->iop->inbound_queue);
|
||||
req32 = readl(&hba->u.itl.iop->inbound_queue);
|
||||
if (req32 == IOPMU_QUEUE_EMPTY)
|
||||
return -1;
|
||||
|
||||
req = (struct hpt_iop_request_get_config __iomem *)
|
||||
((unsigned long)hba->iop + req32);
|
||||
((unsigned long)hba->u.itl.iop + req32);
|
||||
|
||||
writel(0, &req->header.flags);
|
||||
writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
|
||||
writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
|
||||
writel(IOP_RESULT_PENDING, &req->header.result);
|
||||
|
||||
if (iop_send_sync_request(hba, req, 20000)) {
|
||||
if (iop_send_sync_request_itl(hba, req, 20000)) {
|
||||
dprintk("Get config send cmd failed\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
memcpy_fromio(config, req, sizeof(*config));
|
||||
writel(req32, &hba->iop->outbound_queue);
|
||||
writel(req32, &hba->u.itl.iop->outbound_queue);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int iop_set_config(struct hptiop_hba *hba,
|
||||
static int iop_get_config_mv(struct hptiop_hba *hba,
|
||||
struct hpt_iop_request_get_config *config)
|
||||
{
|
||||
struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;
|
||||
|
||||
req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
|
||||
req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
|
||||
req->header.size =
|
||||
cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
|
||||
req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
|
||||
req->header.context = cpu_to_le64(IOP_REQUEST_TYPE_GET_CONFIG<<5);
|
||||
|
||||
if (iop_send_sync_request_mv(hba, 0, 20000)) {
|
||||
dprintk("Get config send cmd failed\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int iop_set_config_itl(struct hptiop_hba *hba,
|
||||
struct hpt_iop_request_set_config *config)
|
||||
{
|
||||
u32 req32;
|
||||
struct hpt_iop_request_set_config __iomem *req;
|
||||
|
||||
req32 = readl(&hba->iop->inbound_queue);
|
||||
req32 = readl(&hba->u.itl.iop->inbound_queue);
|
||||
if (req32 == IOPMU_QUEUE_EMPTY)
|
||||
return -1;
|
||||
|
||||
req = (struct hpt_iop_request_set_config __iomem *)
|
||||
((unsigned long)hba->iop + req32);
|
||||
((unsigned long)hba->u.itl.iop + req32);
|
||||
|
||||
memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
|
||||
(u8 *)config + sizeof(struct hpt_iop_request_header),
|
||||
@ -232,22 +372,52 @@ static int iop_set_config(struct hptiop_hba *hba,
|
||||
writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
|
||||
writel(IOP_RESULT_PENDING, &req->header.result);
|
||||
|
||||
if (iop_send_sync_request(hba, req, 20000)) {
|
||||
if (iop_send_sync_request_itl(hba, req, 20000)) {
|
||||
dprintk("Set config send cmd failed\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
writel(req32, &hba->iop->outbound_queue);
|
||||
writel(req32, &hba->u.itl.iop->outbound_queue);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int iop_set_config_mv(struct hptiop_hba *hba,
|
||||
struct hpt_iop_request_set_config *config)
|
||||
{
|
||||
struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;
|
||||
|
||||
memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
|
||||
req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
|
||||
req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
|
||||
req->header.size =
|
||||
cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
|
||||
req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
|
||||
req->header.context = cpu_to_le64(IOP_REQUEST_TYPE_SET_CONFIG<<5);
|
||||
|
||||
if (iop_send_sync_request_mv(hba, 0, 20000)) {
|
||||
dprintk("Set config send cmd failed\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
|
||||
{
|
||||
writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
|
||||
&hba->u.itl.iop->outbound_intmask);
|
||||
}
|
||||
|
||||
static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
|
||||
{
|
||||
writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
|
||||
&hba->u.mv.regs->outbound_intmask);
|
||||
}
|
||||
|
||||
static int hptiop_initialize_iop(struct hptiop_hba *hba)
|
||||
{
|
||||
struct hpt_iopmu __iomem *iop = hba->iop;
|
||||
|
||||
/* enable interrupts */
|
||||
writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
|
||||
&iop->outbound_intmask);
|
||||
hba->ops->enable_intr(hba);
|
||||
|
||||
hba->initialized = 1;
|
||||
|
||||
@ -261,37 +431,74 @@ static int hptiop_initialize_iop(struct hptiop_hba *hba)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hptiop_map_pci_bar(struct hptiop_hba *hba)
|
||||
static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
|
||||
{
|
||||
u32 mem_base_phy, length;
|
||||
void __iomem *mem_base_virt;
|
||||
|
||||
struct pci_dev *pcidev = hba->pcidev;
|
||||
|
||||
if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_MEM)) {
|
||||
|
||||
if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
|
||||
printk(KERN_ERR "scsi%d: pci resource invalid\n",
|
||||
hba->host->host_no);
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
mem_base_phy = pci_resource_start(pcidev, 0);
|
||||
length = pci_resource_len(pcidev, 0);
|
||||
mem_base_phy = pci_resource_start(pcidev, index);
|
||||
length = pci_resource_len(pcidev, index);
|
||||
mem_base_virt = ioremap(mem_base_phy, length);
|
||||
|
||||
if (!mem_base_virt) {
|
||||
printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
|
||||
hba->host->host_no);
|
||||
return 0;
|
||||
}
|
||||
return mem_base_virt;
|
||||
}
|
||||
|
||||
static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
|
||||
{
|
||||
hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
|
||||
if (hba->u.itl.iop)
|
||||
return 0;
|
||||
else
|
||||
return -1;
|
||||
}
|
||||
|
||||
static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
|
||||
{
|
||||
iounmap(hba->u.itl.iop);
|
||||
}
|
||||
|
||||
static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
|
||||
{
|
||||
hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
|
||||
if (hba->u.mv.regs == 0)
|
||||
return -1;
|
||||
|
||||
hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
|
||||
if (hba->u.mv.mu == 0) {
|
||||
iounmap(hba->u.mv.regs);
|
||||
return -1;
|
||||
}
|
||||
|
||||
hba->iop = mem_base_virt;
|
||||
dprintk("hptiop_map_pci_bar: iop=%p\n", hba->iop);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
|
||||
{
|
||||
iounmap(hba->u.mv.regs);
|
||||
iounmap(hba->u.mv.mu);
|
||||
}
|
||||
|
||||
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
|
||||
{
|
||||
dprintk("iop message 0x%x\n", msg);
|
||||
|
||||
if (msg == IOPMU_INBOUND_MSG0_NOP)
|
||||
hba->msg_done = 1;
|
||||
|
||||
if (!hba->initialized)
|
||||
return;
|
||||
|
||||
@ -303,7 +510,7 @@ static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
|
||||
hba->msg_done = 1;
|
||||
}
|
||||
|
||||
static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
|
||||
static struct hptiop_request *get_req(struct hptiop_hba *hba)
|
||||
{
|
||||
struct hptiop_request *ret;
|
||||
|
||||
@ -316,30 +523,19 @@ static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
|
||||
static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
|
||||
{
|
||||
dprintk("free_req(%d, %p)\n", req->index, req);
|
||||
req->next = hba->req_list;
|
||||
hba->req_list = req;
|
||||
}
|
||||
|
||||
static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
|
||||
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
|
||||
struct hpt_iop_request_scsi_command *req)
|
||||
{
|
||||
struct hpt_iop_request_scsi_command *req;
|
||||
struct scsi_cmnd *scp;
|
||||
u32 tag;
|
||||
|
||||
if (hba->iopintf_v2) {
|
||||
tag = _tag & ~ IOPMU_QUEUE_REQUEST_RESULT_BIT;
|
||||
req = hba->reqs[tag].req_virt;
|
||||
if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
|
||||
req->header.result = IOP_RESULT_SUCCESS;
|
||||
} else {
|
||||
tag = _tag;
|
||||
req = hba->reqs[tag].req_virt;
|
||||
}
|
||||
|
||||
dprintk("hptiop_host_request_callback: req=%p, type=%d, "
|
||||
dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
|
||||
"result=%d, context=0x%x tag=%d\n",
|
||||
req, req->header.type, req->header.result,
|
||||
req->header.context, tag);
|
||||
@ -354,6 +550,8 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
|
||||
|
||||
switch (le32_to_cpu(req->header.result)) {
|
||||
case IOP_RESULT_SUCCESS:
|
||||
scsi_set_resid(scp,
|
||||
scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
|
||||
scp->result = (DID_OK<<16);
|
||||
break;
|
||||
case IOP_RESULT_BAD_TARGET:
|
||||
@ -371,12 +569,12 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
|
||||
case IOP_RESULT_INVALID_REQUEST:
|
||||
scp->result = (DID_ABORT<<16);
|
||||
break;
|
||||
case IOP_RESULT_MODE_SENSE_CHECK_CONDITION:
|
||||
case IOP_RESULT_CHECK_CONDITION:
|
||||
scsi_set_resid(scp,
|
||||
scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
|
||||
scp->result = SAM_STAT_CHECK_CONDITION;
|
||||
memset(&scp->sense_buffer,
|
||||
0, sizeof(scp->sense_buffer));
|
||||
memcpy(&scp->sense_buffer, &req->sg_list,
|
||||
min(sizeof(scp->sense_buffer),
|
||||
min_t(size_t, SCSI_SENSE_BUFFERSIZE,
|
||||
le32_to_cpu(req->dataxfer_length)));
|
||||
break;
|
||||
|
||||
@ -391,15 +589,33 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 _tag)
|
||||
free_req(hba, &hba->reqs[tag]);
|
||||
}
|
||||
|
||||
void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
|
||||
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
|
||||
{
|
||||
struct hpt_iop_request_scsi_command *req;
|
||||
u32 tag;
|
||||
|
||||
if (hba->iopintf_v2) {
|
||||
tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
|
||||
req = hba->reqs[tag].req_virt;
|
||||
if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
|
||||
req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
|
||||
} else {
|
||||
tag = _tag;
|
||||
req = hba->reqs[tag].req_virt;
|
||||
}
|
||||
|
||||
hptiop_finish_scsi_req(hba, tag, req);
|
||||
}
|
||||
|
||||
void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
|
||||
{
|
||||
struct hpt_iop_request_header __iomem *req;
|
||||
struct hpt_iop_request_ioctl_command __iomem *p;
|
||||
struct hpt_ioctl_k *arg;
|
||||
|
||||
req = (struct hpt_iop_request_header __iomem *)
|
||||
((unsigned long)hba->iop + tag);
|
||||
dprintk("hptiop_iop_request_callback: req=%p, type=%d, "
|
||||
((unsigned long)hba->u.itl.iop + tag);
|
||||
dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
|
||||
"result=%d, context=0x%x tag=%d\n",
|
||||
req, readl(&req->type), readl(&req->result),
|
||||
readl(&req->context), tag);
|
||||
@ -427,7 +643,7 @@ void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
|
||||
arg->result = HPT_IOCTL_RESULT_FAILED;
|
||||
|
||||
arg->done(arg);
|
||||
writel(tag, &hba->iop->outbound_queue);
|
||||
writel(tag, &hba->u.itl.iop->outbound_queue);
|
||||
}
|
||||
|
||||
static irqreturn_t hptiop_intr(int irq, void *dev_id)
|
||||
@ -437,7 +653,7 @@ static irqreturn_t hptiop_intr(int irq, void *dev_id)
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(hba->host->host_lock, flags);
|
||||
handled = __iop_intr(hba);
|
||||
handled = hba->ops->iop_intr(hba);
|
||||
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
||||
|
||||
return handled;
|
||||
@ -469,6 +685,57 @@ static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
|
||||
return HPT_SCP(scp)->sgcnt;
|
||||
}
|
||||
|
||||
static void hptiop_post_req_itl(struct hptiop_hba *hba,
|
||||
struct hptiop_request *_req)
|
||||
{
|
||||
struct hpt_iop_request_header *reqhdr = _req->req_virt;
|
||||
|
||||
reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
|
||||
(u32)_req->index);
|
||||
reqhdr->context_hi32 = 0;
|
||||
|
||||
if (hba->iopintf_v2) {
|
||||
u32 size, size_bits;
|
||||
|
||||
size = le32_to_cpu(reqhdr->size);
|
||||
if (size < 256)
|
||||
size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
|
||||
else if (size < 512)
|
||||
size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
|
||||
else
|
||||
size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
|
||||
IOPMU_QUEUE_ADDR_HOST_BIT;
|
||||
writel(_req->req_shifted_phy | size_bits,
|
||||
&hba->u.itl.iop->inbound_queue);
|
||||
} else
|
||||
writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
|
||||
&hba->u.itl.iop->inbound_queue);
|
||||
}
|
||||
|
||||
static void hptiop_post_req_mv(struct hptiop_hba *hba,
|
||||
struct hptiop_request *_req)
|
||||
{
|
||||
struct hpt_iop_request_header *reqhdr = _req->req_virt;
|
||||
u32 size, size_bit;
|
||||
|
||||
reqhdr->context = cpu_to_le32(_req->index<<8 |
|
||||
IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
|
||||
reqhdr->context_hi32 = 0;
|
||||
size = le32_to_cpu(reqhdr->size);
|
||||
|
||||
if (size <= 256)
|
||||
size_bit = 0;
|
||||
else if (size <= 256*2)
|
||||
size_bit = 1;
|
||||
else if (size <= 256*3)
|
||||
size_bit = 2;
|
||||
else
|
||||
size_bit = 3;
|
||||
|
||||
mv_inbound_write((_req->req_shifted_phy << 5) |
|
||||
MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
|
||||
}
|
||||
|
||||
static int hptiop_queuecommand(struct scsi_cmnd *scp,
|
||||
void (*done)(struct scsi_cmnd *))
|
||||
{
|
||||
@ -518,9 +785,6 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
|
||||
req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
|
||||
req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
|
||||
req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
|
||||
req->header.context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
|
||||
(u32)_req->index);
|
||||
req->header.context_hi32 = 0;
|
||||
req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
|
||||
req->channel = scp->device->channel;
|
||||
req->target = scp->device->id;
|
||||
@ -531,21 +795,7 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
|
||||
+ sg_count * sizeof(struct hpt_iopsg));
|
||||
|
||||
memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
|
||||
|
||||
if (hba->iopintf_v2) {
|
||||
u32 size_bits;
|
||||
if (req->header.size < 256)
|
||||
size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
|
||||
else if (req->header.size < 512)
|
||||
size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
|
||||
else
|
||||
size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
|
||||
IOPMU_QUEUE_ADDR_HOST_BIT;
|
||||
writel(_req->req_shifted_phy | size_bits, &hba->iop->inbound_queue);
|
||||
} else
|
||||
writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
|
||||
&hba->iop->inbound_queue);
|
||||
|
||||
hba->ops->post_req(hba, _req);
|
||||
return 0;
|
||||
|
||||
cmd_done:
|
||||
@ -563,9 +813,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba)
|
||||
{
|
||||
if (atomic_xchg(&hba->resetting, 1) == 0) {
|
||||
atomic_inc(&hba->reset_count);
|
||||
writel(IOPMU_INBOUND_MSG0_RESET,
|
||||
&hba->iop->inbound_msgaddr0);
|
||||
hptiop_pci_posting_flush(hba->iop);
|
||||
hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
|
||||
}
|
||||
|
||||
wait_event_timeout(hba->reset_wq,
|
||||
@ -601,8 +849,10 @@ static int hptiop_reset(struct scsi_cmnd *scp)
|
||||
static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
|
||||
int queue_depth)
|
||||
{
|
||||
if(queue_depth > 256)
|
||||
queue_depth = 256;
|
||||
struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;
|
||||
|
||||
if (queue_depth > hba->max_requests)
|
||||
queue_depth = hba->max_requests;
|
||||
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
|
||||
return queue_depth;
|
||||
}
|
||||
@ -663,6 +913,26 @@ static struct scsi_host_template driver_template = {
|
||||
.change_queue_depth = hptiop_adjust_disk_queue_depth,
|
||||
};
|
||||
|
||||
static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
|
||||
{
|
||||
hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
|
||||
0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
|
||||
if (hba->u.mv.internal_req)
|
||||
return 0;
|
||||
else
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
|
||||
{
|
||||
if (hba->u.mv.internal_req) {
|
||||
dma_free_coherent(&hba->pcidev->dev, 0x800,
|
||||
hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
|
||||
return 0;
|
||||
} else
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int __devinit hptiop_probe(struct pci_dev *pcidev,
|
||||
const struct pci_device_id *id)
|
||||
{
|
||||
@ -708,6 +978,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
|
||||
|
||||
hba = (struct hptiop_hba *)host->hostdata;
|
||||
|
||||
hba->ops = (struct hptiop_adapter_ops *)id->driver_data;
|
||||
hba->pcidev = pcidev;
|
||||
hba->host = host;
|
||||
hba->initialized = 0;
|
||||
@ -725,16 +996,24 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
|
||||
host->n_io_port = 0;
|
||||
host->irq = pcidev->irq;
|
||||
|
||||
if (hptiop_map_pci_bar(hba))
|
||||
if (hba->ops->map_pci_bar(hba))
|
||||
goto free_scsi_host;
|
||||
|
||||
if (iop_wait_ready(hba->iop, 20000)) {
|
||||
if (hba->ops->iop_wait_ready(hba, 20000)) {
|
||||
printk(KERN_ERR "scsi%d: firmware not ready\n",
|
||||
hba->host->host_no);
|
||||
goto unmap_pci_bar;
|
||||
}
|
||||
|
||||
if (iop_get_config(hba, &iop_config)) {
|
||||
if (hba->ops->internal_memalloc) {
|
||||
if (hba->ops->internal_memalloc(hba)) {
|
||||
printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
|
||||
hba->host->host_no);
|
||||
goto unmap_pci_bar;
|
||||
}
|
||||
}
|
||||
|
||||
if (hba->ops->get_config(hba, &iop_config)) {
|
||||
printk(KERN_ERR "scsi%d: get config failed\n",
|
||||
hba->host->host_no);
|
||||
goto unmap_pci_bar;
|
||||
@ -770,7 +1049,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
|
||||
set_config.vbus_id = cpu_to_le16(host->host_no);
|
||||
set_config.max_host_request_size = cpu_to_le16(req_size);
|
||||
|
||||
if (iop_set_config(hba, &set_config)) {
|
||||
if (hba->ops->set_config(hba, &set_config)) {
|
||||
printk(KERN_ERR "scsi%d: set config failed\n",
|
||||
hba->host->host_no);
|
||||
goto unmap_pci_bar;
|
||||
@ -839,21 +1118,24 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
|
||||
|
||||
free_request_mem:
|
||||
dma_free_coherent(&hba->pcidev->dev,
|
||||
hba->req_size*hba->max_requests + 0x20,
|
||||
hba->req_size * hba->max_requests + 0x20,
|
||||
hba->dma_coherent, hba->dma_coherent_handle);
|
||||
|
||||
free_request_irq:
|
||||
free_irq(hba->pcidev->irq, hba);
|
||||
|
||||
unmap_pci_bar:
|
||||
iounmap(hba->iop);
|
||||
if (hba->ops->internal_memfree)
|
||||
hba->ops->internal_memfree(hba);
|
||||
|
||||
free_pci_regions:
|
||||
pci_release_regions(pcidev) ;
|
||||
hba->ops->unmap_pci_bar(hba);
|
||||
|
||||
free_scsi_host:
|
||||
scsi_host_put(host);
|
||||
|
||||
free_pci_regions:
|
||||
pci_release_regions(pcidev);
|
||||
|
||||
disable_pci_device:
|
||||
pci_disable_device(pcidev);
|
||||
|
||||
@ -865,8 +1147,6 @@ static void hptiop_shutdown(struct pci_dev *pcidev)
|
||||
{
|
||||
struct Scsi_Host *host = pci_get_drvdata(pcidev);
|
||||
struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
|
||||
struct hpt_iopmu __iomem *iop = hba->iop;
|
||||
u32 int_mask;
|
||||
|
||||
dprintk("hptiop_shutdown(%p)\n", hba);
|
||||
|
||||
@ -876,11 +1156,24 @@ static void hptiop_shutdown(struct pci_dev *pcidev)
|
||||
hba->host->host_no);
|
||||
|
||||
/* disable all outbound interrupts */
|
||||
int_mask = readl(&iop->outbound_intmask);
|
||||
hba->ops->disable_intr(hba);
|
||||
}
|
||||
|
||||
static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
|
||||
{
|
||||
u32 int_mask;
|
||||
|
||||
int_mask = readl(&hba->u.itl.iop->outbound_intmask);
|
||||
writel(int_mask |
|
||||
IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
|
||||
&iop->outbound_intmask);
|
||||
hptiop_pci_posting_flush(iop);
|
||||
&hba->u.itl.iop->outbound_intmask);
|
||||
readl(&hba->u.itl.iop->outbound_intmask);
|
||||
}
|
||||
|
||||
static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
|
||||
{
|
||||
writel(0, &hba->u.mv.regs->outbound_intmask);
|
||||
readl(&hba->u.mv.regs->outbound_intmask);
|
||||
}
|
||||
|
||||
static void hptiop_remove(struct pci_dev *pcidev)
|
||||
@ -901,7 +1194,10 @@ static void hptiop_remove(struct pci_dev *pcidev)
|
||||
hba->dma_coherent,
|
||||
hba->dma_coherent_handle);
|
||||
|
||||
iounmap(hba->iop);
|
||||
if (hba->ops->internal_memfree)
|
||||
hba->ops->internal_memfree(hba);
|
||||
|
||||
hba->ops->unmap_pci_bar(hba);
|
||||
|
||||
pci_release_regions(hba->pcidev);
|
||||
pci_set_drvdata(hba->pcidev, NULL);
|
||||
@ -910,11 +1206,50 @@ static void hptiop_remove(struct pci_dev *pcidev)
|
||||
scsi_host_put(host);
|
||||
}
|
||||
|
||||
static struct hptiop_adapter_ops hptiop_itl_ops = {
|
||||
.iop_wait_ready = iop_wait_ready_itl,
|
||||
.internal_memalloc = 0,
|
||||
.internal_memfree = 0,
|
||||
.map_pci_bar = hptiop_map_pci_bar_itl,
|
||||
.unmap_pci_bar = hptiop_unmap_pci_bar_itl,
|
||||
.enable_intr = hptiop_enable_intr_itl,
|
||||
.disable_intr = hptiop_disable_intr_itl,
|
||||
.get_config = iop_get_config_itl,
|
||||
.set_config = iop_set_config_itl,
|
||||
.iop_intr = iop_intr_itl,
|
||||
.post_msg = hptiop_post_msg_itl,
|
||||
.post_req = hptiop_post_req_itl,
|
||||
};
|
||||
|
||||
static struct hptiop_adapter_ops hptiop_mv_ops = {
|
||||
.iop_wait_ready = iop_wait_ready_mv,
|
||||
.internal_memalloc = hptiop_internal_memalloc_mv,
|
||||
.internal_memfree = hptiop_internal_memfree_mv,
|
||||
.map_pci_bar = hptiop_map_pci_bar_mv,
|
||||
.unmap_pci_bar = hptiop_unmap_pci_bar_mv,
|
||||
.enable_intr = hptiop_enable_intr_mv,
|
||||
.disable_intr = hptiop_disable_intr_mv,
|
||||
.get_config = iop_get_config_mv,
|
||||
.set_config = iop_set_config_mv,
|
||||
.iop_intr = iop_intr_mv,
|
||||
.post_msg = hptiop_post_msg_mv,
|
||||
.post_req = hptiop_post_req_mv,
|
||||
};
|
||||
|
||||
static struct pci_device_id hptiop_id_table[] = {
|
||||
{ PCI_VDEVICE(TTI, 0x3220) },
|
||||
{ PCI_VDEVICE(TTI, 0x3320) },
|
||||
{ PCI_VDEVICE(TTI, 0x3520) },
|
||||
{ PCI_VDEVICE(TTI, 0x4320) },
|
||||
{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
|
||||
{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
|
||||
{},
|
||||
};
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* HighPoint RR3xxx controller driver for Linux
|
||||
* HighPoint RR3xxx/4xxx controller driver for Linux
|
||||
* Copyright (C) 2006-2007 HighPoint Technologies, Inc. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
@ -18,8 +18,7 @@
|
||||
#ifndef _HPTIOP_H_
|
||||
#define _HPTIOP_H_
|
||||
|
||||
struct hpt_iopmu
|
||||
{
|
||||
struct hpt_iopmu_itl {
|
||||
__le32 resrved0[4];
|
||||
__le32 inbound_msgaddr0;
|
||||
__le32 inbound_msgaddr1;
|
||||
@ -54,6 +53,40 @@ struct hpt_iopmu
|
||||
#define IOPMU_INBOUND_INT_ERROR 8
|
||||
#define IOPMU_INBOUND_INT_POSTQUEUE 0x10
|
||||
|
||||
#define MVIOP_QUEUE_LEN 512
|
||||
|
||||
struct hpt_iopmu_mv {
|
||||
__le32 inbound_head;
|
||||
__le32 inbound_tail;
|
||||
__le32 outbound_head;
|
||||
__le32 outbound_tail;
|
||||
__le32 inbound_msg;
|
||||
__le32 outbound_msg;
|
||||
__le32 reserve[10];
|
||||
__le64 inbound_q[MVIOP_QUEUE_LEN];
|
||||
__le64 outbound_q[MVIOP_QUEUE_LEN];
|
||||
};
|
||||
|
||||
struct hpt_iopmv_regs {
|
||||
__le32 reserved[0x20400 / 4];
|
||||
__le32 inbound_doorbell;
|
||||
__le32 inbound_intmask;
|
||||
__le32 outbound_doorbell;
|
||||
__le32 outbound_intmask;
|
||||
};
|
||||
|
||||
#define MVIOP_MU_QUEUE_ADDR_HOST_MASK (~(0x1full))
|
||||
#define MVIOP_MU_QUEUE_ADDR_HOST_BIT 4
|
||||
|
||||
#define MVIOP_MU_QUEUE_ADDR_IOP_HIGH32 0xffffffff
|
||||
#define MVIOP_MU_QUEUE_REQUEST_RESULT_BIT 1
|
||||
#define MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT 2
|
||||
|
||||
#define MVIOP_MU_INBOUND_INT_MSG 1
|
||||
#define MVIOP_MU_INBOUND_INT_POSTQUEUE 2
|
||||
#define MVIOP_MU_OUTBOUND_INT_MSG 1
|
||||
#define MVIOP_MU_OUTBOUND_INT_POSTQUEUE 2
|
||||
|
||||
enum hpt_iopmu_message {
|
||||
/* host-to-iop messages */
|
||||
IOPMU_INBOUND_MSG0_NOP = 0,
|
||||
@ -72,8 +105,7 @@ enum hpt_iopmu_message {
|
||||
IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_MAX = 0x3ff,
|
||||
};
|
||||
|
||||
struct hpt_iop_request_header
|
||||
{
|
||||
struct hpt_iop_request_header {
|
||||
__le32 size;
|
||||
__le32 type;
|
||||
__le32 flags;
|
||||
@ -104,11 +136,10 @@ enum hpt_iop_result_type {
|
||||
IOP_RESULT_RESET,
|
||||
IOP_RESULT_INVALID_REQUEST,
|
||||
IOP_RESULT_BAD_TARGET,
|
||||
IOP_RESULT_MODE_SENSE_CHECK_CONDITION,
|
||||
IOP_RESULT_CHECK_CONDITION,
|
||||
};
|
||||
|
||||
struct hpt_iop_request_get_config
|
||||
{
|
||||
struct hpt_iop_request_get_config {
|
||||
struct hpt_iop_request_header header;
|
||||
__le32 interface_version;
|
||||
__le32 firmware_version;
|
||||
@ -121,8 +152,7 @@ struct hpt_iop_request_get_config
|
||||
__le32 sdram_size;
|
||||
};
|
||||
|
||||
struct hpt_iop_request_set_config
|
||||
{
|
||||
struct hpt_iop_request_set_config {
|
||||
struct hpt_iop_request_header header;
|
||||
__le32 iop_id;
|
||||
__le16 vbus_id;
|
||||
@ -130,15 +160,13 @@ struct hpt_iop_request_set_config
|
||||
__le32 reserve[6];
|
||||
};
|
||||
|
||||
struct hpt_iopsg
|
||||
{
|
||||
struct hpt_iopsg {
|
||||
__le32 size;
|
||||
__le32 eot; /* non-zero: end of table */
|
||||
__le64 pci_address;
|
||||
};
|
||||
|
||||
struct hpt_iop_request_block_command
|
||||
{
|
||||
struct hpt_iop_request_block_command {
|
||||
struct hpt_iop_request_header header;
|
||||
u8 channel;
|
||||
u8 target;
|
||||
@ -156,8 +184,7 @@ struct hpt_iop_request_block_command
|
||||
#define IOP_BLOCK_COMMAND_FLUSH 4
|
||||
#define IOP_BLOCK_COMMAND_SHUTDOWN 5
|
||||
|
||||
struct hpt_iop_request_scsi_command
|
||||
{
|
||||
struct hpt_iop_request_scsi_command {
|
||||
struct hpt_iop_request_header header;
|
||||
u8 channel;
|
||||
u8 target;
|
||||
@ -168,8 +195,7 @@ struct hpt_iop_request_scsi_command
|
||||
struct hpt_iopsg sg_list[1];
|
||||
};
|
||||
|
||||
struct hpt_iop_request_ioctl_command
|
||||
{
|
||||
struct hpt_iop_request_ioctl_command {
|
||||
struct hpt_iop_request_header header;
|
||||
__le32 ioctl_code;
|
||||
__le32 inbuf_size;
|
||||
@ -182,10 +208,10 @@ struct hpt_iop_request_ioctl_command
|
||||
#define HPTIOP_MAX_REQUESTS 256u
|
||||
|
||||
struct hptiop_request {
|
||||
struct hptiop_request * next;
|
||||
void * req_virt;
|
||||
struct hptiop_request *next;
|
||||
void *req_virt;
|
||||
u32 req_shifted_phy;
|
||||
struct scsi_cmnd * scp;
|
||||
struct scsi_cmnd *scp;
|
||||
int index;
|
||||
};
|
||||
|
||||
@ -198,9 +224,21 @@ struct hpt_scsi_pointer {
|
||||
#define HPT_SCP(scp) ((struct hpt_scsi_pointer *)&(scp)->SCp)
|
||||
|
||||
struct hptiop_hba {
|
||||
struct hpt_iopmu __iomem * iop;
|
||||
struct Scsi_Host * host;
|
||||
struct pci_dev * pcidev;
|
||||
struct hptiop_adapter_ops *ops;
|
||||
union {
|
||||
struct {
|
||||
struct hpt_iopmu_itl __iomem *iop;
|
||||
} itl;
|
||||
struct {
|
||||
struct hpt_iopmv_regs *regs;
|
||||
struct hpt_iopmu_mv __iomem *mu;
|
||||
void *internal_req;
|
||||
dma_addr_t internal_req_phy;
|
||||
} mv;
|
||||
} u;
|
||||
|
||||
struct Scsi_Host *host;
|
||||
struct pci_dev *pcidev;
|
||||
|
||||
/* IOP config info */
|
||||
u32 interface_version;
|
||||
@ -213,15 +251,15 @@ struct hptiop_hba {
|
||||
|
||||
u32 req_size; /* host-allocated request buffer size */
|
||||
|
||||
int iopintf_v2: 1;
|
||||
int initialized: 1;
|
||||
int msg_done: 1;
|
||||
u32 iopintf_v2: 1;
|
||||
u32 initialized: 1;
|
||||
u32 msg_done: 1;
|
||||
|
||||
struct hptiop_request * req_list;
|
||||
struct hptiop_request reqs[HPTIOP_MAX_REQUESTS];
|
||||
|
||||
/* used to free allocated dma area */
|
||||
void * dma_coherent;
|
||||
void *dma_coherent;
|
||||
dma_addr_t dma_coherent_handle;
|
||||
|
||||
atomic_t reset_count;
|
||||
@ -231,19 +269,35 @@ struct hptiop_hba {
|
||||
wait_queue_head_t ioctl_wq;
|
||||
};
|
||||
|
||||
struct hpt_ioctl_k
|
||||
{
|
||||
struct hpt_ioctl_k {
|
||||
struct hptiop_hba * hba;
|
||||
u32 ioctl_code;
|
||||
u32 inbuf_size;
|
||||
u32 outbuf_size;
|
||||
void * inbuf;
|
||||
void * outbuf;
|
||||
u32 * bytes_returned;
|
||||
void *inbuf;
|
||||
void *outbuf;
|
||||
u32 *bytes_returned;
|
||||
void (*done)(struct hpt_ioctl_k *);
|
||||
int result; /* HPT_IOCTL_RESULT_ */
|
||||
};
|
||||
|
||||
struct hptiop_adapter_ops {
|
||||
int (*iop_wait_ready)(struct hptiop_hba *hba, u32 millisec);
|
||||
int (*internal_memalloc)(struct hptiop_hba *hba);
|
||||
int (*internal_memfree)(struct hptiop_hba *hba);
|
||||
int (*map_pci_bar)(struct hptiop_hba *hba);
|
||||
void (*unmap_pci_bar)(struct hptiop_hba *hba);
|
||||
void (*enable_intr)(struct hptiop_hba *hba);
|
||||
void (*disable_intr)(struct hptiop_hba *hba);
|
||||
int (*get_config)(struct hptiop_hba *hba,
|
||||
struct hpt_iop_request_get_config *config);
|
||||
int (*set_config)(struct hptiop_hba *hba,
|
||||
struct hpt_iop_request_set_config *config);
|
||||
int (*iop_intr)(struct hptiop_hba *hba);
|
||||
void (*post_msg)(struct hptiop_hba *hba, u32 msg);
|
||||
void (*post_req)(struct hptiop_hba *hba, struct hptiop_request *_req);
|
||||
};
|
||||
|
||||
#define HPT_IOCTL_RESULT_OK 0
|
||||
#define HPT_IOCTL_RESULT_FAILED (-1)
|
||||
|
||||
|
@ -629,6 +629,16 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
|
||||
list_del(&evt_struct->list);
|
||||
del_timer(&evt_struct->timer);
|
||||
|
||||
/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
|
||||
* Firmware will send a CRQ with a transport event (0xFF) to
|
||||
* tell this client what has happened to the transport. This
|
||||
* will be handled in ibmvscsi_handle_crq()
|
||||
*/
|
||||
if (rc == H_CLOSED) {
|
||||
dev_warn(hostdata->dev, "send warning. "
|
||||
"Receive queue closed, will retry.\n");
|
||||
goto send_busy;
|
||||
}
|
||||
dev_err(hostdata->dev, "send error %d\n", rc);
|
||||
atomic_inc(&hostdata->request_limit);
|
||||
goto send_error;
|
||||
@ -976,11 +986,14 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
|
||||
int rsp_rc;
|
||||
unsigned long flags;
|
||||
u16 lun = lun_from_dev(cmd->device);
|
||||
unsigned long wait_switch = 0;
|
||||
|
||||
/* First, find this command in our sent list so we can figure
|
||||
* out the correct tag
|
||||
*/
|
||||
spin_lock_irqsave(hostdata->host->host_lock, flags);
|
||||
wait_switch = jiffies + (init_timeout * HZ);
|
||||
do {
|
||||
found_evt = NULL;
|
||||
list_for_each_entry(tmp_evt, &hostdata->sent, list) {
|
||||
if (tmp_evt->cmnd == cmd) {
|
||||
@ -997,7 +1010,8 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
|
||||
evt = get_event_struct(&hostdata->pool);
|
||||
if (evt == NULL) {
|
||||
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
|
||||
sdev_printk(KERN_ERR, cmd->device, "failed to allocate abort event\n");
|
||||
sdev_printk(KERN_ERR, cmd->device,
|
||||
"failed to allocate abort event\n");
|
||||
return FAILED;
|
||||
}
|
||||
|
||||
@ -1015,19 +1029,31 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
|
||||
tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
|
||||
tsk_mgmt->task_tag = (u64) found_evt;
|
||||
|
||||
sdev_printk(KERN_INFO, cmd->device, "aborting command. lun 0x%lx, tag 0x%lx\n",
|
||||
tsk_mgmt->lun, tsk_mgmt->task_tag);
|
||||
|
||||
evt->sync_srp = &srp_rsp;
|
||||
|
||||
init_completion(&evt->comp);
|
||||
rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
|
||||
|
||||
if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
|
||||
break;
|
||||
|
||||
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
|
||||
msleep(10);
|
||||
spin_lock_irqsave(hostdata->host->host_lock, flags);
|
||||
} while (time_before(jiffies, wait_switch));
|
||||
|
||||
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
|
||||
|
||||
if (rsp_rc != 0) {
|
||||
sdev_printk(KERN_ERR, cmd->device,
|
||||
"failed to send abort() event. rc=%d\n", rsp_rc);
|
||||
return FAILED;
|
||||
}
|
||||
|
||||
sdev_printk(KERN_INFO, cmd->device,
|
||||
"aborting command. lun 0x%lx, tag 0x%lx\n",
|
||||
(((u64) lun) << 48), (u64) found_evt);
|
||||
|
||||
wait_for_completion(&evt->comp);
|
||||
|
||||
/* make sure we got a good response */
|
||||
@ -1099,12 +1125,16 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
|
||||
int rsp_rc;
|
||||
unsigned long flags;
|
||||
u16 lun = lun_from_dev(cmd->device);
|
||||
unsigned long wait_switch = 0;
|
||||
|
||||
spin_lock_irqsave(hostdata->host->host_lock, flags);
|
||||
wait_switch = jiffies + (init_timeout * HZ);
|
||||
do {
|
||||
evt = get_event_struct(&hostdata->pool);
|
||||
if (evt == NULL) {
|
||||
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
|
||||
sdev_printk(KERN_ERR, cmd->device, "failed to allocate reset event\n");
|
||||
sdev_printk(KERN_ERR, cmd->device,
|
||||
"failed to allocate reset event\n");
|
||||
return FAILED;
|
||||
}
|
||||
|
||||
@ -1121,19 +1151,30 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
|
||||
tsk_mgmt->lun = ((u64) lun) << 48;
|
||||
tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
|
||||
|
||||
sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n",
|
||||
tsk_mgmt->lun);
|
||||
|
||||
evt->sync_srp = &srp_rsp;
|
||||
|
||||
init_completion(&evt->comp);
|
||||
rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
|
||||
|
||||
if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
|
||||
break;
|
||||
|
||||
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
|
||||
msleep(10);
|
||||
spin_lock_irqsave(hostdata->host->host_lock, flags);
|
||||
} while (time_before(jiffies, wait_switch));
|
||||
|
||||
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
|
||||
|
||||
if (rsp_rc != 0) {
|
||||
sdev_printk(KERN_ERR, cmd->device,
|
||||
"failed to send reset event. rc=%d\n", rsp_rc);
|
||||
return FAILED;
|
||||
}
|
||||
|
||||
sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n",
|
||||
(((u64) lun) << 48));
|
||||
|
||||
wait_for_completion(&evt->comp);
|
||||
|
||||
/* make sure we got a good response */
|
||||
@ -1386,8 +1427,10 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
|
||||
unsigned long lock_flags = 0;
|
||||
|
||||
spin_lock_irqsave(shost->host_lock, lock_flags);
|
||||
if (sdev->type == TYPE_DISK)
|
||||
if (sdev->type == TYPE_DISK) {
|
||||
sdev->allow_restart = 1;
|
||||
sdev->timeout = 60 * HZ;
|
||||
}
|
||||
scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
|
||||
spin_unlock_irqrestore(shost->host_lock, lock_flags);
|
||||
return 0;
|
||||
|
@ -292,7 +292,7 @@ static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
|
||||
dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0],
|
||||
cmd->usg_sg);
|
||||
|
||||
if (sc->use_sg)
|
||||
if (scsi_sg_count(sc))
|
||||
err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
|
||||
|
||||
spin_lock_irqsave(&target->lock, flags);
|
||||
|
@ -837,19 +837,16 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
|
||||
|
||||
/* Phase 4 - Setup scatter/gather buffers */
|
||||
case 4:
|
||||
if (cmd->use_sg) {
|
||||
/* if many buffers are available, start filling the first */
|
||||
cmd->SCp.buffer =
|
||||
(struct scatterlist *) cmd->request_buffer;
|
||||
if (scsi_bufflen(cmd)) {
|
||||
cmd->SCp.buffer = scsi_sglist(cmd);
|
||||
cmd->SCp.this_residual = cmd->SCp.buffer->length;
|
||||
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
|
||||
} else {
|
||||
/* else fill the only available buffer */
|
||||
cmd->SCp.buffer = NULL;
|
||||
cmd->SCp.this_residual = cmd->request_bufflen;
|
||||
cmd->SCp.ptr = cmd->request_buffer;
|
||||
cmd->SCp.this_residual = 0;
|
||||
cmd->SCp.ptr = NULL;
|
||||
}
|
||||
cmd->SCp.buffers_residual = cmd->use_sg - 1;
|
||||
cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
|
||||
cmd->SCp.phase++;
|
||||
if (cmd->SCp.this_residual & 0x01)
|
||||
cmd->SCp.this_residual++;
|
||||
|
@ -369,16 +369,16 @@ static int in2000_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
|
||||
* - SCp.phase records this command's SRCID_ER bit setting
|
||||
*/
|
||||
|
||||
if (cmd->use_sg) {
|
||||
cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
|
||||
cmd->SCp.buffers_residual = cmd->use_sg - 1;
|
||||
if (scsi_bufflen(cmd)) {
|
||||
cmd->SCp.buffer = scsi_sglist(cmd);
|
||||
cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
|
||||
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
|
||||
cmd->SCp.this_residual = cmd->SCp.buffer->length;
|
||||
} else {
|
||||
cmd->SCp.buffer = NULL;
|
||||
cmd->SCp.buffers_residual = 0;
|
||||
cmd->SCp.ptr = (char *) cmd->request_buffer;
|
||||
cmd->SCp.this_residual = cmd->request_bufflen;
|
||||
cmd->SCp.ptr = NULL;
|
||||
cmd->SCp.this_residual = 0;
|
||||
}
|
||||
cmd->SCp.have_data_in = 0;
|
||||
|
||||
|
@ -84,7 +84,7 @@
|
||||
/*
|
||||
* Global Data
|
||||
*/
|
||||
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
|
||||
static LIST_HEAD(ipr_ioa_head);
|
||||
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
|
||||
static unsigned int ipr_max_speed = 1;
|
||||
static int ipr_testmode = 0;
|
||||
|
@ -702,12 +702,8 @@ ips_release(struct Scsi_Host *sh)
|
||||
/* free extra memory */
|
||||
ips_free(ha);
|
||||
|
||||
/* Free I/O Region */
|
||||
if (ha->io_addr)
|
||||
release_region(ha->io_addr, ha->io_len);
|
||||
|
||||
/* free IRQ */
|
||||
free_irq(ha->irq, ha);
|
||||
free_irq(ha->pcidev->irq, ha);
|
||||
|
||||
scsi_host_put(sh);
|
||||
|
||||
@ -1637,7 +1633,7 @@ ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
|
||||
return (IPS_FAILURE);
|
||||
}
|
||||
|
||||
if (ha->device_id == IPS_DEVICEID_COPPERHEAD &&
|
||||
if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD &&
|
||||
pt->CoppCP.cmd.flashfw.op_code ==
|
||||
IPS_CMD_RW_BIOSFW) {
|
||||
ret = ips_flash_copperhead(ha, pt, scb);
|
||||
@ -2021,7 +2017,7 @@ ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb)
|
||||
pt->ExtendedStatus = scb->extended_status;
|
||||
pt->AdapterType = ha->ad_type;
|
||||
|
||||
if (ha->device_id == IPS_DEVICEID_COPPERHEAD &&
|
||||
if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD &&
|
||||
(scb->cmd.flashfw.op_code == IPS_CMD_DOWNLOAD ||
|
||||
scb->cmd.flashfw.op_code == IPS_CMD_RW_BIOSFW))
|
||||
ips_free_flash_copperhead(ha);
|
||||
@ -2075,7 +2071,7 @@ ips_host_info(ips_ha_t * ha, char *ptr, off_t offset, int len)
|
||||
ha->mem_ptr);
|
||||
}
|
||||
|
||||
copy_info(&info, "\tIRQ number : %d\n", ha->irq);
|
||||
copy_info(&info, "\tIRQ number : %d\n", ha->pcidev->irq);
|
||||
|
||||
/* For the Next 3 lines Check for Binary 0 at the end and don't include it if it's there. */
|
||||
/* That keeps everything happy for "text" operations on the proc file. */
|
||||
@ -2232,31 +2228,31 @@ ips_identify_controller(ips_ha_t * ha)
|
||||
{
|
||||
METHOD_TRACE("ips_identify_controller", 1);
|
||||
|
||||
switch (ha->device_id) {
|
||||
switch (ha->pcidev->device) {
|
||||
case IPS_DEVICEID_COPPERHEAD:
|
||||
if (ha->revision_id <= IPS_REVID_SERVERAID) {
|
||||
if (ha->pcidev->revision <= IPS_REVID_SERVERAID) {
|
||||
ha->ad_type = IPS_ADTYPE_SERVERAID;
|
||||
} else if (ha->revision_id == IPS_REVID_SERVERAID2) {
|
||||
} else if (ha->pcidev->revision == IPS_REVID_SERVERAID2) {
|
||||
ha->ad_type = IPS_ADTYPE_SERVERAID2;
|
||||
} else if (ha->revision_id == IPS_REVID_NAVAJO) {
|
||||
} else if (ha->pcidev->revision == IPS_REVID_NAVAJO) {
|
||||
ha->ad_type = IPS_ADTYPE_NAVAJO;
|
||||
} else if ((ha->revision_id == IPS_REVID_SERVERAID2)
|
||||
} else if ((ha->pcidev->revision == IPS_REVID_SERVERAID2)
|
||||
&& (ha->slot_num == 0)) {
|
||||
ha->ad_type = IPS_ADTYPE_KIOWA;
|
||||
} else if ((ha->revision_id >= IPS_REVID_CLARINETP1) &&
|
||||
(ha->revision_id <= IPS_REVID_CLARINETP3)) {
|
||||
} else if ((ha->pcidev->revision >= IPS_REVID_CLARINETP1) &&
|
||||
(ha->pcidev->revision <= IPS_REVID_CLARINETP3)) {
|
||||
if (ha->enq->ucMaxPhysicalDevices == 15)
|
||||
ha->ad_type = IPS_ADTYPE_SERVERAID3L;
|
||||
else
|
||||
ha->ad_type = IPS_ADTYPE_SERVERAID3;
|
||||
} else if ((ha->revision_id >= IPS_REVID_TROMBONE32) &&
|
||||
(ha->revision_id <= IPS_REVID_TROMBONE64)) {
|
||||
} else if ((ha->pcidev->revision >= IPS_REVID_TROMBONE32) &&
|
||||
(ha->pcidev->revision <= IPS_REVID_TROMBONE64)) {
|
||||
ha->ad_type = IPS_ADTYPE_SERVERAID4H;
|
||||
}
|
||||
break;
|
||||
|
||||
case IPS_DEVICEID_MORPHEUS:
|
||||
switch (ha->subdevice_id) {
|
||||
switch (ha->pcidev->subsystem_device) {
|
||||
case IPS_SUBDEVICEID_4L:
|
||||
ha->ad_type = IPS_ADTYPE_SERVERAID4L;
|
||||
break;
|
||||
@ -2285,7 +2281,7 @@ ips_identify_controller(ips_ha_t * ha)
|
||||
break;
|
||||
|
||||
case IPS_DEVICEID_MARCO:
|
||||
switch (ha->subdevice_id) {
|
||||
switch (ha->pcidev->subsystem_device) {
|
||||
case IPS_SUBDEVICEID_6M:
|
||||
ha->ad_type = IPS_ADTYPE_SERVERAID6M;
|
||||
break;
|
||||
@ -2332,20 +2328,20 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
|
||||
|
||||
strncpy(ha->bios_version, " ?", 8);
|
||||
|
||||
if (ha->device_id == IPS_DEVICEID_COPPERHEAD) {
|
||||
if (ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) {
|
||||
if (IPS_USE_MEMIO(ha)) {
|
||||
/* Memory Mapped I/O */
|
||||
|
||||
/* test 1st byte */
|
||||
writel(0, ha->mem_ptr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55)
|
||||
return;
|
||||
|
||||
writel(1, ha->mem_ptr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA)
|
||||
@ -2353,20 +2349,20 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
|
||||
|
||||
/* Get Major version */
|
||||
writel(0x1FF, ha->mem_ptr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
major = readb(ha->mem_ptr + IPS_REG_FLDP);
|
||||
|
||||
/* Get Minor version */
|
||||
writel(0x1FE, ha->mem_ptr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
minor = readb(ha->mem_ptr + IPS_REG_FLDP);
|
||||
|
||||
/* Get SubMinor version */
|
||||
writel(0x1FD, ha->mem_ptr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
subminor = readb(ha->mem_ptr + IPS_REG_FLDP);
|
||||
|
||||
@ -2375,14 +2371,14 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
|
||||
|
||||
/* test 1st byte */
|
||||
outl(0, ha->io_addr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
|
||||
return;
|
||||
|
||||
outl(cpu_to_le32(1), ha->io_addr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
|
||||
@ -2390,21 +2386,21 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
|
||||
|
||||
/* Get Major version */
|
||||
outl(cpu_to_le32(0x1FF), ha->io_addr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
major = inb(ha->io_addr + IPS_REG_FLDP);
|
||||
|
||||
/* Get Minor version */
|
||||
outl(cpu_to_le32(0x1FE), ha->io_addr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
minor = inb(ha->io_addr + IPS_REG_FLDP);
|
||||
|
||||
/* Get SubMinor version */
|
||||
outl(cpu_to_le32(0x1FD), ha->io_addr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
subminor = inb(ha->io_addr + IPS_REG_FLDP);
|
||||
@ -2740,8 +2736,6 @@ ips_next(ips_ha_t * ha, int intr)
|
||||
SC->result = DID_OK;
|
||||
SC->host_scribble = NULL;
|
||||
|
||||
memset(SC->sense_buffer, 0, sizeof (SC->sense_buffer));
|
||||
|
||||
scb->target_id = SC->device->id;
|
||||
scb->lun = SC->device->lun;
|
||||
scb->bus = SC->device->channel;
|
||||
@ -2782,7 +2776,8 @@ ips_next(ips_ha_t * ha, int intr)
|
||||
|
||||
/* Allow a WRITE BUFFER Command to Have no Data */
|
||||
/* This is Used by Tape Flash Utilites */
|
||||
if ((scb->scsi_cmd->cmnd[0] == WRITE_BUFFER) && (scb->data_len == 0))
|
||||
if ((scb->scsi_cmd->cmnd[0] == WRITE_BUFFER) &&
|
||||
(scb->data_len == 0))
|
||||
scb->dcdb.cmd_attribute = 0;
|
||||
|
||||
if (!(scb->dcdb.cmd_attribute & 0x3))
|
||||
@ -3438,13 +3433,11 @@ ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp)
|
||||
(IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
|
||||
memcpy(scb->scsi_cmd->sense_buffer,
|
||||
tapeDCDB->sense_info,
|
||||
sizeof (scb->scsi_cmd->
|
||||
sense_buffer));
|
||||
SCSI_SENSE_BUFFERSIZE);
|
||||
} else {
|
||||
memcpy(scb->scsi_cmd->sense_buffer,
|
||||
scb->dcdb.sense_info,
|
||||
sizeof (scb->scsi_cmd->
|
||||
sense_buffer));
|
||||
SCSI_SENSE_BUFFERSIZE);
|
||||
}
|
||||
device_error = 2; /* check condition */
|
||||
}
|
||||
@ -3824,7 +3817,6 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
|
||||
/* attempted, a Check Condition occurred, and Sense */
|
||||
/* Data indicating an Invalid CDB OpCode is returned. */
|
||||
sp = (char *) scb->scsi_cmd->sense_buffer;
|
||||
memset(sp, 0, sizeof (scb->scsi_cmd->sense_buffer));
|
||||
|
||||
sp[0] = 0x70; /* Error Code */
|
||||
sp[2] = ILLEGAL_REQUEST; /* Sense Key 5 Illegal Req. */
|
||||
@ -4393,8 +4385,6 @@ ips_free(ips_ha_t * ha)
|
||||
ha->mem_ptr = NULL;
|
||||
}
|
||||
|
||||
if (ha->mem_addr)
|
||||
release_mem_region(ha->mem_addr, ha->mem_len);
|
||||
ha->mem_addr = 0;
|
||||
|
||||
}
|
||||
@ -4757,7 +4747,7 @@ ips_poll_for_flush_complete(ips_ha_t * ha)
|
||||
break;
|
||||
|
||||
/* Success is when we see the Flush Command ID */
|
||||
if (cstatus.fields.command_id == IPS_MAX_CMDS )
|
||||
if (cstatus.fields.command_id == IPS_MAX_CMDS)
|
||||
return 1;
|
||||
}
|
||||
|
||||
@ -4903,7 +4893,7 @@ ips_init_copperhead(ips_ha_t * ha)
|
||||
/* Enable busmastering */
|
||||
outb(IPS_BIT_EBM, ha->io_addr + IPS_REG_SCPR);
|
||||
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
/* fix for anaconda64 */
|
||||
outl(0, ha->io_addr + IPS_REG_NDAE);
|
||||
|
||||
@ -4997,7 +4987,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
|
||||
/* Enable busmastering */
|
||||
writeb(IPS_BIT_EBM, ha->mem_ptr + IPS_REG_SCPR);
|
||||
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
/* fix for anaconda64 */
|
||||
writel(0, ha->mem_ptr + IPS_REG_NDAE);
|
||||
|
||||
@ -5142,7 +5132,7 @@ ips_reset_copperhead(ips_ha_t * ha)
|
||||
METHOD_TRACE("ips_reset_copperhead", 1);
|
||||
|
||||
DEBUG_VAR(1, "(%s%d) ips_reset_copperhead: io addr: %x, irq: %d",
|
||||
ips_name, ha->host_num, ha->io_addr, ha->irq);
|
||||
ips_name, ha->host_num, ha->io_addr, ha->pcidev->irq);
|
||||
|
||||
reset_counter = 0;
|
||||
|
||||
@ -5187,7 +5177,7 @@ ips_reset_copperhead_memio(ips_ha_t * ha)
|
||||
METHOD_TRACE("ips_reset_copperhead_memio", 1);
|
||||
|
||||
DEBUG_VAR(1, "(%s%d) ips_reset_copperhead_memio: mem addr: %x, irq: %d",
|
||||
ips_name, ha->host_num, ha->mem_addr, ha->irq);
|
||||
ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq);
|
||||
|
||||
reset_counter = 0;
|
||||
|
||||
@ -5233,7 +5223,7 @@ ips_reset_morpheus(ips_ha_t * ha)
|
||||
METHOD_TRACE("ips_reset_morpheus", 1);
|
||||
|
||||
DEBUG_VAR(1, "(%s%d) ips_reset_morpheus: mem addr: %x, irq: %d",
|
||||
ips_name, ha->host_num, ha->mem_addr, ha->irq);
|
||||
ips_name, ha->host_num, ha->mem_addr, ha->pcidev->irq);
|
||||
|
||||
reset_counter = 0;
|
||||
|
||||
@ -6196,32 +6186,32 @@ ips_erase_bios(ips_ha_t * ha)
|
||||
|
||||
/* Clear the status register */
|
||||
outl(0, ha->io_addr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
outb(0x50, ha->io_addr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
/* Erase Setup */
|
||||
outb(0x20, ha->io_addr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
/* Erase Confirm */
|
||||
outb(0xD0, ha->io_addr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
/* Erase Status */
|
||||
outb(0x70, ha->io_addr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
timeout = 80000; /* 80 seconds */
|
||||
|
||||
while (timeout > 0) {
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64) {
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
|
||||
outl(0, ha->io_addr + IPS_REG_FLAP);
|
||||
udelay(25); /* 25 us */
|
||||
}
|
||||
@ -6241,13 +6231,13 @@ ips_erase_bios(ips_ha_t * ha)
|
||||
|
||||
/* try to suspend the erase */
|
||||
outb(0xB0, ha->io_addr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
/* wait for 10 seconds */
|
||||
timeout = 10000;
|
||||
while (timeout > 0) {
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64) {
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
|
||||
outl(0, ha->io_addr + IPS_REG_FLAP);
|
||||
udelay(25); /* 25 us */
|
||||
}
|
||||
@ -6277,12 +6267,12 @@ ips_erase_bios(ips_ha_t * ha)
|
||||
/* Otherwise, we were successful */
|
||||
/* clear status */
|
||||
outb(0x50, ha->io_addr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
/* enable reads */
|
||||
outb(0xFF, ha->io_addr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
return (0);
|
||||
@ -6308,32 +6298,32 @@ ips_erase_bios_memio(ips_ha_t * ha)
|
||||
|
||||
/* Clear the status register */
|
||||
writel(0, ha->mem_ptr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
writeb(0x50, ha->mem_ptr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
/* Erase Setup */
|
||||
writeb(0x20, ha->mem_ptr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
/* Erase Confirm */
|
||||
writeb(0xD0, ha->mem_ptr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
/* Erase Status */
|
||||
writeb(0x70, ha->mem_ptr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
timeout = 80000; /* 80 seconds */
|
||||
|
||||
while (timeout > 0) {
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64) {
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
|
||||
writel(0, ha->mem_ptr + IPS_REG_FLAP);
|
||||
udelay(25); /* 25 us */
|
||||
}
|
||||
@ -6353,13 +6343,13 @@ ips_erase_bios_memio(ips_ha_t * ha)
|
||||
|
||||
/* try to suspend the erase */
|
||||
writeb(0xB0, ha->mem_ptr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
/* wait for 10 seconds */
|
||||
timeout = 10000;
|
||||
while (timeout > 0) {
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64) {
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
|
||||
writel(0, ha->mem_ptr + IPS_REG_FLAP);
|
||||
udelay(25); /* 25 us */
|
||||
}
|
||||
@ -6389,12 +6379,12 @@ ips_erase_bios_memio(ips_ha_t * ha)
|
||||
/* Otherwise, we were successful */
|
||||
/* clear status */
|
||||
writeb(0x50, ha->mem_ptr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
/* enable reads */
|
||||
writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
return (0);
|
||||
@ -6423,21 +6413,21 @@ ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
|
||||
for (i = 0; i < buffersize; i++) {
|
||||
/* write a byte */
|
||||
outl(cpu_to_le32(i + offset), ha->io_addr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
outb(0x40, ha->io_addr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
outb(buffer[i], ha->io_addr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
/* wait up to one second */
|
||||
timeout = 1000;
|
||||
while (timeout > 0) {
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64) {
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
|
||||
outl(0, ha->io_addr + IPS_REG_FLAP);
|
||||
udelay(25); /* 25 us */
|
||||
}
|
||||
@ -6454,11 +6444,11 @@ ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
|
||||
if (timeout == 0) {
|
||||
/* timeout error */
|
||||
outl(0, ha->io_addr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
outb(0xFF, ha->io_addr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
return (1);
|
||||
@ -6468,11 +6458,11 @@ ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
|
||||
if (status & 0x18) {
|
||||
/* programming error */
|
||||
outl(0, ha->io_addr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
outb(0xFF, ha->io_addr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
return (1);
|
||||
@ -6481,11 +6471,11 @@ ips_program_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
|
||||
|
||||
/* Enable reading */
|
||||
outl(0, ha->io_addr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
outb(0xFF, ha->io_addr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
return (0);
|
||||
@ -6514,21 +6504,21 @@ ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
|
||||
for (i = 0; i < buffersize; i++) {
|
||||
/* write a byte */
|
||||
writel(i + offset, ha->mem_ptr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
writeb(0x40, ha->mem_ptr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
writeb(buffer[i], ha->mem_ptr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
/* wait up to one second */
|
||||
timeout = 1000;
|
||||
while (timeout > 0) {
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64) {
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64) {
|
||||
writel(0, ha->mem_ptr + IPS_REG_FLAP);
|
||||
udelay(25); /* 25 us */
|
||||
}
|
||||
@ -6545,11 +6535,11 @@ ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
|
||||
if (timeout == 0) {
|
||||
/* timeout error */
|
||||
writel(0, ha->mem_ptr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
return (1);
|
||||
@ -6559,11 +6549,11 @@ ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
|
||||
if (status & 0x18) {
|
||||
/* programming error */
|
||||
writel(0, ha->mem_ptr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
return (1);
|
||||
@ -6572,11 +6562,11 @@ ips_program_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
|
||||
|
||||
/* Enable reading */
|
||||
writel(0, ha->mem_ptr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
writeb(0xFF, ha->mem_ptr + IPS_REG_FLDP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
return (0);
|
||||
@ -6601,14 +6591,14 @@ ips_verify_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
|
||||
|
||||
/* test 1st byte */
|
||||
outl(0, ha->io_addr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
if (inb(ha->io_addr + IPS_REG_FLDP) != 0x55)
|
||||
return (1);
|
||||
|
||||
outl(cpu_to_le32(1), ha->io_addr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
if (inb(ha->io_addr + IPS_REG_FLDP) != 0xAA)
|
||||
return (1);
|
||||
@ -6617,7 +6607,7 @@ ips_verify_bios(ips_ha_t * ha, char *buffer, uint32_t buffersize,
|
||||
for (i = 2; i < buffersize; i++) {
|
||||
|
||||
outl(cpu_to_le32(i + offset), ha->io_addr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
checksum = (uint8_t) checksum + inb(ha->io_addr + IPS_REG_FLDP);
|
||||
@ -6650,14 +6640,14 @@ ips_verify_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
|
||||
|
||||
/* test 1st byte */
|
||||
writel(0, ha->mem_ptr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0x55)
|
||||
return (1);
|
||||
|
||||
writel(1, ha->mem_ptr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
if (readb(ha->mem_ptr + IPS_REG_FLDP) != 0xAA)
|
||||
return (1);
|
||||
@ -6666,7 +6656,7 @@ ips_verify_bios_memio(ips_ha_t * ha, char *buffer, uint32_t buffersize,
|
||||
for (i = 2; i < buffersize; i++) {
|
||||
|
||||
writel(i + offset, ha->mem_ptr + IPS_REG_FLAP);
|
||||
if (ha->revision_id == IPS_REVID_TROMBONE64)
|
||||
if (ha->pcidev->revision == IPS_REVID_TROMBONE64)
|
||||
udelay(25); /* 25 us */
|
||||
|
||||
checksum =
|
||||
@ -6837,24 +6827,18 @@ ips_register_scsi(int index)
|
||||
}
|
||||
ha = IPS_HA(sh);
|
||||
memcpy(ha, oldha, sizeof (ips_ha_t));
|
||||
free_irq(oldha->irq, oldha);
|
||||
free_irq(oldha->pcidev->irq, oldha);
|
||||
/* Install the interrupt handler with the new ha */
|
||||
if (request_irq(ha->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
|
||||
if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
|
||||
IPS_PRINTK(KERN_WARNING, ha->pcidev,
|
||||
"Unable to install interrupt handler\n");
|
||||
scsi_host_put(sh);
|
||||
return -1;
|
||||
goto err_out_sh;
|
||||
}
|
||||
|
||||
kfree(oldha);
|
||||
ips_sh[index] = sh;
|
||||
ips_ha[index] = ha;
|
||||
|
||||
/* Store away needed values for later use */
|
||||
sh->io_port = ha->io_addr;
|
||||
sh->n_io_port = ha->io_addr ? 255 : 0;
|
||||
sh->unique_id = (ha->io_addr) ? ha->io_addr : ha->mem_addr;
|
||||
sh->irq = ha->irq;
|
||||
sh->sg_tablesize = sh->hostt->sg_tablesize;
|
||||
sh->can_queue = sh->hostt->can_queue;
|
||||
sh->cmd_per_lun = sh->hostt->cmd_per_lun;
|
||||
@ -6867,10 +6851,21 @@ ips_register_scsi(int index)
|
||||
sh->max_channel = ha->nbus - 1;
|
||||
sh->can_queue = ha->max_cmds - 1;
|
||||
|
||||
scsi_add_host(sh, NULL);
|
||||
if (scsi_add_host(sh, &ha->pcidev->dev))
|
||||
goto err_out;
|
||||
|
||||
ips_sh[index] = sh;
|
||||
ips_ha[index] = ha;
|
||||
|
||||
scsi_scan_host(sh);
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
free_irq(ha->pcidev->irq, ha);
|
||||
err_out_sh:
|
||||
scsi_host_put(sh);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*---------------------------------------------------------------------------*/
|
||||
@ -6882,20 +6877,14 @@ ips_register_scsi(int index)
|
||||
static void __devexit
|
||||
ips_remove_device(struct pci_dev *pci_dev)
|
||||
{
|
||||
int i;
|
||||
struct Scsi_Host *sh;
|
||||
ips_ha_t *ha;
|
||||
struct Scsi_Host *sh = pci_get_drvdata(pci_dev);
|
||||
|
||||
pci_set_drvdata(pci_dev, NULL);
|
||||
|
||||
for (i = 0; i < IPS_MAX_ADAPTERS; i++) {
|
||||
ha = ips_ha[i];
|
||||
if (ha) {
|
||||
if ((pci_dev->bus->number == ha->pcidev->bus->number) &&
|
||||
(pci_dev->devfn == ha->pcidev->devfn)) {
|
||||
sh = ips_sh[i];
|
||||
ips_release(sh);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pci_release_regions(pci_dev);
|
||||
pci_disable_device(pci_dev);
|
||||
}
|
||||
|
||||
/****************************************************************************/
|
||||
@ -6949,12 +6938,17 @@ module_exit(ips_module_exit);
|
||||
static int __devinit
|
||||
ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent)
|
||||
{
|
||||
int uninitialized_var(index);
|
||||
int index = -1;
|
||||
int rc;
|
||||
|
||||
METHOD_TRACE("ips_insert_device", 1);
|
||||
if (pci_enable_device(pci_dev))
|
||||
return -1;
|
||||
rc = pci_enable_device(pci_dev);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
rc = pci_request_regions(pci_dev, "ips");
|
||||
if (rc)
|
||||
goto err_out;
|
||||
|
||||
rc = ips_init_phase1(pci_dev, &index);
|
||||
if (rc == SUCCESS)
|
||||
@ -6970,6 +6964,19 @@ ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent)
|
||||
ips_num_controllers++;
|
||||
|
||||
ips_next_controller = ips_num_controllers;
|
||||
|
||||
if (rc < 0) {
|
||||
rc = -ENODEV;
|
||||
goto err_out_regions;
|
||||
}
|
||||
|
||||
pci_set_drvdata(pci_dev, ips_sh[index]);
|
||||
return 0;
|
||||
|
||||
err_out_regions:
|
||||
pci_release_regions(pci_dev);
|
||||
err_out:
|
||||
pci_disable_device(pci_dev);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -6992,8 +6999,6 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
|
||||
uint32_t mem_len;
|
||||
uint8_t bus;
|
||||
uint8_t func;
|
||||
uint8_t irq;
|
||||
uint16_t subdevice_id;
|
||||
int j;
|
||||
int index;
|
||||
dma_addr_t dma_address;
|
||||
@ -7004,7 +7009,7 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
|
||||
METHOD_TRACE("ips_init_phase1", 1);
|
||||
index = IPS_MAX_ADAPTERS;
|
||||
for (j = 0; j < IPS_MAX_ADAPTERS; j++) {
|
||||
if (ips_ha[j] == 0) {
|
||||
if (ips_ha[j] == NULL) {
|
||||
index = j;
|
||||
break;
|
||||
}
|
||||
@ -7014,7 +7019,6 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
|
||||
return -1;
|
||||
|
||||
/* stuff that we get in dev */
|
||||
irq = pci_dev->irq;
|
||||
bus = pci_dev->bus->number;
|
||||
func = pci_dev->devfn;
|
||||
|
||||
@ -7042,34 +7046,17 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
|
||||
uint32_t base;
|
||||
uint32_t offs;
|
||||
|
||||
if (!request_mem_region(mem_addr, mem_len, "ips")) {
|
||||
IPS_PRINTK(KERN_WARNING, pci_dev,
|
||||
"Couldn't allocate IO Memory space %x len %d.\n",
|
||||
mem_addr, mem_len);
|
||||
return -1;
|
||||
}
|
||||
|
||||
base = mem_addr & PAGE_MASK;
|
||||
offs = mem_addr - base;
|
||||
ioremap_ptr = ioremap(base, PAGE_SIZE);
|
||||
if (!ioremap_ptr)
|
||||
return -1;
|
||||
mem_ptr = ioremap_ptr + offs;
|
||||
} else {
|
||||
ioremap_ptr = NULL;
|
||||
mem_ptr = NULL;
|
||||
}
|
||||
|
||||
/* setup I/O mapped area (if applicable) */
|
||||
if (io_addr) {
|
||||
if (!request_region(io_addr, io_len, "ips")) {
|
||||
IPS_PRINTK(KERN_WARNING, pci_dev,
|
||||
"Couldn't allocate IO space %x len %d.\n",
|
||||
io_addr, io_len);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
subdevice_id = pci_dev->subsystem_device;
|
||||
|
||||
/* found a controller */
|
||||
ha = kzalloc(sizeof (ips_ha_t), GFP_KERNEL);
|
||||
if (ha == NULL) {
|
||||
@ -7078,13 +7065,11 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
ips_sh[index] = NULL;
|
||||
ips_ha[index] = ha;
|
||||
ha->active = 1;
|
||||
|
||||
/* Store info in HA structure */
|
||||
ha->irq = irq;
|
||||
ha->io_addr = io_addr;
|
||||
ha->io_len = io_len;
|
||||
ha->mem_addr = mem_addr;
|
||||
@ -7092,10 +7077,7 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
|
||||
ha->mem_ptr = mem_ptr;
|
||||
ha->ioremap_ptr = ioremap_ptr;
|
||||
ha->host_num = (uint32_t) index;
|
||||
ha->revision_id = pci_dev->revision;
|
||||
ha->slot_num = PCI_SLOT(pci_dev->devfn);
|
||||
ha->device_id = pci_dev->device;
|
||||
ha->subdevice_id = subdevice_id;
|
||||
ha->pcidev = pci_dev;
|
||||
|
||||
/*
|
||||
@ -7240,7 +7222,7 @@ ips_init_phase2(int index)
|
||||
}
|
||||
|
||||
/* Install the interrupt handler */
|
||||
if (request_irq(ha->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
|
||||
if (request_irq(ha->pcidev->irq, do_ipsintr, IRQF_SHARED, ips_name, ha)) {
|
||||
IPS_PRINTK(KERN_WARNING, ha->pcidev,
|
||||
"Unable to install interrupt handler\n");
|
||||
return ips_abort_init(ha, index);
|
||||
@ -7253,14 +7235,14 @@ ips_init_phase2(int index)
|
||||
if (!ips_allocatescbs(ha)) {
|
||||
IPS_PRINTK(KERN_WARNING, ha->pcidev,
|
||||
"Unable to allocate a CCB\n");
|
||||
free_irq(ha->irq, ha);
|
||||
free_irq(ha->pcidev->irq, ha);
|
||||
return ips_abort_init(ha, index);
|
||||
}
|
||||
|
||||
if (!ips_hainit(ha)) {
|
||||
IPS_PRINTK(KERN_WARNING, ha->pcidev,
|
||||
"Unable to initialize controller\n");
|
||||
free_irq(ha->irq, ha);
|
||||
free_irq(ha->pcidev->irq, ha);
|
||||
return ips_abort_init(ha, index);
|
||||
}
|
||||
/* Free the temporary SCB */
|
||||
@ -7270,7 +7252,7 @@ ips_init_phase2(int index)
|
||||
if (!ips_allocatescbs(ha)) {
|
||||
IPS_PRINTK(KERN_WARNING, ha->pcidev,
|
||||
"Unable to allocate CCBs\n");
|
||||
free_irq(ha->irq, ha);
|
||||
free_irq(ha->pcidev->irq, ha);
|
||||
return ips_abort_init(ha, index);
|
||||
}
|
||||
|
||||
|
@ -60,14 +60,14 @@
|
||||
*/
|
||||
#define IPS_HA(x) ((ips_ha_t *) x->hostdata)
|
||||
#define IPS_COMMAND_ID(ha, scb) (int) (scb - ha->scbs)
|
||||
#define IPS_IS_TROMBONE(ha) (((ha->device_id == IPS_DEVICEID_COPPERHEAD) && \
|
||||
(ha->revision_id >= IPS_REVID_TROMBONE32) && \
|
||||
(ha->revision_id <= IPS_REVID_TROMBONE64)) ? 1 : 0)
|
||||
#define IPS_IS_CLARINET(ha) (((ha->device_id == IPS_DEVICEID_COPPERHEAD) && \
|
||||
(ha->revision_id >= IPS_REVID_CLARINETP1) && \
|
||||
(ha->revision_id <= IPS_REVID_CLARINETP3)) ? 1 : 0)
|
||||
#define IPS_IS_MORPHEUS(ha) (ha->device_id == IPS_DEVICEID_MORPHEUS)
|
||||
#define IPS_IS_MARCO(ha) (ha->device_id == IPS_DEVICEID_MARCO)
|
||||
#define IPS_IS_TROMBONE(ha) (((ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) && \
|
||||
(ha->pcidev->revision >= IPS_REVID_TROMBONE32) && \
|
||||
(ha->pcidev->revision <= IPS_REVID_TROMBONE64)) ? 1 : 0)
|
||||
#define IPS_IS_CLARINET(ha) (((ha->pcidev->device == IPS_DEVICEID_COPPERHEAD) && \
|
||||
(ha->pcidev->revision >= IPS_REVID_CLARINETP1) && \
|
||||
(ha->pcidev->revision <= IPS_REVID_CLARINETP3)) ? 1 : 0)
|
||||
#define IPS_IS_MORPHEUS(ha) (ha->pcidev->device == IPS_DEVICEID_MORPHEUS)
|
||||
#define IPS_IS_MARCO(ha) (ha->pcidev->device == IPS_DEVICEID_MARCO)
|
||||
#define IPS_USE_I2O_DELIVER(ha) ((IPS_IS_MORPHEUS(ha) || \
|
||||
(IPS_IS_TROMBONE(ha) && \
|
||||
(ips_force_i2o))) ? 1 : 0)
|
||||
@ -1034,7 +1034,6 @@ typedef struct ips_ha {
|
||||
uint8_t ha_id[IPS_MAX_CHANNELS+1];
|
||||
uint32_t dcdb_active[IPS_MAX_CHANNELS];
|
||||
uint32_t io_addr; /* Base I/O address */
|
||||
uint8_t irq; /* IRQ for adapter */
|
||||
uint8_t ntargets; /* Number of targets */
|
||||
uint8_t nbus; /* Number of buses */
|
||||
uint8_t nlun; /* Number of Luns */
|
||||
@ -1066,10 +1065,7 @@ typedef struct ips_ha {
|
||||
int ioctl_reset; /* IOCTL Requested Reset Flag */
|
||||
uint16_t reset_count; /* number of resets */
|
||||
time_t last_ffdc; /* last time we sent ffdc info*/
|
||||
uint8_t revision_id; /* Revision level */
|
||||
uint16_t device_id; /* PCI device ID */
|
||||
uint8_t slot_num; /* PCI Slot Number */
|
||||
uint16_t subdevice_id; /* Subsystem device ID */
|
||||
int ioctl_len; /* size of ioctl buffer */
|
||||
dma_addr_t ioctl_busaddr; /* dma address of ioctl buffer*/
|
||||
uint8_t bios_version[8]; /* BIOS Revision */
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -24,71 +24,61 @@
|
||||
|
||||
#include <scsi/libiscsi.h>
|
||||
|
||||
/* Socket's Receive state machine */
|
||||
#define IN_PROGRESS_WAIT_HEADER 0x0
|
||||
#define IN_PROGRESS_HEADER_GATHER 0x1
|
||||
#define IN_PROGRESS_DATA_RECV 0x2
|
||||
#define IN_PROGRESS_DDIGEST_RECV 0x3
|
||||
#define IN_PROGRESS_PAD_RECV 0x4
|
||||
|
||||
/* xmit state machine */
|
||||
#define XMSTATE_VALUE_IDLE 0
|
||||
#define XMSTATE_BIT_CMD_HDR_INIT 0
|
||||
#define XMSTATE_BIT_CMD_HDR_XMIT 1
|
||||
#define XMSTATE_BIT_IMM_HDR 2
|
||||
#define XMSTATE_BIT_IMM_DATA 3
|
||||
#define XMSTATE_BIT_UNS_INIT 4
|
||||
#define XMSTATE_BIT_UNS_HDR 5
|
||||
#define XMSTATE_BIT_UNS_DATA 6
|
||||
#define XMSTATE_BIT_SOL_HDR 7
|
||||
#define XMSTATE_BIT_SOL_DATA 8
|
||||
#define XMSTATE_BIT_W_PAD 9
|
||||
#define XMSTATE_BIT_W_RESEND_PAD 10
|
||||
#define XMSTATE_BIT_W_RESEND_DATA_DIGEST 11
|
||||
#define XMSTATE_BIT_IMM_HDR_INIT 12
|
||||
#define XMSTATE_BIT_SOL_HDR_INIT 13
|
||||
|
||||
#define ISCSI_PAD_LEN 4
|
||||
#define ISCSI_SG_TABLESIZE SG_ALL
|
||||
#define ISCSI_TCP_MAX_CMD_LEN 16
|
||||
|
||||
struct crypto_hash;
|
||||
struct socket;
|
||||
struct iscsi_tcp_conn;
|
||||
struct iscsi_segment;
|
||||
|
||||
typedef int iscsi_segment_done_fn_t(struct iscsi_tcp_conn *,
|
||||
struct iscsi_segment *);
|
||||
|
||||
struct iscsi_segment {
|
||||
unsigned char *data;
|
||||
unsigned int size;
|
||||
unsigned int copied;
|
||||
unsigned int total_size;
|
||||
unsigned int total_copied;
|
||||
|
||||
struct hash_desc *hash;
|
||||
unsigned char recv_digest[ISCSI_DIGEST_SIZE];
|
||||
unsigned char digest[ISCSI_DIGEST_SIZE];
|
||||
unsigned int digest_len;
|
||||
|
||||
struct scatterlist *sg;
|
||||
void *sg_mapped;
|
||||
unsigned int sg_offset;
|
||||
|
||||
iscsi_segment_done_fn_t *done;
|
||||
};
|
||||
|
||||
/* Socket connection recieve helper */
|
||||
struct iscsi_tcp_recv {
|
||||
struct iscsi_hdr *hdr;
|
||||
struct sk_buff *skb;
|
||||
int offset;
|
||||
int len;
|
||||
int hdr_offset;
|
||||
int copy;
|
||||
int copied;
|
||||
int padding;
|
||||
struct iscsi_cmd_task *ctask; /* current cmd in progress */
|
||||
struct iscsi_segment segment;
|
||||
|
||||
/* Allocate buffer for BHS + AHS */
|
||||
uint32_t hdr_buf[64];
|
||||
|
||||
/* copied and flipped values */
|
||||
int datalen;
|
||||
int datadgst;
|
||||
char zero_copy_hdr;
|
||||
};
|
||||
|
||||
/* Socket connection send helper */
|
||||
struct iscsi_tcp_send {
|
||||
struct iscsi_hdr *hdr;
|
||||
struct iscsi_segment segment;
|
||||
struct iscsi_segment data_segment;
|
||||
};
|
||||
|
||||
struct iscsi_tcp_conn {
|
||||
struct iscsi_conn *iscsi_conn;
|
||||
struct socket *sock;
|
||||
struct iscsi_hdr hdr; /* header placeholder */
|
||||
char hdrext[4*sizeof(__u16) +
|
||||
sizeof(__u32)];
|
||||
int data_copied;
|
||||
int stop_stage; /* conn_stop() flag: *
|
||||
* stop to recover, *
|
||||
* stop to terminate */
|
||||
/* iSCSI connection-wide sequencing */
|
||||
int hdr_size; /* PDU header size */
|
||||
|
||||
/* control data */
|
||||
struct iscsi_tcp_recv in; /* TCP receive context */
|
||||
int in_progress; /* connection state machine */
|
||||
struct iscsi_tcp_send out; /* TCP send context */
|
||||
|
||||
/* old values for socket callbacks */
|
||||
void (*old_data_ready)(struct sock *, int);
|
||||
@ -103,29 +93,19 @@ struct iscsi_tcp_conn {
|
||||
uint32_t sendpage_failures_cnt;
|
||||
uint32_t discontiguous_hdr_cnt;
|
||||
|
||||
ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
|
||||
};
|
||||
int error;
|
||||
|
||||
struct iscsi_buf {
|
||||
struct scatterlist sg;
|
||||
unsigned int sent;
|
||||
char use_sendmsg;
|
||||
ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
|
||||
};
|
||||
|
||||
struct iscsi_data_task {
|
||||
struct iscsi_data hdr; /* PDU */
|
||||
char hdrext[sizeof(__u32)]; /* Header-Digest */
|
||||
struct iscsi_buf digestbuf; /* digest buffer */
|
||||
uint32_t digest; /* data digest */
|
||||
char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
|
||||
};
|
||||
|
||||
struct iscsi_tcp_mgmt_task {
|
||||
struct iscsi_hdr hdr;
|
||||
char hdrext[sizeof(__u32)]; /* Header-Digest */
|
||||
unsigned long xmstate; /* mgmt xmit progress */
|
||||
struct iscsi_buf headbuf; /* header buffer */
|
||||
struct iscsi_buf sendbuf; /* in progress buffer */
|
||||
int sent;
|
||||
char hdrext[ISCSI_DIGEST_SIZE]; /* Header-Digest */
|
||||
};
|
||||
|
||||
struct iscsi_r2t_info {
|
||||
@ -133,38 +113,26 @@ struct iscsi_r2t_info {
|
||||
__be32 exp_statsn; /* copied from R2T */
|
||||
uint32_t data_length; /* copied from R2T */
|
||||
uint32_t data_offset; /* copied from R2T */
|
||||
struct iscsi_buf headbuf; /* Data-Out Header Buffer */
|
||||
struct iscsi_buf sendbuf; /* Data-Out in progress buffer*/
|
||||
int sent; /* R2T sequence progress */
|
||||
int data_count; /* DATA-Out payload progress */
|
||||
struct scatterlist *sg; /* per-R2T SG list */
|
||||
int solicit_datasn;
|
||||
struct iscsi_data_task dtask; /* which data task */
|
||||
struct iscsi_data_task dtask; /* Data-Out header buf */
|
||||
};
|
||||
|
||||
struct iscsi_tcp_cmd_task {
|
||||
struct iscsi_cmd hdr;
|
||||
char hdrext[4*sizeof(__u16)+ /* AHS */
|
||||
sizeof(__u32)]; /* HeaderDigest */
|
||||
char pad[ISCSI_PAD_LEN];
|
||||
int pad_count; /* padded bytes */
|
||||
struct iscsi_buf headbuf; /* header buf (xmit) */
|
||||
struct iscsi_buf sendbuf; /* in progress buffer*/
|
||||
unsigned long xmstate; /* xmit xtate machine */
|
||||
struct iscsi_hdr_buff {
|
||||
struct iscsi_cmd cmd_hdr;
|
||||
char hdrextbuf[ISCSI_MAX_AHS_SIZE +
|
||||
ISCSI_DIGEST_SIZE];
|
||||
} hdr;
|
||||
|
||||
int sent;
|
||||
struct scatterlist *sg; /* per-cmd SG list */
|
||||
struct scatterlist *bad_sg; /* assert statement */
|
||||
int sg_count; /* SG's to process */
|
||||
uint32_t exp_datasn; /* expected target's R2TSN/DataSN */
|
||||
int data_offset;
|
||||
struct iscsi_r2t_info *r2t; /* in progress R2T */
|
||||
struct iscsi_queue r2tpool;
|
||||
struct iscsi_pool r2tpool;
|
||||
struct kfifo *r2tqueue;
|
||||
struct iscsi_r2t_info **r2ts;
|
||||
int digest_count;
|
||||
uint32_t immdigest; /* for imm data */
|
||||
struct iscsi_buf immbuf; /* for imm data digest */
|
||||
struct iscsi_data_task unsol_dtask; /* unsol data task */
|
||||
struct iscsi_data_task unsol_dtask; /* Data-Out header buf */
|
||||
};
|
||||
|
||||
#endif /* ISCSI_H */
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -38,6 +38,15 @@ config SCSI_SAS_ATA
|
||||
Builds in ATA support into libsas. Will necessitate
|
||||
the loading of libata along with libsas.
|
||||
|
||||
config SCSI_SAS_HOST_SMP
|
||||
bool "Support for SMP interpretation for SAS hosts"
|
||||
default y
|
||||
depends on SCSI_SAS_LIBSAS
|
||||
help
|
||||
Allows sas hosts to receive SMP frames. Selecting this
|
||||
option builds an SMP interpreter into libsas. Say
|
||||
N here if you want to save the few kb this consumes.
|
||||
|
||||
config SCSI_SAS_LIBSAS_DEBUG
|
||||
bool "Compile the SAS Domain Transport Attributes in debug mode"
|
||||
default y
|
||||
|
@ -33,5 +33,7 @@ libsas-y += sas_init.o \
|
||||
sas_dump.o \
|
||||
sas_discover.o \
|
||||
sas_expander.o \
|
||||
sas_scsi_host.o
|
||||
sas_scsi_host.o \
|
||||
sas_task.o
|
||||
libsas-$(CONFIG_SCSI_SAS_ATA) += sas_ata.o
|
||||
libsas-$(CONFIG_SCSI_SAS_HOST_SMP) += sas_host_smp.o
|
@ -498,7 +498,7 @@ static int sas_execute_task(struct sas_task *task, void *buffer, int size,
|
||||
goto ex_err;
|
||||
}
|
||||
wait_for_completion(&task->completion);
|
||||
res = -ETASK;
|
||||
res = -ECOMM;
|
||||
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
|
||||
int res2;
|
||||
SAS_DPRINTK("task aborted, flags:0x%x\n",
|
||||
|
@ -98,7 +98,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
|
||||
dev->dev_type = SATA_PM;
|
||||
else
|
||||
dev->dev_type = SATA_DEV;
|
||||
dev->tproto = SATA_PROTO;
|
||||
dev->tproto = SAS_PROTOCOL_SATA;
|
||||
} else {
|
||||
struct sas_identify_frame *id =
|
||||
(struct sas_identify_frame *) dev->frame_rcvd;
|
||||
|
@ -96,7 +96,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
|
||||
}
|
||||
|
||||
wait_for_completion(&task->completion);
|
||||
res = -ETASK;
|
||||
res = -ECOMM;
|
||||
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
|
||||
SAS_DPRINTK("smp task timed out or aborted\n");
|
||||
i->dft->lldd_abort_task(task);
|
||||
@ -109,6 +109,16 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
|
||||
task->task_status.stat == SAM_GOOD) {
|
||||
res = 0;
|
||||
break;
|
||||
} if (task->task_status.resp == SAS_TASK_COMPLETE &&
|
||||
task->task_status.stat == SAS_DATA_UNDERRUN) {
|
||||
/* no error, but return the number of bytes of
|
||||
* underrun */
|
||||
res = task->task_status.residual;
|
||||
break;
|
||||
} if (task->task_status.resp == SAS_TASK_COMPLETE &&
|
||||
task->task_status.stat == SAS_DATA_OVERRUN) {
|
||||
res = -EMSGSIZE;
|
||||
break;
|
||||
} else {
|
||||
SAS_DPRINTK("%s: task to dev %016llx response: 0x%x "
|
||||
"status 0x%x\n", __FUNCTION__,
|
||||
@ -656,9 +666,9 @@ static struct domain_device *sas_ex_discover_end_dev(
|
||||
sas_ex_get_linkrate(parent, child, phy);
|
||||
|
||||
#ifdef CONFIG_SCSI_SAS_ATA
|
||||
if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) {
|
||||
if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) {
|
||||
child->dev_type = SATA_DEV;
|
||||
if (phy->attached_tproto & SAS_PROTO_STP)
|
||||
if (phy->attached_tproto & SAS_PROTOCOL_STP)
|
||||
child->tproto = phy->attached_tproto;
|
||||
if (phy->attached_sata_dev)
|
||||
child->tproto |= SATA_DEV;
|
||||
@ -695,7 +705,7 @@ static struct domain_device *sas_ex_discover_end_dev(
|
||||
}
|
||||
} else
|
||||
#endif
|
||||
if (phy->attached_tproto & SAS_PROTO_SSP) {
|
||||
if (phy->attached_tproto & SAS_PROTOCOL_SSP) {
|
||||
child->dev_type = SAS_END_DEV;
|
||||
rphy = sas_end_device_alloc(phy->port);
|
||||
/* FIXME: error handling */
|
||||
@ -1896,11 +1906,9 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
|
||||
}
|
||||
|
||||
/* no rphy means no smp target support (ie aic94xx host) */
|
||||
if (!rphy) {
|
||||
printk("%s: can we send a smp request to a host?\n",
|
||||
__FUNCTION__);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!rphy)
|
||||
return sas_smp_host_handler(shost, req, rsp);
|
||||
|
||||
type = rphy->identify.device_type;
|
||||
|
||||
if (type != SAS_EDGE_EXPANDER_DEVICE &&
|
||||
@ -1926,6 +1934,15 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
|
||||
|
||||
ret = smp_execute_task(dev, bio_data(req->bio), req->data_len,
|
||||
bio_data(rsp->bio), rsp->data_len);
|
||||
if (ret > 0) {
|
||||
/* positive number is the untransferred residual */
|
||||
rsp->data_len = ret;
|
||||
req->data_len = 0;
|
||||
ret = 0;
|
||||
} else if (ret == 0) {
|
||||
rsp->data_len = 0;
|
||||
req->data_len = 0;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
274
drivers/scsi/libsas/sas_host_smp.c
Normal file
274
drivers/scsi/libsas/sas_host_smp.c
Normal file
@ -0,0 +1,274 @@
|
||||
/*
|
||||
* Serial Attached SCSI (SAS) Expander discovery and configuration
|
||||
*
|
||||
* Copyright (C) 2007 James E.J. Bottomley
|
||||
* <James.Bottomley@HansenPartnership.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation; version 2 only.
|
||||
*/
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/blkdev.h>
|
||||
|
||||
#include "sas_internal.h"
|
||||
|
||||
#include <scsi/scsi_transport.h>
|
||||
#include <scsi/scsi_transport_sas.h>
|
||||
#include "../scsi_sas_internal.h"
|
||||
|
||||
static void sas_host_smp_discover(struct sas_ha_struct *sas_ha, u8 *resp_data,
|
||||
u8 phy_id)
|
||||
{
|
||||
struct sas_phy *phy;
|
||||
struct sas_rphy *rphy;
|
||||
|
||||
if (phy_id >= sas_ha->num_phys) {
|
||||
resp_data[2] = SMP_RESP_NO_PHY;
|
||||
return;
|
||||
}
|
||||
resp_data[2] = SMP_RESP_FUNC_ACC;
|
||||
|
||||
phy = sas_ha->sas_phy[phy_id]->phy;
|
||||
resp_data[9] = phy_id;
|
||||
resp_data[13] = phy->negotiated_linkrate;
|
||||
memcpy(resp_data + 16, sas_ha->sas_addr, SAS_ADDR_SIZE);
|
||||
memcpy(resp_data + 24, sas_ha->sas_phy[phy_id]->attached_sas_addr,
|
||||
SAS_ADDR_SIZE);
|
||||
resp_data[40] = (phy->minimum_linkrate << 4) |
|
||||
phy->minimum_linkrate_hw;
|
||||
resp_data[41] = (phy->maximum_linkrate << 4) |
|
||||
phy->maximum_linkrate_hw;
|
||||
|
||||
if (!sas_ha->sas_phy[phy_id]->port ||
|
||||
!sas_ha->sas_phy[phy_id]->port->port_dev)
|
||||
return;
|
||||
|
||||
rphy = sas_ha->sas_phy[phy_id]->port->port_dev->rphy;
|
||||
resp_data[12] = rphy->identify.device_type << 4;
|
||||
resp_data[14] = rphy->identify.initiator_port_protocols;
|
||||
resp_data[15] = rphy->identify.target_port_protocols;
|
||||
}
|
||||
|
||||
static void sas_report_phy_sata(struct sas_ha_struct *sas_ha, u8 *resp_data,
|
||||
u8 phy_id)
|
||||
{
|
||||
struct sas_rphy *rphy;
|
||||
struct dev_to_host_fis *fis;
|
||||
int i;
|
||||
|
||||
if (phy_id >= sas_ha->num_phys) {
|
||||
resp_data[2] = SMP_RESP_NO_PHY;
|
||||
return;
|
||||
}
|
||||
|
||||
resp_data[2] = SMP_RESP_PHY_NO_SATA;
|
||||
|
||||
if (!sas_ha->sas_phy[phy_id]->port)
|
||||
return;
|
||||
|
||||
rphy = sas_ha->sas_phy[phy_id]->port->port_dev->rphy;
|
||||
fis = (struct dev_to_host_fis *)
|
||||
sas_ha->sas_phy[phy_id]->port->port_dev->frame_rcvd;
|
||||
if (rphy->identify.target_port_protocols != SAS_PROTOCOL_SATA)
|
||||
return;
|
||||
|
||||
resp_data[2] = SMP_RESP_FUNC_ACC;
|
||||
resp_data[9] = phy_id;
|
||||
memcpy(resp_data + 16, sas_ha->sas_phy[phy_id]->attached_sas_addr,
|
||||
SAS_ADDR_SIZE);
|
||||
|
||||
/* check to see if we have a valid d2h fis */
|
||||
if (fis->fis_type != 0x34)
|
||||
return;
|
||||
|
||||
/* the d2h fis is required by the standard to be in LE format */
|
||||
for (i = 0; i < 20; i += 4) {
|
||||
u8 *dst = resp_data + 24 + i, *src =
|
||||
&sas_ha->sas_phy[phy_id]->port->port_dev->frame_rcvd[i];
|
||||
dst[0] = src[3];
|
||||
dst[1] = src[2];
|
||||
dst[2] = src[1];
|
||||
dst[3] = src[0];
|
||||
}
|
||||
}
|
||||
|
||||
/* Execute an SMP PHY CONTROL operation against one of the host's phys.
 *
 * @sas_ha:    host SAS layer instance
 * @phy_id:    index of the phy within sas_ha->sas_phy[]
 * @phy_op:    requested PHY_FUNC_* operation code
 * @min, @max: requested programmed link rate bounds
 * @resp_data: response frame buffer; byte 2 receives the SMP result code
 *
 * Validates the phy index and operation code, then hands the request to
 * the LLDD's lldd_control_phy() hook.  The outcome is reported in-band
 * via resp_data[2].
 */
static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id,
			    u8 phy_op, enum sas_linkrate min,
			    enum sas_linkrate max, u8 *resp_data)
{
	struct sas_phy_linkrates rates;
	struct sas_internal *intf;

	intf = to_sas_internal(sas_ha->core.shost->transportt);

	if (phy_id >= sas_ha->num_phys) {
		resp_data[2] = SMP_RESP_NO_PHY;
		return;
	}

	/* reject anything outside the set of operations we can forward */
	switch (phy_op) {
	case PHY_FUNC_NOP:
	case PHY_FUNC_LINK_RESET:
	case PHY_FUNC_HARD_RESET:
	case PHY_FUNC_DISABLE:
	case PHY_FUNC_CLEAR_ERROR_LOG:
	case PHY_FUNC_CLEAR_AFFIL:
	case PHY_FUNC_TX_SATA_PS_SIGNAL:
		break;
	default:
		resp_data[2] = SMP_RESP_PHY_UNK_OP;
		return;
	}

	rates.minimum_linkrate = min;
	rates.maximum_linkrate = max;

	/* non-zero from the LLDD means the operation failed */
	resp_data[2] = intf->dft->lldd_control_phy(sas_ha->sas_phy[phy_id],
						   phy_op, &rates) ?
		SMP_RESP_FUNC_FAILED : SMP_RESP_FUNC_ACC;
}
|
||||
|
||||
int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
|
||||
struct request *rsp)
|
||||
{
|
||||
u8 *req_data = NULL, *resp_data = NULL, *buf;
|
||||
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
|
||||
int error = -EINVAL, resp_data_len = rsp->data_len;
|
||||
|
||||
/* eight is the minimum size for request and response frames */
|
||||
if (req->data_len < 8 || rsp->data_len < 8)
|
||||
goto out;
|
||||
|
||||
if (bio_offset(req->bio) + req->data_len > PAGE_SIZE ||
|
||||
bio_offset(rsp->bio) + rsp->data_len > PAGE_SIZE) {
|
||||
shost_printk(KERN_ERR, shost,
|
||||
"SMP request/response frame crosses page boundary");
|
||||
goto out;
|
||||
}
|
||||
|
||||
req_data = kzalloc(req->data_len, GFP_KERNEL);
|
||||
|
||||
/* make sure frame can always be built ... we copy
|
||||
* back only the requested length */
|
||||
resp_data = kzalloc(max(rsp->data_len, 128U), GFP_KERNEL);
|
||||
|
||||
if (!req_data || !resp_data) {
|
||||
error = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
local_irq_disable();
|
||||
buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio);
|
||||
memcpy(req_data, buf, req->data_len);
|
||||
kunmap_atomic(buf - bio_offset(req->bio), KM_USER0);
|
||||
local_irq_enable();
|
||||
|
||||
if (req_data[0] != SMP_REQUEST)
|
||||
goto out;
|
||||
|
||||
/* always succeeds ... even if we can't process the request
|
||||
* the result is in the response frame */
|
||||
error = 0;
|
||||
|
||||
/* set up default don't know response */
|
||||
resp_data[0] = SMP_RESPONSE;
|
||||
resp_data[1] = req_data[1];
|
||||
resp_data[2] = SMP_RESP_FUNC_UNK;
|
||||
|
||||
switch (req_data[1]) {
|
||||
case SMP_REPORT_GENERAL:
|
||||
req->data_len -= 8;
|
||||
resp_data_len -= 32;
|
||||
resp_data[2] = SMP_RESP_FUNC_ACC;
|
||||
resp_data[9] = sas_ha->num_phys;
|
||||
break;
|
||||
|
||||
case SMP_REPORT_MANUF_INFO:
|
||||
req->data_len -= 8;
|
||||
resp_data_len -= 64;
|
||||
resp_data[2] = SMP_RESP_FUNC_ACC;
|
||||
memcpy(resp_data + 12, shost->hostt->name,
|
||||
SAS_EXPANDER_VENDOR_ID_LEN);
|
||||
memcpy(resp_data + 20, "libsas virt phy",
|
||||
SAS_EXPANDER_PRODUCT_ID_LEN);
|
||||
break;
|
||||
|
||||
case SMP_READ_GPIO_REG:
|
||||
/* FIXME: need GPIO support in the transport class */
|
||||
break;
|
||||
|
||||
case SMP_DISCOVER:
|
||||
req->data_len =- 16;
|
||||
if (req->data_len < 0) {
|
||||
req->data_len = 0;
|
||||
error = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
resp_data_len -= 56;
|
||||
sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
|
||||
break;
|
||||
|
||||
case SMP_REPORT_PHY_ERR_LOG:
|
||||
/* FIXME: could implement this with additional
|
||||
* libsas callbacks providing the HW supports it */
|
||||
break;
|
||||
|
||||
case SMP_REPORT_PHY_SATA:
|
||||
req->data_len =- 16;
|
||||
if (req->data_len < 0) {
|
||||
req->data_len = 0;
|
||||
error = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
resp_data_len -= 60;
|
||||
sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
|
||||
break;
|
||||
|
||||
case SMP_REPORT_ROUTE_INFO:
|
||||
/* Can't implement; hosts have no routes */
|
||||
break;
|
||||
|
||||
case SMP_WRITE_GPIO_REG:
|
||||
/* FIXME: need GPIO support in the transport class */
|
||||
break;
|
||||
|
||||
case SMP_CONF_ROUTE_INFO:
|
||||
/* Can't implement; hosts have no routes */
|
||||
break;
|
||||
|
||||
case SMP_PHY_CONTROL:
|
||||
req->data_len =- 44;
|
||||
if (req->data_len < 0) {
|
||||
req->data_len = 0;
|
||||
error = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
resp_data_len -= 8;
|
||||
sas_phy_control(sas_ha, req_data[9], req_data[10],
|
||||
req_data[32] >> 4, req_data[33] >> 4,
|
||||
resp_data);
|
||||
break;
|
||||
|
||||
case SMP_PHY_TEST_FUNCTION:
|
||||
/* FIXME: should this be implemented? */
|
||||
break;
|
||||
|
||||
default:
|
||||
/* probably a 2.0 function */
|
||||
break;
|
||||
}
|
||||
|
||||
local_irq_disable();
|
||||
buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio);
|
||||
memcpy(buf, resp_data, rsp->data_len);
|
||||
flush_kernel_dcache_page(bio_page(rsp->bio));
|
||||
kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0);
|
||||
local_irq_enable();
|
||||
rsp->data_len = resp_data_len;
|
||||
|
||||
out:
|
||||
kfree(req_data);
|
||||
kfree(resp_data);
|
||||
return error;
|
||||
}
|
@ -45,7 +45,7 @@
|
||||
void sas_scsi_recover_host(struct Scsi_Host *shost);
|
||||
|
||||
int sas_show_class(enum sas_class class, char *buf);
|
||||
int sas_show_proto(enum sas_proto proto, char *buf);
|
||||
int sas_show_proto(enum sas_protocol proto, char *buf);
|
||||
int sas_show_linkrate(enum sas_linkrate linkrate, char *buf);
|
||||
int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf);
|
||||
|
||||
@ -80,6 +80,20 @@ struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
|
||||
|
||||
void sas_hae_reset(struct work_struct *work);
|
||||
|
||||
/* Host-side SMP: when enabled, SMP requests addressed to the SAS host
 * itself are handled by sas_smp_host_handler(); when compiled out, the
 * inline stub below rejects them with -EINVAL. */
#ifdef CONFIG_SCSI_SAS_HOST_SMP
extern int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
				struct request *rsp);
#else
static inline int sas_smp_host_handler(struct Scsi_Host *shost,
				       struct request *req,
				       struct request *rsp)
{
	/* CONFIG_SCSI_SAS_HOST_SMP disabled: nothing can service this */
	shost_printk(KERN_ERR, shost,
		     "Cannot send SMP to a sas host (not enabled in CONFIG)\n");
	return -EINVAL;
}
#endif
|
||||
|
||||
static inline void sas_queue_event(int event, spinlock_t *lock,
|
||||
unsigned long *pending,
|
||||
struct work_struct *work,
|
||||
|
@ -108,7 +108,7 @@ static void sas_scsi_task_done(struct sas_task *task)
|
||||
break;
|
||||
case SAM_CHECK_COND:
|
||||
memcpy(sc->sense_buffer, ts->buf,
|
||||
max(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
|
||||
min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
|
||||
stat = SAM_CHECK_COND;
|
||||
break;
|
||||
default:
|
||||
@ -148,7 +148,6 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
|
||||
if (!task)
|
||||
return NULL;
|
||||
|
||||
*(u32 *)cmd->sense_buffer = 0;
|
||||
task->uldd_task = cmd;
|
||||
ASSIGN_SAS_TASK(cmd, task);
|
||||
|
||||
@ -200,6 +199,10 @@ int sas_queue_up(struct sas_task *task)
|
||||
*/
|
||||
int sas_queuecommand(struct scsi_cmnd *cmd,
|
||||
void (*scsi_done)(struct scsi_cmnd *))
|
||||
__releases(host->host_lock)
|
||||
__acquires(dev->sata_dev.ap->lock)
|
||||
__releases(dev->sata_dev.ap->lock)
|
||||
__acquires(host->host_lock)
|
||||
{
|
||||
int res = 0;
|
||||
struct domain_device *dev = cmd_to_domain_dev(cmd);
|
||||
@ -410,7 +413,7 @@ static int sas_recover_I_T(struct domain_device *dev)
|
||||
}
|
||||
|
||||
/* Find the sas_phy that's attached to this device */
|
||||
struct sas_phy *find_local_sas_phy(struct domain_device *dev)
|
||||
static struct sas_phy *find_local_sas_phy(struct domain_device *dev)
|
||||
{
|
||||
struct domain_device *pdev = dev->parent;
|
||||
struct ex_phy *exphy = NULL;
|
||||
|
36
drivers/scsi/libsas/sas_task.c
Normal file
36
drivers/scsi/libsas/sas_task.c
Normal file
@ -0,0 +1,36 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <scsi/sas.h>
|
||||
#include <scsi/libsas.h>
|
||||
|
||||
/* fill task_status_struct based on SSP response frame */
|
||||
void sas_ssp_task_response(struct device *dev, struct sas_task *task,
|
||||
struct ssp_response_iu *iu)
|
||||
{
|
||||
struct task_status_struct *tstat = &task->task_status;
|
||||
|
||||
tstat->resp = SAS_TASK_COMPLETE;
|
||||
|
||||
if (iu->datapres == 0)
|
||||
tstat->stat = iu->status;
|
||||
else if (iu->datapres == 1)
|
||||
tstat->stat = iu->resp_data[3];
|
||||
else if (iu->datapres == 2) {
|
||||
tstat->stat = SAM_CHECK_COND;
|
||||
tstat->buf_valid_size =
|
||||
min_t(int, SAS_STATUS_BUF_SIZE,
|
||||
be32_to_cpu(iu->sense_data_len));
|
||||
memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size);
|
||||
|
||||
if (iu->status != SAM_CHECK_COND)
|
||||
dev_printk(KERN_WARNING, dev,
|
||||
"dev %llx sent sense data, but "
|
||||
"stat(%x) is not CHECK CONDITION\n",
|
||||
SAS_ADDR(task->dev->sas_addr),
|
||||
iu->status);
|
||||
}
|
||||
else
|
||||
/* when datapres contains corrupt/unknown value... */
|
||||
tstat->stat = SAM_CHECK_COND;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(sas_ssp_task_response);
|
||||
|
@ -192,18 +192,18 @@ static int srp_direct_data(struct scsi_cmnd *sc, struct srp_direct_buf *md,
|
||||
|
||||
if (dma_map) {
|
||||
iue = (struct iu_entry *) sc->SCp.ptr;
|
||||
sg = sc->request_buffer;
|
||||
sg = scsi_sglist(sc);
|
||||
|
||||
dprintk("%p %u %u %d\n", iue, sc->request_bufflen,
|
||||
md->len, sc->use_sg);
|
||||
dprintk("%p %u %u %d\n", iue, scsi_bufflen(sc),
|
||||
md->len, scsi_sg_count(sc));
|
||||
|
||||
nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg,
|
||||
nsg = dma_map_sg(iue->target->dev, sg, scsi_sg_count(sc),
|
||||
DMA_BIDIRECTIONAL);
|
||||
if (!nsg) {
|
||||
printk("fail to map %p %d\n", iue, sc->use_sg);
|
||||
printk("fail to map %p %d\n", iue, scsi_sg_count(sc));
|
||||
return 0;
|
||||
}
|
||||
len = min(sc->request_bufflen, md->len);
|
||||
len = min(scsi_bufflen(sc), md->len);
|
||||
} else
|
||||
len = md->len;
|
||||
|
||||
@ -229,10 +229,10 @@ static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
|
||||
|
||||
if (dma_map || ext_desc) {
|
||||
iue = (struct iu_entry *) sc->SCp.ptr;
|
||||
sg = sc->request_buffer;
|
||||
sg = scsi_sglist(sc);
|
||||
|
||||
dprintk("%p %u %u %d %d\n",
|
||||
iue, sc->request_bufflen, id->len,
|
||||
iue, scsi_bufflen(sc), id->len,
|
||||
cmd->data_in_desc_cnt, cmd->data_out_desc_cnt);
|
||||
}
|
||||
|
||||
@ -268,13 +268,14 @@ static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
|
||||
|
||||
rdma:
|
||||
if (dma_map) {
|
||||
nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg, DMA_BIDIRECTIONAL);
|
||||
nsg = dma_map_sg(iue->target->dev, sg, scsi_sg_count(sc),
|
||||
DMA_BIDIRECTIONAL);
|
||||
if (!nsg) {
|
||||
eprintk("fail to map %p %d\n", iue, sc->use_sg);
|
||||
eprintk("fail to map %p %d\n", iue, scsi_sg_count(sc));
|
||||
err = -EIO;
|
||||
goto free_mem;
|
||||
}
|
||||
len = min(sc->request_bufflen, id->len);
|
||||
len = min(scsi_bufflen(sc), id->len);
|
||||
} else
|
||||
len = id->len;
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user