mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-18 10:13:57 +08:00
SCSI for-linus on 20140613
This is just a couple of drivers (hpsa and lpfc) that got left out for
further testing in linux-next.  We also have one fix to a prior
submission (qla2xxx sparse).

Signed-off-by: James Bottomley <JBottomley@Parallels.com>

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)

iQEcBAABAgAGBQJTm48MAAoJEDeqqVYsXL0M1YEH/iZyEILT4EIZxre/tspqX/LB
dxtGlmlF8AEU8/Eze3k/OB5nSuGcnYZ1hN1CgT2zZEv+sih6FekQOQV06qTwzwbo
DnWA3dOrPVgMzzSVvXFEjryroIUNhZvMy8TGu+DefE9b6FUs6B3VZlMR3A+TcSgV
cgknkG2Q6mWN8rO44pTSVlVDe2JpkvCYsHnqhO8uneQXVHNtsPpV7FfoLMLjBUDX
dgsaDiUjyrj0sdR1yOgRjDH68FPewEiEONdtKi63kkI6zWDFASiKDY9yc1eIyjVd
/1gbBJxwTRl4dWEdsigr/pOBxs6yjXGBSl/6PPDtuvdpWLFWUg4C2XtDLz0KLfU=
=tdDT
-----END PGP SIGNATURE-----

Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull more SCSI updates from James Bottomley:
 "This is just a couple of drivers (hpsa and lpfc) that got left out
  for further testing in linux-next.  We also have one fix to a prior
  submission (qla2xxx sparse)"

* tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (36 commits)
  qla2xxx: fix sparse warnings introduced by previous target mode t10-dif patch
  lpfc: Update lpfc version to driver version 10.2.8001.0
  lpfc: Fix ExpressLane priority setup
  lpfc: mark old devices as obsolete
  lpfc: Fix for initializing RRQ bitmap
  lpfc: Fix for cleaning up stale ring flag and sp_queue_event entries
  lpfc: Update lpfc version to driver version 10.2.8000.0
  lpfc: Update Copyright on changed files from 8.3.45 patches
  lpfc: Update Copyright on changed files
  lpfc: Fixed locking for scsi task management commands
  lpfc: Convert runtime references to old xlane cfg param to fof cfg param
  lpfc: Fix FW dump using sysfs
  lpfc: Fix SLI4 s abort loop to process all FCP rings and under ring_lock
  lpfc: Fixed kernel panic in lpfc_abort_handler
  lpfc: Fix locking for postbufq when freeing
  lpfc: Fix locking for lpfc_hba_down_post
  lpfc: Fix dynamic transitions of FirstBurst from on to off
  hpsa: fix handling of hpsa_volume_offline return value
  hpsa: return -ENOMEM not -1 on kzalloc failure in hpsa_get_device_id
  hpsa: remove messages about volume status VPD inquiry page not supported
  ...
commit abf04af74a
@@ -48,6 +48,7 @@
 #include <linux/bitmap.h>
 #include <linux/atomic.h>
 #include <linux/jiffies.h>
+#include <linux/percpu.h>
 #include <asm/div64.h>
 #include "hpsa_cmd.h"
 #include "hpsa.h"
@@ -193,7 +194,8 @@ static int number_of_controllers;
 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
-static void start_io(struct ctlr_info *h);
+static void lock_and_start_io(struct ctlr_info *h);
+static void start_io(struct ctlr_info *h, unsigned long *flags);

 #ifdef CONFIG_COMPAT
 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
@@ -695,7 +697,7 @@ static inline void addQ(struct list_head *list, struct CommandList *c)
 static inline u32 next_command(struct ctlr_info *h, u8 q)
 {
 	u32 a;
-	struct reply_pool *rq = &h->reply_queue[q];
+	struct reply_queue_buffer *rq = &h->reply_queue[q];
 	unsigned long flags;

 	if (h->transMethod & CFGTBL_Trans_io_accel1)
@@ -844,8 +846,8 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
 	spin_lock_irqsave(&h->lock, flags);
 	addQ(&h->reqQ, c);
 	h->Qdepth++;
+	start_io(h, &flags);
 	spin_unlock_irqrestore(&h->lock, flags);
-	start_io(h);
 }

 static inline void removeQ(struct CommandList *c)
@@ -1554,9 +1556,13 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
 			dev_warn(&h->pdev->dev,
 				"%s: task complete with check condition.\n",
 				"HP SSD Smart Path");
+			cmd->result |= SAM_STAT_CHECK_CONDITION;
 			if (c2->error_data.data_present !=
-					IOACCEL2_SENSE_DATA_PRESENT)
+					IOACCEL2_SENSE_DATA_PRESENT) {
+				memset(cmd->sense_buffer, 0,
+					SCSI_SENSE_BUFFERSIZE);
 				break;
+			}
 			/* copy the sense data */
 			data_len = c2->error_data.sense_data_len;
 			if (data_len > SCSI_SENSE_BUFFERSIZE)
@@ -1566,7 +1572,6 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
 					sizeof(c2->error_data.sense_data_buff);
 			memcpy(cmd->sense_buffer,
 				c2->error_data.sense_data_buff, data_len);
-			cmd->result |= SAM_STAT_CHECK_CONDITION;
 			retry = 1;
 			break;
 		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
@@ -1651,16 +1656,6 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
 	if (is_logical_dev_addr_mode(dev->scsi3addr) &&
 		c2->error_data.serv_response ==
 			IOACCEL2_SERV_RESPONSE_FAILURE) {
-		if (c2->error_data.status ==
-			IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
-			dev_warn(&h->pdev->dev,
-				"%s: Path is unavailable, retrying on standard path.\n",
-				"HP SSD Smart Path");
-		else
-			dev_warn(&h->pdev->dev,
-				"%s: Error 0x%02x, retrying on standard path.\n",
-				"HP SSD Smart Path", c2->error_data.status);
-
 		dev->offload_enabled = 0;
 		h->drv_req_rescan = 1;	/* schedule controller for a rescan */
 		cmd->result = DID_SOFT_ERROR << 16;
@@ -1991,20 +1986,26 @@ static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
 	wait_for_completion(&wait);
 }

+static u32 lockup_detected(struct ctlr_info *h)
+{
+	int cpu;
+	u32 rc, *lockup_detected;
+
+	cpu = get_cpu();
+	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
+	rc = *lockup_detected;
+	put_cpu();
+	return rc;
+}
+
 static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
 	struct CommandList *c)
 {
-	unsigned long flags;
-
 	/* If controller lockup detected, fake a hardware error. */
-	spin_lock_irqsave(&h->lock, flags);
-	if (unlikely(h->lockup_detected)) {
-		spin_unlock_irqrestore(&h->lock, flags);
+	if (unlikely(lockup_detected(h)))
 		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
-	} else {
-		spin_unlock_irqrestore(&h->lock, flags);
+	else
 		hpsa_scsi_do_simple_cmd_core(h, c);
-	}
 }

 #define MAX_DRIVER_CMD_RETRIES 25
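The lockup_detected() helper added above is the read side of a per-cpu flag; the write side (set_lockup_detected_for_all_cpus) appears further down in this same patch. For reference only, a minimal self-contained sketch of that per-cpu flag pattern, using a hypothetical struct foo rather than the driver's types:

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/types.h>

struct foo {
	u32 __percpu *flag;	/* one copy per CPU; readers never take a lock */
};

static int foo_init(struct foo *f)
{
	f->flag = alloc_percpu(u32);	/* zeroed on allocation */
	return f->flag ? 0 : -ENOMEM;
}

/* Hot path: read only this CPU's copy, with preemption disabled. */
static u32 foo_flag_read(struct foo *f)
{
	u32 v = *per_cpu_ptr(f->flag, get_cpu());

	put_cpu();
	return v;
}

/* Slow path: a writer updates every CPU's copy. */
static void foo_flag_set_all(struct foo *f, u32 value)
{
	int cpu;

	for_each_online_cpu(cpu)
		*per_cpu_ptr(f->flag, cpu) = value;
	wmb();	/* push the per-cpu stores out to memory */
}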
@@ -2429,7 +2430,7 @@ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
 		buflen = 16;
 	buf = kzalloc(64, GFP_KERNEL);
 	if (!buf)
-		return -1;
+		return -ENOMEM;
 	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
 	if (rc == 0)
 		memcpy(device_id, &buf[8], buflen);
@@ -2515,27 +2516,21 @@ static int hpsa_get_volume_status(struct ctlr_info *h,
 		return HPSA_VPD_LV_STATUS_UNSUPPORTED;

 	/* Does controller have VPD for logical volume status? */
-	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) {
-		dev_warn(&h->pdev->dev, "Logical volume status VPD page is unsupported.\n");
+	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
 		goto exit_failed;
-	}

 	/* Get the size of the VPD return buffer */
 	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
 					buf, HPSA_VPD_HEADER_SZ);
-	if (rc != 0) {
-		dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
+	if (rc != 0)
 		goto exit_failed;
-	}
 	size = buf[3];

 	/* Now get the whole VPD buffer */
 	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
 					buf, size + HPSA_VPD_HEADER_SZ);
-	if (rc != 0) {
-		dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
+	if (rc != 0)
 		goto exit_failed;
-	}
 	status = buf[4]; /* status byte */

 	kfree(buf);
@@ -2548,11 +2543,11 @@ exit_failed:
 /* Determine offline status of a volume.
  * Return either:
  *  0 (not offline)
- * -1 (offline for unknown reasons)
+ *  0xff (offline for unknown reasons)
  *  # (integer code indicating one of several NOT READY states
  *     describing why a volume is to be kept offline)
  */
-static unsigned char hpsa_volume_offline(struct ctlr_info *h,
+static int hpsa_volume_offline(struct ctlr_info *h,
 					unsigned char scsi3addr[])
 {
 	struct CommandList *c;
@@ -2651,11 +2646,15 @@ static int hpsa_update_device_info(struct ctlr_info *h,

 	if (this_device->devtype == TYPE_DISK &&
 		is_logical_dev_addr_mode(scsi3addr)) {
+		int volume_offline;
+
 		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
 		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
 			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
-		this_device->volume_offline =
-			hpsa_volume_offline(h, scsi3addr);
+		volume_offline = hpsa_volume_offline(h, scsi3addr);
+		if (volume_offline < 0 || volume_offline > 0xff)
+			volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
+		this_device->volume_offline = volume_offline & 0xff;
 	} else {
 		this_device->raid_level = RAID_UNKNOWN;
 		this_device->offload_config = 0;
@@ -2861,26 +2860,20 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
 	nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
 							responsesize;

-
 	/* find ioaccel2 handle in list of physicals: */
 	for (i = 0; i < nphysicals; i++) {
+		struct ext_report_lun_entry *entry = &physicals->LUN[i];
+
 		/* handle is in bytes 28-31 of each lun */
-		if (memcmp(&((struct ReportExtendedLUNdata *)
-				physicals)->LUN[i][20], &find, 4) != 0) {
+		if (entry->ioaccel_handle != find)
 			continue; /* didn't match */
-		}
 		found = 1;
-		memcpy(scsi3addr, &((struct ReportExtendedLUNdata *)
-					physicals)->LUN[i][0], 8);
+		memcpy(scsi3addr, entry->lunid, 8);
 		if (h->raid_offload_debug > 0)
 			dev_info(&h->pdev->dev,
-				"%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+				"%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n",
 				__func__, find,
-				((struct ReportExtendedLUNdata *)
-					physicals)->LUN[i][20],
-				scsi3addr[0], scsi3addr[1], scsi3addr[2],
-				scsi3addr[3], scsi3addr[4], scsi3addr[5],
-				scsi3addr[6], scsi3addr[7]);
+				entry->ioaccel_handle, scsi3addr);
 		break; /* found it */
 	}

@@ -2965,7 +2958,8 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
 		return RAID_CTLR_LUNID;

 	if (i < logicals_start)
-		return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
+		return &physdev_list->LUN[i -
+				(raid_ctlr_position == 0)].lunid[0];

 	if (i < last_device)
 		return &logdev_list->LUN[i - nphysicals -
@@ -3074,7 +3068,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 		ndev_allocated++;
 	}

-	if (unlikely(is_scsi_rev_5(h)))
+	if (is_scsi_rev_5(h))
 		raid_ctlr_position = 0;
 	else
 		raid_ctlr_position = nphysicals + nlogicals;
@@ -3971,7 +3965,6 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
 	struct hpsa_scsi_dev_t *dev;
 	unsigned char scsi3addr[8];
 	struct CommandList *c;
-	unsigned long flags;
 	int rc = 0;

 	/* Get the ptr to our adapter structure out of cmd->host. */
@@ -3984,14 +3977,11 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
 	}
 	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

-	spin_lock_irqsave(&h->lock, flags);
-	if (unlikely(h->lockup_detected)) {
-		spin_unlock_irqrestore(&h->lock, flags);
+	if (unlikely(lockup_detected(h))) {
 		cmd->result = DID_ERROR << 16;
 		done(cmd);
 		return 0;
 	}
-	spin_unlock_irqrestore(&h->lock, flags);
 	c = cmd_alloc(h);
 	if (c == NULL) {		/* trouble... */
 		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
@@ -4103,16 +4093,13 @@ static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
	 * we can prevent new rescan threads from piling up on a
	 * locked up controller.
	 */
-	spin_lock_irqsave(&h->lock, flags);
-	if (unlikely(h->lockup_detected)) {
-		spin_unlock_irqrestore(&h->lock, flags);
+	if (unlikely(lockup_detected(h))) {
 		spin_lock_irqsave(&h->scan_lock, flags);
 		h->scan_finished = 1;
 		wake_up_all(&h->scan_wait_queue);
 		spin_unlock_irqrestore(&h->scan_lock, flags);
 		return 1;
 	}
-	spin_unlock_irqrestore(&h->lock, flags);
 	return 0;
 }

@@ -4963,7 +4950,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 	buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
 	if (buff == NULL)
 		return -EFAULT;
-	if (iocommand.Request.Type.Direction == XFER_WRITE) {
+	if (iocommand.Request.Type.Direction & XFER_WRITE) {
 		/* Copy the data into the buffer we created */
 		if (copy_from_user(buff, iocommand.buf,
 			iocommand.buf_size)) {
@@ -5026,7 +5013,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 		rc = -EFAULT;
 		goto out;
 	}
-	if (iocommand.Request.Type.Direction == XFER_READ &&
+	if ((iocommand.Request.Type.Direction & XFER_READ) &&
 		iocommand.buf_size > 0) {
 		/* Copy the data out of the buffer we created */
 		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
@@ -5103,7 +5090,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 			status = -ENOMEM;
 			goto cleanup1;
 		}
-		if (ioc->Request.Type.Direction == XFER_WRITE) {
+		if (ioc->Request.Type.Direction & XFER_WRITE) {
 			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
 				status = -ENOMEM;
 				goto cleanup1;
@@ -5155,7 +5142,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 		status = -EFAULT;
 		goto cleanup0;
 	}
-	if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
+	if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
 		/* Copy the data out of the buffer we created */
 		BYTE __user *ptr = ioc->buf;
 		for (i = 0; i < sg_used; i++) {
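A word on the `==` to `&` changes in the two passthru ioctl hunks above: once the Direction field is treated as a bit mask, a bidirectional command carries both XFER_WRITE and XFER_READ, and an equality test silently skips the user-space copy for such a request, while a mask test still fires. A tiny standalone illustration (the flag values are made up for the example, not taken from the driver headers):

#include <stdio.h>

#define XFER_WRITE 0x1
#define XFER_READ  0x2

int main(void)
{
	unsigned int dir = XFER_WRITE | XFER_READ;	/* bidirectional request */

	printf("dir == XFER_WRITE -> %d (copy skipped)\n", dir == XFER_WRITE);
	printf("dir &  XFER_WRITE -> %d (copy performed)\n", !!(dir & XFER_WRITE));
	return 0;
}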
@@ -5459,13 +5446,12 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)

 /* Takes cmds off the submission queue and sends them to the hardware,
  * then puts them on the queue of cmds waiting for completion.
+ * Assumes h->lock is held
  */
-static void start_io(struct ctlr_info *h)
+static void start_io(struct ctlr_info *h, unsigned long *flags)
 {
 	struct CommandList *c;
-	unsigned long flags;

-	spin_lock_irqsave(&h->lock, flags);
 	while (!list_empty(&h->reqQ)) {
 		c = list_entry(h->reqQ.next, struct CommandList, list);
 		/* can't do anything if fifo is full */
@@ -5488,14 +5474,20 @@ static void start_io(struct ctlr_info *h)
		 * condition.
		 */
 		h->commands_outstanding++;
-		if (h->commands_outstanding > h->max_outstanding)
-			h->max_outstanding = h->commands_outstanding;

 		/* Tell the controller execute command */
-		spin_unlock_irqrestore(&h->lock, flags);
+		spin_unlock_irqrestore(&h->lock, *flags);
 		h->access.submit_command(h, c);
-		spin_lock_irqsave(&h->lock, flags);
+		spin_lock_irqsave(&h->lock, *flags);
 	}
+}
+
+static void lock_and_start_io(struct ctlr_info *h)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&h->lock, flags);
+	start_io(h, &flags);
 	spin_unlock_irqrestore(&h->lock, flags);
 }

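The start_io()/lock_and_start_io() split above follows the usual caller-holds-the-lock refactor: the worker receives the saved irq flags by pointer so it can drop and retake the lock around hardware submission, while a thin wrapper preserves the old calling convention for callers that do not hold the lock. A minimal sketch of the shape, with hypothetical names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(q_lock);

/* Assumes q_lock is held on entry; may drop and reacquire it. */
static void drain_queue(unsigned long *flags)
{
	/* ... dequeue one entry ... */
	spin_unlock_irqrestore(&q_lock, *flags);	/* never submit under the lock */
	/* ... hand the entry to the hardware ... */
	spin_lock_irqsave(&q_lock, *flags);
}

/* Wrapper for callers that do not already hold the lock. */
static void lock_and_drain_queue(void)
{
	unsigned long flags;

	spin_lock_irqsave(&q_lock, flags);
	drain_queue(&flags);
	spin_unlock_irqrestore(&q_lock, flags);
}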
@@ -5563,7 +5555,7 @@ static inline void finish_cmd(struct CommandList *c)
 	else if (c->cmd_type == CMD_IOCTL_PEND)
 		complete(c->waiting);
 	if (unlikely(io_may_be_stalled))
-		start_io(h);
+		lock_and_start_io(h);
 }

 static inline u32 hpsa_tag_contains_index(u32 tag)
@@ -5840,12 +5832,12 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
 		dev_info(&pdev->dev, "using doorbell to reset controller\n");
 		writel(use_doorbell, vaddr + SA5_DOORBELL);

-		/* PMC hardware guys tell us we need a 5 second delay after
+		/* PMC hardware guys tell us we need a 10 second delay after
		 * doorbell reset and before any attempt to talk to the board
		 * at all to ensure that this actually works and doesn't fall
		 * over in some weird corner cases.
		 */
-		msleep(5000);
+		msleep(10000);
 	} else { /* Try to do it the PCI power state way */

 		/* Quoting from the Open CISS Specification: "The Power
@@ -6166,6 +6158,8 @@ static void hpsa_interrupt_mode(struct ctlr_info *h)
 	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
 		dev_info(&h->pdev->dev, "MSIX\n");
 		h->msix_vector = MAX_REPLY_QUEUES;
+		if (h->msix_vector > num_online_cpus())
+			h->msix_vector = num_online_cpus();
 		err = pci_enable_msix(h->pdev, hpsa_msix_entries,
 				      h->msix_vector);
 		if (err > 0) {
@@ -6615,6 +6609,17 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
 			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
 }

+static void hpsa_irq_affinity_hints(struct ctlr_info *h)
+{
+	int i, cpu, rc;
+
+	cpu = cpumask_first(cpu_online_mask);
+	for (i = 0; i < h->msix_vector; i++) {
+		rc = irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
+		cpu = cpumask_next(cpu, cpu_online_mask);
+	}
+}
+
 static int hpsa_request_irq(struct ctlr_info *h,
 	irqreturn_t (*msixhandler)(int, void *),
 	irqreturn_t (*intxhandler)(int, void *))
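hpsa_irq_affinity_hints() above walks the online CPU mask once per MSI-X vector; together with the num_online_cpus() clamp in the earlier hpsa_interrupt_mode() hunk, it suggests one vector per CPU. The same round-robin idiom sketched standalone (nvec and the irqs[] array are hypothetical):

#include <linux/cpumask.h>
#include <linux/interrupt.h>

/* Suggest one online CPU per vector; walking off the end of the mask is
 * avoided by clamping nvec to num_online_cpus() beforehand, as the hpsa
 * patch does.
 */
static void set_affinity_hints(int nvec, int *irqs)
{
	int i, cpu = cpumask_first(cpu_online_mask);

	for (i = 0; i < nvec; i++) {
		irq_set_affinity_hint(irqs[i], get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}
}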
@@ -6634,6 +6639,7 @@ static int hpsa_request_irq(struct ctlr_info *h,
 			rc = request_irq(h->intr[i], msixhandler,
 					0, h->devname,
 					&h->q[i]);
+		hpsa_irq_affinity_hints(h);
 	} else {
 		/* Use single reply pool */
 		if (h->msix_vector > 0 || h->msi_vector) {
@@ -6685,12 +6691,15 @@ static void free_irqs(struct ctlr_info *h)
 	if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
 		/* Single reply queue, only one irq to free */
 		i = h->intr_mode;
+		irq_set_affinity_hint(h->intr[i], NULL);
 		free_irq(h->intr[i], &h->q[i]);
 		return;
 	}

-	for (i = 0; i < h->msix_vector; i++)
+	for (i = 0; i < h->msix_vector; i++) {
+		irq_set_affinity_hint(h->intr[i], NULL);
 		free_irq(h->intr[i], &h->q[i]);
+	}
 }

 static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
@@ -6707,6 +6716,20 @@ static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
 #endif /* CONFIG_PCI_MSI */
 }

+static void hpsa_free_reply_queues(struct ctlr_info *h)
+{
+	int i;
+
+	for (i = 0; i < h->nreply_queues; i++) {
+		if (!h->reply_queue[i].head)
+			continue;
+		pci_free_consistent(h->pdev, h->reply_queue_size,
+			h->reply_queue[i].head, h->reply_queue[i].busaddr);
+		h->reply_queue[i].head = NULL;
+		h->reply_queue[i].busaddr = 0;
+	}
+}
+
 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
 {
 	hpsa_free_irqs_and_disable_msix(h);
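hpsa_free_reply_queues() above is the teardown half of a per-queue DMA buffer scheme introduced later in this patch: each reply queue now owns its own pci_alloc_consistent() buffer instead of a slice of one big reply pool. A sketch of the paired alloc/free, with hypothetical types standing in for the driver's:

#include <linux/pci.h>

struct rq_buf {
	u64 *head;
	dma_addr_t busaddr;
};

static int alloc_queues(struct pci_dev *pdev, struct rq_buf *q, int n, size_t sz)
{
	int i;

	for (i = 0; i < n; i++) {
		q[i].head = pci_alloc_consistent(pdev, sz, &q[i].busaddr);
		if (!q[i].head)
			return -ENOMEM;	/* caller frees; NULL slots are skipped */
	}
	return 0;
}

static void free_queues(struct pci_dev *pdev, struct rq_buf *q, int n, size_t sz)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!q[i].head)
			continue;
		pci_free_consistent(pdev, sz, q[i].head, q[i].busaddr);
		q[i].head = NULL;	/* make the free idempotent */
		q[i].busaddr = 0;
	}
}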
@@ -6714,8 +6737,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
 	hpsa_free_cmd_pool(h);
 	kfree(h->ioaccel1_blockFetchTable);
 	kfree(h->blockFetchTable);
-	pci_free_consistent(h->pdev, h->reply_pool_size,
-		h->reply_pool, h->reply_pool_dhandle);
+	hpsa_free_reply_queues(h);
 	if (h->vaddr)
 		iounmap(h->vaddr);
 	if (h->transtable)
@@ -6740,16 +6762,38 @@ static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
 	}
 }

+static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
+{
+	int i, cpu;
+
+	cpu = cpumask_first(cpu_online_mask);
+	for (i = 0; i < num_online_cpus(); i++) {
+		u32 *lockup_detected;
+		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
+		*lockup_detected = value;
+		cpu = cpumask_next(cpu, cpu_online_mask);
+	}
+	wmb(); /* be sure the per-cpu variables are out to memory */
+}
+
 static void controller_lockup_detected(struct ctlr_info *h)
 {
 	unsigned long flags;
+	u32 lockup_detected;

 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
 	spin_lock_irqsave(&h->lock, flags);
-	h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+	lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+	if (!lockup_detected) {
+		/* no heartbeat, but controller gave us a zero. */
+		dev_warn(&h->pdev->dev,
+			"lockup detected but scratchpad register is zero\n");
+		lockup_detected = 0xffffffff;
+	}
+	set_lockup_detected_for_all_cpus(h, lockup_detected);
 	spin_unlock_irqrestore(&h->lock, flags);
 	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
-			h->lockup_detected);
+			lockup_detected);
 	pci_disable_device(h->pdev);
 	spin_lock_irqsave(&h->lock, flags);
 	fail_all_cmds_on_list(h, &h->cmpQ);
@@ -6884,7 +6928,7 @@ static void hpsa_monitor_ctlr_worker(struct work_struct *work)
 	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, monitor_ctlr_work);
 	detect_controller_lockup(h);
-	if (h->lockup_detected)
+	if (lockup_detected(h))
 		return;

 	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
@@ -6934,7 +6978,6 @@ reinit_after_soft_reset:
	 * the 5 lower bits of the address are used by the hardware. and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
-#define COMMANDLIST_ALIGNMENT 128
 	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
 	h = kzalloc(sizeof(*h), GFP_KERNEL);
 	if (!h)
@@ -6949,6 +6992,13 @@ reinit_after_soft_reset:
 	spin_lock_init(&h->offline_device_lock);
 	spin_lock_init(&h->scan_lock);
 	spin_lock_init(&h->passthru_count_lock);
+
+	/* Allocate and clear per-cpu variable lockup_detected */
+	h->lockup_detected = alloc_percpu(u32);
+	if (!h->lockup_detected)
+		goto clean1;
+	set_lockup_detected_for_all_cpus(h, 0);
+
 	rc = hpsa_pci_init(h);
 	if (rc != 0)
 		goto clean1;
@@ -7072,6 +7122,8 @@ clean4:
 	free_irqs(h);
clean2:
clean1:
+	if (h->lockup_detected)
+		free_percpu(h->lockup_detected);
 	kfree(h);
 	return rc;
 }
@@ -7080,16 +7132,10 @@ static void hpsa_flush_cache(struct ctlr_info *h)
 {
 	char *flush_buf;
 	struct CommandList *c;
-	unsigned long flags;

 	/* Don't bother trying to flush the cache if locked up */
-	spin_lock_irqsave(&h->lock, flags);
-	if (unlikely(h->lockup_detected)) {
-		spin_unlock_irqrestore(&h->lock, flags);
+	if (unlikely(lockup_detected(h)))
 		return;
-	}
-	spin_unlock_irqrestore(&h->lock, flags);
-
 	flush_buf = kzalloc(4, GFP_KERNEL);
 	if (!flush_buf)
 		return;
@@ -7165,8 +7211,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
 	pci_free_consistent(h->pdev,
 		h->nr_cmds * sizeof(struct ErrorInfo),
 		h->errinfo_pool, h->errinfo_pool_dhandle);
-	pci_free_consistent(h->pdev, h->reply_pool_size,
-		h->reply_pool, h->reply_pool_dhandle);
+	hpsa_free_reply_queues(h);
 	kfree(h->cmd_pool_bits);
 	kfree(h->blockFetchTable);
 	kfree(h->ioaccel1_blockFetchTable);
@@ -7174,6 +7219,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
 	kfree(h->hba_inquiry_data);
 	pci_disable_device(pdev);
 	pci_release_regions(pdev);
+	free_percpu(h->lockup_detected);
 	kfree(h);
 }

@@ -7278,8 +7324,16 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
	 * 10 = 6 s/g entry or 24k
	 */

+	/* If the controller supports either ioaccel method then
+	 * we can also use the RAID stack submit path that does not
+	 * perform the superfluous readl() after each command submission.
+	 */
+	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
+		access = SA5_performant_access_no_read;
+
 	/* Controller spec: zero out this buffer. */
-	memset(h->reply_pool, 0, h->reply_pool_size);
+	for (i = 0; i < h->nreply_queues; i++)
+		memset(h->reply_queue[i].head, 0, h->reply_queue_size);

 	bft[7] = SG_ENTRIES_IN_CMD + 4;
 	calc_bucket_map(bft, ARRAY_SIZE(bft),
@@ -7295,8 +7349,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)

 	for (i = 0; i < h->nreply_queues; i++) {
 		writel(0, &h->transtable->RepQAddr[i].upper);
-		writel(h->reply_pool_dhandle +
-			(h->max_commands * sizeof(u64) * i),
+		writel(h->reply_queue[i].busaddr,
			&h->transtable->RepQAddr[i].lower);
 	}

@@ -7344,8 +7397,10 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
				h->ioaccel1_blockFetchTable);

 		/* initialize all reply queue entries to unused */
-		memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED,
-				h->reply_pool_size);
+		for (i = 0; i < h->nreply_queues; i++)
+			memset(h->reply_queue[i].head,
+				(u8) IOACCEL_MODE1_REPLY_UNUSED,
+				h->reply_queue_size);

 		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
@@ -7407,7 +7462,6 @@ static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
-#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
 	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
 	h->ioaccel_cmd_pool =
@@ -7445,7 +7499,6 @@ static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
 	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
 		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

-#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
 	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
 	h->ioaccel2_cmd_pool =
@@ -7503,16 +7556,17 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
 		}
 	}

-	/* TODO, check that this next line h->nreply_queues is correct */
 	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
 	hpsa_get_max_perf_mode_cmds(h);
 	/* Performant mode ring buffer and supporting data structures */
-	h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
-	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
-				&(h->reply_pool_dhandle));
+	h->reply_queue_size = h->max_commands * sizeof(u64);

 	for (i = 0; i < h->nreply_queues; i++) {
-		h->reply_queue[i].head = &h->reply_pool[h->max_commands * i];
+		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
+						h->reply_queue_size,
+						&(h->reply_queue[i].busaddr));
+		if (!h->reply_queue[i].head)
+			goto clean_up;
 		h->reply_queue[i].size = h->max_commands;
 		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
 		h->reply_queue[i].current_entry = 0;
@@ -7521,18 +7575,14 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
 	/* Need a block fetch table for performant mode */
 	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
-
-	if ((h->reply_pool == NULL)
-		|| (h->blockFetchTable == NULL))
+	if (!h->blockFetchTable)
 		goto clean_up;

 	hpsa_enter_performant_mode(h, trans_support);
 	return;

clean_up:
-	if (h->reply_pool)
-		pci_free_consistent(h->pdev, h->reply_pool_size,
-			h->reply_pool, h->reply_pool_dhandle);
+	hpsa_free_reply_queues(h);
 	kfree(h->blockFetchTable);
 }

@@ -57,11 +57,12 @@ struct hpsa_scsi_dev_t {

 };

-struct reply_pool {
+struct reply_queue_buffer {
 	u64 *head;
 	size_t size;
 	u8 wraparound;
 	u32 current_entry;
+	dma_addr_t busaddr;
 };

 #pragma pack(1)
@@ -116,11 +117,8 @@ struct ctlr_info {
 	int	nr_cmds; /* Number of commands allowed on this controller */
 	struct CfgTable __iomem *cfgtable;
 	int	interrupts_enabled;
-	int	major;
 	int	max_commands;
 	int	commands_outstanding;
-	int	max_outstanding; /* Debug */
-	int	usage_count;  /* number of opens all all minor devices */
 #	define PERF_MODE_INT	0
 #	define DOORBELL_INT	1
 #	define SIMPLE_MODE_INT	2
@@ -177,11 +175,9 @@ struct ctlr_info {
	/*
	 * Performant mode completion buffers
	 */
-	u64 *reply_pool;
-	size_t reply_pool_size;
-	struct reply_pool reply_queue[MAX_REPLY_QUEUES];
+	size_t reply_queue_size;
+	struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
 	u8 nreply_queues;
-	dma_addr_t reply_pool_dhandle;
 	u32 *blockFetchTable;
 	u32 *ioaccel1_blockFetchTable;
 	u32 *ioaccel2_blockFetchTable;
@@ -196,7 +192,7 @@ struct ctlr_info {
 	u64 last_heartbeat_timestamp;
 	u32 heartbeat_sample_interval;
 	atomic_t firmware_flash_in_progress;
-	u32 lockup_detected;
+	u32 *lockup_detected;
 	struct delayed_work monitor_ctlr_work;
 	int remove_in_progress;
 	u32 fifo_recently_full;
@@ -233,11 +229,9 @@ struct ctlr_info {
 #define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE (1 << 31)

 #define RESCAN_REQUIRED_EVENT_BITS \
-		(CTLR_STATE_CHANGE_EVENT | \
-		CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
+		(CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
-		CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL | \
		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
 	spinlock_t offline_device_lock;
@@ -346,22 +340,23 @@ struct offline_device_entry {
 static void SA5_submit_command(struct ctlr_info *h,
	struct CommandList *c)
 {
-	dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
-		c->Header.Tag.lower);
 	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
 	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
 }

+static void SA5_submit_command_no_read(struct ctlr_info *h,
+	struct CommandList *c)
+{
+	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+}
+
 static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
	struct CommandList *c)
 {
-	dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
-		c->Header.Tag.lower);
 	if (c->cmd_type == CMD_IOACCEL2)
 		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
 	else
 		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
-	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
 }

 /*
@@ -399,7 +394,7 @@ static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)

 static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
 {
-	struct reply_pool *rq = &h->reply_queue[q];
+	struct reply_queue_buffer *rq = &h->reply_queue[q];
 	unsigned long flags, register_value = FIFO_EMPTY;

 	/* msi auto clears the interrupt pending bit. */
@@ -478,7 +473,6 @@ static bool SA5_intr_pending(struct ctlr_info *h)
 {
 	unsigned long register_value =
		readl(h->vaddr + SA5_INTR_STATUS);
-	dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value);
 	return register_value & SA5_INTR_PENDING;
 }

@@ -515,7 +509,7 @@ static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
 static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
 {
 	u64 register_value;
-	struct reply_pool *rq = &h->reply_queue[q];
+	struct reply_queue_buffer *rq = &h->reply_queue[q];
 	unsigned long flags;

 	BUG_ON(q >= h->nreply_queues);
@@ -573,6 +567,14 @@ static struct access_method SA5_performant_access = {
 	SA5_performant_completed,
 };

+static struct access_method SA5_performant_access_no_read = {
+	SA5_submit_command_no_read,
+	SA5_performant_intr_mask,
+	SA5_fifo_full,
+	SA5_performant_intr_pending,
+	SA5_performant_completed,
+};
+
 struct board_type {
 	u32	board_id;
 	char	*product_name;
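SA5_performant_access_no_read, added above, differs from SA5_performant_access only in its submit hook: SA5_submit_command_no_read drops the readl() that flushed posted writes after every submission, which the ioaccel-capable controllers do not need. The swap itself is ordinary function-pointer dispatch through an ops table; a small standalone sketch of that mechanism, with illustrative names only:

#include <stdio.h>

struct submit_ops {
	void (*submit)(const char *cmd);
};

static void submit_with_flush(const char *cmd)
{
	printf("%s: write doorbell, then read a register to flush\n", cmd);
}

static void submit_no_read(const char *cmd)
{
	printf("%s: write doorbell only\n", cmd);	/* skips one MMIO read per I/O */
}

static const struct submit_ops flush_ops = { submit_with_flush };
static const struct submit_ops fast_ops  = { submit_no_read };

int main(void)
{
	int hw_can_skip_flush = 1;	/* e.g. an ioaccel-capable controller */
	const struct submit_ops *ops = hw_can_skip_flush ? &fast_ops : &flush_ops;

	ops->submit("cmd0");
	return 0;
}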
@@ -151,7 +151,7 @@
 #define HPSA_VPD_HEADER_SZ              4

 /* Logical volume states */
-#define HPSA_VPD_LV_STATUS_UNSUPPORTED			-1
+#define HPSA_VPD_LV_STATUS_UNSUPPORTED			0xff
 #define HPSA_LV_OK                                      0x0
 #define HPSA_LV_UNDERGOING_ERASE			0x0F
 #define HPSA_LV_UNDERGOING_RPI				0x12
@@ -238,11 +238,21 @@ struct ReportLUNdata {
 	u8 LUN[HPSA_MAX_LUN][8];
 };

+struct ext_report_lun_entry {
+	u8 lunid[8];
+	u8 wwid[8];
+	u8 device_type;
+	u8 device_flags;
+	u8 lun_count; /* multi-lun device, how many luns */
+	u8 redundant_paths;
+	u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */
+};
+
 struct ReportExtendedLUNdata {
 	u8 LUNListLength[4];
 	u8 extended_response_flag;
 	u8 reserved[3];
-	u8 LUN[HPSA_MAX_LUN][24];
+	struct ext_report_lun_entry LUN[HPSA_MAX_LUN];
 };

 struct SenseSubsystem_info {
@@ -375,6 +385,7 @@ struct ctlr_info; /* defined in hpsa.h */
  * or a bus address.
  */

+#define COMMANDLIST_ALIGNMENT 128
 struct CommandList {
 	struct CommandListHeader Header;
 	struct RequestBlock      Request;
@@ -389,21 +400,7 @@ struct CommandList {
 	struct list_head list;
 	struct completion *waiting;
 	void   *scsi_cmd;
-
-/* on 64 bit architectures, to get this to be 32-byte-aligned
- * it so happens we need PAD_64 bytes of padding, on 32 bit systems,
- * we need PAD_32 bytes of padding (see below).  This does that.
- * If it happens that 64 bit and 32 bit systems need different
- * padding, PAD_32 and PAD_64 can be set independently, and.
- * the code below will do the right thing.
- */
-#define IS_32_BIT ((8 - sizeof(long))/4)
-#define IS_64_BIT (!IS_32_BIT)
-#define PAD_32 (40)
-#define PAD_64 (12)
-#define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
-	u8 pad[COMMANDLIST_PAD];
-};
+} __aligned(COMMANDLIST_ALIGNMENT);

 /* Max S/G elements in I/O accelerator command */
 #define IOACCEL1_MAXSGENTRIES           24
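The hpsa_cmd.h hunk above (and the two io_accel hunks that follow) replace hand-computed pad[] arrays with the compiler's aligned attribute: when a struct type carries __aligned(N), sizeof() is rounded up to a multiple of N, so array elements stay aligned with no manual padding arithmetic. A standalone check of that behavior (the struct layout is made up for the example):

#include <stdio.h>

#define CMD_ALIGN 128

struct cmd {
	unsigned char payload[70];	/* any size; the attribute pads the rest */
} __attribute__((aligned(CMD_ALIGN)));

int main(void)
{
	printf("sizeof(struct cmd) = %zu\n", sizeof(struct cmd));	/* prints 128 */
	printf("alignment ok: %d\n", sizeof(struct cmd) % CMD_ALIGN == 0);
	return 0;
}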
@@ -413,6 +410,7 @@ struct CommandList {
  * Structure for I/O accelerator (mode 1) commands.
  * Note that this structure must be 128-byte aligned in size.
  */
+#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
 struct io_accel1_cmd {
 	u16 dev_handle;			/* 0x00 - 0x01 */
 	u8  reserved1;			/* 0x02 */
@@ -440,12 +438,7 @@ struct io_accel1_cmd {
 	struct vals32 host_addr;	/* 0x70 - 0x77 */
 	u8  CISS_LUN[8];		/* 0x78 - 0x7F */
 	struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
-#define IOACCEL1_PAD_64 0
-#define IOACCEL1_PAD_32 0
-#define IOACCEL1_PAD (IS_32_BIT * IOACCEL1_PAD_32 + \
-			IS_64_BIT * IOACCEL1_PAD_64)
-	u8 pad[IOACCEL1_PAD];
-};
+} __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);

 #define IOACCEL1_FUNCTION_SCSIIO        0x00
 #define IOACCEL1_SGLOFFSET              32
@@ -510,14 +503,11 @@ struct io_accel2_scsi_response {
 	u8 sense_data_buff[32];		/* sense/response data buffer */
 };

-#define IOACCEL2_64_PAD 76
-#define IOACCEL2_32_PAD 76
-#define IOACCEL2_PAD (IS_32_BIT * IOACCEL2_32_PAD + \
-			IS_64_BIT * IOACCEL2_64_PAD)
 /*
  * Structure for I/O accelerator (mode 2 or m2) commands.
  * Note that this structure must be 128-byte aligned in size.
  */
+#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
 struct io_accel2_cmd {
 	u8  IU_type;			/* IU Type */
 	u8  direction;			/* direction, memtype, and encryption */
@@ -544,8 +534,7 @@ struct io_accel2_cmd {
 	u32 tweak_upper;		/* Encryption tweak, upper 4 bytes */
 	struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
 	struct io_accel2_scsi_response error_data;
-	u8 pad[IOACCEL2_PAD];
-};
+} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);

 /*
  * defines for Mode 2 command struct
@@ -636,7 +625,7 @@ struct TransTable_struct {
 	u32		RepQCount;
 	u32		RepQCtrAddrLow32;
 	u32		RepQCtrAddrHigh32;
-#define MAX_REPLY_QUEUES 8
+#define MAX_REPLY_QUEUES 64
 	struct vals32	RepQAddr[MAX_REPLY_QUEUES];
 };

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -640,6 +640,7 @@ struct lpfc_hba {
 #define HBA_DEVLOSS_TMO         0x2000 /* HBA in devloss timeout */
 #define HBA_RRQ_ACTIVE		0x4000 /* process the rrq active list */
 #define HBA_FCP_IOQ_FLUSH	0x8000 /* FCP I/O queues being flushed */
+#define HBA_FW_DUMP_OP		0x10000 /* Skips fn reset before FW dump */
 	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
 	struct lpfc_dmabuf slim2p;

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -919,10 +919,15 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
 		phba->cfg_sriov_nr_virtfn = 0;
 	}

+	if (opcode == LPFC_FW_DUMP)
+		phba->hba_flag |= HBA_FW_DUMP_OP;
+
 	status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);

-	if (status != 0)
+	if (status != 0) {
+		phba->hba_flag &= ~HBA_FW_DUMP_OP;
 		return status;
+	}

 	/* wait for the device to be quiesced before firmware reset */
 	msleep(100);
@ -2364,7 +2369,7 @@ lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
|
|||||||
uint8_t wwpn[WWN_SZ];
|
uint8_t wwpn[WWN_SZ];
|
||||||
int rc;
|
int rc;
|
||||||
|
|
||||||
if (!phba->cfg_EnableXLane)
|
if (!phba->cfg_fof)
|
||||||
return -EPERM;
|
return -EPERM;
|
||||||
|
|
||||||
/* count may include a LF at end of string */
|
/* count may include a LF at end of string */
|
||||||
@ -2432,7 +2437,7 @@ lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
|
|||||||
uint8_t wwpn[WWN_SZ];
|
uint8_t wwpn[WWN_SZ];
|
||||||
int rc;
|
int rc;
|
||||||
|
|
||||||
if (!phba->cfg_EnableXLane)
|
if (!phba->cfg_fof)
|
||||||
return -EPERM;
|
return -EPERM;
|
||||||
|
|
||||||
/* count may include a LF at end of string */
|
/* count may include a LF at end of string */
|
||||||
@ -2499,7 +2504,7 @@ lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
|
|||||||
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
|
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
|
||||||
int val = 0;
|
int val = 0;
|
||||||
|
|
||||||
if (!phba->cfg_EnableXLane)
|
if (!phba->cfg_fof)
|
||||||
return -EPERM;
|
return -EPERM;
|
||||||
|
|
||||||
if (!isdigit(buf[0]))
|
if (!isdigit(buf[0]))
|
||||||
@ -2565,7 +2570,7 @@ lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
|
|||||||
|
|
||||||
int rc = 0;
|
int rc = 0;
|
||||||
|
|
||||||
if (!phba->cfg_EnableXLane)
|
if (!phba->cfg_fof)
|
||||||
return -EPERM;
|
return -EPERM;
|
||||||
|
|
||||||
if (oas_state) {
|
if (oas_state) {
|
||||||
@ -2670,7 +2675,7 @@ lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
|
|||||||
uint64_t oas_lun;
|
uint64_t oas_lun;
|
||||||
int len = 0;
|
int len = 0;
|
||||||
|
|
||||||
if (!phba->cfg_EnableXLane)
|
if (!phba->cfg_fof)
|
||||||
return -EPERM;
|
return -EPERM;
|
||||||
|
|
||||||
if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
|
if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
|
||||||
@ -2716,7 +2721,7 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
|
|||||||
uint64_t scsi_lun;
|
uint64_t scsi_lun;
|
||||||
ssize_t rc;
|
ssize_t rc;
|
||||||
|
|
||||||
if (!phba->cfg_EnableXLane)
|
if (!phba->cfg_fof)
|
||||||
return -EPERM;
|
return -EPERM;
|
||||||
|
|
||||||
if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
|
if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
|
||||||
@ -4655,7 +4660,7 @@ LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
|
|||||||
# 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits)
|
# 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits)
|
||||||
# Value range is [0x0,0x7f]. Default value is 0
|
# Value range is [0x0,0x7f]. Default value is 0
|
||||||
*/
|
*/
|
||||||
LPFC_ATTR_R(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
|
LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
|
||||||
|
|
||||||
/*
|
/*
|
||||||
# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
|
# lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
|
||||||
|
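Taken together, the lpfc_sli4_pdev_reg_request hunk establishes the firmware-dump protocol: set HBA_FW_DUMP_OP before taking the port offline, and clear it again if the offline attempt fails so a stale flag cannot suppress a later board reset (lpfc_sli4_brdreset, further down in this series, consumes the flag). A condensed sketch of that set/rollback shape, with do_offline() standing in for lpfc_do_offline():

#include <stdbool.h>

#define HBA_FW_DUMP_OP 0x10000

static int do_offline(void) { return 0; } /* stub; 0 means success */

static int start_fw_dump(unsigned long *hba_flag, bool fw_dump_requested)
{
    int status;

    if (fw_dump_requested)
        *hba_flag |= HBA_FW_DUMP_OP;

    status = do_offline();
    if (status != 0) {
        /* roll back so a later board reset is not silently skipped */
        *hba_flag &= ~HBA_FW_DUMP_OP;
        return status;
    }
    return 0;    /* flag stays set; the reset path consumes it */
}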
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for *
  * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2014 Emulex. All rights reserved. *
  * EMULEX and SLI are trademarks of Emulex. *
  * www.emulex.com *
  * *
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for *
  * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2010-2012 Emulex. All rights reserved. *
+ * Copyright (C) 2010-2014 Emulex. All rights reserved. *
  * EMULEX and SLI are trademarks of Emulex. *
  * www.emulex.com *
  * *
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for *
  * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
  * EMULEX and SLI are trademarks of Emulex. *
  * www.emulex.com *
  * *
@@ -289,6 +289,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
 void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
+void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba);
 void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
 void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
 int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
@@ -310,6 +311,9 @@ int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
 int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);
 int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
                         uint64_t, lpfc_ctx_cmd);
+int
+lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *,
+                        uint16_t, uint64_t, lpfc_ctx_cmd);

 void lpfc_mbox_timeout(unsigned long);
 void lpfc_mbox_timeout_handler(struct lpfc_hba *);
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for *
  * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2007-2012 Emulex. All rights reserved. *
+ * Copyright (C) 2007-2014 Emulex. All rights reserved. *
  * EMULEX and SLI are trademarks of Emulex. *
  * www.emulex.com *
  * *
@@ -2314,7 +2314,7 @@ proc_cq:
             goto too_big;
     }

-    if (phba->cfg_EnableXLane) {
+    if (phba->cfg_fof) {

         /* OAS CQ */
         qp = phba->sli4_hba.oas_cq;
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for *
  * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
  * EMULEX and SLI are trademarks of Emulex. *
  * www.emulex.com *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for *
  * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
  * EMULEX and SLI are trademarks of Emulex. *
  * www.emulex.com *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -5634,6 +5634,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
         ndlp->active_rrqs_xri_bitmap =
             mempool_alloc(vport->phba->active_rrq_pool,
                           GFP_KERNEL);
+        if (ndlp->active_rrqs_xri_bitmap)
+            memset(ndlp->active_rrqs_xri_bitmap, 0,
+                   ndlp->phba->cfg_rrq_xri_bitmap_sz);
     }

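The lpfc_nlp_init fix exists because memory returned by mempool_alloc() is not zeroed; a recycled RRQ XRI bitmap would otherwise carry stale bits from its previous owner and make released exchanges look active. The shape of the fix, sketched in plain C with malloc standing in for the mempool:

#include <stdlib.h>
#include <string.h>

unsigned long *alloc_rrq_bitmap(size_t bitmap_sz)
{
    /* Pool and heap allocators hand back uninitialized memory; a
     * bitmap must start all-clear or stale XRIs appear in-flight. */
    unsigned long *bitmap = malloc(bitmap_sz);

    if (bitmap)
        memset(bitmap, 0, bitmap_sz);
    return bitmap;
}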
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for *
  * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
  * EMULEX and SLI are trademarks of Emulex. *
  * www.emulex.com *
  * *
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for *
  * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2014 Emulex. All rights reserved. *
  * EMULEX and SLI are trademarks of Emulex. *
  * www.emulex.com *
  * *
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for *
  * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
  * EMULEX and SLI are trademarks of Emulex. *
  * www.emulex.com *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -819,8 +819,140 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
     return 0;
 }

+/**
+ * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
+ *                            rspiocb which got deferred
+ *
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will cleanup completed slow path events after HBA is reset
+ * when bringing down the SLI Layer.
+ *
+ *
+ * Return codes
+ *   void.
+ **/
+static void
+lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
+{
+    struct lpfc_iocbq *rspiocbq;
+    struct hbq_dmabuf *dmabuf;
+    struct lpfc_cq_event *cq_event;
+
+    spin_lock_irq(&phba->hbalock);
+    phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
+    spin_unlock_irq(&phba->hbalock);
+
+    while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
+        /* Get the response iocb from the head of work queue */
+        spin_lock_irq(&phba->hbalock);
+        list_remove_head(&phba->sli4_hba.sp_queue_event,
+                         cq_event, struct lpfc_cq_event, list);
+        spin_unlock_irq(&phba->hbalock);
+
+        switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
+        case CQE_CODE_COMPL_WQE:
+            rspiocbq = container_of(cq_event, struct lpfc_iocbq,
+                                    cq_event);
+            lpfc_sli_release_iocbq(phba, rspiocbq);
+            break;
+        case CQE_CODE_RECEIVE:
+        case CQE_CODE_RECEIVE_V1:
+            dmabuf = container_of(cq_event, struct hbq_dmabuf,
+                                  cq_event);
+            lpfc_in_buf_free(phba, &dmabuf->dbuf);
+        }
+    }
+}
+
+/**
+ * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will cleanup posted ELS buffers after the HBA is reset
+ * when bringing down the SLI Layer.
+ *
+ *
+ * Return codes
+ *   void.
+ **/
+static void
+lpfc_hba_free_post_buf(struct lpfc_hba *phba)
+{
+    struct lpfc_sli *psli = &phba->sli;
+    struct lpfc_sli_ring *pring;
+    struct lpfc_dmabuf *mp, *next_mp;
+    LIST_HEAD(buflist);
+    int count;
+
+    if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
+        lpfc_sli_hbqbuf_free_all(phba);
+    else {
+        /* Cleanup preposted buffers on the ELS ring */
+        pring = &psli->ring[LPFC_ELS_RING];
+        spin_lock_irq(&phba->hbalock);
+        list_splice_init(&pring->postbufq, &buflist);
+        spin_unlock_irq(&phba->hbalock);
+
+        count = 0;
+        list_for_each_entry_safe(mp, next_mp, &buflist, list) {
+            list_del(&mp->list);
+            count++;
+            lpfc_mbuf_free(phba, mp->virt, mp->phys);
+            kfree(mp);
+        }
+
+        spin_lock_irq(&phba->hbalock);
+        pring->postbufq_cnt -= count;
+        spin_unlock_irq(&phba->hbalock);
+    }
+}
+
+/**
+ * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will cleanup the txcmplq after the HBA is reset when bringing
+ * down the SLI Layer.
+ *
+ * Return codes
+ *   void
+ **/
+static void
+lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
+{
+    struct lpfc_sli *psli = &phba->sli;
+    struct lpfc_sli_ring *pring;
+    LIST_HEAD(completions);
+    int i;
+
+    for (i = 0; i < psli->num_rings; i++) {
+        pring = &psli->ring[i];
+        if (phba->sli_rev >= LPFC_SLI_REV4)
+            spin_lock_irq(&pring->ring_lock);
+        else
+            spin_lock_irq(&phba->hbalock);
+        /* At this point in time the HBA is either reset or DOA. Either
+         * way, nothing should be on txcmplq as it will NEVER complete.
+         */
+        list_splice_init(&pring->txcmplq, &completions);
+        pring->txcmplq_cnt = 0;
+
+        if (phba->sli_rev >= LPFC_SLI_REV4)
+            spin_unlock_irq(&pring->ring_lock);
+        else
+            spin_unlock_irq(&phba->hbalock);
+
+        /* Cancel all the IOCBs from the completions list */
+        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+                              IOERR_SLI_ABORTED);
+        lpfc_sli_abort_iocb_ring(phba, pring);
+    }
+}
+
 /**
  * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
  * @phba: pointer to lpfc HBA data structure.
  *
  * This routine will do uninitialization after the HBA is reset when bring
@@ -833,44 +965,8 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
 static int
 lpfc_hba_down_post_s3(struct lpfc_hba *phba)
 {
-    struct lpfc_sli *psli = &phba->sli;
-    struct lpfc_sli_ring *pring;
-    struct lpfc_dmabuf *mp, *next_mp;
-    LIST_HEAD(completions);
-    int i;
-
-    if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
-        lpfc_sli_hbqbuf_free_all(phba);
-    else {
-        /* Cleanup preposted buffers on the ELS ring */
-        pring = &psli->ring[LPFC_ELS_RING];
-        list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
-            list_del(&mp->list);
-            pring->postbufq_cnt--;
-            lpfc_mbuf_free(phba, mp->virt, mp->phys);
-            kfree(mp);
-        }
-    }
-
-    spin_lock_irq(&phba->hbalock);
-    for (i = 0; i < psli->num_rings; i++) {
-        pring = &psli->ring[i];
-
-        /* At this point in time the HBA is either reset or DOA. Either
-         * way, nothing should be on txcmplq as it will NEVER complete.
-         */
-        list_splice_init(&pring->txcmplq, &completions);
-        spin_unlock_irq(&phba->hbalock);
-
-        /* Cancel all the IOCBs from the completions list */
-        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
-                              IOERR_SLI_ABORTED);
-
-        lpfc_sli_abort_iocb_ring(phba, pring);
-        spin_lock_irq(&phba->hbalock);
-    }
-    spin_unlock_irq(&phba->hbalock);
-
+    lpfc_hba_free_post_buf(phba);
+    lpfc_hba_clean_txcmplq(phba);
     return 0;
 }
@@ -890,13 +986,12 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 {
     struct lpfc_scsi_buf *psb, *psb_next;
     LIST_HEAD(aborts);
-    int ret;
     unsigned long iflag = 0;
     struct lpfc_sglq *sglq_entry = NULL;

-    ret = lpfc_hba_down_post_s3(phba);
-    if (ret)
-        return ret;
+    lpfc_hba_free_post_buf(phba);
+    lpfc_hba_clean_txcmplq(phba);
+
     /* At this point in time the HBA is either reset or DOA. Either
      * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
      * on the lpfc_sgl_list so that it can either be freed if the
@@ -932,6 +1027,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
     spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
     list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
     spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
+
+    lpfc_sli4_free_sp_events(phba);
     return 0;
 }

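The rewritten down-post helpers above rely on a classic pattern: splice the shared list onto a private head while holding the lock, walk and free the private list with the lock dropped, and retake the lock only to fix up the counter. That keeps lpfc_mbuf_free()/kfree() out of the critical section. A self-contained sketch of the pattern, with a pthread mutex standing in for hbalock and a hypothetical node type:

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *shared_head;
static int shared_cnt;

void drain_and_free(void)
{
    struct node *batch, *n;
    int count = 0;

    pthread_mutex_lock(&lock);
    batch = shared_head;            /* splice: steal the whole list */
    shared_head = NULL;
    pthread_mutex_unlock(&lock);

    while ((n = batch) != NULL) {   /* free with the lock dropped */
        batch = n->next;
        free(n);
        count++;
    }

    pthread_mutex_lock(&lock);      /* retake only to fix the count */
    shared_cnt -= count;
    pthread_mutex_unlock(&lock);
}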
@@ -1250,7 +1347,6 @@ static void
 lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
 {
     uint32_t old_host_status = phba->work_hs;
-    struct lpfc_sli_ring *pring;
     struct lpfc_sli *psli = &phba->sli;

     /* If the pci channel is offline, ignore possible errors,
@@ -1279,8 +1375,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
      * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
      * SCSI layer retry it after re-establishing link.
      */
-    pring = &psli->ring[psli->fcp_ring];
-    lpfc_sli_abort_iocb_ring(phba, pring);
+    lpfc_sli_abort_fcp_rings(phba);

     /*
      * There was a firmware error. Take the hba offline and then
@@ -1348,7 +1443,6 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
 {
     struct lpfc_vport *vport = phba->pport;
     struct lpfc_sli *psli = &phba->sli;
-    struct lpfc_sli_ring *pring;
     uint32_t event_data;
     unsigned long temperature;
     struct temp_event temp_event_data;
@@ -1400,8 +1494,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
      * Error iocb (I/O) on txcmplq and let the SCSI layer
      * retry it after re-establishing link.
      */
-    pring = &psli->ring[psli->fcp_ring];
-    lpfc_sli_abort_iocb_ring(phba, pring);
+    lpfc_sli_abort_fcp_rings(phba);

     /*
      * There was a firmware error. Take the hba offline and then
@@ -1940,78 +2033,81 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)

     switch (dev_id) {
     case PCI_DEVICE_ID_FIREFLY:
-        m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
+        m = (typeof(m)){"LP6000", "PCI",
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_SUPERFLY:
         if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
-            m = (typeof(m)){"LP7000", "PCI",
-                    "Fibre Channel Adapter"};
+            m = (typeof(m)){"LP7000", "PCI", ""};
         else
-            m = (typeof(m)){"LP7000E", "PCI",
-                    "Fibre Channel Adapter"};
+            m = (typeof(m)){"LP7000E", "PCI", ""};
+        m.function = "Obsolete, Unsupported Fibre Channel Adapter";
         break;
     case PCI_DEVICE_ID_DRAGONFLY:
         m = (typeof(m)){"LP8000", "PCI",
-                "Fibre Channel Adapter"};
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_CENTAUR:
         if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
-            m = (typeof(m)){"LP9002", "PCI",
-                    "Fibre Channel Adapter"};
+            m = (typeof(m)){"LP9002", "PCI", ""};
         else
-            m = (typeof(m)){"LP9000", "PCI",
-                    "Fibre Channel Adapter"};
+            m = (typeof(m)){"LP9000", "PCI", ""};
+        m.function = "Obsolete, Unsupported Fibre Channel Adapter";
         break;
     case PCI_DEVICE_ID_RFLY:
         m = (typeof(m)){"LP952", "PCI",
-                "Fibre Channel Adapter"};
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_PEGASUS:
         m = (typeof(m)){"LP9802", "PCI-X",
-                "Fibre Channel Adapter"};
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_THOR:
         m = (typeof(m)){"LP10000", "PCI-X",
-                "Fibre Channel Adapter"};
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_VIPER:
         m = (typeof(m)){"LPX1000", "PCI-X",
-                "Fibre Channel Adapter"};
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_PFLY:
         m = (typeof(m)){"LP982", "PCI-X",
-                "Fibre Channel Adapter"};
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_TFLY:
         m = (typeof(m)){"LP1050", "PCI-X",
-                "Fibre Channel Adapter"};
+                "Obsolete, Unsupported Fibre Channel Adapter"};
        break;
     case PCI_DEVICE_ID_HELIOS:
         m = (typeof(m)){"LP11000", "PCI-X2",
-                "Fibre Channel Adapter"};
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_HELIOS_SCSP:
         m = (typeof(m)){"LP11000-SP", "PCI-X2",
-                "Fibre Channel Adapter"};
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_HELIOS_DCSP:
         m = (typeof(m)){"LP11002-SP", "PCI-X2",
-                "Fibre Channel Adapter"};
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_NEPTUNE:
-        m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
+        m = (typeof(m)){"LPe1000", "PCIe",
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_NEPTUNE_SCSP:
-        m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
+        m = (typeof(m)){"LPe1000-SP", "PCIe",
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_NEPTUNE_DCSP:
-        m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
+        m = (typeof(m)){"LPe1002-SP", "PCIe",
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_BMID:
         m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_BSMB:
-        m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
+        m = (typeof(m)){"LP111", "PCI-X2",
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_ZEPHYR:
         m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
@@ -2030,16 +2126,20 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
         m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_LP101:
-        m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
+        m = (typeof(m)){"LP101", "PCI-X",
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_LP10000S:
-        m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
+        m = (typeof(m)){"LP10000-S", "PCI",
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_LP11000S:
-        m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
+        m = (typeof(m)){"LP11000-S", "PCI-X2",
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_LPE11000S:
-        m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
+        m = (typeof(m)){"LPe11000-S", "PCIe",
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_SAT:
         m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
@@ -2060,20 +2160,21 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
         m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_HORNET:
-        m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
+        m = (typeof(m)){"LP21000", "PCIe",
+                "Obsolete, Unsupported FCoE Adapter"};
         GE = 1;
         break;
     case PCI_DEVICE_ID_PROTEUS_VF:
         m = (typeof(m)){"LPev12000", "PCIe IOV",
-                "Fibre Channel Adapter"};
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_PROTEUS_PF:
         m = (typeof(m)){"LPev12000", "PCIe IOV",
-                "Fibre Channel Adapter"};
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_PROTEUS_S:
         m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
-                "Fibre Channel Adapter"};
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_TIGERSHARK:
         oneConnect = 1;
@@ -2089,17 +2190,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
         break;
     case PCI_DEVICE_ID_BALIUS:
         m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
-                "Fibre Channel Adapter"};
+                "Obsolete, Unsupported Fibre Channel Adapter"};
         break;
     case PCI_DEVICE_ID_LANCER_FC:
-    case PCI_DEVICE_ID_LANCER_FC_VF:
         m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
         break;
+    case PCI_DEVICE_ID_LANCER_FC_VF:
+        m = (typeof(m)){"LPe16000", "PCIe",
+                "Obsolete, Unsupported Fibre Channel Adapter"};
+        break;
     case PCI_DEVICE_ID_LANCER_FCOE:
-    case PCI_DEVICE_ID_LANCER_FCOE_VF:
         oneConnect = 1;
         m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
         break;
+    case PCI_DEVICE_ID_LANCER_FCOE_VF:
+        oneConnect = 1;
+        m = (typeof(m)){"OCe15100", "PCIe",
+                "Obsolete, Unsupported FCoE"};
+        break;
     case PCI_DEVICE_ID_SKYHAWK:
     case PCI_DEVICE_ID_SKYHAWK_VF:
         oneConnect = 1;
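The "mark old devices as obsolete" hunks lean on a GCC idiom: m = (typeof(m)){...} assigns a compound literal of m's own type, so each case fills the model/bus/function triple in one statement, and the SUPERFLY/CENTAUR cases can set the model first and patch m.function afterwards. A standalone illustration — the three-field struct layout here is assumed to match what the literals fill:

#include <stdio.h>

int main(void)
{
    struct { const char *name, *bus, *function; } m;

    m = (typeof(m)){"LP7000", "PCI", ""};   /* GNU C: typeof + compound literal */
    m.function = "Obsolete, Unsupported Fibre Channel Adapter";

    printf("%s %s: %s\n", m.name, m.bus, m.function);
    return 0;
}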
@@ -4614,7 +4722,10 @@ lpfc_reset_hba(struct lpfc_hba *phba)
         phba->link_state = LPFC_HBA_ERROR;
         return;
     }
-    lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+    if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
+    else
+        lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
     lpfc_offline(phba);
     lpfc_sli_brdrestart(phba);
     lpfc_online(phba);
@@ -9663,9 +9774,6 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
 static void
 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
 {
-    struct lpfc_sli *psli = &phba->sli;
-    struct lpfc_sli_ring *pring;
-
     lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
         "2723 PCI channel I/O abort preparing for recovery\n");

@@ -9673,8 +9781,7 @@ lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
      * There may be errored I/Os through HBA, abort all I/Os on txcmplq
      * and let the SCSI mid-layer to retry them to recover.
      */
-    pring = &psli->ring[psli->fcp_ring];
-    lpfc_sli_abort_iocb_ring(phba, pring);
+    lpfc_sli_abort_fcp_rings(phba);
 }

 /**
@@ -10417,17 +10524,13 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
 static void
 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
 {
-    struct lpfc_sli *psli = &phba->sli;
-    struct lpfc_sli_ring *pring;
-
     lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
         "2828 PCI channel I/O abort preparing for recovery\n");
     /*
      * There may be errored I/Os through HBA, abort all I/Os on txcmplq
      * and let the SCSI mid-layer to retry them to recover.
      */
-    pring = &psli->ring[psli->fcp_ring];
-    lpfc_sli_abort_iocb_ring(phba, pring);
+    lpfc_sli_abort_fcp_rings(phba);
 }

 /**
@@ -10898,7 +11001,7 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
     if (phba->sli4_hba.pc_sli4_params.oas_supported) {
         phba->cfg_fof = 1;
     } else {
-        phba->cfg_EnableXLane = 0;
+        phba->cfg_fof = 0;
         if (phba->device_data_mem_pool)
             mempool_destroy(phba->device_data_mem_pool);
         phba->device_data_mem_pool = NULL;
@@ -10928,7 +11031,7 @@ lpfc_fof_queue_setup(struct lpfc_hba *phba)
     if (rc)
         return -ENOMEM;

-    if (phba->cfg_EnableXLane) {
+    if (phba->cfg_fof) {

         rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
                             phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
@@ -10947,8 +11050,7 @@ lpfc_fof_queue_setup(struct lpfc_hba *phba)
     return 0;

 out_oas_wq:
-    if (phba->cfg_EnableXLane)
-        lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
+    lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
 out_oas_cq:
     lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
     return rc;
@@ -10982,7 +11084,7 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)

     phba->sli4_hba.fof_eq = qdesc;

-    if (phba->cfg_EnableXLane) {
+    if (phba->cfg_fof) {

         /* Create OAS CQ */
         qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
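The cfg_EnableXLane to cfg_fof conversions throughout this series follow one rule: lpfc_sli4_oas_verify derives cfg_fof from what the firmware actually reports (oas_supported), while cfg_EnableXLane remains only the user's module-parameter request, and every runtime path now tests the verified value. The requested-versus-effective split, reduced to its essentials (the real driver seeds cfg_fof during init; this sketch folds the steps together):

#include <stdbool.h>

struct cfg {
    bool enable_xlane;  /* what the user asked for (module parameter) */
    bool fof;           /* what the hardware verified we can do */
};

void verify_oas(struct cfg *c, bool hw_oas_supported)
{
    /* effective capability = requested AND supported */
    c->fof = c->enable_xlane && hw_oas_supported;
}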
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for *
  * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2012 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
  * EMULEX and SLI are trademarks of Emulex. *
  * www.emulex.com *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for *
  * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
  * EMULEX and SLI are trademarks of Emulex. *
  * www.emulex.com *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -73,7 +73,7 @@ lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
 {
     struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;

-    if (vport->phba->cfg_EnableXLane)
+    if (vport->phba->cfg_fof)
         return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
     else
         return (struct lpfc_rport_data *)sdev->hostdata;
@@ -3462,7 +3462,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
      * If the OAS driver feature is enabled and the lun is enabled for
      * OAS, set the oas iocb related flags.
      */
-    if ((phba->cfg_EnableXLane) && ((struct lpfc_device_data *)
+    if ((phba->cfg_fof) && ((struct lpfc_device_data *)
         scsi_cmnd->device->hostdata)->oas_enabled)
         lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS;
     return 0;
@@ -4314,6 +4314,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
     fcp_cmnd->fcpCntl1 = SIMPLE_Q;

     sli4 = (phba->sli_rev == LPFC_SLI_REV4);
+    piocbq->iocb.un.fcpi.fcpi_XRdy = 0;

     /*
      * There are three possibilities here - use scatter-gather segment, use
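The one-line fcpi_XRdy = 0 addition matters because lpfc recycles iocb objects: a previous command that negotiated FirstBurst leaves a nonzero XRdy behind, and the next command through the same iocb would inherit it, so FirstBurst could never transition from on to off. The general rule, sketched with a hypothetical recycled descriptor:

struct cmd_iocb {
    unsigned int xrdy;  /* per-command write-intent length (FirstBurst) */
    /* ... many other fields ... */
};

/* Recycled descriptors must have per-command fields re-initialized on
 * every prep; zeroing at first allocation alone lets state leak. */
static void prep_cmd(struct cmd_iocb *iocb)
{
    iocb->xrdy = 0;
}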
@@ -4782,7 +4783,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
     struct lpfc_scsi_buf *lpfc_cmd;
     IOCB_t *cmd, *icmd;
     int ret = SUCCESS, status = 0;
-    unsigned long flags;
+    struct lpfc_sli_ring *pring_s4;
+    int ring_number, ret_val;
+    unsigned long flags, iflags;
     DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

     status = fc_block_scsi_eh(cmnd);
@@ -4833,6 +4836,14 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)

     BUG_ON(iocb->context1 != lpfc_cmd);

+    /* abort issued in recovery is still in progress */
+    if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
+        lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+            "3389 SCSI Layer I/O Abort Request is pending\n");
+        spin_unlock_irqrestore(&phba->hbalock, flags);
+        goto wait_for_cmpl;
+    }
+
     abtsiocb = __lpfc_sli_get_iocbq(phba);
     if (abtsiocb == NULL) {
         ret = FAILED;
@@ -4871,11 +4882,23 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)

     abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
     abtsiocb->vport = vport;
+    if (phba->sli_rev == LPFC_SLI_REV4) {
+        ring_number = MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx;
+        pring_s4 = &phba->sli.ring[ring_number];
+        /* Note: both hbalock and ring_lock must be set here */
+        spin_lock_irqsave(&pring_s4->ring_lock, iflags);
+        ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
+                                        abtsiocb, 0);
+        spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
+    } else {
+        ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
+                                        abtsiocb, 0);
+    }
     /* no longer need the lock after this point */
     spin_unlock_irqrestore(&phba->hbalock, flags);

-    if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
-        IOCB_ERROR) {
+    if (ret_val == IOCB_ERROR) {
         lpfc_sli_release_iocbq(phba, abtsiocb);
         ret = FAILED;
         goto out;
@@ -4885,12 +4908,16 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
         lpfc_sli_handle_fast_ring_event(phba,
             &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

+wait_for_cmpl:
     lpfc_cmd->waitq = &waitq;
     /* Wait for abort to complete */
     wait_event_timeout(waitq,
                        (lpfc_cmd->pCmd != cmnd),
                        msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
+
+    spin_lock_irqsave(shost->host_lock, flags);
     lpfc_cmd->waitq = NULL;
+    spin_unlock_irqrestore(shost->host_lock, flags);

     if (lpfc_cmd->pCmd == cmnd) {
         ret = FAILED;
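The lpfc_abort_handler rework does two things visible in these hunks: it branches to wait_for_cmpl when an abort is already in flight (LPFC_DRIVER_ABORTED), and on SLI4 it issues the ABTS on the ring that owns the command (index MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx) while holding both hbalock and that ring's ring_lock, in that fixed order. Taking nested locks in one global order is what keeps this deadlock-free; a minimal sketch of the discipline, with pthread mutexes standing in for the spinlocks:

#include <pthread.h>

static pthread_mutex_t hba_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

/* Always hba_lock first, ring_lock second - every path must agree on
 * this order, or two CPUs can deadlock waiting on each other. */
void issue_abort_locked(void (*issue)(void))
{
    pthread_mutex_lock(&hba_lock);
    pthread_mutex_lock(&ring_lock);
    issue();
    pthread_mutex_unlock(&ring_lock);
    pthread_mutex_unlock(&hba_lock);
}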
@@ -5172,8 +5199,9 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,

     cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
     if (cnt)
-        lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
-                            tgt_id, lun_id, context);
+        lpfc_sli_abort_taskmgmt(vport,
+                                &phba->sli.ring[phba->sli.fcp_ring],
+                                tgt_id, lun_id, context);
     later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
     while (time_after(later, jiffies) && cnt) {
         schedule_timeout_uninterruptible(msecs_to_jiffies(20));
@@ -5491,7 +5519,7 @@ lpfc_slave_alloc(struct scsi_device *sdev)
     if (!rport || fc_remote_port_chkready(rport))
         return -ENXIO;

-    if (phba->cfg_EnableXLane) {
+    if (phba->cfg_fof) {

         /*
          * Check to see if the device data structure for the lun
@@ -5616,7 +5644,7 @@ lpfc_slave_destroy(struct scsi_device *sdev)
     struct lpfc_device_data *device_data = sdev->hostdata;

     atomic_dec(&phba->sdev_cnt);
-    if ((phba->cfg_EnableXLane) && (device_data)) {
+    if ((phba->cfg_fof) && (device_data)) {
         spin_lock_irqsave(&phba->devicelock, flags);
         device_data->available = false;
         if (!device_data->oas_enabled)
@@ -5655,7 +5683,7 @@ lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
     int memory_flags;

     if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
-        !(phba->cfg_EnableXLane))
+        !(phba->cfg_fof))
         return NULL;

     /* Attempt to create the device data to contain lun info */
@@ -5693,7 +5721,7 @@ lpfc_delete_device_data(struct lpfc_hba *phba,
 {

     if (unlikely(!phba) || !lun_info ||
-        !(phba->cfg_EnableXLane))
+        !(phba->cfg_fof))
         return;

     if (!list_empty(&lun_info->listentry))
@@ -5727,7 +5755,7 @@ __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
     struct lpfc_device_data *lun_info;

     if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
-        !phba->cfg_EnableXLane)
+        !phba->cfg_fof)
         return NULL;

     /* Check to see if the lun is already enabled for OAS. */
@@ -5789,7 +5817,7 @@ lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
         !starting_lun || !found_vport_wwpn ||
         !found_target_wwpn || !found_lun || !found_lun_status ||
         (*starting_lun == NO_MORE_OAS_LUN) ||
-        !phba->cfg_EnableXLane)
+        !phba->cfg_fof)
         return false;

     lun = *starting_lun;
@@ -5873,7 +5901,7 @@ lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
     unsigned long flags;

     if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
-        !phba->cfg_EnableXLane)
+        !phba->cfg_fof)
         return false;

     spin_lock_irqsave(&phba->devicelock, flags);
@@ -5930,7 +5958,7 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
     unsigned long flags;

     if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
-        !phba->cfg_EnableXLane)
+        !phba->cfg_fof)
         return false;

     spin_lock_irqsave(&phba->devicelock, flags);
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for *
  * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
  * EMULEX and SLI are trademarks of Emulex. *
  * www.emulex.com *
  * *
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for *
  * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2014 Emulex. All rights reserved. *
  * EMULEX and SLI are trademarks of Emulex. *
  * www.emulex.com *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -3532,20 +3532,63 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
     /* Error everything on txq and txcmplq
      * First do the txq.
      */
-    spin_lock_irq(&phba->hbalock);
-    list_splice_init(&pring->txq, &completions);
-
-    /* Next issue ABTS for everything on the txcmplq */
-    list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
-        lpfc_sli_issue_abort_iotag(phba, pring, iocb);
-
-    spin_unlock_irq(&phba->hbalock);
+    if (phba->sli_rev >= LPFC_SLI_REV4) {
+        spin_lock_irq(&pring->ring_lock);
+        list_splice_init(&pring->txq, &completions);
+        pring->txq_cnt = 0;
+        spin_unlock_irq(&pring->ring_lock);
+
+        spin_lock_irq(&phba->hbalock);
+        /* Next issue ABTS for everything on the txcmplq */
+        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+            lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+        spin_unlock_irq(&phba->hbalock);
+    } else {
+        spin_lock_irq(&phba->hbalock);
+        list_splice_init(&pring->txq, &completions);
+        pring->txq_cnt = 0;
+
+        /* Next issue ABTS for everything on the txcmplq */
+        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+            lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+        spin_unlock_irq(&phba->hbalock);
+    }

     /* Cancel all the IOCBs from the completions list */
     lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
                           IOERR_SLI_ABORTED);
 }

+/**
+ * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function aborts all iocbs in FCP rings and frees all the iocb
+ * objects in txq. This function issues an abort iocb for all the iocb commands
+ * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
+ * the return of this function. The caller is not required to hold any locks.
+ **/
+void
+lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
+{
+    struct lpfc_sli *psli = &phba->sli;
+    struct lpfc_sli_ring *pring;
+    uint32_t i;
+
+    /* Look on all the FCP Rings for the iotag */
+    if (phba->sli_rev >= LPFC_SLI_REV4) {
+        for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
+            pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
+            lpfc_sli_abort_iocb_ring(phba, pring);
+        }
+    } else {
+        pring = &psli->ring[psli->fcp_ring];
+        lpfc_sli_abort_iocb_ring(phba, pring);
+    }
+}

 /**
  * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
  * @phba: Pointer to HBA context object.
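lpfc_sli_abort_fcp_rings encodes the SLI4 ring layout: the first MAX_SLI3_CONFIGURED_RINGS slots keep their legacy meaning, and the per-channel FCP rings follow, so channel i lives at index MAX_SLI3_CONFIGURED_RINGS + i. The same arithmetic reappears in lpfc_sli_flush_fcp_rings below and in the abort handler above. Reduced to a check (the value 4 here is an assumption for illustration, not taken from the patch):

#include <assert.h>

#define MAX_SLI3_CONFIGURED_RINGS 4 /* assumed value for illustration */

static inline unsigned int fcp_ring_index(unsigned int channel)
{
    /* SLI4: legacy rings first, then one ring per FCP io channel */
    return MAX_SLI3_CONFIGURED_RINGS + channel;
}

int main(void)
{
    assert(fcp_ring_index(0) == MAX_SLI3_CONFIGURED_RINGS);
    assert(fcp_ring_index(3) == MAX_SLI3_CONFIGURED_RINGS + 3);
    return 0;
}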
@ -3563,28 +3606,55 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
|
|||||||
LIST_HEAD(txcmplq);
|
LIST_HEAD(txcmplq);
|
||||||
struct lpfc_sli *psli = &phba->sli;
|
struct lpfc_sli *psli = &phba->sli;
|
||||||
struct lpfc_sli_ring *pring;
|
struct lpfc_sli_ring *pring;
|
||||||
|
uint32_t i;
|
||||||
/* Currently, only one fcp ring */
|
|
||||||
pring = &psli->ring[psli->fcp_ring];
|
|
||||||
|
|
||||||
spin_lock_irq(&phba->hbalock);
|
spin_lock_irq(&phba->hbalock);
|
||||||
/* Retrieve everything on txq */
|
|
||||||
list_splice_init(&pring->txq, &txq);
|
|
||||||
|
|
||||||
/* Retrieve everything on the txcmplq */
|
|
||||||
list_splice_init(&pring->txcmplq, &txcmplq);
|
|
||||||
|
|
||||||
/* Indicate the I/O queues are flushed */
|
/* Indicate the I/O queues are flushed */
|
||||||
phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
|
phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
|
||||||
spin_unlock_irq(&phba->hbalock);
|
spin_unlock_irq(&phba->hbalock);
|
||||||
|
|
||||||
/* Flush the txq */
|
/* Look on all the FCP Rings for the iotag */
|
||||||
lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
|
if (phba->sli_rev >= LPFC_SLI_REV4) {
|
||||||
IOERR_SLI_DOWN);
|
for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
|
||||||
|
pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
|
||||||
|
|
||||||
/* Flush the txcmpq */
|
spin_lock_irq(&pring->ring_lock);
|
||||||
lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
|
/* Retrieve everything on txq */
|
||||||
IOERR_SLI_DOWN);
|
list_splice_init(&pring->txq, &txq);
|
||||||
|
/* Retrieve everything on the txcmplq */
|
||||||
|
list_splice_init(&pring->txcmplq, &txcmplq);
|
||||||
|
pring->txq_cnt = 0;
|
||||||
|
pring->txcmplq_cnt = 0;
|
||||||
|
spin_unlock_irq(&pring->ring_lock);
|
||||||
|
|
||||||
|
/* Flush the txq */
|
||||||
|
lpfc_sli_cancel_iocbs(phba, &txq,
|
||||||
|
IOSTAT_LOCAL_REJECT,
|
||||||
|
IOERR_SLI_DOWN);
|
||||||
|
/* Flush the txcmpq */
|
||||||
|
lpfc_sli_cancel_iocbs(phba, &txcmplq,
|
||||||
|
IOSTAT_LOCAL_REJECT,
|
||||||
|
IOERR_SLI_DOWN);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
pring = &psli->ring[psli->fcp_ring];
|
||||||
|
|
||||||
|
spin_lock_irq(&phba->hbalock);
|
||||||
|
/* Retrieve everything on txq */
|
||||||
|
list_splice_init(&pring->txq, &txq);
|
||||||
|
/* Retrieve everything on the txcmplq */
|
||||||
|
list_splice_init(&pring->txcmplq, &txcmplq);
|
||||||
|
pring->txq_cnt = 0;
|
||||||
|
pring->txcmplq_cnt = 0;
|
||||||
|
spin_unlock_irq(&phba->hbalock);
|
||||||
|
|
||||||
|
/* Flush the txq */
|
||||||
|
lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
|
||||||
|
IOERR_SLI_DOWN);
|
||||||
|
/* Flush the txcmpq */
|
||||||
|
lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
|
||||||
|
IOERR_SLI_DOWN);
|
||||||
|
}
|
||||||
}
|
}
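Both branches of the rewritten flush repeat one locking idiom: splice the queues off the ring under the appropriate lock (the new per-ring ring_lock on SLI4, hbalock on SLI3), then cancel the detached iocbs with the lock dropped so completion callbacks never run under a spinlock. A minimal sketch of that pattern as a hypothetical helper (the name and factoring are not from the patch):

static void
lpfc_flush_one_fcp_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        spinlock_t *lock)
{
        LIST_HEAD(txq);
        LIST_HEAD(txcmplq);

        spin_lock_irq(lock);
        list_splice_init(&pring->txq, &txq);
        list_splice_init(&pring->txcmplq, &txcmplq);
        pring->txq_cnt = 0;
        pring->txcmplq_cnt = 0;
        spin_unlock_irq(lock);

        /* completion handlers run here, outside the lock */
        lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
                              IOERR_SLI_DOWN);
        lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
                              IOERR_SLI_DOWN);
}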
 
 /**
@@ -3987,12 +4057,13 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
 {
         struct lpfc_sli *psli = &phba->sli;
         uint16_t cfg_value;
-        int rc;
+        int rc = 0;
 
         /* Reset HBA */
         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-                        "0295 Reset HBA Data: x%x x%x\n",
-                        phba->pport->port_state, psli->sli_flag);
+                        "0295 Reset HBA Data: x%x x%x x%x\n",
+                        phba->pport->port_state, psli->sli_flag,
+                        phba->hba_flag);
 
         /* perform board reset */
         phba->fc_eventTag = 0;
@@ -4005,6 +4076,12 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
         phba->fcf.fcf_flag = 0;
         spin_unlock_irq(&phba->hbalock);
 
+        /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
+        if (phba->hba_flag & HBA_FW_DUMP_OP) {
+                phba->hba_flag &= ~HBA_FW_DUMP_OP;
+                return rc;
+        }
+
         /* Now physically reset the device */
         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                 "0389 Performing PCI function reset!\n");
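The new early return pairs with the "Fix FW dump using sysfs" change in this series: a dump request sets HBA_FW_DUMP_OP before asking for the reset, and lpfc_sli4_brdreset() then skips the physical PCI function reset so the dump image held by the controller is not wiped. A hedged sketch of such a trigger (only the flag name comes from the patch; the caller shown is assumed):

        spin_lock_irq(&phba->hbalock);
        phba->hba_flag |= HBA_FW_DUMP_OP;       /* preserve the FW dump image */
        spin_unlock_irq(&phba->hbalock);

        lpfc_sli4_brdreset(phba);               /* returns before the PCI reset */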
@@ -5002,7 +5079,7 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
                 } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
         }
 
-        if (phba->cfg_EnableXLane)
+        if (phba->cfg_fof)
                 lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
 
         if (phba->sli4_hba.hba_eq) {
@@ -6722,7 +6799,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
         LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
         MAILBOX_t *mb = &pmbox->u.mb;
         struct lpfc_sli *psli = &phba->sli;
-        struct lpfc_sli_ring *pring;
 
         /* If the mailbox completed, process the completion and return */
         if (lpfc_sli4_process_missed_mbox_completions(phba))
@@ -6764,8 +6840,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
         spin_unlock_irq(&phba->hbalock);
 
-        pring = &psli->ring[psli->fcp_ring];
-        lpfc_sli_abort_iocb_ring(phba, pring);
+        lpfc_sli_abort_fcp_rings(phba);
 
         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
                         "0345 Resetting board due to mailbox timeout\n");
@@ -8133,6 +8208,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
         abort_tag = (uint32_t) iocbq->iotag;
         xritag = iocbq->sli4_xritag;
         wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
+        wqe->generic.wqe_com.word10 = 0;
         /* words0-2 bpl convert bde */
         if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
                 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
@@ -8639,8 +8715,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 
         if ((piocb->iocb_flag & LPFC_IO_FCP) ||
             (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
-                if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag &
-                        LPFC_IO_OAS))) {
+                if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) {
                         wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
                 } else {
                         wq = phba->sli4_hba.oas_wq;
@@ -8735,7 +8810,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
 
         if (phba->sli_rev == LPFC_SLI_REV4) {
                 if (piocb->iocb_flag & LPFC_IO_FCP) {
-                        if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag &
+                        if (!phba->cfg_fof || (!(piocb->iocb_flag &
                                 LPFC_IO_OAS))) {
                                 if (unlikely(!phba->sli4_hba.fcp_wq))
                                         return IOCB_ERROR;
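Both issue-path hunks make the same substitution: the runtime test now keys off the internal cfg_fof feature flag instead of the user-facing cfg_EnableXLane module parameter. The work-queue choice they encode could be factored as the following hypothetical helper (not in the patch):

static inline struct lpfc_queue *
lpfc_select_fcp_wq(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
        /* OAS I/O rides the dedicated WQ only when the fof feature is on */
        if (phba->cfg_fof && (piocb->iocb_flag & LPFC_IO_OAS))
                return phba->sli4_hba.oas_wq;

        return phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
}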
@@ -9170,6 +9245,7 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
                 pring->sli.sli3.next_cmdidx = 0;
                 pring->sli.sli3.local_getidx = 0;
                 pring->sli.sli3.cmdidx = 0;
+                pring->flag = 0;
                 INIT_LIST_HEAD(&pring->txq);
                 INIT_LIST_HEAD(&pring->txcmplq);
                 INIT_LIST_HEAD(&pring->iocb_continueq);
@@ -9804,43 +9880,6 @@ abort_iotag_exit:
         return retval;
 }
 
-/**
- * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
- * @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
- *
- * This function aborts all iocbs in the given ring and frees all the iocb
- * objects in txq. This function issues abort iocbs unconditionally for all
- * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed
- * to complete before the return of this function. The caller is not required
- * to hold any locks.
- **/
-static void
-lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
-{
-        LIST_HEAD(completions);
-        struct lpfc_iocbq *iocb, *next_iocb;
-
-        if (pring->ringno == LPFC_ELS_RING)
-                lpfc_fabric_abort_hba(phba);
-
-        spin_lock_irq(&phba->hbalock);
-
-        /* Take off all the iocbs on txq for cancelling */
-        list_splice_init(&pring->txq, &completions);
-        pring->txq_cnt = 0;
-
-        /* Next issue ABTS for everything on the txcmplq */
-        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
-                lpfc_sli_abort_iotag_issue(phba, pring, iocb);
-
-        spin_unlock_irq(&phba->hbalock);
-
-        /* Cancel all the IOCBs from the completions list */
-        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
-                              IOERR_SLI_ABORTED);
-}
-
 /**
  * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
  * @phba: pointer to lpfc HBA data structure.
@@ -9856,7 +9895,7 @@ lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
 
         for (i = 0; i < psli->num_rings; i++) {
                 pring = &psli->ring[i];
-                lpfc_sli_iocb_ring_abort(phba, pring);
+                lpfc_sli_abort_iocb_ring(phba, pring);
         }
 }
 
@@ -10080,6 +10119,124 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
         return errcnt;
 }
 
+/**
+ * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
+ * @vport: Pointer to virtual port.
+ * @pring: Pointer to driver SLI ring object.
+ * @tgt_id: SCSI ID of the target.
+ * @lun_id: LUN ID of the scsi device.
+ * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
+ *
+ * This function sends an abort command for every SCSI command
+ * associated with the given virtual port pending on the ring
+ * filtered by lpfc_sli_validate_fcp_iocb function.
+ * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
+ * FCP iocbs associated with lun specified by tgt_id and lun_id
+ * parameters
+ * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
+ * FCP iocbs associated with SCSI target specified by tgt_id parameter.
+ * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
+ * FCP iocbs associated with virtual port.
+ * This function returns number of iocbs it aborted.
+ * This function is called with no locks held right after a taskmgmt
+ * command is sent.
+ **/
+int
+lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
+                        uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
+{
+        struct lpfc_hba *phba = vport->phba;
+        struct lpfc_iocbq *abtsiocbq;
+        struct lpfc_iocbq *iocbq;
+        IOCB_t *icmd;
+        int sum, i, ret_val;
+        unsigned long iflags;
+        struct lpfc_sli_ring *pring_s4;
+        uint32_t ring_number;
+
+        spin_lock_irq(&phba->hbalock);
+
+        /* all I/Os are in process of being flushed */
+        if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
+                spin_unlock_irq(&phba->hbalock);
+                return 0;
+        }
+        sum = 0;
+
+        for (i = 1; i <= phba->sli.last_iotag; i++) {
+                iocbq = phba->sli.iocbq_lookup[i];
+
+                if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
+                                               cmd) != 0)
+                        continue;
+
+                /*
+                 * If the iocbq is already being aborted, don't take a second
+                 * action, but do count it.
+                 */
+                if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+                        continue;
+
+                /* issue ABTS for this IOCB based on iotag */
+                abtsiocbq = __lpfc_sli_get_iocbq(phba);
+                if (abtsiocbq == NULL)
+                        continue;
+
+                icmd = &iocbq->iocb;
+                abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
+                abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
+                if (phba->sli_rev == LPFC_SLI_REV4)
+                        abtsiocbq->iocb.un.acxri.abortIoTag =
+                                                        iocbq->sli4_xritag;
+                else
+                        abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
+                abtsiocbq->iocb.ulpLe = 1;
+                abtsiocbq->iocb.ulpClass = icmd->ulpClass;
+                abtsiocbq->vport = vport;
+
+                /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+                abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx;
+                if (iocbq->iocb_flag & LPFC_IO_FCP)
+                        abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
+
+                if (lpfc_is_link_up(phba))
+                        abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
+                else
+                        abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
+
+                /* Setup callback routine and issue the command. */
+                abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+
+                /*
+                 * Indicate the IO is being aborted by the driver and set
+                 * the caller's flag into the aborted IO.
+                 */
+                iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+                if (phba->sli_rev == LPFC_SLI_REV4) {
+                        ring_number = MAX_SLI3_CONFIGURED_RINGS +
+                                        iocbq->fcp_wqidx;
+                        pring_s4 = &phba->sli.ring[ring_number];
+                        /* Note: both hbalock and ring_lock must be set here */
+                        spin_lock_irqsave(&pring_s4->ring_lock, iflags);
+                        ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
+                                                        abtsiocbq, 0);
+                        spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
+                } else {
+                        ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
+                                                        abtsiocbq, 0);
+                }
+
+                if (ret_val == IOCB_ERROR)
+                        __lpfc_sli_release_iocbq(phba, abtsiocbq);
+                else
+                        sum++;
+        }
+        spin_unlock_irq(&phba->hbalock);
+        return sum;
+}
+
 /**
  * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
  * @phba: Pointer to HBA context object.
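Per its kernel-doc, the new lpfc_sli_abort_taskmgmt() is called with no locks held, right after the task management frame goes out. A hypothetical call site for a LUN reset (tgt_id and lun_id are assumed to be in scope; this is a sketch, not code from the series):

        struct lpfc_hba *phba = vport->phba;
        struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
        int aborted;

        /* abort every FCP iocb still outstanding against this LUN */
        aborted = lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, lun_id,
                                          LPFC_CTX_LUN);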
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2009-2013 Emulex.  All rights reserved.           *
+ * Copyright (C) 2009-2014 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *

@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.45"
+#define LPFC_DRIVER_VERSION "10.2.8001.0."
 #define LPFC_DRIVER_NAME        "lpfc"
 
 /* Used for SLI 2/3 */
@@ -30,4 +30,4 @@
 
 #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
                 LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright(c) 2004-2013 Emulex.  All rights reserved."
+#define LPFC_COPYRIGHT "Copyright(c) 2004-2014 Emulex.  All rights reserved."
@@ -1648,16 +1648,16 @@ typedef struct {
  */
 struct crc_context {
         uint32_t handle;                /* System handle. */
-        uint32_t ref_tag;
-        uint16_t app_tag;
+        __le32 ref_tag;
+        __le16 app_tag;
         uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask*/
         uint8_t app_tag_mask[2];        /* Validation/Replacement Mask*/
-        uint16_t guard_seed;            /* Initial Guard Seed */
-        uint16_t prot_opts;             /* Requested Data Protection Mode */
-        uint16_t blk_size;              /* Data size in bytes */
+        __le16 guard_seed;              /* Initial Guard Seed */
+        __le16 prot_opts;               /* Requested Data Protection Mode */
+        __le16 blk_size;                /* Data size in bytes */
         uint16_t runt_blk_guard;        /* Guard value for runt block (tape
                                          * only) */
-        uint32_t byte_count;            /* Total byte count/ total data
+        __le32 byte_count;              /* Total byte count/ total data
                                          * transfer count */
         union {
                 struct {
@@ -1671,10 +1671,10 @@ struct crc_context {
                         uint32_t reserved_6;
                 } nobundling;
                 struct {
-                        uint32_t dif_byte_count;        /* Total DIF byte
+                        __le32 dif_byte_count;          /* Total DIF byte
                                                          * count */
                         uint16_t reserved_1;
-                        uint16_t dseg_count;    /* Data segment count */
+                        __le16 dseg_count;      /* Data segment count */
                         uint32_t reserved_2;
                         uint32_t data_address[2];
                         uint32_t data_length;
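The payoff of these annotations is that sparse can now verify every store to a firmware-visible field goes through a byte-order helper. A self-contained toy in the same style (struct and field names invented for illustration):

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_wire_ctx {          /* fabricated example, not driver code */
        __le16 blk_size;        /* little-endian on the wire */
        __le32 byte_count;
};

static void demo_fill(struct demo_wire_ctx *ctx, u16 blk, u32 count)
{
        /* a bare "ctx->blk_size = blk" would now draw a sparse warning */
        ctx->blk_size   = cpu_to_le16(blk);
        ctx->byte_count = cpu_to_le32(count);
}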
@@ -1996,7 +1996,7 @@ qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
          * have been immplemented by TCM, before AppTag is avail.
          * Look for modesense_handlers[]
          */
-        ctx->app_tag = __constant_cpu_to_le16(0);
+        ctx->app_tag = 0;
         ctx->app_tag_mask[0] = 0x0;
         ctx->app_tag_mask[1] = 0x0;
 
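Dropping __constant_cpu_to_le16(0) is safe because zero is byte-order invariant: with app_tag now typed __le16, the literal 0 is already a valid little-endian value. Any non-zero constant would still need the conversion, e.g. (illustrative only, the second line is not from the patch):

        ctx->app_tag  = 0;                      /* 0 == cpu_to_le16(0) */
        ctx->blk_size = cpu_to_le16(512);       /* hypothetical non-zero case */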
@@ -2078,6 +2078,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
         struct se_cmd *se_cmd = &cmd->se_cmd;
         uint32_t h;
         struct atio_from_isp *atio = &prm->cmd->atio;
+        uint16_t t16;
 
         sgc = 0;
         ha = vha->hw;
@@ -2174,8 +2175,13 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
         pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
         pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
         pkt->exchange_addr = atio->u.isp24.exchange_addr;
-        pkt->ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
-        pkt->flags |= (atio->u.isp24.attr << 9);
+
+        /* silence compile warning */
+        t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+        pkt->ox_id = cpu_to_le16(t16);
+
+        t16 = (atio->u.isp24.attr << 9);
+        pkt->flags |= cpu_to_le16(t16);
         pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
 
         /* Set transfer direction */
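The rewrite spells out the double conversion that swab16() only implied: ox_id arrives big-endian from the fabric (see the __be16 annotation added to struct fcp_hdr below) while the CTIO packet wants little-endian. On any host that is still a single byte swap, but the typed helpers let sparse verify it, and the t16 temporary keeps the intermediate value in CPU order. In isolation:

        __be16 wire = atio->u.isp24.fcp_hdr.ox_id;      /* from the wire */
        uint16_t t16 = be16_to_cpu(wire);               /* CPU byte order */

        pkt->ox_id = cpu_to_le16(t16);                  /* as the ISP expects */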
@@ -2250,8 +2256,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
 
         if (bundling && prm->prot_seg_cnt) {
                 /* Walks dif segments */
-                pkt->add_flags |=
-                        __constant_cpu_to_le16(CTIO_CRC2_AF_DIF_DSD_ENA);
+                pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
 
                 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
@@ -316,7 +316,7 @@ struct fcp_hdr {
         uint8_t  seq_id;
         uint8_t  df_ctl;
         uint16_t seq_cnt;
-        uint16_t ox_id;
+        __be16   ox_id;
         uint16_t rx_id;
         uint32_t parameter;
 } __packed;
@@ -441,7 +441,7 @@ struct ctio7_to_24xx {
         union {
                 struct {
                         uint16_t reserved1;
-                        uint16_t flags;
+                        __le16   flags;
                         uint32_t residual;
                         uint16_t ox_id;
                         uint16_t scsi_status;
@@ -527,7 +527,7 @@ struct ctio_crc2_to_fw {
 
         uint32_t handle;                /* System handle. */
         uint16_t nport_handle;          /* N_PORT handle. */
-        uint16_t timeout;               /* Command timeout. */
+        __le16 timeout;                 /* Command timeout. */
 
         uint16_t dseg_count;            /* Data segment count. */
         uint8_t  vp_index;
@@ -538,15 +538,15 @@ struct ctio_crc2_to_fw {
         uint8_t  reserved1;
         uint32_t exchange_addr;         /* rcv exchange address */
         uint16_t reserved2;
-        uint16_t flags;                 /* refer to CTIO7 flags values */
+        __le16 flags;                   /* refer to CTIO7 flags values */
         uint32_t residual;
-        uint16_t ox_id;
+        __le16 ox_id;
         uint16_t scsi_status;
-        uint32_t relative_offset;
+        __le32 relative_offset;
         uint32_t reserved5;
-        uint32_t transfer_length;       /* total fc transfer length */
+        __le32 transfer_length;         /* total fc transfer length */
         uint32_t reserved6;
-        uint32_t crc_context_address[2];/* Data segment address. */
+        __le32 crc_context_address[2];  /* Data segment address. */
         uint16_t crc_context_len;       /* Data segment length. */
         uint16_t reserved_1;            /* MUST be set to 0. */
 } __packed;