2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-12 23:33:55 +08:00

SCSI for-linus on 20131110

This patch set is driver updates for qla4xxx, scsi_debug, pm80xx, fcoe/libfc,
 eas2r, lpfc, be2iscsi and megaraid_sas plus some assorted bug fixes and
 cleanups.
 
 Signed-off-by: James Bottomley <JBottomley@Parallels.com>
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2.0.19 (GNU/Linux)
 
 iQEcBAABAgAGBQJSfw30AAoJEDeqqVYsXL0MZPEIAK6GBHFw8JsU3NQ4SbM5hzdM
 ywPryTn7DO9jyj0J04i6TNtbS6om9E8tjLyr3SnmTQNiTDXGv44rIEfJyHR9ko2n
 E2hRu4xaGEWK4dkuktQuOqj2fuXRyeXr2maYIXjkmFI0hesLqozYKgLAeWTHvabE
 2HICwG/lfCzesqVl69Y3V8n1vZvtJqAls6liwY09i9eSDRe39DynRn7bjLXzkPkc
 ynjJYl22CIZ7nb+PgzqQ+xEUIdXqGG890CvGaqg7+x3ZUmmOtfECaDUkCjVeiiE6
 sy72V6E4ET/YMrkhRmIUKyZxGbl/tMxYPuGaBhq2fSNRx8x1R+Ajfh9UM2AZTh4=
 =hCHG
 -----END PGP SIGNATURE-----

Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull first round of SCSI updates from James Bottomley:
 "This patch set is driver updates for qla4xxx, scsi_debug, pm80xx,
  fcoe/libfc, eas2r, lpfc, be2iscsi and megaraid_sas plus some assorted
  bug fixes and cleanups"

* tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (106 commits)
  [SCSI] scsi_error: Escalate to LUN reset if abort fails
  [SCSI] Add 'eh_deadline' to limit SCSI EH runtime
  [SCSI] remove check for 'resetting'
  [SCSI] dc395: Move 'last_reset' into internal host structure
  [SCSI] tmscsim: Move 'last_reset' into host structure
  [SCSI] advansys: Remove 'last_reset' references
  [SCSI] dpt_i2o: return SCSI_MLQUEUE_HOST_BUSY when in reset
  [SCSI] dpt_i2o: Remove DPTI_STATE_IOCTL
  [SCSI] megaraid_sas: Fix synchronization problem between sysPD IO path and AEN path
  [SCSI] lpfc: Fix typo on NULL assignment
  [SCSI] scsi_dh_alua: ALUA handler attach should succeed while TPG is transitioning
  [SCSI] scsi_dh_alua: ALUA check sense should retry device internal reset unit attention
  [SCSI] esas2r: Cleanup snprinf formatting of firmware version
  [SCSI] esas2r: Remove superfluous mask of pcie_cap_reg
  [SCSI] esas2r: Fixes for big-endian platforms
  [SCSI] esas2r: Directly call kernel functions for atomic bit operations
  [SCSI] lpfc 8.3.43: Update lpfc version to driver version 8.3.43
  [SCSI] lpfc 8.3.43: Fixed not processing task management IOCB response status
  [SCSI] lpfc 8.3.43: Fixed spinlock hang.
  [SCSI] lpfc 8.3.43: Fixed invalid Total_Data_Placed value received for els and ct command responses
  ...
This commit is contained in:
Linus Torvalds 2013-11-14 12:25:38 +09:00
commit 0d522ee749
92 changed files with 5606 additions and 1435 deletions

View File

@ -1867,7 +1867,7 @@ S: Supported
F: drivers/net/wireless/brcm80211/ F: drivers/net/wireless/brcm80211/
BROADCOM BNX2FC 10 GIGABIT FCOE DRIVER BROADCOM BNX2FC 10 GIGABIT FCOE DRIVER
M: Bhanu Prakash Gollapudi <bprakash@broadcom.com> M: Eddie Wai <eddie.wai@broadcom.com>
L: linux-scsi@vger.kernel.org L: linux-scsi@vger.kernel.org
S: Supported S: Supported
F: drivers/scsi/bnx2fc/ F: drivers/scsi/bnx2fc/

View File

@ -26,8 +26,8 @@
*/ */
#define blogic_drvr_version "2.1.16" #define blogic_drvr_version "2.1.17"
#define blogic_drvr_date "18 July 2002" #define blogic_drvr_date "12 September 2013"
#include <linux/module.h> #include <linux/module.h>
#include <linux/init.h> #include <linux/init.h>
@ -311,12 +311,14 @@ static struct blogic_ccb *blogic_alloc_ccb(struct blogic_adapter *adapter)
caller. caller.
*/ */
static void blogic_dealloc_ccb(struct blogic_ccb *ccb) static void blogic_dealloc_ccb(struct blogic_ccb *ccb, int dma_unmap)
{ {
struct blogic_adapter *adapter = ccb->adapter; struct blogic_adapter *adapter = ccb->adapter;
scsi_dma_unmap(ccb->command); if (ccb->command != NULL)
pci_unmap_single(adapter->pci_device, ccb->sensedata, scsi_dma_unmap(ccb->command);
if (dma_unmap)
pci_unmap_single(adapter->pci_device, ccb->sensedata,
ccb->sense_datalen, PCI_DMA_FROMDEVICE); ccb->sense_datalen, PCI_DMA_FROMDEVICE);
ccb->command = NULL; ccb->command = NULL;
@ -2762,8 +2764,8 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
/* /*
Place CCB back on the Host Adapter's free list. Place CCB back on the Host Adapter's free list.
*/ */
blogic_dealloc_ccb(ccb); blogic_dealloc_ccb(ccb, 1);
#if 0 /* this needs to be redone different for new EH */ #if 0 /* this needs to be redone different for new EH */
/* /*
Bus Device Reset CCBs have the command field Bus Device Reset CCBs have the command field
non-NULL only when a Bus Device Reset was requested non-NULL only when a Bus Device Reset was requested
@ -2791,7 +2793,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
if (ccb->status == BLOGIC_CCB_RESET && if (ccb->status == BLOGIC_CCB_RESET &&
ccb->tgt_id == tgt_id) { ccb->tgt_id == tgt_id) {
command = ccb->command; command = ccb->command;
blogic_dealloc_ccb(ccb); blogic_dealloc_ccb(ccb, 1);
adapter->active_cmds[tgt_id]--; adapter->active_cmds[tgt_id]--;
command->result = DID_RESET << 16; command->result = DID_RESET << 16;
command->scsi_done(command); command->scsi_done(command);
@ -2862,7 +2864,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
/* /*
Place CCB back on the Host Adapter's free list. Place CCB back on the Host Adapter's free list.
*/ */
blogic_dealloc_ccb(ccb); blogic_dealloc_ccb(ccb, 1);
/* /*
Call the SCSI Command Completion Routine. Call the SCSI Command Completion Routine.
*/ */
@ -3034,6 +3036,7 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
int buflen = scsi_bufflen(command); int buflen = scsi_bufflen(command);
int count; int count;
struct blogic_ccb *ccb; struct blogic_ccb *ccb;
dma_addr_t sense_buf;
/* /*
SCSI REQUEST_SENSE commands will be executed automatically by the SCSI REQUEST_SENSE commands will be executed automatically by the
@ -3179,10 +3182,17 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
} }
memcpy(ccb->cdb, cdb, cdblen); memcpy(ccb->cdb, cdb, cdblen);
ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE; ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE;
ccb->sensedata = pci_map_single(adapter->pci_device, ccb->command = command;
sense_buf = pci_map_single(adapter->pci_device,
command->sense_buffer, ccb->sense_datalen, command->sense_buffer, ccb->sense_datalen,
PCI_DMA_FROMDEVICE); PCI_DMA_FROMDEVICE);
ccb->command = command; if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) {
blogic_err("DMA mapping for sense data buffer failed\n",
adapter);
blogic_dealloc_ccb(ccb, 0);
return SCSI_MLQUEUE_HOST_BUSY;
}
ccb->sensedata = sense_buf;
command->scsi_done = comp_cb; command->scsi_done = comp_cb;
if (blogic_multimaster_type(adapter)) { if (blogic_multimaster_type(adapter)) {
/* /*
@ -3203,7 +3213,7 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START, if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START,
ccb)) { ccb)) {
blogic_warn("Still unable to write Outgoing Mailbox - " "Host Adapter Dead?\n", adapter); blogic_warn("Still unable to write Outgoing Mailbox - " "Host Adapter Dead?\n", adapter);
blogic_dealloc_ccb(ccb); blogic_dealloc_ccb(ccb, 1);
command->result = DID_ERROR << 16; command->result = DID_ERROR << 16;
command->scsi_done(command); command->scsi_done(command);
} }
@ -3337,7 +3347,7 @@ static int blogic_resetadapter(struct blogic_adapter *adapter, bool hard_reset)
for (ccb = adapter->all_ccbs; ccb != NULL; ccb = ccb->next_all) for (ccb = adapter->all_ccbs; ccb != NULL; ccb = ccb->next_all)
if (ccb->status == BLOGIC_CCB_ACTIVE) if (ccb->status == BLOGIC_CCB_ACTIVE)
blogic_dealloc_ccb(ccb); blogic_dealloc_ccb(ccb, 1);
/* /*
* Wait a few seconds between the Host Adapter Hard Reset which * Wait a few seconds between the Host Adapter Hard Reset which
* initiates a SCSI Bus Reset and issuing any SCSI Commands. Some * initiates a SCSI Bus Reset and issuing any SCSI Commands. Some

View File

@ -2511,8 +2511,8 @@ static void asc_prt_scsi_host(struct Scsi_Host *s)
struct asc_board *boardp = shost_priv(s); struct asc_board *boardp = shost_priv(s);
printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev)); printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev));
printk(" host_busy %u, host_no %d, last_reset %d,\n", printk(" host_busy %u, host_no %d,\n",
s->host_busy, s->host_no, (unsigned)s->last_reset); s->host_busy, s->host_no);
printk(" base 0x%lx, io_port 0x%lx, irq %d,\n", printk(" base 0x%lx, io_port 0x%lx, irq %d,\n",
(ulong)s->base, (ulong)s->io_port, boardp->irq); (ulong)s->base, (ulong)s->io_port, boardp->irq);
@ -3345,8 +3345,8 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
shost->host_no); shost->host_no);
seq_printf(m, seq_printf(m,
" host_busy %u, last_reset %lu, max_id %u, max_lun %u, max_channel %u\n", " host_busy %u, max_id %u, max_lun %u, max_channel %u\n",
shost->host_busy, shost->last_reset, shost->max_id, shost->host_busy, shost->max_id,
shost->max_lun, shost->max_channel); shost->max_lun, shost->max_channel);
seq_printf(m, seq_printf(m,

View File

@ -128,7 +128,7 @@ struct be_ctrl_info {
#define PAGE_SHIFT_4K 12 #define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
#define mcc_timeout 120000 /* 5s timeout */ #define mcc_timeout 120000 /* 12s timeout */
/* Returns number of pages spanned by the data starting at the given addr */ /* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size) \ #define PAGES_4K_SPANNED(_address, size) \

View File

@ -17,9 +17,9 @@
#include <scsi/iscsi_proto.h> #include <scsi/iscsi_proto.h>
#include "be_main.h"
#include "be.h" #include "be.h"
#include "be_mgmt.h" #include "be_mgmt.h"
#include "be_main.h"
int beiscsi_pci_soft_reset(struct beiscsi_hba *phba) int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
{ {
@ -158,8 +158,10 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
struct be_cmd_resp_hdr *ioctl_resp_hdr; struct be_cmd_resp_hdr *ioctl_resp_hdr;
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
if (beiscsi_error(phba)) if (beiscsi_error(phba)) {
free_mcc_tag(&phba->ctrl, tag);
return -EIO; return -EIO;
}
/* wait for the mccq completion */ /* wait for the mccq completion */
rc = wait_event_interruptible_timeout( rc = wait_event_interruptible_timeout(
@ -173,7 +175,11 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
BEISCSI_LOG_INIT | BEISCSI_LOG_EH | BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
BEISCSI_LOG_CONFIG, BEISCSI_LOG_CONFIG,
"BC_%d : MBX Cmd Completion timed out\n"); "BC_%d : MBX Cmd Completion timed out\n");
rc = -EAGAIN; rc = -EBUSY;
/* decrement the mccq used count */
atomic_dec(&phba->ctrl.mcc_obj.q.used);
goto release_mcc_tag; goto release_mcc_tag;
} else } else
rc = 0; rc = 0;
@ -208,10 +214,18 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
if (status == MCC_STATUS_INSUFFICIENT_BUFFER) { if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr; ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr;
if (ioctl_resp_hdr->response_length) beiscsi_log(phba, KERN_WARNING,
goto release_mcc_tag; BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
BEISCSI_LOG_CONFIG,
"BC_%d : Insufficent Buffer Error "
"Resp_Len : %d Actual_Resp_Len : %d\n",
ioctl_resp_hdr->response_length,
ioctl_resp_hdr->actual_resp_len);
rc = -EAGAIN;
goto release_mcc_tag;
} }
rc = -EAGAIN; rc = -EIO;
} }
release_mcc_tag: release_mcc_tag:
@ -363,7 +377,7 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
} else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) || } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
((evt->port_link_status & ASYNC_EVENT_LOGICAL) && ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
(evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) { (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
phba->state = BE_ADAPTER_UP; phba->state = BE_ADAPTER_LINK_UP;
beiscsi_log(phba, KERN_ERR, beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
@ -486,33 +500,47 @@ int be_mcc_notify_wait(struct beiscsi_hba *phba)
**/ **/
static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl) static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
{ {
#define BEISCSI_MBX_RDY_BIT_TIMEOUT 4000 /* 4sec */
void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
uint32_t wait = 0; unsigned long timeout;
bool read_flag = false;
int ret = 0, i;
u32 ready; u32 ready;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(rdybit_check_q);
if (beiscsi_error(phba))
return -EIO;
timeout = jiffies + (HZ * 110);
do { do {
for (i = 0; i < BEISCSI_MBX_RDY_BIT_TIMEOUT; i++) {
if (beiscsi_error(phba)) ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
return -EIO; if (ready) {
read_flag = true;
ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK; break;
if (ready) }
break; mdelay(1);
if (wait > BEISCSI_HOST_MBX_TIMEOUT) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BC_%d : FW Timed Out\n");
phba->fw_timeout = true;
beiscsi_ue_detect(phba);
return -EBUSY;
} }
mdelay(1); if (!read_flag) {
wait++; wait_event_timeout(rdybit_check_q,
} while (true); (read_flag != true),
return 0; HZ * 5);
}
} while ((time_before(jiffies, timeout)) && !read_flag);
if (!read_flag) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BC_%d : FW Timed Out\n");
phba->fw_timeout = true;
beiscsi_ue_detect(phba);
ret = -EBUSY;
}
return ret;
} }
/* /*
@ -699,7 +727,7 @@ struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
struct be_mcc_wrb *wrb; struct be_mcc_wrb *wrb;
BUG_ON(atomic_read(&mccq->used) >= mccq->len); WARN_ON(atomic_read(&mccq->used) >= mccq->len);
wrb = queue_head_node(mccq); wrb = queue_head_node(mccq);
memset(wrb, 0, sizeof(*wrb)); memset(wrb, 0, sizeof(*wrb));
wrb->tag0 = (mccq->head & 0x000000FF) << 16; wrb->tag0 = (mccq->head & 0x000000FF) << 16;
@ -1009,10 +1037,29 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
return status; return status;
} }
/**
* be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
* @ctrl: ptr to ctrl_info
* @cq: Completion Queue
* @dq: Default Queue
* @lenght: ring size
* @entry_size: size of each entry in DEFQ
* @is_header: Header or Data DEFQ
* @ulp_num: Bind to which ULP
*
* Create HDR/Data DEFQ for the passed ULP. Unsol PDU are posted
* on this queue by the FW
*
* return
* Success: 0
* Failure: Non-Zero Value
*
**/
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
struct be_queue_info *cq, struct be_queue_info *cq,
struct be_queue_info *dq, int length, struct be_queue_info *dq, int length,
int entry_size) int entry_size, uint8_t is_header,
uint8_t ulp_num)
{ {
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_defq_create_req *req = embedded_payload(wrb); struct be_defq_create_req *req = embedded_payload(wrb);
@ -1030,6 +1077,11 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req)); OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
if (phba->fw_config.dual_ulp_aware) {
req->ulp_num = ulp_num;
req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
}
if (is_chip_be2_be3r(phba)) { if (is_chip_be2_be3r(phba)) {
AMAP_SET_BITS(struct amap_be_default_pdu_context, AMAP_SET_BITS(struct amap_be_default_pdu_context,
@ -1067,22 +1119,53 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
status = be_mbox_notify(ctrl); status = be_mbox_notify(ctrl);
if (!status) { if (!status) {
struct be_ring *defq_ring;
struct be_defq_create_resp *resp = embedded_payload(wrb); struct be_defq_create_resp *resp = embedded_payload(wrb);
dq->id = le16_to_cpu(resp->id); dq->id = le16_to_cpu(resp->id);
dq->created = true; dq->created = true;
if (is_header)
defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
else
defq_ring = &phba->phwi_ctrlr->
default_pdu_data[ulp_num];
defq_ring->id = dq->id;
if (!phba->fw_config.dual_ulp_aware) {
defq_ring->ulp_num = BEISCSI_ULP0;
defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
} else {
defq_ring->ulp_num = resp->ulp_num;
defq_ring->doorbell_offset = resp->doorbell_offset;
}
} }
spin_unlock(&ctrl->mbox_lock); spin_unlock(&ctrl->mbox_lock);
return status; return status;
} }
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, /**
struct be_queue_info *wrbq) * be_cmd_wrbq_create()- Create WRBQ
* @ctrl: ptr to ctrl_info
* @q_mem: memory details for the queue
* @wrbq: queue info
* @pwrb_context: ptr to wrb_context
* @ulp_num: ULP on which the WRBQ is to be created
*
* Create WRBQ on the passed ULP_NUM.
*
**/
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem,
struct be_queue_info *wrbq,
struct hwi_wrb_context *pwrb_context,
uint8_t ulp_num)
{ {
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_wrbq_create_req *req = embedded_payload(wrb); struct be_wrbq_create_req *req = embedded_payload(wrb);
struct be_wrbq_create_resp *resp = embedded_payload(wrb); struct be_wrbq_create_resp *resp = embedded_payload(wrb);
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
int status; int status;
spin_lock(&ctrl->mbox_lock); spin_lock(&ctrl->mbox_lock);
@ -1093,17 +1176,78 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req)); OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
if (phba->fw_config.dual_ulp_aware) {
req->ulp_num = ulp_num;
req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
}
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
status = be_mbox_notify(ctrl); status = be_mbox_notify(ctrl);
if (!status) { if (!status) {
wrbq->id = le16_to_cpu(resp->cid); wrbq->id = le16_to_cpu(resp->cid);
wrbq->created = true; wrbq->created = true;
pwrb_context->cid = wrbq->id;
if (!phba->fw_config.dual_ulp_aware) {
pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
pwrb_context->ulp_num = BEISCSI_ULP0;
} else {
pwrb_context->ulp_num = resp->ulp_num;
pwrb_context->doorbell_offset = resp->doorbell_offset;
}
} }
spin_unlock(&ctrl->mbox_lock); spin_unlock(&ctrl->mbox_lock);
return status; return status;
} }
int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_post_template_pages_req *req = embedded_payload(wrb);
int status;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
sizeof(*req));
req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
status = be_mbox_notify(ctrl);
spin_unlock(&ctrl->mbox_lock);
return status;
}
int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_remove_template_pages_req *req = embedded_payload(wrb);
int status;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
sizeof(*req));
req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
status = be_mbox_notify(ctrl);
spin_unlock(&ctrl->mbox_lock);
return status;
}
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl, int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem, struct be_dma_mem *q_mem,
u32 page_offset, u32 num_pages) u32 page_offset, u32 num_pages)

View File

@ -40,6 +40,7 @@ struct be_mcc_wrb {
u32 tag1; /* dword 3 */ u32 tag1; /* dword 3 */
u32 rsvd; /* dword 4 */ u32 rsvd; /* dword 4 */
union { union {
#define EMBED_MBX_MAX_PAYLOAD_SIZE 220
u8 embedded_payload[236]; /* used by embedded cmds */ u8 embedded_payload[236]; /* used by embedded cmds */
struct be_sge sgl[19]; /* used by non-embedded cmds */ struct be_sge sgl[19]; /* used by non-embedded cmds */
} payload; } payload;
@ -162,6 +163,8 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_CQ_CREATE 12 #define OPCODE_COMMON_CQ_CREATE 12
#define OPCODE_COMMON_EQ_CREATE 13 #define OPCODE_COMMON_EQ_CREATE 13
#define OPCODE_COMMON_MCC_CREATE 21 #define OPCODE_COMMON_MCC_CREATE 21
#define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS 24
#define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS 25
#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32 #define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
#define OPCODE_COMMON_GET_FW_VERSION 35 #define OPCODE_COMMON_GET_FW_VERSION 35
#define OPCODE_COMMON_MODIFY_EQ_DELAY 41 #define OPCODE_COMMON_MODIFY_EQ_DELAY 41
@ -217,6 +220,10 @@ struct phys_addr {
u32 hi; u32 hi;
}; };
struct virt_addr {
u32 lo;
u32 hi;
};
/************************** /**************************
* BE Command definitions * * BE Command definitions *
**************************/ **************************/
@ -722,7 +729,13 @@ int be_mbox_notify(struct be_ctrl_info *ctrl);
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
struct be_queue_info *cq, struct be_queue_info *cq,
struct be_queue_info *dq, int length, struct be_queue_info *dq, int length,
int entry_size); int entry_size, uint8_t is_header,
uint8_t ulp_num);
int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem);
int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl);
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl, int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem, u32 page_offset, struct be_dma_mem *q_mem, u32 page_offset,
@ -731,7 +744,9 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
int beiscsi_cmd_reset_function(struct beiscsi_hba *phba); int beiscsi_cmd_reset_function(struct beiscsi_hba *phba);
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
struct be_queue_info *wrbq); struct be_queue_info *wrbq,
struct hwi_wrb_context *pwrb_context,
uint8_t ulp_num);
bool is_link_state_evt(u32 trailer); bool is_link_state_evt(u32 trailer);
@ -776,7 +791,9 @@ struct be_defq_create_req {
struct be_cmd_req_hdr hdr; struct be_cmd_req_hdr hdr;
u16 num_pages; u16 num_pages;
u8 ulp_num; u8 ulp_num;
u8 rsvd0; #define BEISCSI_DUAL_ULP_AWARE_BIT 0 /* Byte 3 - Bit 0 */
#define BEISCSI_BIND_Q_TO_ULP_BIT 1 /* Byte 3 - Bit 1 */
u8 dua_feature;
struct be_default_pdu_context context; struct be_default_pdu_context context;
struct phys_addr pages[8]; struct phys_addr pages[8];
} __packed; } __packed;
@ -784,6 +801,27 @@ struct be_defq_create_req {
struct be_defq_create_resp { struct be_defq_create_resp {
struct be_cmd_req_hdr hdr; struct be_cmd_req_hdr hdr;
u16 id; u16 id;
u8 rsvd0;
u8 ulp_num;
u32 doorbell_offset;
u16 register_set;
u16 doorbell_format;
} __packed;
struct be_post_template_pages_req {
struct be_cmd_req_hdr hdr;
u16 num_pages;
#define BEISCSI_TEMPLATE_HDR_TYPE_ISCSI 0x1
u16 type;
struct phys_addr scratch_pa;
struct virt_addr scratch_va;
struct virt_addr pages_va;
struct phys_addr pages[16];
} __packed;
struct be_remove_template_pages_req {
struct be_cmd_req_hdr hdr;
u16 type;
u16 rsvd0; u16 rsvd0;
} __packed; } __packed;
@ -800,14 +838,18 @@ struct be_wrbq_create_req {
struct be_cmd_req_hdr hdr; struct be_cmd_req_hdr hdr;
u16 num_pages; u16 num_pages;
u8 ulp_num; u8 ulp_num;
u8 rsvd0; u8 dua_feature;
struct phys_addr pages[8]; struct phys_addr pages[8];
} __packed; } __packed;
struct be_wrbq_create_resp { struct be_wrbq_create_resp {
struct be_cmd_resp_hdr resp_hdr; struct be_cmd_resp_hdr resp_hdr;
u16 cid; u16 cid;
u16 rsvd0; u8 rsvd0;
u8 ulp_num;
u32 doorbell_offset;
u16 register_set;
u16 doorbell_format;
} __packed; } __packed;
#define SOL_CID_MASK 0x0000FFC0 #define SOL_CID_MASK 0x0000FFC0
@ -1002,6 +1044,7 @@ union tcp_upload_params {
} __packed; } __packed;
struct be_ulp_fw_cfg { struct be_ulp_fw_cfg {
#define BEISCSI_ULP_ISCSI_INI_MODE 0x10
u32 ulp_mode; u32 ulp_mode;
u32 etx_base; u32 etx_base;
u32 etx_count; u32 etx_count;
@ -1017,14 +1060,26 @@ struct be_ulp_fw_cfg {
u32 icd_count; u32 icd_count;
}; };
struct be_ulp_chain_icd {
u32 chain_base;
u32 chain_count;
};
struct be_fw_cfg { struct be_fw_cfg {
struct be_cmd_req_hdr hdr; struct be_cmd_req_hdr hdr;
u32 be_config_number; u32 be_config_number;
u32 asic_revision; u32 asic_revision;
u32 phys_port; u32 phys_port;
#define BEISCSI_FUNC_ISCSI_INI_MODE 0x10
#define BEISCSI_FUNC_DUA_MODE 0x800
u32 function_mode; u32 function_mode;
struct be_ulp_fw_cfg ulp[2]; struct be_ulp_fw_cfg ulp[2];
u32 function_caps; u32 function_caps;
u32 cqid_base;
u32 cqid_count;
u32 eqid_base;
u32 eqid_count;
struct be_ulp_chain_icd chain_icd[2];
} __packed; } __packed;
struct be_cmd_get_all_if_id_req { struct be_cmd_get_all_if_id_req {

View File

@ -58,10 +58,15 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
} }
beiscsi_ep = ep->dd_data; beiscsi_ep = ep->dd_data;
phba = beiscsi_ep->phba; phba = beiscsi_ep->phba;
shost = phba->shost;
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, if (phba->state & BE_ADAPTER_PCI_ERR) {
"BS_%d : In beiscsi_session_create\n"); beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : PCI_ERROR Recovery\n");
return NULL;
} else {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : In beiscsi_session_create\n");
}
if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) { if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@ -74,6 +79,7 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn; cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn;
} }
shost = phba->shost;
cls_session = iscsi_session_setup(&beiscsi_iscsi_transport, cls_session = iscsi_session_setup(&beiscsi_iscsi_transport,
shost, cmds_max, shost, cmds_max,
sizeof(*beiscsi_sess), sizeof(*beiscsi_sess),
@ -194,6 +200,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct beiscsi_conn *beiscsi_conn = conn->dd_data; struct beiscsi_conn *beiscsi_conn = conn->dd_data;
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
struct beiscsi_hba *phba = iscsi_host_priv(shost); struct beiscsi_hba *phba = iscsi_host_priv(shost);
struct hwi_controller *phwi_ctrlr = phba->phwi_ctrlr;
struct hwi_wrb_context *pwrb_context;
struct beiscsi_endpoint *beiscsi_ep; struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_endpoint *ep; struct iscsi_endpoint *ep;
@ -214,9 +222,13 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
return -EEXIST; return -EEXIST;
} }
pwrb_context = &phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(
beiscsi_ep->ep_cid)];
beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid; beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
beiscsi_conn->ep = beiscsi_ep; beiscsi_conn->ep = beiscsi_ep;
beiscsi_ep->conn = beiscsi_conn; beiscsi_ep->conn = beiscsi_conn;
beiscsi_conn->doorbell_offset = pwrb_context->doorbell_offset;
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : beiscsi_conn=%p conn=%p ep_cid=%d\n", "BS_%d : beiscsi_conn=%p conn=%p ep_cid=%d\n",
@ -265,13 +277,17 @@ static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
void beiscsi_create_def_ifaces(struct beiscsi_hba *phba) void beiscsi_create_def_ifaces(struct beiscsi_hba *phba)
{ {
struct be_cmd_get_if_info_resp if_info; struct be_cmd_get_if_info_resp *if_info;
if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info)) if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info)) {
beiscsi_create_ipv4_iface(phba); beiscsi_create_ipv4_iface(phba);
kfree(if_info);
}
if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info)) if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info)) {
beiscsi_create_ipv6_iface(phba); beiscsi_create_ipv6_iface(phba);
kfree(if_info);
}
} }
void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba) void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba)
@ -467,6 +483,12 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
uint32_t rm_len = dt_len; uint32_t rm_len = dt_len;
int ret = 0 ; int ret = 0 ;
if (phba->state & BE_ADAPTER_PCI_ERR) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : In PCI_ERROR Recovery\n");
return -EBUSY;
}
nla_for_each_attr(attrib, data, dt_len, rm_len) { nla_for_each_attr(attrib, data, dt_len, rm_len) {
iface_param = nla_data(attrib); iface_param = nla_data(attrib);
@ -512,59 +534,60 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
struct iscsi_iface *iface, int param, struct iscsi_iface *iface, int param,
char *buf) char *buf)
{ {
struct be_cmd_get_if_info_resp if_info; struct be_cmd_get_if_info_resp *if_info;
int len, ip_type = BE2_IPV4; int len, ip_type = BE2_IPV4;
memset(&if_info, 0, sizeof(if_info));
if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
ip_type = BE2_IPV6; ip_type = BE2_IPV6;
len = mgmt_get_if_info(phba, ip_type, &if_info); len = mgmt_get_if_info(phba, ip_type, &if_info);
if (len) if (len) {
kfree(if_info);
return len; return len;
}
switch (param) { switch (param) {
case ISCSI_NET_PARAM_IPV4_ADDR: case ISCSI_NET_PARAM_IPV4_ADDR:
len = sprintf(buf, "%pI4\n", &if_info.ip_addr.addr); len = sprintf(buf, "%pI4\n", if_info->ip_addr.addr);
break; break;
case ISCSI_NET_PARAM_IPV6_ADDR: case ISCSI_NET_PARAM_IPV6_ADDR:
len = sprintf(buf, "%pI6\n", &if_info.ip_addr.addr); len = sprintf(buf, "%pI6\n", if_info->ip_addr.addr);
break; break;
case ISCSI_NET_PARAM_IPV4_BOOTPROTO: case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
if (!if_info.dhcp_state) if (!if_info->dhcp_state)
len = sprintf(buf, "static\n"); len = sprintf(buf, "static\n");
else else
len = sprintf(buf, "dhcp\n"); len = sprintf(buf, "dhcp\n");
break; break;
case ISCSI_NET_PARAM_IPV4_SUBNET: case ISCSI_NET_PARAM_IPV4_SUBNET:
len = sprintf(buf, "%pI4\n", &if_info.ip_addr.subnet_mask); len = sprintf(buf, "%pI4\n", if_info->ip_addr.subnet_mask);
break; break;
case ISCSI_NET_PARAM_VLAN_ENABLED: case ISCSI_NET_PARAM_VLAN_ENABLED:
len = sprintf(buf, "%s\n", len = sprintf(buf, "%s\n",
(if_info.vlan_priority == BEISCSI_VLAN_DISABLE) (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
? "Disabled\n" : "Enabled\n"); ? "Disabled\n" : "Enabled\n");
break; break;
case ISCSI_NET_PARAM_VLAN_ID: case ISCSI_NET_PARAM_VLAN_ID:
if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE) if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
return -EINVAL; return -EINVAL;
else else
len = sprintf(buf, "%d\n", len = sprintf(buf, "%d\n",
(if_info.vlan_priority & (if_info->vlan_priority &
ISCSI_MAX_VLAN_ID)); ISCSI_MAX_VLAN_ID));
break; break;
case ISCSI_NET_PARAM_VLAN_PRIORITY: case ISCSI_NET_PARAM_VLAN_PRIORITY:
if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE) if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
return -EINVAL; return -EINVAL;
else else
len = sprintf(buf, "%d\n", len = sprintf(buf, "%d\n",
((if_info.vlan_priority >> 13) & ((if_info->vlan_priority >> 13) &
ISCSI_MAX_VLAN_PRIORITY)); ISCSI_MAX_VLAN_PRIORITY));
break; break;
default: default:
WARN_ON(1); WARN_ON(1);
} }
kfree(if_info);
return len; return len;
} }
@ -577,6 +600,12 @@ int be2iscsi_iface_get_param(struct iscsi_iface *iface,
struct be_cmd_get_def_gateway_resp gateway; struct be_cmd_get_def_gateway_resp gateway;
int len = -ENOSYS; int len = -ENOSYS;
if (phba->state & BE_ADAPTER_PCI_ERR) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : In PCI_ERROR Recovery\n");
return -EBUSY;
}
switch (param) { switch (param) {
case ISCSI_NET_PARAM_IPV4_ADDR: case ISCSI_NET_PARAM_IPV4_ADDR:
case ISCSI_NET_PARAM_IPV4_SUBNET: case ISCSI_NET_PARAM_IPV4_SUBNET:
@ -672,8 +701,7 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
session->max_burst = 262144; session->max_burst = 262144;
break; break;
case ISCSI_PARAM_MAX_XMIT_DLENGTH: case ISCSI_PARAM_MAX_XMIT_DLENGTH:
if ((conn->max_xmit_dlength > 65536) || if (conn->max_xmit_dlength > 65536)
(conn->max_xmit_dlength == 0))
conn->max_xmit_dlength = 65536; conn->max_xmit_dlength = 65536;
default: default:
return 0; return 0;
@ -727,7 +755,7 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost)
struct beiscsi_hba *phba = iscsi_host_priv(shost); struct beiscsi_hba *phba = iscsi_host_priv(shost);
struct iscsi_cls_host *ihost = shost->shost_data; struct iscsi_cls_host *ihost = shost->shost_data;
ihost->port_state = (phba->state == BE_ADAPTER_UP) ? ihost->port_state = (phba->state == BE_ADAPTER_LINK_UP) ?
ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN; ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN;
} }
@ -795,9 +823,16 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
struct beiscsi_hba *phba = iscsi_host_priv(shost); struct beiscsi_hba *phba = iscsi_host_priv(shost);
int status = 0; int status = 0;
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : In beiscsi_get_host_param," if (phba->state & BE_ADAPTER_PCI_ERR) {
" param= %d\n", param); beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : In PCI_ERROR Recovery\n");
return -EBUSY;
} else {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : In beiscsi_get_host_param,"
" param = %d\n", param);
}
switch (param) { switch (param) {
case ISCSI_HOST_PARAM_HWADDRESS: case ISCSI_HOST_PARAM_HWADDRESS:
@ -840,7 +875,7 @@ int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
struct be_cmd_get_nic_conf_resp resp; struct be_cmd_get_nic_conf_resp resp;
int rc; int rc;
if (strlen(phba->mac_address)) if (phba->mac_addr_set)
return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN); return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
memset(&resp, 0, sizeof(resp)); memset(&resp, 0, sizeof(resp));
@ -848,6 +883,7 @@ int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
if (rc) if (rc)
return rc; return rc;
phba->mac_addr_set = true;
memcpy(phba->mac_address, resp.mac_address, ETH_ALEN); memcpy(phba->mac_address, resp.mac_address, ETH_ALEN);
return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN); return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
} }
@ -923,6 +959,10 @@ static void beiscsi_set_params_for_offld(struct beiscsi_conn *beiscsi_conn,
session->max_r2t); session->max_r2t);
AMAP_SET_BITS(struct amap_beiscsi_offload_params, exp_statsn, params, AMAP_SET_BITS(struct amap_beiscsi_offload_params, exp_statsn, params,
(conn->exp_statsn - 1)); (conn->exp_statsn - 1));
AMAP_SET_BITS(struct amap_beiscsi_offload_params,
max_recv_data_segment_length, params,
conn->max_recv_dlength);
} }
/** /**
@ -935,10 +975,19 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
struct beiscsi_conn *beiscsi_conn = conn->dd_data; struct beiscsi_conn *beiscsi_conn = conn->dd_data;
struct beiscsi_endpoint *beiscsi_ep; struct beiscsi_endpoint *beiscsi_ep;
struct beiscsi_offload_params params; struct beiscsi_offload_params params;
struct beiscsi_hba *phba;
beiscsi_log(beiscsi_conn->phba, KERN_INFO, phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
BEISCSI_LOG_CONFIG,
"BS_%d : In beiscsi_conn_start\n"); if (phba->state & BE_ADAPTER_PCI_ERR) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : In PCI_ERROR Recovery\n");
return -EBUSY;
} else {
beiscsi_log(beiscsi_conn->phba, KERN_INFO,
BEISCSI_LOG_CONFIG,
"BS_%d : In beiscsi_conn_start\n");
}
memset(&params, 0, sizeof(struct beiscsi_offload_params)); memset(&params, 0, sizeof(struct beiscsi_offload_params));
beiscsi_ep = beiscsi_conn->ep; beiscsi_ep = beiscsi_conn->ep;
@ -960,15 +1009,31 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
*/ */
static int beiscsi_get_cid(struct beiscsi_hba *phba) static int beiscsi_get_cid(struct beiscsi_hba *phba)
{ {
unsigned short cid = 0xFFFF; unsigned short cid = 0xFFFF, cid_from_ulp;
struct ulp_cid_info *cid_info = NULL;
uint16_t cid_avlbl_ulp0, cid_avlbl_ulp1;
if (!phba->avlbl_cids) /* Find the ULP which has more CID available */
return cid; cid_avlbl_ulp0 = (phba->cid_array_info[BEISCSI_ULP0]) ?
BEISCSI_ULP0_AVLBL_CID(phba) : 0;
cid_avlbl_ulp1 = (phba->cid_array_info[BEISCSI_ULP1]) ?
BEISCSI_ULP1_AVLBL_CID(phba) : 0;
cid_from_ulp = (cid_avlbl_ulp0 > cid_avlbl_ulp1) ?
BEISCSI_ULP0 : BEISCSI_ULP1;
cid = phba->cid_array[phba->cid_alloc++]; if (test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported)) {
if (phba->cid_alloc == phba->params.cxns_per_ctrl) cid_info = phba->cid_array_info[cid_from_ulp];
phba->cid_alloc = 0; if (!cid_info->avlbl_cids)
phba->avlbl_cids--; return cid;
cid = cid_info->cid_array[cid_info->cid_alloc++];
if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT(
phba, cid_from_ulp))
cid_info->cid_alloc = 0;
cid_info->avlbl_cids--;
}
return cid; return cid;
} }
@ -979,10 +1044,22 @@ static int beiscsi_get_cid(struct beiscsi_hba *phba)
*/ */
static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid) static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
{ {
phba->avlbl_cids++; uint16_t cid_post_ulp;
phba->cid_array[phba->cid_free++] = cid; struct hwi_controller *phwi_ctrlr;
if (phba->cid_free == phba->params.cxns_per_ctrl) struct hwi_wrb_context *pwrb_context;
phba->cid_free = 0; struct ulp_cid_info *cid_info = NULL;
uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
phwi_ctrlr = phba->phwi_ctrlr;
pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
cid_post_ulp = pwrb_context->ulp_num;
cid_info = phba->cid_array_info[cid_post_ulp];
cid_info->avlbl_cids++;
cid_info->cid_array[cid_info->cid_free++] = cid;
if (cid_info->cid_free == BEISCSI_GET_CID_COUNT(phba, cid_post_ulp))
cid_info->cid_free = 0;
} }
/** /**
@ -1135,7 +1212,12 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
return ERR_PTR(ret); return ERR_PTR(ret);
} }
if (phba->state != BE_ADAPTER_UP) { if (phba->state & BE_ADAPTER_PCI_ERR) {
ret = -EBUSY;
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : In PCI_ERROR Recovery\n");
return ERR_PTR(ret);
} else if (phba->state & BE_ADAPTER_LINK_DOWN) {
ret = -EBUSY; ret = -EBUSY;
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BS_%d : The Adapter Port state is Down!!!\n"); "BS_%d : The Adapter Port state is Down!!!\n");
@ -1260,6 +1342,12 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
tcp_upload_flag = CONNECTION_UPLOAD_ABORT; tcp_upload_flag = CONNECTION_UPLOAD_ABORT;
} }
if (phba->state & BE_ADAPTER_PCI_ERR) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : PCI_ERROR Recovery\n");
goto free_ep;
}
tag = mgmt_invalidate_connection(phba, beiscsi_ep, tag = mgmt_invalidate_connection(phba, beiscsi_ep,
beiscsi_ep->ep_cid, beiscsi_ep->ep_cid,
mgmt_invalidate_flag, mgmt_invalidate_flag,
@ -1272,6 +1360,7 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
beiscsi_mccq_compl(phba, tag, NULL, NULL); beiscsi_mccq_compl(phba, tag, NULL, NULL);
beiscsi_close_conn(beiscsi_ep, tcp_upload_flag); beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
free_ep:
beiscsi_free_ep(beiscsi_ep); beiscsi_free_ep(beiscsi_ep);
beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid); beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep); iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);

File diff suppressed because it is too large Load Diff

View File

@ -26,6 +26,7 @@
#include <linux/in.h> #include <linux/in.h>
#include <linux/ctype.h> #include <linux/ctype.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/aer.h>
#include <scsi/scsi.h> #include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h> #include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h> #include <scsi/scsi_device.h>
@ -34,9 +35,8 @@
#include <scsi/libiscsi.h> #include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h> #include <scsi/scsi_transport_iscsi.h>
#include "be.h"
#define DRV_NAME "be2iscsi" #define DRV_NAME "be2iscsi"
#define BUILD_STR "10.0.467.0" #define BUILD_STR "10.0.659.0"
#define BE_NAME "Emulex OneConnect" \ #define BE_NAME "Emulex OneConnect" \
"Open-iSCSI Driver version" BUILD_STR "Open-iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver" #define DRV_DESC BE_NAME " " "Driver"
@ -66,7 +66,6 @@
#define MAX_CPUS 64 #define MAX_CPUS 64
#define BEISCSI_MAX_NUM_CPUS 7 #define BEISCSI_MAX_NUM_CPUS 7
#define OC_SKH_MAX_NUM_CPUS 31
#define BEISCSI_VER_STRLEN 32 #define BEISCSI_VER_STRLEN 32
@ -74,6 +73,7 @@
#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */ #define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
#define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */ #define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */
#define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */
#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */ #define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */
#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */ #define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */
@ -97,14 +97,19 @@
#define INVALID_SESS_HANDLE 0xFFFFFFFF #define INVALID_SESS_HANDLE 0xFFFFFFFF
#define BE_ADAPTER_UP 0x00000000 #define BE_ADAPTER_LINK_UP 0x001
#define BE_ADAPTER_LINK_DOWN 0x00000001 #define BE_ADAPTER_LINK_DOWN 0x002
#define BE_ADAPTER_PCI_ERR 0x004
#define BEISCSI_CLEAN_UNLOAD 0x01
#define BEISCSI_EEH_UNLOAD 0x02
/** /**
* hardware needs the async PDU buffers to be posted in multiples of 8 * hardware needs the async PDU buffers to be posted in multiples of 8
* So have atleast 8 of them by default * So have atleast 8 of them by default
*/ */
#define HWI_GET_ASYNC_PDU_CTX(phwi) (phwi->phwi_ctxt->pasync_ctx) #define HWI_GET_ASYNC_PDU_CTX(phwi, ulp_num) \
(phwi->phwi_ctxt->pasync_ctx[ulp_num])
/********* Memory BAR register ************/ /********* Memory BAR register ************/
#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
@ -149,29 +154,41 @@
#define DB_CQ_REARM_SHIFT (29) /* bit 29 */ #define DB_CQ_REARM_SHIFT (29) /* bit 29 */
#define GET_HWI_CONTROLLER_WS(pc) (pc->phwi_ctrlr) #define GET_HWI_CONTROLLER_WS(pc) (pc->phwi_ctrlr)
#define HWI_GET_DEF_BUFQ_ID(pc) (((struct hwi_controller *)\ #define HWI_GET_DEF_BUFQ_ID(pc, ulp_num) (((struct hwi_controller *)\
(GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data.id) (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data[ulp_num].id)
#define HWI_GET_DEF_HDRQ_ID(pc) (((struct hwi_controller *)\ #define HWI_GET_DEF_HDRQ_ID(pc, ulp_num) (((struct hwi_controller *)\
(GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr.id) (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr[ulp_num].id)
#define PAGES_REQUIRED(x) \ #define PAGES_REQUIRED(x) \
((x < PAGE_SIZE) ? 1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE)) ((x < PAGE_SIZE) ? 1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE))
#define BEISCSI_MSI_NAME 20 /* size of msi_name string */ #define BEISCSI_MSI_NAME 20 /* size of msi_name string */
#define MEM_DESCR_OFFSET 8
#define BEISCSI_DEFQ_HDR 1
#define BEISCSI_DEFQ_DATA 0
enum be_mem_enum { enum be_mem_enum {
HWI_MEM_ADDN_CONTEXT, HWI_MEM_ADDN_CONTEXT,
HWI_MEM_WRB, HWI_MEM_WRB,
HWI_MEM_WRBH, HWI_MEM_WRBH,
HWI_MEM_SGLH, HWI_MEM_SGLH,
HWI_MEM_SGE, HWI_MEM_SGE,
HWI_MEM_ASYNC_HEADER_BUF, /* 5 */ HWI_MEM_TEMPLATE_HDR_ULP0,
HWI_MEM_ASYNC_DATA_BUF, HWI_MEM_ASYNC_HEADER_BUF_ULP0, /* 6 */
HWI_MEM_ASYNC_HEADER_RING, HWI_MEM_ASYNC_DATA_BUF_ULP0,
HWI_MEM_ASYNC_DATA_RING, HWI_MEM_ASYNC_HEADER_RING_ULP0,
HWI_MEM_ASYNC_HEADER_HANDLE, HWI_MEM_ASYNC_DATA_RING_ULP0,
HWI_MEM_ASYNC_DATA_HANDLE, /* 10 */ HWI_MEM_ASYNC_HEADER_HANDLE_ULP0,
HWI_MEM_ASYNC_PDU_CONTEXT, HWI_MEM_ASYNC_DATA_HANDLE_ULP0, /* 11 */
HWI_MEM_ASYNC_PDU_CONTEXT_ULP0,
HWI_MEM_TEMPLATE_HDR_ULP1,
HWI_MEM_ASYNC_HEADER_BUF_ULP1, /* 14 */
HWI_MEM_ASYNC_DATA_BUF_ULP1,
HWI_MEM_ASYNC_HEADER_RING_ULP1,
HWI_MEM_ASYNC_DATA_RING_ULP1,
HWI_MEM_ASYNC_HEADER_HANDLE_ULP1,
HWI_MEM_ASYNC_DATA_HANDLE_ULP1, /* 19 */
HWI_MEM_ASYNC_PDU_CONTEXT_ULP1,
ISCSI_MEM_GLOBAL_HEADER, ISCSI_MEM_GLOBAL_HEADER,
SE_MEM_MAX SE_MEM_MAX
}; };
@ -266,9 +283,49 @@ struct invalidate_command_table {
unsigned short cid; unsigned short cid;
} __packed; } __packed;
#define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \
(phwi_ctrlr->wrb_context[cri].ulp_num)
struct hwi_wrb_context {
struct list_head wrb_handle_list;
struct list_head wrb_handle_drvr_list;
struct wrb_handle **pwrb_handle_base;
struct wrb_handle **pwrb_handle_basestd;
struct iscsi_wrb *plast_wrb;
unsigned short alloc_index;
unsigned short free_index;
unsigned short wrb_handles_available;
unsigned short cid;
uint8_t ulp_num; /* ULP to which CID binded */
uint16_t register_set;
uint16_t doorbell_format;
uint32_t doorbell_offset;
};
struct ulp_cid_info {
unsigned short *cid_array;
unsigned short avlbl_cids;
unsigned short cid_alloc;
unsigned short cid_free;
};
#include "be.h"
#define chip_be2(phba) (phba->generation == BE_GEN2) #define chip_be2(phba) (phba->generation == BE_GEN2)
#define chip_be3_r(phba) (phba->generation == BE_GEN3) #define chip_be3_r(phba) (phba->generation == BE_GEN3)
#define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba))) #define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba)))
#define BEISCSI_ULP0 0
#define BEISCSI_ULP1 1
#define BEISCSI_ULP_COUNT 2
#define BEISCSI_ULP0_LOADED 0x01
#define BEISCSI_ULP1_LOADED 0x02
#define BEISCSI_ULP_AVLBL_CID(phba, ulp_num) \
(((struct ulp_cid_info *)phba->cid_array_info[ulp_num])->avlbl_cids)
#define BEISCSI_ULP0_AVLBL_CID(phba) \
BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP0)
#define BEISCSI_ULP1_AVLBL_CID(phba) \
BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP1)
struct beiscsi_hba { struct beiscsi_hba {
struct hba_parameters params; struct hba_parameters params;
struct hwi_controller *phwi_ctrlr; struct hwi_controller *phwi_ctrlr;
@ -303,17 +360,15 @@ struct beiscsi_hba {
spinlock_t io_sgl_lock; spinlock_t io_sgl_lock;
spinlock_t mgmt_sgl_lock; spinlock_t mgmt_sgl_lock;
spinlock_t isr_lock; spinlock_t isr_lock;
spinlock_t async_pdu_lock;
unsigned int age; unsigned int age;
unsigned short avlbl_cids;
unsigned short cid_alloc;
unsigned short cid_free;
struct list_head hba_queue; struct list_head hba_queue;
#define BE_MAX_SESSION 2048 #define BE_MAX_SESSION 2048
#define BE_SET_CID_TO_CRI(cri_index, cid) \ #define BE_SET_CID_TO_CRI(cri_index, cid) \
(phba->cid_to_cri_map[cid] = cri_index) (phba->cid_to_cri_map[cid] = cri_index)
#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid]) #define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid])
unsigned short cid_to_cri_map[BE_MAX_SESSION]; unsigned short cid_to_cri_map[BE_MAX_SESSION];
unsigned short *cid_array; struct ulp_cid_info *cid_array_info[BEISCSI_ULP_COUNT];
struct iscsi_endpoint **ep_array; struct iscsi_endpoint **ep_array;
struct beiscsi_conn **conn_table; struct beiscsi_conn **conn_table;
struct iscsi_boot_kset *boot_kset; struct iscsi_boot_kset *boot_kset;
@ -325,20 +380,21 @@ struct beiscsi_hba {
* group together since they are used most frequently * group together since they are used most frequently
* for cid to cri conversion * for cid to cri conversion
*/ */
unsigned int iscsi_cid_start;
unsigned int phys_port; unsigned int phys_port;
unsigned int eqid_count;
unsigned int cqid_count;
unsigned int iscsi_cid_start[BEISCSI_ULP_COUNT];
#define BEISCSI_GET_CID_COUNT(phba, ulp_num) \
(phba->fw_config.iscsi_cid_count[ulp_num])
unsigned int iscsi_cid_count[BEISCSI_ULP_COUNT];
unsigned int iscsi_icd_count[BEISCSI_ULP_COUNT];
unsigned int iscsi_icd_start[BEISCSI_ULP_COUNT];
unsigned int iscsi_chain_start[BEISCSI_ULP_COUNT];
unsigned int iscsi_chain_count[BEISCSI_ULP_COUNT];
unsigned int isr_offset;
unsigned int iscsi_icd_start;
unsigned int iscsi_cid_count;
unsigned int iscsi_icd_count;
unsigned int pci_function;
unsigned short cid_alloc;
unsigned short cid_free;
unsigned short avlbl_cids;
unsigned short iscsi_features; unsigned short iscsi_features;
spinlock_t cid_lock; uint16_t dual_ulp_aware;
unsigned long ulp_supported;
} fw_config; } fw_config;
unsigned int state; unsigned int state;
@ -346,6 +402,7 @@ struct beiscsi_hba {
bool ue_detected; bool ue_detected;
struct delayed_work beiscsi_hw_check_task; struct delayed_work beiscsi_hw_check_task;
bool mac_addr_set;
u8 mac_address[ETH_ALEN]; u8 mac_address[ETH_ALEN];
char fw_ver_str[BEISCSI_VER_STRLEN]; char fw_ver_str[BEISCSI_VER_STRLEN];
char wq_name[20]; char wq_name[20];
@ -374,6 +431,7 @@ struct beiscsi_conn {
struct iscsi_conn *conn; struct iscsi_conn *conn;
struct beiscsi_hba *phba; struct beiscsi_hba *phba;
u32 exp_statsn; u32 exp_statsn;
u32 doorbell_offset;
u32 beiscsi_conn_cid; u32 beiscsi_conn_cid;
struct beiscsi_endpoint *ep; struct beiscsi_endpoint *ep;
unsigned short login_in_progress; unsigned short login_in_progress;
@ -474,7 +532,7 @@ struct amap_iscsi_sge {
}; };
struct beiscsi_offload_params { struct beiscsi_offload_params {
u32 dw[5]; u32 dw[6];
}; };
#define OFFLD_PARAMS_ERL 0x00000003 #define OFFLD_PARAMS_ERL 0x00000003
@ -504,6 +562,7 @@ struct amap_beiscsi_offload_params {
u8 max_r2t[16]; u8 max_r2t[16];
u8 pad[8]; u8 pad[8];
u8 exp_statsn[32]; u8 exp_statsn[32];
u8 max_recv_data_segment_length[32];
}; };
/* void hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, /* void hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
@ -567,7 +626,8 @@ struct hwi_async_pdu_context {
unsigned int buffer_size; unsigned int buffer_size;
unsigned int num_entries; unsigned int num_entries;
#define BE_GET_ASYNC_CRI_FROM_CID(cid) (pasync_ctx->cid_to_async_cri_map[cid])
unsigned short cid_to_async_cri_map[BE_MAX_SESSION];
/** /**
* This is a varying size list! Do not add anything * This is a varying size list! Do not add anything
* after this entry!! * after this entry!!
@ -885,30 +945,32 @@ struct amap_iscsi_target_context_update_wrb_v2 {
u8 first_burst_length[24]; /* DWORD 3 */ u8 first_burst_length[24]; /* DWORD 3 */
u8 rsvd3[8]; /* DOWRD 3 */ u8 rsvd3[8]; /* DOWRD 3 */
u8 max_r2t[16]; /* DWORD 4 */ u8 max_r2t[16]; /* DWORD 4 */
u8 rsvd4[10]; /* DWORD 4 */ u8 rsvd4; /* DWORD 4 */
u8 hde; /* DWORD 4 */ u8 hde; /* DWORD 4 */
u8 dde; /* DWORD 4 */ u8 dde; /* DWORD 4 */
u8 erl[2]; /* DWORD 4 */ u8 erl[2]; /* DWORD 4 */
u8 rsvd5[6]; /* DWORD 4 */
u8 imd; /* DWORD 4 */ u8 imd; /* DWORD 4 */
u8 ir2t; /* DWORD 4 */ u8 ir2t; /* DWORD 4 */
u8 rsvd6[3]; /* DWORD 4 */
u8 stat_sn[32]; /* DWORD 5 */ u8 stat_sn[32]; /* DWORD 5 */
u8 rsvd5[32]; /* DWORD 6 */ u8 rsvd7[32]; /* DWORD 6 */
u8 rsvd6[32]; /* DWORD 7 */ u8 rsvd8[32]; /* DWORD 7 */
u8 max_recv_dataseg_len[24]; /* DWORD 8 */ u8 max_recv_dataseg_len[24]; /* DWORD 8 */
u8 rsvd7[8]; /* DWORD 8 */ u8 rsvd9[8]; /* DWORD 8 */
u8 rsvd8[32]; /* DWORD 9 */ u8 rsvd10[32]; /* DWORD 9 */
u8 rsvd9[32]; /* DWORD 10 */ u8 rsvd11[32]; /* DWORD 10 */
u8 max_cxns[16]; /* DWORD 11 */ u8 max_cxns[16]; /* DWORD 11 */
u8 rsvd10[11]; /* DWORD 11*/ u8 rsvd12[11]; /* DWORD 11*/
u8 invld; /* DWORD 11 */ u8 invld; /* DWORD 11 */
u8 rsvd11;/* DWORD 11*/ u8 rsvd13;/* DWORD 11*/
u8 dmsg; /* DWORD 11 */ u8 dmsg; /* DWORD 11 */
u8 data_seq_inorder; /* DWORD 11 */ u8 data_seq_inorder; /* DWORD 11 */
u8 pdu_seq_inorder; /* DWORD 11 */ u8 pdu_seq_inorder; /* DWORD 11 */
u8 rsvd12[32]; /*DWORD 12 */ u8 rsvd14[32]; /*DWORD 12 */
u8 rsvd13[32]; /* DWORD 13 */ u8 rsvd15[32]; /* DWORD 13 */
u8 rsvd14[32]; /* DWORD 14 */ u8 rsvd16[32]; /* DWORD 14 */
u8 rsvd15[32]; /* DWORD 15 */ u8 rsvd17[32]; /* DWORD 15 */
} __packed; } __packed;
@ -919,6 +981,10 @@ struct be_ring {
u32 cidx; /* consumer index */ u32 cidx; /* consumer index */
u32 pidx; /* producer index -- not used by most rings */ u32 pidx; /* producer index -- not used by most rings */
u32 item_size; /* size in bytes of one object */ u32 item_size; /* size in bytes of one object */
u8 ulp_num; /* ULP to which CID binded */
u16 register_set;
u16 doorbell_format;
u32 doorbell_offset;
void *va; /* The virtual address of the ring. This void *va; /* The virtual address of the ring. This
* should be last to allow 32 & 64 bit debugger * should be last to allow 32 & 64 bit debugger
@ -926,18 +992,6 @@ struct be_ring {
*/ */
}; };
struct hwi_wrb_context {
struct list_head wrb_handle_list;
struct list_head wrb_handle_drvr_list;
struct wrb_handle **pwrb_handle_base;
struct wrb_handle **pwrb_handle_basestd;
struct iscsi_wrb *plast_wrb;
unsigned short alloc_index;
unsigned short free_index;
unsigned short wrb_handles_available;
unsigned short cid;
};
struct hwi_controller { struct hwi_controller {
struct list_head io_sgl_list; struct list_head io_sgl_list;
struct list_head eh_sgl_list; struct list_head eh_sgl_list;
@ -946,8 +1000,8 @@ struct hwi_controller {
struct hwi_wrb_context *wrb_context; struct hwi_wrb_context *wrb_context;
struct mcc_wrb *pmcc_wrb_base; struct mcc_wrb *pmcc_wrb_base;
struct be_ring default_pdu_hdr; struct be_ring default_pdu_hdr[BEISCSI_ULP_COUNT];
struct be_ring default_pdu_data; struct be_ring default_pdu_data[BEISCSI_ULP_COUNT];
struct hwi_context_memory *phwi_ctxt; struct hwi_context_memory *phwi_ctxt;
}; };
@ -978,11 +1032,10 @@ struct hwi_context_memory {
struct be_eq_obj be_eq[MAX_CPUS]; struct be_eq_obj be_eq[MAX_CPUS];
struct be_queue_info be_cq[MAX_CPUS - 1]; struct be_queue_info be_cq[MAX_CPUS - 1];
struct be_queue_info be_def_hdrq;
struct be_queue_info be_def_dataq;
struct be_queue_info *be_wrbq; struct be_queue_info *be_wrbq;
struct hwi_async_pdu_context *pasync_ctx; struct be_queue_info be_def_hdrq[BEISCSI_ULP_COUNT];
struct be_queue_info be_def_dataq[BEISCSI_ULP_COUNT];
struct hwi_async_pdu_context *pasync_ctx[BEISCSI_ULP_COUNT];
}; };
/* Logging related definitions */ /* Logging related definitions */
@ -992,6 +1045,7 @@ struct hwi_context_memory {
#define BEISCSI_LOG_EH 0x0008 /* Error Handler */ #define BEISCSI_LOG_EH 0x0008 /* Error Handler */
#define BEISCSI_LOG_IO 0x0010 /* IO Code Path */ #define BEISCSI_LOG_IO 0x0010 /* IO Code Path */
#define BEISCSI_LOG_CONFIG 0x0020 /* CONFIG Code Path */ #define BEISCSI_LOG_CONFIG 0x0020 /* CONFIG Code Path */
#define BEISCSI_LOG_ISCSI 0x0040 /* SCSI/iSCSI Protocol related Logs */
#define beiscsi_log(phba, level, mask, fmt, arg...) \ #define beiscsi_log(phba, level, mask, fmt, arg...) \
do { \ do { \

View File

@ -278,6 +278,18 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
return tag; return tag;
} }
/**
* mgmt_get_fw_config()- Get the FW config for the function
* @ctrl: ptr to Ctrl Info
* @phba: ptr to the dev priv structure
*
* Get the FW config and resources available for the function.
* The resources are created based on the count received here.
*
* return
* Success: 0
* Failure: Non-Zero Value
**/
int mgmt_get_fw_config(struct be_ctrl_info *ctrl, int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba) struct beiscsi_hba *phba)
{ {
@ -291,31 +303,79 @@ int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req)); OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
EMBED_MBX_MAX_PAYLOAD_SIZE);
status = be_mbox_notify(ctrl); status = be_mbox_notify(ctrl);
if (!status) { if (!status) {
uint8_t ulp_num = 0;
struct be_fw_cfg *pfw_cfg; struct be_fw_cfg *pfw_cfg;
pfw_cfg = req; pfw_cfg = req;
phba->fw_config.phys_port = pfw_cfg->phys_port;
phba->fw_config.iscsi_icd_start = if (!is_chip_be2_be3r(phba)) {
pfw_cfg->ulp[0].icd_base; phba->fw_config.eqid_count = pfw_cfg->eqid_count;
phba->fw_config.iscsi_icd_count = phba->fw_config.cqid_count = pfw_cfg->cqid_count;
pfw_cfg->ulp[0].icd_count;
phba->fw_config.iscsi_cid_start = beiscsi_log(phba, KERN_INFO,
pfw_cfg->ulp[0].sq_base; BEISCSI_LOG_INIT,
phba->fw_config.iscsi_cid_count = "BG_%d : EQ_Count : %d CQ_Count : %d\n",
pfw_cfg->ulp[0].sq_count; phba->fw_config.eqid_count,
if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) { phba->fw_config.cqid_count);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"BG_%d : FW reported MAX CXNS as %d\t"
"Max Supported = %d.\n",
phba->fw_config.iscsi_cid_count,
BE2_MAX_SESSIONS);
phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2;
} }
for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
if (pfw_cfg->ulp[ulp_num].ulp_mode &
BEISCSI_ULP_ISCSI_INI_MODE)
set_bit(ulp_num,
&phba->fw_config.ulp_supported);
phba->fw_config.phys_port = pfw_cfg->phys_port;
for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
phba->fw_config.iscsi_cid_start[ulp_num] =
pfw_cfg->ulp[ulp_num].sq_base;
phba->fw_config.iscsi_cid_count[ulp_num] =
pfw_cfg->ulp[ulp_num].sq_count;
phba->fw_config.iscsi_icd_start[ulp_num] =
pfw_cfg->ulp[ulp_num].icd_base;
phba->fw_config.iscsi_icd_count[ulp_num] =
pfw_cfg->ulp[ulp_num].icd_count;
phba->fw_config.iscsi_chain_start[ulp_num] =
pfw_cfg->chain_icd[ulp_num].chain_base;
phba->fw_config.iscsi_chain_count[ulp_num] =
pfw_cfg->chain_icd[ulp_num].chain_count;
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"BG_%d : Function loaded on ULP : %d\n"
"\tiscsi_cid_count : %d\n"
"\tiscsi_cid_start : %d\n"
"\t iscsi_icd_count : %d\n"
"\t iscsi_icd_start : %d\n",
ulp_num,
phba->fw_config.
iscsi_cid_count[ulp_num],
phba->fw_config.
iscsi_cid_start[ulp_num],
phba->fw_config.
iscsi_icd_count[ulp_num],
phba->fw_config.
iscsi_icd_start[ulp_num]);
}
}
phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
BEISCSI_FUNC_DUA_MODE);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"BG_%d : DUA Mode : 0x%x\n",
phba->fw_config.dual_ulp_aware);
} else { } else {
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BG_%d : Failed in mgmt_get_fw_config\n"); "BG_%d : Failed in mgmt_get_fw_config\n");
status = -EINVAL;
} }
spin_unlock(&ctrl->mbox_lock); spin_unlock(&ctrl->mbox_lock);
@ -448,7 +508,16 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
return tag; return tag;
} }
int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute) /**
* mgmt_epfw_cleanup()- Inform FW to cleanup data structures.
* @phba: pointer to dev priv structure
* @ulp_num: ULP number.
*
* return
* Success: 0
* Failure: Non-Zero Value
**/
int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)
{ {
struct be_ctrl_info *ctrl = &phba->ctrl; struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb = wrb_from_mccq(phba); struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
@ -462,9 +531,9 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req)); OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
req->chute = chute; req->chute = (1 << ulp_num);
req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba)); req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, ulp_num));
req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba)); req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, ulp_num));
status = be_mcc_notify_wait(phba); status = be_mcc_notify_wait(phba);
if (status) if (status)
@ -585,6 +654,16 @@ unsigned int mgmt_upload_connection(struct beiscsi_hba *phba,
return tag; return tag;
} }
/**
* mgmt_open_connection()- Establish a TCP CXN
* @dst_addr: Destination Address
* @beiscsi_ep: ptr to device endpoint struct
* @nonemb_cmd: ptr to memory allocated for command
*
* return
* Success: Tag number of the MBX Command issued
* Failure: Error code
**/
int mgmt_open_connection(struct beiscsi_hba *phba, int mgmt_open_connection(struct beiscsi_hba *phba,
struct sockaddr *dst_addr, struct sockaddr *dst_addr,
struct beiscsi_endpoint *beiscsi_ep, struct beiscsi_endpoint *beiscsi_ep,
@ -602,14 +681,17 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
struct phys_addr template_address = { 0, 0 }; struct phys_addr template_address = { 0, 0 };
struct phys_addr *ptemplate_address; struct phys_addr *ptemplate_address;
unsigned int tag = 0; unsigned int tag = 0;
unsigned int i; unsigned int i, ulp_num;
unsigned short cid = beiscsi_ep->ep_cid; unsigned short cid = beiscsi_ep->ep_cid;
struct be_sge *sge; struct be_sge *sge;
phwi_ctrlr = phba->phwi_ctrlr; phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt; phwi_context = phwi_ctrlr->phwi_ctxt;
def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba);
def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba); ulp_num = phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(cid)].ulp_num;
def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba, ulp_num);
def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba, ulp_num);
ptemplate_address = &template_address; ptemplate_address = &template_address;
ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address); ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address);
@ -748,11 +830,14 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd->va); rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd->va);
if (rc) { if (rc) {
/* Check if the IOCTL needs to be re-issued */
if (rc == -EAGAIN)
return rc;
beiscsi_log(phba, KERN_ERR, beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BG_%d : mgmt_exec_nonemb_cmd Failed status\n"); "BG_%d : mgmt_exec_nonemb_cmd Failed status\n");
rc = -EIO;
goto free_cmd; goto free_cmd;
} }
@ -861,7 +946,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
uint32_t boot_proto) uint32_t boot_proto)
{ {
struct be_cmd_get_def_gateway_resp gtway_addr_set; struct be_cmd_get_def_gateway_resp gtway_addr_set;
struct be_cmd_get_if_info_resp if_info; struct be_cmd_get_if_info_resp *if_info;
struct be_cmd_set_dhcp_req *dhcpreq; struct be_cmd_set_dhcp_req *dhcpreq;
struct be_cmd_rel_dhcp_req *reldhcp; struct be_cmd_rel_dhcp_req *reldhcp;
struct be_dma_mem nonemb_cmd; struct be_dma_mem nonemb_cmd;
@ -872,16 +957,17 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
if (mgmt_get_all_if_id(phba)) if (mgmt_get_all_if_id(phba))
return -EIO; return -EIO;
memset(&if_info, 0, sizeof(if_info));
ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ? ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
BE2_IPV6 : BE2_IPV4 ; BE2_IPV6 : BE2_IPV4 ;
rc = mgmt_get_if_info(phba, ip_type, &if_info); rc = mgmt_get_if_info(phba, ip_type, &if_info);
if (rc) if (rc) {
kfree(if_info);
return rc; return rc;
}
if (boot_proto == ISCSI_BOOTPROTO_DHCP) { if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
if (if_info.dhcp_state) { if (if_info->dhcp_state) {
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : DHCP Already Enabled\n"); "BG_%d : DHCP Already Enabled\n");
return 0; return 0;
@ -894,9 +980,9 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
IP_V6_LEN : IP_V4_LEN; IP_V6_LEN : IP_V4_LEN;
} else { } else {
if (if_info.dhcp_state) { if (if_info->dhcp_state) {
memset(&if_info, 0, sizeof(if_info)); memset(if_info, 0, sizeof(*if_info));
rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR, OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR,
sizeof(*reldhcp)); sizeof(*reldhcp));
@ -919,8 +1005,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
} }
/* Delete the Static IP Set */ /* Delete the Static IP Set */
if (if_info.ip_addr.addr[0]) { if (if_info->ip_addr.addr[0]) {
rc = mgmt_static_ip_modify(phba, &if_info, ip_param, NULL, rc = mgmt_static_ip_modify(phba, if_info, ip_param, NULL,
IP_ACTION_DEL); IP_ACTION_DEL);
if (rc) if (rc)
return rc; return rc;
@ -966,7 +1052,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
} else { } else {
return mgmt_static_ip_modify(phba, &if_info, ip_param, return mgmt_static_ip_modify(phba, if_info, ip_param,
subnet_param, IP_ACTION_ADD); subnet_param, IP_ACTION_ADD);
} }
@ -1031,27 +1117,64 @@ int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
} }
int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type, int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
struct be_cmd_get_if_info_resp *if_info) struct be_cmd_get_if_info_resp **if_info)
{ {
struct be_cmd_get_if_info_req *req; struct be_cmd_get_if_info_req *req;
struct be_dma_mem nonemb_cmd; struct be_dma_mem nonemb_cmd;
uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp);
int rc; int rc;
if (mgmt_get_all_if_id(phba)) if (mgmt_get_all_if_id(phba))
return -EIO; return -EIO;
rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, do {
OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO, rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
sizeof(*if_info)); OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO,
if (rc) ioctl_size);
return rc; if (rc)
return rc;
req = nonemb_cmd.va; req = nonemb_cmd.va;
req->interface_hndl = phba->interface_handle; req->interface_hndl = phba->interface_handle;
req->ip_type = ip_type; req->ip_type = ip_type;
return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, if_info, /* Allocate memory for if_info */
sizeof(*if_info)); *if_info = kzalloc(ioctl_size, GFP_KERNEL);
if (!*if_info) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
"BG_%d : Memory Allocation Failure\n");
/* Free the DMA memory for the IOCTL issuing */
pci_free_consistent(phba->ctrl.pdev,
nonemb_cmd.size,
nonemb_cmd.va,
nonemb_cmd.dma);
return -ENOMEM;
}
rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, *if_info,
ioctl_size);
/* Check if the error is because of Insufficent_Buffer */
if (rc == -EAGAIN) {
/* Get the new memory size */
ioctl_size = ((struct be_cmd_resp_hdr *)
nonemb_cmd.va)->actual_resp_len;
ioctl_size += sizeof(struct be_cmd_req_hdr);
/* Free the previous allocated DMA memory */
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va,
nonemb_cmd.dma);
/* Free the virtual memory */
kfree(*if_info);
} else
break;
} while (true);
return rc;
} }
int mgmt_get_nic_conf(struct beiscsi_hba *phba, int mgmt_get_nic_conf(struct beiscsi_hba *phba,
@ -1281,7 +1404,7 @@ beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr,
} }
/** /**
* beiscsi_active_cid_disp()- Display Sessions Active * beiscsi_active_session_disp()- Display Sessions Active
* @dev: ptr to device not used. * @dev: ptr to device not used.
* @attr: device attribute, not used. * @attr: device attribute, not used.
* @buf: contains formatted text Session Count * @buf: contains formatted text Session Count
@ -1290,14 +1413,56 @@ beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr,
* size of the formatted string * size of the formatted string
**/ **/
ssize_t ssize_t
beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr, beiscsi_active_session_disp(struct device *dev, struct device_attribute *attr,
char *buf) char *buf)
{ {
struct Scsi_Host *shost = class_to_shost(dev); struct Scsi_Host *shost = class_to_shost(dev);
struct beiscsi_hba *phba = iscsi_host_priv(shost); struct beiscsi_hba *phba = iscsi_host_priv(shost);
uint16_t avlbl_cids = 0, ulp_num, len = 0, total_cids = 0;
return snprintf(buf, PAGE_SIZE, "%d\n", for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
(phba->params.cxns_per_ctrl - phba->avlbl_cids)); if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
avlbl_cids = BEISCSI_ULP_AVLBL_CID(phba, ulp_num);
total_cids = BEISCSI_GET_CID_COUNT(phba, ulp_num);
len += snprintf(buf+len, PAGE_SIZE - len,
"ULP%d : %d\n", ulp_num,
(total_cids - avlbl_cids));
} else
len += snprintf(buf+len, PAGE_SIZE - len,
"ULP%d : %d\n", ulp_num, 0);
}
return len;
}
/**
* beiscsi_free_session_disp()- Display Avaliable Session
* @dev: ptr to device not used.
* @attr: device attribute, not used.
* @buf: contains formatted text Session Count
*
* return
* size of the formatted string
**/
ssize_t
beiscsi_free_session_disp(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct beiscsi_hba *phba = iscsi_host_priv(shost);
uint16_t ulp_num, len = 0;
for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported))
len += snprintf(buf+len, PAGE_SIZE - len,
"ULP%d : %d\n", ulp_num,
BEISCSI_ULP_AVLBL_CID(phba, ulp_num));
else
len += snprintf(buf+len, PAGE_SIZE - len,
"ULP%d : %d\n", ulp_num, 0);
}
return len;
} }
/** /**
@ -1338,6 +1503,25 @@ beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr,
} }
} }
/**
* beiscsi_phys_port()- Display Physical Port Identifier
* @dev: ptr to device not used.
* @attr: device attribute, not used.
* @buf: contains formatted text port identifier
*
* return
* size of the formatted string
**/
ssize_t
beiscsi_phys_port_disp(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct beiscsi_hba *phba = iscsi_host_priv(shost);
return snprintf(buf, PAGE_SIZE, "Port Identifier : %d\n",
phba->fw_config.phys_port);
}
void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params, void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
struct wrb_handle *pwrb_handle, struct wrb_handle *pwrb_handle,
@ -1411,10 +1595,6 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
memset(pwrb, 0, sizeof(*pwrb)); memset(pwrb, 0, sizeof(*pwrb));
AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
max_burst_length, pwrb, params->dw[offsetof
(struct amap_beiscsi_offload_params,
max_burst_length) / 32]);
AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
max_burst_length, pwrb, params->dw[offsetof max_burst_length, pwrb, params->dw[offsetof
(struct amap_beiscsi_offload_params, (struct amap_beiscsi_offload_params,
@ -1436,7 +1616,9 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
params->dw[offsetof(struct amap_beiscsi_offload_params, params->dw[offsetof(struct amap_beiscsi_offload_params,
first_burst_length) / 32]); first_burst_length) / 32]);
AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
max_recv_dataseg_len, pwrb, BEISCSI_MAX_RECV_DATASEG_LEN); max_recv_dataseg_len, pwrb,
params->dw[offsetof(struct amap_beiscsi_offload_params,
max_recv_data_segment_length) / 32]);
AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
max_cxns, pwrb, BEISCSI_MAX_CXNS); max_cxns, pwrb, BEISCSI_MAX_CXNS);
AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, erl, pwrb, AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, erl, pwrb,

View File

@ -294,7 +294,7 @@ int mgmt_get_nic_conf(struct beiscsi_hba *phba,
struct be_cmd_get_nic_conf_resp *mac); struct be_cmd_get_nic_conf_resp *mac);
int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type, int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
struct be_cmd_get_if_info_resp *if_info); struct be_cmd_get_if_info_resp **if_info);
int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type, int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
struct be_cmd_get_def_gateway_resp *gateway); struct be_cmd_get_def_gateway_resp *gateway);
@ -315,12 +315,19 @@ ssize_t beiscsi_drvr_ver_disp(struct device *dev,
ssize_t beiscsi_fw_ver_disp(struct device *dev, ssize_t beiscsi_fw_ver_disp(struct device *dev,
struct device_attribute *attr, char *buf); struct device_attribute *attr, char *buf);
ssize_t beiscsi_active_cid_disp(struct device *dev, ssize_t beiscsi_active_session_disp(struct device *dev,
struct device_attribute *attr, char *buf); struct device_attribute *attr, char *buf);
ssize_t beiscsi_adap_family_disp(struct device *dev, ssize_t beiscsi_adap_family_disp(struct device *dev,
struct device_attribute *attr, char *buf); struct device_attribute *attr, char *buf);
ssize_t beiscsi_free_session_disp(struct device *dev,
struct device_attribute *attr, char *buf);
ssize_t beiscsi_phys_port_disp(struct device *dev,
struct device_attribute *attr, char *buf);
void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params, void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
struct wrb_handle *pwrb_handle, struct wrb_handle *pwrb_handle,
struct be_mem_descriptor *mem_descr); struct be_mem_descriptor *mem_descr);

View File

@ -64,7 +64,7 @@
#include "bnx2fc_constants.h" #include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc" #define BNX2FC_NAME "bnx2fc"
#define BNX2FC_VERSION "1.0.14" #define BNX2FC_VERSION "2.4.1"
#define PFX "bnx2fc: " #define PFX "bnx2fc: "

View File

@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc" #define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION #define DRV_MODULE_VERSION BNX2FC_VERSION
#define DRV_MODULE_RELDATE "Mar 08, 2013" #define DRV_MODULE_RELDATE "Sep 17, 2013"
static char version[] = static char version[] =
@ -542,8 +542,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
if (vn_port) { if (vn_port) {
port = lport_priv(vn_port); port = lport_priv(vn_port);
if (compare_ether_addr(port->data_src_addr, dest_mac) if (!ether_addr_equal(port->data_src_addr, dest_mac)) {
!= 0) {
BNX2FC_HBA_DBG(lport, "fpma mismatch\n"); BNX2FC_HBA_DBG(lport, "fpma mismatch\n");
put_cpu(); put_cpu();
kfree_skb(skb); kfree_skb(skb);
@ -1381,6 +1380,7 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
return NULL; return NULL;
} }
ctlr = fcoe_ctlr_device_priv(ctlr_dev); ctlr = fcoe_ctlr_device_priv(ctlr_dev);
ctlr->cdev = ctlr_dev;
interface = fcoe_ctlr_priv(ctlr); interface = fcoe_ctlr_priv(ctlr);
dev_hold(netdev); dev_hold(netdev);
kref_init(&interface->kref); kref_init(&interface->kref);
@ -2004,6 +2004,24 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
} }
/* Assumes rtnl_lock and the bnx2fc_dev_lock are already taken */
static int __bnx2fc_disable(struct fcoe_ctlr *ctlr)
{
struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
if (interface->enabled == true) {
if (!ctlr->lp) {
pr_err(PFX "__bnx2fc_disable: lport not found\n");
return -ENODEV;
} else {
interface->enabled = false;
fcoe_ctlr_link_down(ctlr);
fcoe_clean_pending_queue(ctlr->lp);
}
}
return 0;
}
/** /**
* Deperecated: Use bnx2fc_enabled() * Deperecated: Use bnx2fc_enabled()
*/ */
@ -2018,20 +2036,34 @@ static int bnx2fc_disable(struct net_device *netdev)
interface = bnx2fc_interface_lookup(netdev); interface = bnx2fc_interface_lookup(netdev);
ctlr = bnx2fc_to_ctlr(interface); ctlr = bnx2fc_to_ctlr(interface);
if (!interface || !ctlr->lp) {
rc = -ENODEV;
printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
} else {
interface->enabled = false;
fcoe_ctlr_link_down(ctlr);
fcoe_clean_pending_queue(ctlr->lp);
}
if (!interface) {
rc = -ENODEV;
pr_err(PFX "bnx2fc_disable: interface not found\n");
} else {
rc = __bnx2fc_disable(ctlr);
}
mutex_unlock(&bnx2fc_dev_lock); mutex_unlock(&bnx2fc_dev_lock);
rtnl_unlock(); rtnl_unlock();
return rc; return rc;
} }
static int __bnx2fc_enable(struct fcoe_ctlr *ctlr)
{
struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
if (interface->enabled == false) {
if (!ctlr->lp) {
pr_err(PFX "__bnx2fc_enable: lport not found\n");
return -ENODEV;
} else if (!bnx2fc_link_ok(ctlr->lp)) {
fcoe_ctlr_link_up(ctlr);
interface->enabled = true;
}
}
return 0;
}
/** /**
* Deprecated: Use bnx2fc_enabled() * Deprecated: Use bnx2fc_enabled()
*/ */
@ -2046,12 +2078,11 @@ static int bnx2fc_enable(struct net_device *netdev)
interface = bnx2fc_interface_lookup(netdev); interface = bnx2fc_interface_lookup(netdev);
ctlr = bnx2fc_to_ctlr(interface); ctlr = bnx2fc_to_ctlr(interface);
if (!interface || !ctlr->lp) { if (!interface) {
rc = -ENODEV; rc = -ENODEV;
printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n"); pr_err(PFX "bnx2fc_enable: interface not found\n");
} else if (!bnx2fc_link_ok(ctlr->lp)) { } else {
fcoe_ctlr_link_up(ctlr); rc = __bnx2fc_enable(ctlr);
interface->enabled = true;
} }
mutex_unlock(&bnx2fc_dev_lock); mutex_unlock(&bnx2fc_dev_lock);
@ -2072,14 +2103,12 @@ static int bnx2fc_enable(struct net_device *netdev)
static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev) static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev)
{ {
struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev); struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);
struct fc_lport *lport = ctlr->lp;
struct net_device *netdev = bnx2fc_netdev(lport);
switch (cdev->enabled) { switch (cdev->enabled) {
case FCOE_CTLR_ENABLED: case FCOE_CTLR_ENABLED:
return bnx2fc_enable(netdev); return __bnx2fc_enable(ctlr);
case FCOE_CTLR_DISABLED: case FCOE_CTLR_DISABLED:
return bnx2fc_disable(netdev); return __bnx2fc_disable(ctlr);
case FCOE_CTLR_UNUSED: case FCOE_CTLR_UNUSED:
default: default:
return -ENOTSUPP; return -ENOTSUPP;

View File

@ -1246,6 +1246,12 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
kref_put(&io_req->refcount, kref_put(&io_req->refcount,
bnx2fc_cmd_release); /* drop timer hold */ bnx2fc_cmd_release); /* drop timer hold */
rc = bnx2fc_expl_logo(lport, io_req); rc = bnx2fc_expl_logo(lport, io_req);
/* This only occurs when an task abort was requested while ABTS
is in progress. Setting the IO_CLEANUP flag will skip the
RRQ process in the case when the fw generated SCSI_CMD cmpl
was a result from the ABTS request rather than the CLEANUP
request */
set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
goto out; goto out;
} }

View File

@ -308,6 +308,8 @@ struct AdapterCtlBlk {
struct timer_list waiting_timer; struct timer_list waiting_timer;
struct timer_list selto_timer; struct timer_list selto_timer;
unsigned long last_reset;
u16 srb_count; u16 srb_count;
u8 sel_timeout; u8 sel_timeout;
@ -860,9 +862,9 @@ static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
init_timer(&acb->waiting_timer); init_timer(&acb->waiting_timer);
acb->waiting_timer.function = waiting_timeout; acb->waiting_timer.function = waiting_timeout;
acb->waiting_timer.data = (unsigned long) acb; acb->waiting_timer.data = (unsigned long) acb;
if (time_before(jiffies + to, acb->scsi_host->last_reset - HZ / 2)) if (time_before(jiffies + to, acb->last_reset - HZ / 2))
acb->waiting_timer.expires = acb->waiting_timer.expires =
acb->scsi_host->last_reset - HZ / 2 + 1; acb->last_reset - HZ / 2 + 1;
else else
acb->waiting_timer.expires = jiffies + to + 1; acb->waiting_timer.expires = jiffies + to + 1;
add_timer(&acb->waiting_timer); add_timer(&acb->waiting_timer);
@ -1319,7 +1321,7 @@ static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
udelay(500); udelay(500);
/* We may be in serious trouble. Wait some seconds */ /* We may be in serious trouble. Wait some seconds */
acb->scsi_host->last_reset = acb->last_reset =
jiffies + 3 * HZ / 2 + jiffies + 3 * HZ / 2 +
HZ * acb->eeprom.delay_time; HZ * acb->eeprom.delay_time;
@ -1462,9 +1464,9 @@ static void selto_timer(struct AdapterCtlBlk *acb)
acb->selto_timer.function = selection_timeout_missed; acb->selto_timer.function = selection_timeout_missed;
acb->selto_timer.data = (unsigned long) acb; acb->selto_timer.data = (unsigned long) acb;
if (time_before if (time_before
(jiffies + HZ, acb->scsi_host->last_reset + HZ / 2)) (jiffies + HZ, acb->last_reset + HZ / 2))
acb->selto_timer.expires = acb->selto_timer.expires =
acb->scsi_host->last_reset + HZ / 2 + 1; acb->last_reset + HZ / 2 + 1;
else else
acb->selto_timer.expires = jiffies + HZ + 1; acb->selto_timer.expires = jiffies + HZ + 1;
add_timer(&acb->selto_timer); add_timer(&acb->selto_timer);
@ -1535,7 +1537,7 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
} }
/* Allow starting of SCSI commands half a second before we allow the mid-level /* Allow starting of SCSI commands half a second before we allow the mid-level
* to queue them again after a reset */ * to queue them again after a reset */
if (time_before(jiffies, acb->scsi_host->last_reset - HZ / 2)) { if (time_before(jiffies, acb->last_reset - HZ / 2)) {
dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n"); dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n");
return 1; return 1;
} }
@ -3031,7 +3033,7 @@ static void disconnect(struct AdapterCtlBlk *acb)
dprintkl(KERN_ERR, "disconnect: No such device\n"); dprintkl(KERN_ERR, "disconnect: No such device\n");
udelay(500); udelay(500);
/* Suspend queue for a while */ /* Suspend queue for a while */
acb->scsi_host->last_reset = acb->last_reset =
jiffies + HZ / 2 + jiffies + HZ / 2 +
HZ * acb->eeprom.delay_time; HZ * acb->eeprom.delay_time;
clear_fifo(acb, "disconnectEx"); clear_fifo(acb, "disconnectEx");
@ -3053,7 +3055,7 @@ static void disconnect(struct AdapterCtlBlk *acb)
waiting_process_next(acb); waiting_process_next(acb);
} else if (srb->state & SRB_ABORT_SENT) { } else if (srb->state & SRB_ABORT_SENT) {
dcb->flag &= ~ABORT_DEV_; dcb->flag &= ~ABORT_DEV_;
acb->scsi_host->last_reset = jiffies + HZ / 2 + 1; acb->last_reset = jiffies + HZ / 2 + 1;
dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n"); dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n");
doing_srb_done(acb, DID_ABORT, srb->cmd, 1); doing_srb_done(acb, DID_ABORT, srb->cmd, 1);
waiting_process_next(acb); waiting_process_next(acb);
@ -3649,7 +3651,7 @@ static void scsi_reset_detect(struct AdapterCtlBlk *acb)
/*DC395x_write8(acb, TRM_S1040_DMA_CONTROL,STOPDMAXFER); */ /*DC395x_write8(acb, TRM_S1040_DMA_CONTROL,STOPDMAXFER); */
udelay(500); udelay(500);
/* Maybe we locked up the bus? Then lets wait even longer ... */ /* Maybe we locked up the bus? Then lets wait even longer ... */
acb->scsi_host->last_reset = acb->last_reset =
jiffies + 5 * HZ / 2 + jiffies + 5 * HZ / 2 +
HZ * acb->eeprom.delay_time; HZ * acb->eeprom.delay_time;
@ -4426,7 +4428,7 @@ static void adapter_init_scsi_host(struct Scsi_Host *host)
host->dma_channel = -1; host->dma_channel = -1;
host->unique_id = acb->io_port_base; host->unique_id = acb->io_port_base;
host->irq = acb->irq_level; host->irq = acb->irq_level;
host->last_reset = jiffies; acb->last_reset = jiffies;
host->max_id = 16; host->max_id = 16;
if (host->max_id - 1 == eeprom->scsi_id) if (host->max_id - 1 == eeprom->scsi_id)
@ -4484,7 +4486,7 @@ static void adapter_init_chip(struct AdapterCtlBlk *acb)
/*spin_unlock_irq (&io_request_lock); */ /*spin_unlock_irq (&io_request_lock); */
udelay(500); udelay(500);
acb->scsi_host->last_reset = acb->last_reset =
jiffies + HZ / 2 + jiffies + HZ / 2 +
HZ * acb->eeprom.delay_time; HZ * acb->eeprom.delay_time;

View File

@ -481,6 +481,11 @@ static int alua_check_sense(struct scsi_device *sdev,
* Power On, Reset, or Bus Device Reset, just retry. * Power On, Reset, or Bus Device Reset, just retry.
*/ */
return ADD_TO_MLQUEUE; return ADD_TO_MLQUEUE;
if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04)
/*
* Device internal reset
*/
return ADD_TO_MLQUEUE;
if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01) if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
/* /*
* Mode Parameters Changed * Mode Parameters Changed
@ -517,12 +522,13 @@ static int alua_check_sense(struct scsi_device *sdev,
/* /*
* alua_rtpg - Evaluate REPORT TARGET GROUP STATES * alua_rtpg - Evaluate REPORT TARGET GROUP STATES
* @sdev: the device to be evaluated. * @sdev: the device to be evaluated.
* @wait_for_transition: if nonzero, wait ALUA_FAILOVER_TIMEOUT seconds for device to exit transitioning state
* *
* Evaluate the Target Port Group State. * Evaluate the Target Port Group State.
* Returns SCSI_DH_DEV_OFFLINED if the path is * Returns SCSI_DH_DEV_OFFLINED if the path is
* found to be unusable. * found to be unusable.
*/ */
static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_for_transition)
{ {
struct scsi_sense_hdr sense_hdr; struct scsi_sense_hdr sense_hdr;
int len, k, off, valid_states = 0; int len, k, off, valid_states = 0;
@ -594,7 +600,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
else else
h->transition_tmo = ALUA_FAILOVER_TIMEOUT; h->transition_tmo = ALUA_FAILOVER_TIMEOUT;
if (orig_transition_tmo != h->transition_tmo) { if (wait_for_transition && (orig_transition_tmo != h->transition_tmo)) {
sdev_printk(KERN_INFO, sdev, sdev_printk(KERN_INFO, sdev,
"%s: transition timeout set to %d seconds\n", "%s: transition timeout set to %d seconds\n",
ALUA_DH_NAME, h->transition_tmo); ALUA_DH_NAME, h->transition_tmo);
@ -632,14 +638,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
switch (h->state) { switch (h->state) {
case TPGS_STATE_TRANSITIONING: case TPGS_STATE_TRANSITIONING:
if (time_before(jiffies, expiry)) { if (wait_for_transition) {
/* State transition, retry */ if (time_before(jiffies, expiry)) {
interval += 2000; /* State transition, retry */
msleep(interval); interval += 2000;
goto retry; msleep(interval);
goto retry;
}
err = SCSI_DH_RETRY;
} else {
err = SCSI_DH_OK;
} }
/* Transitioning time exceeded, set port to standby */ /* Transitioning time exceeded, set port to standby */
err = SCSI_DH_RETRY;
h->state = TPGS_STATE_STANDBY; h->state = TPGS_STATE_STANDBY;
break; break;
case TPGS_STATE_OFFLINE: case TPGS_STATE_OFFLINE:
@ -673,7 +684,7 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
if (err != SCSI_DH_OK) if (err != SCSI_DH_OK)
goto out; goto out;
err = alua_rtpg(sdev, h); err = alua_rtpg(sdev, h, 0);
if (err != SCSI_DH_OK) if (err != SCSI_DH_OK)
goto out; goto out;
@ -733,7 +744,7 @@ static int alua_activate(struct scsi_device *sdev,
int err = SCSI_DH_OK; int err = SCSI_DH_OK;
int stpg = 0; int stpg = 0;
err = alua_rtpg(sdev, h); err = alua_rtpg(sdev, h, 1);
if (err != SCSI_DH_OK) if (err != SCSI_DH_OK)
goto out; goto out;

View File

@ -786,6 +786,7 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{"IBM", "1742"}, {"IBM", "1742"},
{"IBM", "1745"}, {"IBM", "1745"},
{"IBM", "1746"}, {"IBM", "1746"},
{"IBM", "1813"},
{"IBM", "1814"}, {"IBM", "1814"},
{"IBM", "1815"}, {"IBM", "1815"},
{"IBM", "1818"}, {"IBM", "1818"},

View File

@ -448,19 +448,8 @@ static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd
} }
rmb(); rmb();
/* if ((pHba->state) & DPTI_STATE_RESET)
* TODO: I need to block here if I am processing ioctl cmds return SCSI_MLQUEUE_HOST_BUSY;
* but if the outstanding cmds all finish before the ioctl,
* the scsi-core will not know to start sending cmds to me again.
* I need to a way to restart the scsi-cores queues or should I block
* calling scsi_done on the outstanding cmds instead
* for now we don't set the IOCTL state
*/
if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
pHba->host->last_reset = jiffies;
pHba->host->resetting = 1;
return 1;
}
// TODO if the cmd->device if offline then I may need to issue a bus rescan // TODO if the cmd->device if offline then I may need to issue a bus rescan
// followed by a get_lct to see if the device is there anymore // followed by a get_lct to see if the device is there anymore
@ -1811,21 +1800,23 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
} }
do { do {
if(pHba->host) /*
* Stop any new commands from enterring the
* controller while processing the ioctl
*/
if (pHba->host) {
scsi_block_requests(pHba->host);
spin_lock_irqsave(pHba->host->host_lock, flags); spin_lock_irqsave(pHba->host->host_lock, flags);
// This state stops any new commands from enterring the }
// controller while processing the ioctl
// pHba->state |= DPTI_STATE_IOCTL;
// We can't set this now - The scsi subsystem sets host_blocked and
// the queue empties and stops. We need a way to restart the queue
rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER); rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
if (rcode != 0) if (rcode != 0)
printk("adpt_i2o_passthru: post wait failed %d %p\n", printk("adpt_i2o_passthru: post wait failed %d %p\n",
rcode, reply); rcode, reply);
// pHba->state &= ~DPTI_STATE_IOCTL; if (pHba->host) {
if(pHba->host)
spin_unlock_irqrestore(pHba->host->host_lock, flags); spin_unlock_irqrestore(pHba->host->host_lock, flags);
} while(rcode == -ETIMEDOUT); scsi_unblock_requests(pHba->host);
}
} while (rcode == -ETIMEDOUT);
if(rcode){ if(rcode){
goto cleanup; goto cleanup;

View File

@ -202,7 +202,6 @@ struct adpt_channel {
// HBA state flags // HBA state flags
#define DPTI_STATE_RESET (0x01) #define DPTI_STATE_RESET (0x01)
#define DPTI_STATE_IOCTL (0x02)
typedef struct _adpt_hba { typedef struct _adpt_hba {
struct _adpt_hba *next; struct _adpt_hba *next;

View File

@ -799,47 +799,47 @@ struct esas2r_adapter {
struct esas2r_target *targetdb_end; struct esas2r_target *targetdb_end;
unsigned char *regs; unsigned char *regs;
unsigned char *data_window; unsigned char *data_window;
u32 volatile flags; long flags;
#define AF_PORT_CHANGE (u32)(0x00000001) #define AF_PORT_CHANGE 0
#define AF_CHPRST_NEEDED (u32)(0x00000004) #define AF_CHPRST_NEEDED 1
#define AF_CHPRST_PENDING (u32)(0x00000008) #define AF_CHPRST_PENDING 2
#define AF_CHPRST_DETECTED (u32)(0x00000010) #define AF_CHPRST_DETECTED 3
#define AF_BUSRST_NEEDED (u32)(0x00000020) #define AF_BUSRST_NEEDED 4
#define AF_BUSRST_PENDING (u32)(0x00000040) #define AF_BUSRST_PENDING 5
#define AF_BUSRST_DETECTED (u32)(0x00000080) #define AF_BUSRST_DETECTED 6
#define AF_DISABLED (u32)(0x00000100) #define AF_DISABLED 7
#define AF_FLASH_LOCK (u32)(0x00000200) #define AF_FLASH_LOCK 8
#define AF_OS_RESET (u32)(0x00002000) #define AF_OS_RESET 9
#define AF_FLASHING (u32)(0x00004000) #define AF_FLASHING 10
#define AF_POWER_MGT (u32)(0x00008000) #define AF_POWER_MGT 11
#define AF_NVR_VALID (u32)(0x00010000) #define AF_NVR_VALID 12
#define AF_DEGRADED_MODE (u32)(0x00020000) #define AF_DEGRADED_MODE 13
#define AF_DISC_PENDING (u32)(0x00040000) #define AF_DISC_PENDING 14
#define AF_TASKLET_SCHEDULED (u32)(0x00080000) #define AF_TASKLET_SCHEDULED 15
#define AF_HEARTBEAT (u32)(0x00200000) #define AF_HEARTBEAT 16
#define AF_HEARTBEAT_ENB (u32)(0x00400000) #define AF_HEARTBEAT_ENB 17
#define AF_NOT_PRESENT (u32)(0x00800000) #define AF_NOT_PRESENT 18
#define AF_CHPRST_STARTED (u32)(0x01000000) #define AF_CHPRST_STARTED 19
#define AF_FIRST_INIT (u32)(0x02000000) #define AF_FIRST_INIT 20
#define AF_POWER_DOWN (u32)(0x04000000) #define AF_POWER_DOWN 21
#define AF_DISC_IN_PROG (u32)(0x08000000) #define AF_DISC_IN_PROG 22
#define AF_COMM_LIST_TOGGLE (u32)(0x10000000) #define AF_COMM_LIST_TOGGLE 23
#define AF_LEGACY_SGE_MODE (u32)(0x20000000) #define AF_LEGACY_SGE_MODE 24
#define AF_DISC_POLLED (u32)(0x40000000) #define AF_DISC_POLLED 25
u32 volatile flags2; long flags2;
#define AF2_SERIAL_FLASH (u32)(0x00000001) #define AF2_SERIAL_FLASH 0
#define AF2_DEV_SCAN (u32)(0x00000002) #define AF2_DEV_SCAN 1
#define AF2_DEV_CNT_OK (u32)(0x00000004) #define AF2_DEV_CNT_OK 2
#define AF2_COREDUMP_AVAIL (u32)(0x00000008) #define AF2_COREDUMP_AVAIL 3
#define AF2_COREDUMP_SAVED (u32)(0x00000010) #define AF2_COREDUMP_SAVED 4
#define AF2_VDA_POWER_DOWN (u32)(0x00000100) #define AF2_VDA_POWER_DOWN 5
#define AF2_THUNDERLINK (u32)(0x00000200) #define AF2_THUNDERLINK 6
#define AF2_THUNDERBOLT (u32)(0x00000400) #define AF2_THUNDERBOLT 7
#define AF2_INIT_DONE (u32)(0x00000800) #define AF2_INIT_DONE 8
#define AF2_INT_PENDING (u32)(0x00001000) #define AF2_INT_PENDING 9
#define AF2_TIMER_TICK (u32)(0x00002000) #define AF2_TIMER_TICK 10
#define AF2_IRQ_CLAIMED (u32)(0x00004000) #define AF2_IRQ_CLAIMED 11
#define AF2_MSI_ENABLED (u32)(0x00008000) #define AF2_MSI_ENABLED 12
atomic_t disable_cnt; atomic_t disable_cnt;
atomic_t dis_ints_cnt; atomic_t dis_ints_cnt;
u32 int_stat; u32 int_stat;
@ -1150,16 +1150,6 @@ void esas2r_queue_fw_event(struct esas2r_adapter *a,
int data_sz); int data_sz);
/* Inline functions */ /* Inline functions */
static inline u32 esas2r_lock_set_flags(volatile u32 *flags, u32 bits)
{
return test_and_set_bit(ilog2(bits), (volatile unsigned long *)flags);
}
static inline u32 esas2r_lock_clear_flags(volatile u32 *flags, u32 bits)
{
return test_and_clear_bit(ilog2(bits),
(volatile unsigned long *)flags);
}
/* Allocate a chip scatter/gather list entry */ /* Allocate a chip scatter/gather list entry */
static inline struct esas2r_mem_desc *esas2r_alloc_sgl(struct esas2r_adapter *a) static inline struct esas2r_mem_desc *esas2r_alloc_sgl(struct esas2r_adapter *a)
@ -1217,7 +1207,6 @@ static inline void esas2r_rq_init_request(struct esas2r_request *rq,
struct esas2r_adapter *a) struct esas2r_adapter *a)
{ {
union atto_vda_req *vrq = rq->vrq; union atto_vda_req *vrq = rq->vrq;
u32 handle;
INIT_LIST_HEAD(&rq->sg_table_head); INIT_LIST_HEAD(&rq->sg_table_head);
rq->data_buf = (void *)(vrq + 1); rq->data_buf = (void *)(vrq + 1);
@ -1253,11 +1242,9 @@ static inline void esas2r_rq_init_request(struct esas2r_request *rq,
/* /*
* add a reference number to the handle to make it unique (until it * add a reference number to the handle to make it unique (until it
* wraps of course) while preserving the upper word * wraps of course) while preserving the least significant word
*/ */
vrq->scsi.handle = (a->cmd_ref_no++ << 16) | (u16)vrq->scsi.handle;
handle = be32_to_cpu(vrq->scsi.handle) & 0xFFFF0000;
vrq->scsi.handle = cpu_to_be32(handle + a->cmd_ref_no++);
/* /*
* the following formats a SCSI request. the caller can override as * the following formats a SCSI request. the caller can override as
@ -1303,10 +1290,13 @@ static inline void esas2r_rq_destroy_request(struct esas2r_request *rq,
static inline bool esas2r_is_tasklet_pending(struct esas2r_adapter *a) static inline bool esas2r_is_tasklet_pending(struct esas2r_adapter *a)
{ {
return (a->flags & (AF_BUSRST_NEEDED | AF_BUSRST_DETECTED
| AF_CHPRST_NEEDED | AF_CHPRST_DETECTED return test_bit(AF_BUSRST_NEEDED, &a->flags) ||
| AF_PORT_CHANGE)) test_bit(AF_BUSRST_DETECTED, &a->flags) ||
? true : false; test_bit(AF_CHPRST_NEEDED, &a->flags) ||
test_bit(AF_CHPRST_DETECTED, &a->flags) ||
test_bit(AF_PORT_CHANGE, &a->flags);
} }
/* /*
@ -1345,24 +1335,24 @@ static inline void esas2r_enable_chip_interrupts(struct esas2r_adapter *a)
static inline void esas2r_schedule_tasklet(struct esas2r_adapter *a) static inline void esas2r_schedule_tasklet(struct esas2r_adapter *a)
{ {
/* make sure we don't schedule twice */ /* make sure we don't schedule twice */
if (!(esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED) & if (!test_and_set_bit(AF_TASKLET_SCHEDULED, &a->flags))
ilog2(AF_TASKLET_SCHEDULED)))
tasklet_hi_schedule(&a->tasklet); tasklet_hi_schedule(&a->tasklet);
} }
static inline void esas2r_enable_heartbeat(struct esas2r_adapter *a) static inline void esas2r_enable_heartbeat(struct esas2r_adapter *a)
{ {
if (!(a->flags & (AF_DEGRADED_MODE | AF_CHPRST_PENDING)) if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
&& (a->nvram->options2 & SASNVR2_HEARTBEAT)) !test_bit(AF_CHPRST_PENDING, &a->flags) &&
esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT_ENB); (a->nvram->options2 & SASNVR2_HEARTBEAT))
set_bit(AF_HEARTBEAT_ENB, &a->flags);
else else
esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB); clear_bit(AF_HEARTBEAT_ENB, &a->flags);
} }
static inline void esas2r_disable_heartbeat(struct esas2r_adapter *a) static inline void esas2r_disable_heartbeat(struct esas2r_adapter *a)
{ {
esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB); clear_bit(AF_HEARTBEAT_ENB, &a->flags);
esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT); clear_bit(AF_HEARTBEAT, &a->flags);
} }
/* Set the initial state for resetting the adapter on the next pass through /* Set the initial state for resetting the adapter on the next pass through
@ -1372,9 +1362,9 @@ static inline void esas2r_local_reset_adapter(struct esas2r_adapter *a)
{ {
esas2r_disable_heartbeat(a); esas2r_disable_heartbeat(a);
esas2r_lock_set_flags(&a->flags, AF_CHPRST_NEEDED); set_bit(AF_CHPRST_NEEDED, &a->flags);
esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING); set_bit(AF_CHPRST_PENDING, &a->flags);
esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING); set_bit(AF_DISC_PENDING, &a->flags);
} }
/* See if an interrupt is pending on the adapter. */ /* See if an interrupt is pending on the adapter. */

View File

@ -86,9 +86,9 @@ void esas2r_disc_initialize(struct esas2r_adapter *a)
esas2r_trace_enter(); esas2r_trace_enter();
esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG); clear_bit(AF_DISC_IN_PROG, &a->flags);
esas2r_lock_clear_flags(&a->flags2, AF2_DEV_SCAN); clear_bit(AF2_DEV_SCAN, &a->flags2);
esas2r_lock_clear_flags(&a->flags2, AF2_DEV_CNT_OK); clear_bit(AF2_DEV_CNT_OK, &a->flags2);
a->disc_start_time = jiffies_to_msecs(jiffies); a->disc_start_time = jiffies_to_msecs(jiffies);
a->disc_wait_time = nvr->dev_wait_time * 1000; a->disc_wait_time = nvr->dev_wait_time * 1000;
@ -107,7 +107,8 @@ void esas2r_disc_initialize(struct esas2r_adapter *a)
a->general_req.interrupt_cx = NULL; a->general_req.interrupt_cx = NULL;
if (a->flags & (AF_CHPRST_DETECTED | AF_POWER_MGT)) { if (test_bit(AF_CHPRST_DETECTED, &a->flags) ||
test_bit(AF_POWER_MGT, &a->flags)) {
if (a->prev_dev_cnt == 0) { if (a->prev_dev_cnt == 0) {
/* Don't bother waiting if there is nothing to wait /* Don't bother waiting if there is nothing to wait
* for. * for.
@ -212,9 +213,7 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a)
|| a->disc_wait_cnt == 0)) { || a->disc_wait_cnt == 0)) {
/* After three seconds of waiting, schedule a scan. */ /* After three seconds of waiting, schedule a scan. */
if (time >= 3000 if (time >= 3000
&& !(esas2r_lock_set_flags(&a->flags2, && !test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
AF2_DEV_SCAN) &
ilog2(AF2_DEV_SCAN))) {
spin_lock_irqsave(&a->mem_lock, flags); spin_lock_irqsave(&a->mem_lock, flags);
esas2r_disc_queue_event(a, DCDE_DEV_SCAN); esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
spin_unlock_irqrestore(&a->mem_lock, flags); spin_unlock_irqrestore(&a->mem_lock, flags);
@ -228,18 +227,14 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a)
* We are done waiting...we think. Adjust the wait time to * We are done waiting...we think. Adjust the wait time to
* consume events after the count is met. * consume events after the count is met.
*/ */
if (!(esas2r_lock_set_flags(&a->flags2, AF2_DEV_CNT_OK) if (!test_and_set_bit(AF2_DEV_CNT_OK, &a->flags2))
& ilog2(AF2_DEV_CNT_OK)))
a->disc_wait_time = time + 3000; a->disc_wait_time = time + 3000;
/* If we haven't done a full scan yet, do it now. */ /* If we haven't done a full scan yet, do it now. */
if (!(esas2r_lock_set_flags(&a->flags2, if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
AF2_DEV_SCAN) &
ilog2(AF2_DEV_SCAN))) {
spin_lock_irqsave(&a->mem_lock, flags); spin_lock_irqsave(&a->mem_lock, flags);
esas2r_disc_queue_event(a, DCDE_DEV_SCAN); esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
spin_unlock_irqrestore(&a->mem_lock, flags); spin_unlock_irqrestore(&a->mem_lock, flags);
esas2r_trace_exit(); esas2r_trace_exit();
return; return;
} }
@ -253,9 +248,7 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a)
return; return;
} }
} else { } else {
if (!(esas2r_lock_set_flags(&a->flags2, if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
AF2_DEV_SCAN) &
ilog2(AF2_DEV_SCAN))) {
spin_lock_irqsave(&a->mem_lock, flags); spin_lock_irqsave(&a->mem_lock, flags);
esas2r_disc_queue_event(a, DCDE_DEV_SCAN); esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
spin_unlock_irqrestore(&a->mem_lock, flags); spin_unlock_irqrestore(&a->mem_lock, flags);
@ -265,8 +258,8 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a)
/* We want to stop waiting for devices. */ /* We want to stop waiting for devices. */
a->disc_wait_time = 0; a->disc_wait_time = 0;
if ((a->flags & AF_DISC_POLLED) if (test_bit(AF_DISC_POLLED, &a->flags) &&
&& (a->flags & AF_DISC_IN_PROG)) { test_bit(AF_DISC_IN_PROG, &a->flags)) {
/* /*
* Polled discovery is still pending so continue the active * Polled discovery is still pending so continue the active
* discovery until it is done. At that point, we will stop * discovery until it is done. At that point, we will stop
@ -280,14 +273,14 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a)
* driven; i.e. There is no transition. * driven; i.e. There is no transition.
*/ */
esas2r_disc_fix_curr_requests(a); esas2r_disc_fix_curr_requests(a);
esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING); clear_bit(AF_DISC_PENDING, &a->flags);
/* /*
* We have deferred target state changes until now because we * We have deferred target state changes until now because we
* don't want to report any removals (due to the first arrival) * don't want to report any removals (due to the first arrival)
* until the device wait time expires. * until the device wait time expires.
*/ */
esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE); set_bit(AF_PORT_CHANGE, &a->flags);
} }
esas2r_trace_exit(); esas2r_trace_exit();
@ -308,7 +301,8 @@ void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt)
* Don't start discovery before or during polled discovery. if we did, * Don't start discovery before or during polled discovery. if we did,
* we would have a deadlock if we are in the ISR already. * we would have a deadlock if we are in the ISR already.
*/ */
if (!(a->flags & (AF_CHPRST_PENDING | AF_DISC_POLLED))) if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
!test_bit(AF_DISC_POLLED, &a->flags))
esas2r_disc_start_port(a); esas2r_disc_start_port(a);
esas2r_trace_exit(); esas2r_trace_exit();
@ -322,7 +316,7 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a)
esas2r_trace_enter(); esas2r_trace_enter();
if (a->flags & AF_DISC_IN_PROG) { if (test_bit(AF_DISC_IN_PROG, &a->flags)) {
esas2r_trace_exit(); esas2r_trace_exit();
return false; return false;
@ -330,7 +324,7 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a)
/* If there is a discovery waiting, process it. */ /* If there is a discovery waiting, process it. */
if (dc->disc_evt) { if (dc->disc_evt) {
if ((a->flags & AF_DISC_POLLED) if (test_bit(AF_DISC_POLLED, &a->flags)
&& a->disc_wait_time == 0) { && a->disc_wait_time == 0) {
/* /*
* We are doing polled discovery, but we no longer want * We are doing polled discovery, but we no longer want
@ -347,7 +341,7 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a)
esas2r_hdebug("disc done"); esas2r_hdebug("disc done");
esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE); set_bit(AF_PORT_CHANGE, &a->flags);
esas2r_trace_exit(); esas2r_trace_exit();
@ -356,10 +350,10 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a)
/* Handle the discovery context */ /* Handle the discovery context */
esas2r_trace("disc_evt: %d", dc->disc_evt); esas2r_trace("disc_evt: %d", dc->disc_evt);
esas2r_lock_set_flags(&a->flags, AF_DISC_IN_PROG); set_bit(AF_DISC_IN_PROG, &a->flags);
dc->flags = 0; dc->flags = 0;
if (a->flags & AF_DISC_POLLED) if (test_bit(AF_DISC_POLLED, &a->flags))
dc->flags |= DCF_POLLED; dc->flags |= DCF_POLLED;
rq->interrupt_cx = dc; rq->interrupt_cx = dc;
@ -379,7 +373,7 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a)
} }
/* Continue interrupt driven discovery */ /* Continue interrupt driven discovery */
if (!(a->flags & AF_DISC_POLLED)) if (!test_bit(AF_DISC_POLLED, &a->flags))
ret = esas2r_disc_continue(a, rq); ret = esas2r_disc_continue(a, rq);
else else
ret = true; ret = true;
@ -453,10 +447,10 @@ static bool esas2r_disc_continue(struct esas2r_adapter *a,
/* Discovery is done...for now. */ /* Discovery is done...for now. */
rq->interrupt_cx = NULL; rq->interrupt_cx = NULL;
if (!(a->flags & AF_DISC_PENDING)) if (!test_bit(AF_DISC_PENDING, &a->flags))
esas2r_disc_fix_curr_requests(a); esas2r_disc_fix_curr_requests(a);
esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG); clear_bit(AF_DISC_IN_PROG, &a->flags);
/* Start the next discovery. */ /* Start the next discovery. */
return esas2r_disc_start_port(a); return esas2r_disc_start_port(a);
@ -480,7 +474,8 @@ static bool esas2r_disc_start_request(struct esas2r_adapter *a,
spin_lock_irqsave(&a->queue_lock, flags); spin_lock_irqsave(&a->queue_lock, flags);
if (!(a->flags & (AF_CHPRST_PENDING | AF_FLASHING))) if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
!test_bit(AF_FLASHING, &a->flags))
esas2r_disc_local_start_request(a, rq); esas2r_disc_local_start_request(a, rq);
else else
list_add_tail(&rq->req_list, &a->defer_list); list_add_tail(&rq->req_list, &a->defer_list);

View File

@ -231,7 +231,7 @@ static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq)
* RS_PENDING, FM API tasks will continue. * RS_PENDING, FM API tasks will continue.
*/ */
rq->req_stat = RS_PENDING; rq->req_stat = RS_PENDING;
if (a->flags & AF_DEGRADED_MODE) if (test_bit(AF_DEGRADED_MODE, &a->flags))
/* not suppported for now */; /* not suppported for now */;
else else
build_flash_msg(a, rq); build_flash_msg(a, rq);
@ -315,7 +315,7 @@ static bool complete_fmapi_req(struct esas2r_adapter *a,
memset(fc->scratch, 0, FM_BUF_SZ); memset(fc->scratch, 0, FM_BUF_SZ);
esas2r_enable_heartbeat(a); esas2r_enable_heartbeat(a);
esas2r_lock_clear_flags(&a->flags, AF_FLASH_LOCK); clear_bit(AF_FLASH_LOCK, &a->flags);
return false; return false;
} }
@ -526,7 +526,7 @@ no_cfg:
* The download is complete. If in degraded mode, * The download is complete. If in degraded mode,
* attempt a chip reset. * attempt a chip reset.
*/ */
if (a->flags & AF_DEGRADED_MODE) if (test_bit(AF_DEGRADED_MODE, &a->flags))
esas2r_local_reset_adapter(a); esas2r_local_reset_adapter(a);
a->flash_ver = fi->cmp_hdr[CH_IT_BIOS].version; a->flash_ver = fi->cmp_hdr[CH_IT_BIOS].version;
@ -890,7 +890,7 @@ bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
} }
} }
if (a->flags & AF_DEGRADED_MODE) { if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
fs->status = ATTO_STS_DEGRADED; fs->status = ATTO_STS_DEGRADED;
return false; return false;
} }
@ -945,8 +945,12 @@ static bool esas2r_flash_access(struct esas2r_adapter *a, u32 function)
/* Now wait for the firmware to process it */ /* Now wait for the firmware to process it */
starttime = jiffies_to_msecs(jiffies); starttime = jiffies_to_msecs(jiffies);
timeout = a->flags &
(AF_CHPRST_PENDING | AF_DISC_PENDING) ? 40000 : 5000; if (test_bit(AF_CHPRST_PENDING, &a->flags) ||
test_bit(AF_DISC_PENDING, &a->flags))
timeout = 40000;
else
timeout = 5000;
while (true) { while (true) {
intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT); intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
@ -1008,7 +1012,7 @@ bool esas2r_read_flash_block(struct esas2r_adapter *a,
u32 offset; u32 offset;
u32 iatvr; u32 iatvr;
if (a->flags2 & AF2_SERIAL_FLASH) if (test_bit(AF2_SERIAL_FLASH, &a->flags2))
iatvr = MW_DATA_ADDR_SER_FLASH + (from & -WINDOW_SIZE); iatvr = MW_DATA_ADDR_SER_FLASH + (from & -WINDOW_SIZE);
else else
iatvr = MW_DATA_ADDR_PAR_FLASH + (from & -WINDOW_SIZE); iatvr = MW_DATA_ADDR_PAR_FLASH + (from & -WINDOW_SIZE);
@ -1236,9 +1240,9 @@ static void esas2r_nvram_callback(struct esas2r_adapter *a,
if (rq->req_stat != RS_PENDING) { if (rq->req_stat != RS_PENDING) {
/* update the NVRAM state */ /* update the NVRAM state */
if (rq->req_stat == RS_SUCCESS) if (rq->req_stat == RS_SUCCESS)
esas2r_lock_set_flags(&a->flags, AF_NVR_VALID); set_bit(AF_NVR_VALID, &a->flags);
else else
esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID); clear_bit(AF_NVR_VALID, &a->flags);
esas2r_enable_heartbeat(a); esas2r_enable_heartbeat(a);
@ -1258,7 +1262,7 @@ bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
u32 *sas_address_dwords = (u32 *)&sas_address_bytes[0]; u32 *sas_address_dwords = (u32 *)&sas_address_bytes[0];
struct atto_vda_flash_req *vrq = &rq->vrq->flash; struct atto_vda_flash_req *vrq = &rq->vrq->flash;
if (a->flags & AF_DEGRADED_MODE) if (test_bit(AF_DEGRADED_MODE, &a->flags))
return false; return false;
if (down_interruptible(&a->nvram_semaphore)) if (down_interruptible(&a->nvram_semaphore))
@ -1302,7 +1306,7 @@ bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
FLS_OFFSET_NVR, FLS_OFFSET_NVR,
sizeof(struct esas2r_sas_nvram)); sizeof(struct esas2r_sas_nvram));
if (a->flags & AF_LEGACY_SGE_MODE) { if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
vrq->data.sge[0].length = vrq->data.sge[0].length =
cpu_to_le32(SGE_LAST | cpu_to_le32(SGE_LAST |
@ -1337,7 +1341,7 @@ bool esas2r_nvram_validate(struct esas2r_adapter *a)
} else if (n->version > SASNVR_VERSION) { } else if (n->version > SASNVR_VERSION) {
esas2r_hdebug("invalid NVRAM version"); esas2r_hdebug("invalid NVRAM version");
} else { } else {
esas2r_lock_set_flags(&a->flags, AF_NVR_VALID); set_bit(AF_NVR_VALID, &a->flags);
rslt = true; rslt = true;
} }
@ -1359,7 +1363,7 @@ void esas2r_nvram_set_defaults(struct esas2r_adapter *a)
struct esas2r_sas_nvram *n = a->nvram; struct esas2r_sas_nvram *n = a->nvram;
u32 time = jiffies_to_msecs(jiffies); u32 time = jiffies_to_msecs(jiffies);
esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID); clear_bit(AF_NVR_VALID, &a->flags);
*n = default_sas_nvram; *n = default_sas_nvram;
n->sas_addr[3] |= 0x0F; n->sas_addr[3] |= 0x0F;
n->sas_addr[4] = HIBYTE(LOWORD(time)); n->sas_addr[4] = HIBYTE(LOWORD(time));
@ -1389,7 +1393,7 @@ bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
u8 j; u8 j;
struct esas2r_component_header *ch; struct esas2r_component_header *ch;
if (esas2r_lock_set_flags(&a->flags, AF_FLASH_LOCK) & AF_FLASH_LOCK) { if (test_and_set_bit(AF_FLASH_LOCK, &a->flags)) {
/* flag was already set */ /* flag was already set */
fi->status = FI_STAT_BUSY; fi->status = FI_STAT_BUSY;
return false; return false;
@ -1413,7 +1417,7 @@ bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
return complete_fmapi_req(a, rq, FI_STAT_IMG_VER); return complete_fmapi_req(a, rq, FI_STAT_IMG_VER);
} }
if (a->flags & AF_DEGRADED_MODE) if (test_bit(AF_DEGRADED_MODE, &a->flags))
return complete_fmapi_req(a, rq, FI_STAT_DEGRADED); return complete_fmapi_req(a, rq, FI_STAT_DEGRADED);
switch (fi->action) { switch (fi->action) {

View File

@ -216,7 +216,7 @@ use_legacy_interrupts:
goto use_legacy_interrupts; goto use_legacy_interrupts;
} }
a->intr_mode = INTR_MODE_MSI; a->intr_mode = INTR_MODE_MSI;
esas2r_lock_set_flags(&a->flags2, AF2_MSI_ENABLED); set_bit(AF2_MSI_ENABLED, &a->flags2);
break; break;
@ -252,7 +252,7 @@ static void esas2r_claim_interrupts(struct esas2r_adapter *a)
return; return;
} }
esas2r_lock_set_flags(&a->flags2, AF2_IRQ_CLAIMED); set_bit(AF2_IRQ_CLAIMED, &a->flags2);
esas2r_log(ESAS2R_LOG_INFO, esas2r_log(ESAS2R_LOG_INFO,
"claimed IRQ %d flags: 0x%lx", "claimed IRQ %d flags: 0x%lx",
a->pcid->irq, flags); a->pcid->irq, flags);
@ -380,10 +380,10 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
/* interrupts will be disabled until we are done with init */ /* interrupts will be disabled until we are done with init */
atomic_inc(&a->dis_ints_cnt); atomic_inc(&a->dis_ints_cnt);
atomic_inc(&a->disable_cnt); atomic_inc(&a->disable_cnt);
a->flags |= AF_CHPRST_PENDING set_bit(AF_CHPRST_PENDING, &a->flags);
| AF_DISC_PENDING set_bit(AF_DISC_PENDING, &a->flags);
| AF_FIRST_INIT set_bit(AF_FIRST_INIT, &a->flags);
| AF_LEGACY_SGE_MODE; set_bit(AF_LEGACY_SGE_MODE, &a->flags);
a->init_msg = ESAS2R_INIT_MSG_START; a->init_msg = ESAS2R_INIT_MSG_START;
a->max_vdareq_size = 128; a->max_vdareq_size = 128;
@ -440,11 +440,11 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
esas2r_claim_interrupts(a); esas2r_claim_interrupts(a);
if (a->flags2 & AF2_IRQ_CLAIMED) if (test_bit(AF2_IRQ_CLAIMED, &a->flags2))
esas2r_enable_chip_interrupts(a); esas2r_enable_chip_interrupts(a);
esas2r_lock_set_flags(&a->flags2, AF2_INIT_DONE); set_bit(AF2_INIT_DONE, &a->flags2);
if (!(a->flags & AF_DEGRADED_MODE)) if (!test_bit(AF_DEGRADED_MODE, &a->flags))
esas2r_kickoff_timer(a); esas2r_kickoff_timer(a);
esas2r_debug("esas2r_init_adapter done for %p (%d)", esas2r_debug("esas2r_init_adapter done for %p (%d)",
a, a->disable_cnt); a, a->disable_cnt);
@ -457,8 +457,8 @@ static void esas2r_adapter_power_down(struct esas2r_adapter *a,
{ {
struct esas2r_mem_desc *memdesc, *next; struct esas2r_mem_desc *memdesc, *next;
if ((a->flags2 & AF2_INIT_DONE) if ((test_bit(AF2_INIT_DONE, &a->flags2))
&& (!(a->flags & AF_DEGRADED_MODE))) { && (!test_bit(AF_DEGRADED_MODE, &a->flags))) {
if (!power_management) { if (!power_management) {
del_timer_sync(&a->timer); del_timer_sync(&a->timer);
tasklet_kill(&a->tasklet); tasklet_kill(&a->tasklet);
@ -508,19 +508,19 @@ static void esas2r_adapter_power_down(struct esas2r_adapter *a,
} }
/* Clean up interrupts */ /* Clean up interrupts */
if (a->flags2 & AF2_IRQ_CLAIMED) { if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
esas2r_log_dev(ESAS2R_LOG_INFO, esas2r_log_dev(ESAS2R_LOG_INFO,
&(a->pcid->dev), &(a->pcid->dev),
"free_irq(%d) called", a->pcid->irq); "free_irq(%d) called", a->pcid->irq);
free_irq(a->pcid->irq, a); free_irq(a->pcid->irq, a);
esas2r_debug("IRQ released"); esas2r_debug("IRQ released");
esas2r_lock_clear_flags(&a->flags2, AF2_IRQ_CLAIMED); clear_bit(AF2_IRQ_CLAIMED, &a->flags2);
} }
if (a->flags2 & AF2_MSI_ENABLED) { if (test_bit(AF2_MSI_ENABLED, &a->flags2)) {
pci_disable_msi(a->pcid); pci_disable_msi(a->pcid);
esas2r_lock_clear_flags(&a->flags2, AF2_MSI_ENABLED); clear_bit(AF2_MSI_ENABLED, &a->flags2);
esas2r_debug("MSI disabled"); esas2r_debug("MSI disabled");
} }
@ -641,12 +641,10 @@ void esas2r_kill_adapter(int i)
pci_set_drvdata(a->pcid, NULL); pci_set_drvdata(a->pcid, NULL);
esas2r_adapters[i] = NULL; esas2r_adapters[i] = NULL;
if (a->flags2 & AF2_INIT_DONE) { if (test_bit(AF2_INIT_DONE, &a->flags2)) {
esas2r_lock_clear_flags(&a->flags2, clear_bit(AF2_INIT_DONE, &a->flags2);
AF2_INIT_DONE);
esas2r_lock_set_flags(&a->flags, set_bit(AF_DEGRADED_MODE, &a->flags);
AF_DEGRADED_MODE);
esas2r_log_dev(ESAS2R_LOG_INFO, esas2r_log_dev(ESAS2R_LOG_INFO,
&(a->host->shost_gendev), &(a->host->shost_gendev),
@ -759,7 +757,7 @@ int esas2r_resume(struct pci_dev *pdev)
esas2r_claim_interrupts(a); esas2r_claim_interrupts(a);
if (a->flags2 & AF2_IRQ_CLAIMED) { if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
/* /*
* Now that system interrupt(s) are claimed, we can enable * Now that system interrupt(s) are claimed, we can enable
* chip interrupts. * chip interrupts.
@ -781,7 +779,7 @@ error_exit:
bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str) bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
{ {
esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE); set_bit(AF_DEGRADED_MODE, &a->flags);
esas2r_log(ESAS2R_LOG_CRIT, esas2r_log(ESAS2R_LOG_CRIT,
"setting adapter to degraded mode: %s\n", error_str); "setting adapter to degraded mode: %s\n", error_str);
return false; return false;
@ -809,7 +807,7 @@ static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
int pcie_cap_reg; int pcie_cap_reg;
pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP); pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
if (0xffff & pcie_cap_reg) { if (pcie_cap_reg) {
u16 devcontrol; u16 devcontrol;
pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL, pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
@ -896,7 +894,7 @@ bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
&& (a->pcid->subsystem_device & ATTO_SSDID_TBT)) && (a->pcid->subsystem_device & ATTO_SSDID_TBT))
a->flags2 |= AF2_THUNDERBOLT; a->flags2 |= AF2_THUNDERBOLT;
if (a->flags2 & AF2_THUNDERBOLT) if (test_bit(AF2_THUNDERBOLT, &a->flags2))
a->flags2 |= AF2_SERIAL_FLASH; a->flags2 |= AF2_SERIAL_FLASH;
if (a->pcid->subsystem_device == ATTO_TLSH_1068) if (a->pcid->subsystem_device == ATTO_TLSH_1068)
@ -956,14 +954,14 @@ bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
a->outbound_copy = (u32 volatile *)high; a->outbound_copy = (u32 volatile *)high;
high += sizeof(u32); high += sizeof(u32);
if (!(a->flags & AF_NVR_VALID)) if (!test_bit(AF_NVR_VALID, &a->flags))
esas2r_nvram_set_defaults(a); esas2r_nvram_set_defaults(a);
/* update the caller's uncached memory area pointer */ /* update the caller's uncached memory area pointer */
*uncached_area = (void *)high; *uncached_area = (void *)high;
/* initialize the allocated memory */ /* initialize the allocated memory */
if (a->flags & AF_FIRST_INIT) { if (test_bit(AF_FIRST_INIT, &a->flags)) {
memset(a->req_table, 0, memset(a->req_table, 0,
(num_requests + num_ae_requests + (num_requests + num_ae_requests +
1) * sizeof(struct esas2r_request *)); 1) * sizeof(struct esas2r_request *));
@ -1019,7 +1017,7 @@ bool esas2r_check_adapter(struct esas2r_adapter *a)
* if the chip reset detected flag is set, we can bypass a bunch of * if the chip reset detected flag is set, we can bypass a bunch of
* stuff. * stuff.
*/ */
if (a->flags & AF_CHPRST_DETECTED) if (test_bit(AF_CHPRST_DETECTED, &a->flags))
goto skip_chip_reset; goto skip_chip_reset;
/* /*
@ -1057,14 +1055,12 @@ bool esas2r_check_adapter(struct esas2r_adapter *a)
doorbell); doorbell);
if (ver == DRBL_FW_VER_0) { if (ver == DRBL_FW_VER_0) {
esas2r_lock_set_flags(&a->flags, set_bit(AF_LEGACY_SGE_MODE, &a->flags);
AF_LEGACY_SGE_MODE);
a->max_vdareq_size = 128; a->max_vdareq_size = 128;
a->build_sgl = esas2r_build_sg_list_sge; a->build_sgl = esas2r_build_sg_list_sge;
} else if (ver == DRBL_FW_VER_1) { } else if (ver == DRBL_FW_VER_1) {
esas2r_lock_clear_flags(&a->flags, clear_bit(AF_LEGACY_SGE_MODE, &a->flags);
AF_LEGACY_SGE_MODE);
a->max_vdareq_size = 1024; a->max_vdareq_size = 1024;
a->build_sgl = esas2r_build_sg_list_prd; a->build_sgl = esas2r_build_sg_list_prd;
@ -1139,7 +1135,7 @@ skip_chip_reset:
*a->outbound_copy = *a->outbound_copy =
a->last_write = a->last_write =
a->last_read = a->list_size - 1; a->last_read = a->list_size - 1;
esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE); set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE | esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |
a->last_write); a->last_write);
esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE | esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE |
@ -1204,9 +1200,9 @@ skip_chip_reset:
*/ */
doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB); doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
if (doorbell & DRBL_POWER_DOWN) if (doorbell & DRBL_POWER_DOWN)
esas2r_lock_set_flags(&a->flags2, AF2_VDA_POWER_DOWN); set_bit(AF2_VDA_POWER_DOWN, &a->flags2);
else else
esas2r_lock_clear_flags(&a->flags2, AF2_VDA_POWER_DOWN); clear_bit(AF2_VDA_POWER_DOWN, &a->flags2);
/* /*
* enable assertion of outbound queue and doorbell interrupts in the * enable assertion of outbound queue and doorbell interrupts in the
@ -1239,8 +1235,8 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
0, 0,
NULL); NULL);
ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init; ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
ci->sgl_page_size = sgl_page_size; ci->sgl_page_size = cpu_to_le32(sgl_page_size);
ci->epoch_time = now.tv_sec; ci->epoch_time = cpu_to_le32(now.tv_sec);
rq->flags |= RF_FAILURE_OK; rq->flags |= RF_FAILURE_OK;
a->init_msg = ESAS2R_INIT_MSG_INIT; a->init_msg = ESAS2R_INIT_MSG_INIT;
break; break;
@ -1250,12 +1246,15 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
if (rq->req_stat == RS_SUCCESS) { if (rq->req_stat == RS_SUCCESS) {
u32 major; u32 major;
u32 minor; u32 minor;
u16 fw_release;
a->fw_version = le16_to_cpu( a->fw_version = le16_to_cpu(
rq->func_rsp.cfg_rsp.vda_version); rq->func_rsp.cfg_rsp.vda_version);
a->fw_build = rq->func_rsp.cfg_rsp.fw_build; a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
major = LOBYTE(rq->func_rsp.cfg_rsp.fw_release); fw_release = le16_to_cpu(
minor = HIBYTE(rq->func_rsp.cfg_rsp.fw_release); rq->func_rsp.cfg_rsp.fw_release);
major = LOBYTE(fw_release);
minor = HIBYTE(fw_release);
a->fw_version += (major << 16) + (minor << 24); a->fw_version += (major << 16) + (minor << 24);
} else { } else {
esas2r_hdebug("FAILED"); esas2r_hdebug("FAILED");
@ -1266,9 +1265,8 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,
* unsupported config requests correctly. * unsupported config requests correctly.
*/ */
if ((a->flags2 & AF2_THUNDERBOLT) if ((test_bit(AF2_THUNDERBOLT, &a->flags2))
|| (be32_to_cpu(a->fw_version) > || (be32_to_cpu(a->fw_version) > 0x00524702)) {
be32_to_cpu(0x47020052))) {
esas2r_hdebug("CFG get init"); esas2r_hdebug("CFG get init");
esas2r_build_cfg_req(a, esas2r_build_cfg_req(a,
rq, rq,
@ -1361,10 +1359,10 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
struct esas2r_request *rq; struct esas2r_request *rq;
u32 i; u32 i;
if (a->flags & AF_DEGRADED_MODE) if (test_bit(AF_DEGRADED_MODE, &a->flags))
goto exit; goto exit;
if (!(a->flags & AF_NVR_VALID)) { if (!test_bit(AF_NVR_VALID, &a->flags)) {
if (!esas2r_nvram_read_direct(a)) if (!esas2r_nvram_read_direct(a))
esas2r_log(ESAS2R_LOG_WARN, esas2r_log(ESAS2R_LOG_WARN,
"invalid/missing NVRAM parameters"); "invalid/missing NVRAM parameters");
@ -1376,8 +1374,8 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
} }
/* The firmware is ready. */ /* The firmware is ready. */
esas2r_lock_clear_flags(&a->flags, AF_DEGRADED_MODE); clear_bit(AF_DEGRADED_MODE, &a->flags);
esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING); clear_bit(AF_CHPRST_PENDING, &a->flags);
/* Post all the async event requests */ /* Post all the async event requests */
for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++) for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
@ -1398,8 +1396,8 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
esas2r_hdebug("firmware revision: %s", a->fw_rev); esas2r_hdebug("firmware revision: %s", a->fw_rev);
if ((a->flags & AF_CHPRST_DETECTED) if (test_bit(AF_CHPRST_DETECTED, &a->flags)
&& (a->flags & AF_FIRST_INIT)) { && (test_bit(AF_FIRST_INIT, &a->flags))) {
esas2r_enable_chip_interrupts(a); esas2r_enable_chip_interrupts(a);
return true; return true;
} }
@ -1423,18 +1421,18 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
* Block Tasklets from getting scheduled and indicate this is * Block Tasklets from getting scheduled and indicate this is
* polled discovery. * polled discovery.
*/ */
esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED); set_bit(AF_TASKLET_SCHEDULED, &a->flags);
esas2r_lock_set_flags(&a->flags, AF_DISC_POLLED); set_bit(AF_DISC_POLLED, &a->flags);
/* /*
* Temporarily bring the disable count to zero to enable * Temporarily bring the disable count to zero to enable
* deferred processing. Note that the count is already zero * deferred processing. Note that the count is already zero
* after the first initialization. * after the first initialization.
*/ */
if (a->flags & AF_FIRST_INIT) if (test_bit(AF_FIRST_INIT, &a->flags))
atomic_dec(&a->disable_cnt); atomic_dec(&a->disable_cnt);
while (a->flags & AF_DISC_PENDING) { while (test_bit(AF_DISC_PENDING, &a->flags)) {
schedule_timeout_interruptible(msecs_to_jiffies(100)); schedule_timeout_interruptible(msecs_to_jiffies(100));
/* /*
@ -1453,7 +1451,7 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
* we have to make sure the timer tick processes the * we have to make sure the timer tick processes the
* doorbell indicating the firmware is ready. * doorbell indicating the firmware is ready.
*/ */
if (!(a->flags & AF_CHPRST_PENDING)) if (!test_bit(AF_CHPRST_PENDING, &a->flags))
esas2r_disc_check_for_work(a); esas2r_disc_check_for_work(a);
/* Simulate a timer tick. */ /* Simulate a timer tick. */
@ -1473,11 +1471,11 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
} }
if (a->flags & AF_FIRST_INIT) if (test_bit(AF_FIRST_INIT, &a->flags))
atomic_inc(&a->disable_cnt); atomic_inc(&a->disable_cnt);
esas2r_lock_clear_flags(&a->flags, AF_DISC_POLLED); clear_bit(AF_DISC_POLLED, &a->flags);
esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED); clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
} }
@ -1504,26 +1502,26 @@ exit:
* need to get done before we exit. * need to get done before we exit.
*/ */
if ((a->flags & AF_CHPRST_DETECTED) if (test_bit(AF_CHPRST_DETECTED, &a->flags) &&
&& (a->flags & AF_FIRST_INIT)) { test_bit(AF_FIRST_INIT, &a->flags)) {
/* /*
* Reinitialization was performed during the first * Reinitialization was performed during the first
* initialization. Only clear the chip reset flag so the * initialization. Only clear the chip reset flag so the
* original device polling is not cancelled. * original device polling is not cancelled.
*/ */
if (!rslt) if (!rslt)
esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING); clear_bit(AF_CHPRST_PENDING, &a->flags);
} else { } else {
/* First initialization or a subsequent re-init is complete. */ /* First initialization or a subsequent re-init is complete. */
if (!rslt) { if (!rslt) {
esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING); clear_bit(AF_CHPRST_PENDING, &a->flags);
esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING); clear_bit(AF_DISC_PENDING, &a->flags);
} }
/* Enable deferred processing after the first initialization. */ /* Enable deferred processing after the first initialization. */
if (a->flags & AF_FIRST_INIT) { if (test_bit(AF_FIRST_INIT, &a->flags)) {
esas2r_lock_clear_flags(&a->flags, AF_FIRST_INIT); clear_bit(AF_FIRST_INIT, &a->flags);
if (atomic_dec_return(&a->disable_cnt) == 0) if (atomic_dec_return(&a->disable_cnt) == 0)
esas2r_do_deferred_processes(a); esas2r_do_deferred_processes(a);
@ -1535,7 +1533,7 @@ exit:
void esas2r_reset_adapter(struct esas2r_adapter *a) void esas2r_reset_adapter(struct esas2r_adapter *a)
{ {
esas2r_lock_set_flags(&a->flags, AF_OS_RESET); set_bit(AF_OS_RESET, &a->flags);
esas2r_local_reset_adapter(a); esas2r_local_reset_adapter(a);
esas2r_schedule_tasklet(a); esas2r_schedule_tasklet(a);
} }
@ -1550,17 +1548,17 @@ void esas2r_reset_chip(struct esas2r_adapter *a)
* dump is located in the upper 512KB of the onchip SRAM. Make sure * dump is located in the upper 512KB of the onchip SRAM. Make sure
* to not overwrite a previous crash that was saved. * to not overwrite a previous crash that was saved.
*/ */
if ((a->flags2 & AF2_COREDUMP_AVAIL) if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) &&
&& !(a->flags2 & AF2_COREDUMP_SAVED)) { !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) {
esas2r_read_mem_block(a, esas2r_read_mem_block(a,
a->fw_coredump_buff, a->fw_coredump_buff,
MW_DATA_ADDR_SRAM + 0x80000, MW_DATA_ADDR_SRAM + 0x80000,
ESAS2R_FWCOREDUMP_SZ); ESAS2R_FWCOREDUMP_SZ);
esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_SAVED); set_bit(AF2_COREDUMP_SAVED, &a->flags2);
} }
esas2r_lock_clear_flags(&a->flags2, AF2_COREDUMP_AVAIL); clear_bit(AF2_COREDUMP_AVAIL, &a->flags2);
/* Reset the chip */ /* Reset the chip */
if (a->pcid->revision == MVR_FREY_B2) if (a->pcid->revision == MVR_FREY_B2)
@ -1606,10 +1604,10 @@ static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a)
*/ */
void esas2r_power_down(struct esas2r_adapter *a) void esas2r_power_down(struct esas2r_adapter *a)
{ {
esas2r_lock_set_flags(&a->flags, AF_POWER_MGT); set_bit(AF_POWER_MGT, &a->flags);
esas2r_lock_set_flags(&a->flags, AF_POWER_DOWN); set_bit(AF_POWER_DOWN, &a->flags);
if (!(a->flags & AF_DEGRADED_MODE)) { if (!test_bit(AF_DEGRADED_MODE, &a->flags)) {
u32 starttime; u32 starttime;
u32 doorbell; u32 doorbell;
@ -1649,14 +1647,14 @@ void esas2r_power_down(struct esas2r_adapter *a)
* For versions of firmware that support it tell them the driver * For versions of firmware that support it tell them the driver
* is powering down. * is powering down.
*/ */
if (a->flags2 & AF2_VDA_POWER_DOWN) if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2))
esas2r_power_down_notify_firmware(a); esas2r_power_down_notify_firmware(a);
} }
/* Suspend I/O processing. */ /* Suspend I/O processing. */
esas2r_lock_set_flags(&a->flags, AF_OS_RESET); set_bit(AF_OS_RESET, &a->flags);
esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING); set_bit(AF_DISC_PENDING, &a->flags);
esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING); set_bit(AF_CHPRST_PENDING, &a->flags);
esas2r_process_adapter_reset(a); esas2r_process_adapter_reset(a);
@ -1673,9 +1671,9 @@ bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
{ {
bool ret; bool ret;
esas2r_lock_clear_flags(&a->flags, AF_POWER_DOWN); clear_bit(AF_POWER_DOWN, &a->flags);
esas2r_init_pci_cfg_space(a); esas2r_init_pci_cfg_space(a);
esas2r_lock_set_flags(&a->flags, AF_FIRST_INIT); set_bit(AF_FIRST_INIT, &a->flags);
atomic_inc(&a->disable_cnt); atomic_inc(&a->disable_cnt);
/* reinitialize the adapter */ /* reinitialize the adapter */
@ -1687,17 +1685,17 @@ bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
esas2r_send_reset_ae(a, true); esas2r_send_reset_ae(a, true);
/* clear this flag after initialization. */ /* clear this flag after initialization. */
esas2r_lock_clear_flags(&a->flags, AF_POWER_MGT); clear_bit(AF_POWER_MGT, &a->flags);
return ret; return ret;
} }
bool esas2r_is_adapter_present(struct esas2r_adapter *a) bool esas2r_is_adapter_present(struct esas2r_adapter *a)
{ {
if (a->flags & AF_NOT_PRESENT) if (test_bit(AF_NOT_PRESENT, &a->flags))
return false; return false;
if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) { if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) {
esas2r_lock_set_flags(&a->flags, AF_NOT_PRESENT); set_bit(AF_NOT_PRESENT, &a->flags);
return false; return false;
} }

View File

@ -96,7 +96,7 @@ irqreturn_t esas2r_interrupt(int irq, void *dev_id)
if (!esas2r_adapter_interrupt_pending(a)) if (!esas2r_adapter_interrupt_pending(a))
return IRQ_NONE; return IRQ_NONE;
esas2r_lock_set_flags(&a->flags2, AF2_INT_PENDING); set_bit(AF2_INT_PENDING, &a->flags2);
esas2r_schedule_tasklet(a); esas2r_schedule_tasklet(a);
return IRQ_HANDLED; return IRQ_HANDLED;
@ -317,9 +317,10 @@ void esas2r_do_deferred_processes(struct esas2r_adapter *a)
* = 2 - can start any request * = 2 - can start any request
*/ */
if (a->flags & (AF_CHPRST_PENDING | AF_FLASHING)) if (test_bit(AF_CHPRST_PENDING, &a->flags) ||
test_bit(AF_FLASHING, &a->flags))
startreqs = 0; startreqs = 0;
else if (a->flags & AF_DISC_PENDING) else if (test_bit(AF_DISC_PENDING, &a->flags))
startreqs = 1; startreqs = 1;
atomic_inc(&a->disable_cnt); atomic_inc(&a->disable_cnt);
@ -367,7 +368,7 @@ void esas2r_do_deferred_processes(struct esas2r_adapter *a)
* Flashing could have been set by last local * Flashing could have been set by last local
* start * start
*/ */
if (a->flags & AF_FLASHING) if (test_bit(AF_FLASHING, &a->flags))
break; break;
} }
} }
@ -404,7 +405,7 @@ void esas2r_process_adapter_reset(struct esas2r_adapter *a)
dc->disc_evt = 0; dc->disc_evt = 0;
esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG); clear_bit(AF_DISC_IN_PROG, &a->flags);
} }
/* /*
@ -425,7 +426,7 @@ void esas2r_process_adapter_reset(struct esas2r_adapter *a)
a->last_write = a->last_write =
a->last_read = a->list_size - 1; a->last_read = a->list_size - 1;
esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE); set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
/* Kill all the requests on the active list */ /* Kill all the requests on the active list */
list_for_each(element, &a->defer_list) { list_for_each(element, &a->defer_list) {
@ -470,7 +471,7 @@ static void esas2r_process_bus_reset(struct esas2r_adapter *a)
if (atomic_read(&a->disable_cnt) == 0) if (atomic_read(&a->disable_cnt) == 0)
esas2r_do_deferred_processes(a); esas2r_do_deferred_processes(a);
esas2r_lock_clear_flags(&a->flags, AF_OS_RESET); clear_bit(AF_OS_RESET, &a->flags);
esas2r_trace_exit(); esas2r_trace_exit();
} }
@ -478,10 +479,10 @@ static void esas2r_process_bus_reset(struct esas2r_adapter *a)
static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a) static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
{ {
esas2r_lock_clear_flags(&a->flags, AF_CHPRST_NEEDED); clear_bit(AF_CHPRST_NEEDED, &a->flags);
esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED); clear_bit(AF_BUSRST_NEEDED, &a->flags);
esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED); clear_bit(AF_BUSRST_DETECTED, &a->flags);
esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING); clear_bit(AF_BUSRST_PENDING, &a->flags);
/* /*
* Make sure we don't get attempt more than 3 resets * Make sure we don't get attempt more than 3 resets
* when the uptime between resets does not exceed one * when the uptime between resets does not exceed one
@ -507,10 +508,10 @@ static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
* prevent the heartbeat from trying to recover. * prevent the heartbeat from trying to recover.
*/ */
esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE); set_bit(AF_DEGRADED_MODE, &a->flags);
esas2r_lock_set_flags(&a->flags, AF_DISABLED); set_bit(AF_DISABLED, &a->flags);
esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING); clear_bit(AF_CHPRST_PENDING, &a->flags);
esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING); clear_bit(AF_DISC_PENDING, &a->flags);
esas2r_disable_chip_interrupts(a); esas2r_disable_chip_interrupts(a);
a->int_mask = 0; a->int_mask = 0;
@ -519,18 +520,17 @@ static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
esas2r_log(ESAS2R_LOG_CRIT, esas2r_log(ESAS2R_LOG_CRIT,
"Adapter disabled because of hardware failure"); "Adapter disabled because of hardware failure");
} else { } else {
u32 flags = bool alrdyrst = test_and_set_bit(AF_CHPRST_STARTED, &a->flags);
esas2r_lock_set_flags(&a->flags, AF_CHPRST_STARTED);
if (!(flags & AF_CHPRST_STARTED)) if (!alrdyrst)
/* /*
* Only disable interrupts if this is * Only disable interrupts if this is
* the first reset attempt. * the first reset attempt.
*/ */
esas2r_disable_chip_interrupts(a); esas2r_disable_chip_interrupts(a);
if ((a->flags & AF_POWER_MGT) && !(a->flags & AF_FIRST_INIT) && if ((test_bit(AF_POWER_MGT, &a->flags)) &&
!(flags & AF_CHPRST_STARTED)) { !test_bit(AF_FIRST_INIT, &a->flags) && !alrdyrst) {
/* /*
* Don't reset the chip on the first * Don't reset the chip on the first
* deferred power up attempt. * deferred power up attempt.
@ -543,10 +543,10 @@ static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
/* Kick off the reinitialization */ /* Kick off the reinitialization */
a->chip_uptime += ESAS2R_CHP_UPTIME_CNT; a->chip_uptime += ESAS2R_CHP_UPTIME_CNT;
a->chip_init_time = jiffies_to_msecs(jiffies); a->chip_init_time = jiffies_to_msecs(jiffies);
if (!(a->flags & AF_POWER_MGT)) { if (!test_bit(AF_POWER_MGT, &a->flags)) {
esas2r_process_adapter_reset(a); esas2r_process_adapter_reset(a);
if (!(flags & AF_CHPRST_STARTED)) { if (!alrdyrst) {
/* Remove devices now that I/O is cleaned up. */ /* Remove devices now that I/O is cleaned up. */
a->prev_dev_cnt = a->prev_dev_cnt =
esas2r_targ_db_get_tgt_cnt(a); esas2r_targ_db_get_tgt_cnt(a);
@ -560,38 +560,37 @@ static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a) static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
{ {
while (a->flags & AF_CHPRST_DETECTED) { while (test_bit(AF_CHPRST_DETECTED, &a->flags)) {
/* /*
* Balance the enable in esas2r_initadapter_hw. * Balance the enable in esas2r_initadapter_hw.
* Esas2r_power_down already took care of it for power * Esas2r_power_down already took care of it for power
* management. * management.
*/ */
if (!(a->flags & AF_DEGRADED_MODE) && !(a->flags & if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
AF_POWER_MGT)) !test_bit(AF_POWER_MGT, &a->flags))
esas2r_disable_chip_interrupts(a); esas2r_disable_chip_interrupts(a);
/* Reinitialize the chip. */ /* Reinitialize the chip. */
esas2r_check_adapter(a); esas2r_check_adapter(a);
esas2r_init_adapter_hw(a, 0); esas2r_init_adapter_hw(a, 0);
if (a->flags & AF_CHPRST_NEEDED) if (test_bit(AF_CHPRST_NEEDED, &a->flags))
break; break;
if (a->flags & AF_POWER_MGT) { if (test_bit(AF_POWER_MGT, &a->flags)) {
/* Recovery from power management. */ /* Recovery from power management. */
if (a->flags & AF_FIRST_INIT) { if (test_bit(AF_FIRST_INIT, &a->flags)) {
/* Chip reset during normal power up */ /* Chip reset during normal power up */
esas2r_log(ESAS2R_LOG_CRIT, esas2r_log(ESAS2R_LOG_CRIT,
"The firmware was reset during a normal power-up sequence"); "The firmware was reset during a normal power-up sequence");
} else { } else {
/* Deferred power up complete. */ /* Deferred power up complete. */
esas2r_lock_clear_flags(&a->flags, clear_bit(AF_POWER_MGT, &a->flags);
AF_POWER_MGT);
esas2r_send_reset_ae(a, true); esas2r_send_reset_ae(a, true);
} }
} else { } else {
/* Recovery from online chip reset. */ /* Recovery from online chip reset. */
if (a->flags & AF_FIRST_INIT) { if (test_bit(AF_FIRST_INIT, &a->flags)) {
/* Chip reset during driver load */ /* Chip reset during driver load */
} else { } else {
/* Chip reset after driver load */ /* Chip reset after driver load */
@ -602,14 +601,14 @@ static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
"Recovering from a chip reset while the chip was online"); "Recovering from a chip reset while the chip was online");
} }
esas2r_lock_clear_flags(&a->flags, AF_CHPRST_STARTED); clear_bit(AF_CHPRST_STARTED, &a->flags);
esas2r_enable_chip_interrupts(a); esas2r_enable_chip_interrupts(a);
/* /*
* Clear this flag last! this indicates that the chip has been * Clear this flag last! this indicates that the chip has been
* reset already during initialization. * reset already during initialization.
*/ */
esas2r_lock_clear_flags(&a->flags, AF_CHPRST_DETECTED); clear_bit(AF_CHPRST_DETECTED, &a->flags);
} }
} }
@ -617,26 +616,28 @@ static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
/* Perform deferred tasks when chip interrupts are disabled */ /* Perform deferred tasks when chip interrupts are disabled */
void esas2r_do_tasklet_tasks(struct esas2r_adapter *a) void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)
{ {
if (a->flags & (AF_CHPRST_NEEDED | AF_CHPRST_DETECTED)) {
if (a->flags & AF_CHPRST_NEEDED) if (test_bit(AF_CHPRST_NEEDED, &a->flags) ||
test_bit(AF_CHPRST_DETECTED, &a->flags)) {
if (test_bit(AF_CHPRST_NEEDED, &a->flags))
esas2r_chip_rst_needed_during_tasklet(a); esas2r_chip_rst_needed_during_tasklet(a);
esas2r_handle_chip_rst_during_tasklet(a); esas2r_handle_chip_rst_during_tasklet(a);
} }
if (a->flags & AF_BUSRST_NEEDED) { if (test_bit(AF_BUSRST_NEEDED, &a->flags)) {
esas2r_hdebug("hard resetting bus"); esas2r_hdebug("hard resetting bus");
esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED); clear_bit(AF_BUSRST_NEEDED, &a->flags);
if (a->flags & AF_FLASHING) if (test_bit(AF_FLASHING, &a->flags))
esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED); set_bit(AF_BUSRST_DETECTED, &a->flags);
else else
esas2r_write_register_dword(a, MU_DOORBELL_IN, esas2r_write_register_dword(a, MU_DOORBELL_IN,
DRBL_RESET_BUS); DRBL_RESET_BUS);
} }
if (a->flags & AF_BUSRST_DETECTED) { if (test_bit(AF_BUSRST_DETECTED, &a->flags)) {
esas2r_process_bus_reset(a); esas2r_process_bus_reset(a);
esas2r_log_dev(ESAS2R_LOG_WARN, esas2r_log_dev(ESAS2R_LOG_WARN,
@ -645,14 +646,14 @@ void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)
scsi_report_bus_reset(a->host, 0); scsi_report_bus_reset(a->host, 0);
esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED); clear_bit(AF_BUSRST_DETECTED, &a->flags);
esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING); clear_bit(AF_BUSRST_PENDING, &a->flags);
esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete"); esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete");
} }
if (a->flags & AF_PORT_CHANGE) { if (test_bit(AF_PORT_CHANGE, &a->flags)) {
esas2r_lock_clear_flags(&a->flags, AF_PORT_CHANGE); clear_bit(AF_PORT_CHANGE, &a->flags);
esas2r_targ_db_report_changes(a); esas2r_targ_db_report_changes(a);
} }
@ -672,10 +673,10 @@ static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell); esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell);
if (doorbell & DRBL_RESET_BUS) if (doorbell & DRBL_RESET_BUS)
esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED); set_bit(AF_BUSRST_DETECTED, &a->flags);
if (doorbell & DRBL_FORCE_INT) if (doorbell & DRBL_FORCE_INT)
esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT); clear_bit(AF_HEARTBEAT, &a->flags);
if (doorbell & DRBL_PANIC_REASON_MASK) { if (doorbell & DRBL_PANIC_REASON_MASK) {
esas2r_hdebug("*** Firmware Panic ***"); esas2r_hdebug("*** Firmware Panic ***");
@ -683,7 +684,7 @@ static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
} }
if (doorbell & DRBL_FW_RESET) { if (doorbell & DRBL_FW_RESET) {
esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_AVAIL); set_bit(AF2_COREDUMP_AVAIL, &a->flags2);
esas2r_local_reset_adapter(a); esas2r_local_reset_adapter(a);
} }
@ -918,7 +919,7 @@ void esas2r_complete_request(struct esas2r_adapter *a,
{ {
if (rq->vrq->scsi.function == VDA_FUNC_FLASH if (rq->vrq->scsi.function == VDA_FUNC_FLASH
&& rq->vrq->flash.sub_func == VDA_FLASH_COMMIT) && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
esas2r_lock_clear_flags(&a->flags, AF_FLASHING); clear_bit(AF_FLASHING, &a->flags);
/* See if we setup a callback to do special processing */ /* See if we setup a callback to do special processing */

View File

@ -49,7 +49,8 @@ void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
struct esas2r_request *startrq = rq; struct esas2r_request *startrq = rq;
unsigned long flags; unsigned long flags;
if (unlikely(a->flags & (AF_DEGRADED_MODE | AF_POWER_DOWN))) { if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags) ||
test_bit(AF_POWER_DOWN, &a->flags))) {
if (rq->vrq->scsi.function == VDA_FUNC_SCSI) if (rq->vrq->scsi.function == VDA_FUNC_SCSI)
rq->req_stat = RS_SEL2; rq->req_stat = RS_SEL2;
else else
@ -69,8 +70,8 @@ void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
* Note that if AF_DISC_PENDING is set than this will * Note that if AF_DISC_PENDING is set than this will
* go on the defer queue. * go on the defer queue.
*/ */
if (unlikely(t->target_state != TS_PRESENT if (unlikely(t->target_state != TS_PRESENT &&
&& !(a->flags & AF_DISC_PENDING))) !test_bit(AF_DISC_PENDING, &a->flags)))
rq->req_stat = RS_SEL; rq->req_stat = RS_SEL;
} }
} }
@ -91,8 +92,9 @@ void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
spin_lock_irqsave(&a->queue_lock, flags); spin_lock_irqsave(&a->queue_lock, flags);
if (likely(list_empty(&a->defer_list) && if (likely(list_empty(&a->defer_list) &&
!(a->flags & !test_bit(AF_CHPRST_PENDING, &a->flags) &&
(AF_CHPRST_PENDING | AF_FLASHING | AF_DISC_PENDING)))) !test_bit(AF_FLASHING, &a->flags) &&
!test_bit(AF_DISC_PENDING, &a->flags)))
esas2r_local_start_request(a, startrq); esas2r_local_start_request(a, startrq);
else else
list_add_tail(&startrq->req_list, &a->defer_list); list_add_tail(&startrq->req_list, &a->defer_list);
@ -124,7 +126,7 @@ void esas2r_local_start_request(struct esas2r_adapter *a,
if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH
&& rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)) && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT))
esas2r_lock_set_flags(&a->flags, AF_FLASHING); set_bit(AF_FLASHING, &a->flags);
list_add_tail(&rq->req_list, &a->active_list); list_add_tail(&rq->req_list, &a->active_list);
esas2r_start_vda_request(a, rq); esas2r_start_vda_request(a, rq);
@ -147,11 +149,10 @@ void esas2r_start_vda_request(struct esas2r_adapter *a,
if (a->last_write >= a->list_size) { if (a->last_write >= a->list_size) {
a->last_write = 0; a->last_write = 0;
/* update the toggle bit */ /* update the toggle bit */
if (a->flags & AF_COMM_LIST_TOGGLE) if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags))
esas2r_lock_clear_flags(&a->flags, clear_bit(AF_COMM_LIST_TOGGLE, &a->flags);
AF_COMM_LIST_TOGGLE);
else else
esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE); set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
} }
element = element =
@ -169,7 +170,7 @@ void esas2r_start_vda_request(struct esas2r_adapter *a,
/* Update the write pointer */ /* Update the write pointer */
dw = a->last_write; dw = a->last_write;
if (a->flags & AF_COMM_LIST_TOGGLE) if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags))
dw |= MU_ILW_TOGGLE; dw |= MU_ILW_TOGGLE;
esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle); esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle);
@ -687,18 +688,14 @@ static void esas2r_handle_pending_reset(struct esas2r_adapter *a, u32 currtime)
esas2r_write_register_dword(a, MU_DOORBELL_OUT, esas2r_write_register_dword(a, MU_DOORBELL_OUT,
doorbell); doorbell);
if (ver == DRBL_FW_VER_0) { if (ver == DRBL_FW_VER_0) {
esas2r_lock_set_flags(&a->flags, set_bit(AF_CHPRST_DETECTED, &a->flags);
AF_CHPRST_DETECTED); set_bit(AF_LEGACY_SGE_MODE, &a->flags);
esas2r_lock_set_flags(&a->flags,
AF_LEGACY_SGE_MODE);
a->max_vdareq_size = 128; a->max_vdareq_size = 128;
a->build_sgl = esas2r_build_sg_list_sge; a->build_sgl = esas2r_build_sg_list_sge;
} else if (ver == DRBL_FW_VER_1) { } else if (ver == DRBL_FW_VER_1) {
esas2r_lock_set_flags(&a->flags, set_bit(AF_CHPRST_DETECTED, &a->flags);
AF_CHPRST_DETECTED); clear_bit(AF_LEGACY_SGE_MODE, &a->flags);
esas2r_lock_clear_flags(&a->flags,
AF_LEGACY_SGE_MODE);
a->max_vdareq_size = 1024; a->max_vdareq_size = 1024;
a->build_sgl = esas2r_build_sg_list_prd; a->build_sgl = esas2r_build_sg_list_prd;
@ -719,28 +716,27 @@ void esas2r_timer_tick(struct esas2r_adapter *a)
a->last_tick_time = currtime; a->last_tick_time = currtime;
/* count down the uptime */ /* count down the uptime */
if (a->chip_uptime if (a->chip_uptime &&
&& !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) { !test_bit(AF_CHPRST_PENDING, &a->flags) &&
!test_bit(AF_DISC_PENDING, &a->flags)) {
if (deltatime >= a->chip_uptime) if (deltatime >= a->chip_uptime)
a->chip_uptime = 0; a->chip_uptime = 0;
else else
a->chip_uptime -= deltatime; a->chip_uptime -= deltatime;
} }
if (a->flags & AF_CHPRST_PENDING) { if (test_bit(AF_CHPRST_PENDING, &a->flags)) {
if (!(a->flags & AF_CHPRST_NEEDED) if (!test_bit(AF_CHPRST_NEEDED, &a->flags) &&
&& !(a->flags & AF_CHPRST_DETECTED)) !test_bit(AF_CHPRST_DETECTED, &a->flags))
esas2r_handle_pending_reset(a, currtime); esas2r_handle_pending_reset(a, currtime);
} else { } else {
if (a->flags & AF_DISC_PENDING) if (test_bit(AF_DISC_PENDING, &a->flags))
esas2r_disc_check_complete(a); esas2r_disc_check_complete(a);
if (test_bit(AF_HEARTBEAT_ENB, &a->flags)) {
if (a->flags & AF_HEARTBEAT_ENB) { if (test_bit(AF_HEARTBEAT, &a->flags)) {
if (a->flags & AF_HEARTBEAT) {
if ((currtime - a->heartbeat_time) >= if ((currtime - a->heartbeat_time) >=
ESAS2R_HEARTBEAT_TIME) { ESAS2R_HEARTBEAT_TIME) {
esas2r_lock_clear_flags(&a->flags, clear_bit(AF_HEARTBEAT, &a->flags);
AF_HEARTBEAT);
esas2r_hdebug("heartbeat failed"); esas2r_hdebug("heartbeat failed");
esas2r_log(ESAS2R_LOG_CRIT, esas2r_log(ESAS2R_LOG_CRIT,
"heartbeat failed"); "heartbeat failed");
@ -748,7 +744,7 @@ void esas2r_timer_tick(struct esas2r_adapter *a)
esas2r_local_reset_adapter(a); esas2r_local_reset_adapter(a);
} }
} else { } else {
esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT); set_bit(AF_HEARTBEAT, &a->flags);
a->heartbeat_time = currtime; a->heartbeat_time = currtime;
esas2r_force_interrupt(a); esas2r_force_interrupt(a);
} }
@ -812,7 +808,7 @@ bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
rqaux->vrq->scsi.flags |= rqaux->vrq->scsi.flags |=
cpu_to_le16(task_mgt_func * LOBIT(FCP_CMND_TM_MASK)); cpu_to_le16(task_mgt_func * LOBIT(FCP_CMND_TM_MASK));
if (a->flags & AF_FLASHING) { if (test_bit(AF_FLASHING, &a->flags)) {
/* Assume success. if there are active requests, return busy */ /* Assume success. if there are active requests, return busy */
rqaux->req_stat = RS_SUCCESS; rqaux->req_stat = RS_SUCCESS;
@ -831,7 +827,7 @@ bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
spin_unlock_irqrestore(&a->queue_lock, flags); spin_unlock_irqrestore(&a->queue_lock, flags);
if (!(a->flags & AF_FLASHING)) if (!test_bit(AF_FLASHING, &a->flags))
esas2r_start_request(a, rqaux); esas2r_start_request(a, rqaux);
esas2r_comp_list_drain(a, &comp_list); esas2r_comp_list_drain(a, &comp_list);
@ -848,11 +844,12 @@ void esas2r_reset_bus(struct esas2r_adapter *a)
{ {
esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset"); esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset");
if (!(a->flags & AF_DEGRADED_MODE) if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
&& !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) { !test_bit(AF_CHPRST_PENDING, &a->flags) &&
esas2r_lock_set_flags(&a->flags, AF_BUSRST_NEEDED); !test_bit(AF_DISC_PENDING, &a->flags)) {
esas2r_lock_set_flags(&a->flags, AF_BUSRST_PENDING); set_bit(AF_BUSRST_NEEDED, &a->flags);
esas2r_lock_set_flags(&a->flags, AF_OS_RESET); set_bit(AF_BUSRST_PENDING, &a->flags);
set_bit(AF_OS_RESET, &a->flags);
esas2r_schedule_tasklet(a); esas2r_schedule_tasklet(a);
} }

View File

@ -347,7 +347,7 @@ static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,
{ {
struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl; struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;
if (a->flags & AF_DEGRADED_MODE) if (test_bit(AF_DEGRADED_MODE, &a->flags))
return false; return false;
esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge); esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
@ -463,7 +463,7 @@ static int csmi_ioctl_callback(struct esas2r_adapter *a,
gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver)); gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
gcc->bios_build_rev = LOWORD(a->flash_ver); gcc->bios_build_rev = LOWORD(a->flash_ver);
if (a->flags2 & AF2_THUNDERLINK) if (test_bit(AF2_THUNDERLINK, &a->flags2))
gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
| CSMI_CNTLRF_SATA_HBA; | CSMI_CNTLRF_SATA_HBA;
else else
@ -485,7 +485,7 @@ static int csmi_ioctl_callback(struct esas2r_adapter *a,
{ {
struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts; struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;
if (a->flags & AF_DEGRADED_MODE) if (test_bit(AF_DEGRADED_MODE, &a->flags))
gcs->status = CSMI_CNTLR_STS_FAILED; gcs->status = CSMI_CNTLR_STS_FAILED;
else else
gcs->status = CSMI_CNTLR_STS_GOOD; gcs->status = CSMI_CNTLR_STS_GOOD;
@ -819,10 +819,10 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
gai->adap_type = ATTO_GAI_AT_ESASRAID2; gai->adap_type = ATTO_GAI_AT_ESASRAID2;
if (a->flags2 & AF2_THUNDERLINK) if (test_bit(AF2_THUNDERLINK, &a->flags2))
gai->adap_type = ATTO_GAI_AT_TLSASHBA; gai->adap_type = ATTO_GAI_AT_TLSASHBA;
if (a->flags & AF_DEGRADED_MODE) if (test_bit(AF_DEGRADED_MODE, &a->flags))
gai->adap_flags |= ATTO_GAI_AF_DEGRADED; gai->adap_flags |= ATTO_GAI_AF_DEGRADED;
gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP | gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
@ -938,7 +938,7 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
u32 total_len = ESAS2R_FWCOREDUMP_SZ; u32 total_len = ESAS2R_FWCOREDUMP_SZ;
/* Size is zero if a core dump isn't present */ /* Size is zero if a core dump isn't present */
if (!(a->flags2 & AF2_COREDUMP_SAVED)) if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
total_len = 0; total_len = 0;
if (len > total_len) if (len > total_len)
@ -960,8 +960,7 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
memset(a->fw_coredump_buff, 0, memset(a->fw_coredump_buff, 0,
ESAS2R_FWCOREDUMP_SZ); ESAS2R_FWCOREDUMP_SZ);
esas2r_lock_clear_flags(&a->flags2, clear_bit(AF2_COREDUMP_SAVED, &a->flags2);
AF2_COREDUMP_SAVED);
} else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) { } else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
hi->status = ATTO_STS_UNSUPPORTED; hi->status = ATTO_STS_UNSUPPORTED;
break; break;
@ -973,7 +972,7 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
trc->total_length = ESAS2R_FWCOREDUMP_SZ; trc->total_length = ESAS2R_FWCOREDUMP_SZ;
/* Return zero length buffer if core dump not present */ /* Return zero length buffer if core dump not present */
if (!(a->flags2 & AF2_COREDUMP_SAVED)) if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
trc->total_length = 0; trc->total_length = 0;
} else { } else {
hi->status = ATTO_STS_UNSUPPORTED; hi->status = ATTO_STS_UNSUPPORTED;
@ -1048,6 +1047,7 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
else if (spt->flags & ATTO_SPTF_HEAD_OF_Q) else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q); rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);
if (!esas2r_build_sg_list(a, rq, sgc)) { if (!esas2r_build_sg_list(a, rq, sgc)) {
hi->status = ATTO_STS_OUT_OF_RSRC; hi->status = ATTO_STS_OUT_OF_RSRC;
break; break;
@ -1139,15 +1139,15 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
break; break;
} }
if (a->flags & AF_CHPRST_NEEDED) if (test_bit(AF_CHPRST_NEEDED, &a->flags))
ac->adap_state = ATTO_AC_AS_RST_SCHED; ac->adap_state = ATTO_AC_AS_RST_SCHED;
else if (a->flags & AF_CHPRST_PENDING) else if (test_bit(AF_CHPRST_PENDING, &a->flags))
ac->adap_state = ATTO_AC_AS_RST_IN_PROG; ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
else if (a->flags & AF_DISC_PENDING) else if (test_bit(AF_DISC_PENDING, &a->flags))
ac->adap_state = ATTO_AC_AS_RST_DISC; ac->adap_state = ATTO_AC_AS_RST_DISC;
else if (a->flags & AF_DISABLED) else if (test_bit(AF_DISABLED, &a->flags))
ac->adap_state = ATTO_AC_AS_DISABLED; ac->adap_state = ATTO_AC_AS_DISABLED;
else if (a->flags & AF_DEGRADED_MODE) else if (test_bit(AF_DEGRADED_MODE, &a->flags))
ac->adap_state = ATTO_AC_AS_DEGRADED; ac->adap_state = ATTO_AC_AS_DEGRADED;
else else
ac->adap_state = ATTO_AC_AS_OK; ac->adap_state = ATTO_AC_AS_OK;

View File

@ -889,7 +889,7 @@ int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
/* Assume success, if it fails we will fix the result later. */ /* Assume success, if it fails we will fix the result later. */
cmd->result = DID_OK << 16; cmd->result = DID_OK << 16;
if (unlikely(a->flags & AF_DEGRADED_MODE)) { if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags))) {
cmd->result = DID_NO_CONNECT << 16; cmd->result = DID_NO_CONNECT << 16;
cmd->scsi_done(cmd); cmd->scsi_done(cmd);
return 0; return 0;
@ -1050,7 +1050,7 @@ int esas2r_eh_abort(struct scsi_cmnd *cmd)
esas2r_log(ESAS2R_LOG_INFO, "eh_abort (%p)", cmd); esas2r_log(ESAS2R_LOG_INFO, "eh_abort (%p)", cmd);
if (a->flags & AF_DEGRADED_MODE) { if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
cmd->result = DID_ABORT << 16; cmd->result = DID_ABORT << 16;
scsi_set_resid(cmd, 0); scsi_set_resid(cmd, 0);
@ -1131,7 +1131,7 @@ static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset)
struct esas2r_adapter *a = struct esas2r_adapter *a =
(struct esas2r_adapter *)cmd->device->host->hostdata; (struct esas2r_adapter *)cmd->device->host->hostdata;
if (a->flags & AF_DEGRADED_MODE) if (test_bit(AF_DEGRADED_MODE, &a->flags))
return FAILED; return FAILED;
if (host_reset) if (host_reset)
@ -1141,14 +1141,14 @@ static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset)
/* above call sets the AF_OS_RESET flag. wait for it to clear. */ /* above call sets the AF_OS_RESET flag. wait for it to clear. */
while (a->flags & AF_OS_RESET) { while (test_bit(AF_OS_RESET, &a->flags)) {
msleep(10); msleep(10);
if (a->flags & AF_DEGRADED_MODE) if (test_bit(AF_DEGRADED_MODE, &a->flags))
return FAILED; return FAILED;
} }
if (a->flags & AF_DEGRADED_MODE) if (test_bit(AF_DEGRADED_MODE, &a->flags))
return FAILED; return FAILED;
return SUCCESS; return SUCCESS;
@ -1176,7 +1176,7 @@ static int esas2r_dev_targ_reset(struct scsi_cmnd *cmd, bool target_reset)
u8 task_management_status = RS_PENDING; u8 task_management_status = RS_PENDING;
bool completed; bool completed;
if (a->flags & AF_DEGRADED_MODE) if (test_bit(AF_DEGRADED_MODE, &a->flags))
return FAILED; return FAILED;
retry: retry:
@ -1229,7 +1229,7 @@ retry:
msleep(10); msleep(10);
} }
if (a->flags & AF_DEGRADED_MODE) if (test_bit(AF_DEGRADED_MODE, &a->flags))
return FAILED; return FAILED;
if (task_management_status == RS_BUSY) { if (task_management_status == RS_BUSY) {
@ -1666,13 +1666,13 @@ void esas2r_adapter_tasklet(unsigned long context)
{ {
struct esas2r_adapter *a = (struct esas2r_adapter *)context; struct esas2r_adapter *a = (struct esas2r_adapter *)context;
if (unlikely(a->flags2 & AF2_TIMER_TICK)) { if (unlikely(test_bit(AF2_TIMER_TICK, &a->flags2))) {
esas2r_lock_clear_flags(&a->flags2, AF2_TIMER_TICK); clear_bit(AF2_TIMER_TICK, &a->flags2);
esas2r_timer_tick(a); esas2r_timer_tick(a);
} }
if (likely(a->flags2 & AF2_INT_PENDING)) { if (likely(test_bit(AF2_INT_PENDING, &a->flags2))) {
esas2r_lock_clear_flags(&a->flags2, AF2_INT_PENDING); clear_bit(AF2_INT_PENDING, &a->flags2);
esas2r_adapter_interrupt(a); esas2r_adapter_interrupt(a);
} }
@ -1680,12 +1680,12 @@ void esas2r_adapter_tasklet(unsigned long context)
esas2r_do_tasklet_tasks(a); esas2r_do_tasklet_tasks(a);
if (esas2r_is_tasklet_pending(a) if (esas2r_is_tasklet_pending(a)
|| (a->flags2 & AF2_INT_PENDING) || (test_bit(AF2_INT_PENDING, &a->flags2))
|| (a->flags2 & AF2_TIMER_TICK)) { || (test_bit(AF2_TIMER_TICK, &a->flags2))) {
esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED); clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
esas2r_schedule_tasklet(a); esas2r_schedule_tasklet(a);
} else { } else {
esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED); clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
} }
} }
@ -1707,7 +1707,7 @@ static void esas2r_timer_callback(unsigned long context)
{ {
struct esas2r_adapter *a = (struct esas2r_adapter *)context; struct esas2r_adapter *a = (struct esas2r_adapter *)context;
esas2r_lock_set_flags(&a->flags2, AF2_TIMER_TICK); set_bit(AF2_TIMER_TICK, &a->flags2);
esas2r_schedule_tasklet(a); esas2r_schedule_tasklet(a);

View File

@ -86,7 +86,7 @@ void esas2r_targ_db_report_changes(struct esas2r_adapter *a)
esas2r_trace_enter(); esas2r_trace_enter();
if (a->flags & AF_DISC_PENDING) { if (test_bit(AF_DISC_PENDING, &a->flags)) {
esas2r_trace_exit(); esas2r_trace_exit();
return; return;
} }

View File

@ -84,7 +84,7 @@ bool esas2r_process_vda_ioctl(struct esas2r_adapter *a,
return false; return false;
} }
if (a->flags & AF_DEGRADED_MODE) { if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
vi->status = ATTO_STS_DEGRADED; vi->status = ATTO_STS_DEGRADED;
return false; return false;
} }
@ -310,9 +310,9 @@ static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
le32_to_cpu(rsp->vda_version); le32_to_cpu(rsp->vda_version);
cfg->data.init.fw_build = rsp->fw_build; cfg->data.init.fw_build = rsp->fw_build;
snprintf(buf, sizeof(buf), "%1d.%02d", snprintf(buf, sizeof(buf), "%1.1u.%2.2u",
(int)LOBYTE(le16_to_cpu(rsp->fw_release)), (int)LOBYTE(le16_to_cpu(rsp->fw_release)),
(int)HIBYTE(le16_to_cpu(rsp->fw_release))); (int)HIBYTE(le16_to_cpu(rsp->fw_release)));
memcpy(&cfg->data.init.fw_release, buf, memcpy(&cfg->data.init.fw_release, buf,
sizeof(cfg->data.init.fw_release)); sizeof(cfg->data.init.fw_release));
@ -389,7 +389,7 @@ void esas2r_build_mgt_req(struct esas2r_adapter *a,
vrq->length = cpu_to_le32(length); vrq->length = cpu_to_le32(length);
if (vrq->length) { if (vrq->length) {
if (a->flags & AF_LEGACY_SGE_MODE) { if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
vrq->sg_list_offset = (u8)offsetof( vrq->sg_list_offset = (u8)offsetof(
struct atto_vda_mgmt_req, sge); struct atto_vda_mgmt_req, sge);
@ -427,7 +427,7 @@ void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq)
vrq->length = cpu_to_le32(sizeof(struct atto_vda_ae_data)); vrq->length = cpu_to_le32(sizeof(struct atto_vda_ae_data));
if (a->flags & AF_LEGACY_SGE_MODE) { if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
vrq->sg_list_offset = vrq->sg_list_offset =
(u8)offsetof(struct atto_vda_ae_req, sge); (u8)offsetof(struct atto_vda_ae_req, sge);
vrq->sge[0].length = cpu_to_le32(SGE_LAST | vrq->length); vrq->sge[0].length = cpu_to_le32(SGE_LAST | vrq->length);

View File

@ -408,6 +408,7 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
} }
ctlr = fcoe_ctlr_device_priv(ctlr_dev); ctlr = fcoe_ctlr_device_priv(ctlr_dev);
ctlr->cdev = ctlr_dev;
fcoe = fcoe_ctlr_priv(ctlr); fcoe = fcoe_ctlr_priv(ctlr);
dev_hold(netdev); dev_hold(netdev);
@ -1440,22 +1441,28 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
ctlr = fcoe_to_ctlr(fcoe); ctlr = fcoe_to_ctlr(fcoe);
lport = ctlr->lp; lport = ctlr->lp;
if (unlikely(!lport)) { if (unlikely(!lport)) {
FCOE_NETDEV_DBG(netdev, "Cannot find hba structure"); FCOE_NETDEV_DBG(netdev, "Cannot find hba structure\n");
goto err2; goto err2;
} }
if (!lport->link_up) if (!lport->link_up)
goto err2; goto err2;
FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p " FCOE_NETDEV_DBG(netdev,
"data:%p tail:%p end:%p sum:%d dev:%s", "skb_info: len:%d data_len:%d head:%p data:%p tail:%p end:%p sum:%d dev:%s\n",
skb->len, skb->data_len, skb->head, skb->data, skb->len, skb->data_len, skb->head, skb->data,
skb_tail_pointer(skb), skb_end_pointer(skb), skb_tail_pointer(skb), skb_end_pointer(skb),
skb->csum, skb->dev ? skb->dev->name : "<NULL>"); skb->csum, skb->dev ? skb->dev->name : "<NULL>");
skb = skb_share_check(skb, GFP_ATOMIC);
if (skb == NULL)
return NET_RX_DROP;
eh = eth_hdr(skb); eh = eth_hdr(skb);
if (is_fip_mode(ctlr) && if (is_fip_mode(ctlr) &&
compare_ether_addr(eh->h_source, ctlr->dest_addr)) { !ether_addr_equal(eh->h_source, ctlr->dest_addr)) {
FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n", FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
eh->h_source); eh->h_source);
goto err; goto err;
@ -1540,13 +1547,13 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
wake_up_process(fps->thread); wake_up_process(fps->thread);
spin_unlock(&fps->fcoe_rx_list.lock); spin_unlock(&fps->fcoe_rx_list.lock);
return 0; return NET_RX_SUCCESS;
err: err:
per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++; per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++;
put_cpu(); put_cpu();
err2: err2:
kfree_skb(skb); kfree_skb(skb);
return -1; return NET_RX_DROP;
} }
/** /**
@ -1788,13 +1795,13 @@ static void fcoe_recv_frame(struct sk_buff *skb)
lport = fr->fr_dev; lport = fr->fr_dev;
if (unlikely(!lport)) { if (unlikely(!lport)) {
if (skb->destructor != fcoe_percpu_flush_done) if (skb->destructor != fcoe_percpu_flush_done)
FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb"); FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n");
kfree_skb(skb); kfree_skb(skb);
return; return;
} }
FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d " FCOE_NETDEV_DBG(skb->dev,
"head:%p data:%p tail:%p end:%p sum:%d dev:%s", "skb_info: len:%d data_len:%d head:%p data:%p tail:%p end:%p sum:%d dev:%s\n",
skb->len, skb->data_len, skb->len, skb->data_len,
skb->head, skb->data, skb_tail_pointer(skb), skb->head, skb->data, skb_tail_pointer(skb),
skb_end_pointer(skb), skb->csum, skb_end_pointer(skb), skb->csum,

View File

@ -160,74 +160,113 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
} }
EXPORT_SYMBOL(fcoe_ctlr_init); EXPORT_SYMBOL(fcoe_ctlr_init);
/**
* fcoe_sysfs_fcf_add() - Add a fcoe_fcf{,_device} to a fcoe_ctlr{,_device}
* @new: The newly discovered FCF
*
* Called with fip->ctlr_mutex held
*/
static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new) static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new)
{ {
struct fcoe_ctlr *fip = new->fip; struct fcoe_ctlr *fip = new->fip;
struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip); struct fcoe_ctlr_device *ctlr_dev;
struct fcoe_fcf_device temp, *fcf_dev; struct fcoe_fcf_device *temp, *fcf_dev;
int rc = 0; int rc = -ENOMEM;
LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n", LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
new->fabric_name, new->fcf_mac); new->fabric_name, new->fcf_mac);
mutex_lock(&ctlr_dev->lock); temp = kzalloc(sizeof(*temp), GFP_KERNEL);
if (!temp)
temp.fabric_name = new->fabric_name;
temp.switch_name = new->switch_name;
temp.fc_map = new->fc_map;
temp.vfid = new->vfid;
memcpy(temp.mac, new->fcf_mac, ETH_ALEN);
temp.priority = new->pri;
temp.fka_period = new->fka_period;
temp.selected = 0; /* default to unselected */
fcf_dev = fcoe_fcf_device_add(ctlr_dev, &temp);
if (unlikely(!fcf_dev)) {
rc = -ENOMEM;
goto out; goto out;
}
temp->fabric_name = new->fabric_name;
temp->switch_name = new->switch_name;
temp->fc_map = new->fc_map;
temp->vfid = new->vfid;
memcpy(temp->mac, new->fcf_mac, ETH_ALEN);
temp->priority = new->pri;
temp->fka_period = new->fka_period;
temp->selected = 0; /* default to unselected */
/* /*
* The fcoe_sysfs layer can return a CONNECTED fcf that * If ctlr_dev doesn't exist then it means we're a libfcoe user
* has a priv (fcf was never deleted) or a CONNECTED fcf * who doesn't use fcoe_syfs and didn't allocate a fcoe_ctlr_device.
* that doesn't have a priv (fcf was deleted). However, * fnic would be an example of a driver with this behavior. In this
* libfcoe will always delete FCFs before trying to add * case we want to add the fcoe_fcf to the fcoe_ctlr list, but we
* them. This is ensured because both recv_adv and * don't want to make sysfs changes.
* age_fcfs are protected by the the fcoe_ctlr's mutex.
* This means that we should never get a FCF with a
* non-NULL priv pointer.
*/ */
BUG_ON(fcf_dev->priv);
fcf_dev->priv = new; ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
new->fcf_dev = fcf_dev; if (ctlr_dev) {
mutex_lock(&ctlr_dev->lock);
fcf_dev = fcoe_fcf_device_add(ctlr_dev, temp);
if (unlikely(!fcf_dev)) {
rc = -ENOMEM;
mutex_unlock(&ctlr_dev->lock);
goto out;
}
/*
* The fcoe_sysfs layer can return a CONNECTED fcf that
* has a priv (fcf was never deleted) or a CONNECTED fcf
* that doesn't have a priv (fcf was deleted). However,
* libfcoe will always delete FCFs before trying to add
* them. This is ensured because both recv_adv and
* age_fcfs are protected by the the fcoe_ctlr's mutex.
* This means that we should never get a FCF with a
* non-NULL priv pointer.
*/
BUG_ON(fcf_dev->priv);
fcf_dev->priv = new;
new->fcf_dev = fcf_dev;
mutex_unlock(&ctlr_dev->lock);
}
list_add(&new->list, &fip->fcfs); list_add(&new->list, &fip->fcfs);
fip->fcf_count++; fip->fcf_count++;
rc = 0;
out: out:
mutex_unlock(&ctlr_dev->lock); kfree(temp);
return rc; return rc;
} }
/**
* fcoe_sysfs_fcf_del() - Remove a fcoe_fcf{,_device} to a fcoe_ctlr{,_device}
* @new: The FCF to be removed
*
* Called with fip->ctlr_mutex held
*/
static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new) static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
{ {
struct fcoe_ctlr *fip = new->fip; struct fcoe_ctlr *fip = new->fip;
struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip); struct fcoe_ctlr_device *cdev;
struct fcoe_fcf_device *fcf_dev; struct fcoe_fcf_device *fcf_dev;
list_del(&new->list); list_del(&new->list);
fip->fcf_count--; fip->fcf_count--;
mutex_lock(&ctlr_dev->lock); /*
* If ctlr_dev doesn't exist then it means we're a libfcoe user
fcf_dev = fcoe_fcf_to_fcf_dev(new); * who doesn't use fcoe_syfs and didn't allocate a fcoe_ctlr_device
WARN_ON(!fcf_dev); * or a fcoe_fcf_device.
new->fcf_dev = NULL; *
fcoe_fcf_device_delete(fcf_dev); * fnic would be an example of a driver with this behavior. In this
kfree(new); * case we want to remove the fcoe_fcf from the fcoe_ctlr list (above),
* but we don't want to make sysfs changes.
mutex_unlock(&ctlr_dev->lock); */
cdev = fcoe_ctlr_to_ctlr_dev(fip);
if (cdev) {
mutex_lock(&cdev->lock);
fcf_dev = fcoe_fcf_to_fcf_dev(new);
WARN_ON(!fcf_dev);
new->fcf_dev = NULL;
fcoe_fcf_device_delete(fcf_dev);
kfree(new);
mutex_unlock(&cdev->lock);
}
} }
/** /**
@ -300,7 +339,7 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
spin_unlock_bh(&fip->ctlr_lock); spin_unlock_bh(&fip->ctlr_lock);
sel = fip->sel_fcf; sel = fip->sel_fcf;
if (sel && !compare_ether_addr(sel->fcf_mac, fip->dest_addr)) if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))
goto unlock; goto unlock;
if (!is_zero_ether_addr(fip->dest_addr)) { if (!is_zero_ether_addr(fip->dest_addr)) {
printk(KERN_NOTICE "libfcoe: host%d: " printk(KERN_NOTICE "libfcoe: host%d: "
@ -1000,7 +1039,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
if (fcf->switch_name == new.switch_name && if (fcf->switch_name == new.switch_name &&
fcf->fabric_name == new.fabric_name && fcf->fabric_name == new.fabric_name &&
fcf->fc_map == new.fc_map && fcf->fc_map == new.fc_map &&
compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) { ether_addr_equal(fcf->fcf_mac, new.fcf_mac)) {
found = 1; found = 1;
break; break;
} }
@ -1340,7 +1379,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
mp = (struct fip_mac_desc *)desc; mp = (struct fip_mac_desc *)desc;
if (dlen < sizeof(*mp)) if (dlen < sizeof(*mp))
goto err; goto err;
if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac)) if (!ether_addr_equal(mp->fd_mac, fcf->fcf_mac))
goto err; goto err;
desc_mask &= ~BIT(FIP_DT_MAC); desc_mask &= ~BIT(FIP_DT_MAC);
break; break;
@ -1418,8 +1457,8 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
* 'port_id' is already validated, check MAC address and * 'port_id' is already validated, check MAC address and
* wwpn * wwpn
*/ */
if (compare_ether_addr(fip->get_src_addr(vn_port), if (!ether_addr_equal(fip->get_src_addr(vn_port),
vp->fd_mac) != 0 || vp->fd_mac) ||
get_unaligned_be64(&vp->fd_wwpn) != get_unaligned_be64(&vp->fd_wwpn) !=
vn_port->wwpn) vn_port->wwpn)
continue; continue;
@ -1453,6 +1492,9 @@ err:
*/ */
void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{ {
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
return;
skb_queue_tail(&fip->fip_recv_list, skb); skb_queue_tail(&fip->fip_recv_list, skb);
schedule_work(&fip->recv_work); schedule_work(&fip->recv_work);
} }
@ -1479,12 +1521,12 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
goto drop; goto drop;
eh = eth_hdr(skb); eh = eth_hdr(skb);
if (fip->mode == FIP_MODE_VN2VN) { if (fip->mode == FIP_MODE_VN2VN) {
if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) && if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) &&
compare_ether_addr(eh->h_dest, fcoe_all_vn2vn) && !ether_addr_equal(eh->h_dest, fcoe_all_vn2vn) &&
compare_ether_addr(eh->h_dest, fcoe_all_p2p)) !ether_addr_equal(eh->h_dest, fcoe_all_p2p))
goto drop; goto drop;
} else if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) && } else if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) &&
compare_ether_addr(eh->h_dest, fcoe_all_enode)) !ether_addr_equal(eh->h_dest, fcoe_all_enode))
goto drop; goto drop;
fiph = (struct fip_header *)skb->data; fiph = (struct fip_header *)skb->data;
op = ntohs(fiph->fip_op); op = ntohs(fiph->fip_op);
@ -1856,7 +1898,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport,
* address_mode flag to use FC_OUI-based Ethernet DA. * address_mode flag to use FC_OUI-based Ethernet DA.
* Otherwise we use the FCoE gateway addr * Otherwise we use the FCoE gateway addr
*/ */
if (!compare_ether_addr(sa, (u8[6])FC_FCOE_FLOGI_MAC)) { if (ether_addr_equal(sa, (u8[6])FC_FCOE_FLOGI_MAC)) {
fcoe_ctlr_map_dest(fip); fcoe_ctlr_map_dest(fip);
} else { } else {
memcpy(fip->dest_addr, sa, ETH_ALEN); memcpy(fip->dest_addr, sa, ETH_ALEN);
@ -2825,8 +2867,8 @@ unlock:
* disabled, so that should ensure that this routine is only called * disabled, so that should ensure that this routine is only called
* when nothing is happening. * when nothing is happening.
*/ */
void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip, static void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip,
enum fip_state fip_mode) enum fip_state fip_mode)
{ {
void *priv; void *priv;

View File

@ -300,29 +300,29 @@ static ssize_t store_ctlr_mode(struct device *dev,
switch (ctlr->enabled) { switch (ctlr->enabled) {
case FCOE_CTLR_ENABLED: case FCOE_CTLR_ENABLED:
LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled."); LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.\n");
return -EBUSY; return -EBUSY;
case FCOE_CTLR_DISABLED: case FCOE_CTLR_DISABLED:
if (!ctlr->f->set_fcoe_ctlr_mode) { if (!ctlr->f->set_fcoe_ctlr_mode) {
LIBFCOE_SYSFS_DBG(ctlr, LIBFCOE_SYSFS_DBG(ctlr,
"Mode change not supported by LLD."); "Mode change not supported by LLD.\n");
return -ENOTSUPP; return -ENOTSUPP;
} }
ctlr->mode = fcoe_parse_mode(mode); ctlr->mode = fcoe_parse_mode(mode);
if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) { if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) {
LIBFCOE_SYSFS_DBG(ctlr, LIBFCOE_SYSFS_DBG(ctlr, "Unknown mode %s provided.\n",
"Unknown mode %s provided.", buf); buf);
return -EINVAL; return -EINVAL;
} }
ctlr->f->set_fcoe_ctlr_mode(ctlr); ctlr->f->set_fcoe_ctlr_mode(ctlr);
LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.", buf); LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.\n", buf);
return count; return count;
case FCOE_CTLR_UNUSED: case FCOE_CTLR_UNUSED:
default: default:
LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported."); LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.\n");
return -ENOTSUPP; return -ENOTSUPP;
}; };
} }
@ -657,7 +657,7 @@ static int fcoe_fcf_device_match(struct fcoe_fcf_device *new,
if (new->switch_name == old->switch_name && if (new->switch_name == old->switch_name &&
new->fabric_name == old->fabric_name && new->fabric_name == old->fabric_name &&
new->fc_map == old->fc_map && new->fc_map == old->fc_map &&
compare_ether_addr(new->mac, old->mac) == 0) ether_addr_equal(new->mac, old->mac))
return 1; return 1;
return 0; return 0;
} }

View File

@ -27,6 +27,7 @@
#include "fnic_io.h" #include "fnic_io.h"
#include "fnic_res.h" #include "fnic_res.h"
#include "fnic_trace.h" #include "fnic_trace.h"
#include "fnic_stats.h"
#include "vnic_dev.h" #include "vnic_dev.h"
#include "vnic_wq.h" #include "vnic_wq.h"
#include "vnic_rq.h" #include "vnic_rq.h"
@ -38,7 +39,7 @@
#define DRV_NAME "fnic" #define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
#define DRV_VERSION "1.5.0.23" #define DRV_VERSION "1.5.0.45"
#define PFX DRV_NAME ": " #define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: " #define DFX DRV_NAME "%d: "
@ -232,6 +233,13 @@ struct fnic {
unsigned int wq_count; unsigned int wq_count;
unsigned int cq_count; unsigned int cq_count;
struct dentry *fnic_stats_debugfs_host;
struct dentry *fnic_stats_debugfs_file;
struct dentry *fnic_reset_debugfs_file;
unsigned int reset_stats;
atomic64_t io_cmpl_skip;
struct fnic_stats fnic_stats;
u32 vlan_hw_insert:1; /* let hw insert the tag */ u32 vlan_hw_insert:1; /* let hw insert the tag */
u32 in_remove:1; /* fnic device in removal */ u32 in_remove:1; /* fnic device in removal */
u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */ u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */

View File

@ -23,6 +23,58 @@
static struct dentry *fnic_trace_debugfs_root; static struct dentry *fnic_trace_debugfs_root;
static struct dentry *fnic_trace_debugfs_file; static struct dentry *fnic_trace_debugfs_file;
static struct dentry *fnic_trace_enable; static struct dentry *fnic_trace_enable;
static struct dentry *fnic_stats_debugfs_root;
/*
* fnic_debugfs_init - Initialize debugfs for fnic debug logging
*
* Description:
* When Debugfs is configured this routine sets up the fnic debugfs
* file system. If not already created, this routine will create the
* fnic directory and statistics directory for trace buffer and
* stats logging.
*/
int fnic_debugfs_init(void)
{
int rc = -1;
fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
if (!fnic_trace_debugfs_root) {
printk(KERN_DEBUG "Cannot create debugfs root\n");
return rc;
}
if (!fnic_trace_debugfs_root) {
printk(KERN_DEBUG
"fnic root directory doesn't exist in debugfs\n");
return rc;
}
fnic_stats_debugfs_root = debugfs_create_dir("statistics",
fnic_trace_debugfs_root);
if (!fnic_stats_debugfs_root) {
printk(KERN_DEBUG "Cannot create Statistics directory\n");
return rc;
}
rc = 0;
return rc;
}
/*
* fnic_debugfs_terminate - Tear down debugfs infrastructure
*
* Description:
* When Debugfs is configured this routine removes debugfs file system
* elements that are specific to fnic.
*/
void fnic_debugfs_terminate(void)
{
debugfs_remove(fnic_stats_debugfs_root);
fnic_stats_debugfs_root = NULL;
debugfs_remove(fnic_trace_debugfs_root);
fnic_trace_debugfs_root = NULL;
}
/* /*
* fnic_trace_ctrl_open - Open the trace_enable file * fnic_trace_ctrl_open - Open the trace_enable file
@ -241,16 +293,16 @@ static const struct file_operations fnic_trace_debugfs_fops = {
* Description: * Description:
* When Debugfs is configured this routine sets up the fnic debugfs * When Debugfs is configured this routine sets up the fnic debugfs
* file system. If not already created, this routine will create the * file system. If not already created, this routine will create the
* fnic directory. It will create file trace to log fnic trace buffer * create file trace to log fnic trace buffer output into debugfs and
* output into debugfs and it will also create file trace_enable to * it will also create file trace_enable to control enable/disable of
* control enable/disable of trace logging into trace buffer. * trace logging into trace buffer.
*/ */
int fnic_trace_debugfs_init(void) int fnic_trace_debugfs_init(void)
{ {
int rc = -1; int rc = -1;
fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
if (!fnic_trace_debugfs_root) { if (!fnic_trace_debugfs_root) {
printk(KERN_DEBUG "Cannot create debugfs root\n"); printk(KERN_DEBUG
"FNIC Debugfs root directory doesn't exist\n");
return rc; return rc;
} }
fnic_trace_enable = debugfs_create_file("tracing_enable", fnic_trace_enable = debugfs_create_file("tracing_enable",
@ -259,8 +311,8 @@ int fnic_trace_debugfs_init(void)
NULL, &fnic_trace_ctrl_fops); NULL, &fnic_trace_ctrl_fops);
if (!fnic_trace_enable) { if (!fnic_trace_enable) {
printk(KERN_DEBUG "Cannot create trace_enable file" printk(KERN_DEBUG
" under debugfs"); "Cannot create trace_enable file under debugfs\n");
return rc; return rc;
} }
@ -271,7 +323,8 @@ int fnic_trace_debugfs_init(void)
&fnic_trace_debugfs_fops); &fnic_trace_debugfs_fops);
if (!fnic_trace_debugfs_file) { if (!fnic_trace_debugfs_file) {
printk(KERN_DEBUG "Cannot create trace file under debugfs"); printk(KERN_DEBUG
"Cannot create trace file under debugfs\n");
return rc; return rc;
} }
rc = 0; rc = 0;
@ -295,8 +348,323 @@ void fnic_trace_debugfs_terminate(void)
debugfs_remove(fnic_trace_enable); debugfs_remove(fnic_trace_enable);
fnic_trace_enable = NULL; fnic_trace_enable = NULL;
} }
if (fnic_trace_debugfs_root) { }
debugfs_remove(fnic_trace_debugfs_root);
fnic_trace_debugfs_root = NULL; /*
} * fnic_reset_stats_open - Open the reset_stats file
* @inode: The inode pointer.
* @file: The file pointer to attach the stats reset flag.
*
* Description:
* This routine opens a debugsfs file reset_stats and stores i_private data
* to debug structure to retrieve later for while performing other
* file oprations.
*
* Returns:
* This function returns zero if successful.
*/
static int fnic_reset_stats_open(struct inode *inode, struct file *file)
{
struct stats_debug_info *debug;
debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL);
if (!debug)
return -ENOMEM;
debug->i_private = inode->i_private;
file->private_data = debug;
return 0;
}
/*
* fnic_reset_stats_read - Read a reset_stats debugfs file
* @filp: The file pointer to read from.
* @ubuf: The buffer to copy the data to.
* @cnt: The number of bytes to read.
* @ppos: The position in the file to start reading from.
*
* Description:
* This routine reads value of variable reset_stats
* and stores into local @buf. It will start reading file at @ppos and
* copy up to @cnt of data to @ubuf from @buf.
*
* Returns:
* This function returns the amount of data that was read.
*/
static ssize_t fnic_reset_stats_read(struct file *file,
char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct stats_debug_info *debug = file->private_data;
struct fnic *fnic = (struct fnic *)debug->i_private;
char buf[64];
int len;
len = sprintf(buf, "%u\n", fnic->reset_stats);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}
/*
* fnic_reset_stats_write - Write to reset_stats debugfs file
* @filp: The file pointer to write from.
* @ubuf: The buffer to copy the data from.
* @cnt: The number of bytes to write.
* @ppos: The position in the file to start writing to.
*
* Description:
* This routine writes data from user buffer @ubuf to buffer @buf and
* resets cumulative stats of fnic.
*
* Returns:
* This function returns the amount of data that was written.
*/
static ssize_t fnic_reset_stats_write(struct file *file,
const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct stats_debug_info *debug = file->private_data;
struct fnic *fnic = (struct fnic *)debug->i_private;
struct fnic_stats *stats = &fnic->fnic_stats;
u64 *io_stats_p = (u64 *)&stats->io_stats;
u64 *fw_stats_p = (u64 *)&stats->fw_stats;
char buf[64];
unsigned long val;
int ret;
if (cnt >= sizeof(buf))
return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
ret = kstrtoul(buf, 10, &val);
if (ret < 0)
return ret;
fnic->reset_stats = val;
if (fnic->reset_stats) {
/* Skip variable is used to avoid descrepancies to Num IOs
* and IO Completions stats. Skip incrementing No IO Compls
* for pending active IOs after reset stats
*/
atomic64_set(&fnic->io_cmpl_skip,
atomic64_read(&stats->io_stats.active_ios));
memset(&stats->abts_stats, 0, sizeof(struct abort_stats));
memset(&stats->term_stats, 0,
sizeof(struct terminate_stats));
memset(&stats->reset_stats, 0, sizeof(struct reset_stats));
memset(&stats->misc_stats, 0, sizeof(struct misc_stats));
memset(&stats->vlan_stats, 0, sizeof(struct vlan_stats));
memset(io_stats_p+1, 0,
sizeof(struct io_path_stats) - sizeof(u64));
memset(fw_stats_p+1, 0,
sizeof(struct fw_stats) - sizeof(u64));
}
(*ppos)++;
return cnt;
}
/*
* fnic_reset_stats_release - Release the buffer used to store
* debugfs file data
* @inode: The inode pointer
* @file: The file pointer that contains the buffer to release
*
* Description:
* This routine frees the buffer that was allocated when the debugfs
* file was opened.
*
* Returns:
* This function returns zero.
*/
static int fnic_reset_stats_release(struct inode *inode,
struct file *file)
{
struct stats_debug_info *debug = file->private_data;
kfree(debug);
return 0;
}
/*
* fnic_stats_debugfs_open - Open the stats file for specific host
* and get fnic stats.
* @inode: The inode pointer.
* @file: The file pointer to attach the specific host statistics.
*
* Description:
* This routine opens a debugsfs file stats of specific host and print
* fnic stats.
*
* Returns:
* This function returns zero if successful.
*/
static int fnic_stats_debugfs_open(struct inode *inode,
struct file *file)
{
struct fnic *fnic = inode->i_private;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct stats_debug_info *debug;
int buf_size = 2 * PAGE_SIZE;
debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL);
if (!debug)
return -ENOMEM;
debug->debug_buffer = vmalloc(buf_size);
if (!debug->debug_buffer) {
kfree(debug);
return -ENOMEM;
}
debug->buf_size = buf_size;
memset((void *)debug->debug_buffer, 0, buf_size);
debug->buffer_len = fnic_get_stats_data(debug, fnic_stats);
file->private_data = debug;
return 0;
}
/*
* fnic_stats_debugfs_read - Read a debugfs file
* @file: The file pointer to read from.
* @ubuf: The buffer to copy the data to.
* @nbytes: The number of bytes to read.
* @pos: The position in the file to start reading from.
*
* Description:
* This routine reads data from the buffer indicated in the private_data
* field of @file. It will start reading at @pos and copy up to @nbytes of
* data to @ubuf.
*
* Returns:
* This function returns the amount of data that was read (this could be
* less than @nbytes if the end of the file was reached).
*/
static ssize_t fnic_stats_debugfs_read(struct file *file,
char __user *ubuf,
size_t nbytes,
loff_t *pos)
{
struct stats_debug_info *debug = file->private_data;
int rc = 0;
rc = simple_read_from_buffer(ubuf, nbytes, pos,
debug->debug_buffer,
debug->buffer_len);
return rc;
}
/*
* fnic_stats_stats_release - Release the buffer used to store
* debugfs file data
* @inode: The inode pointer
* @file: The file pointer that contains the buffer to release
*
* Description:
* This routine frees the buffer that was allocated when the debugfs
* file was opened.
*
* Returns:
* This function returns zero.
*/
static int fnic_stats_debugfs_release(struct inode *inode,
struct file *file)
{
struct stats_debug_info *debug = file->private_data;
vfree(debug->debug_buffer);
kfree(debug);
return 0;
}
static const struct file_operations fnic_stats_debugfs_fops = {
.owner = THIS_MODULE,
.open = fnic_stats_debugfs_open,
.read = fnic_stats_debugfs_read,
.release = fnic_stats_debugfs_release,
};
static const struct file_operations fnic_reset_debugfs_fops = {
.owner = THIS_MODULE,
.open = fnic_reset_stats_open,
.read = fnic_reset_stats_read,
.write = fnic_reset_stats_write,
.release = fnic_reset_stats_release,
};
/*
* fnic_stats_init - Initialize stats struct and create stats file per fnic
*
* Description:
* When Debugfs is configured this routine sets up the stats file per fnic
* It will create file stats and reset_stats under statistics/host# directory
* to log per fnic stats.
*/
int fnic_stats_debugfs_init(struct fnic *fnic)
{
int rc = -1;
char name[16];
snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no);
if (!fnic_stats_debugfs_root) {
printk(KERN_DEBUG "fnic_stats root doesn't exist\n");
return rc;
}
fnic->fnic_stats_debugfs_host = debugfs_create_dir(name,
fnic_stats_debugfs_root);
if (!fnic->fnic_stats_debugfs_host) {
printk(KERN_DEBUG "Cannot create host directory\n");
return rc;
}
fnic->fnic_stats_debugfs_file = debugfs_create_file("stats",
S_IFREG|S_IRUGO|S_IWUSR,
fnic->fnic_stats_debugfs_host,
fnic,
&fnic_stats_debugfs_fops);
if (!fnic->fnic_stats_debugfs_file) {
printk(KERN_DEBUG "Cannot create host stats file\n");
return rc;
}
fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats",
S_IFREG|S_IRUGO|S_IWUSR,
fnic->fnic_stats_debugfs_host,
fnic,
&fnic_reset_debugfs_fops);
if (!fnic->fnic_reset_debugfs_file) {
printk(KERN_DEBUG "Cannot create host stats file\n");
return rc;
}
rc = 0;
return rc;
}
/*
* fnic_stats_debugfs_remove - Tear down debugfs infrastructure of stats
*
* Description:
* When Debugfs is configured this routine removes debugfs file system
* elements that are specific to fnic stats.
*/
void fnic_stats_debugfs_remove(struct fnic *fnic)
{
if (!fnic)
return;
debugfs_remove(fnic->fnic_stats_debugfs_file);
fnic->fnic_stats_debugfs_file = NULL;
debugfs_remove(fnic->fnic_reset_debugfs_file);
fnic->fnic_reset_debugfs_file = NULL;
debugfs_remove(fnic->fnic_stats_debugfs_host);
fnic->fnic_stats_debugfs_host = NULL;
} }

View File

@ -302,6 +302,7 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
static void fnic_fcoe_send_vlan_req(struct fnic *fnic) static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{ {
struct fcoe_ctlr *fip = &fnic->ctlr; struct fcoe_ctlr *fip = &fnic->ctlr;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct sk_buff *skb; struct sk_buff *skb;
char *eth_fr; char *eth_fr;
int fr_len; int fr_len;
@ -337,6 +338,7 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW; vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn); put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);
skb_put(skb, sizeof(*vlan)); skb_put(skb, sizeof(*vlan));
skb->protocol = htons(ETH_P_FIP); skb->protocol = htons(ETH_P_FIP);
@ -354,6 +356,7 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
struct fcoe_ctlr *fip = &fnic->ctlr; struct fcoe_ctlr *fip = &fnic->ctlr;
struct fip_header *fiph; struct fip_header *fiph;
struct fip_desc *desc; struct fip_desc *desc;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
u16 vid; u16 vid;
size_t rlen; size_t rlen;
size_t dlen; size_t dlen;
@ -402,6 +405,7 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
/* any VLAN descriptors present ? */ /* any VLAN descriptors present ? */
if (list_empty(&fnic->vlans)) { if (list_empty(&fnic->vlans)) {
/* retry from timer */ /* retry from timer */
atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
"No VLAN descriptors in FIP VLAN response\n"); "No VLAN descriptors in FIP VLAN response\n");
spin_unlock_irqrestore(&fnic->vlans_lock, flags); spin_unlock_irqrestore(&fnic->vlans_lock, flags);
@ -533,6 +537,7 @@ drop:
void fnic_handle_fip_frame(struct work_struct *work) void fnic_handle_fip_frame(struct work_struct *work)
{ {
struct fnic *fnic = container_of(work, struct fnic, fip_frame_work); struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
unsigned long flags; unsigned long flags;
struct sk_buff *skb; struct sk_buff *skb;
struct ethhdr *eh; struct ethhdr *eh;
@ -567,6 +572,8 @@ void fnic_handle_fip_frame(struct work_struct *work)
* fcf's & restart from scratch * fcf's & restart from scratch
*/ */
if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) { if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
atomic64_inc(
&fnic_stats->vlan_stats.flogi_rejects);
shost_printk(KERN_INFO, fnic->lport->host, shost_printk(KERN_INFO, fnic->lport->host,
"Trigger a Link down - VLAN Disc\n"); "Trigger a Link down - VLAN Disc\n");
fcoe_ctlr_link_down(&fnic->ctlr); fcoe_ctlr_link_down(&fnic->ctlr);
@ -651,13 +658,13 @@ void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
if (is_zero_ether_addr(new)) if (is_zero_ether_addr(new))
new = ctl; new = ctl;
if (!compare_ether_addr(data, new)) if (ether_addr_equal(data, new))
return; return;
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new); FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl)) if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
vnic_dev_del_addr(fnic->vdev, data); vnic_dev_del_addr(fnic->vdev, data);
memcpy(data, new, ETH_ALEN); memcpy(data, new, ETH_ALEN);
if (compare_ether_addr(new, ctl)) if (!ether_addr_equal(new, ctl))
vnic_dev_add_addr(fnic->vdev, new); vnic_dev_add_addr(fnic->vdev, new);
} }
@ -753,6 +760,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
struct fnic *fnic = vnic_dev_priv(rq->vdev); struct fnic *fnic = vnic_dev_priv(rq->vdev);
struct sk_buff *skb; struct sk_buff *skb;
struct fc_frame *fp; struct fc_frame *fp;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
unsigned int eth_hdrs_stripped; unsigned int eth_hdrs_stripped;
u8 type, color, eop, sop, ingress_port, vlan_stripped; u8 type, color, eop, sop, ingress_port, vlan_stripped;
u8 fcoe = 0, fcoe_sof, fcoe_eof; u8 fcoe = 0, fcoe_sof, fcoe_eof;
@ -803,6 +811,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
eth_hdrs_stripped = 0; eth_hdrs_stripped = 0;
skb_trim(skb, bytes_written); skb_trim(skb, bytes_written);
if (!fcs_ok) { if (!fcs_ok) {
atomic64_inc(&fnic_stats->misc_stats.frame_errors);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"fcs error. dropping packet.\n"); "fcs error. dropping packet.\n");
goto drop; goto drop;
@ -818,6 +827,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
} }
if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) { if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
atomic64_inc(&fnic_stats->misc_stats.frame_errors);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"fnic rq_cmpl fcoe x%x fcsok x%x" "fnic rq_cmpl fcoe x%x fcsok x%x"
" pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err" " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
@ -1205,6 +1215,7 @@ void fnic_handle_fip_timer(struct fnic *fnic)
{ {
unsigned long flags; unsigned long flags;
struct fcoe_vlan *vlan; struct fcoe_vlan *vlan;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
u64 sol_time; u64 sol_time;
spin_lock_irqsave(&fnic->fnic_lock, flags); spin_lock_irqsave(&fnic->fnic_lock, flags);
@ -1273,6 +1284,7 @@ void fnic_handle_fip_timer(struct fnic *fnic)
vlan->state = FIP_VLAN_SENT; /* sent now */ vlan->state = FIP_VLAN_SENT; /* sent now */
} }
spin_unlock_irqrestore(&fnic->vlans_lock, flags); spin_unlock_irqrestore(&fnic->vlans_lock, flags);
atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
vlan->sol_count++; vlan->sol_count++;
sol_time = jiffies + msecs_to_jiffies sol_time = jiffies + msecs_to_jiffies
(FCOE_CTLR_START_DELAY); (FCOE_CTLR_START_DELAY);

View File

@ -37,6 +37,9 @@ static irqreturn_t fnic_isr_legacy(int irq, void *data)
if (!pba) if (!pba)
return IRQ_NONE; return IRQ_NONE;
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
if (pba & (1 << FNIC_INTX_NOTIFY)) { if (pba & (1 << FNIC_INTX_NOTIFY)) {
vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]); vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]);
fnic_handle_link_event(fnic); fnic_handle_link_event(fnic);
@ -66,6 +69,9 @@ static irqreturn_t fnic_isr_msi(int irq, void *data)
struct fnic *fnic = data; struct fnic *fnic = data;
unsigned long work_done = 0; unsigned long work_done = 0;
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
work_done += fnic_wq_copy_cmpl_handler(fnic, -1); work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
work_done += fnic_wq_cmpl_handler(fnic, -1); work_done += fnic_wq_cmpl_handler(fnic, -1);
work_done += fnic_rq_cmpl_handler(fnic, -1); work_done += fnic_rq_cmpl_handler(fnic, -1);
@ -83,6 +89,9 @@ static irqreturn_t fnic_isr_msix_rq(int irq, void *data)
struct fnic *fnic = data; struct fnic *fnic = data;
unsigned long rq_work_done = 0; unsigned long rq_work_done = 0;
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
rq_work_done = fnic_rq_cmpl_handler(fnic, -1); rq_work_done = fnic_rq_cmpl_handler(fnic, -1);
vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ], vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ],
rq_work_done, rq_work_done,
@ -97,6 +106,9 @@ static irqreturn_t fnic_isr_msix_wq(int irq, void *data)
struct fnic *fnic = data; struct fnic *fnic = data;
unsigned long wq_work_done = 0; unsigned long wq_work_done = 0;
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
wq_work_done = fnic_wq_cmpl_handler(fnic, -1); wq_work_done = fnic_wq_cmpl_handler(fnic, -1);
vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ], vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ],
wq_work_done, wq_work_done,
@ -110,6 +122,9 @@ static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
struct fnic *fnic = data; struct fnic *fnic = data;
unsigned long wq_copy_work_done = 0; unsigned long wq_copy_work_done = 0;
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1); wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1);
vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY], vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
wq_copy_work_done, wq_copy_work_done,
@ -122,6 +137,9 @@ static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data)
{ {
struct fnic *fnic = data; struct fnic *fnic = data;
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]); vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);
fnic_log_q_error(fnic); fnic_log_q_error(fnic);
fnic_handle_link_event(fnic); fnic_handle_link_event(fnic);

View File

@ -556,6 +556,13 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
host->transportt = fnic_fc_transport; host->transportt = fnic_fc_transport;
err = fnic_stats_debugfs_init(fnic);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Failed to initialize debugfs for stats\n");
fnic_stats_debugfs_remove(fnic);
}
/* Setup PCI resources */ /* Setup PCI resources */
pci_set_drvdata(pdev, fnic); pci_set_drvdata(pdev, fnic);
@ -917,6 +924,7 @@ err_out_release_regions:
err_out_disable_device: err_out_disable_device:
pci_disable_device(pdev); pci_disable_device(pdev);
err_out_free_hba: err_out_free_hba:
fnic_stats_debugfs_remove(fnic);
scsi_host_put(lp->host); scsi_host_put(lp->host);
err_out: err_out:
return err; return err;
@ -969,6 +977,7 @@ static void fnic_remove(struct pci_dev *pdev)
fcoe_ctlr_destroy(&fnic->ctlr); fcoe_ctlr_destroy(&fnic->ctlr);
fc_lport_destroy(lp); fc_lport_destroy(lp);
fnic_stats_debugfs_remove(fnic);
/* /*
* This stops the fnic device, masks all interrupts. Completed * This stops the fnic device, masks all interrupts. Completed
@ -1014,6 +1023,14 @@ static int __init fnic_init_module(void)
printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION); printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
/* Create debugfs entries for fnic */
err = fnic_debugfs_init();
if (err < 0) {
printk(KERN_ERR PFX "Failed to create fnic directory "
"for tracing and stats logging\n");
fnic_debugfs_terminate();
}
/* Allocate memory for trace buffer */ /* Allocate memory for trace buffer */
err = fnic_trace_buf_init(); err = fnic_trace_buf_init();
if (err < 0) { if (err < 0) {
@ -1102,6 +1119,7 @@ err_create_fnic_sgl_slab_max:
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt: err_create_fnic_sgl_slab_dflt:
fnic_trace_free(); fnic_trace_free();
fnic_debugfs_terminate();
return err; return err;
} }
@ -1118,6 +1136,7 @@ static void __exit fnic_cleanup_module(void)
kmem_cache_destroy(fnic_io_req_cache); kmem_cache_destroy(fnic_io_req_cache);
fc_release_transport(fnic_fc_transport); fc_release_transport(fnic_fc_transport);
fnic_trace_free(); fnic_trace_free();
fnic_debugfs_terminate();
} }
module_init(fnic_init_module); module_init(fnic_init_module);

View File

@ -226,15 +226,23 @@ int fnic_fw_reset_handler(struct fnic *fnic)
if (!vnic_wq_copy_desc_avail(wq)) if (!vnic_wq_copy_desc_avail(wq))
ret = -EAGAIN; ret = -EAGAIN;
else else {
fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG); fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
atomic64_read(
&fnic->fnic_stats.fw_stats.active_fw_reqs));
}
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
if (!ret) if (!ret) {
atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Issued fw reset\n"); "Issued fw reset\n");
else { } else {
fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET); fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Failed to issue fw reset\n"); "Failed to issue fw reset\n");
@ -291,6 +299,12 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
fc_id, fnic->ctlr.map_dest, gw_mac); fc_id, fnic->ctlr.map_dest, gw_mac);
} }
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
flogi_reg_ioreq_end: flogi_reg_ioreq_end:
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
return ret; return ret;
@ -310,6 +324,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
struct fc_rport_libfc_priv *rp = rport->dd_data; struct fc_rport_libfc_priv *rp = rport->dd_data;
struct host_sg_desc *desc; struct host_sg_desc *desc;
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
u8 pri_tag = 0; u8 pri_tag = 0;
unsigned int i; unsigned int i;
unsigned long intr_flags; unsigned long intr_flags;
@ -358,6 +373,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"fnic_queue_wq_copy_desc failure - no descriptors\n"); "fnic_queue_wq_copy_desc failure - no descriptors\n");
atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
return SCSI_MLQUEUE_HOST_BUSY; return SCSI_MLQUEUE_HOST_BUSY;
} }
@ -386,6 +402,12 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
rport->maxframe_size, rp->r_a_tov, rport->maxframe_size, rp->r_a_tov,
rp->e_d_tov); rp->e_d_tov);
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
return 0; return 0;
} }
@ -401,6 +423,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
struct fc_rport *rport; struct fc_rport *rport;
struct fnic_io_req *io_req = NULL; struct fnic_io_req *io_req = NULL;
struct fnic *fnic = lport_priv(lp); struct fnic *fnic = lport_priv(lp);
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct vnic_wq_copy *wq; struct vnic_wq_copy *wq;
int ret; int ret;
u64 cmd_trace; u64 cmd_trace;
@ -414,6 +437,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
rport = starget_to_rport(scsi_target(sc->device)); rport = starget_to_rport(scsi_target(sc->device));
ret = fc_remote_port_chkready(rport); ret = fc_remote_port_chkready(rport);
if (ret) { if (ret) {
atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
sc->result = ret; sc->result = ret;
done(sc); done(sc);
return 0; return 0;
@ -436,6 +460,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
/* Get a new io_req for this SCSI IO */ /* Get a new io_req for this SCSI IO */
io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
if (!io_req) { if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.alloc_failures);
ret = SCSI_MLQUEUE_HOST_BUSY; ret = SCSI_MLQUEUE_HOST_BUSY;
goto out; goto out;
} }
@ -462,6 +487,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type], mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
GFP_ATOMIC); GFP_ATOMIC);
if (!io_req->sgl_list) { if (!io_req->sgl_list) {
atomic64_inc(&fnic_stats->io_stats.alloc_failures);
ret = SCSI_MLQUEUE_HOST_BUSY; ret = SCSI_MLQUEUE_HOST_BUSY;
scsi_dma_unmap(sc); scsi_dma_unmap(sc);
mempool_free(io_req, fnic->io_req_pool); mempool_free(io_req, fnic->io_req_pool);
@ -509,6 +535,13 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
mempool_free(io_req, fnic->io_req_pool); mempool_free(io_req, fnic->io_req_pool);
} }
} else { } else {
atomic64_inc(&fnic_stats->io_stats.active_ios);
atomic64_inc(&fnic_stats->io_stats.num_ios);
if (atomic64_read(&fnic_stats->io_stats.active_ios) >
atomic64_read(&fnic_stats->io_stats.max_active_ios))
atomic64_set(&fnic_stats->io_stats.max_active_ios,
atomic64_read(&fnic_stats->io_stats.active_ios));
/* REVISIT: Use per IO lock in the final code */ /* REVISIT: Use per IO lock in the final code */
CMD_FLAGS(sc) |= FNIC_IO_ISSUED; CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
} }
@ -542,12 +575,18 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
struct fcpio_tag tag; struct fcpio_tag tag;
int ret = 0; int ret = 0;
unsigned long flags; unsigned long flags;
struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
atomic64_inc(&reset_stats->fw_reset_completions);
/* Clean up all outstanding io requests */ /* Clean up all outstanding io requests */
fnic_cleanup_io(fnic, SCSI_NO_TAG); fnic_cleanup_io(fnic, SCSI_NO_TAG);
atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
spin_lock_irqsave(&fnic->fnic_lock, flags); spin_lock_irqsave(&fnic->fnic_lock, flags);
/* fnic should be in FC_TRANS_ETH_MODE */ /* fnic should be in FC_TRANS_ETH_MODE */
@ -571,6 +610,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
* reset the firmware. Free the cached flogi * reset the firmware. Free the cached flogi
*/ */
fnic->state = FNIC_IN_FC_MODE; fnic->state = FNIC_IN_FC_MODE;
atomic64_inc(&reset_stats->fw_reset_failures);
ret = -1; ret = -1;
} }
} else { } else {
@ -578,6 +618,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
fnic->lport->host, fnic->lport->host,
"Unexpected state %s while processing" "Unexpected state %s while processing"
" reset cmpl\n", fnic_state_to_str(fnic->state)); " reset cmpl\n", fnic_state_to_str(fnic->state));
atomic64_inc(&reset_stats->fw_reset_failures);
ret = -1; ret = -1;
} }
@ -701,10 +742,14 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count]; wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
if (is_ack_index_in_range(wq, request_out)) { if (is_ack_index_in_range(wq, request_out)) {
fnic->fw_ack_index[0] = request_out; fnic->fw_ack_index[0] = request_out;
fnic->fw_ack_recd[0] = 1; fnic->fw_ack_recd[0] = 1;
} } else
atomic64_inc(
&fnic->fnic_stats.misc_stats.ack_index_out_of_range);
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
FNIC_TRACE(fnic_fcpio_ack_handler, FNIC_TRACE(fnic_fcpio_ack_handler,
fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3], fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
@ -726,6 +771,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
struct fcpio_icmnd_cmpl *icmnd_cmpl; struct fcpio_icmnd_cmpl *icmnd_cmpl;
struct fnic_io_req *io_req; struct fnic_io_req *io_req;
struct scsi_cmnd *sc; struct scsi_cmnd *sc;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
unsigned long flags; unsigned long flags;
spinlock_t *io_lock; spinlock_t *io_lock;
u64 cmd_trace; u64 cmd_trace;
@ -746,6 +792,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
sc = scsi_host_find_tag(fnic->lport->host, id); sc = scsi_host_find_tag(fnic->lport->host, id);
WARN_ON_ONCE(!sc); WARN_ON_ONCE(!sc);
if (!sc) { if (!sc) {
atomic64_inc(&fnic_stats->io_stats.sc_null);
shost_printk(KERN_ERR, fnic->lport->host, shost_printk(KERN_ERR, fnic->lport->host,
"icmnd_cmpl sc is null - " "icmnd_cmpl sc is null - "
"hdr status = %s tag = 0x%x desc = 0x%p\n", "hdr status = %s tag = 0x%x desc = 0x%p\n",
@ -766,6 +813,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
io_req = (struct fnic_io_req *)CMD_SP(sc); io_req = (struct fnic_io_req *)CMD_SP(sc);
WARN_ON_ONCE(!io_req); WARN_ON_ONCE(!io_req);
if (!io_req) { if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL; CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
spin_unlock_irqrestore(io_lock, flags); spin_unlock_irqrestore(io_lock, flags);
shost_printk(KERN_ERR, fnic->lport->host, shost_printk(KERN_ERR, fnic->lport->host,
@ -824,31 +872,54 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
xfer_len -= icmnd_cmpl->residual; xfer_len -= icmnd_cmpl->residual;
if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
break; break;
case FCPIO_TIMEOUT: /* request was timed out */ case FCPIO_TIMEOUT: /* request was timed out */
atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status; sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
break; break;
case FCPIO_ABORTED: /* request was aborted */ case FCPIO_ABORTED: /* request was aborted */
atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
break; break;
case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */ case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
scsi_set_resid(sc, icmnd_cmpl->residual); scsi_set_resid(sc, icmnd_cmpl->residual);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
break; break;
case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */ case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */
atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status; sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
break; break;
case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */
atomic64_inc(&fnic_stats->io_stats.io_not_found);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
break;
case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */
atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
break;
case FCPIO_FW_ERR: /* request was terminated due fw error */
atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
break;
case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
break;
case FCPIO_INVALID_HEADER: /* header contains invalid data */ case FCPIO_INVALID_HEADER: /* header contains invalid data */
case FCPIO_INVALID_PARAM: /* some parameter in request invalid */ case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */ case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */
case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */
case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
case FCPIO_FW_ERR: /* request was terminated due fw error */
default: default:
shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
fnic_fcpio_status_to_str(hdr_status)); fnic_fcpio_status_to_str(hdr_status));
@ -856,6 +927,11 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
break; break;
} }
if (hdr_status != FCPIO_SUCCESS) {
atomic64_inc(&fnic_stats->io_stats.io_failures);
shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
fnic_fcpio_status_to_str(hdr_status));
}
/* Break link with the SCSI command */ /* Break link with the SCSI command */
CMD_SP(sc) = NULL; CMD_SP(sc) = NULL;
CMD_FLAGS(sc) |= FNIC_IO_DONE; CMD_FLAGS(sc) |= FNIC_IO_DONE;
@ -889,6 +965,12 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
} else } else
fnic->lport->host_stats.fcp_control_requests++; fnic->lport->host_stats.fcp_control_requests++;
atomic64_dec(&fnic_stats->io_stats.active_ios);
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
else
atomic64_inc(&fnic_stats->io_stats.io_completions);
/* Call SCSI completion function to complete the IO */ /* Call SCSI completion function to complete the IO */
if (sc->scsi_done) if (sc->scsi_done)
sc->scsi_done(sc); sc->scsi_done(sc);
@ -906,6 +988,10 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
u32 id; u32 id;
struct scsi_cmnd *sc; struct scsi_cmnd *sc;
struct fnic_io_req *io_req; struct fnic_io_req *io_req;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned long flags; unsigned long flags;
spinlock_t *io_lock; spinlock_t *io_lock;
unsigned long start_time; unsigned long start_time;
@ -923,6 +1009,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
WARN_ON_ONCE(!sc); WARN_ON_ONCE(!sc);
if (!sc) { if (!sc) {
atomic64_inc(&fnic_stats->io_stats.sc_null);
shost_printk(KERN_ERR, fnic->lport->host, shost_printk(KERN_ERR, fnic->lport->host,
"itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n", "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
fnic_fcpio_status_to_str(hdr_status), id); fnic_fcpio_status_to_str(hdr_status), id);
@ -933,6 +1020,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
io_req = (struct fnic_io_req *)CMD_SP(sc); io_req = (struct fnic_io_req *)CMD_SP(sc);
WARN_ON_ONCE(!io_req); WARN_ON_ONCE(!io_req);
if (!io_req) { if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
spin_unlock_irqrestore(io_lock, flags); spin_unlock_irqrestore(io_lock, flags);
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
shost_printk(KERN_ERR, fnic->lport->host, shost_printk(KERN_ERR, fnic->lport->host,
@ -957,6 +1045,31 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
spin_unlock_irqrestore(io_lock, flags); spin_unlock_irqrestore(io_lock, flags);
} else if (id & FNIC_TAG_ABORT) { } else if (id & FNIC_TAG_ABORT) {
/* Completion of abort cmd */ /* Completion of abort cmd */
switch (hdr_status) {
case FCPIO_SUCCESS:
break;
case FCPIO_TIMEOUT:
if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
atomic64_inc(&abts_stats->abort_fw_timeouts);
else
atomic64_inc(
&term_stats->terminate_fw_timeouts);
break;
case FCPIO_IO_NOT_FOUND:
if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
atomic64_inc(&abts_stats->abort_io_not_found);
else
atomic64_inc(
&term_stats->terminate_io_not_found);
break;
default:
if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
atomic64_inc(&abts_stats->abort_failures);
else
atomic64_inc(
&term_stats->terminate_failures);
break;
}
if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) { if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
/* This is a late completion. Ignore it */ /* This is a late completion. Ignore it */
spin_unlock_irqrestore(io_lock, flags); spin_unlock_irqrestore(io_lock, flags);
@ -964,6 +1077,16 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
} }
CMD_ABTS_STATUS(sc) = hdr_status; CMD_ABTS_STATUS(sc) = hdr_status;
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
atomic64_dec(&fnic_stats->io_stats.active_ios);
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
else
atomic64_inc(&fnic_stats->io_stats.io_completions);
if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"abts cmpl recd. id %d status %s\n", "abts cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK), (int)(id & FNIC_TAG_MASK),
@ -1066,6 +1189,18 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
{ {
struct fnic *fnic = vnic_dev_priv(vdev); struct fnic *fnic = vnic_dev_priv(vdev);
switch (desc->hdr.type) {
case FCPIO_ICMND_CMPL: /* fw completed a command */
case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
case FCPIO_RESET_CMPL: /* fw completed reset */
atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
break;
default:
break;
}
switch (desc->hdr.type) { switch (desc->hdr.type) {
case FCPIO_ACK: /* fw copied copy wq desc to its queue */ case FCPIO_ACK: /* fw copied copy wq desc to its queue */
fnic_fcpio_ack_handler(fnic, cq_index, desc); fnic_fcpio_ack_handler(fnic, cq_index, desc);
@ -1126,6 +1261,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
struct scsi_cmnd *sc; struct scsi_cmnd *sc;
spinlock_t *io_lock; spinlock_t *io_lock;
unsigned long start_time = 0; unsigned long start_time = 0;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
for (i = 0; i < fnic->fnic_max_tag_id; i++) { for (i = 0; i < fnic->fnic_max_tag_id; i++) {
if (i == exclude_id) if (i == exclude_id)
@ -1179,6 +1315,11 @@ cleanup_scsi_cmd:
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:" FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
" DID_TRANSPORT_DISRUPTED\n"); " DID_TRANSPORT_DISRUPTED\n");
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
else
atomic64_inc(&fnic_stats->io_stats.io_completions);
/* Complete the command to SCSI */ /* Complete the command to SCSI */
if (sc->scsi_done) { if (sc->scsi_done) {
FNIC_TRACE(fnic_cleanup_io, FNIC_TRACE(fnic_cleanup_io,
@ -1262,6 +1403,7 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
{ {
struct vnic_wq_copy *wq = &fnic->wq_copy[0]; struct vnic_wq_copy *wq = &fnic->wq_copy[0];
struct Scsi_Host *host = fnic->lport->host; struct Scsi_Host *host = fnic->lport->host;
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(host->host_lock, flags); spin_lock_irqsave(host->host_lock, flags);
@ -1283,12 +1425,19 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
atomic_dec(&fnic->in_flight); atomic_dec(&fnic->in_flight);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_queue_abort_io_req: failure: no descriptors\n"); "fnic_queue_abort_io_req: failure: no descriptors\n");
atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
return 1; return 1;
} }
fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT, fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
0, task_req, tag, fc_lun, io_req->port_id, 0, task_req, tag, fc_lun, io_req->port_id,
fnic->config.ra_tov, fnic->config.ed_tov); fnic->config.ra_tov, fnic->config.ed_tov);
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
atomic_dec(&fnic->in_flight); atomic_dec(&fnic->in_flight);
@ -1299,10 +1448,13 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{ {
int tag; int tag;
int abt_tag; int abt_tag;
int term_cnt = 0;
struct fnic_io_req *io_req; struct fnic_io_req *io_req;
spinlock_t *io_lock; spinlock_t *io_lock;
unsigned long flags; unsigned long flags;
struct scsi_cmnd *sc; struct scsi_cmnd *sc;
struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
struct scsi_lun fc_lun; struct scsi_lun fc_lun;
enum fnic_ioreq_state old_ioreq_state; enum fnic_ioreq_state old_ioreq_state;
@ -1366,6 +1518,7 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
atomic64_inc(&reset_stats->device_reset_terminates);
abt_tag = (tag | FNIC_TAG_DEV_RST); abt_tag = (tag | FNIC_TAG_DEV_RST);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_rport_exch_reset dev rst sc 0x%p\n", "fnic_rport_exch_reset dev rst sc 0x%p\n",
@ -1402,8 +1555,12 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
else else
CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags); spin_unlock_irqrestore(io_lock, flags);
atomic64_inc(&term_stats->terminates);
term_cnt++;
} }
} }
if (term_cnt > atomic64_read(&term_stats->max_terminates))
atomic64_set(&term_stats->max_terminates, term_cnt);
} }
@ -1411,6 +1568,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
{ {
int tag; int tag;
int abt_tag; int abt_tag;
int term_cnt = 0;
struct fnic_io_req *io_req; struct fnic_io_req *io_req;
spinlock_t *io_lock; spinlock_t *io_lock;
unsigned long flags; unsigned long flags;
@ -1420,6 +1578,8 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
struct fc_lport *lport; struct fc_lport *lport;
struct fnic *fnic; struct fnic *fnic;
struct fc_rport *cmd_rport; struct fc_rport *cmd_rport;
struct reset_stats *reset_stats;
struct terminate_stats *term_stats;
enum fnic_ioreq_state old_ioreq_state; enum fnic_ioreq_state old_ioreq_state;
if (!rport) { if (!rport) {
@ -1448,6 +1608,9 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
if (fnic->in_remove) if (fnic->in_remove)
return; return;
reset_stats = &fnic->fnic_stats.reset_stats;
term_stats = &fnic->fnic_stats.term_stats;
for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
abt_tag = tag; abt_tag = tag;
io_lock = fnic_io_lock_tag(fnic, tag); io_lock = fnic_io_lock_tag(fnic, tag);
@ -1504,6 +1667,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
atomic64_inc(&reset_stats->device_reset_terminates);
abt_tag = (tag | FNIC_TAG_DEV_RST); abt_tag = (tag | FNIC_TAG_DEV_RST);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_terminate_rport_io dev rst sc 0x%p\n", sc); "fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
@ -1540,8 +1704,12 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
else else
CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags); spin_unlock_irqrestore(io_lock, flags);
atomic64_inc(&term_stats->terminates);
term_cnt++;
} }
} }
if (term_cnt > atomic64_read(&term_stats->max_terminates))
atomic64_set(&term_stats->max_terminates, term_cnt);
} }
@ -1562,6 +1730,9 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
int ret = SUCCESS; int ret = SUCCESS;
u32 task_req = 0; u32 task_req = 0;
struct scsi_lun fc_lun; struct scsi_lun fc_lun;
struct fnic_stats *fnic_stats;
struct abort_stats *abts_stats;
struct terminate_stats *term_stats;
int tag; int tag;
DECLARE_COMPLETION_ONSTACK(tm_done); DECLARE_COMPLETION_ONSTACK(tm_done);
@ -1572,6 +1743,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
lp = shost_priv(sc->device->host); lp = shost_priv(sc->device->host);
fnic = lport_priv(lp); fnic = lport_priv(lp);
fnic_stats = &fnic->fnic_stats;
abts_stats = &fnic->fnic_stats.abts_stats;
term_stats = &fnic->fnic_stats.term_stats;
rport = starget_to_rport(scsi_target(sc->device)); rport = starget_to_rport(scsi_target(sc->device));
tag = sc->request->tag; tag = sc->request->tag;
FNIC_SCSI_DBG(KERN_DEBUG, FNIC_SCSI_DBG(KERN_DEBUG,
@ -1630,8 +1805,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
*/ */
if (fc_remote_port_chkready(rport) == 0) if (fc_remote_port_chkready(rport) == 0)
task_req = FCPIO_ITMF_ABT_TASK; task_req = FCPIO_ITMF_ABT_TASK;
else else {
atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
task_req = FCPIO_ITMF_ABT_TASK_TERM; task_req = FCPIO_ITMF_ABT_TASK_TERM;
}
/* Now queue the abort command to firmware */ /* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun); int_to_scsilun(sc->device->lun, &fc_lun);
@ -1646,10 +1823,13 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
ret = FAILED; ret = FAILED;
goto fnic_abort_cmd_end; goto fnic_abort_cmd_end;
} }
if (task_req == FCPIO_ITMF_ABT_TASK) if (task_req == FCPIO_ITMF_ABT_TASK) {
CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED; CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
else atomic64_inc(&fnic_stats->abts_stats.aborts);
} else {
CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED; CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
atomic64_inc(&fnic_stats->term_stats.terminates);
}
/* /*
* We queued an abort IO, wait for its completion. * We queued an abort IO, wait for its completion.
@ -1667,6 +1847,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
io_req = (struct fnic_io_req *)CMD_SP(sc); io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) { if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
spin_unlock_irqrestore(io_lock, flags); spin_unlock_irqrestore(io_lock, flags);
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
ret = FAILED; ret = FAILED;
@ -1677,6 +1858,15 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
/* fw did not complete abort, timed out */ /* fw did not complete abort, timed out */
if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
spin_unlock_irqrestore(io_lock, flags); spin_unlock_irqrestore(io_lock, flags);
if (task_req == FCPIO_ITMF_ABT_TASK) {
FNIC_SCSI_DBG(KERN_INFO,
fnic->lport->host, "Abort Driver Timeout\n");
atomic64_inc(&abts_stats->abort_drv_timeouts);
} else {
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"Terminate Driver Timeout\n");
atomic64_inc(&term_stats->terminate_drv_timeouts);
}
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT; CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
ret = FAILED; ret = FAILED;
goto fnic_abort_cmd_end; goto fnic_abort_cmd_end;
@ -1721,6 +1911,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
{ {
struct vnic_wq_copy *wq = &fnic->wq_copy[0]; struct vnic_wq_copy *wq = &fnic->wq_copy[0];
struct Scsi_Host *host = fnic->lport->host; struct Scsi_Host *host = fnic->lport->host;
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
struct scsi_lun fc_lun; struct scsi_lun fc_lun;
int ret = 0; int ret = 0;
unsigned long intr_flags; unsigned long intr_flags;
@ -1742,6 +1933,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
if (!vnic_wq_copy_desc_avail(wq)) { if (!vnic_wq_copy_desc_avail(wq)) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"queue_dr_io_req failure - no descriptors\n"); "queue_dr_io_req failure - no descriptors\n");
atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
ret = -EAGAIN; ret = -EAGAIN;
goto lr_io_req_end; goto lr_io_req_end;
} }
@ -1754,6 +1946,12 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
fc_lun.scsi_lun, io_req->port_id, fc_lun.scsi_lun, io_req->port_id,
fnic->config.ra_tov, fnic->config.ed_tov); fnic->config.ra_tov, fnic->config.ed_tov);
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
lr_io_req_end: lr_io_req_end:
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
atomic_dec(&fnic->in_flight); atomic_dec(&fnic->in_flight);
@ -1988,6 +2186,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
unsigned long flags; unsigned long flags;
unsigned long start_time = 0; unsigned long start_time = 0;
struct scsi_lun fc_lun; struct scsi_lun fc_lun;
struct fnic_stats *fnic_stats;
struct reset_stats *reset_stats;
int tag = 0; int tag = 0;
DECLARE_COMPLETION_ONSTACK(tm_done); DECLARE_COMPLETION_ONSTACK(tm_done);
int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/ int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/
@ -1999,6 +2199,10 @@ int fnic_device_reset(struct scsi_cmnd *sc)
lp = shost_priv(sc->device->host); lp = shost_priv(sc->device->host);
fnic = lport_priv(lp); fnic = lport_priv(lp);
fnic_stats = &fnic->fnic_stats;
reset_stats = &fnic->fnic_stats.reset_stats;
atomic64_inc(&reset_stats->device_resets);
rport = starget_to_rport(scsi_target(sc->device)); rport = starget_to_rport(scsi_target(sc->device));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@ -2009,8 +2213,10 @@ int fnic_device_reset(struct scsi_cmnd *sc)
goto fnic_device_reset_end; goto fnic_device_reset_end;
/* Check if remote port up */ /* Check if remote port up */
if (fc_remote_port_chkready(rport)) if (fc_remote_port_chkready(rport)) {
atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
goto fnic_device_reset_end; goto fnic_device_reset_end;
}
CMD_FLAGS(sc) = FNIC_DEVICE_RESET; CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
/* Allocate tag if not present */ /* Allocate tag if not present */
@ -2086,6 +2292,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
* gets cleaned up during higher levels of EH * gets cleaned up during higher levels of EH
*/ */
if (status == FCPIO_INVALID_CODE) { if (status == FCPIO_INVALID_CODE) {
atomic64_inc(&reset_stats->device_reset_timeouts);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Device reset timed out\n"); "Device reset timed out\n");
CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT; CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
@ -2199,6 +2406,10 @@ fnic_device_reset_end:
"Returning from device reset %s\n", "Returning from device reset %s\n",
(ret == SUCCESS) ? (ret == SUCCESS) ?
"SUCCESS" : "FAILED"); "SUCCESS" : "FAILED");
if (ret == FAILED)
atomic64_inc(&reset_stats->device_reset_failures);
return ret; return ret;
} }
@ -2207,26 +2418,34 @@ int fnic_reset(struct Scsi_Host *shost)
{ {
struct fc_lport *lp; struct fc_lport *lp;
struct fnic *fnic; struct fnic *fnic;
int ret = SUCCESS; int ret = 0;
struct reset_stats *reset_stats;
lp = shost_priv(shost); lp = shost_priv(shost);
fnic = lport_priv(lp); fnic = lport_priv(lp);
reset_stats = &fnic->fnic_stats.reset_stats;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_reset called\n"); "fnic_reset called\n");
atomic64_inc(&reset_stats->fnic_resets);
/* /*
* Reset local port, this will clean up libFC exchanges, * Reset local port, this will clean up libFC exchanges,
* reset remote port sessions, and if link is up, begin flogi * reset remote port sessions, and if link is up, begin flogi
*/ */
if (lp->tt.lport_reset(lp)) ret = lp->tt.lport_reset(lp);
ret = FAILED;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from fnic reset %s\n", "Returning from fnic reset %s\n",
(ret == SUCCESS) ? (ret == 0) ?
"SUCCESS" : "FAILED"); "SUCCESS" : "FAILED");
if (ret == 0)
atomic64_inc(&reset_stats->fnic_reset_completions);
else
atomic64_inc(&reset_stats->fnic_reset_failures);
return ret; return ret;
} }
@ -2251,7 +2470,7 @@ int fnic_host_reset(struct scsi_cmnd *sc)
* scsi-ml tries to send a TUR to every device if host reset is * scsi-ml tries to send a TUR to every device if host reset is
* successful, so before returning to scsi, fabric should be up * successful, so before returning to scsi, fabric should be up
*/ */
ret = fnic_reset(shost); ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
if (ret == SUCCESS) { if (ret == SUCCESS) {
wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ; wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
ret = FAILED; ret = FAILED;

View File

@ -0,0 +1,116 @@
/*
* Copyright 2013 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _FNIC_STATS_H_
#define _FNIC_STATS_H_
struct io_path_stats {
atomic64_t active_ios;
atomic64_t max_active_ios;
atomic64_t io_completions;
atomic64_t io_failures;
atomic64_t ioreq_null;
atomic64_t alloc_failures;
atomic64_t sc_null;
atomic64_t io_not_found;
atomic64_t num_ios;
};
struct abort_stats {
atomic64_t aborts;
atomic64_t abort_failures;
atomic64_t abort_drv_timeouts;
atomic64_t abort_fw_timeouts;
atomic64_t abort_io_not_found;
};
struct terminate_stats {
atomic64_t terminates;
atomic64_t max_terminates;
atomic64_t terminate_drv_timeouts;
atomic64_t terminate_fw_timeouts;
atomic64_t terminate_io_not_found;
atomic64_t terminate_failures;
};
struct reset_stats {
atomic64_t device_resets;
atomic64_t device_reset_failures;
atomic64_t device_reset_aborts;
atomic64_t device_reset_timeouts;
atomic64_t device_reset_terminates;
atomic64_t fw_resets;
atomic64_t fw_reset_completions;
atomic64_t fw_reset_failures;
atomic64_t fnic_resets;
atomic64_t fnic_reset_completions;
atomic64_t fnic_reset_failures;
};
struct fw_stats {
atomic64_t active_fw_reqs;
atomic64_t max_fw_reqs;
atomic64_t fw_out_of_resources;
atomic64_t io_fw_errs;
};
struct vlan_stats {
atomic64_t vlan_disc_reqs;
atomic64_t resp_withno_vlanID;
atomic64_t sol_expiry_count;
atomic64_t flogi_rejects;
};
struct misc_stats {
u64 last_isr_time;
u64 last_ack_time;
atomic64_t isr_count;
atomic64_t max_cq_entries;
atomic64_t ack_index_out_of_range;
atomic64_t data_count_mismatch;
atomic64_t fcpio_timeout;
atomic64_t fcpio_aborted;
atomic64_t sgl_invalid;
atomic64_t mss_invalid;
atomic64_t abts_cpwq_alloc_failures;
atomic64_t devrst_cpwq_alloc_failures;
atomic64_t io_cpwq_alloc_failures;
atomic64_t no_icmnd_itmf_cmpls;
atomic64_t queue_fulls;
atomic64_t rport_not_ready;
atomic64_t frame_errors;
};
struct fnic_stats {
struct io_path_stats io_stats;
struct abort_stats abts_stats;
struct terminate_stats term_stats;
struct reset_stats reset_stats;
struct fw_stats fw_stats;
struct vlan_stats vlan_stats;
struct misc_stats misc_stats;
};
struct stats_debug_info {
char *debug_buffer;
void *i_private;
int buf_size;
int buffer_len;
};
int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *);
int fnic_stats_debugfs_init(struct fnic *);
void fnic_stats_debugfs_remove(struct fnic *);
#endif /* _FNIC_STATS_H_ */

View File

@ -188,6 +188,191 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
return len; return len;
} }
/*
* fnic_get_stats_data - Copy fnic stats buffer to a memory file
* @fnic_dbgfs_t: pointer to debugfs fnic stats buffer
*
* Description:
* This routine gathers the fnic stats debugfs data from the fnic_stats struct
* and dumps it to stats_debug_info.
*
* Return Value:
* This routine returns the amount of bytes that were dumped into
* stats_debug_info
*/
int fnic_get_stats_data(struct stats_debug_info *debug,
struct fnic_stats *stats)
{
int len = 0;
int buf_size = debug->buf_size;
struct timespec val1, val2;
len = snprintf(debug->debug_buffer + len, buf_size - len,
"------------------------------------------\n"
"\t\tIO Statistics\n"
"------------------------------------------\n");
len += snprintf(debug->debug_buffer + len, buf_size - len,
"Number of Active IOs: %lld\nMaximum Active IOs: %lld\n"
"Number of IOs: %lld\nNumber of IO Completions: %lld\n"
"Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n"
"Number of Memory alloc Failures: %lld\n"
"Number of IOREQ Null: %lld\n"
"Number of SCSI cmd pointer Null: %lld\n",
(u64)atomic64_read(&stats->io_stats.active_ios),
(u64)atomic64_read(&stats->io_stats.max_active_ios),
(u64)atomic64_read(&stats->io_stats.num_ios),
(u64)atomic64_read(&stats->io_stats.io_completions),
(u64)atomic64_read(&stats->io_stats.io_failures),
(u64)atomic64_read(&stats->io_stats.io_not_found),
(u64)atomic64_read(&stats->io_stats.alloc_failures),
(u64)atomic64_read(&stats->io_stats.ioreq_null),
(u64)atomic64_read(&stats->io_stats.sc_null));
len += snprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tAbort Statistics\n"
"------------------------------------------\n");
len += snprintf(debug->debug_buffer + len, buf_size - len,
"Number of Aborts: %lld\n"
"Number of Abort Failures: %lld\n"
"Number of Abort Driver Timeouts: %lld\n"
"Number of Abort FW Timeouts: %lld\n"
"Number of Abort IO NOT Found: %lld\n",
(u64)atomic64_read(&stats->abts_stats.aborts),
(u64)atomic64_read(&stats->abts_stats.abort_failures),
(u64)atomic64_read(&stats->abts_stats.abort_drv_timeouts),
(u64)atomic64_read(&stats->abts_stats.abort_fw_timeouts),
(u64)atomic64_read(&stats->abts_stats.abort_io_not_found));
len += snprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tTerminate Statistics\n"
"------------------------------------------\n");
len += snprintf(debug->debug_buffer + len, buf_size - len,
"Number of Terminates: %lld\n"
"Maximum Terminates: %lld\n"
"Number of Terminate Driver Timeouts: %lld\n"
"Number of Terminate FW Timeouts: %lld\n"
"Number of Terminate IO NOT Found: %lld\n"
"Number of Terminate Failures: %lld\n",
(u64)atomic64_read(&stats->term_stats.terminates),
(u64)atomic64_read(&stats->term_stats.max_terminates),
(u64)atomic64_read(&stats->term_stats.terminate_drv_timeouts),
(u64)atomic64_read(&stats->term_stats.terminate_fw_timeouts),
(u64)atomic64_read(&stats->term_stats.terminate_io_not_found),
(u64)atomic64_read(&stats->term_stats.terminate_failures));
len += snprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tReset Statistics\n"
"------------------------------------------\n");
len += snprintf(debug->debug_buffer + len, buf_size - len,
"Number of Device Resets: %lld\n"
"Number of Device Reset Failures: %lld\n"
"Number of Device Reset Aborts: %lld\n"
"Number of Device Reset Timeouts: %lld\n"
"Number of Device Reset Terminates: %lld\n"
"Number of FW Resets: %lld\n"
"Number of FW Reset Completions: %lld\n"
"Number of FW Reset Failures: %lld\n"
"Number of Fnic Reset: %lld\n"
"Number of Fnic Reset Completions: %lld\n"
"Number of Fnic Reset Failures: %lld\n",
(u64)atomic64_read(&stats->reset_stats.device_resets),
(u64)atomic64_read(&stats->reset_stats.device_reset_failures),
(u64)atomic64_read(&stats->reset_stats.device_reset_aborts),
(u64)atomic64_read(&stats->reset_stats.device_reset_timeouts),
(u64)atomic64_read(
&stats->reset_stats.device_reset_terminates),
(u64)atomic64_read(&stats->reset_stats.fw_resets),
(u64)atomic64_read(&stats->reset_stats.fw_reset_completions),
(u64)atomic64_read(&stats->reset_stats.fw_reset_failures),
(u64)atomic64_read(&stats->reset_stats.fnic_resets),
(u64)atomic64_read(
&stats->reset_stats.fnic_reset_completions),
(u64)atomic64_read(&stats->reset_stats.fnic_reset_failures));
len += snprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tFirmware Statistics\n"
"------------------------------------------\n");
len += snprintf(debug->debug_buffer + len, buf_size - len,
"Number of Active FW Requests %lld\n"
"Maximum FW Requests: %lld\n"
"Number of FW out of resources: %lld\n"
"Number of FW IO errors: %lld\n",
(u64)atomic64_read(&stats->fw_stats.active_fw_reqs),
(u64)atomic64_read(&stats->fw_stats.max_fw_reqs),
(u64)atomic64_read(&stats->fw_stats.fw_out_of_resources),
(u64)atomic64_read(&stats->fw_stats.io_fw_errs));
len += snprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tVlan Discovery Statistics\n"
"------------------------------------------\n");
len += snprintf(debug->debug_buffer + len, buf_size - len,
"Number of Vlan Discovery Requests Sent %lld\n"
"Vlan Response Received with no FCF VLAN ID: %lld\n"
"No solicitations recvd after vlan set, expiry count: %lld\n"
"Flogi rejects count: %lld\n",
(u64)atomic64_read(&stats->vlan_stats.vlan_disc_reqs),
(u64)atomic64_read(&stats->vlan_stats.resp_withno_vlanID),
(u64)atomic64_read(&stats->vlan_stats.sol_expiry_count),
(u64)atomic64_read(&stats->vlan_stats.flogi_rejects));
len += snprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tOther Important Statistics\n"
"------------------------------------------\n");
jiffies_to_timespec(stats->misc_stats.last_isr_time, &val1);
jiffies_to_timespec(stats->misc_stats.last_ack_time, &val2);
len += snprintf(debug->debug_buffer + len, buf_size - len,
"Last ISR time: %llu (%8lu.%8lu)\n"
"Last ACK time: %llu (%8lu.%8lu)\n"
"Number of ISRs: %lld\n"
"Maximum CQ Entries: %lld\n"
"Number of ACK index out of range: %lld\n"
"Number of data count mismatch: %lld\n"
"Number of FCPIO Timeouts: %lld\n"
"Number of FCPIO Aborted: %lld\n"
"Number of SGL Invalid: %lld\n"
"Number of Copy WQ Alloc Failures for ABTs: %lld\n"
"Number of Copy WQ Alloc Failures for Device Reset: %lld\n"
"Number of Copy WQ Alloc Failures for IOs: %lld\n"
"Number of no icmnd itmf Completions: %lld\n"
"Number of QUEUE Fulls: %lld\n"
"Number of rport not ready: %lld\n"
"Number of receive frame errors: %lld\n",
(u64)stats->misc_stats.last_isr_time,
val1.tv_sec, val1.tv_nsec,
(u64)stats->misc_stats.last_ack_time,
val2.tv_sec, val2.tv_nsec,
(u64)atomic64_read(&stats->misc_stats.isr_count),
(u64)atomic64_read(&stats->misc_stats.max_cq_entries),
(u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
(u64)atomic64_read(&stats->misc_stats.data_count_mismatch),
(u64)atomic64_read(&stats->misc_stats.fcpio_timeout),
(u64)atomic64_read(&stats->misc_stats.fcpio_aborted),
(u64)atomic64_read(&stats->misc_stats.sgl_invalid),
(u64)atomic64_read(
&stats->misc_stats.abts_cpwq_alloc_failures),
(u64)atomic64_read(
&stats->misc_stats.devrst_cpwq_alloc_failures),
(u64)atomic64_read(&stats->misc_stats.io_cpwq_alloc_failures),
(u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls),
(u64)atomic64_read(&stats->misc_stats.queue_fulls),
(u64)atomic64_read(&stats->misc_stats.rport_not_ready),
(u64)atomic64_read(&stats->misc_stats.frame_errors));
return len;
}
/* /*
* fnic_trace_buf_init - Initialize fnic trace buffer logging facility * fnic_trace_buf_init - Initialize fnic trace buffer logging facility
* *

View File

@ -84,7 +84,8 @@ fnic_trace_data_t *fnic_trace_get_buf(void);
int fnic_get_trace_data(fnic_dbgfs_t *); int fnic_get_trace_data(fnic_dbgfs_t *);
int fnic_trace_buf_init(void); int fnic_trace_buf_init(void);
void fnic_trace_free(void); void fnic_trace_free(void);
int fnic_debugfs_init(void);
void fnic_debugfs_terminate(void);
int fnic_trace_debugfs_init(void); int fnic_trace_debugfs_init(void);
void fnic_trace_debugfs_terminate(void); void fnic_trace_debugfs_terminate(void);
#endif #endif

View File

@ -316,6 +316,12 @@ static void scsi_host_dev_release(struct device *dev)
kfree(shost); kfree(shost);
} }
static unsigned int shost_eh_deadline;
module_param_named(eh_deadline, shost_eh_deadline, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(eh_deadline,
"SCSI EH timeout in seconds (should be between 1 and 2^32-1)");
static struct device_type scsi_host_type = { static struct device_type scsi_host_type = {
.name = "scsi_host", .name = "scsi_host",
.release = scsi_host_dev_release, .release = scsi_host_dev_release,
@ -388,6 +394,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost->unchecked_isa_dma = sht->unchecked_isa_dma; shost->unchecked_isa_dma = sht->unchecked_isa_dma;
shost->use_clustering = sht->use_clustering; shost->use_clustering = sht->use_clustering;
shost->ordered_tag = sht->ordered_tag; shost->ordered_tag = sht->ordered_tag;
shost->eh_deadline = shost_eh_deadline * HZ;
if (sht->supported_mode == MODE_UNKNOWN) if (sht->supported_mode == MODE_UNKNOWN)
/* means we didn't set it ... default to INITIATOR */ /* means we didn't set it ... default to INITIATOR */

View File

@ -100,7 +100,6 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1920},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},

View File

@ -116,6 +116,7 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk)
struct iscsi_conn *conn = sk->sk_user_data; struct iscsi_conn *conn = sk->sk_user_data;
if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) && if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) &&
(conn->session->state != ISCSI_STATE_LOGGING_OUT) &&
!atomic_read(&sk->sk_rmem_alloc)) { !atomic_read(&sk->sk_rmem_alloc)) {
ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n"); ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n");
iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE); iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE);

View File

@ -27,6 +27,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/log2.h>
#include <scsi/fc/fc_fc2.h> #include <scsi/fc/fc_fc2.h>
@ -303,10 +304,7 @@ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
fr_eof(fp) = FC_EOF_N; fr_eof(fp) = FC_EOF_N;
} }
/* /* Initialize remaining fh fields from fc_fill_fc_hdr */
* Initialize remainig fh fields
* from fc_fill_fc_hdr
*/
fh->fh_ox_id = htons(ep->oxid); fh->fh_ox_id = htons(ep->oxid);
fh->fh_rx_id = htons(ep->rxid); fh->fh_rx_id = htons(ep->rxid);
fh->fh_seq_id = ep->seq.id; fh->fh_seq_id = ep->seq.id;
@ -362,9 +360,10 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec); FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);
if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work, fc_exch_hold(ep); /* hold for timer */
msecs_to_jiffies(timer_msec))) if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
fc_exch_hold(ep); /* hold for timer */ msecs_to_jiffies(timer_msec)))
fc_exch_release(ep);
} }
/** /**
@ -382,6 +381,8 @@ static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
/** /**
* fc_exch_done_locked() - Complete an exchange with the exchange lock held * fc_exch_done_locked() - Complete an exchange with the exchange lock held
* @ep: The exchange that is complete * @ep: The exchange that is complete
*
* Note: May sleep if invoked from outside a response handler.
*/ */
static int fc_exch_done_locked(struct fc_exch *ep) static int fc_exch_done_locked(struct fc_exch *ep)
{ {
@ -393,7 +394,6 @@ static int fc_exch_done_locked(struct fc_exch *ep)
* ep, and in that case we only clear the resp and set it as * ep, and in that case we only clear the resp and set it as
* complete, so it can be reused by the timer to send the rrq. * complete, so it can be reused by the timer to send the rrq.
*/ */
ep->resp = NULL;
if (ep->state & FC_EX_DONE) if (ep->state & FC_EX_DONE)
return rc; return rc;
ep->esb_stat |= ESB_ST_COMPLETE; ep->esb_stat |= ESB_ST_COMPLETE;
@ -464,15 +464,21 @@ static void fc_exch_delete(struct fc_exch *ep)
} }
static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp, static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
struct fc_frame *fp) struct fc_frame *fp)
{ {
struct fc_exch *ep; struct fc_exch *ep;
struct fc_frame_header *fh = fc_frame_header_get(fp); struct fc_frame_header *fh = fc_frame_header_get(fp);
int error; int error = -ENXIO;
u32 f_ctl; u32 f_ctl;
u8 fh_type = fh->fh_type; u8 fh_type = fh->fh_type;
ep = fc_seq_exch(sp); ep = fc_seq_exch(sp);
if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) {
fc_frame_free(fp);
goto out;
}
WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT)); WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));
f_ctl = ntoh24(fh->fh_f_ctl); f_ctl = ntoh24(fh->fh_f_ctl);
@ -515,6 +521,9 @@ out:
* @lport: The local port that the exchange will be sent on * @lport: The local port that the exchange will be sent on
* @sp: The sequence to be sent * @sp: The sequence to be sent
* @fp: The frame to be sent on the exchange * @fp: The frame to be sent on the exchange
*
* Note: The frame will be freed either by a direct call to fc_frame_free(fp)
* or indirectly by calling libfc_function_template.frame_send().
*/ */
static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
struct fc_frame *fp) struct fc_frame *fp)
@ -581,6 +590,8 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
/* /*
* Set the response handler for the exchange associated with a sequence. * Set the response handler for the exchange associated with a sequence.
*
* Note: May sleep if invoked from outside a response handler.
*/ */
static void fc_seq_set_resp(struct fc_seq *sp, static void fc_seq_set_resp(struct fc_seq *sp,
void (*resp)(struct fc_seq *, struct fc_frame *, void (*resp)(struct fc_seq *, struct fc_frame *,
@ -588,8 +599,18 @@ static void fc_seq_set_resp(struct fc_seq *sp,
void *arg) void *arg)
{ {
struct fc_exch *ep = fc_seq_exch(sp); struct fc_exch *ep = fc_seq_exch(sp);
DEFINE_WAIT(wait);
spin_lock_bh(&ep->ex_lock); spin_lock_bh(&ep->ex_lock);
while (ep->resp_active && ep->resp_task != current) {
prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_bh(&ep->ex_lock);
schedule();
spin_lock_bh(&ep->ex_lock);
}
finish_wait(&ep->resp_wq, &wait);
ep->resp = resp; ep->resp = resp;
ep->arg = arg; ep->arg = arg;
spin_unlock_bh(&ep->ex_lock); spin_unlock_bh(&ep->ex_lock);
@ -622,27 +643,31 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
if (!sp) if (!sp)
return -ENOMEM; return -ENOMEM;
ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
if (timer_msec) if (timer_msec)
fc_exch_timer_set_locked(ep, timer_msec); fc_exch_timer_set_locked(ep, timer_msec);
/* if (ep->sid) {
* If not logged into the fabric, don't send ABTS but leave /*
* sequence active until next timeout. * Send an abort for the sequence that timed out.
*/ */
if (!ep->sid) fp = fc_frame_alloc(ep->lp, 0);
return 0; if (fp) {
ep->esb_stat |= ESB_ST_SEQ_INIT;
/* fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
* Send an abort for the sequence that timed out. FC_TYPE_BLS, FC_FC_END_SEQ |
*/ FC_FC_SEQ_INIT, 0);
fp = fc_frame_alloc(ep->lp, 0); error = fc_seq_send_locked(ep->lp, sp, fp);
if (fp) { } else {
fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid, error = -ENOBUFS;
FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); }
error = fc_seq_send_locked(ep->lp, sp, fp); } else {
} else /*
error = -ENOBUFS; * If not logged into the fabric, don't send ABTS but leave
* sequence active until next timeout.
*/
error = 0;
}
ep->esb_stat |= ESB_ST_ABNORMAL;
return error; return error;
} }
@ -668,6 +693,61 @@ static int fc_seq_exch_abort(const struct fc_seq *req_sp,
return error; return error;
} }
/**
* fc_invoke_resp() - invoke ep->resp()
*
* Notes:
* It is assumed that after initialization finished (this means the
* first unlock of ex_lock after fc_exch_alloc()) ep->resp and ep->arg are
* modified only via fc_seq_set_resp(). This guarantees that none of these
* two variables changes if ep->resp_active > 0.
*
* If an fc_seq_set_resp() call is busy modifying ep->resp and ep->arg when
* this function is invoked, the first spin_lock_bh() call in this function
* will wait until fc_seq_set_resp() has finished modifying these variables.
*
* Since fc_exch_done() invokes fc_seq_set_resp() it is guaranteed that that
* ep->resp() won't be invoked after fc_exch_done() has returned.
*
* The response handler itself may invoke fc_exch_done(), which will clear the
* ep->resp pointer.
*
* Return value:
* Returns true if and only if ep->resp has been invoked.
*/
static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
struct fc_frame *fp)
{
void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
void *arg;
bool res = false;
spin_lock_bh(&ep->ex_lock);
ep->resp_active++;
if (ep->resp_task != current)
ep->resp_task = !ep->resp_task ? current : NULL;
resp = ep->resp;
arg = ep->arg;
spin_unlock_bh(&ep->ex_lock);
if (resp) {
resp(sp, fp, arg);
res = true;
} else if (!IS_ERR(fp)) {
fc_frame_free(fp);
}
spin_lock_bh(&ep->ex_lock);
if (--ep->resp_active == 0)
ep->resp_task = NULL;
spin_unlock_bh(&ep->ex_lock);
if (ep->resp_active == 0)
wake_up(&ep->resp_wq);
return res;
}
/** /**
* fc_exch_timeout() - Handle exchange timer expiration * fc_exch_timeout() - Handle exchange timer expiration
* @work: The work_struct identifying the exchange that timed out * @work: The work_struct identifying the exchange that timed out
@ -677,8 +757,6 @@ static void fc_exch_timeout(struct work_struct *work)
struct fc_exch *ep = container_of(work, struct fc_exch, struct fc_exch *ep = container_of(work, struct fc_exch,
timeout_work.work); timeout_work.work);
struct fc_seq *sp = &ep->seq; struct fc_seq *sp = &ep->seq;
void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
void *arg;
u32 e_stat; u32 e_stat;
int rc = 1; int rc = 1;
@ -696,16 +774,13 @@ static void fc_exch_timeout(struct work_struct *work)
fc_exch_rrq(ep); fc_exch_rrq(ep);
goto done; goto done;
} else { } else {
resp = ep->resp;
arg = ep->arg;
ep->resp = NULL;
if (e_stat & ESB_ST_ABNORMAL) if (e_stat & ESB_ST_ABNORMAL)
rc = fc_exch_done_locked(ep); rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock); spin_unlock_bh(&ep->ex_lock);
if (!rc) if (!rc)
fc_exch_delete(ep); fc_exch_delete(ep);
if (resp) fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT));
resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg); fc_seq_set_resp(sp, NULL, ep->arg);
fc_seq_exch_abort(sp, 2 * ep->r_a_tov); fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
goto done; goto done;
} }
@ -792,6 +867,8 @@ hit:
ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */ ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
ep->rxid = FC_XID_UNKNOWN; ep->rxid = FC_XID_UNKNOWN;
ep->class = mp->class; ep->class = mp->class;
ep->resp_active = 0;
init_waitqueue_head(&ep->resp_wq);
INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout); INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
out: out:
return ep; return ep;
@ -838,8 +915,10 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask); pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
spin_lock_bh(&pool->lock); spin_lock_bh(&pool->lock);
ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order); ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
if (ep && ep->xid == xid) if (ep) {
WARN_ON(ep->xid != xid);
fc_exch_hold(ep); fc_exch_hold(ep);
}
spin_unlock_bh(&pool->lock); spin_unlock_bh(&pool->lock);
} }
return ep; return ep;
@ -850,6 +929,8 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
* fc_exch_done() - Indicate that an exchange/sequence tuple is complete and * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and
* the memory allocated for the related objects may be freed. * the memory allocated for the related objects may be freed.
* @sp: The sequence that has completed * @sp: The sequence that has completed
*
* Note: May sleep if invoked from outside a response handler.
*/ */
static void fc_exch_done(struct fc_seq *sp) static void fc_exch_done(struct fc_seq *sp)
{ {
@ -859,6 +940,8 @@ static void fc_exch_done(struct fc_seq *sp)
spin_lock_bh(&ep->ex_lock); spin_lock_bh(&ep->ex_lock);
rc = fc_exch_done_locked(ep); rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock); spin_unlock_bh(&ep->ex_lock);
fc_seq_set_resp(sp, NULL, ep->arg);
if (!rc) if (!rc)
fc_exch_delete(ep); fc_exch_delete(ep);
} }
@ -987,6 +1070,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
} }
} }
spin_lock_bh(&ep->ex_lock);
/* /*
* At this point, we have the exchange held. * At this point, we have the exchange held.
* Find or create the sequence. * Find or create the sequence.
@ -1014,11 +1098,11 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
* sending RSP, hence write request on other * sending RSP, hence write request on other
* end never finishes. * end never finishes.
*/ */
spin_lock_bh(&ep->ex_lock);
sp->ssb_stat |= SSB_ST_RESP; sp->ssb_stat |= SSB_ST_RESP;
sp->id = fh->fh_seq_id; sp->id = fh->fh_seq_id;
spin_unlock_bh(&ep->ex_lock);
} else { } else {
spin_unlock_bh(&ep->ex_lock);
/* sequence/exch should exist */ /* sequence/exch should exist */
reject = FC_RJT_SEQ_ID; reject = FC_RJT_SEQ_ID;
goto rel; goto rel;
@ -1029,6 +1113,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
if (f_ctl & FC_FC_SEQ_INIT) if (f_ctl & FC_FC_SEQ_INIT)
ep->esb_stat |= ESB_ST_SEQ_INIT; ep->esb_stat |= ESB_ST_SEQ_INIT;
spin_unlock_bh(&ep->ex_lock);
fr_seq(fp) = sp; fr_seq(fp) = sp;
out: out:
@ -1291,21 +1376,23 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
if (!ep) if (!ep)
goto reject; goto reject;
fp = fc_frame_alloc(ep->lp, sizeof(*ap));
if (!fp)
goto free;
spin_lock_bh(&ep->ex_lock); spin_lock_bh(&ep->ex_lock);
if (ep->esb_stat & ESB_ST_COMPLETE) { if (ep->esb_stat & ESB_ST_COMPLETE) {
spin_unlock_bh(&ep->ex_lock); spin_unlock_bh(&ep->ex_lock);
fc_frame_free(fp);
goto reject; goto reject;
} }
if (!(ep->esb_stat & ESB_ST_REC_QUAL)) if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
ep->esb_stat |= ESB_ST_REC_QUAL;
fc_exch_hold(ep); /* hold for REC_QUAL */ fc_exch_hold(ep); /* hold for REC_QUAL */
ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
fc_exch_timer_set_locked(ep, ep->r_a_tov);
fp = fc_frame_alloc(ep->lp, sizeof(*ap));
if (!fp) {
spin_unlock_bh(&ep->ex_lock);
goto free;
} }
fc_exch_timer_set_locked(ep, ep->r_a_tov);
fh = fc_frame_header_get(fp); fh = fc_frame_header_get(fp);
ap = fc_frame_payload_get(fp, sizeof(*ap)); ap = fc_frame_payload_get(fp, sizeof(*ap));
memset(ap, 0, sizeof(*ap)); memset(ap, 0, sizeof(*ap));
@ -1319,14 +1406,16 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
} }
sp = fc_seq_start_next_locked(sp); sp = fc_seq_start_next_locked(sp);
fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS); fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
ep->esb_stat |= ESB_ST_ABNORMAL;
spin_unlock_bh(&ep->ex_lock); spin_unlock_bh(&ep->ex_lock);
free:
fc_frame_free(rx_fp); fc_frame_free(rx_fp);
return; return;
reject: reject:
fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID); fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
free: goto free;
fc_frame_free(rx_fp);
} }
/** /**
@ -1416,9 +1505,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
* If new exch resp handler is valid then call that * If new exch resp handler is valid then call that
* first. * first.
*/ */
if (ep->resp) if (!fc_invoke_resp(ep, sp, fp))
ep->resp(sp, fp, ep->arg);
else
lport->tt.lport_recv(lport, fp); lport->tt.lport_recv(lport, fp);
fc_exch_release(ep); /* release from lookup */ fc_exch_release(ep); /* release from lookup */
} else { } else {
@ -1442,8 +1529,6 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
struct fc_exch *ep; struct fc_exch *ep;
enum fc_sof sof; enum fc_sof sof;
u32 f_ctl; u32 f_ctl;
void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
void *ex_resp_arg;
int rc; int rc;
ep = fc_exch_find(mp, ntohs(fh->fh_ox_id)); ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
@ -1478,19 +1563,19 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
f_ctl = ntoh24(fh->fh_f_ctl); f_ctl = ntoh24(fh->fh_f_ctl);
fr_seq(fp) = sp; fr_seq(fp) = sp;
spin_lock_bh(&ep->ex_lock);
if (f_ctl & FC_FC_SEQ_INIT) if (f_ctl & FC_FC_SEQ_INIT)
ep->esb_stat |= ESB_ST_SEQ_INIT; ep->esb_stat |= ESB_ST_SEQ_INIT;
spin_unlock_bh(&ep->ex_lock);
if (fc_sof_needs_ack(sof)) if (fc_sof_needs_ack(sof))
fc_seq_send_ack(sp, fp); fc_seq_send_ack(sp, fp);
resp = ep->resp;
ex_resp_arg = ep->arg;
if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T && if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
(f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) == (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
(FC_FC_LAST_SEQ | FC_FC_END_SEQ)) { (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
spin_lock_bh(&ep->ex_lock); spin_lock_bh(&ep->ex_lock);
resp = ep->resp;
rc = fc_exch_done_locked(ep); rc = fc_exch_done_locked(ep);
WARN_ON(fc_seq_exch(sp) != ep); WARN_ON(fc_seq_exch(sp) != ep);
spin_unlock_bh(&ep->ex_lock); spin_unlock_bh(&ep->ex_lock);
@ -1511,10 +1596,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
* If new exch resp handler is valid then call that * If new exch resp handler is valid then call that
* first. * first.
*/ */
if (resp) fc_invoke_resp(ep, sp, fp);
resp(sp, fp, ex_resp_arg);
else
fc_frame_free(fp);
fc_exch_release(ep); fc_exch_release(ep);
return; return;
rel: rel:
@ -1553,8 +1636,6 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
*/ */
static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
{ {
void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
void *ex_resp_arg;
struct fc_frame_header *fh; struct fc_frame_header *fh;
struct fc_ba_acc *ap; struct fc_ba_acc *ap;
struct fc_seq *sp; struct fc_seq *sp;
@ -1599,9 +1680,6 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
break; break;
} }
resp = ep->resp;
ex_resp_arg = ep->arg;
/* do we need to do some other checks here. Can we reuse more of /* do we need to do some other checks here. Can we reuse more of
* fc_exch_recv_seq_resp * fc_exch_recv_seq_resp
*/ */
@ -1613,17 +1691,14 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ) ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
rc = fc_exch_done_locked(ep); rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock); spin_unlock_bh(&ep->ex_lock);
fc_exch_hold(ep);
if (!rc) if (!rc)
fc_exch_delete(ep); fc_exch_delete(ep);
fc_invoke_resp(ep, sp, fp);
if (resp)
resp(sp, fp, ex_resp_arg);
else
fc_frame_free(fp);
if (has_rec) if (has_rec)
fc_exch_timer_set(ep, ep->r_a_tov); fc_exch_timer_set(ep, ep->r_a_tov);
fc_exch_release(ep);
} }
/** /**
@ -1662,7 +1737,7 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
break; break;
default: default:
if (ep) if (ep)
FC_EXCH_DBG(ep, "BLS rctl %x - %s received", FC_EXCH_DBG(ep, "BLS rctl %x - %s received\n",
fh->fh_r_ctl, fh->fh_r_ctl,
fc_exch_rctl_name(fh->fh_r_ctl)); fc_exch_rctl_name(fh->fh_r_ctl));
break; break;
@ -1745,32 +1820,33 @@ static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
/** /**
* fc_exch_reset() - Reset an exchange * fc_exch_reset() - Reset an exchange
* @ep: The exchange to be reset * @ep: The exchange to be reset
*
* Note: May sleep if invoked from outside a response handler.
*/ */
static void fc_exch_reset(struct fc_exch *ep) static void fc_exch_reset(struct fc_exch *ep)
{ {
struct fc_seq *sp; struct fc_seq *sp;
void (*resp)(struct fc_seq *, struct fc_frame *, void *);
void *arg;
int rc = 1; int rc = 1;
spin_lock_bh(&ep->ex_lock); spin_lock_bh(&ep->ex_lock);
fc_exch_abort_locked(ep, 0); fc_exch_abort_locked(ep, 0);
ep->state |= FC_EX_RST_CLEANUP; ep->state |= FC_EX_RST_CLEANUP;
fc_exch_timer_cancel(ep); fc_exch_timer_cancel(ep);
resp = ep->resp;
ep->resp = NULL;
if (ep->esb_stat & ESB_ST_REC_QUAL) if (ep->esb_stat & ESB_ST_REC_QUAL)
atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */ atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */
ep->esb_stat &= ~ESB_ST_REC_QUAL; ep->esb_stat &= ~ESB_ST_REC_QUAL;
arg = ep->arg;
sp = &ep->seq; sp = &ep->seq;
rc = fc_exch_done_locked(ep); rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock); spin_unlock_bh(&ep->ex_lock);
fc_exch_hold(ep);
if (!rc) if (!rc)
fc_exch_delete(ep); fc_exch_delete(ep);
if (resp) fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
resp(sp, ERR_PTR(-FC_EX_CLOSED), arg); fc_seq_set_resp(sp, NULL, ep->arg);
fc_exch_release(ep);
} }
/** /**
@ -1956,13 +2032,13 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
switch (op) { switch (op) {
case ELS_LS_RJT: case ELS_LS_RJT:
FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ"); FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n");
/* fall through */ /* fall through */
case ELS_LS_ACC: case ELS_LS_ACC:
goto cleanup; goto cleanup;
default: default:
FC_EXCH_DBG(aborted_ep, "unexpected response op %x " FC_EXCH_DBG(aborted_ep, "unexpected response op %x for RRQ\n",
"for RRQ", op); op);
return; return;
} }
@ -2533,13 +2609,8 @@ int fc_setup_exch_mgr(void)
* cpu on which exchange originated by simple bitwise * cpu on which exchange originated by simple bitwise
* AND operation between fc_cpu_mask and exchange id. * AND operation between fc_cpu_mask and exchange id.
*/ */
fc_cpu_mask = 1; fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids));
fc_cpu_order = 0; fc_cpu_mask = (1 << fc_cpu_order) - 1;
while (fc_cpu_mask < nr_cpu_ids) {
fc_cpu_mask <<= 1;
fc_cpu_order++;
}
fc_cpu_mask--;
fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue"); fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
if (!fc_exch_workqueue) if (!fc_exch_workqueue)

View File

@ -902,7 +902,8 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
/* /*
* Check for missing or extra data frames. * Check for missing or extra data frames.
*/ */
if (unlikely(fsp->xfer_len != expected_len)) { if (unlikely(fsp->cdb_status == SAM_STAT_GOOD &&
fsp->xfer_len != expected_len)) {
if (fsp->xfer_len < expected_len) { if (fsp->xfer_len < expected_len) {
/* /*
* Some data may be queued locally, * Some data may be queued locally,
@ -955,12 +956,11 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
* Test for transport underrun, independent of response * Test for transport underrun, independent of response
* underrun status. * underrun status.
*/ */
if (fsp->xfer_len < fsp->data_len && !fsp->io_status && if (fsp->cdb_status == SAM_STAT_GOOD &&
fsp->xfer_len < fsp->data_len && !fsp->io_status &&
(!(fsp->scsi_comp_flags & FCP_RESID_UNDER) || (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) { fsp->xfer_len < fsp->data_len - fsp->scsi_resid))
fsp->status_code = FC_DATA_UNDRUN; fsp->status_code = FC_DATA_UNDRUN;
fsp->io_status = 0;
}
} }
seq = fsp->seq_ptr; seq = fsp->seq_ptr;

View File

@ -516,7 +516,7 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
* @lport: The local port receiving the LOGO * @lport: The local port receiving the LOGO
* @fp: The LOGO request frame * @fp: The LOGO request frame
* *
* Locking Note: The lport lock is exected to be held before calling * Locking Note: The lport lock is expected to be held before calling
* this function. * this function.
*/ */
static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
@ -1088,7 +1088,7 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
{ {
unsigned long delay = 0; unsigned long delay = 0;
FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n", FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
PTR_ERR(fp), fc_lport_state(lport), IS_ERR(fp) ? -PTR_ERR(fp) : 0, fc_lport_state(lport),
lport->retry_count); lport->retry_count);
if (PTR_ERR(fp) == -FC_EX_CLOSED) if (PTR_ERR(fp) == -FC_EX_CLOSED)

View File

@ -1705,7 +1705,7 @@ reject:
* @rdata: The remote port that sent the PRLI request * @rdata: The remote port that sent the PRLI request
* @rx_fp: The PRLI request frame * @rx_fp: The PRLI request frame
* *
* Locking Note: The rport lock is exected to be held before calling * Locking Note: The rport lock is expected to be held before calling
* this function. * this function.
*/ */
static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
@ -1824,7 +1824,7 @@ drop:
* @rdata: The remote port that sent the PRLO request * @rdata: The remote port that sent the PRLO request
* @rx_fp: The PRLO request frame * @rx_fp: The PRLO request frame
* *
* Locking Note: The rport lock is exected to be held before calling * Locking Note: The rport lock is expected to be held before calling
* this function. * this function.
*/ */
static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
@ -1895,7 +1895,7 @@ drop:
* @lport: The local port that received the LOGO request * @lport: The local port that received the LOGO request
* @fp: The LOGO request frame * @fp: The LOGO request frame
* *
* Locking Note: The rport lock is exected to be held before calling * Locking Note: The rport lock is expected to be held before calling
* this function. * this function.
*/ */
static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)

View File

@ -2629,7 +2629,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
rspiocbq, rspiocbq,
(phba->fc_ratov * 2) (phba->fc_ratov * 2)
+ LPFC_DRVR_TIMEOUT); + LPFC_DRVR_TIMEOUT);
if (iocb_stat) { if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {
ret_val = -EIO; ret_val = -EIO;
goto err_get_xri_exit; goto err_get_xri_exit;
} }
@ -3204,8 +3204,9 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
rspiocbq, (phba->fc_ratov * 2) + rspiocbq, (phba->fc_ratov * 2) +
LPFC_DRVR_TIMEOUT); LPFC_DRVR_TIMEOUT);
if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) && if ((iocb_stat != IOCB_SUCCESS) ||
(rsp->ulpStatus != IOCB_SUCCESS))) { ((phba->sli_rev < LPFC_SLI_REV4) &&
(rsp->ulpStatus != IOSTAT_SUCCESS))) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"3126 Failed loopback test issue iocb: " "3126 Failed loopback test issue iocb: "
"iocb_stat:x%x\n", iocb_stat); "iocb_stat:x%x\n", iocb_stat);

View File

@ -280,7 +280,7 @@ lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3; buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3;
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
kfree(buf_ptr); kfree(buf_ptr);
ctiocb->context1 = NULL; ctiocb->context3 = NULL;
} }
lpfc_sli_release_iocbq(phba, ctiocb); lpfc_sli_release_iocbq(phba, ctiocb);
return 0; return 0;

View File

@ -4171,8 +4171,6 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
NLP_INT_NODE_ACT(ndlp); NLP_INT_NODE_ACT(ndlp);
atomic_set(&ndlp->cmd_pending, 0); atomic_set(&ndlp->cmd_pending, 0);
ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
if (vport->phba->sli_rev == LPFC_SLI_REV4)
ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
} }
struct lpfc_nodelist * struct lpfc_nodelist *
@ -4217,6 +4215,9 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_initialize_node(vport, ndlp, did); lpfc_initialize_node(vport, ndlp, did);
spin_unlock_irqrestore(&phba->ndlp_lock, flags); spin_unlock_irqrestore(&phba->ndlp_lock, flags);
if (vport->phba->sli_rev == LPFC_SLI_REV4)
ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
if (state != NLP_STE_UNUSED_NODE) if (state != NLP_STE_UNUSED_NODE)
lpfc_nlp_set_state(vport, ndlp, state); lpfc_nlp_set_state(vport, ndlp, state);
@ -5617,6 +5618,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_initialize_node(vport, ndlp, did); lpfc_initialize_node(vport, ndlp, did);
INIT_LIST_HEAD(&ndlp->nlp_listp); INIT_LIST_HEAD(&ndlp->nlp_listp);
if (vport->phba->sli_rev == LPFC_SLI_REV4)
ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
"node init: did:x%x", "node init: did:x%x",

View File

@ -3439,7 +3439,8 @@ struct els_request64_wqe {
#define els_req64_hopcnt_SHIFT 24 #define els_req64_hopcnt_SHIFT 24
#define els_req64_hopcnt_MASK 0x000000ff #define els_req64_hopcnt_MASK 0x000000ff
#define els_req64_hopcnt_WORD word13 #define els_req64_hopcnt_WORD word13
uint32_t reserved[2]; uint32_t word14;
uint32_t max_response_payload_len;
}; };
struct xmit_els_rsp64_wqe { struct xmit_els_rsp64_wqe {
@ -3554,7 +3555,8 @@ struct gen_req64_wqe {
uint32_t relative_offset; uint32_t relative_offset;
struct wqe_rctl_dfctl wge_ctl; /* word 5 */ struct wqe_rctl_dfctl wge_ctl; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */ struct wqe_common wqe_com; /* words 6-11 */
uint32_t rsvd_12_15[4]; uint32_t rsvd_12_14[3];
uint32_t max_response_payload_len;
}; };
struct create_xri_wqe { struct create_xri_wqe {
@ -3584,7 +3586,13 @@ struct abort_cmd_wqe {
struct fcp_iwrite64_wqe { struct fcp_iwrite64_wqe {
struct ulp_bde64 bde; struct ulp_bde64 bde;
uint32_t payload_offset_len; uint32_t word3;
#define cmd_buff_len_SHIFT 16
#define cmd_buff_len_MASK 0x00000ffff
#define cmd_buff_len_WORD word3
#define payload_offset_len_SHIFT 0
#define payload_offset_len_MASK 0x0000ffff
#define payload_offset_len_WORD word3
uint32_t total_xfer_len; uint32_t total_xfer_len;
uint32_t initial_xfer_len; uint32_t initial_xfer_len;
struct wqe_common wqe_com; /* words 6-11 */ struct wqe_common wqe_com; /* words 6-11 */
@ -3594,7 +3602,13 @@ struct fcp_iwrite64_wqe {
struct fcp_iread64_wqe { struct fcp_iread64_wqe {
struct ulp_bde64 bde; struct ulp_bde64 bde;
uint32_t payload_offset_len; /* word 3 */ uint32_t word3;
#define cmd_buff_len_SHIFT 16
#define cmd_buff_len_MASK 0x00000ffff
#define cmd_buff_len_WORD word3
#define payload_offset_len_SHIFT 0
#define payload_offset_len_MASK 0x0000ffff
#define payload_offset_len_WORD word3
uint32_t total_xfer_len; /* word 4 */ uint32_t total_xfer_len; /* word 4 */
uint32_t rsrvd5; /* word 5 */ uint32_t rsrvd5; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */ struct wqe_common wqe_com; /* words 6-11 */
@ -3604,7 +3618,13 @@ struct fcp_iread64_wqe {
struct fcp_icmnd64_wqe { struct fcp_icmnd64_wqe {
struct ulp_bde64 bde; /* words 0-2 */ struct ulp_bde64 bde; /* words 0-2 */
uint32_t rsrvd3; /* word 3 */ uint32_t word3;
#define cmd_buff_len_SHIFT 16
#define cmd_buff_len_MASK 0x00000ffff
#define cmd_buff_len_WORD word3
#define payload_offset_len_SHIFT 0
#define payload_offset_len_MASK 0x0000ffff
#define payload_offset_len_WORD word3
uint32_t rsrvd4; /* word 4 */ uint32_t rsrvd4; /* word 4 */
uint32_t rsrvd5; /* word 5 */ uint32_t rsrvd5; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */ struct wqe_common wqe_com; /* words 6-11 */

View File

@ -4545,7 +4545,7 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba)
pci_save_state(pdev); pci_save_state(pdev);
/* PCIe EEH recovery on powerpc platforms needs fundamental reset */ /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
if (pci_find_capability(pdev, PCI_CAP_ID_EXP)) if (pci_is_pcie(pdev))
pdev->needs_freset = 1; pdev->needs_freset = 1;
return 0; return 0;

View File

@ -1012,14 +1012,6 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
break; break;
} }
/* Allocate iotag for psb->cur_iocbq. */
iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
if (iotag == 0) {
pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
break;
}
lxri = lpfc_sli4_next_xritag(phba); lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) { if (lxri == NO_XRI) {
@ -1028,6 +1020,19 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
kfree(psb); kfree(psb);
break; break;
} }
/* Allocate iotag for psb->cur_iocbq. */
iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
if (iotag == 0) {
pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"3368 Failed to allocated IOTAG for"
" XRI:0x%x\n", lxri);
lpfc_sli4_free_xri(phba, lxri);
break;
}
psb->cur_iocbq.sli4_lxritag = lxri; psb->cur_iocbq.sli4_lxritag = lxri;
psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
@ -4485,9 +4490,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
piocb->ulpContext = piocb->ulpContext =
vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
} }
if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
piocb->ulpFCP2Rcvy = 1;
}
piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
/* ulpTimeout is only one byte */ /* ulpTimeout is only one byte */
@ -4981,6 +4984,73 @@ lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
} }
} }
/**
* lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
*
* This routine checks the FCP RSP INFO to see if the tsk mgmt command succeded
*
* Return code :
* 0x2003 - Error
* 0x2002 - Success
**/
static int
lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
{
struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
uint32_t rsp_info;
uint32_t rsp_len;
uint8_t rsp_info_code;
int ret = FAILED;
if (fcprsp == NULL)
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0703 fcp_rsp is missing\n");
else {
rsp_info = fcprsp->rspStatus2;
rsp_len = be32_to_cpu(fcprsp->rspRspLen);
rsp_info_code = fcprsp->rspInfo3;
lpfc_printf_vlog(vport, KERN_INFO,
LOG_FCP,
"0706 fcp_rsp valid 0x%x,"
" rsp len=%d code 0x%x\n",
rsp_info,
rsp_len, rsp_info_code);
if ((fcprsp->rspStatus2&RSP_LEN_VALID) && (rsp_len == 8)) {
switch (rsp_info_code) {
case RSP_NO_FAILURE:
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0715 Task Mgmt No Failure\n");
ret = SUCCESS;
break;
case RSP_TM_NOT_SUPPORTED: /* TM rejected */
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0716 Task Mgmt Target "
"reject\n");
break;
case RSP_TM_NOT_COMPLETED: /* TM failed */
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0717 Task Mgmt Target "
"failed TM\n");
break;
case RSP_TM_INVALID_LU: /* TM to invalid LU! */
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0718 Task Mgmt to invalid "
"LUN\n");
break;
}
}
}
return ret;
}
/** /**
* lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
* @vport: The virtual port for which this call is being executed. * @vport: The virtual port for which this call is being executed.
@ -5042,12 +5112,8 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
iocbq, iocbqrsp, lpfc_cmd->timeout); iocbq, iocbqrsp, lpfc_cmd->timeout);
if (status != IOCB_SUCCESS) { if ((status != IOCB_SUCCESS) ||
if (status == IOCB_TIMEDOUT) { (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
ret = TIMEOUT_ERROR;
} else
ret = FAILED;
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0727 TMF %s to TGT %d LUN %d failed (%d, %d) " "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
"iocb_flag x%x\n", "iocb_flag x%x\n",
@ -5055,9 +5121,21 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
tgt_id, lun_id, iocbqrsp->iocb.ulpStatus, tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
iocbqrsp->iocb.un.ulpWord[4], iocbqrsp->iocb.un.ulpWord[4],
iocbq->iocb_flag); iocbq->iocb_flag);
} else if (status == IOCB_BUSY) /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
ret = FAILED; if (status == IOCB_SUCCESS) {
else if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
/* Something in the FCP_RSP was invalid.
* Check conditions */
ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
else
ret = FAILED;
} else if (status == IOCB_TIMEDOUT) {
ret = TIMEOUT_ERROR;
} else {
ret = FAILED;
}
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
} else
ret = SUCCESS; ret = SUCCESS;
lpfc_sli_release_iocbq(phba, iocbqrsp); lpfc_sli_release_iocbq(phba, iocbqrsp);
@ -5181,7 +5259,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
unsigned tgt_id = cmnd->device->id; unsigned tgt_id = cmnd->device->id;
unsigned int lun_id = cmnd->device->lun; unsigned int lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event; struct lpfc_scsi_event_header scsi_event;
int status, ret = SUCCESS; int status;
if (!rdata) { if (!rdata) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@ -5222,9 +5300,11 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
* So, continue on. * So, continue on.
* We will report success if all the i/o aborts successfully. * We will report success if all the i/o aborts successfully.
*/ */
ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, if (status == SUCCESS)
status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_LUN); LPFC_CTX_LUN);
return ret;
return status;
} }
/** /**
@ -5248,7 +5328,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
unsigned tgt_id = cmnd->device->id; unsigned tgt_id = cmnd->device->id;
unsigned int lun_id = cmnd->device->lun; unsigned int lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event; struct lpfc_scsi_event_header scsi_event;
int status, ret = SUCCESS; int status;
if (!rdata) { if (!rdata) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@ -5289,9 +5369,10 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
* So, continue on. * So, continue on.
* We will report success if all the i/o aborts successfully. * We will report success if all the i/o aborts successfully.
*/ */
ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, if (status == SUCCESS)
status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_TGT); LPFC_CTX_TGT);
return ret; return status;
} }
/** /**

View File

@ -73,6 +73,7 @@ struct fcp_rsp {
#define RSP_RO_MISMATCH_ERR 0x03 #define RSP_RO_MISMATCH_ERR 0x03
#define RSP_TM_NOT_SUPPORTED 0x04 /* Task mgmt function not supported */ #define RSP_TM_NOT_SUPPORTED 0x04 /* Task mgmt function not supported */
#define RSP_TM_NOT_COMPLETED 0x05 /* Task mgmt function not performed */ #define RSP_TM_NOT_COMPLETED 0x05 /* Task mgmt function not performed */
#define RSP_TM_INVALID_LU 0x09 /* Task mgmt function to invalid LU */
uint32_t rspInfoRsvd; /* FCP_RSP_INFO bytes 4-7 (reserved) */ uint32_t rspInfoRsvd; /* FCP_RSP_INFO bytes 4-7 (reserved) */

View File

@ -71,6 +71,8 @@ static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
int); int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *, static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
uint32_t); uint32_t);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static IOCB_t * static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@ -6566,6 +6568,108 @@ lpfc_mbox_timeout(unsigned long ptr)
return; return;
} }
/**
* lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
* are pending
* @phba: Pointer to HBA context object.
*
* This function checks if any mailbox completions are present on the mailbox
* completion queue.
**/
bool
lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
{
uint32_t idx;
struct lpfc_queue *mcq;
struct lpfc_mcqe *mcqe;
bool pending_completions = false;
if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
return false;
/* Check for completions on mailbox completion queue */
mcq = phba->sli4_hba.mbx_cq;
idx = mcq->hba_index;
while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
(!bf_get_le32(lpfc_trailer_async, mcqe))) {
pending_completions = true;
break;
}
idx = (idx + 1) % mcq->entry_count;
if (mcq->hba_index == idx)
break;
}
return pending_completions;
}
/**
* lpfc_sli4_process_missed_mbox_completions - process mbox completions
* that were missed.
* @phba: Pointer to HBA context object.
*
* For sli4, it is possible to miss an interrupt. As such mbox completions
* maybe missed causing erroneous mailbox timeouts to occur. This function
* checks to see if mbox completions are on the mailbox completion queue
* and will process all the completions associated with the eq for the
* mailbox completion queue.
**/
bool
lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
{
uint32_t eqidx;
struct lpfc_queue *fpeq = NULL;
struct lpfc_eqe *eqe;
bool mbox_pending;
if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
return false;
/* Find the eq associated with the mcq */
if (phba->sli4_hba.hba_eq)
for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++)
if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
phba->sli4_hba.mbx_cq->assoc_qid) {
fpeq = phba->sli4_hba.hba_eq[eqidx];
break;
}
if (!fpeq)
return false;
/* Turn off interrupts from this EQ */
lpfc_sli4_eq_clr_intr(fpeq);
/* Check to see if a mbox completion is pending */
mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
/*
* If a mbox completion is pending, process all the events on EQ
* associated with the mbox completion queue (this could include
* mailbox commands, async events, els commands, receive queue data
* and fcp commands)
*/
if (mbox_pending)
while ((eqe = lpfc_sli4_eq_get(fpeq))) {
lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
fpeq->EQ_processed++;
}
/* Always clear and re-arm the EQ */
lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
return mbox_pending;
}
/** /**
* lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
@ -6583,6 +6687,10 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli; struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring; struct lpfc_sli_ring *pring;
/* If the mailbox completed, process the completion and return */
if (lpfc_sli4_process_missed_mbox_completions(phba))
return;
/* Check the pmbox pointer first. There is a race condition /* Check the pmbox pointer first. There is a race condition
* between the mbox timeout handler getting executed in the * between the mbox timeout handler getting executed in the
* worklist and the mailbox actually completing. When this * worklist and the mailbox actually completing. When this
@ -7077,6 +7185,10 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
1000) + jiffies; 1000) + jiffies;
spin_unlock_irq(&phba->hbalock); spin_unlock_irq(&phba->hbalock);
/* Make sure the mailbox is really active */
if (timeout)
lpfc_sli4_process_missed_mbox_completions(phba);
/* Wait for the outstnading mailbox command to complete */ /* Wait for the outstnading mailbox command to complete */
while (phba->sli.mbox_active) { while (phba->sli.mbox_active) {
/* Check active mailbox complete status every 2ms */ /* Check active mailbox complete status every 2ms */
@ -8076,6 +8188,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
wqe->els_req.max_response_payload_len = total_len - xmit_len;
break; break;
case CMD_XMIT_SEQUENCE64_CX: case CMD_XMIT_SEQUENCE64_CX:
bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
@ -8120,8 +8233,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
command_type = FCP_COMMAND_DATA_OUT; command_type = FCP_COMMAND_DATA_OUT;
/* word3 iocb=iotag wqe=payload_offset_len */ /* word3 iocb=iotag wqe=payload_offset_len */
/* Add the FCP_CMD and FCP_RSP sizes to get the offset */ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
wqe->fcp_iwrite.payload_offset_len = bf_set(payload_offset_len, &wqe->fcp_iwrite,
xmit_len + sizeof(struct fcp_rsp); xmit_len + sizeof(struct fcp_rsp));
bf_set(cmd_buff_len, &wqe->fcp_iwrite,
0);
/* word4 iocb=parameter wqe=total_xfer_length memcpy */ /* word4 iocb=parameter wqe=total_xfer_length memcpy */
/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
@ -8139,8 +8254,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
case CMD_FCP_IREAD64_CR: case CMD_FCP_IREAD64_CR:
/* word3 iocb=iotag wqe=payload_offset_len */ /* word3 iocb=iotag wqe=payload_offset_len */
/* Add the FCP_CMD and FCP_RSP sizes to get the offset */ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
wqe->fcp_iread.payload_offset_len = bf_set(payload_offset_len, &wqe->fcp_iread,
xmit_len + sizeof(struct fcp_rsp); xmit_len + sizeof(struct fcp_rsp));
bf_set(cmd_buff_len, &wqe->fcp_iread,
0);
/* word4 iocb=parameter wqe=total_xfer_length memcpy */ /* word4 iocb=parameter wqe=total_xfer_length memcpy */
/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
@ -8156,8 +8273,13 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
break; break;
case CMD_FCP_ICMND64_CR: case CMD_FCP_ICMND64_CR:
/* word3 iocb=iotag wqe=payload_offset_len */
/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
bf_set(payload_offset_len, &wqe->fcp_icmd,
xmit_len + sizeof(struct fcp_rsp));
bf_set(cmd_buff_len, &wqe->fcp_icmd,
0);
/* word3 iocb=IO_TAG wqe=reserved */ /* word3 iocb=IO_TAG wqe=reserved */
wqe->fcp_icmd.rsrvd3 = 0;
bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
/* Always open the exchange */ /* Always open the exchange */
bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
@ -8203,6 +8325,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
wqe->gen_req.max_response_payload_len = total_len - xmit_len;
command_type = OTHER_COMMAND; command_type = OTHER_COMMAND;
break; break;
case CMD_XMIT_ELS_RSP64_CX: case CMD_XMIT_ELS_RSP64_CX:
@ -10073,6 +10196,11 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
if (iocb_completed) { if (iocb_completed) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI, lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0331 IOCB wake signaled\n"); "0331 IOCB wake signaled\n");
/* Note: we are not indicating if the IOCB has a success
* status or not - that's for the caller to check.
* IOCB_SUCCESS means just that the command was sent and
* completed. Not that it completed successfully.
* */
} else if (timeleft == 0) { } else if (timeleft == 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI, lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0338 IOCB wait timeout error - no " "0338 IOCB wait timeout error - no "
@ -11074,8 +11202,11 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
struct lpfc_iocbq *pIocbOut, struct lpfc_iocbq *pIocbOut,
struct lpfc_wcqe_complete *wcqe) struct lpfc_wcqe_complete *wcqe)
{ {
int numBdes, i;
unsigned long iflags; unsigned long iflags;
uint32_t status; uint32_t status, max_response;
struct lpfc_dmabuf *dmabuf;
struct ulp_bde64 *bpl, bde;
size_t offset = offsetof(struct lpfc_iocbq, iocb); size_t offset = offsetof(struct lpfc_iocbq, iocb);
memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
@ -11092,7 +11223,36 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
else { else {
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed; switch (pIocbOut->iocb.ulpCommand) {
case CMD_ELS_REQUEST64_CR:
dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
bpl = (struct ulp_bde64 *)dmabuf->virt;
bde.tus.w = le32_to_cpu(bpl[1].tus.w);
max_response = bde.tus.f.bdeSize;
break;
case CMD_GEN_REQUEST64_CR:
max_response = 0;
if (!pIocbOut->context3)
break;
numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
sizeof(struct ulp_bde64);
dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
bpl = (struct ulp_bde64 *)dmabuf->virt;
for (i = 0; i < numBdes; i++) {
bde.tus.w = le32_to_cpu(bpl[i].tus.w);
if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
max_response += bde.tus.f.bdeSize;
}
break;
default:
max_response = wcqe->total_data_placed;
break;
}
if (max_response < wcqe->total_data_placed)
pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
else
pIocbIn->iocb.un.genreq64.bdl.bdeSize =
wcqe->total_data_placed;
} }
/* Convert BG errors for completion status */ /* Convert BG errors for completion status */
@ -15098,6 +15258,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
uint16_t max_rpi, rpi_limit; uint16_t max_rpi, rpi_limit;
uint16_t rpi_remaining, lrpi = 0; uint16_t rpi_remaining, lrpi = 0;
struct lpfc_rpi_hdr *rpi_hdr; struct lpfc_rpi_hdr *rpi_hdr;
unsigned long iflag;
max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
rpi_limit = phba->sli4_hba.next_rpi; rpi_limit = phba->sli4_hba.next_rpi;
@ -15106,7 +15267,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
* Fetch the next logical rpi. Because this index is logical, * Fetch the next logical rpi. Because this index is logical,
* the driver starts at 0 each time. * the driver starts at 0 each time.
*/ */
spin_lock_irq(&phba->hbalock); spin_lock_irqsave(&phba->hbalock, iflag);
rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0); rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
if (rpi >= rpi_limit) if (rpi >= rpi_limit)
rpi = LPFC_RPI_ALLOC_ERROR; rpi = LPFC_RPI_ALLOC_ERROR;
@ -15122,7 +15283,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
*/ */
if ((rpi == LPFC_RPI_ALLOC_ERROR) && if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
(phba->sli4_hba.rpi_count >= max_rpi)) { (phba->sli4_hba.rpi_count >= max_rpi)) {
spin_unlock_irq(&phba->hbalock); spin_unlock_irqrestore(&phba->hbalock, iflag);
return rpi; return rpi;
} }
@ -15131,7 +15292,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
* extents. * extents.
*/ */
if (!phba->sli4_hba.rpi_hdrs_in_use) { if (!phba->sli4_hba.rpi_hdrs_in_use) {
spin_unlock_irq(&phba->hbalock); spin_unlock_irqrestore(&phba->hbalock, iflag);
return rpi; return rpi;
} }
@ -15142,7 +15303,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
* how many are supported max by the device. * how many are supported max by the device.
*/ */
rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count; rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
spin_unlock_irq(&phba->hbalock); spin_unlock_irqrestore(&phba->hbalock, iflag);
if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
if (!rpi_hdr) { if (!rpi_hdr) {

View File

@ -673,6 +673,7 @@ void lpfc_sli4_queue_unset(struct lpfc_hba *);
int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t); int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
void lpfc_sli4_free_xri(struct lpfc_hba *, int);
int lpfc_sli4_post_async_mbox(struct lpfc_hba *); int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int); int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *); struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);

View File

@ -18,7 +18,7 @@
* included with this package. * * included with this package. *
*******************************************************************/ *******************************************************************/
#define LPFC_DRIVER_VERSION "8.3.42" #define LPFC_DRIVER_VERSION "8.3.43"
#define LPFC_DRIVER_NAME "lpfc" #define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */ /* Used for SLI 2/3 */

View File

@ -1531,6 +1531,7 @@ struct megasas_instance {
struct megasas_register_set __iomem *reg_set; struct megasas_register_set __iomem *reg_set;
u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY]; u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
struct megasas_pd_list pd_list[MEGASAS_MAX_PD]; struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD];
u8 ld_ids[MEGASAS_MAX_LD_IDS]; u8 ld_ids[MEGASAS_MAX_LD_IDS];
s8 init_id; s8 init_id;

View File

@ -3194,19 +3194,21 @@ megasas_get_pd_list(struct megasas_instance *instance)
(le32_to_cpu(ci->count) < (le32_to_cpu(ci->count) <
(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) { (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
memset(instance->pd_list, 0, memset(instance->local_pd_list, 0,
MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
instance->pd_list[pd_addr->deviceId].tid = instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid =
le16_to_cpu(pd_addr->deviceId); le16_to_cpu(pd_addr->deviceId);
instance->pd_list[pd_addr->deviceId].driveType = instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType =
pd_addr->scsiDevType; pd_addr->scsiDevType;
instance->pd_list[pd_addr->deviceId].driveState = instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
MR_PD_STATE_SYSTEM; MR_PD_STATE_SYSTEM;
pd_addr++; pd_addr++;
} }
memcpy(instance->pd_list, instance->local_pd_list,
sizeof(instance->pd_list));
} }
pci_free_consistent(instance->pdev, pci_free_consistent(instance->pdev,
@ -3998,7 +4000,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
* values * values
*/ */
if ((prev_aen.members.class <= curr_aen.members.class) && if ((prev_aen.members.class <= curr_aen.members.class) &&
!((le16_to_cpu(prev_aen.members.locale) & curr_aen.members.locale) ^ !((prev_aen.members.locale & curr_aen.members.locale) ^
curr_aen.members.locale)) { curr_aen.members.locale)) {
/* /*
* Previously issued event registration includes * Previously issued event registration includes
@ -4006,7 +4008,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
*/ */
return 0; return 0;
} else { } else {
curr_aen.members.locale |= le16_to_cpu(prev_aen.members.locale); curr_aen.members.locale |= prev_aen.members.locale;
if (prev_aen.members.class < curr_aen.members.class) if (prev_aen.members.class < curr_aen.members.class)
curr_aen.members.class = prev_aen.members.class; curr_aen.members.class = prev_aen.members.class;
@ -4097,7 +4099,7 @@ static int megasas_start_aen(struct megasas_instance *instance)
class_locale.members.class = MR_EVT_CLASS_DEBUG; class_locale.members.class = MR_EVT_CLASS_DEBUG;
return megasas_register_aen(instance, return megasas_register_aen(instance,
le32_to_cpu(eli.newest_seq_num) + 1, eli.newest_seq_num + 1,
class_locale.word); class_locale.word);
} }

View File

@ -308,6 +308,117 @@ static ssize_t pm8001_ctl_aap_log_show(struct device *cdev,
return str - buf; return str - buf;
} }
static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL); static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL);
/**
 * pm8001_ctl_ib_queue_log_show - Inbound Queue log
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * Dumps IB_OB_READ_TIMES dwords from the inbound-queue event log region,
 * advancing a persistent per-HBA window (evtlog_ib_offset) on each read so
 * successive sysfs reads walk through the whole queue.
 *
 * A sysfs 'read-only' shost attribute.
 *
 * Return: number of bytes written into @buf.
 */
static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
	int offset;
	char *str = buf;
	int start = 0;
#define IB_MEMMAP(c)	\
		(*(u32 *)((u8 *)pm8001_ha->	\
		memoryMap.region[IB].virt_ptr +	\
		pm8001_ha->evtlog_ib_offset + (c)))

	for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
		/* The original chip_8001 / non-chip_8001 branches emitted
		 * identical code, so the test is dropped. */
		str += sprintf(str, "0x%08x\n", IB_MEMMAP(start));
		start = start + 4;
	}
	pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET;
	/* Wrap the rolling window at the queue size for each chip family */
	if ((((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
		&& (pm8001_ha->chip_id != chip_8001))
		pm8001_ha->evtlog_ib_offset = 0;
	if ((((pm8001_ha->evtlog_ib_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0)
		&& (pm8001_ha->chip_id == chip_8001))
		pm8001_ha->evtlog_ib_offset = 0;

	return str - buf;
}
static DEVICE_ATTR(ib_log, S_IRUGO, pm8001_ctl_ib_queue_log_show, NULL);
/**
 * pm8001_ctl_ob_queue_log_show - Outbound Queue log
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * Dumps IB_OB_READ_TIMES dwords from the outbound-queue event log region,
 * advancing a persistent per-HBA window (evtlog_ob_offset) on each read so
 * successive sysfs reads walk through the whole queue.
 *
 * A sysfs 'read-only' shost attribute.
 *
 * Return: number of bytes written into @buf.
 */
static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
	int offset;
	char *str = buf;
	int start = 0;
#define OB_MEMMAP(c)	\
		(*(u32 *)((u8 *)pm8001_ha->	\
		memoryMap.region[OB].virt_ptr +	\
		pm8001_ha->evtlog_ob_offset + (c)))

	for (offset = 0; offset < IB_OB_READ_TIMES; offset++) {
		/* The original chip_8001 / non-chip_8001 branches emitted
		 * identical code, so the test is dropped. */
		str += sprintf(str, "0x%08x\n", OB_MEMMAP(start));
		start = start + 4;
	}
	pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET;
	/* Wrap the rolling window at the queue size for each chip family */
	if ((((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0)
		&& (pm8001_ha->chip_id != chip_8001))
		pm8001_ha->evtlog_ob_offset = 0;
	if ((((pm8001_ha->evtlog_ob_offset) % (PM8001_IB_OB_QUEUE_SIZE)) == 0)
		&& (pm8001_ha->chip_id == chip_8001))
		pm8001_ha->evtlog_ob_offset = 0;

	return str - buf;
}
static DEVICE_ATTR(ob_log, S_IRUGO, pm8001_ctl_ob_queue_log_show, NULL);
/**
* pm8001_ctl_bios_version_show - Bios version Display
* @cdev:pointer to embedded class device
* @buf:the buffer returned
* A sysfs 'read-only' shost attribute.
*/
static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
char *str = buf;
void *virt_addr;
int bios_index;
DECLARE_COMPLETION_ONSTACK(completion);
struct pm8001_ioctl_payload payload;
pm8001_ha->nvmd_completion = &completion;
payload.minor_function = 7;
payload.offset = 0;
payload.length = 4096;
payload.func_specific = kzalloc(4096, GFP_KERNEL);
PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
wait_for_completion(&completion);
virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT;
bios_index++)
str += sprintf(str, "%c",
*((u8 *)((u8 *)virt_addr+bios_index)));
return str - buf;
}
static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL);
/** /**
* pm8001_ctl_aap_log_show - IOP event log * pm8001_ctl_aap_log_show - IOP event log
* @cdev: pointer to embedded class device * @cdev: pointer to embedded class device
@ -344,6 +455,43 @@ static ssize_t pm8001_ctl_iop_log_show(struct device *cdev,
} }
static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL); static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
/**
 * pm8001_ctl_fatal_log_show - fatal error logging
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * Thin wrapper that delegates to the 80xx fatal-dump collector.
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	/* Keep the u32 intermediate so the value returned to sysfs is
	 * exactly what the original code produced. */
	u32 count = pm80xx_get_fatal_dump(cdev, attr, buf);

	return count;
}
static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL);
/**
 * pm8001_ctl_gsm_log_show - gsm dump collection
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * Thin wrapper that reads one SYSFS_OFFSET-sized window of the GSM dump.
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	/* Keep the u32 intermediate so the value returned to sysfs is
	 * exactly what the original code produced. */
	u32 count = pm8001_get_gsm_dump(cdev, SYSFS_OFFSET, buf);

	return count;
}
static DEVICE_ATTR(gsm_log, S_IRUGO, pm8001_ctl_gsm_log_show, NULL);
#define FLASH_CMD_NONE 0x00 #define FLASH_CMD_NONE 0x00
#define FLASH_CMD_UPDATE 0x01 #define FLASH_CMD_UPDATE 0x01
#define FLASH_CMD_SET_NVMD 0x02 #define FLASH_CMD_SET_NVMD 0x02
@ -603,12 +751,17 @@ struct device_attribute *pm8001_host_attrs[] = {
&dev_attr_update_fw, &dev_attr_update_fw,
&dev_attr_aap_log, &dev_attr_aap_log,
&dev_attr_iop_log, &dev_attr_iop_log,
&dev_attr_fatal_log,
&dev_attr_gsm_log,
&dev_attr_max_out_io, &dev_attr_max_out_io,
&dev_attr_max_devices, &dev_attr_max_devices,
&dev_attr_max_sg_list, &dev_attr_max_sg_list,
&dev_attr_sas_spec_support, &dev_attr_sas_spec_support,
&dev_attr_logging_level, &dev_attr_logging_level,
&dev_attr_host_sas_address, &dev_attr_host_sas_address,
&dev_attr_bios_version,
&dev_attr_ib_log,
&dev_attr_ob_log,
NULL, NULL,
}; };

View File

@ -45,6 +45,8 @@
#define HEADER_LEN 28 #define HEADER_LEN 28
#define SIZE_OFFSET 16 #define SIZE_OFFSET 16
#define BIOSOFFSET 56
#define BIOS_OFFSET_LIMIT 61
#define FLASH_OK 0x000000 #define FLASH_OK 0x000000
#define FAIL_OPEN_BIOS_FILE 0x000100 #define FAIL_OPEN_BIOS_FILE 0x000100
@ -53,5 +55,9 @@
#define FAIL_OUT_MEMORY 0x000c00 #define FAIL_OUT_MEMORY 0x000c00
#define FLASH_IN_PROGRESS 0x001000 #define FLASH_IN_PROGRESS 0x001000
#define IB_OB_READ_TIMES 256
#define SYSFS_OFFSET 1024
#define PM80XX_IB_OB_QUEUE_SIZE (32 * 1024)
#define PM8001_IB_OB_QUEUE_SIZE (16 * 1024)
#endif /* PM8001_CTL_H_INCLUDED */ #endif /* PM8001_CTL_H_INCLUDED */

View File

@ -46,7 +46,10 @@ enum chip_flavors {
chip_8008, chip_8008,
chip_8009, chip_8009,
chip_8018, chip_8018,
chip_8019 chip_8019,
chip_8074,
chip_8076,
chip_8077
}; };
enum phy_speed { enum phy_speed {
@ -99,7 +102,8 @@ enum memory_region_num {
NVMD, /* NVM device */ NVMD, /* NVM device */
DEV_MEM, /* memory for devices */ DEV_MEM, /* memory for devices */
CCB_MEM, /* memory for command control block */ CCB_MEM, /* memory for command control block */
FW_FLASH /* memory for fw flash update */ FW_FLASH, /* memory for fw flash update */
FORENSIC_MEM /* memory for fw forensic data */
}; };
#define PM8001_EVENT_LOG_SIZE (128 * 1024) #define PM8001_EVENT_LOG_SIZE (128 * 1024)

View File

@ -1868,6 +1868,13 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev)) if (unlikely(!t || !t->lldd_task || !t->dev))
return; return;
ts = &t->task_status; ts = &t->task_status;
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW))
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("SAS Address of IO Failure Drive:"
"%016llx", SAS_ADDR(t->dev->sas_addr)));
switch (status) { switch (status) {
case IO_SUCCESS: case IO_SUCCESS:
PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS" PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS"
@ -2276,6 +2283,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 param; u32 param;
u32 status; u32 status;
u32 tag; u32 tag;
int i, j;
u8 sata_addr_low[4];
u32 temp_sata_addr_low;
u8 sata_addr_hi[4];
u32 temp_sata_addr_hi;
struct sata_completion_resp *psataPayload; struct sata_completion_resp *psataPayload;
struct task_status_struct *ts; struct task_status_struct *ts;
struct ata_task_resp *resp ; struct ata_task_resp *resp ;
@ -2325,7 +2337,46 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("ts null\n")); pm8001_printk("ts null\n"));
return; return;
} }
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
if (!((t->dev->parent) &&
(DEV_IS_EXPANDER(t->dev->parent->dev_type)))) {
for (i = 0 , j = 4; j <= 7 && i <= 3; i++ , j++)
sata_addr_low[i] = pm8001_ha->sas_addr[j];
for (i = 0 , j = 0; j <= 3 && i <= 3; i++ , j++)
sata_addr_hi[i] = pm8001_ha->sas_addr[j];
memcpy(&temp_sata_addr_low, sata_addr_low,
sizeof(sata_addr_low));
memcpy(&temp_sata_addr_hi, sata_addr_hi,
sizeof(sata_addr_hi));
temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff)
|((temp_sata_addr_hi << 8) &
0xff0000) |
((temp_sata_addr_hi >> 8)
& 0xff00) |
((temp_sata_addr_hi << 24) &
0xff000000));
temp_sata_addr_low = ((((temp_sata_addr_low >> 24)
& 0xff) |
((temp_sata_addr_low << 8)
& 0xff0000) |
((temp_sata_addr_low >> 8)
& 0xff00) |
((temp_sata_addr_low << 24)
& 0xff000000)) +
pm8001_dev->attached_phy +
0x10);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("SAS Address of IO Failure Drive:"
"%08x%08x", temp_sata_addr_hi,
temp_sata_addr_low));
} else {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("SAS Address of IO Failure Drive:"
"%016llx", SAS_ADDR(t->dev->sas_addr)));
}
}
switch (status) { switch (status) {
case IO_SUCCESS: case IO_SUCCESS:
PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
@ -3087,8 +3138,8 @@ void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_dev = ccb->device; struct pm8001_device *pm8001_dev = ccb->device;
u32 status = le32_to_cpu(pPayload->status); u32 status = le32_to_cpu(pPayload->status);
u32 device_id = le32_to_cpu(pPayload->device_id); u32 device_id = le32_to_cpu(pPayload->device_id);
u8 pds = le32_to_cpu(pPayload->pds_nds) | PDS_BITS; u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS;
u8 nds = le32_to_cpu(pPayload->pds_nds) | NDS_BITS; u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS;
PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set device id = 0x%x state " PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set device id = 0x%x state "
"from 0x%x to 0x%x status = 0x%x!\n", "from 0x%x to 0x%x status = 0x%x!\n",
device_id, pds, nds, status)); device_id, pds, nds, status));
@ -4700,6 +4751,8 @@ int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
sspTMCmd.tmf = cpu_to_le32(tmf->tmf); sspTMCmd.tmf = cpu_to_le32(tmf->tmf);
memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8); memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag); sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
if (pm8001_ha->chip_id != chip_8001)
sspTMCmd.ds_ads_m = 0x08;
circularQ = &pm8001_ha->inbnd_q_tbl[0]; circularQ = &pm8001_ha->inbnd_q_tbl[0];
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0); ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0);
return ret; return ret;
@ -4778,6 +4831,16 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
break; break;
} }
case IOP_RDUMP: {
nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | IOP_RDUMP);
nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length);
nvmd_req.vpd_offset = cpu_to_le32(ioctl_payload->offset);
nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi);
nvmd_req.resp_addr_lo =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);
break;
}
default: default:
break; break;
} }
@ -4938,6 +5001,89 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
return rc; return rc;
} }
/**
 * pm8001_get_gsm_dump - read a window of GSM (global shared memory)
 * registers into a sysfs buffer as space-separated "%08x" words.
 * @cdev: embedded class device of the Scsi_Host
 * @length: number of bytes to dump (max 1 MB)
 * @buf: output buffer (PAGE_SIZE for sysfs callers)
 *
 * The dump walks a rolling window tracked in fatal_forensic_shift_offset;
 * each call advances it by 1 KB and wraps at 1 MB. Reads within a 64 KB
 * page go straight through pm8001_cr32(); crossing a 64 KB boundary first
 * re-shifts BAR4 via pm8001_bar4_shift()/pm80xx_bar4_shift() depending on
 * chip family.
 *
 * Return: number of bytes written to @buf, or 1 on any failure.
 * NOTE(review): a sysfs show reporting "1" on error looks like a 1-byte
 * read to userspace; a negative errno would be clearer — confirm callers
 * before changing.
 */
ssize_t
pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf)
{
	u32 value, rem, offset = 0, bar = 0;
	u32 index, work_offset, dw_length;
	u32 shift_value, gsm_base, gsm_dump_offset;
	char *direct_data;
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;

	direct_data = buf;
	gsm_dump_offset = pm8001_ha->fatal_forensic_shift_offset;

	/* check max is 1 Mbytes */
	/* Also rejects a non-dword-aligned offset and a window that would
	 * run past the 16 MB GSM region. */
	if ((length > 0x100000) || (gsm_dump_offset & 3) ||
		((gsm_dump_offset + length) > 0x1000000))
		return 1;

	/* GSM registers live behind BAR2 on 8001, BAR1 on 80xx chips */
	if (pm8001_ha->chip_id == chip_8001)
		bar = 2;
	else
		bar = 1;

	/* Split the dump offset into a 64 KB page (work_offset) and the
	 * offset within that page. */
	work_offset = gsm_dump_offset & 0xFFFF0000;
	offset = gsm_dump_offset & 0x0000FFFF;
	gsm_dump_offset = work_offset;

	/* adjust length to dword boundary */
	rem = length & 3;
	dw_length = length >> 2;

	for (index = 0; index < dw_length; index++) {
		/* Crossing a 64 KB page: re-shift BAR4 to the new page.
		 * NOTE(review): the 8001 vs 80xx shift encodings differ
		 * (raw mask vs mask >> SHIFT_REG_BIT_SHIFT) — presumably
		 * per-chip register formats; verify against the hw spec. */
		if ((work_offset + offset) & 0xFFFF0000) {
			if (pm8001_ha->chip_id == chip_8001)
				shift_value = ((gsm_dump_offset + offset) &
						SHIFT_REG_64K_MASK);
			else
				shift_value = (((gsm_dump_offset + offset) &
						SHIFT_REG_64K_MASK) >>
						SHIFT_REG_BIT_SHIFT);

			if (pm8001_ha->chip_id == chip_8001) {
				gsm_base = GSM_BASE;
				if (-1 == pm8001_bar4_shift(pm8001_ha,
						(gsm_base + shift_value)))
					return 1;
			} else {
				gsm_base = 0;
				if (-1 == pm80xx_bar4_shift(pm8001_ha,
						(gsm_base + shift_value)))
					return 1;
			}
			gsm_dump_offset = (gsm_dump_offset + offset) &
						0xFFFF0000;
			work_offset = 0;
			offset = offset & 0x0000FFFF;
		}
		value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) &
				0x0000FFFF);
		direct_data += sprintf(direct_data, "%08x ", value);
		offset += 4;
	}
	if (rem != 0) {
		/* Trailing sub-dword bytes: emit one more full dword.
		 * NOTE(review): this writes 4 bytes even though rem < 4. */
		value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) &
				0x0000FFFF);
		/* xfr for non_dw */
		direct_data += sprintf(direct_data, "%08x ", value);
	}
	/* Shift back to BAR4 original address */
	if (pm8001_ha->chip_id == chip_8001) {
		if (-1 == pm8001_bar4_shift(pm8001_ha, 0))
			return 1;
	} else {
		if (-1 == pm80xx_bar4_shift(pm8001_ha, 0))
			return 1;
	}
	/* Advance the rolling window for the next sysfs read; wrap at 1 MB */
	pm8001_ha->fatal_forensic_shift_offset += 1024;

	if (pm8001_ha->fatal_forensic_shift_offset >= 0x100000)
		pm8001_ha->fatal_forensic_shift_offset = 0;

	return direct_data - buf;
}
int int
pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_dev, u32 state) struct pm8001_device *pm8001_dev, u32 state)

View File

@ -1027,5 +1027,8 @@ struct set_dev_state_resp {
#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06 #define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06
#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07 #define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07
#define GSM_BASE 0x4F0000
#define SHIFT_REG_64K_MASK 0xffff0000
#define SHIFT_REG_BIT_SHIFT 8
#endif #endif

View File

@ -54,6 +54,9 @@ static const struct pm8001_chip_info pm8001_chips[] = {
[chip_8009] = {1, 8, &pm8001_80xx_dispatch,}, [chip_8009] = {1, 8, &pm8001_80xx_dispatch,},
[chip_8018] = {0, 16, &pm8001_80xx_dispatch,}, [chip_8018] = {0, 16, &pm8001_80xx_dispatch,},
[chip_8019] = {1, 16, &pm8001_80xx_dispatch,}, [chip_8019] = {1, 16, &pm8001_80xx_dispatch,},
[chip_8074] = {0, 8, &pm8001_80xx_dispatch,},
[chip_8076] = {0, 16, &pm8001_80xx_dispatch,},
[chip_8077] = {0, 16, &pm8001_80xx_dispatch,},
}; };
static int pm8001_id; static int pm8001_id;
@ -344,6 +347,10 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
/* Memory region for fw flash */ /* Memory region for fw flash */
pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096; pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096;
pm8001_ha->memoryMap.region[FORENSIC_MEM].num_elements = 1;
pm8001_ha->memoryMap.region[FORENSIC_MEM].total_len = 0x10000;
pm8001_ha->memoryMap.region[FORENSIC_MEM].element_size = 0x10000;
pm8001_ha->memoryMap.region[FORENSIC_MEM].alignment = 0x10000;
for (i = 0; i < USI_MAX_MEMCNT; i++) { for (i = 0; i < USI_MAX_MEMCNT; i++) {
if (pm8001_mem_alloc(pm8001_ha->pdev, if (pm8001_mem_alloc(pm8001_ha->pdev,
&pm8001_ha->memoryMap.region[i].virt_ptr, &pm8001_ha->memoryMap.region[i].virt_ptr,
@ -664,6 +671,31 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
#endif #endif
} }
/*
 * pm8001_get_phy_settings_info : Read phy setting values from flash and
 * apply them via pm8001_set_phy_profile().
 * @pm8001_ha : our hba.
 *
 * Only active when PM8001_READ_VPD is defined; otherwise a no-op.
 */
void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
{
#ifdef PM8001_READ_VPD
	DECLARE_COMPLETION_ONSTACK(completion);
	struct pm8001_ioctl_payload payload;

	pm8001_ha->nvmd_completion = &completion;
	/* NVMD minor_function 6 — presumably the phy-settings page;
	 * confirm against the firmware spec. */
	payload.minor_function = 6;
	payload.offset = 0;
	payload.length = 4096;
	payload.func_specific = kzalloc(4096, GFP_KERNEL);
	/* Original dereferenced this unchecked and never freed it */
	if (!payload.func_specific)
		return;
	/* Read phy setting values from flash */
	PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
	wait_for_completion(&completion);
	pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific);
	kfree(payload.func_specific);
#endif
}
#ifdef PM8001_USE_MSIX #ifdef PM8001_USE_MSIX
/** /**
* pm8001_setup_msix - enable MSI-X interrupt * pm8001_setup_msix - enable MSI-X interrupt
@ -844,6 +876,10 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
} }
pm8001_init_sas_add(pm8001_ha); pm8001_init_sas_add(pm8001_ha);
/* phy setting support for motherboard controller */
if (pdev->subsystem_vendor != PCI_VENDOR_ID_ADAPTEC2 &&
pdev->subsystem_vendor != 0)
pm8001_get_phy_settings_info(pm8001_ha);
pm8001_post_sas_ha_init(shost, chip); pm8001_post_sas_ha_init(shost, chip);
rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
if (rc) if (rc)
@ -1037,6 +1073,12 @@ static struct pci_device_id pm8001_pci_table[] = {
{ PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 }, { PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 },
{ PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 }, { PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 },
{ PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 }, { PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 },
{ PCI_VDEVICE(PMC_Sierra, 0x8074), chip_8074 },
{ PCI_VDEVICE(ADAPTEC2, 0x8074), chip_8074 },
{ PCI_VDEVICE(PMC_Sierra, 0x8076), chip_8076 },
{ PCI_VDEVICE(ADAPTEC2, 0x8076), chip_8076 },
{ PCI_VDEVICE(PMC_Sierra, 0x8077), chip_8077 },
{ PCI_VDEVICE(ADAPTEC2, 0x8077), chip_8077 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8081, { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 }, PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8081, { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
@ -1057,6 +1099,24 @@ static struct pci_device_id pm8001_pci_table[] = {
PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 }, PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8089, { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 }, PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8074,
PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8074 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8076,
PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8076 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8077,
PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8077 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8074,
PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8074 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8076,
PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8076 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8077,
PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8077 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8076,
PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8076 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8077,
PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8077 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8074,
PCI_VENDOR_ID_ADAPTEC2, 0x0404, 0, 0, chip_8074 },
{} /* terminate list */ {} /* terminate list */
}; };
@ -1108,8 +1168,11 @@ module_init(pm8001_init);
module_exit(pm8001_exit); module_exit(pm8001_exit);
MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>"); MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
MODULE_AUTHOR("Anand Kumar Santhanam <AnandKumar.Santhanam@pmcs.com>");
MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>");
MODULE_DESCRIPTION( MODULE_DESCRIPTION(
"PMC-Sierra PM8001/8081/8088/8089 SAS/SATA controller driver"); "PMC-Sierra PM8001/8081/8088/8089/8074/8076/8077 "
"SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION); MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pm8001_pci_table); MODULE_DEVICE_TABLE(pci, pm8001_pci_table);

View File

@ -447,7 +447,6 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
break; break;
case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP: case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
rc = pm8001_task_prep_ata(pm8001_ha, ccb); rc = pm8001_task_prep_ata(pm8001_ha, ccb);
break; break;
default: default:
@ -704,6 +703,8 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
int res, retry; int res, retry;
struct sas_task *task = NULL; struct sas_task *task = NULL;
struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
struct pm8001_device *pm8001_dev = dev->lldd_dev;
DECLARE_COMPLETION_ONSTACK(completion_setstate);
for (retry = 0; retry < 3; retry++) { for (retry = 0; retry < 3; retry++) {
task = sas_alloc_slow_task(GFP_KERNEL); task = sas_alloc_slow_task(GFP_KERNEL);
@ -729,6 +730,12 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
goto ex_err; goto ex_err;
} }
wait_for_completion(&task->slow_task->completion); wait_for_completion(&task->slow_task->completion);
if (pm8001_ha->chip_id != chip_8001) {
pm8001_dev->setds_completion = &completion_setstate;
PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
pm8001_dev, 0x01);
wait_for_completion(&completion_setstate);
}
res = -TMF_RESP_FUNC_FAILED; res = -TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */ /* Even TMF timed out, return direct. */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {

View File

@ -104,6 +104,9 @@ do { \
#define DEV_IS_EXPANDER(type) ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE)) #define DEV_IS_EXPANDER(type) ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
#define IS_SPCV_12G(dev) ((dev->device == 0X8074) \
|| (dev->device == 0X8076) \
|| (dev->device == 0X8077))
#define PM8001_NAME_LENGTH 32/* generic length of strings */ #define PM8001_NAME_LENGTH 32/* generic length of strings */
extern struct list_head hba_list; extern struct list_head hba_list;
@ -129,6 +132,61 @@ struct pm8001_ioctl_payload {
u8 *func_specific; u8 *func_specific;
}; };
#define MPI_FATAL_ERROR_TABLE_OFFSET_MASK 0xFFFFFF
#define MPI_FATAL_ERROR_TABLE_SIZE(value) ((0xFF000000 & value) >> SHIFT24)
#define MPI_FATAL_EDUMP_TABLE_LO_OFFSET 0x00 /* HNFBUFL */
#define MPI_FATAL_EDUMP_TABLE_HI_OFFSET 0x04 /* HNFBUFH */
#define MPI_FATAL_EDUMP_TABLE_LENGTH 0x08 /* HNFBLEN */
#define MPI_FATAL_EDUMP_TABLE_HANDSHAKE 0x0C /* FDDHSHK */
#define MPI_FATAL_EDUMP_TABLE_STATUS 0x10 /* FDDTSTAT */
#define MPI_FATAL_EDUMP_TABLE_ACCUM_LEN 0x14 /* ACCDDLEN */
#define MPI_FATAL_EDUMP_HANDSHAKE_RDY 0x1
#define MPI_FATAL_EDUMP_HANDSHAKE_BUSY 0x0
#define MPI_FATAL_EDUMP_TABLE_STAT_RSVD 0x0
#define MPI_FATAL_EDUMP_TABLE_STAT_DMA_FAILED 0x1
#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_MORE_DATA 0x2
#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE 0x3
#define TYPE_GSM_SPACE 1
#define TYPE_QUEUE 2
#define TYPE_FATAL 3
#define TYPE_NON_FATAL 4
#define TYPE_INBOUND 1
#define TYPE_OUTBOUND 2
/* Bookkeeping for streaming the controller forensic dump to user space.
 * Which union member is active is selected by data_type (one of the
 * TYPE_* values defined above). Used by pm80xx_get_fatal_dump(), which
 * is called repeatedly from sysfs and keeps its progress here. */
struct forensic_data {
	u32  data_type;	/* TYPE_GSM_SPACE / TYPE_QUEUE / TYPE_FATAL / TYPE_NON_FATAL */
	union {
		struct {
			u32  direct_len;	/* bytes per read chunk */
			u32  direct_offset;	/* offset within GSM space */
			void  *direct_data;	/* output cursor */
		} gsm_buf;
		struct {
			u16  queue_type;	/* TYPE_INBOUND / TYPE_OUTBOUND */
			u16  queue_index;	/* which queue to dump */
			u32  direct_len;	/* bytes per read chunk */
			void  *direct_data;	/* output cursor */
		} queue_buf;
		struct {
			u32  direct_len;	/* bytes per read chunk */
			u32  direct_offset;	/* progress through the dump */
			u32  read_len;		/* bytes produced by last read */
			void  *direct_data;	/* cursor into the sysfs buffer */
		} data_buf;
	};
};
/* bit31-26 - mask bar */
#define SCRATCH_PAD0_BAR_MASK 0xFC000000
/* bit25-0 - offset mask */
#define SCRATCH_PAD0_OFFSET_MASK 0x03FFFFFF
/* if AAP error state */
#define SCRATCH_PAD0_AAPERR_MASK 0xFFFFFFFF
/* Inbound doorbell bit7 */
#define SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP 0x80
/* Inbound doorbell bit7 SPCV */
#define SPCV_MSGU_CFG_TABLE_TRANSFER_DEBUG_INFO 0x80
#define MAIN_MERRDCTO_MERRDCES 0xA0/* DWORD 0x28) */
struct pm8001_dispatch { struct pm8001_dispatch {
char *name; char *name;
int (*chip_init)(struct pm8001_hba_info *pm8001_ha); int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
@ -343,6 +401,7 @@ union main_cfg_table {
u32 phy_attr_table_offset; u32 phy_attr_table_offset;
u32 port_recovery_timer; u32 port_recovery_timer;
u32 interrupt_reassertion_delay; u32 interrupt_reassertion_delay;
u32 fatal_n_non_fatal_dump; /* 0x28 */
} pm80xx_tbl; } pm80xx_tbl;
}; };
@ -417,6 +476,13 @@ struct pm8001_hba_info {
struct pm8001_hba_memspace io_mem[6]; struct pm8001_hba_memspace io_mem[6];
struct mpi_mem_req memoryMap; struct mpi_mem_req memoryMap;
struct encrypt encrypt_info; /* support encryption */ struct encrypt encrypt_info; /* support encryption */
struct forensic_data forensic_info;
u32 fatal_bar_loc;
u32 forensic_last_offset;
u32 fatal_forensic_shift_offset;
u32 forensic_fatal_step;
u32 evtlog_ib_offset;
u32 evtlog_ob_offset;
void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/ void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/
void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/ void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/
void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/ void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/
@ -425,6 +491,7 @@ struct pm8001_hba_info {
void __iomem *pspa_q_tbl_addr; void __iomem *pspa_q_tbl_addr;
/*MPI SAS PHY attributes Queue Config Table Addr*/ /*MPI SAS PHY attributes Queue Config Table Addr*/
void __iomem *ivt_tbl_addr; /*MPI IVT Table Addr */ void __iomem *ivt_tbl_addr; /*MPI IVT Table Addr */
void __iomem *fatal_tbl_addr; /*MPI IVT Table Addr */
union main_cfg_table main_cfg_tbl; union main_cfg_table main_cfg_tbl;
union general_status_table gs_tbl; union general_status_table gs_tbl;
struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM]; struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM];
@ -629,7 +696,12 @@ struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha); int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha);
int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue); int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
u32 length, u8 *buf);
int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
ssize_t pm80xx_get_fatal_dump(struct device *cdev,
struct device_attribute *attr, char *buf);
ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf);
/* ctl shared API */ /* ctl shared API */
extern struct device_attribute *pm8001_host_attrs[]; extern struct device_attribute *pm8001_host_attrs[];

View File

@ -45,6 +45,228 @@
#define SMP_DIRECT 1 #define SMP_DIRECT 1
#define SMP_INDIRECT 2 #define SMP_INDIRECT 2
/*
 * pm80xx_bar4_shift - update the MEMBASE II shift register.
 * @pm8001_ha: our hba card information
 * @shift_value: shift value to program
 *
 * Writes @shift_value and then polls (for up to one second) until the
 * register reads back the programmed value.
 *
 * Returns 0 on success, -1 if the readback never matched.
 */
int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value)
{
	unsigned long deadline;
	u32 readback;

	pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, shift_value);

	/* confirm the setting is written */
	deadline = jiffies + HZ; /* 1 sec */
	do {
		readback = pm8001_cr32(pm8001_ha, 0,
				MEMBASE_II_SHIFT_REGISTER);
	} while (readback != shift_value && time_before(jiffies, deadline));

	if (readback == shift_value)
		return 0;

	PM8001_FAIL_DBG(pm8001_ha,
		pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
		" = 0x%x\n", readback));
	return -1;
}
/*
 * pm80xx_pci_mem_copy - copy dwords from a PCI BAR window into a host buffer.
 * @pm8001_ha: our hba card information
 * @soffset: starting dword offset within the BAR window
 * @destination: host buffer that receives the data.
 *	NOTE(review): this buffer IS written through a cast that discards
 *	the const qualifier -- the parameter should probably not be const.
 * @dw_count: NOTE(review): the loop steps index by 4 but advances the
 *	read offset by index/4, so this behaves as a byte count and only
 *	dw_count/4 dwords are copied -- confirm against callers.
 * @bus_base_number: PCI BAR index to read from
 *
 * Reads are bounded to the first 64K offsets of the BAR window; offsets
 * beyond that are silently skipped.
 */
void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset,
	const void *destination,
	u32 dw_count, u32 bus_base_number)
{
	u32 index, value, offset;
	u32 *destination1;
	destination1 = (u32 *)destination;	/* casts away const */

	for (index = 0; index < dw_count; index += 4, destination1++) {
		offset = (soffset + index / 4);
		if (offset < (64 * 1024)) {
			value = pm8001_cr32(pm8001_ha, bus_base_number, offset);
			/* NOTE(review): value is stored little-endian into a
			 * u32 buffer -- verify how consumers interpret it. */
			*destination1 = cpu_to_le32(value);
		}
	}
	return;
}
/**
 * pm80xx_get_fatal_dump - sysfs show routine that streams the controller's
 * forensic (fatal/non-fatal) dump to user space, one chunk per read.
 * @cdev: device node
 * @attr: sysfs device attribute (unused)
 * @buf: sysfs output buffer
 *
 * Progress across successive reads is kept in pm8001_ha->forensic_info
 * (plus fatal_bar_loc / forensic_last_offset / forensic_fatal_step /
 * fatal_forensic_shift_offset on the hba). Each chunk is emitted as
 * space-separated "%08x" words, prefixed by a status word: 2 = more data
 * follows, 3 = accumulated section complete, 4 = whole dump complete.
 *
 * Returns the number of bytes written into @buf on the normal paths.
 * NOTE(review): on a suspected PCI error it returns the raw 'status'
 * value 1, and on a handshake timeout -1 -- unusual contracts for a
 * sysfs show routine; confirm against the sysfs consumer.
 */
ssize_t pm80xx_get_fatal_dump(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
	void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr;
	u32 status = 1;
	u32 accum_len , reg_val, index, *temp;
	unsigned long start;
	u8 *direct_data;
	char *fatal_error_data = buf;

	pm8001_ha->forensic_info.data_buf.direct_data = buf;
	/* The SPC (8001) controller has no fatal-dump table. */
	if (pm8001_ha->chip_id == chip_8001) {
		pm8001_ha->forensic_info.data_buf.direct_data +=
			sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
			"Not supported for SPC controller");
		return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
			(char *)buf;
	}
	/* First read of a new dump: initialise the forensic bookkeeping. */
	if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
		PM8001_IO_DBG(pm8001_ha,
		pm8001_printk("forensic_info TYPE_NON_FATAL..............\n"));
		direct_data = (u8 *)fatal_error_data;
		pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL;
		pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET;
		pm8001_ha->forensic_info.data_buf.direct_offset = 0;
		pm8001_ha->forensic_info.data_buf.read_len = 0;
		pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
	}
	/* NOTE(review): direct_offset is still 0 here (the block above
	 * leaves it 0), so on the first read the shift register and the
	 * progress counters are also reset by this second block. */
	if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
		/* start to get data */
		/* Program the MEMBASE II Shifting Register with 0x00.*/
		pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
			pm8001_ha->fatal_forensic_shift_offset);
		pm8001_ha->forensic_last_offset = 0;
		pm8001_ha->forensic_fatal_step = 0;
		pm8001_ha->fatal_bar_loc = 0;
	}
	/* Read until accum_len is retrived */
	accum_len = pm8001_mr32(fatal_table_address,
		MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
	PM8001_IO_DBG(pm8001_ha, pm8001_printk("accum_len 0x%x\n",
		accum_len));
	/* All-ones readback usually means the device fell off the bus. */
	if (accum_len == 0xFFFFFFFF) {
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("Possible PCI issue 0x%x not expected\n",
				accum_len));
		return status;
	}
	/* Nothing accumulated, or an implausible length: emit sentinel. */
	if (accum_len == 0 || accum_len >= 0x100000) {
		pm8001_ha->forensic_info.data_buf.direct_data +=
			sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
			"%08x ", 0xFFFFFFFF);
		return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
			(char *)buf;
	}
	temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr;
	if (pm8001_ha->forensic_fatal_step == 0) {
moreData:
		if (pm8001_ha->forensic_info.data_buf.direct_data) {
			/* Data is in bar, copy to host memory */
			pm80xx_pci_mem_copy(pm8001_ha, pm8001_ha->fatal_bar_loc,
			pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr,
			pm8001_ha->forensic_info.data_buf.direct_len ,
				1);
		}
		/* Advance all progress counters by one chunk. */
		pm8001_ha->fatal_bar_loc +=
			pm8001_ha->forensic_info.data_buf.direct_len;
		pm8001_ha->forensic_info.data_buf.direct_offset +=
			pm8001_ha->forensic_info.data_buf.direct_len;
		pm8001_ha->forensic_last_offset +=
			pm8001_ha->forensic_info.data_buf.direct_len;
		pm8001_ha->forensic_info.data_buf.read_len =
			pm8001_ha->forensic_info.data_buf.direct_len;
		/* Whole accumulated region consumed: flag 3 = section done,
		 * hand off to the handshake step on the next read. */
		if (pm8001_ha->forensic_last_offset >= accum_len) {
			pm8001_ha->forensic_info.data_buf.direct_data +=
			sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
				"%08x ", 3);
			for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
				pm8001_ha->forensic_info.data_buf.direct_data +=
				sprintf(pm8001_ha->
					forensic_info.data_buf.direct_data,
					"%08x ", *(temp + index));
			}
			pm8001_ha->fatal_bar_loc = 0;
			pm8001_ha->forensic_fatal_step = 1;
			pm8001_ha->fatal_forensic_shift_offset = 0;
			pm8001_ha->forensic_last_offset = 0;
			status = 0;
			return (char *)pm8001_ha->
				forensic_info.data_buf.direct_data -
				(char *)buf;
		}
		/* Still inside the current 64K BAR window: flag 2 = more. */
		if (pm8001_ha->fatal_bar_loc < (64 * 1024)) {
			pm8001_ha->forensic_info.data_buf.direct_data +=
				sprintf(pm8001_ha->
					forensic_info.data_buf.direct_data,
					"%08x ", 2);
			for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
				pm8001_ha->forensic_info.data_buf.direct_data +=
					sprintf(pm8001_ha->
					forensic_info.data_buf.direct_data,
					"%08x ", *(temp + index));
			}
			status = 0;
			return (char *)pm8001_ha->
				forensic_info.data_buf.direct_data -
				(char *)buf;
		}
		/* Increment the MEMBASE II Shifting Register value by 0x100.*/
		pm8001_ha->forensic_info.data_buf.direct_data +=
			sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
				"%08x ", 2);
		for (index = 0; index < 256; index++) {
			pm8001_ha->forensic_info.data_buf.direct_data +=
				sprintf(pm8001_ha->
					forensic_info.data_buf.direct_data,
					"%08x ", *(temp + index));
		}
		pm8001_ha->fatal_forensic_shift_offset += 0x100;
		pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
			pm8001_ha->fatal_forensic_shift_offset);
		pm8001_ha->fatal_bar_loc = 0;
		status = 0;
		return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
			(char *)buf;
	}
	if (pm8001_ha->forensic_fatal_step == 1) {
		pm8001_ha->fatal_forensic_shift_offset = 0;
		/* Read 64K of the debug data. */
		pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
			pm8001_ha->fatal_forensic_shift_offset);
		/* Tell firmware we are ready for the next 64K. */
		pm8001_mw32(fatal_table_address,
			MPI_FATAL_EDUMP_TABLE_HANDSHAKE,
			MPI_FATAL_EDUMP_HANDSHAKE_RDY);
		/* Poll FDDHSHK until clear */
		start = jiffies + (2 * HZ); /* 2 sec */
		do {
			reg_val = pm8001_mr32(fatal_table_address,
				MPI_FATAL_EDUMP_TABLE_HANDSHAKE);
		} while ((reg_val) && time_before(jiffies, start));
		if (reg_val != 0) {
			PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
			" = 0x%x\n", reg_val));
			return -1;
		}
		/* Read the next 64K of the debug data. */
		pm8001_ha->forensic_fatal_step = 0;
		if (pm8001_mr32(fatal_table_address,
			MPI_FATAL_EDUMP_TABLE_STATUS) !=
				MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) {
			/* Firmware has more data: ack and loop back. */
			pm8001_mw32(fatal_table_address,
				MPI_FATAL_EDUMP_TABLE_HANDSHAKE, 0);
			goto moreData;
		} else {
			/* Flag 4 = dump complete; reset bookkeeping so the
			 * next read starts a fresh dump. */
			pm8001_ha->forensic_info.data_buf.direct_data +=
				sprintf(pm8001_ha->
					forensic_info.data_buf.direct_data,
					"%08x ", 4);
			pm8001_ha->forensic_info.data_buf.read_len = 0xFFFFFFFF;
			pm8001_ha->forensic_info.data_buf.direct_len = 0;
			pm8001_ha->forensic_info.data_buf.direct_offset = 0;
			pm8001_ha->forensic_info.data_buf.read_len = 0;
			status = 0;
		}
	}
	return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
		(char *)buf;
}
/** /**
* read_main_config_table - read the configure table and save it. * read_main_config_table - read the configure table and save it.
* @pm8001_ha: our hba card information * @pm8001_ha: our hba card information
@ -430,7 +652,11 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
table is updated */ table is updated */
pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE); pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);
/* wait until Inbound DoorBell Clear Register toggled */ /* wait until Inbound DoorBell Clear Register toggled */
max_wait_count = 2 * 1000 * 1000;/* 2 sec for spcv/ve */ if (IS_SPCV_12G(pm8001_ha->pdev)) {
max_wait_count = 4 * 1000 * 1000;/* 4 sec */
} else {
max_wait_count = 2 * 1000 * 1000;/* 2 sec */
}
do { do {
udelay(1); udelay(1);
value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
@ -579,6 +805,9 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->pspa_q_tbl_addr = pm8001_ha->pspa_q_tbl_addr =
base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) & base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) &
0xFFFFFF); 0xFFFFFF);
pm8001_ha->fatal_tbl_addr =
base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0xA0) &
0xFFFFFF);
PM8001_INIT_DBG(pm8001_ha, PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("GST OFFSET 0x%x\n", pm8001_printk("GST OFFSET 0x%x\n",
@ -913,7 +1142,11 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET); pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET);
/* wait until Inbound DoorBell Clear Register toggled */ /* wait until Inbound DoorBell Clear Register toggled */
max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */ if (IS_SPCV_12G(pm8001_ha->pdev)) {
max_wait_count = 4 * 1000 * 1000;/* 4 sec */
} else {
max_wait_count = 2 * 1000 * 1000;/* 2 sec */
}
do { do {
udelay(1); udelay(1);
value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
@ -959,6 +1192,7 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
{ {
u32 regval; u32 regval;
u32 bootloader_state; u32 bootloader_state;
u32 ibutton0, ibutton1;
/* Check if MPI is in ready state to reset */ /* Check if MPI is in ready state to reset */
if (mpi_uninit_check(pm8001_ha) != 0) { if (mpi_uninit_check(pm8001_ha) != 0) {
@ -1017,7 +1251,27 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
if (-1 == check_fw_ready(pm8001_ha)) { if (-1 == check_fw_ready(pm8001_ha)) {
PM8001_FAIL_DBG(pm8001_ha, PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Firmware is not ready!\n")); pm8001_printk("Firmware is not ready!\n"));
return -EBUSY; /* check iButton feature support for motherboard controller */
if (pm8001_ha->pdev->subsystem_vendor !=
PCI_VENDOR_ID_ADAPTEC2 &&
pm8001_ha->pdev->subsystem_vendor != 0) {
ibutton0 = pm8001_cr32(pm8001_ha, 0,
MSGU_HOST_SCRATCH_PAD_6);
ibutton1 = pm8001_cr32(pm8001_ha, 0,
MSGU_HOST_SCRATCH_PAD_7);
if (!ibutton0 && !ibutton1) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("iButton Feature is"
" not Available!!!\n"));
return -EBUSY;
}
if (ibutton0 == 0xdeadbeef && ibutton1 == 0xdeadbeef) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("CRC Check for iButton"
" Feature Failed!!!\n"));
return -EBUSY;
}
}
} }
PM8001_INIT_DBG(pm8001_ha, PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("SPCv soft reset Complete\n")); pm8001_printk("SPCv soft reset Complete\n"));
@ -1268,6 +1522,13 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev)) if (unlikely(!t || !t->lldd_task || !t->dev))
return; return;
ts = &t->task_status; ts = &t->task_status;
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW))
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("SAS Address of IO Failure Drive"
":%016llx", SAS_ADDR(t->dev->sas_addr)));
switch (status) { switch (status) {
case IO_SUCCESS: case IO_SUCCESS:
PM8001_IO_DBG(pm8001_ha, PM8001_IO_DBG(pm8001_ha,
@ -1691,6 +1952,10 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 param; u32 param;
u32 status; u32 status;
u32 tag; u32 tag;
int i, j;
u8 sata_addr_low[4];
u32 temp_sata_addr_low, temp_sata_addr_hi;
u8 sata_addr_hi[4];
struct sata_completion_resp *psataPayload; struct sata_completion_resp *psataPayload;
struct task_status_struct *ts; struct task_status_struct *ts;
struct ata_task_resp *resp ; struct ata_task_resp *resp ;
@ -1740,7 +2005,47 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("ts null\n")); pm8001_printk("ts null\n"));
return; return;
} }
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
if (!((t->dev->parent) &&
(DEV_IS_EXPANDER(t->dev->parent->dev_type)))) {
for (i = 0 , j = 4; i <= 3 && j <= 7; i++ , j++)
sata_addr_low[i] = pm8001_ha->sas_addr[j];
for (i = 0 , j = 0; i <= 3 && j <= 3; i++ , j++)
sata_addr_hi[i] = pm8001_ha->sas_addr[j];
memcpy(&temp_sata_addr_low, sata_addr_low,
sizeof(sata_addr_low));
memcpy(&temp_sata_addr_hi, sata_addr_hi,
sizeof(sata_addr_hi));
temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff)
|((temp_sata_addr_hi << 8) &
0xff0000) |
((temp_sata_addr_hi >> 8)
& 0xff00) |
((temp_sata_addr_hi << 24) &
0xff000000));
temp_sata_addr_low = ((((temp_sata_addr_low >> 24)
& 0xff) |
((temp_sata_addr_low << 8)
& 0xff0000) |
((temp_sata_addr_low >> 8)
& 0xff00) |
((temp_sata_addr_low << 24)
& 0xff000000)) +
pm8001_dev->attached_phy +
0x10);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("SAS Address of IO Failure Drive:"
"%08x%08x", temp_sata_addr_hi,
temp_sata_addr_low));
} else {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("SAS Address of IO Failure Drive:"
"%016llx", SAS_ADDR(t->dev->sas_addr)));
}
}
switch (status) { switch (status) {
case IO_SUCCESS: case IO_SUCCESS:
PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
@ -3103,9 +3408,27 @@ static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha, static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb) void *piomb)
{ {
PM8001_MSG_DBG(pm8001_ha, u8 page_code;
pm8001_printk(" pm80xx_addition_functionality\n")); struct set_phy_profile_resp *pPayload =
(struct set_phy_profile_resp *)(piomb + 4);
u32 ppc_phyid = le32_to_cpu(pPayload->ppc_phyid);
u32 status = le32_to_cpu(pPayload->status);
page_code = (u8)((ppc_phyid & 0xFF00) >> 8);
if (status) {
/* status is FAILED */
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("PhyProfile command failed with status "
"0x%08X \n", status));
return -1;
} else {
if (page_code != SAS_PHY_ANALOG_SETTINGS_PAGE) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Invalid page code 0x%X\n",
page_code));
return -1;
}
}
return 0; return 0;
} }
@ -3484,8 +3807,6 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
else else
pm8001_ha->smp_exp_mode = SMP_INDIRECT; pm8001_ha->smp_exp_mode = SMP_INDIRECT;
/* DIRECT MODE support only in spcv/ve */
pm8001_ha->smp_exp_mode = SMP_DIRECT;
tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req)); tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
preq_dma_addr = (char *)phys_to_virt(tmp_addr); preq_dma_addr = (char *)phys_to_virt(tmp_addr);
@ -3501,7 +3822,7 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
/* exclude top 4 bytes for SMP req header */ /* exclude top 4 bytes for SMP req header */
smp_cmd.long_smp_req.long_req_addr = smp_cmd.long_smp_req.long_req_addr =
cpu_to_le64((u64)sg_dma_address cpu_to_le64((u64)sg_dma_address
(&task->smp_task.smp_req) - 4); (&task->smp_task.smp_req) + 4);
/* exclude 4 bytes for SMP req header and CRC */ /* exclude 4 bytes for SMP req header and CRC */
smp_cmd.long_smp_req.long_req_size = smp_cmd.long_smp_req.long_req_size =
cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8); cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8);
@ -3604,10 +3925,10 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
struct ssp_ini_io_start_req ssp_cmd; struct ssp_ini_io_start_req ssp_cmd;
u32 tag = ccb->ccb_tag; u32 tag = ccb->ccb_tag;
int ret; int ret;
u64 phys_addr; u64 phys_addr, start_addr, end_addr;
u32 end_addr_high, end_addr_low;
struct inbound_queue_table *circularQ; struct inbound_queue_table *circularQ;
static u32 inb; u32 q_index;
static u32 outb;
u32 opc = OPC_INB_SSPINIIOSTART; u32 opc = OPC_INB_SSPINIIOSTART;
memset(&ssp_cmd, 0, sizeof(ssp_cmd)); memset(&ssp_cmd, 0, sizeof(ssp_cmd));
memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
@ -3626,7 +3947,8 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd, memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len); task->ssp_task.cmd->cmd_len);
circularQ = &pm8001_ha->inbnd_q_tbl[0]; q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
/* Check if encryption is set */ /* Check if encryption is set */
if (pm8001_ha->chip->encrypt && if (pm8001_ha->chip->encrypt &&
@ -3658,6 +3980,30 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(upper_32_bits(dma_addr)); cpu_to_le32(upper_32_bits(dma_addr));
ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.enc_esgl = 0; ssp_cmd.enc_esgl = 0;
/* Check 4G Boundary */
start_addr = cpu_to_le64(dma_addr);
end_addr = (start_addr + ssp_cmd.enc_len) - 1;
end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
if (end_addr_high != ssp_cmd.enc_addr_high) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("The sg list address "
"start_addr=0x%016llx data_len=0x%x "
"end_addr_high=0x%08x end_addr_low="
"0x%08x has crossed 4G boundary\n",
start_addr, ssp_cmd.enc_len,
end_addr_high, end_addr_low));
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
phys_addr = ccb->ccb_dma_handle +
offsetof(struct pm8001_ccb_info,
buf_prd[0]);
ssp_cmd.enc_addr_low =
cpu_to_le32(lower_32_bits(phys_addr));
ssp_cmd.enc_addr_high =
cpu_to_le32(upper_32_bits(phys_addr));
ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
}
} else if (task->num_scatter == 0) { } else if (task->num_scatter == 0) {
ssp_cmd.enc_addr_low = 0; ssp_cmd.enc_addr_low = 0;
ssp_cmd.enc_addr_high = 0; ssp_cmd.enc_addr_high = 0;
@ -3674,7 +4020,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
} else { } else {
PM8001_IO_DBG(pm8001_ha, pm8001_printk( PM8001_IO_DBG(pm8001_ha, pm8001_printk(
"Sending Normal SAS command 0x%x inb q %x\n", "Sending Normal SAS command 0x%x inb q %x\n",
task->ssp_task.cmd->cmnd[0], inb)); task->ssp_task.cmd->cmnd[0], q_index));
/* fill in PRD (scatter/gather) table, if any */ /* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) { if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter, ccb->n_elem, pm8001_chip_make_sg(task->scatter, ccb->n_elem,
@ -3693,6 +4039,30 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(upper_32_bits(dma_addr)); cpu_to_le32(upper_32_bits(dma_addr));
ssp_cmd.len = cpu_to_le32(task->total_xfer_len); ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.esgl = 0; ssp_cmd.esgl = 0;
/* Check 4G Boundary */
start_addr = cpu_to_le64(dma_addr);
end_addr = (start_addr + ssp_cmd.len) - 1;
end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
if (end_addr_high != ssp_cmd.addr_high) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("The sg list address "
"start_addr=0x%016llx data_len=0x%x "
"end_addr_high=0x%08x end_addr_low="
"0x%08x has crossed 4G boundary\n",
start_addr, ssp_cmd.len,
end_addr_high, end_addr_low));
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
phys_addr = ccb->ccb_dma_handle +
offsetof(struct pm8001_ccb_info,
buf_prd[0]);
ssp_cmd.addr_low =
cpu_to_le32(lower_32_bits(phys_addr));
ssp_cmd.addr_high =
cpu_to_le32(upper_32_bits(phys_addr));
ssp_cmd.esgl = cpu_to_le32(1<<31);
}
} else if (task->num_scatter == 0) { } else if (task->num_scatter == 0) {
ssp_cmd.addr_low = 0; ssp_cmd.addr_low = 0;
ssp_cmd.addr_high = 0; ssp_cmd.addr_high = 0;
@ -3700,11 +4070,9 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.esgl = 0; ssp_cmd.esgl = 0;
} }
} }
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, outb++); q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
/* rotate the outb queue */ &ssp_cmd, q_index);
outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
return ret; return ret;
} }
@ -3716,18 +4084,19 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_ha_dev = dev->lldd_dev; struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
u32 tag = ccb->ccb_tag; u32 tag = ccb->ccb_tag;
int ret; int ret;
static u32 inb; u32 q_index;
static u32 outb;
struct sata_start_req sata_cmd; struct sata_start_req sata_cmd;
u32 hdr_tag, ncg_tag = 0; u32 hdr_tag, ncg_tag = 0;
u64 phys_addr; u64 phys_addr, start_addr, end_addr;
u32 end_addr_high, end_addr_low;
u32 ATAP = 0x0; u32 ATAP = 0x0;
u32 dir; u32 dir;
struct inbound_queue_table *circularQ; struct inbound_queue_table *circularQ;
unsigned long flags; unsigned long flags;
u32 opc = OPC_INB_SATA_HOST_OPSTART; u32 opc = OPC_INB_SATA_HOST_OPSTART;
memset(&sata_cmd, 0, sizeof(sata_cmd)); memset(&sata_cmd, 0, sizeof(sata_cmd));
circularQ = &pm8001_ha->inbnd_q_tbl[0]; q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
if (task->data_dir == PCI_DMA_NONE) { if (task->data_dir == PCI_DMA_NONE) {
ATAP = 0x04; /* no data*/ ATAP = 0x04; /* no data*/
@ -3788,6 +4157,31 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
sata_cmd.enc_addr_high = upper_32_bits(dma_addr); sata_cmd.enc_addr_high = upper_32_bits(dma_addr);
sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
sata_cmd.enc_esgl = 0; sata_cmd.enc_esgl = 0;
/* Check 4G Boundary */
start_addr = cpu_to_le64(dma_addr);
end_addr = (start_addr + sata_cmd.enc_len) - 1;
end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
if (end_addr_high != sata_cmd.enc_addr_high) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("The sg list address "
"start_addr=0x%016llx data_len=0x%x "
"end_addr_high=0x%08x end_addr_low"
"=0x%08x has crossed 4G boundary\n",
start_addr, sata_cmd.enc_len,
end_addr_high, end_addr_low));
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
phys_addr = ccb->ccb_dma_handle +
offsetof(struct pm8001_ccb_info,
buf_prd[0]);
sata_cmd.enc_addr_low =
lower_32_bits(phys_addr);
sata_cmd.enc_addr_high =
upper_32_bits(phys_addr);
sata_cmd.enc_esgl =
cpu_to_le32(1 << 31);
}
} else if (task->num_scatter == 0) { } else if (task->num_scatter == 0) {
sata_cmd.enc_addr_low = 0; sata_cmd.enc_addr_low = 0;
sata_cmd.enc_addr_high = 0; sata_cmd.enc_addr_high = 0;
@ -3808,7 +4202,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
} else { } else {
PM8001_IO_DBG(pm8001_ha, pm8001_printk( PM8001_IO_DBG(pm8001_ha, pm8001_printk(
"Sending Normal SATA command 0x%x inb %x\n", "Sending Normal SATA command 0x%x inb %x\n",
sata_cmd.sata_fis.command, inb)); sata_cmd.sata_fis.command, q_index));
/* dad (bit 0-1) is 0 */ /* dad (bit 0-1) is 0 */
sata_cmd.ncqtag_atap_dir_m_dad = sata_cmd.ncqtag_atap_dir_m_dad =
cpu_to_le32(((ncg_tag & 0xff)<<16) | cpu_to_le32(((ncg_tag & 0xff)<<16) |
@ -3829,6 +4223,30 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
sata_cmd.addr_high = upper_32_bits(dma_addr); sata_cmd.addr_high = upper_32_bits(dma_addr);
sata_cmd.len = cpu_to_le32(task->total_xfer_len); sata_cmd.len = cpu_to_le32(task->total_xfer_len);
sata_cmd.esgl = 0; sata_cmd.esgl = 0;
/* Check 4G Boundary */
start_addr = cpu_to_le64(dma_addr);
end_addr = (start_addr + sata_cmd.len) - 1;
end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
if (end_addr_high != sata_cmd.addr_high) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("The sg list address "
"start_addr=0x%016llx data_len=0x%x"
"end_addr_high=0x%08x end_addr_low="
"0x%08x has crossed 4G boundary\n",
start_addr, sata_cmd.len,
end_addr_high, end_addr_low));
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
phys_addr = ccb->ccb_dma_handle +
offsetof(struct pm8001_ccb_info,
buf_prd[0]);
sata_cmd.addr_low =
lower_32_bits(phys_addr);
sata_cmd.addr_high =
upper_32_bits(phys_addr);
sata_cmd.esgl = cpu_to_le32(1 << 31);
}
} else if (task->num_scatter == 0) { } else if (task->num_scatter == 0) {
sata_cmd.addr_low = 0; sata_cmd.addr_low = 0;
sata_cmd.addr_high = 0; sata_cmd.addr_high = 0;
@ -3905,12 +4323,9 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
} }
} }
} }
q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
&sata_cmd, outb++); &sata_cmd, q_index);
/* rotate the outb queue */
outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
return ret; return ret;
} }
@ -3941,9 +4356,16 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
** [14] 0b disable spin up hold; 1b enable spin up hold ** [14] 0b disable spin up hold; 1b enable spin up hold
** [15] ob no change in current PHY analig setup 1b enable using SPAST ** [15] ob no change in current PHY analig setup 1b enable using SPAST
*/ */
payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | if (!IS_SPCV_12G(pm8001_ha->pdev))
LINKMODE_AUTO | LINKRATE_15 | payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
LINKRATE_30 | LINKRATE_60 | phy_id); LINKMODE_AUTO | LINKRATE_15 |
LINKRATE_30 | LINKRATE_60 | phy_id);
else
payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
LINKMODE_AUTO | LINKRATE_15 |
LINKRATE_30 | LINKRATE_60 | LINKRATE_120 |
phy_id);
/* SSC Disable and SAS Analog ST configuration */ /* SSC Disable and SAS Analog ST configuration */
/** /**
payload.ase_sh_lm_slr_phyid = payload.ase_sh_lm_slr_phyid =
@ -4102,6 +4524,45 @@ pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
u32 operation, u32 phyid, u32 length, u32 *buf)
{
u32 tag , i, j = 0;
int rc;
struct set_phy_profile_req payload;
struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SET_PHY_PROFILE;
memset(&payload, 0, sizeof(payload));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc)
PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Invalid tag\n"));
circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = cpu_to_le32(tag);
payload.ppc_phyid = (((operation & 0xF) << 8) | (phyid & 0xFF));
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk(" phy profile command for phy %x ,length is %d\n",
payload.ppc_phyid, length));
for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) {
payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i));
j++;
}
pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
}
void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
u32 length, u8 *buf)
{
u32 page_code, i;
page_code = SAS_PHY_ANALOG_SETTINGS_PAGE;
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
mpi_set_phy_profile_req(pm8001_ha,
SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf);
length = length + PHY_DWORD_LENGTH;
}
PM8001_INIT_DBG(pm8001_ha, pm8001_printk("phy settings completed\n"));
}
const struct pm8001_dispatch pm8001_80xx_dispatch = { const struct pm8001_dispatch pm8001_80xx_dispatch = {
.name = "pmc80xx", .name = "pmc80xx",
.chip_init = pm80xx_chip_init, .chip_init = pm80xx_chip_init,

View File

@ -168,6 +168,11 @@
#define LINKRATE_15 (0x01 << 8) #define LINKRATE_15 (0x01 << 8)
#define LINKRATE_30 (0x02 << 8) #define LINKRATE_30 (0x02 << 8)
#define LINKRATE_60 (0x06 << 8) #define LINKRATE_60 (0x06 << 8)
#define LINKRATE_120 (0x08 << 8)
/* phy_profile */
#define SAS_PHY_ANALOG_SETTINGS_PAGE 0x04
#define PHY_DWORD_LENGTH 0xC
/* Thermal related */ /* Thermal related */
#define THERMAL_ENABLE 0x1 #define THERMAL_ENABLE 0x1
@ -1223,10 +1228,10 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
/* MSGU CONFIGURATION TABLE*/ /* MSGU CONFIGURATION TABLE*/
#define SPCv_MSGU_CFG_TABLE_UPDATE 0x01 #define SPCv_MSGU_CFG_TABLE_UPDATE 0x001
#define SPCv_MSGU_CFG_TABLE_RESET 0x02 #define SPCv_MSGU_CFG_TABLE_RESET 0x002
#define SPCv_MSGU_CFG_TABLE_FREEZE 0x04 #define SPCv_MSGU_CFG_TABLE_FREEZE 0x004
#define SPCv_MSGU_CFG_TABLE_UNFREEZE 0x08 #define SPCv_MSGU_CFG_TABLE_UNFREEZE 0x008
#define MSGU_IBDB_SET 0x00 #define MSGU_IBDB_SET 0x00
#define MSGU_HOST_INT_STATUS 0x08 #define MSGU_HOST_INT_STATUS 0x08
#define MSGU_HOST_INT_MASK 0x0C #define MSGU_HOST_INT_MASK 0x0C
@ -1520,4 +1525,6 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06 #define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06
#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07 #define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07
#define MEMBASE_II_SHIFT_REGISTER 0x1010
#endif #endif

View File

@ -306,6 +306,7 @@ struct ddb_entry {
struct qla_ddb_index { struct qla_ddb_index {
struct list_head list; struct list_head list;
uint16_t fw_ddb_idx; uint16_t fw_ddb_idx;
uint16_t flash_ddb_idx;
struct dev_db_entry fw_ddb; struct dev_db_entry fw_ddb;
uint8_t flash_isid[6]; uint8_t flash_isid[6];
}; };

View File

@ -539,6 +539,10 @@ struct qla_flt_region {
#define ENABLE_INTERNAL_LOOPBACK 0x04 #define ENABLE_INTERNAL_LOOPBACK 0x04
#define ENABLE_EXTERNAL_LOOPBACK 0x08 #define ENABLE_EXTERNAL_LOOPBACK 0x08
/* generic defines to enable/disable params */
#define QL4_PARAM_DISABLE 0
#define QL4_PARAM_ENABLE 1
/*************************************************************************/ /*************************************************************************/
/* Host Adapter Initialization Control Block (from host) */ /* Host Adapter Initialization Control Block (from host) */

View File

@ -83,6 +83,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts); uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts);
int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username, int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
char *password, int bidi, uint16_t *chap_index); char *password, int bidi, uint16_t *chap_index);
int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
uint16_t idx, int bidi);
void qla4xxx_queue_iocb(struct scsi_qla_host *ha); void qla4xxx_queue_iocb(struct scsi_qla_host *ha);
void qla4xxx_complete_iocb(struct scsi_qla_host *ha); void qla4xxx_complete_iocb(struct scsi_qla_host *ha);

View File

@ -82,3 +82,15 @@ qla4xxx_disable_intrs(struct scsi_qla_host *ha)
__qla4xxx_disable_intrs(ha); __qla4xxx_disable_intrs(ha);
spin_unlock_irqrestore(&ha->hardware_lock, flags); spin_unlock_irqrestore(&ha->hardware_lock, flags);
} }
static inline int qla4xxx_get_chap_type(struct ql4_chap_table *chap_entry)
{
int type;
if (chap_entry->flags & BIT_7)
type = LOCAL_CHAP;
else
type = BIDI_CHAP;
return type;
}

View File

@ -1530,13 +1530,26 @@ exit_get_chap:
return ret; return ret;
} }
static int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, /**
char *password, uint16_t idx, int bidi) * qla4xxx_set_chap - Make a chap entry at the given index
* @ha: pointer to adapter structure
* @username: CHAP username to set
* @password: CHAP password to set
* @idx: CHAP index at which to make the entry
* @bidi: type of chap entry (chap_in or chap_out)
*
* Create chap entry at the given index with the information provided.
*
* Note: Caller should acquire the chap lock before getting here.
**/
int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
uint16_t idx, int bidi)
{ {
int ret = 0; int ret = 0;
int rval = QLA_ERROR; int rval = QLA_ERROR;
uint32_t offset = 0; uint32_t offset = 0;
struct ql4_chap_table *chap_table; struct ql4_chap_table *chap_table;
uint32_t chap_size = 0;
dma_addr_t chap_dma; dma_addr_t chap_dma;
chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
@ -1554,7 +1567,20 @@ static int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username,
strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN); strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN);
strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN); strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN);
chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE); chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
offset = FLASH_CHAP_OFFSET | (idx * sizeof(struct ql4_chap_table));
if (is_qla40XX(ha)) {
chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table);
offset = FLASH_CHAP_OFFSET;
} else { /* Single region contains CHAP info for both ports which is
* divided into half for each port.
*/
chap_size = ha->hw.flt_chap_size / 2;
offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
if (ha->port_num == 1)
offset += chap_size;
}
offset += (idx * sizeof(struct ql4_chap_table));
rval = qla4xxx_set_flash(ha, chap_dma, offset, rval = qla4xxx_set_flash(ha, chap_dma, offset,
sizeof(struct ql4_chap_table), sizeof(struct ql4_chap_table),
FLASH_OPT_RMW_COMMIT); FLASH_OPT_RMW_COMMIT);
@ -1611,7 +1637,7 @@ int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
goto exit_unlock_uni_chap; goto exit_unlock_uni_chap;
} }
if (!(chap_table->flags & BIT_6)) { if (!(chap_table->flags & BIT_7)) {
ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n"); ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n");
rval = QLA_ERROR; rval = QLA_ERROR;
goto exit_unlock_uni_chap; goto exit_unlock_uni_chap;

View File

@ -149,6 +149,8 @@ static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
uint32_t *num_entries, char *buf); uint32_t *num_entries, char *buf);
static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx); static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data,
int len);
/* /*
* SCSI host template entry points * SCSI host template entry points
@ -252,6 +254,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
.send_ping = qla4xxx_send_ping, .send_ping = qla4xxx_send_ping,
.get_chap = qla4xxx_get_chap_list, .get_chap = qla4xxx_get_chap_list,
.delete_chap = qla4xxx_delete_chap, .delete_chap = qla4xxx_delete_chap,
.set_chap = qla4xxx_set_chap_entry,
.get_flashnode_param = qla4xxx_sysfs_ddb_get_param, .get_flashnode_param = qla4xxx_sysfs_ddb_get_param,
.set_flashnode_param = qla4xxx_sysfs_ddb_set_param, .set_flashnode_param = qla4xxx_sysfs_ddb_set_param,
.new_flashnode = qla4xxx_sysfs_ddb_add, .new_flashnode = qla4xxx_sysfs_ddb_add,
@ -508,6 +511,95 @@ static umode_t qla4_attr_is_visible(int param_type, int param)
return 0; return 0;
} }
static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha,
int16_t chap_index,
struct ql4_chap_table **chap_entry)
{
int rval = QLA_ERROR;
int max_chap_entries;
if (!ha->chap_list) {
ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
rval = QLA_ERROR;
goto exit_get_chap;
}
if (is_qla80XX(ha))
max_chap_entries = (ha->hw.flt_chap_size / 2) /
sizeof(struct ql4_chap_table);
else
max_chap_entries = MAX_CHAP_ENTRIES_40XX;
if (chap_index > max_chap_entries) {
ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
rval = QLA_ERROR;
goto exit_get_chap;
}
*chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index;
if ((*chap_entry)->cookie !=
__constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
rval = QLA_ERROR;
*chap_entry = NULL;
} else {
rval = QLA_SUCCESS;
}
exit_get_chap:
return rval;
}
/**
* qla4xxx_find_free_chap_index - Find the first free chap index
* @ha: pointer to adapter structure
* @chap_index: CHAP index to be returned
*
* Find the first free chap index available in the chap table
*
* Note: Caller should acquire the chap lock before getting here.
**/
static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha,
uint16_t *chap_index)
{
int i, rval;
int free_index = -1;
int max_chap_entries = 0;
struct ql4_chap_table *chap_table;
if (is_qla80XX(ha))
max_chap_entries = (ha->hw.flt_chap_size / 2) /
sizeof(struct ql4_chap_table);
else
max_chap_entries = MAX_CHAP_ENTRIES_40XX;
if (!ha->chap_list) {
ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n");
rval = QLA_ERROR;
goto exit_find_chap;
}
for (i = 0; i < max_chap_entries; i++) {
chap_table = (struct ql4_chap_table *)ha->chap_list + i;
if ((chap_table->cookie !=
__constant_cpu_to_le16(CHAP_VALID_COOKIE)) &&
(i > MAX_RESRV_CHAP_IDX)) {
free_index = i;
break;
}
}
if (free_index != -1) {
*chap_index = free_index;
rval = QLA_SUCCESS;
} else {
rval = QLA_ERROR;
}
exit_find_chap:
return rval;
}
static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
uint32_t *num_entries, char *buf) uint32_t *num_entries, char *buf)
{ {
@ -691,6 +783,111 @@ exit_delete_chap:
return ret; return ret;
} }
/**
* qla4xxx_set_chap_entry - Make chap entry with given information
* @shost: pointer to host
* @data: chap info - credentials, index and type to make chap entry
* @len: length of data
*
* Add or update chap entry with the given information
**/
static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
{
struct scsi_qla_host *ha = to_qla_host(shost);
struct iscsi_chap_rec chap_rec;
struct ql4_chap_table *chap_entry = NULL;
struct iscsi_param_info *param_info;
struct nlattr *attr;
int max_chap_entries = 0;
int type;
int rem = len;
int rc = 0;
memset(&chap_rec, 0, sizeof(chap_rec));
nla_for_each_attr(attr, data, len, rem) {
param_info = nla_data(attr);
switch (param_info->param) {
case ISCSI_CHAP_PARAM_INDEX:
chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value;
break;
case ISCSI_CHAP_PARAM_CHAP_TYPE:
chap_rec.chap_type = param_info->value[0];
break;
case ISCSI_CHAP_PARAM_USERNAME:
memcpy(chap_rec.username, param_info->value,
param_info->len);
break;
case ISCSI_CHAP_PARAM_PASSWORD:
memcpy(chap_rec.password, param_info->value,
param_info->len);
break;
case ISCSI_CHAP_PARAM_PASSWORD_LEN:
chap_rec.password_length = param_info->value[0];
break;
default:
ql4_printk(KERN_ERR, ha,
"%s: No such sysfs attribute\n", __func__);
rc = -ENOSYS;
goto exit_set_chap;
};
}
if (chap_rec.chap_type == CHAP_TYPE_IN)
type = BIDI_CHAP;
else
type = LOCAL_CHAP;
if (is_qla80XX(ha))
max_chap_entries = (ha->hw.flt_chap_size / 2) /
sizeof(struct ql4_chap_table);
else
max_chap_entries = MAX_CHAP_ENTRIES_40XX;
mutex_lock(&ha->chap_sem);
if (chap_rec.chap_tbl_idx < max_chap_entries) {
rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx,
&chap_entry);
if (!rc) {
if (!(type == qla4xxx_get_chap_type(chap_entry))) {
ql4_printk(KERN_INFO, ha,
"Type mismatch for CHAP entry %d\n",
chap_rec.chap_tbl_idx);
rc = -EINVAL;
goto exit_unlock_chap;
}
/* If chap index is in use then don't modify it */
rc = qla4xxx_is_chap_active(shost,
chap_rec.chap_tbl_idx);
if (rc) {
ql4_printk(KERN_INFO, ha,
"CHAP entry %d is in use\n",
chap_rec.chap_tbl_idx);
rc = -EBUSY;
goto exit_unlock_chap;
}
}
} else {
rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx);
if (rc) {
ql4_printk(KERN_INFO, ha, "CHAP entry not available\n");
rc = -EBUSY;
goto exit_unlock_chap;
}
}
rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password,
chap_rec.chap_tbl_idx, type);
exit_unlock_chap:
mutex_unlock(&ha->chap_sem);
exit_set_chap:
return rc;
}
static int qla4xxx_get_iface_param(struct iscsi_iface *iface, static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
enum iscsi_param_type param_type, enum iscsi_param_type param_type,
int param, char *buf) int param, char *buf)
@ -1455,9 +1652,12 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
struct iscsi_session *sess = cls_sess->dd_data; struct iscsi_session *sess = cls_sess->dd_data;
struct ddb_entry *ddb_entry = sess->dd_data; struct ddb_entry *ddb_entry = sess->dd_data;
struct scsi_qla_host *ha = ddb_entry->ha; struct scsi_qla_host *ha = ddb_entry->ha;
struct iscsi_cls_conn *cls_conn = ddb_entry->conn;
struct ql4_chap_table chap_tbl;
int rval, len; int rval, len;
uint16_t idx; uint16_t idx;
memset(&chap_tbl, 0, sizeof(chap_tbl));
switch (param) { switch (param) {
case ISCSI_PARAM_CHAP_IN_IDX: case ISCSI_PARAM_CHAP_IN_IDX:
rval = qla4xxx_get_chap_index(ha, sess->username_in, rval = qla4xxx_get_chap_index(ha, sess->username_in,
@ -1469,14 +1669,46 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
len = sprintf(buf, "%hu\n", idx); len = sprintf(buf, "%hu\n", idx);
break; break;
case ISCSI_PARAM_CHAP_OUT_IDX: case ISCSI_PARAM_CHAP_OUT_IDX:
rval = qla4xxx_get_chap_index(ha, sess->username, if (ddb_entry->ddb_type == FLASH_DDB) {
sess->password, LOCAL_CHAP, if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
&idx); idx = ddb_entry->chap_tbl_idx;
rval = QLA_SUCCESS;
} else {
rval = QLA_ERROR;
}
} else {
rval = qla4xxx_get_chap_index(ha, sess->username,
sess->password,
LOCAL_CHAP, &idx);
}
if (rval) if (rval)
len = sprintf(buf, "\n"); len = sprintf(buf, "\n");
else else
len = sprintf(buf, "%hu\n", idx); len = sprintf(buf, "%hu\n", idx);
break; break;
case ISCSI_PARAM_USERNAME:
case ISCSI_PARAM_PASSWORD:
/* First, populate session username and password for FLASH DDB,
* if not already done. This happens when session login fails
* for a FLASH DDB.
*/
if (ddb_entry->ddb_type == FLASH_DDB &&
ddb_entry->chap_tbl_idx != INVALID_ENTRY &&
!sess->username && !sess->password) {
idx = ddb_entry->chap_tbl_idx;
rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
chap_tbl.secret,
idx);
if (!rval) {
iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
(char *)chap_tbl.name,
strlen((char *)chap_tbl.name));
iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
(char *)chap_tbl.secret,
chap_tbl.secret_len);
}
}
/* allow fall-through */
default: default:
return iscsi_session_get_param(cls_sess, param, buf); return iscsi_session_get_param(cls_sess, param, buf);
} }
@ -2373,11 +2605,6 @@ static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
COPY_ISID(sess->isid, fw_ddb_entry->isid); COPY_ISID(sess->isid, fw_ddb_entry->isid);
ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
if (ddb_link < MAX_DDB_ENTRIES)
sess->discovery_parent_idx = ddb_link;
else
sess->discovery_parent_idx = DDB_NO_LINK;
if (ddb_link == DDB_ISNS) if (ddb_link == DDB_ISNS)
disc_parent = ISCSI_DISC_PARENT_ISNS; disc_parent = ISCSI_DISC_PARENT_ISNS;
else if (ddb_link == DDB_NO_LINK) else if (ddb_link == DDB_NO_LINK)
@ -2402,6 +2629,7 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
int buflen = 0; int buflen = 0;
struct iscsi_session *sess; struct iscsi_session *sess;
struct ddb_entry *ddb_entry; struct ddb_entry *ddb_entry;
struct ql4_chap_table chap_tbl;
struct iscsi_conn *conn; struct iscsi_conn *conn;
char ip_addr[DDB_IPADDR_LEN]; char ip_addr[DDB_IPADDR_LEN];
uint16_t options = 0; uint16_t options = 0;
@ -2409,6 +2637,7 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
sess = cls_sess->dd_data; sess = cls_sess->dd_data;
ddb_entry = sess->dd_data; ddb_entry = sess->dd_data;
conn = cls_conn->dd_data; conn = cls_conn->dd_data;
memset(&chap_tbl, 0, sizeof(chap_tbl));
ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
@ -2435,6 +2664,19 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
(char *)fw_ddb_entry->iscsi_name, buflen); (char *)fw_ddb_entry->iscsi_name, buflen);
iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME, iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
(char *)ha->name_string, buflen); (char *)ha->name_string, buflen);
if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) {
if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name,
chap_tbl.secret,
ddb_entry->chap_tbl_idx)) {
iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME,
(char *)chap_tbl.name,
strlen((char *)chap_tbl.name));
iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD,
(char *)chap_tbl.secret,
chap_tbl.secret_len);
}
}
} }
void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha, void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
@ -4937,7 +5179,8 @@ static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
} }
static int qla4xxx_is_session_exists(struct scsi_qla_host *ha, static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry) struct dev_db_entry *fw_ddb_entry,
uint32_t *index)
{ {
struct ddb_entry *ddb_entry; struct ddb_entry *ddb_entry;
struct ql4_tuple_ddb *fw_tddb = NULL; struct ql4_tuple_ddb *fw_tddb = NULL;
@ -4971,6 +5214,8 @@ static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
qla4xxx_get_param_ddb(ddb_entry, tmp_tddb); qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) { if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
ret = QLA_SUCCESS; /* found */ ret = QLA_SUCCESS; /* found */
if (index != NULL)
*index = idx;
goto exit_check; goto exit_check;
} }
} }
@ -5206,6 +5451,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
ddb_entry->ha = ha; ddb_entry->ha = ha;
ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb; ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
ddb_entry->ddb_change = qla4xxx_flash_ddb_change; ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
ddb_entry->chap_tbl_idx = INVALID_ENTRY;
atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
atomic_set(&ddb_entry->relogin_timer, 0); atomic_set(&ddb_entry->relogin_timer, 0);
@ -5267,6 +5513,87 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
} while (time_after(wtime, jiffies)); } while (time_after(wtime, jiffies));
} }
static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry,
struct dev_db_entry *flash_ddb_entry)
{
uint16_t options = 0;
size_t ip_len = IP_ADDR_LEN;
options = le16_to_cpu(fw_ddb_entry->options);
if (options & DDB_OPT_IPV6_DEVICE)
ip_len = IPv6_ADDR_LEN;
if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len))
return QLA_ERROR;
if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0],
sizeof(fw_ddb_entry->isid)))
return QLA_ERROR;
if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port,
sizeof(fw_ddb_entry->port)))
return QLA_ERROR;
return QLA_SUCCESS;
}
static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
uint32_t fw_idx, uint32_t *flash_index)
{
struct dev_db_entry *flash_ddb_entry;
dma_addr_t flash_ddb_entry_dma;
uint32_t idx = 0;
int max_ddbs;
int ret = QLA_ERROR, status;
max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
MAX_DEV_DB_ENTRIES;
flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
&flash_ddb_entry_dma);
if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) {
ql4_printk(KERN_ERR, ha, "Out of memory\n");
goto exit_find_st_idx;
}
status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
flash_ddb_entry_dma, fw_idx);
if (status == QLA_SUCCESS) {
status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
if (status == QLA_SUCCESS) {
*flash_index = fw_idx;
ret = QLA_SUCCESS;
goto exit_find_st_idx;
}
}
for (idx = 0; idx < max_ddbs; idx++) {
status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry,
flash_ddb_entry_dma, idx);
if (status == QLA_ERROR)
continue;
status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry);
if (status == QLA_SUCCESS) {
*flash_index = idx;
ret = QLA_SUCCESS;
goto exit_find_st_idx;
}
}
if (idx == max_ddbs)
ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n",
fw_idx);
exit_find_st_idx:
if (flash_ddb_entry)
dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry,
flash_ddb_entry_dma);
return ret;
}
static void qla4xxx_build_st_list(struct scsi_qla_host *ha, static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
struct list_head *list_st) struct list_head *list_st)
{ {
@ -5278,6 +5605,7 @@ static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
int ret; int ret;
uint32_t idx = 0, next_idx = 0; uint32_t idx = 0, next_idx = 0;
uint32_t state = 0, conn_err = 0; uint32_t state = 0, conn_err = 0;
uint32_t flash_index = -1;
uint16_t conn_id = 0; uint16_t conn_id = 0;
fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
@ -5310,6 +5638,19 @@ static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
if (!st_ddb_idx) if (!st_ddb_idx)
break; break;
ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx,
&flash_index);
if (ret == QLA_ERROR) {
ql4_printk(KERN_ERR, ha,
"No flash entry for ST at idx [%d]\n", idx);
st_ddb_idx->flash_ddb_idx = idx;
} else {
ql4_printk(KERN_INFO, ha,
"ST at idx [%d] is stored at flash [%d]\n",
idx, flash_index);
st_ddb_idx->flash_ddb_idx = flash_index;
}
st_ddb_idx->fw_ddb_idx = idx; st_ddb_idx->fw_ddb_idx = idx;
list_add_tail(&st_ddb_idx->list, list_st); list_add_tail(&st_ddb_idx->list, list_st);
@ -5354,6 +5695,28 @@ static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
} }
} }
static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry,
struct dev_db_entry *fw_ddb_entry)
{
struct iscsi_cls_session *cls_sess;
struct iscsi_session *sess;
uint32_t max_ddbs = 0;
uint16_t ddb_link = -1;
max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
MAX_DEV_DB_ENTRIES;
cls_sess = ddb_entry->sess;
sess = cls_sess->dd_data;
ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
if (ddb_link < max_ddbs)
sess->discovery_parent_idx = ddb_link;
else
sess->discovery_parent_idx = DDB_NO_LINK;
}
static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha, static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry, struct dev_db_entry *fw_ddb_entry,
int is_reset, uint16_t idx) int is_reset, uint16_t idx)
@ -5418,6 +5781,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
/* Update sess/conn params */ /* Update sess/conn params */
qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry);
if (is_reset == RESET_ADAPTER) { if (is_reset == RESET_ADAPTER) {
iscsi_block_session(cls_sess); iscsi_block_session(cls_sess);
@ -5434,17 +5798,43 @@ exit_setup:
return ret; return ret;
} }
static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha,
struct list_head *list_ddb,
struct dev_db_entry *fw_ddb_entry)
{
struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
uint16_t ddb_link;
ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
if (ddb_idx->fw_ddb_idx == ddb_link) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"Updating NT parent idx from [%d] to [%d]\n",
ddb_link, ddb_idx->flash_ddb_idx));
fw_ddb_entry->ddb_link =
cpu_to_le16(ddb_idx->flash_ddb_idx);
return;
}
}
}
static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
struct list_head *list_nt, int is_reset) struct list_head *list_nt,
struct list_head *list_st,
int is_reset)
{ {
struct dev_db_entry *fw_ddb_entry; struct dev_db_entry *fw_ddb_entry;
struct ddb_entry *ddb_entry = NULL;
dma_addr_t fw_ddb_dma; dma_addr_t fw_ddb_dma;
int max_ddbs; int max_ddbs;
int fw_idx_size; int fw_idx_size;
int ret; int ret;
uint32_t idx = 0, next_idx = 0; uint32_t idx = 0, next_idx = 0;
uint32_t state = 0, conn_err = 0; uint32_t state = 0, conn_err = 0;
uint32_t ddb_idx = -1;
uint16_t conn_id = 0; uint16_t conn_id = 0;
uint16_t ddb_link = -1;
struct qla_ddb_index *nt_ddb_idx; struct qla_ddb_index *nt_ddb_idx;
fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
@ -5471,12 +5861,18 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
if (strlen((char *) fw_ddb_entry->iscsi_name) == 0) if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
goto continue_next_nt; goto continue_next_nt;
ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
if (ddb_link < max_ddbs)
qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry);
if (!(state == DDB_DS_NO_CONNECTION_ACTIVE || if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
state == DDB_DS_SESSION_FAILED)) state == DDB_DS_SESSION_FAILED) &&
(is_reset == INIT_ADAPTER))
goto continue_next_nt; goto continue_next_nt;
DEBUG2(ql4_printk(KERN_INFO, ha, DEBUG2(ql4_printk(KERN_INFO, ha,
"Adding DDB to session = 0x%x\n", idx)); "Adding DDB to session = 0x%x\n", idx));
if (is_reset == INIT_ADAPTER) { if (is_reset == INIT_ADAPTER) {
nt_ddb_idx = vmalloc(fw_idx_size); nt_ddb_idx = vmalloc(fw_idx_size);
if (!nt_ddb_idx) if (!nt_ddb_idx)
@ -5506,9 +5902,17 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
list_add_tail(&nt_ddb_idx->list, list_nt); list_add_tail(&nt_ddb_idx->list, list_nt);
} else if (is_reset == RESET_ADAPTER) { } else if (is_reset == RESET_ADAPTER) {
if (qla4xxx_is_session_exists(ha, fw_ddb_entry) == ret = qla4xxx_is_session_exists(ha, fw_ddb_entry,
QLA_SUCCESS) &ddb_idx);
if (ret == QLA_SUCCESS) {
ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha,
ddb_idx);
if (ddb_entry != NULL)
qla4xxx_update_sess_disc_idx(ha,
ddb_entry,
fw_ddb_entry);
goto continue_next_nt; goto continue_next_nt;
}
} }
ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx); ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
@ -5526,7 +5930,8 @@ exit_nt_list:
} }
static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha, static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
struct list_head *list_nt) struct list_head *list_nt,
uint16_t target_id)
{ {
struct dev_db_entry *fw_ddb_entry; struct dev_db_entry *fw_ddb_entry;
dma_addr_t fw_ddb_dma; dma_addr_t fw_ddb_dma;
@ -5571,13 +5976,16 @@ static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
nt_ddb_idx->fw_ddb_idx = idx; nt_ddb_idx->fw_ddb_idx = idx;
ret = qla4xxx_is_session_exists(ha, fw_ddb_entry); ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
if (ret == QLA_SUCCESS) { if (ret == QLA_SUCCESS) {
/* free nt_ddb_idx and do not add to list_nt */ /* free nt_ddb_idx and do not add to list_nt */
vfree(nt_ddb_idx); vfree(nt_ddb_idx);
goto continue_next_new_nt; goto continue_next_new_nt;
} }
if (target_id < max_ddbs)
fw_ddb_entry->ddb_link = cpu_to_le16(target_id);
list_add_tail(&nt_ddb_idx->list, list_nt); list_add_tail(&nt_ddb_idx->list, list_nt);
ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
@ -5894,7 +6302,8 @@ exit_ddb_conn_open:
} }
static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha, static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry) struct dev_db_entry *fw_ddb_entry,
uint16_t target_id)
{ {
struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
struct list_head list_nt; struct list_head list_nt;
@ -5919,7 +6328,7 @@ static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
if (ret == QLA_ERROR) if (ret == QLA_ERROR)
goto exit_login_st; goto exit_login_st;
qla4xxx_build_new_nt_list(ha, &list_nt); qla4xxx_build_new_nt_list(ha, &list_nt, target_id);
list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) { list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {
list_del_init(&ddb_idx->list); list_del_init(&ddb_idx->list);
@ -5946,7 +6355,7 @@ static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha,
{ {
int ret = QLA_ERROR; int ret = QLA_ERROR;
ret = qla4xxx_is_session_exists(ha, fw_ddb_entry); ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);
if (ret != QLA_SUCCESS) if (ret != QLA_SUCCESS)
ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
idx); idx);
@ -6001,7 +6410,8 @@ static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
fw_ddb_entry->cookie = DDB_VALID_COOKIE; fw_ddb_entry->cookie = DDB_VALID_COOKIE;
if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry); ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry,
fnode_sess->target_id);
else else
ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry, ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,
fnode_sess->target_id); fnode_sess->target_id);
@ -6522,10 +6932,13 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
struct scsi_qla_host *ha = to_qla_host(shost); struct scsi_qla_host *ha = to_qla_host(shost);
struct iscsi_flashnode_param_info *fnode_param; struct iscsi_flashnode_param_info *fnode_param;
struct ql4_chap_table chap_tbl;
struct nlattr *attr; struct nlattr *attr;
uint16_t chap_out_idx = INVALID_ENTRY;
int rc = QLA_ERROR; int rc = QLA_ERROR;
uint32_t rem = len; uint32_t rem = len;
memset((void *)&chap_tbl, 0, sizeof(chap_tbl));
nla_for_each_attr(attr, data, len, rem) { nla_for_each_attr(attr, data, len, rem) {
fnode_param = nla_data(attr); fnode_param = nla_data(attr);
@ -6567,6 +6980,10 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
break; break;
case ISCSI_FLASHNODE_CHAP_AUTH_EN: case ISCSI_FLASHNODE_CHAP_AUTH_EN:
fnode_sess->chap_auth_en = fnode_param->value[0]; fnode_sess->chap_auth_en = fnode_param->value[0];
/* Invalidate chap index if chap auth is disabled */
if (!fnode_sess->chap_auth_en)
fnode_sess->chap_out_idx = INVALID_ENTRY;
break; break;
case ISCSI_FLASHNODE_SNACK_REQ_EN: case ISCSI_FLASHNODE_SNACK_REQ_EN:
fnode_conn->snack_req_en = fnode_param->value[0]; fnode_conn->snack_req_en = fnode_param->value[0];
@ -6705,6 +7122,17 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
fnode_conn->exp_statsn = fnode_conn->exp_statsn =
*(uint32_t *)fnode_param->value; *(uint32_t *)fnode_param->value;
break; break;
case ISCSI_FLASHNODE_CHAP_OUT_IDX:
chap_out_idx = *(uint16_t *)fnode_param->value;
if (!qla4xxx_get_uni_chap_at_index(ha,
chap_tbl.name,
chap_tbl.secret,
chap_out_idx)) {
fnode_sess->chap_out_idx = chap_out_idx;
/* Enable chap auth if chap index is valid */
fnode_sess->chap_auth_en = QL4_PARAM_ENABLE;
}
break;
default: default:
ql4_printk(KERN_ERR, ha, ql4_printk(KERN_ERR, ha,
"%s: No such sysfs attribute\n", __func__); "%s: No such sysfs attribute\n", __func__);
@ -6926,11 +7354,10 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
schedule_timeout_uninterruptible(HZ / 10); schedule_timeout_uninterruptible(HZ / 10);
} while (time_after(wtime, jiffies)); } while (time_after(wtime, jiffies));
/* Free up the sendtargets list */
qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset);
qla4xxx_free_ddb_list(&list_st); qla4xxx_free_ddb_list(&list_st);
qla4xxx_build_nt_list(ha, &list_nt, is_reset);
qla4xxx_free_ddb_list(&list_nt); qla4xxx_free_ddb_list(&list_nt);
qla4xxx_free_ddb_index(ha); qla4xxx_free_ddb_index(ha);

View File

@ -78,11 +78,6 @@ static void scsi_done(struct scsi_cmnd *cmd);
* Definitions and constants. * Definitions and constants.
*/ */
#define MIN_RESET_DELAY (2*HZ)
/* Do not call reset on error if we just did a reset within 15 sec. */
#define MIN_RESET_PERIOD (15*HZ)
/* /*
* Note - the initial logging level can be set here to log events at boot time. * Note - the initial logging level can be set here to log events at boot time.
* After the system is up, you may enable logging via the /proc interface. * After the system is up, you may enable logging via the /proc interface.
@ -658,7 +653,6 @@ EXPORT_SYMBOL(scsi_cmd_get_serial);
int scsi_dispatch_cmd(struct scsi_cmnd *cmd) int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{ {
struct Scsi_Host *host = cmd->device->host; struct Scsi_Host *host = cmd->device->host;
unsigned long timeout;
int rtn = 0; int rtn = 0;
atomic_inc(&cmd->device->iorequest_cnt); atomic_inc(&cmd->device->iorequest_cnt);
@ -704,28 +698,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
(cmd->device->lun << 5 & 0xe0); (cmd->device->lun << 5 & 0xe0);
} }
/*
* We will wait MIN_RESET_DELAY clock ticks after the last reset so
* we can avoid the drive not being ready.
*/
timeout = host->last_reset + MIN_RESET_DELAY;
if (host->resetting && time_before(jiffies, timeout)) {
int ticks_remaining = timeout - jiffies;
/*
* NOTE: This may be executed from within an interrupt
* handler! This is bad, but for now, it'll do. The irq
* level of the interrupt handler has been masked out by the
* platform dependent interrupt handling code already, so the
* sti() here will not cause another call to the SCSI host's
* interrupt handler (assuming there is one irq-level per
* host).
*/
while (--ticks_remaining >= 0)
mdelay(1 + 999 / HZ);
host->resetting = 0;
}
scsi_log_send(cmd); scsi_log_send(cmd);
/* /*

View File

@ -169,7 +169,7 @@ static int scsi_debug_dix = DEF_DIX;
static int scsi_debug_dsense = DEF_D_SENSE; static int scsi_debug_dsense = DEF_D_SENSE;
static int scsi_debug_every_nth = DEF_EVERY_NTH; static int scsi_debug_every_nth = DEF_EVERY_NTH;
static int scsi_debug_fake_rw = DEF_FAKE_RW; static int scsi_debug_fake_rw = DEF_FAKE_RW;
static int scsi_debug_guard = DEF_GUARD; static unsigned int scsi_debug_guard = DEF_GUARD;
static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int scsi_debug_max_luns = DEF_MAX_LUNS; static int scsi_debug_max_luns = DEF_MAX_LUNS;
static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE; static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
@ -293,6 +293,20 @@ static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
0, 0, 0x0, 0x0}; 0, 0, 0x0, 0x0};
static void *fake_store(unsigned long long lba)
{
lba = do_div(lba, sdebug_store_sectors);
return fake_storep + lba * scsi_debug_sector_size;
}
static struct sd_dif_tuple *dif_store(sector_t sector)
{
sector = do_div(sector, sdebug_store_sectors);
return dif_storep + sector;
}
static int sdebug_add_adapter(void); static int sdebug_add_adapter(void);
static void sdebug_remove_adapter(void); static void sdebug_remove_adapter(void);
@ -1731,25 +1745,22 @@ static int do_device_access(struct scsi_cmnd *scmd,
return ret; return ret;
} }
static u16 dif_compute_csum(const void *buf, int len) static __be16 dif_compute_csum(const void *buf, int len)
{ {
u16 csum; __be16 csum;
switch (scsi_debug_guard) { if (scsi_debug_guard)
case 1: csum = (__force __be16)ip_compute_csum(buf, len);
csum = ip_compute_csum(buf, len); else
break;
case 0:
csum = cpu_to_be16(crc_t10dif(buf, len)); csum = cpu_to_be16(crc_t10dif(buf, len));
break;
}
return csum; return csum;
} }
static int dif_verify(struct sd_dif_tuple *sdt, const void *data, static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
sector_t sector, u32 ei_lba) sector_t sector, u32 ei_lba)
{ {
u16 csum = dif_compute_csum(data, scsi_debug_sector_size); __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
if (sdt->guard_tag != csum) { if (sdt->guard_tag != csum) {
pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n", pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
@ -1775,31 +1786,62 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
return 0; return 0;
} }
static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
unsigned int sectors, u32 ei_lba) unsigned int sectors, bool read)
{ {
unsigned int i, resid; unsigned int i, resid;
struct scatterlist *psgl; struct scatterlist *psgl;
void *paddr;
const void *dif_store_end = dif_storep + sdebug_store_sectors;
/* Bytes of protection data to copy into sgl */
resid = sectors * sizeof(*dif_storep);
scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
int len = min(psgl->length, resid);
void *start = dif_store(sector);
int rest = 0;
if (dif_store_end < start + len)
rest = start + len - dif_store_end;
paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
if (read)
memcpy(paddr, start, len - rest);
else
memcpy(start, paddr, len - rest);
if (rest) {
if (read)
memcpy(paddr + len - rest, dif_storep, rest);
else
memcpy(dif_storep, paddr + len - rest, rest);
}
sector += len / sizeof(*dif_storep);
resid -= len;
kunmap_atomic(paddr);
}
}
static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
unsigned int sectors, u32 ei_lba)
{
unsigned int i;
struct sd_dif_tuple *sdt; struct sd_dif_tuple *sdt;
sector_t sector; sector_t sector;
sector_t tmp_sec = start_sec;
void *paddr;
start_sec = do_div(tmp_sec, sdebug_store_sectors); for (i = 0; i < sectors; i++) {
sdt = dif_storep + start_sec;
for (i = 0 ; i < sectors ; i++) {
int ret; int ret;
if (sdt[i].app_tag == 0xffff) sector = start_sec + i;
sdt = dif_store(sector);
if (sdt->app_tag == cpu_to_be16(0xffff))
continue; continue;
sector = start_sec + i; ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
ret = dif_verify(&sdt[i],
fake_storep + sector * scsi_debug_sector_size,
sector, ei_lba);
if (ret) { if (ret) {
dif_errors++; dif_errors++;
return ret; return ret;
@ -1808,26 +1850,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
ei_lba++; ei_lba++;
} }
/* Bytes of protection data to copy into sgl */ dif_copy_prot(SCpnt, start_sec, sectors, true);
resid = sectors * sizeof(*dif_storep);
sector = start_sec;
scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
int len = min(psgl->length, resid);
paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
memcpy(paddr, dif_storep + sector, len);
sector += len / sizeof(*dif_storep);
if (sector >= sdebug_store_sectors) {
/* Force wrap */
tmp_sec = sector;
sector = do_div(tmp_sec, sdebug_store_sectors);
}
resid -= len;
kunmap_atomic(paddr);
}
dix_reads++; dix_reads++;
return 0; return 0;
@ -1910,15 +1933,12 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
{ {
int i, j, ret; int i, j, ret;
struct sd_dif_tuple *sdt; struct sd_dif_tuple *sdt;
struct scatterlist *dsgl = scsi_sglist(SCpnt); struct scatterlist *dsgl;
struct scatterlist *psgl = scsi_prot_sglist(SCpnt); struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
void *daddr, *paddr; void *daddr, *paddr;
sector_t tmp_sec = start_sec; sector_t sector = start_sec;
sector_t sector;
int ppage_offset; int ppage_offset;
sector = do_div(tmp_sec, sdebug_store_sectors);
BUG_ON(scsi_sg_count(SCpnt) == 0); BUG_ON(scsi_sg_count(SCpnt) == 0);
BUG_ON(scsi_prot_sg_count(SCpnt) == 0); BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
@ -1946,25 +1966,13 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
sdt = paddr + ppage_offset; sdt = paddr + ppage_offset;
ret = dif_verify(sdt, daddr + j, start_sec, ei_lba); ret = dif_verify(sdt, daddr + j, sector, ei_lba);
if (ret) { if (ret) {
dump_sector(daddr + j, scsi_debug_sector_size); dump_sector(daddr + j, scsi_debug_sector_size);
goto out; goto out;
} }
/* Would be great to copy this in bigger
* chunks. However, for the sake of
* correctness we need to verify each sector
* before writing it to "stable" storage
*/
memcpy(dif_storep + sector, sdt, sizeof(*sdt));
sector++; sector++;
if (sector == sdebug_store_sectors)
sector = 0; /* Force wrap */
start_sec++;
ei_lba++; ei_lba++;
ppage_offset += sizeof(struct sd_dif_tuple); ppage_offset += sizeof(struct sd_dif_tuple);
} }
@ -1973,6 +1981,7 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
kunmap_atomic(daddr); kunmap_atomic(daddr);
} }
dif_copy_prot(SCpnt, start_sec, sectors, false);
dix_writes++; dix_writes++;
return 0; return 0;
@ -2742,7 +2751,7 @@ module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR); module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR); module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR); module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, scsi_debug_guard, int, S_IRUGO); module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO); module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO); module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO); module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
@ -3172,7 +3181,7 @@ DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);
static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf) static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
{ {
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard); return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
} }
DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL); DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);

View File

@ -87,6 +87,18 @@ void scsi_schedule_eh(struct Scsi_Host *shost)
} }
EXPORT_SYMBOL_GPL(scsi_schedule_eh); EXPORT_SYMBOL_GPL(scsi_schedule_eh);
static int scsi_host_eh_past_deadline(struct Scsi_Host *shost)
{
if (!shost->last_reset || !shost->eh_deadline)
return 0;
if (time_before(jiffies,
shost->last_reset + shost->eh_deadline))
return 0;
return 1;
}
/** /**
* scsi_eh_scmd_add - add scsi cmd to error handling. * scsi_eh_scmd_add - add scsi cmd to error handling.
* @scmd: scmd to run eh on. * @scmd: scmd to run eh on.
@ -109,6 +121,9 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
goto out_unlock; goto out_unlock;
if (shost->eh_deadline && !shost->last_reset)
shost->last_reset = jiffies;
ret = 1; ret = 1;
scmd->eh_eflags |= eh_flag; scmd->eh_eflags |= eh_flag;
list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q); list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
@ -138,6 +153,9 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
trace_scsi_dispatch_cmd_timeout(scmd); trace_scsi_dispatch_cmd_timeout(scmd);
scsi_log_completion(scmd, TIMEOUT_ERROR); scsi_log_completion(scmd, TIMEOUT_ERROR);
if (host->eh_deadline && !host->last_reset)
host->last_reset = jiffies;
if (host->transportt->eh_timed_out) if (host->transportt->eh_timed_out)
rtn = host->transportt->eh_timed_out(scmd); rtn = host->transportt->eh_timed_out(scmd);
else if (host->hostt->eh_timed_out) else if (host->hostt->eh_timed_out)
@ -990,13 +1008,26 @@ int scsi_eh_get_sense(struct list_head *work_q,
struct list_head *done_q) struct list_head *done_q)
{ {
struct scsi_cmnd *scmd, *next; struct scsi_cmnd *scmd, *next;
struct Scsi_Host *shost;
int rtn; int rtn;
unsigned long flags;
list_for_each_entry_safe(scmd, next, work_q, eh_entry) { list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) || if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
SCSI_SENSE_VALID(scmd)) SCSI_SENSE_VALID(scmd))
continue; continue;
shost = scmd->device->host;
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_eh_past_deadline(shost)) {
spin_unlock_irqrestore(shost->host_lock, flags);
SCSI_LOG_ERROR_RECOVERY(3,
shost_printk(KERN_INFO, shost,
"skip %s, past eh deadline\n",
__func__));
break;
}
spin_unlock_irqrestore(shost->host_lock, flags);
SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd, SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
"%s: requesting sense\n", "%s: requesting sense\n",
current->comm)); current->comm));
@ -1082,11 +1113,28 @@ static int scsi_eh_test_devices(struct list_head *cmd_list,
struct scsi_cmnd *scmd, *next; struct scsi_cmnd *scmd, *next;
struct scsi_device *sdev; struct scsi_device *sdev;
int finish_cmds; int finish_cmds;
unsigned long flags;
while (!list_empty(cmd_list)) { while (!list_empty(cmd_list)) {
scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry); scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
sdev = scmd->device; sdev = scmd->device;
if (!try_stu) {
spin_lock_irqsave(sdev->host->host_lock, flags);
if (scsi_host_eh_past_deadline(sdev->host)) {
/* Push items back onto work_q */
list_splice_init(cmd_list, work_q);
spin_unlock_irqrestore(sdev->host->host_lock,
flags);
SCSI_LOG_ERROR_RECOVERY(3,
shost_printk(KERN_INFO, sdev->host,
"skip %s, past eh deadline",
__func__));
break;
}
spin_unlock_irqrestore(sdev->host->host_lock, flags);
}
finish_cmds = !scsi_device_online(scmd->device) || finish_cmds = !scsi_device_online(scmd->device) ||
(try_stu && !scsi_eh_try_stu(scmd) && (try_stu && !scsi_eh_try_stu(scmd) &&
!scsi_eh_tur(scmd)) || !scsi_eh_tur(scmd)) ||
@ -1122,26 +1170,42 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
struct scsi_cmnd *scmd, *next; struct scsi_cmnd *scmd, *next;
LIST_HEAD(check_list); LIST_HEAD(check_list);
int rtn; int rtn;
struct Scsi_Host *shost;
unsigned long flags;
list_for_each_entry_safe(scmd, next, work_q, eh_entry) { list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD)) if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD))
continue; continue;
shost = scmd->device->host;
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_eh_past_deadline(shost)) {
spin_unlock_irqrestore(shost->host_lock, flags);
list_splice_init(&check_list, work_q);
SCSI_LOG_ERROR_RECOVERY(3,
shost_printk(KERN_INFO, shost,
"skip %s, past eh deadline\n",
__func__));
return list_empty(work_q);
}
spin_unlock_irqrestore(shost->host_lock, flags);
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:" SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:"
"0x%p\n", current->comm, "0x%p\n", current->comm,
scmd)); scmd));
rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd); rtn = scsi_try_to_abort_cmd(shost->hostt, scmd);
if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { if (rtn == FAILED) {
scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
if (rtn == FAST_IO_FAIL)
scsi_eh_finish_cmd(scmd, done_q);
else
list_move_tail(&scmd->eh_entry, &check_list);
} else
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting" SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
" cmd failed:" " cmd failed:"
"0x%p\n", "0x%p\n",
current->comm, current->comm,
scmd)); scmd));
list_splice_init(&check_list, work_q);
return list_empty(work_q);
}
scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
if (rtn == FAST_IO_FAIL)
scsi_eh_finish_cmd(scmd, done_q);
else
list_move_tail(&scmd->eh_entry, &check_list);
} }
return scsi_eh_test_devices(&check_list, work_q, done_q, 0); return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
@ -1187,8 +1251,19 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
{ {
struct scsi_cmnd *scmd, *stu_scmd, *next; struct scsi_cmnd *scmd, *stu_scmd, *next;
struct scsi_device *sdev; struct scsi_device *sdev;
unsigned long flags;
shost_for_each_device(sdev, shost) { shost_for_each_device(sdev, shost) {
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_eh_past_deadline(shost)) {
spin_unlock_irqrestore(shost->host_lock, flags);
SCSI_LOG_ERROR_RECOVERY(3,
shost_printk(KERN_INFO, shost,
"skip %s, past eh deadline\n",
__func__));
break;
}
spin_unlock_irqrestore(shost->host_lock, flags);
stu_scmd = NULL; stu_scmd = NULL;
list_for_each_entry(scmd, work_q, eh_entry) list_for_each_entry(scmd, work_q, eh_entry)
if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) && if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
@ -1241,9 +1316,20 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
{ {
struct scsi_cmnd *scmd, *bdr_scmd, *next; struct scsi_cmnd *scmd, *bdr_scmd, *next;
struct scsi_device *sdev; struct scsi_device *sdev;
unsigned long flags;
int rtn; int rtn;
shost_for_each_device(sdev, shost) { shost_for_each_device(sdev, shost) {
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_eh_past_deadline(shost)) {
spin_unlock_irqrestore(shost->host_lock, flags);
SCSI_LOG_ERROR_RECOVERY(3,
shost_printk(KERN_INFO, shost,
"skip %s, past eh deadline\n",
__func__));
break;
}
spin_unlock_irqrestore(shost->host_lock, flags);
bdr_scmd = NULL; bdr_scmd = NULL;
list_for_each_entry(scmd, work_q, eh_entry) list_for_each_entry(scmd, work_q, eh_entry)
if (scmd->device == sdev) { if (scmd->device == sdev) {
@ -1303,6 +1389,21 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
struct scsi_cmnd *next, *scmd; struct scsi_cmnd *next, *scmd;
int rtn; int rtn;
unsigned int id; unsigned int id;
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_eh_past_deadline(shost)) {
spin_unlock_irqrestore(shost->host_lock, flags);
/* push back on work queue for further processing */
list_splice_init(&check_list, work_q);
list_splice_init(&tmp_list, work_q);
SCSI_LOG_ERROR_RECOVERY(3,
shost_printk(KERN_INFO, shost,
"skip %s, past eh deadline\n",
__func__));
return list_empty(work_q);
}
spin_unlock_irqrestore(shost->host_lock, flags);
scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry); scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);
id = scmd_id(scmd); id = scmd_id(scmd);
@ -1347,6 +1448,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
LIST_HEAD(check_list); LIST_HEAD(check_list);
unsigned int channel; unsigned int channel;
int rtn; int rtn;
unsigned long flags;
/* /*
* we really want to loop over the various channels, and do this on * we really want to loop over the various channels, and do this on
@ -1356,6 +1458,18 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
*/ */
for (channel = 0; channel <= shost->max_channel; channel++) { for (channel = 0; channel <= shost->max_channel; channel++) {
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_eh_past_deadline(shost)) {
spin_unlock_irqrestore(shost->host_lock, flags);
list_splice_init(&check_list, work_q);
SCSI_LOG_ERROR_RECOVERY(3,
shost_printk(KERN_INFO, shost,
"skip %s, past eh deadline\n",
__func__));
return list_empty(work_q);
}
spin_unlock_irqrestore(shost->host_lock, flags);
chan_scmd = NULL; chan_scmd = NULL;
list_for_each_entry(scmd, work_q, eh_entry) { list_for_each_entry(scmd, work_q, eh_entry) {
if (channel == scmd_channel(scmd)) { if (channel == scmd_channel(scmd)) {
@ -1755,8 +1869,9 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
* will be requests for character device operations, and also for * will be requests for character device operations, and also for
* ioctls to queued block devices. * ioctls to queued block devices.
*/ */
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n", SCSI_LOG_ERROR_RECOVERY(3,
__func__)); printk("scsi_eh_%d waking up host to restart\n",
shost->host_no));
spin_lock_irqsave(shost->host_lock, flags); spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_set_state(shost, SHOST_RUNNING)) if (scsi_host_set_state(shost, SHOST_RUNNING))
@ -1883,6 +1998,10 @@ static void scsi_unjam_host(struct Scsi_Host *shost)
if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q)) if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))
scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q); scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);
spin_lock_irqsave(shost->host_lock, flags);
if (shost->eh_deadline)
shost->last_reset = 0;
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_eh_flush_done_q(&eh_done_q); scsi_eh_flush_done_q(&eh_done_q);
} }
@ -1909,7 +2028,7 @@ int scsi_error_handler(void *data)
if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) || if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
shost->host_failed != shost->host_busy) { shost->host_failed != shost->host_busy) {
SCSI_LOG_ERROR_RECOVERY(1, SCSI_LOG_ERROR_RECOVERY(1,
printk("Error handler scsi_eh_%d sleeping\n", printk("scsi_eh_%d: sleeping\n",
shost->host_no)); shost->host_no));
schedule(); schedule();
continue; continue;
@ -1917,8 +2036,9 @@ int scsi_error_handler(void *data)
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);
SCSI_LOG_ERROR_RECOVERY(1, SCSI_LOG_ERROR_RECOVERY(1,
printk("Error handler scsi_eh_%d waking up\n", printk("scsi_eh_%d: waking up %d/%d/%d\n",
shost->host_no)); shost->host_no, shost->host_eh_scheduled,
shost->host_failed, shost->host_busy));
/* /*
* We have a host that is failing for some reason. Figure out * We have a host that is failing for some reason. Figure out

View File

@ -54,7 +54,8 @@ scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *))
/* /*
* All the high-level SCSI drivers that implement runtime * All the high-level SCSI drivers that implement runtime
* PM treat runtime suspend, system suspend, and system * PM treat runtime suspend, system suspend, and system
* hibernate identically. * hibernate nearly identically. In all cases the requirements
* for runtime suspension are stricter.
*/ */
if (pm_runtime_suspended(dev)) if (pm_runtime_suspended(dev))
return 0; return 0;

View File

@ -281,6 +281,42 @@ exit_store_host_reset:
static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset); static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset);
static ssize_t
show_shost_eh_deadline(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
return sprintf(buf, "%d\n", shost->eh_deadline / HZ);
}
static ssize_t
store_shost_eh_deadline(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
int ret = -EINVAL;
int deadline;
unsigned long flags;
if (shost->transportt && shost->transportt->eh_strategy_handler)
return ret;
if (sscanf(buf, "%d\n", &deadline) == 1) {
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_in_recovery(shost))
ret = -EBUSY;
else {
shost->eh_deadline = deadline * HZ;
ret = count;
}
spin_unlock_irqrestore(shost->host_lock, flags);
}
return ret;
}
static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline);
shost_rd_attr(unique_id, "%u\n"); shost_rd_attr(unique_id, "%u\n");
shost_rd_attr(host_busy, "%hu\n"); shost_rd_attr(host_busy, "%hu\n");
shost_rd_attr(cmd_per_lun, "%hd\n"); shost_rd_attr(cmd_per_lun, "%hd\n");
@ -308,6 +344,7 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
&dev_attr_prot_capabilities.attr, &dev_attr_prot_capabilities.attr,
&dev_attr_prot_guard_type.attr, &dev_attr_prot_guard_type.attr,
&dev_attr_host_reset.attr, &dev_attr_host_reset.attr,
&dev_attr_eh_deadline.attr,
NULL NULL
}; };
@ -529,6 +566,7 @@ static int scsi_sdev_check_buf_bit(const char *buf)
*/ */
sdev_rd_attr (device_blocked, "%d\n"); sdev_rd_attr (device_blocked, "%d\n");
sdev_rd_attr (queue_depth, "%d\n"); sdev_rd_attr (queue_depth, "%d\n");
sdev_rd_attr (device_busy, "%d\n");
sdev_rd_attr (type, "%d\n"); sdev_rd_attr (type, "%d\n");
sdev_rd_attr (scsi_level, "%d\n"); sdev_rd_attr (scsi_level, "%d\n");
sdev_rd_attr (vendor, "%.8s\n"); sdev_rd_attr (vendor, "%.8s\n");
@ -750,6 +788,7 @@ static struct attribute *scsi_sdev_attrs[] = {
&dev_attr_device_blocked.attr, &dev_attr_device_blocked.attr,
&dev_attr_type.attr, &dev_attr_type.attr,
&dev_attr_scsi_level.attr, &dev_attr_scsi_level.attr,
&dev_attr_device_busy.attr,
&dev_attr_vendor.attr, &dev_attr_vendor.attr,
&dev_attr_model.attr, &dev_attr_model.attr,
&dev_attr_rev.attr, &dev_attr_rev.attr,

View File

@ -2744,6 +2744,28 @@ exit_get_chap:
return err; return err;
} }
static int iscsi_set_chap(struct iscsi_transport *transport,
struct iscsi_uevent *ev, uint32_t len)
{
char *data = (char *)ev + sizeof(*ev);
struct Scsi_Host *shost;
int err = 0;
if (!transport->set_chap)
return -ENOSYS;
shost = scsi_host_lookup(ev->u.set_path.host_no);
if (!shost) {
pr_err("%s could not find host no %u\n",
__func__, ev->u.set_path.host_no);
return -ENODEV;
}
err = transport->set_chap(shost, data, len);
scsi_host_put(shost);
return err;
}
static int iscsi_delete_chap(struct iscsi_transport *transport, static int iscsi_delete_chap(struct iscsi_transport *transport,
struct iscsi_uevent *ev) struct iscsi_uevent *ev)
{ {
@ -3234,6 +3256,10 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID: case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID:
err = iscsi_logout_flashnode_sid(transport, ev); err = iscsi_logout_flashnode_sid(transport, ev);
break; break;
case ISCSI_UEVENT_SET_CHAP:
err = iscsi_set_chap(transport, ev,
nlmsg_attrlen(nlh, sizeof(*ev)));
break;
default: default:
err = -ENOSYS; err = -ENOSYS;
break; break;

View File

@ -105,7 +105,8 @@ static void sd_unlock_native_capacity(struct gendisk *disk);
static int sd_probe(struct device *); static int sd_probe(struct device *);
static int sd_remove(struct device *); static int sd_remove(struct device *);
static void sd_shutdown(struct device *); static void sd_shutdown(struct device *);
static int sd_suspend(struct device *); static int sd_suspend_system(struct device *);
static int sd_suspend_runtime(struct device *);
static int sd_resume(struct device *); static int sd_resume(struct device *);
static void sd_rescan(struct device *); static void sd_rescan(struct device *);
static int sd_done(struct scsi_cmnd *); static int sd_done(struct scsi_cmnd *);
@ -484,11 +485,11 @@ static struct class sd_disk_class = {
}; };
static const struct dev_pm_ops sd_pm_ops = { static const struct dev_pm_ops sd_pm_ops = {
.suspend = sd_suspend, .suspend = sd_suspend_system,
.resume = sd_resume, .resume = sd_resume,
.poweroff = sd_suspend, .poweroff = sd_suspend_system,
.restore = sd_resume, .restore = sd_resume,
.runtime_suspend = sd_suspend, .runtime_suspend = sd_suspend_runtime,
.runtime_resume = sd_resume, .runtime_resume = sd_resume,
}; };
@ -829,7 +830,7 @@ static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq) static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
{ {
rq->timeout = SD_FLUSH_TIMEOUT; rq->timeout *= SD_FLUSH_TIMEOUT_MULTIPLIER;
rq->retries = SD_MAX_RETRIES; rq->retries = SD_MAX_RETRIES;
rq->cmd[0] = SYNCHRONIZE_CACHE; rq->cmd[0] = SYNCHRONIZE_CACHE;
rq->cmd_len = 10; rq->cmd_len = 10;
@ -1433,12 +1434,13 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
{ {
int retries, res; int retries, res;
struct scsi_device *sdp = sdkp->device; struct scsi_device *sdp = sdkp->device;
const int timeout = sdp->request_queue->rq_timeout
* SD_FLUSH_TIMEOUT_MULTIPLIER;
struct scsi_sense_hdr sshdr; struct scsi_sense_hdr sshdr;
if (!scsi_device_online(sdp)) if (!scsi_device_online(sdp))
return -ENODEV; return -ENODEV;
for (retries = 3; retries > 0; --retries) { for (retries = 3; retries > 0; --retries) {
unsigned char cmd[10] = { 0 }; unsigned char cmd[10] = { 0 };
@ -1448,20 +1450,39 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
* flush everything. * flush everything.
*/ */
res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
&sshdr, SD_FLUSH_TIMEOUT, &sshdr, timeout, SD_MAX_RETRIES,
SD_MAX_RETRIES, NULL, REQ_PM); NULL, REQ_PM);
if (res == 0) if (res == 0)
break; break;
} }
if (res) { if (res) {
sd_print_result(sdkp, res); sd_print_result(sdkp, res);
if (driver_byte(res) & DRIVER_SENSE) if (driver_byte(res) & DRIVER_SENSE)
sd_print_sense_hdr(sdkp, &sshdr); sd_print_sense_hdr(sdkp, &sshdr);
} /* we need to evaluate the error return */
if (scsi_sense_valid(&sshdr) &&
/* 0x3a is medium not present */
sshdr.asc == 0x3a)
/* this is no error here */
return 0;
if (res) switch (host_byte(res)) {
return -EIO; /* ignore errors due to racing a disconnection */
case DID_BAD_TARGET:
case DID_NO_CONNECT:
return 0;
/* signal the upper layer it might try again */
case DID_BUS_BUSY:
case DID_IMM_RETRY:
case DID_REQUEUE:
case DID_SOFT_ERROR:
return -EBUSY;
default:
return -EIO;
}
}
return 0; return 0;
} }
@ -2639,13 +2660,16 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
struct scsi_device *sdev = sdkp->device; struct scsi_device *sdev = sdkp->device;
if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) { if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
/* too large values might cause issues with arcmsr */
int vpd_buf_len = 64;
sdev->no_report_opcodes = 1; sdev->no_report_opcodes = 1;
/* Disable WRITE SAME if REPORT SUPPORTED OPERATION /* Disable WRITE SAME if REPORT SUPPORTED OPERATION
* CODES is unsupported and the device has an ATA * CODES is unsupported and the device has an ATA
* Information VPD page (SAT). * Information VPD page (SAT).
*/ */
if (!scsi_get_vpd_page(sdev, 0x89, buffer, SD_BUF_SIZE)) if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
sdev->no_write_same = 1; sdev->no_write_same = 1;
} }
@ -3058,9 +3082,17 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
sd_print_result(sdkp, res); sd_print_result(sdkp, res);
if (driver_byte(res) & DRIVER_SENSE) if (driver_byte(res) & DRIVER_SENSE)
sd_print_sense_hdr(sdkp, &sshdr); sd_print_sense_hdr(sdkp, &sshdr);
if (scsi_sense_valid(&sshdr) &&
/* 0x3a is medium not present */
sshdr.asc == 0x3a)
res = 0;
} }
return res; /* SCSI error codes must not go to the generic layer */
if (res)
return -EIO;
return 0;
} }
/* /*
@ -3078,7 +3110,7 @@ static void sd_shutdown(struct device *dev)
if (pm_runtime_suspended(dev)) if (pm_runtime_suspended(dev))
goto exit; goto exit;
if (sdkp->WCE) { if (sdkp->WCE && sdkp->media_present) {
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
sd_sync_cache(sdkp); sd_sync_cache(sdkp);
} }
@ -3092,7 +3124,7 @@ exit:
scsi_disk_put(sdkp); scsi_disk_put(sdkp);
} }
static int sd_suspend(struct device *dev) static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
{ {
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
int ret = 0; int ret = 0;
@ -3100,16 +3132,23 @@ static int sd_suspend(struct device *dev)
if (!sdkp) if (!sdkp)
return 0; /* this can happen */ return 0; /* this can happen */
if (sdkp->WCE) { if (sdkp->WCE && sdkp->media_present) {
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
ret = sd_sync_cache(sdkp); ret = sd_sync_cache(sdkp);
if (ret) if (ret) {
/* ignore OFFLINE device */
if (ret == -ENODEV)
ret = 0;
goto done; goto done;
}
} }
if (sdkp->device->manage_start_stop) { if (sdkp->device->manage_start_stop) {
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
/* an error is not worth aborting a system sleep */
ret = sd_start_stop_device(sdkp, 0); ret = sd_start_stop_device(sdkp, 0);
if (ignore_stop_errors)
ret = 0;
} }
done: done:
@ -3117,6 +3156,16 @@ done:
return ret; return ret;
} }
static int sd_suspend_system(struct device *dev)
{
return sd_suspend_common(dev, true);
}
static int sd_suspend_runtime(struct device *dev)
{
return sd_suspend_common(dev, false);
}
static int sd_resume(struct device *dev) static int sd_resume(struct device *dev)
{ {
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);

View File

@ -13,7 +13,11 @@
*/ */
#define SD_TIMEOUT (30 * HZ) #define SD_TIMEOUT (30 * HZ)
#define SD_MOD_TIMEOUT (75 * HZ) #define SD_MOD_TIMEOUT (75 * HZ)
#define SD_FLUSH_TIMEOUT (60 * HZ) /*
* Flush timeout is a multiplier over the standard device timeout which is
* user modifiable via sysfs but initially set to SD_TIMEOUT
*/
#define SD_FLUSH_TIMEOUT_MULTIPLIER 2
#define SD_WRITE_SAME_TIMEOUT (120 * HZ) #define SD_WRITE_SAME_TIMEOUT (120 * HZ)
/* /*

View File

@ -521,7 +521,7 @@ dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_sr
pACB->SelConn++; pACB->SelConn++;
return 1; return 1;
} }
if (time_before (jiffies, pACB->pScsiHost->last_reset)) if (time_before (jiffies, pACB->last_reset))
{ {
DEBUG0(printk ("DC390: We were just reset and don't accept commands yet!\n")); DEBUG0(printk ("DC390: We were just reset and don't accept commands yet!\n"));
return 1; return 1;
@ -1863,7 +1863,7 @@ dc390_ScsiRstDetect( struct dc390_acb* pACB )
/* delay half a second */ /* delay half a second */
udelay (1000); udelay (1000);
DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
pACB->pScsiHost->last_reset = jiffies + 5*HZ/2 pACB->last_reset = jiffies + 5*HZ/2
+ HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];
pACB->Connected = 0; pACB->Connected = 0;
@ -2048,9 +2048,9 @@ static int DC390_bus_reset (struct scsi_cmnd *cmd)
dc390_ResetDevParam(pACB); dc390_ResetDevParam(pACB);
mdelay(1); mdelay(1);
pACB->pScsiHost->last_reset = jiffies + 3*HZ/2 pACB->last_reset = jiffies + 3*HZ/2
+ HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];
DC390_write8(ScsiCmd, CLEAR_FIFO_CMD); DC390_write8(ScsiCmd, CLEAR_FIFO_CMD);
DC390_read8(INT_Status); /* Reset Pending INT */ DC390_read8(INT_Status); /* Reset Pending INT */
@ -2383,7 +2383,7 @@ static void dc390_init_hw(struct dc390_acb *pACB, u8 index)
if (pACB->Gmode2 & RST_SCSI_BUS) { if (pACB->Gmode2 & RST_SCSI_BUS) {
dc390_ResetSCSIBus(pACB); dc390_ResetSCSIBus(pACB);
udelay(1000); udelay(1000);
shost->last_reset = jiffies + HZ/2 + pACB->last_reset = jiffies + HZ/2 +
HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];
} }
@ -2455,8 +2455,8 @@ static int dc390_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
shost->irq = pdev->irq; shost->irq = pdev->irq;
shost->base = io_port; shost->base = io_port;
shost->unique_id = io_port; shost->unique_id = io_port;
shost->last_reset = jiffies;
pACB->last_reset = jiffies;
pACB->pScsiHost = shost; pACB->pScsiHost = shost;
pACB->IOPortBase = (u16) io_port; pACB->IOPortBase = (u16) io_port;
pACB->IRQLevel = pdev->irq; pACB->IRQLevel = pdev->irq;

View File

@ -143,6 +143,7 @@ u8 Ignore_IRQ; /* Not used */
struct pci_dev *pdev; struct pci_dev *pdev;
unsigned long last_reset;
unsigned long Cmds; unsigned long Cmds;
u32 SelLost; u32 SelLost;
u32 SelConn; u32 SelConn;

View File

@ -104,7 +104,7 @@ struct fc_esb {
* esb_e_stat - flags from FC-FS-2 T11/1619-D Rev 0.90. * esb_e_stat - flags from FC-FS-2 T11/1619-D Rev 0.90.
*/ */
#define ESB_ST_RESP (1 << 31) /* responder to exchange */ #define ESB_ST_RESP (1 << 31) /* responder to exchange */
#define ESB_ST_SEQ_INIT (1 << 30) /* port holds sequence initiaive */ #define ESB_ST_SEQ_INIT (1 << 30) /* port holds sequence initiative */
#define ESB_ST_COMPLETE (1 << 29) /* exchange is complete */ #define ESB_ST_COMPLETE (1 << 29) /* exchange is complete */
#define ESB_ST_ABNORMAL (1 << 28) /* abnormal ending condition */ #define ESB_ST_ABNORMAL (1 << 28) /* abnormal ending condition */
#define ESB_ST_REC_QUAL (1 << 26) /* recovery qualifier active */ #define ESB_ST_REC_QUAL (1 << 26) /* recovery qualifier active */

View File

@ -69,6 +69,7 @@ enum iscsi_uevent_e {
ISCSI_UEVENT_LOGIN_FLASHNODE = UEVENT_BASE + 28, ISCSI_UEVENT_LOGIN_FLASHNODE = UEVENT_BASE + 28,
ISCSI_UEVENT_LOGOUT_FLASHNODE = UEVENT_BASE + 29, ISCSI_UEVENT_LOGOUT_FLASHNODE = UEVENT_BASE + 29,
ISCSI_UEVENT_LOGOUT_FLASHNODE_SID = UEVENT_BASE + 30, ISCSI_UEVENT_LOGOUT_FLASHNODE_SID = UEVENT_BASE + 30,
ISCSI_UEVENT_SET_CHAP = UEVENT_BASE + 31,
/* up events */ /* up events */
ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1, ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
@ -309,8 +310,16 @@ enum iscsi_param_type {
ISCSI_HOST_PARAM, /* iscsi_host_param */ ISCSI_HOST_PARAM, /* iscsi_host_param */
ISCSI_NET_PARAM, /* iscsi_net_param */ ISCSI_NET_PARAM, /* iscsi_net_param */
ISCSI_FLASHNODE_PARAM, /* iscsi_flashnode_param */ ISCSI_FLASHNODE_PARAM, /* iscsi_flashnode_param */
ISCSI_CHAP_PARAM, /* iscsi_chap_param */
}; };
/* structure for minimalist usecase */
struct iscsi_param_info {
uint32_t len; /* Actual length of the param value */
uint16_t param; /* iscsi param */
uint8_t value[0]; /* length sized value follows */
} __packed;
struct iscsi_iface_param_info { struct iscsi_iface_param_info {
uint32_t iface_num; /* iface number, 0 - n */ uint32_t iface_num; /* iface number, 0 - n */
uint32_t len; /* Actual length of the param */ uint32_t len; /* Actual length of the param */
@ -739,6 +748,14 @@ enum chap_type_e {
CHAP_TYPE_IN, CHAP_TYPE_IN,
}; };
enum iscsi_chap_param {
ISCSI_CHAP_PARAM_INDEX,
ISCSI_CHAP_PARAM_CHAP_TYPE,
ISCSI_CHAP_PARAM_USERNAME,
ISCSI_CHAP_PARAM_PASSWORD,
ISCSI_CHAP_PARAM_PASSWORD_LEN
};
#define ISCSI_CHAP_AUTH_NAME_MAX_LEN 256 #define ISCSI_CHAP_AUTH_NAME_MAX_LEN 256
#define ISCSI_CHAP_AUTH_SECRET_MAX_LEN 256 #define ISCSI_CHAP_AUTH_SECRET_MAX_LEN 256
struct iscsi_chap_rec { struct iscsi_chap_rec {

View File

@ -410,6 +410,12 @@ struct fc_seq {
* @fh_type: The frame type * @fh_type: The frame type
* @class: The class of service * @class: The class of service
* @seq: The sequence in use on this exchange * @seq: The sequence in use on this exchange
* @resp_active: Number of tasks that are concurrently executing @resp().
* @resp_task: If @resp_active > 0, either the task executing @resp(), the
* task that has been interrupted to execute the soft-IRQ
* executing @resp() or NULL if more than one task is executing
* @resp concurrently.
* @resp_wq: Waitqueue for the tasks waiting on @resp_active.
* @resp: Callback for responses on this exchange * @resp: Callback for responses on this exchange
* @destructor: Called when destroying the exchange * @destructor: Called when destroying the exchange
* @arg: Passed as a void pointer to the resp() callback * @arg: Passed as a void pointer to the resp() callback
@ -441,6 +447,9 @@ struct fc_exch {
u32 r_a_tov; u32 r_a_tov;
u32 f_ctl; u32 f_ctl;
struct fc_seq seq; struct fc_seq seq;
int resp_active;
struct task_struct *resp_task;
wait_queue_head_t resp_wq;
void (*resp)(struct fc_seq *, struct fc_frame *, void *); void (*resp)(struct fc_seq *, struct fc_frame *, void *);
void *arg; void *arg;
void (*destructor)(struct fc_seq *, void *); void (*destructor)(struct fc_seq *, void *);

View File

@ -90,6 +90,7 @@ enum fip_state {
* @lp: &fc_lport: libfc local port. * @lp: &fc_lport: libfc local port.
* @sel_fcf: currently selected FCF, or NULL. * @sel_fcf: currently selected FCF, or NULL.
* @fcfs: list of discovered FCFs. * @fcfs: list of discovered FCFs.
* @cdev: (Optional) pointer to sysfs fcoe_ctlr_device.
* @fcf_count: number of discovered FCF entries. * @fcf_count: number of discovered FCF entries.
* @sol_time: time when a multicast solicitation was last sent. * @sol_time: time when a multicast solicitation was last sent.
* @sel_time: time after which to select an FCF. * @sel_time: time after which to select an FCF.
@ -127,6 +128,7 @@ struct fcoe_ctlr {
struct fc_lport *lp; struct fc_lport *lp;
struct fcoe_fcf *sel_fcf; struct fcoe_fcf *sel_fcf;
struct list_head fcfs; struct list_head fcfs;
struct fcoe_ctlr_device *cdev;
u16 fcf_count; u16 fcf_count;
unsigned long sol_time; unsigned long sol_time;
unsigned long sel_time; unsigned long sel_time;
@ -168,8 +170,11 @@ static inline void *fcoe_ctlr_priv(const struct fcoe_ctlr *ctlr)
return (void *)(ctlr + 1); return (void *)(ctlr + 1);
} }
/*
* This assumes that the fcoe_ctlr (x) is allocated with the fcoe_ctlr_device.
*/
#define fcoe_ctlr_to_ctlr_dev(x) \ #define fcoe_ctlr_to_ctlr_dev(x) \
(struct fcoe_ctlr_device *)(((struct fcoe_ctlr_device *)(x)) - 1) (x)->cdev
/** /**
* struct fcoe_fcf - Fibre-Channel Forwarder * struct fcoe_fcf - Fibre-Channel Forwarder

View File

@ -598,9 +598,12 @@ struct Scsi_Host {
unsigned int host_eh_scheduled; /* EH scheduled without command */ unsigned int host_eh_scheduled; /* EH scheduled without command */
unsigned int host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */ unsigned int host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
int resetting; /* if set, it means that last_reset is a valid value */
/* next two fields are used to bound the time spent in error handling */
int eh_deadline;
unsigned long last_reset; unsigned long last_reset;
/* /*
* These three parameters can be used to allow for wide scsi, * These three parameters can be used to allow for wide scsi,
* and for host adapters that support multiple busses * and for host adapters that support multiple busses

View File

@ -152,6 +152,7 @@ struct iscsi_transport {
int (*get_chap) (struct Scsi_Host *shost, uint16_t chap_tbl_idx, int (*get_chap) (struct Scsi_Host *shost, uint16_t chap_tbl_idx,
uint32_t *num_entries, char *buf); uint32_t *num_entries, char *buf);
int (*delete_chap) (struct Scsi_Host *shost, uint16_t chap_tbl_idx); int (*delete_chap) (struct Scsi_Host *shost, uint16_t chap_tbl_idx);
int (*set_chap) (struct Scsi_Host *shost, void *data, int len);
int (*get_flashnode_param) (struct iscsi_bus_flash_session *fnode_sess, int (*get_flashnode_param) (struct iscsi_bus_flash_session *fnode_sess,
int param, char *buf); int param, char *buf);
int (*set_flashnode_param) (struct iscsi_bus_flash_session *fnode_sess, int (*set_flashnode_param) (struct iscsi_bus_flash_session *fnode_sess,