
commit 832e77bc11
Author: James Bottomley <JBottomley@Parallels.com>
Date:   2013-05-10 07:53:40 -07:00

    Merge branch 'misc' into for-linus

    Signed-off-by: James Bottomley <JBottomley@Parallels.com>

98 changed files with 10485 additions and 1452 deletions

View File

@ -1353,6 +1353,8 @@ config SCSI_LPFC
tristate "Emulex LightPulse Fibre Channel Support"
depends on PCI && SCSI
select SCSI_FC_ATTRS
select GENERIC_CSUM
select CRC_T10DIF
help
This lpfc driver supports the Emulex LightPulse
Family of Fibre Channel PCI host adapters.

View File

@ -84,7 +84,7 @@ static void asd_set_ddb_type(struct domain_device *dev)
struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
int ddb = (int) (unsigned long) dev->lldd_dev;
if (dev->dev_type == SATA_PM_PORT)
if (dev->dev_type == SAS_SATA_PM_PORT)
asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT);
else if (dev->tproto)
asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET);
@ -116,7 +116,7 @@ void asd_set_dmamode(struct domain_device *dev)
int ddb = (int) (unsigned long) dev->lldd_dev;
u32 qdepth = 0;
if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) {
if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM_PORT) {
if (ata_id_has_ncq(ata_dev->id))
qdepth = ata_id_queue_depth(ata_dev->id);
asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
@ -140,8 +140,8 @@ static int asd_init_sata(struct domain_device *dev)
int ddb = (int) (unsigned long) dev->lldd_dev;
asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
dev->dev_type == SATA_PM_PORT) {
if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM ||
dev->dev_type == SAS_SATA_PM_PORT) {
struct dev_to_host_fis *fis = (struct dev_to_host_fis *)
dev->frame_rcvd;
asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status);
@ -174,7 +174,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask);
if (dev->port->oob_mode != SATA_OOB_MODE) {
flags |= OPEN_REQUIRED;
if ((dev->dev_type == SATA_DEV) ||
if ((dev->dev_type == SAS_SATA_DEV) ||
(dev->tproto & SAS_PROTOCOL_STP)) {
struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
if (rps_resp->frame_type == SMP_RESPONSE &&
@ -188,8 +188,8 @@ static int asd_init_target_ddb(struct domain_device *dev)
} else {
flags |= CONCURRENT_CONN_SUPP;
if (!dev->parent &&
(dev->dev_type == EDGE_DEV ||
dev->dev_type == FANOUT_DEV))
(dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE))
asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
4);
else
@ -198,7 +198,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1);
}
}
if (dev->dev_type == SATA_PM)
if (dev->dev_type == SAS_SATA_PM)
flags |= SATA_MULTIPORT;
asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags);
@ -211,7 +211,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF);
asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
i = asd_init_sata(dev);
if (i < 0) {
asd_free_ddb(asd_ha, ddb);
@ -219,7 +219,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
}
}
if (dev->dev_type == SAS_END_DEV) {
if (dev->dev_type == SAS_END_DEVICE) {
struct sas_end_device *rdev = rphy_to_end_device(dev->rphy);
if (rdev->I_T_nexus_loss_timeout > 0)
asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT,
@ -328,10 +328,10 @@ int asd_dev_found(struct domain_device *dev)
spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
switch (dev->dev_type) {
case SATA_PM:
case SAS_SATA_PM:
res = asd_init_sata_pm_ddb(dev);
break;
case SATA_PM_PORT:
case SAS_SATA_PM_PORT:
res = asd_init_sata_pm_port_ddb(dev);
break;
default:

View File

@ -74,7 +74,7 @@ static void asd_init_phy_identify(struct asd_phy *phy)
memset(phy->identify_frame, 0, sizeof(*phy->identify_frame));
phy->identify_frame->dev_type = SAS_END_DEV;
phy->identify_frame->dev_type = SAS_END_DEVICE;
if (phy->sas_phy.role & PHY_ROLE_INITIATOR)
phy->identify_frame->initiator_bits = phy->sas_phy.iproto;
if (phy->sas_phy.role & PHY_ROLE_TARGET)

View File

@ -184,7 +184,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
struct sas_phy *phy = sas_get_local_phy(dev);
/* Standard mandates link reset for ATA (type 0) and
* hard reset for SSP (type 1) */
int reset_type = (dev->dev_type == SATA_DEV ||
int reset_type = (dev->dev_type == SAS_SATA_DEV ||
(dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);

View File

@ -1,5 +1,5 @@
/**
* Copyright (C) 2005 - 2012 Emulex
* Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or

View File

@ -1,5 +1,5 @@
/**
* Copyright (C) 2005 - 2012 Emulex
* Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@ -155,6 +155,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
uint16_t status = 0, addl_status = 0, wrb_num = 0;
struct be_mcc_wrb *temp_wrb;
struct be_cmd_req_hdr *ioctl_hdr;
struct be_cmd_resp_hdr *ioctl_resp_hdr;
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
if (beiscsi_error(phba))
@ -204,6 +205,12 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
ioctl_hdr->subsystem,
ioctl_hdr->opcode,
status, addl_status);
if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr;
if (ioctl_resp_hdr->response_length)
goto release_mcc_tag;
}
rc = -EAGAIN;
}
@ -267,6 +274,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
struct be_cmd_resp_hdr *resp_hdr;
be_dws_le_to_cpu(compl, 4);
@ -284,6 +292,11 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
hdr->subsystem, hdr->opcode,
compl_status, extd_status);
if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
resp_hdr = (struct be_cmd_resp_hdr *) hdr;
if (resp_hdr->response_length)
return 0;
}
return -EBUSY;
}
return 0;
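The INSUFFICIENT_BUFFER handling added above deliberately lets such a completion pass once the firmware has filled in response_length, so the caller can learn the size the firmware actually needed. A minimal sketch of the retry pattern this enables; issue_cmd() and struct resp_hdr here are hypothetical stand-ins, not the driver's real API:

#include <errno.h>

/* Hypothetical stand-ins for the MCC plumbing -- illustration only. */
#define MCC_STATUS_SUCCESS             0x0
#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4

struct resp_hdr {
    unsigned int status;
    unsigned int response_length;   /* size the firmware wanted */
};

int issue_cmd(void *buf, unsigned int len, struct resp_hdr *hdr);

int get_object_autosize(void *buf, unsigned int *len)
{
    struct resp_hdr hdr;
    int status = issue_cmd(buf, *len, &hdr);

    if (status == MCC_STATUS_INSUFFICIENT_BUFFER &&
        hdr.response_length > *len) {
        *len = hdr.response_length;  /* grow the buffer to this size */
        return -EAGAIN;              /* caller reallocates and retries */
    }
    return status == MCC_STATUS_SUCCESS ? 0 : -EIO;
}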
@ -335,30 +348,26 @@ static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
struct be_async_event_link_state *evt)
{
switch (evt->port_link_status) {
case ASYNC_EVENT_LINK_DOWN:
if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) ||
((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
(evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) {
phba->state = BE_ADAPTER_LINK_DOWN;
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
"BC_%d : Link Down on Physical Port %d\n",
"BC_%d : Link Down on Port %d\n",
evt->physical_port);
phba->state |= BE_ADAPTER_LINK_DOWN;
iscsi_host_for_each_session(phba->shost,
be2iscsi_fail_session);
break;
case ASYNC_EVENT_LINK_UP:
} else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
(evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
phba->state = BE_ADAPTER_UP;
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
"BC_%d : Link UP on Physical Port %d\n",
evt->physical_port);
break;
default:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
"BC_%d : Unexpected Async Notification %d on"
"Physical Port %d\n",
evt->port_link_status,
"BC_%d : Link UP on Port %d\n",
evt->physical_port);
}
}
@ -479,7 +488,7 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
{
void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
int wait = 0;
uint32_t wait = 0;
u32 ready;
do {
@ -527,6 +536,10 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
struct be_mcc_compl *compl = &mbox->compl;
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
status = be_mbox_db_ready_wait(ctrl);
if (status)
return status;
val &= ~MPU_MAILBOX_DB_RDY_MASK;
val |= MPU_MAILBOX_DB_HI_MASK;
val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@ -580,6 +593,10 @@ static int be_mbox_notify_wait(struct beiscsi_hba *phba)
struct be_mcc_compl *compl = &mbox->compl;
struct be_ctrl_info *ctrl = &phba->ctrl;
status = be_mbox_db_ready_wait(ctrl);
if (status)
return status;
val |= MPU_MAILBOX_DB_HI_MASK;
/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@ -732,6 +749,16 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
return status;
}
/**
* be_cmd_fw_initialize()- Initialize FW
* @ctrl: Pointer to function control structure
*
* Send FW initialize pattern for the function.
*
* return
* Success: 0
* Failure: Non-Zero value
**/
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
@ -762,6 +789,47 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
return status;
}
/**
* be_cmd_fw_uninit()- Uninitialize FW
* @ctrl: Pointer to function control structure
*
* Send FW uninitialize pattern for the function
*
* return
* Success: 0
* Failure: Non-Zero value
**/
int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
int status;
u8 *endian_check;
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
endian_check = (u8 *) wrb;
*endian_check++ = 0xFF;
*endian_check++ = 0xAA;
*endian_check++ = 0xBB;
*endian_check++ = 0xFF;
*endian_check++ = 0xFF;
*endian_check++ = 0xCC;
*endian_check++ = 0xDD;
*endian_check = 0xFF;
be_dws_cpu_to_le(wrb, sizeof(*wrb));
status = be_mbox_notify(ctrl);
if (status)
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BC_%d : be_cmd_fw_uninit Failed\n");
spin_unlock(&ctrl->mbox_lock);
return status;
}
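Both be_cmd_fw_initialize() and be_cmd_fw_uninit() post nothing but an eight-byte signature in the embedded mailbox WRB. Writing it out byte by byte keeps the pattern independent of host byte order; the init command appears to use the same 0xFF framing with different marker bytes in the middle. A minimal sketch of the same idea, assuming only the byte values shown above:

#include <string.h>

/* The uninit signature written by the function above. */
static const unsigned char fw_uninit_sig[8] = {
    0xFF, 0xAA, 0xBB, 0xFF, 0xFF, 0xCC, 0xDD, 0xFF
};

static void fill_fw_uninit_wrb(void *wrb, size_t wrb_size)
{
    memset(wrb, 0, wrb_size);
    memcpy(wrb, fw_uninit_sig, sizeof(fw_uninit_sig));
}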
int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *cq, struct be_queue_info *eq,
bool sol_evts, bool no_delay, int coalesce_wm)
@ -783,20 +851,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
OPCODE_COMMON_CQ_CREATE, sizeof(*req));
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
if (chip_skh_r(ctrl->pdev)) {
req->hdr.version = MBX_CMD_VER2;
req->page_size = 1;
AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
ctxt, coalesce_wm);
AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
ctxt, no_delay);
AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
__ilog2_u32(cq->len / 256));
AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
} else {
if (is_chip_be2_be3r(phba)) {
AMAP_SET_BITS(struct amap_cq_context, coalescwm,
ctxt, coalesce_wm);
AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
@ -809,6 +864,19 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
PCI_FUNC(ctrl->pdev->devfn));
} else {
req->hdr.version = MBX_CMD_VER2;
req->page_size = 1;
AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
ctxt, coalesce_wm);
AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
ctxt, no_delay);
AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
__ilog2_u32(cq->len / 256));
AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
}
be_dws_cpu_to_le(ctxt, sizeof(req->context));
@ -949,6 +1017,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_defq_create_req *req = embedded_payload(wrb);
struct be_dma_mem *q_mem = &dq->dma_mem;
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
void *ctxt = &req->context;
int status;
@ -961,17 +1030,36 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
1);
AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
PCI_FUNC(ctrl->pdev->devfn));
AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
be_encoded_q_len(length / sizeof(struct phys_addr)));
AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
ctxt, entry_size);
AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
cq->id);
if (is_chip_be2_be3r(phba)) {
AMAP_SET_BITS(struct amap_be_default_pdu_context,
rx_pdid, ctxt, 0);
AMAP_SET_BITS(struct amap_be_default_pdu_context,
rx_pdid_valid, ctxt, 1);
AMAP_SET_BITS(struct amap_be_default_pdu_context,
pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
AMAP_SET_BITS(struct amap_be_default_pdu_context,
ring_size, ctxt,
be_encoded_q_len(length /
sizeof(struct phys_addr)));
AMAP_SET_BITS(struct amap_be_default_pdu_context,
default_buffer_size, ctxt, entry_size);
AMAP_SET_BITS(struct amap_be_default_pdu_context,
cq_id_recv, ctxt, cq->id);
} else {
AMAP_SET_BITS(struct amap_default_pdu_context_ext,
rx_pdid, ctxt, 0);
AMAP_SET_BITS(struct amap_default_pdu_context_ext,
rx_pdid_valid, ctxt, 1);
AMAP_SET_BITS(struct amap_default_pdu_context_ext,
ring_size, ctxt,
be_encoded_q_len(length /
sizeof(struct phys_addr)));
AMAP_SET_BITS(struct amap_default_pdu_context_ext,
default_buffer_size, ctxt, entry_size);
AMAP_SET_BITS(struct amap_default_pdu_context_ext,
cq_id_recv, ctxt, cq->id);
}
be_dws_cpu_to_le(ctxt, sizeof(req->context));

View File

@ -1,5 +1,5 @@
/**
* Copyright (C) 2005 - 2012 Emulex
* Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@ -52,6 +52,10 @@ struct be_mcc_wrb {
/* Completion Status */
#define MCC_STATUS_SUCCESS 0x0
#define MCC_STATUS_FAILED 0x1
#define MCC_STATUS_ILLEGAL_REQUEST 0x2
#define MCC_STATUS_ILLEGAL_FIELD 0x3
#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4
#define CQE_STATUS_COMPL_MASK 0xFFFF
#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
@ -118,7 +122,8 @@ struct be_async_event_trailer {
enum {
ASYNC_EVENT_LINK_DOWN = 0x0,
ASYNC_EVENT_LINK_UP = 0x1
ASYNC_EVENT_LINK_UP = 0x1,
ASYNC_EVENT_LOGICAL = 0x2
};
/**
@ -130,6 +135,9 @@ struct be_async_event_link_state {
u8 port_link_status;
u8 port_duplex;
u8 port_speed;
#define BEISCSI_PHY_LINK_FAULT_NONE 0x00
#define BEISCSI_PHY_LINK_FAULT_LOCAL 0x01
#define BEISCSI_PHY_LINK_FAULT_REMOTE 0x02
u8 port_fault;
u8 rsvd0[7];
struct be_async_event_trailer trailer;
@ -697,6 +705,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va);
/* ISCSI Functions */
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba);
@ -751,6 +760,18 @@ struct amap_be_default_pdu_context {
u8 rsvd4[32]; /* dword 3 */
} __packed;
struct amap_default_pdu_context_ext {
u8 rsvd0[16]; /* dword 0 */
u8 ring_size[4]; /* dword 0 */
u8 rsvd1[12]; /* dword 0 */
u8 rsvd2[22]; /* dword 1 */
u8 rx_pdid[9]; /* dword 1 */
u8 rx_pdid_valid; /* dword 1 */
u8 default_buffer_size[16]; /* dword 2 */
u8 cq_id_recv[16]; /* dword 2 */
u8 rsvd3[32]; /* dword 3 */
} __packed;
struct be_defq_create_req {
struct be_cmd_req_hdr hdr;
u16 num_pages;
@ -896,7 +917,7 @@ struct amap_it_dmsg_cqe_v2 {
* stack to notify the
* controller of a posted Work Request Block
*/
#define DB_WRB_POST_CID_MASK 0x3FF /* bits 0 - 9 */
#define DB_WRB_POST_CID_MASK 0xFFFF /* bits 0 - 15 */
#define DB_DEF_PDU_WRB_INDEX_MASK 0xFF /* bits 0 - 9 */
#define DB_DEF_PDU_WRB_INDEX_SHIFT 16
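Widening DB_WRB_POST_CID_MASK to 16 bits matches the larger CID space used by the newer adapters in this series. As a sketch, a WRB-post doorbell built from these fields mirrors the doorbell |= ... sequence later in be_main.c (DB_DEF_PDU_NUM_POSTED_SHIFT is assumed from the surrounding header, not shown in this hunk):

static inline unsigned int make_wrb_doorbell(unsigned int cid,
                                             unsigned int wrb_index)
{
    unsigned int db = 0;

    db |= cid & DB_WRB_POST_CID_MASK;                 /* bits 0 - 15 */
    db |= (wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
                    << DB_DEF_PDU_WRB_INDEX_SHIFT;    /* WRB index */
    db |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;           /* one WRB posted */
    return db;
}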

View File

@ -1,5 +1,5 @@
/**
* Copyright (C) 2005 - 2012 Emulex
* Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@ -161,7 +161,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
struct beiscsi_conn *beiscsi_conn,
unsigned int cid)
{
if (phba->conn_table[cid]) {
uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
if (phba->conn_table[cri_index]) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Connection table already occupied. Detected clash\n");
@ -169,9 +171,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
} else {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n",
cid, beiscsi_conn);
cri_index, beiscsi_conn);
phba->conn_table[cid] = beiscsi_conn;
phba->conn_table[cri_index] = beiscsi_conn;
}
return 0;
}
@ -990,9 +992,27 @@ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
{
struct beiscsi_hba *phba = beiscsi_ep->phba;
struct beiscsi_conn *beiscsi_conn;
beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
beiscsi_ep->phba = NULL;
phba->ep_array[BE_GET_CRI_FROM_CID
(beiscsi_ep->ep_cid)] = NULL;
/**
 * Check if any connection resource allocated by the driver
 * needs to be freed. This case occurs when target redirection
 * or connection retry is done.
 **/
if (!beiscsi_ep->conn)
return;
beiscsi_conn = beiscsi_ep->conn;
if (beiscsi_conn->login_in_progress) {
beiscsi_free_mgmt_task_handles(beiscsi_conn,
beiscsi_conn->task);
beiscsi_conn->login_in_progress = 0;
}
}
/**
@ -1009,7 +1029,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
{
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
struct beiscsi_hba *phba = beiscsi_ep->phba;
struct be_mcc_wrb *wrb;
struct tcp_connect_and_offload_out *ptcpcnct_out;
struct be_dma_mem nonemb_cmd;
unsigned int tag;
@ -1029,15 +1048,8 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
"BS_%d : In beiscsi_open_conn, ep_cid=%d\n",
beiscsi_ep->ep_cid);
phba->ep_array[beiscsi_ep->ep_cid -
phba->fw_config.iscsi_cid_start] = ep;
if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
phba->params.cxns_per_ctrl * 2)) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Failed in allocate iscsi cid\n");
goto free_ep;
}
phba->ep_array[BE_GET_CRI_FROM_CID
(beiscsi_ep->ep_cid)] = ep;
beiscsi_ep->cid_vld = 0;
nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
@ -1049,24 +1061,24 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
"BS_%d : Failed to allocate memory for"
" mgmt_open_connection\n");
beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
beiscsi_free_ep(beiscsi_ep);
return -ENOMEM;
}
nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in);
memset(nonemb_cmd.va, 0, nonemb_cmd.size);
tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
if (!tag) {
if (tag <= 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : mgmt_open_connection Failed for cid=%d\n",
beiscsi_ep->ep_cid);
beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
beiscsi_free_ep(beiscsi_ep);
return -EAGAIN;
}
ret = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
if (ret) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@ -1074,10 +1086,11 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
goto free_ep;
beiscsi_free_ep(beiscsi_ep);
return -EBUSY;
}
ptcpcnct_out = embedded_payload(wrb);
ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va;
beiscsi_ep = ep->dd_data;
beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
beiscsi_ep->cid_vld = 1;
@ -1087,10 +1100,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return 0;
free_ep:
beiscsi_free_ep(beiscsi_ep);
return -EBUSY;
}
/**
@ -1119,6 +1128,13 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
return ERR_PTR(ret);
}
if (beiscsi_error(phba)) {
ret = -EIO;
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BS_%d : The FW state Not Stable!!!\n");
return ERR_PTR(ret);
}
if (phba->state != BE_ADAPTER_UP) {
ret = -EBUSY;
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
@ -1201,8 +1217,10 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
unsigned int cid)
{
if (phba->conn_table[cid])
phba->conn_table[cid] = NULL;
uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
if (phba->conn_table[cri_index])
phba->conn_table[cri_index] = NULL;
else {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : Connection table Not occupied.\n");

View File

@ -1,5 +1,5 @@
/**
* Copyright (C) 2005 - 2012 Emulex
* Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or

View File

@ -1,5 +1,5 @@
/**
* Copyright (C) 2005 - 2012 Emulex
* Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@ -153,10 +153,14 @@ BEISCSI_RW_ATTR(log_enable, 0x00,
DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL);
struct device_attribute *beiscsi_attrs[] = {
&dev_attr_beiscsi_log_enable,
&dev_attr_beiscsi_drvr_ver,
&dev_attr_beiscsi_adapter_family,
&dev_attr_beiscsi_fw_ver,
&dev_attr_beiscsi_active_cid_count,
NULL,
};
@ -702,7 +706,7 @@ static void beiscsi_get_params(struct beiscsi_hba *phba)
+ BE2_TMFS
+ BE2_NOPOUT_REQ));
phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
phba->params.num_sge_per_io = BE2_SGE;
phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
@ -1032,7 +1036,6 @@ static void hwi_ring_cq_db(struct beiscsi_hba *phba,
static unsigned int
beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_hba *phba,
unsigned short cid,
struct pdu_base *ppdu,
unsigned long pdu_len,
void *pbuffer, unsigned long buf_len)
@ -1144,9 +1147,10 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
struct hwi_wrb_context *pwrb_context;
struct hwi_controller *phwi_ctrlr;
struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
phwi_ctrlr = phba->phwi_ctrlr;
pwrb_context = &phwi_ctrlr->wrb_context[cid];
pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
if (pwrb_context->wrb_handles_available >= 2) {
pwrb_handle = pwrb_context->pwrb_handle_base[
pwrb_context->alloc_index];
@ -1322,8 +1326,9 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
hdr->t2retain = 0;
hdr->flags = csol_cqe->i_flags;
hdr->response = csol_cqe->i_resp;
hdr->exp_cmdsn = csol_cqe->exp_cmdsn;
hdr->max_cmdsn = (csol_cqe->exp_cmdsn + csol_cqe->cmd_wnd - 1);
hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
csol_cqe->cmd_wnd - 1);
hdr->dlength[0] = 0;
hdr->dlength[1] = 0;
@ -1346,9 +1351,9 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
hdr->flags = csol_cqe->i_flags;
hdr->response = csol_cqe->i_resp;
hdr->exp_cmdsn = csol_cqe->exp_cmdsn;
hdr->max_cmdsn = (csol_cqe->exp_cmdsn +
csol_cqe->cmd_wnd - 1);
hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
csol_cqe->cmd_wnd - 1);
hdr->itt = io_task->libiscsi_itt;
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
@ -1363,35 +1368,29 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
struct hwi_controller *phwi_ctrlr;
struct iscsi_task *task;
struct beiscsi_io_task *io_task;
struct iscsi_conn *conn = beiscsi_conn->conn;
struct iscsi_session *session = conn->session;
uint16_t wrb_index, cid;
uint16_t wrb_index, cid, cri_index;
phwi_ctrlr = phba->phwi_ctrlr;
if (chip_skh_r(phba->pcidev)) {
wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
wrb_idx, psol);
cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
cid, psol);
} else {
if (is_chip_be2_be3r(phba)) {
wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
wrb_idx, psol);
cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
cid, psol);
} else {
wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
wrb_idx, psol);
cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
cid, psol);
}
pwrb_context = &phwi_ctrlr->wrb_context[
cid - phba->fw_config.iscsi_cid_start];
cri_index = BE_GET_CRI_FROM_CID(cid);
pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
task = pwrb_handle->pio_handle;
io_task = task->dd_data;
spin_lock_bh(&phba->mgmt_sgl_lock);
free_mgmt_sgl_handle(phba, io_task->psgl_handle);
spin_unlock_bh(&phba->mgmt_sgl_lock);
spin_lock_bh(&session->lock);
free_wrb_handle(phba, pwrb_context, pwrb_handle);
spin_unlock_bh(&session->lock);
memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb));
iscsi_put_task(task);
}
static void
@ -1406,8 +1405,8 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
hdr = (struct iscsi_nopin *)task->hdr;
hdr->flags = csol_cqe->i_flags;
hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
hdr->max_cmdsn = be32_to_cpu(hdr->exp_cmdsn +
csol_cqe->cmd_wnd - 1);
hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
csol_cqe->cmd_wnd - 1);
hdr->opcode = ISCSI_OP_NOOP_IN;
hdr->itt = io_task->libiscsi_itt;
@ -1418,34 +1417,7 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
struct sol_cqe *psol,
struct common_sol_cqe *csol_cqe)
{
if (chip_skh_r(phba->pcidev)) {
csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
i_exp_cmd_sn, psol);
csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
i_res_cnt, psol);
csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
wrb_index, psol);
csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
cid, psol);
csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
hw_sts, psol);
csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
i_cmd_wnd, psol);
if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
cmd_cmpl, psol))
csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
i_sts, psol);
else
csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
i_sts, psol);
if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
u, psol))
csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;
if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
o, psol))
csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
} else {
if (is_chip_be2_be3r(phba)) {
csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
i_exp_cmd_sn, psol);
csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
@ -1464,6 +1436,33 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
i_sts, psol);
csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
i_flags, psol);
} else {
csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
i_exp_cmd_sn, psol);
csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
i_res_cnt, psol);
csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
wrb_index, psol);
csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
cid, psol);
csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
hw_sts, psol);
csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
i_cmd_wnd, psol);
if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
cmd_cmpl, psol))
csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
i_sts, psol);
else
csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
i_sts, psol);
if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
u, psol))
csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;
if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
o, psol))
csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
}
}
@ -1480,14 +1479,15 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
struct iscsi_conn *conn = beiscsi_conn->conn;
struct iscsi_session *session = conn->session;
struct common_sol_cqe csol_cqe = {0};
uint16_t cri_index = 0;
phwi_ctrlr = phba->phwi_ctrlr;
/* Copy the elements to a common structure */
adapter_get_sol_cqe(phba, psol, &csol_cqe);
pwrb_context = &phwi_ctrlr->wrb_context[
csol_cqe.cid - phba->fw_config.iscsi_cid_start];
cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
pwrb_handle = pwrb_context->pwrb_handle_basestd[
csol_cqe.wrb_index];
@ -1561,16 +1561,16 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
unsigned char is_header = 0;
unsigned int index, dpl;
if (chip_skh_r(phba->pcidev)) {
dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
dpl, pdpdu_cqe);
index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
index, pdpdu_cqe);
} else {
if (is_chip_be2_be3r(phba)) {
dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
dpl, pdpdu_cqe);
index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
index, pdpdu_cqe);
} else {
dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
dpl, pdpdu_cqe);
index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
index, pdpdu_cqe);
}
phys_addr.u.a32.address_lo =
@ -1613,8 +1613,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
WARN_ON(!pasync_handle);
pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start;
pasync_handle->cri =
BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
pasync_handle->is_header = is_header;
pasync_handle->buffer_len = dpl;
*pcq_index = index;
@ -1856,8 +1856,6 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
}
status = beiscsi_process_async_pdu(beiscsi_conn, phba,
(beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start),
phdr, hdr_len, pfirst_buffer,
offset);
@ -2011,6 +2009,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
unsigned int num_processed = 0;
unsigned int tot_nump = 0;
unsigned short code = 0, cid = 0;
uint16_t cri_index = 0;
struct beiscsi_conn *beiscsi_conn;
struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_endpoint *ep;
@ -2028,7 +2027,9 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
32] & CQE_CODE_MASK);
/* Get the CID */
if (chip_skh_r(phba->pcidev)) {
if (is_chip_be2_be3r(phba)) {
cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
} else {
if ((code == DRIVERMSG_NOTIFY) ||
(code == UNSOL_HDR_NOTIFY) ||
(code == UNSOL_DATA_NOTIFY))
@ -2038,10 +2039,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
else
cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
cid, sol);
} else
cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
}
ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
cri_index = BE_GET_CRI_FROM_CID(cid);
ep = phba->ep_array[cri_index];
beiscsi_ep = ep->dd_data;
beiscsi_conn = beiscsi_ep->conn;
@ -2191,7 +2192,7 @@ void beiscsi_process_all_cqs(struct work_struct *work)
static int be_iopoll(struct blk_iopoll *iop, int budget)
{
static unsigned int ret;
unsigned int ret;
struct beiscsi_hba *phba;
struct be_eq_obj *pbe_eq;
@ -2416,11 +2417,11 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
/* Check for the data_count */
dsp_value = (task->data_count) ? 1 : 0;
if (chip_skh_r(phba->pcidev))
AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
if (is_chip_be2_be3r(phba))
AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
pwrb, dsp_value);
else
AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
pwrb, dsp_value);
/* Map addr only if there is data_count */
@ -2538,8 +2539,9 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
{
struct be_mem_descriptor *mem_descr;
dma_addr_t bus_add;
struct hwi_controller *phwi_ctrlr;
struct be_mem_descriptor *mem_descr;
struct mem_array *mem_arr, *mem_arr_orig;
unsigned int i, j, alloc_size, curr_alloc_size;
@ -2547,9 +2549,18 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
if (!phba->phwi_ctrlr)
return -ENOMEM;
/* Allocate memory for wrb_context */
phwi_ctrlr = phba->phwi_ctrlr;
phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) *
phba->params.cxns_per_ctrl,
GFP_KERNEL);
if (!phwi_ctrlr->wrb_context)
return -ENOMEM;
phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
GFP_KERNEL);
if (!phba->init_mem) {
kfree(phwi_ctrlr->wrb_context);
kfree(phba->phwi_ctrlr);
return -ENOMEM;
}
@ -2558,6 +2569,7 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
GFP_KERNEL);
if (!mem_arr_orig) {
kfree(phba->init_mem);
kfree(phwi_ctrlr->wrb_context);
kfree(phba->phwi_ctrlr);
return -ENOMEM;
}
@ -2628,6 +2640,7 @@ free_mem:
}
kfree(mem_arr_orig);
kfree(phba->init_mem);
kfree(phba->phwi_ctrlr->wrb_context);
kfree(phba->phwi_ctrlr);
return -ENOMEM;
}
@ -2666,6 +2679,7 @@ static void iscsi_init_global_templates(struct beiscsi_hba *phba)
static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
{
struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
struct hwi_context_memory *phwi_ctxt;
struct wrb_handle *pwrb_handle = NULL;
struct hwi_controller *phwi_ctrlr;
struct hwi_wrb_context *pwrb_context;
@ -2680,7 +2694,18 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
mem_descr_wrb += HWI_MEM_WRB;
phwi_ctrlr = phba->phwi_ctrlr;
for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
/* Allocate memory for WRBQ */
phwi_ctxt = phwi_ctrlr->phwi_ctxt;
phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
phba->fw_config.iscsi_cid_count,
GFP_KERNEL);
if (!phwi_ctxt->be_wrbq) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : WRBQ Mem Alloc Failed\n");
return -ENOMEM;
}
for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
pwrb_context = &phwi_ctrlr->wrb_context[index];
pwrb_context->pwrb_handle_base =
kzalloc(sizeof(struct wrb_handle *) *
@ -2723,7 +2748,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
}
}
idx = 0;
for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
pwrb_context = &phwi_ctrlr->wrb_context[index];
if (!num_cxn_wrb) {
pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
@ -2752,7 +2777,7 @@ init_wrb_hndl_failed:
return -ENOMEM;
}
static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
{
struct hwi_controller *phwi_ctrlr;
struct hba_parameters *p = &phba->params;
@ -2770,6 +2795,15 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
memset(pasync_ctx, 0, sizeof(*pasync_ctx));
pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) *
phba->fw_config.iscsi_cid_count,
GFP_KERNEL);
if (!pasync_ctx->async_entry) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n");
return -ENOMEM;
}
pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
pasync_ctx->buffer_size = p->defpdu_hdr_sz;
@ -2934,6 +2968,8 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
pasync_ctx->async_header.ep_read_ptr = -1;
pasync_ctx->async_data.host_write_ptr = 0;
pasync_ctx->async_data.ep_read_ptr = -1;
return 0;
}
static int
@ -3293,6 +3329,7 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
void *wrb_vaddr;
struct be_dma_mem sgl;
struct be_mem_descriptor *mem_descr;
struct hwi_wrb_context *pwrb_context;
int status;
idx = 0;
@ -3351,8 +3388,9 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
kfree(pwrb_arr);
return status;
}
phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
id;
pwrb_context = &phwi_ctrlr->wrb_context[i];
pwrb_context->cid = phwi_context->be_wrbq[i].id;
BE_SET_CID_TO_CRI(i, pwrb_context->cid);
}
kfree(pwrb_arr);
return 0;
@ -3365,7 +3403,7 @@ static void free_wrb_handles(struct beiscsi_hba *phba)
struct hwi_wrb_context *pwrb_context;
phwi_ctrlr = phba->phwi_ctrlr;
for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
pwrb_context = &phwi_ctrlr->wrb_context[index];
kfree(pwrb_context->pwrb_handle_base);
kfree(pwrb_context->pwrb_handle_basestd);
@ -3394,6 +3432,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
struct be_ctrl_info *ctrl = &phba->ctrl;
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
struct hwi_async_pdu_context *pasync_ctx;
int i, eq_num;
phwi_ctrlr = phba->phwi_ctrlr;
@ -3403,6 +3442,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
if (q->created)
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
}
kfree(phwi_context->be_wrbq);
free_wrb_handles(phba);
q = &phwi_context->be_def_hdrq;
@ -3430,6 +3470,10 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
}
be_mcc_queues_destroy(phba);
pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
kfree(pasync_ctx->async_entry);
be_cmd_fw_uninit(ctrl);
}
static int be_mcc_queues_create(struct beiscsi_hba *phba,
@ -3607,7 +3651,12 @@ static int hwi_init_controller(struct beiscsi_hba *phba)
if (beiscsi_init_wrb_handle(phba))
return -ENOMEM;
hwi_init_async_pdu_ctx(phba);
if (hwi_init_async_pdu_ctx(phba)) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : hwi_init_async_pdu_ctx failed\n");
return -ENOMEM;
}
if (hwi_init_port(phba) != 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : hwi_init_controller failed\n");
@ -3637,6 +3686,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
mem_descr++;
}
kfree(phba->init_mem);
kfree(phba->phwi_ctrlr->wrb_context);
kfree(phba->phwi_ctrlr);
}
@ -3769,7 +3819,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
{
int i, new_cid;
int i;
phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
GFP_KERNEL);
@ -3780,19 +3830,33 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
return -ENOMEM;
}
phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
phba->params.cxns_per_ctrl, GFP_KERNEL);
if (!phba->ep_array) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : Failed to allocate memory in "
"hba_setup_cid_tbls\n");
kfree(phba->cid_array);
phba->cid_array = NULL;
return -ENOMEM;
}
new_cid = phba->fw_config.iscsi_cid_start;
for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
phba->cid_array[i] = new_cid;
new_cid += 2;
phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
phba->params.cxns_per_ctrl, GFP_KERNEL);
if (!phba->conn_table) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : Failed to allocate memory in"
"hba_setup_cid_tbls\n");
kfree(phba->cid_array);
kfree(phba->ep_array);
phba->cid_array = NULL;
phba->ep_array = NULL;
return -ENOMEM;
}
for (i = 0; i < phba->params.cxns_per_ctrl; i++)
phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid;
phba->avlbl_cids = phba->params.cxns_per_ctrl;
return 0;
}
@ -4062,6 +4126,53 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
kfree(phba->eh_sgl_hndl_base);
kfree(phba->cid_array);
kfree(phba->ep_array);
kfree(phba->conn_table);
}
/**
* beiscsi_free_mgmt_task_handles()- Free driver CXN resources
* @beiscsi_conn: ptr to the conn to be cleaned up
* @task: ptr to iscsi_task resource to be freed.
*
* Free driver mgmt resources bound to the CXN.
**/
void
beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
struct iscsi_task *task)
{
struct beiscsi_io_task *io_task;
struct beiscsi_hba *phba = beiscsi_conn->phba;
struct hwi_wrb_context *pwrb_context;
struct hwi_controller *phwi_ctrlr;
uint16_t cri_index = BE_GET_CRI_FROM_CID(
beiscsi_conn->beiscsi_conn_cid);
phwi_ctrlr = phba->phwi_ctrlr;
pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
io_task = task->dd_data;
if (io_task->pwrb_handle) {
memset(io_task->pwrb_handle->pwrb, 0,
sizeof(struct iscsi_wrb));
free_wrb_handle(phba, pwrb_context,
io_task->pwrb_handle);
io_task->pwrb_handle = NULL;
}
if (io_task->psgl_handle) {
spin_lock_bh(&phba->mgmt_sgl_lock);
free_mgmt_sgl_handle(phba,
io_task->psgl_handle);
io_task->psgl_handle = NULL;
spin_unlock_bh(&phba->mgmt_sgl_lock);
}
if (io_task->mtask_addr)
pci_unmap_single(phba->pcidev,
io_task->mtask_addr,
io_task->mtask_data_count,
PCI_DMA_TODEVICE);
}
/**
@ -4078,10 +4189,11 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
struct hwi_wrb_context *pwrb_context;
struct hwi_controller *phwi_ctrlr;
uint16_t cri_index = BE_GET_CRI_FROM_CID(
beiscsi_conn->beiscsi_conn_cid);
phwi_ctrlr = phba->phwi_ctrlr;
pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
- phba->fw_config.iscsi_cid_start];
pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
if (io_task->cmd_bhs) {
pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
@ -4103,27 +4215,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
io_task->psgl_handle = NULL;
}
} else {
if (!beiscsi_conn->login_in_progress) {
if (io_task->pwrb_handle) {
free_wrb_handle(phba, pwrb_context,
io_task->pwrb_handle);
io_task->pwrb_handle = NULL;
}
if (io_task->psgl_handle) {
spin_lock(&phba->mgmt_sgl_lock);
free_mgmt_sgl_handle(phba,
io_task->psgl_handle);
spin_unlock(&phba->mgmt_sgl_lock);
io_task->psgl_handle = NULL;
}
if (io_task->mtask_addr) {
pci_unmap_single(phba->pcidev,
io_task->mtask_addr,
io_task->mtask_data_count,
PCI_DMA_TODEVICE);
io_task->mtask_addr = 0;
}
}
if (!beiscsi_conn->login_in_progress)
beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
}
}
@ -4146,15 +4239,14 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
beiscsi_cleanup_task(task);
spin_unlock_bh(&session->lock);
pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start));
pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid);
/* Check for the adapter family */
if (chip_skh_r(phba->pcidev))
beiscsi_offload_cxn_v2(params, pwrb_handle);
else
if (is_chip_be2_be3r(phba))
beiscsi_offload_cxn_v0(params, pwrb_handle,
phba->init_mem);
else
beiscsi_offload_cxn_v2(params, pwrb_handle);
be_dws_le_to_cpu(pwrb_handle->pwrb,
sizeof(struct iscsi_target_context_update_wrb));
@ -4194,6 +4286,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
struct hwi_wrb_context *pwrb_context;
struct hwi_controller *phwi_ctrlr;
itt_t itt;
uint16_t cri_index = 0;
struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
dma_addr_t paddr;
@ -4223,8 +4316,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
goto free_hndls;
}
io_task->pwrb_handle = alloc_wrb_handle(phba,
beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start);
beiscsi_conn->beiscsi_conn_cid);
if (!io_task->pwrb_handle) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
@ -4236,6 +4328,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
} else {
io_task->scsi_cmnd = NULL;
if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
beiscsi_conn->task = task;
if (!beiscsi_conn->login_in_progress) {
spin_lock(&phba->mgmt_sgl_lock);
io_task->psgl_handle = (struct sgl_handle *)
@ -4257,8 +4350,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
io_task->psgl_handle;
io_task->pwrb_handle =
alloc_wrb_handle(phba,
beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start);
beiscsi_conn->beiscsi_conn_cid);
if (!io_task->pwrb_handle) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO |
@ -4278,7 +4370,6 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
io_task->pwrb_handle =
beiscsi_conn->plogin_wrb_handle;
}
beiscsi_conn->task = task;
} else {
spin_lock(&phba->mgmt_sgl_lock);
io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
@ -4295,8 +4386,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
}
io_task->pwrb_handle =
alloc_wrb_handle(phba,
beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start);
beiscsi_conn->beiscsi_conn_cid);
if (!io_task->pwrb_handle) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
@ -4324,12 +4414,13 @@ free_io_hndls:
free_mgmt_hndls:
spin_lock(&phba->mgmt_sgl_lock);
free_mgmt_sgl_handle(phba, io_task->psgl_handle);
io_task->psgl_handle = NULL;
spin_unlock(&phba->mgmt_sgl_lock);
free_hndls:
phwi_ctrlr = phba->phwi_ctrlr;
pwrb_context = &phwi_ctrlr->wrb_context[
beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start];
cri_index = BE_GET_CRI_FROM_CID(
beiscsi_conn->beiscsi_conn_cid);
pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
if (io_task->pwrb_handle)
free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
io_task->pwrb_handle = NULL;
@ -4351,7 +4442,6 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
unsigned int doorbell = 0;
pwrb = io_task->pwrb_handle->pwrb;
memset(pwrb, 0, sizeof(*pwrb));
io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
io_task->bhs_len = sizeof(struct be_cmd_bhs);
@ -4465,19 +4555,7 @@ static int beiscsi_mtask(struct iscsi_task *task)
pwrb = io_task->pwrb_handle->pwrb;
memset(pwrb, 0, sizeof(*pwrb));
if (chip_skh_r(phba->pcidev)) {
AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
be32_to_cpu(task->cmdsn));
AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
io_task->pwrb_handle->wrb_index);
AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
io_task->psgl_handle->sgl_index);
AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
task->data_count);
AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
io_task->pwrb_handle->nxt_wrb_index);
pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
} else {
if (is_chip_be2_be3r(phba)) {
AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
be32_to_cpu(task->cmdsn));
AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
@ -4489,6 +4567,18 @@ static int beiscsi_mtask(struct iscsi_task *task)
AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
io_task->pwrb_handle->nxt_wrb_index);
pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
} else {
AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
be32_to_cpu(task->cmdsn));
AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
io_task->pwrb_handle->wrb_index);
AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
io_task->psgl_handle->sgl_index);
AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
task->data_count);
AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
io_task->pwrb_handle->nxt_wrb_index);
pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
}
@ -4501,19 +4591,19 @@ static int beiscsi_mtask(struct iscsi_task *task)
case ISCSI_OP_NOOP_OUT:
if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
if (chip_skh_r(phba->pcidev))
AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
if (is_chip_be2_be3r(phba))
AMAP_SET_BITS(struct amap_iscsi_wrb,
dmsg, pwrb, 1);
else
AMAP_SET_BITS(struct amap_iscsi_wrb,
AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
dmsg, pwrb, 1);
} else {
ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
if (chip_skh_r(phba->pcidev))
AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
if (is_chip_be2_be3r(phba))
AMAP_SET_BITS(struct amap_iscsi_wrb,
dmsg, pwrb, 0);
else
AMAP_SET_BITS(struct amap_iscsi_wrb,
AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
dmsg, pwrb, 0);
}
hwi_write_buffer(pwrb, task);
@ -4540,9 +4630,9 @@ static int beiscsi_mtask(struct iscsi_task *task)
}
/* Set the task type */
io_task->wrb_type = (chip_skh_r(phba->pcidev)) ?
AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb) :
AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb);
io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb);
doorbell |= cid & DB_WRB_POST_CID_MASK;
doorbell |= (io_task->pwrb_handle->wrb_index &
@ -4834,6 +4924,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
case OC_SKH_ID1:
phba->generation = BE_GEN4;
phba->iotask_fn = beiscsi_iotask_v2;
break;
default:
phba->generation = 0;
}

View File

@ -1,5 +1,5 @@
/**
* Copyright (C) 2005 - 2012 Emulex
* Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@ -36,7 +36,7 @@
#include "be.h"
#define DRV_NAME "be2iscsi"
#define BUILD_STR "10.0.272.0"
#define BUILD_STR "10.0.467.0"
#define BE_NAME "Emulex OneConnect" \
"Open-iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
@ -66,8 +66,9 @@
#define MAX_CPUS 64
#define BEISCSI_MAX_NUM_CPUS 7
#define OC_SKH_MAX_NUM_CPUS 63
#define OC_SKH_MAX_NUM_CPUS 31
#define BEISCSI_VER_STRLEN 32
#define BEISCSI_SGLIST_ELEMENTS 30
@ -265,7 +266,9 @@ struct invalidate_command_table {
unsigned short cid;
} __packed;
#define chip_skh_r(pdev) (pdev->device == OC_SKH_ID1)
#define chip_be2(phba) (phba->generation == BE_GEN2)
#define chip_be3_r(phba) (phba->generation == BE_GEN3)
#define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba)))
struct beiscsi_hba {
struct hba_parameters params;
struct hwi_controller *phwi_ctrlr;
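These helpers replace the PCI-ID test chip_skh_r() used throughout the driver: every chip-specific branch in this series now asks is_chip_be2_be3r() first and otherwise falls through to the Skyhawk-R v2 descriptor formats. A sketch of how the generation field they test is set at probe time (it mirrors the switch visible in beiscsi_dev_probe below; the BE2/BE3 device-ID names are assumptions based on the driver's naming):

switch (pcidev->device) {
case BE_DEVICE_ID1:        /* BE2 family */
    phba->generation = BE_GEN2;
    break;
case BE_DEVICE_ID2:        /* BE3-R family */
    phba->generation = BE_GEN3;
    break;
case OC_SKH_ID1:           /* Skyhawk-R: v2 WRB/CQE formats */
    phba->generation = BE_GEN4;
    break;
default:
    phba->generation = 0;
}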
@ -304,10 +307,15 @@ struct beiscsi_hba {
unsigned short avlbl_cids;
unsigned short cid_alloc;
unsigned short cid_free;
struct beiscsi_conn *conn_table[BE2_MAX_SESSIONS * 2];
struct list_head hba_queue;
#define BE_MAX_SESSION 2048
#define BE_SET_CID_TO_CRI(cri_index, cid) \
(phba->cid_to_cri_map[cid] = cri_index)
#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid])
unsigned short cid_to_cri_map[BE_MAX_SESSION];
unsigned short *cid_array;
struct iscsi_endpoint **ep_array;
struct beiscsi_conn **conn_table;
struct iscsi_boot_kset *boot_kset;
struct Scsi_Host *shost;
struct iscsi_iface *ipv4_iface;
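The cid_to_cri_map table is the core of this change: instead of deriving a table index as cid - phba->fw_config.iscsi_cid_start, the driver records each CID-to-CRI pairing once when the WRB rings are created (BE_SET_CID_TO_CRI in beiscsi_create_wrb_rings) and looks it up everywhere else. A minimal sketch of the mapping, with illustrative function names:

static unsigned short cid_to_cri_map[BE_MAX_SESSION];

/* ring-creation time: the i-th WRB context owns firmware CID 'cid' */
static void record_cri(unsigned short cri_index, unsigned short cid)
{
    cid_to_cri_map[cid] = cri_index;    /* BE_SET_CID_TO_CRI() */
}

/* completion path: recover the driver-side index from a CQE's CID */
static unsigned short cri_of(unsigned short cid)
{
    return cid_to_cri_map[cid];         /* BE_GET_CRI_FROM_CID() */
}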
@ -339,6 +347,7 @@ struct beiscsi_hba {
struct delayed_work beiscsi_hw_check_task;
u8 mac_address[ETH_ALEN];
char fw_ver_str[BEISCSI_VER_STRLEN];
char wq_name[20];
struct workqueue_struct *wq; /* The actual work queue */
struct be_ctrl_info ctrl;
@ -563,7 +572,7 @@ struct hwi_async_pdu_context {
* This is a varying size list! Do not add anything
* after this entry!!
*/
struct hwi_async_entry async_entry[BE2_MAX_SESSIONS * 2];
struct hwi_async_entry *async_entry;
};
#define PDUCQE_CODE_MASK 0x0000003F
@ -749,6 +758,8 @@ void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
void beiscsi_process_all_cqs(struct work_struct *work);
void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
struct iscsi_task *task);
static inline bool beiscsi_error(struct beiscsi_hba *phba)
{
@ -933,7 +944,7 @@ struct hwi_controller {
struct sgl_handle *psgl_handle_base;
unsigned int wrb_mem_index;
struct hwi_wrb_context wrb_context[BE2_MAX_SESSIONS * 2];
struct hwi_wrb_context *wrb_context;
struct mcc_wrb *pmcc_wrb_base;
struct be_ring default_pdu_hdr;
struct be_ring default_pdu_data;
@ -970,9 +981,7 @@ struct hwi_context_memory {
struct be_queue_info be_def_hdrq;
struct be_queue_info be_def_dataq;
struct be_queue_info be_wrbq[BE2_MAX_SESSIONS];
struct be_mcc_wrb_context *pbe_mcc_context;
struct be_queue_info *be_wrbq;
struct hwi_async_pdu_context *pasync_ctx;
};

View File

@ -1,5 +1,5 @@
/**
* Copyright (C) 2005 - 2012 Emulex
* Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@ -368,6 +368,8 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"BM_%d : phba->fw_config.iscsi_features = %d\n",
phba->fw_config.iscsi_features);
memcpy(phba->fw_ver_str, resp->params.hba_attribs.
firmware_version_string, BEISCSI_VER_STRLEN);
} else
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BG_%d : Failed in mgmt_check_supported_fw\n");
@ -1259,6 +1261,45 @@ beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, BE_NAME "\n");
}
/**
* beiscsi_fw_ver_disp()- Display Firmware Version
* @dev: ptr to device not used.
* @attr: device attribute, not used.
* @buf: contains formatted text Firmware version
*
* return
* size of the formatted string
**/
ssize_t
beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct beiscsi_hba *phba = iscsi_host_priv(shost);
return snprintf(buf, PAGE_SIZE, "%s\n", phba->fw_ver_str);
}
/**
* beiscsi_active_cid_disp()- Display Sessions Active
* @dev: ptr to device not used.
* @attr: device attribute, not used.
* @buf: contains formatted text Session Count
*
* return
* size of the formatted string
**/
ssize_t
beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct beiscsi_hba *phba = iscsi_host_priv(shost);
return snprintf(buf, PAGE_SIZE, "%d\n",
(phba->params.cxns_per_ctrl - phba->avlbl_cids));
}
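Like the other entries in beiscsi_attrs[], these are read-only scsi_host attributes; on a typical system they would surface as files such as /sys/class/scsi_host/hostN/beiscsi_fw_ver and /sys/class/scsi_host/hostN/beiscsi_active_cid_count (paths shown for illustration).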
/**
* beiscsi_adap_family_disp()- Display adapter family.
* @dev: ptr to device to get priv structure

View File

@ -1,5 +1,5 @@
/**
* Copyright (C) 2005 - 2012 Emulex
* Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@ -156,25 +156,25 @@ union invalidate_commands_params {
} __packed;
struct mgmt_hba_attributes {
u8 flashrom_version_string[32];
u8 manufacturer_name[32];
u8 flashrom_version_string[BEISCSI_VER_STRLEN];
u8 manufacturer_name[BEISCSI_VER_STRLEN];
u32 supported_modes;
u8 seeprom_version_lo;
u8 seeprom_version_hi;
u8 rsvd0[2];
u32 fw_cmd_data_struct_version;
u32 ep_fw_data_struct_version;
u32 future_reserved[12];
u8 ncsi_version_string[12];
u32 default_extended_timeout;
u8 controller_model_number[32];
u8 controller_model_number[BEISCSI_VER_STRLEN];
u8 controller_description[64];
u8 controller_serial_number[32];
u8 ip_version_string[32];
u8 firmware_version_string[32];
u8 bios_version_string[32];
u8 redboot_version_string[32];
u8 driver_version_string[32];
u8 fw_on_flash_version_string[32];
u8 controller_serial_number[BEISCSI_VER_STRLEN];
u8 ip_version_string[BEISCSI_VER_STRLEN];
u8 firmware_version_string[BEISCSI_VER_STRLEN];
u8 bios_version_string[BEISCSI_VER_STRLEN];
u8 redboot_version_string[BEISCSI_VER_STRLEN];
u8 driver_version_string[BEISCSI_VER_STRLEN];
u8 fw_on_flash_version_string[BEISCSI_VER_STRLEN];
u32 functionalities_supported;
u16 max_cdblength;
u8 asic_revision;
@ -190,7 +190,8 @@ struct mgmt_hba_attributes {
u32 firmware_post_status;
u32 hba_mtu[8];
u8 iscsi_features;
u8 future_u8[3];
u8 asic_generation;
u8 future_u8[2];
u32 future_u32[3];
} __packed;
@ -207,7 +208,7 @@ struct mgmt_controller_attributes {
u64 unique_identifier;
u8 netfilters;
u8 rsvd0[3];
u8 future_u32[4];
u32 future_u32[4];
} __packed;
struct be_mgmt_controller_attributes {
@ -311,6 +312,12 @@ int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
ssize_t beiscsi_drvr_ver_disp(struct device *dev,
struct device_attribute *attr, char *buf);
ssize_t beiscsi_fw_ver_disp(struct device *dev,
struct device_attribute *attr, char *buf);
ssize_t beiscsi_active_cid_disp(struct device *dev,
struct device_attribute *attr, char *buf);
ssize_t beiscsi_adap_family_disp(struct device *dev,
struct device_attribute *attr, char *buf);

View File

@ -2,7 +2,7 @@
#define _BNX2FC_H_
/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
*
* Copyright (c) 2008 - 2011 Broadcom Corporation
* Copyright (c) 2008 - 2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -64,10 +64,12 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
#define BNX2FC_VERSION "1.0.13"
#define BNX2FC_VERSION "1.0.14"
#define PFX "bnx2fc: "
#define BCM_CHIP_LEN 16
#define BNX2X_DOORBELL_PCI_BAR 2
#define BNX2FC_MAX_BD_LEN 0xffff
@ -241,6 +243,8 @@ struct bnx2fc_hba {
int wait_for_link_down;
int num_ofld_sess;
struct list_head vports;
char chip_num[BCM_CHIP_LEN];
};
struct bnx2fc_interface {

View File

@ -3,7 +3,7 @@
* This file contains helper routines that handle ELS requests
* and responses.
*
* Copyright (c) 2008 - 2011 Broadcom Corporation
* Copyright (c) 2008 - 2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by

View File

@ -3,7 +3,7 @@
* cnic modules to create FCoE instances, send/receive non-offloaded
* FIP/FCoE packets, listen to link events etc.
*
* Copyright (c) 2008 - 2011 Broadcom Corporation
* Copyright (c) 2008 - 2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
#define DRV_MODULE_RELDATE "Dec 21, 2012"
#define DRV_MODULE_RELDATE "Mar 08, 2013"
static char version[] =
@ -679,6 +679,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
{
struct fcoe_port *port = lport_priv(lport);
struct bnx2fc_interface *interface = port->priv;
struct bnx2fc_hba *hba = interface->hba;
struct Scsi_Host *shost = lport->host;
int rc = 0;
@ -699,8 +700,9 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
}
if (!lport->vport)
fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s",
BNX2FC_NAME, BNX2FC_VERSION,
snprintf(fc_host_symbolic_name(lport->host), 256,
"%s (Broadcom %s) v%s over %s",
BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION,
interface->netdev->name);
return 0;
@ -1656,23 +1658,60 @@ mem_err:
static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
{
struct cnic_dev *cnic;
struct pci_dev *pdev;
if (!hba->cnic) {
printk(KERN_ERR PFX "cnic is NULL\n");
return -ENODEV;
}
cnic = hba->cnic;
hba->pcidev = cnic->pcidev;
if (hba->pcidev)
pci_dev_get(hba->pcidev);
pdev = hba->pcidev = cnic->pcidev;
if (!hba->pcidev)
return -ENODEV;
switch (pdev->device) {
case PCI_DEVICE_ID_NX2_57710:
strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN);
break;
case PCI_DEVICE_ID_NX2_57711:
strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN);
break;
case PCI_DEVICE_ID_NX2_57712:
case PCI_DEVICE_ID_NX2_57712_MF:
case PCI_DEVICE_ID_NX2_57712_VF:
strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN);
break;
case PCI_DEVICE_ID_NX2_57800:
case PCI_DEVICE_ID_NX2_57800_MF:
case PCI_DEVICE_ID_NX2_57800_VF:
strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN);
break;
case PCI_DEVICE_ID_NX2_57810:
case PCI_DEVICE_ID_NX2_57810_MF:
case PCI_DEVICE_ID_NX2_57810_VF:
strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN);
break;
case PCI_DEVICE_ID_NX2_57840:
case PCI_DEVICE_ID_NX2_57840_MF:
case PCI_DEVICE_ID_NX2_57840_VF:
case PCI_DEVICE_ID_NX2_57840_2_20:
case PCI_DEVICE_ID_NX2_57840_4_10:
strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN);
break;
default:
pr_err(PFX "Unknown device id 0x%x\n", pdev->device);
break;
}
pci_dev_get(hba->pcidev);
return 0;
}
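A note on the strncpy() calls above: strncpy() does not NUL-terminate when the source length reaches the destination size. The chip names here are 8 characters, well under BCM_CHIP_LEN (16), so the copies are safe as written; a defensive variant would reserve the terminator explicitly (a sketch, not part of the patch):

	strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN - 1);
	hba->chip_num[BCM_CHIP_LEN - 1] = '\0';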
static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
{
if (hba->pcidev)
if (hba->pcidev) {
hba->chip_num[0] = '\0';
pci_dev_put(hba->pcidev);
}
hba->pcidev = NULL;
}


@ -2,7 +2,7 @@
* This file contains the low level functions that interact
* with 57712 FCoE firmware.
*
* Copyright (c) 2008 - 2011 Broadcom Corporation
* Copyright (c) 2008 - 2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -126,7 +126,11 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
fcoe_init3.error_bit_map_lo = 0xffffffff;
fcoe_init3.error_bit_map_hi = 0xffffffff;
fcoe_init3.perf_config = 1;
/*
* enable both cached connection and cached tasks
* 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both
*/
fcoe_init3.perf_config = 3;
kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
kwqe_arr[1] = (struct kwqe *) &fcoe_init2;


@ -1,7 +1,7 @@
/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
* IO manager and SCSI IO processing.
*
* Copyright (c) 2008 - 2011 Broadcom Corporation
* Copyright (c) 2008 - 2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -1270,8 +1270,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
spin_lock_bh(&tgt->tgt_lock);
io_req->wait_for_comp = 0;
if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
&io_req->req_flags))) {
if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
BNX2FC_IO_DBG(io_req, "IO completed in a different context\n");
rc = SUCCESS;
} else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
&io_req->req_flags))) {
/* Let the scsi-ml try to recover this command */
printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
io_req->xid);


@ -2,7 +2,7 @@
* Handles operations such as session offload/upload etc, and manages
* session resources such as connection id and qp resources.
*
* Copyright (c) 2008 - 2011 Broadcom Corporation
* Copyright (c) 2008 - 2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by


@ -114,7 +114,7 @@ struct csio_lnode_stats {
uint32_t n_rnode_match; /* matched rnode */
uint32_t n_dev_loss_tmo; /* Device loss timeout */
uint32_t n_fdmi_err; /* fdmi err */
uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */
uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */
enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */
uint32_t n_rnode_alloc; /* rnode allocated */
uint32_t n_rnode_free; /* rnode freed */


@ -63,7 +63,7 @@ struct csio_rnode_stats {
uint32_t n_err_nomem; /* error nomem */
uint32_t n_evt_unexp; /* unexpected event */
uint32_t n_evt_drop; /* unexpected event */
uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */
uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */
enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */
uint32_t n_lun_rst; /* Number of resets of
* of LUNs under this
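This hunk and the matching csio_lnode one above fix the same off-by-one: n_evt_fw[] is indexed by firmware event codes up to and including PROTO_ERR_IMPL_LOGO, so it needs that value plus one slots. Illustration (the enumerator value 9 is an assumption for the example):

#include <stdint.h>

enum fw_evt { FW_EVT_FIRST = 0, /* ... */ PROTO_ERR_IMPL_LOGO = 9 };

uint32_t bad[PROTO_ERR_IMPL_LOGO];	/* indexes 0..8 only: the last event overruns */
uint32_t good[PROTO_ERR_IMPL_LOGO + 1];	/* indexes 0..9: every event fits */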


@ -38,7 +38,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
#define DRV_VERSION "1.5.0.2"
#define DRV_VERSION "1.5.0.22"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
@ -192,6 +192,18 @@ enum fnic_state {
struct mempool;
enum fnic_evt {
FNIC_EVT_START_VLAN_DISC = 1,
FNIC_EVT_START_FCF_DISC = 2,
FNIC_EVT_MAX,
};
struct fnic_event {
struct list_head list;
struct fnic *fnic;
enum fnic_evt event;
};
/* Per-instance private data structure */
struct fnic {
struct fc_lport *lport;
@ -254,6 +266,18 @@ struct fnic {
struct sk_buff_head frame_queue;
struct sk_buff_head tx_queue;
/*** FIP related data members -- start ***/
void (*set_vlan)(struct fnic *, u16 vlan);
struct work_struct fip_frame_work;
struct sk_buff_head fip_frame_queue;
struct timer_list fip_timer;
struct list_head vlans;
spinlock_t vlans_lock;
struct work_struct event_work;
struct list_head evlist;
/*** FIP related data members -- end ***/
/* copy work queue cache line section */
____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
/* completion queue cache line section */
@ -278,6 +302,7 @@ static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip)
}
extern struct workqueue_struct *fnic_event_queue;
extern struct workqueue_struct *fnic_fip_queue;
extern struct device_attribute *fnic_attrs[];
void fnic_clear_intr_mode(struct fnic *fnic);
@ -289,6 +314,7 @@ int fnic_send(struct fc_lport *, struct fc_frame *);
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
void fnic_handle_frame(struct work_struct *work);
void fnic_handle_link(struct work_struct *work);
void fnic_handle_event(struct work_struct *work);
int fnic_rq_cmpl_handler(struct fnic *fnic, int);
int fnic_alloc_rq_frame(struct vnic_rq *rq);
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
@ -321,6 +347,12 @@ void fnic_handle_link_event(struct fnic *fnic);
int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
void fnic_handle_fip_frame(struct work_struct *work);
void fnic_handle_fip_event(struct fnic *fnic);
void fnic_fcoe_reset_vlans(struct fnic *fnic);
void fnic_fcoe_evlist_free(struct fnic *fnic);
extern void fnic_handle_fip_timer(struct fnic *fnic);
static inline int
fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
{


@ -31,12 +31,20 @@
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fip.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"
static u8 fcoe_all_fcfs[ETH_ALEN];
struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;
static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
void fnic_handle_link(struct work_struct *work)
{
@ -69,6 +77,11 @@ void fnic_handle_link(struct work_struct *work)
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"link down\n");
fcoe_ctlr_link_down(&fnic->ctlr);
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
/* start FCoE VLAN discovery */
fnic_fcoe_send_vlan_req(fnic);
return;
}
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"link up\n");
fcoe_ctlr_link_up(&fnic->ctlr);
@ -79,6 +92,11 @@ void fnic_handle_link(struct work_struct *work)
} else if (fnic->link_status) {
/* DOWN -> UP */
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
/* start FCoE VLAN discovery */
fnic_fcoe_send_vlan_req(fnic);
return;
}
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
fcoe_ctlr_link_up(&fnic->ctlr);
} else {
@ -128,6 +146,441 @@ void fnic_handle_frame(struct work_struct *work)
}
}
void fnic_fcoe_evlist_free(struct fnic *fnic)
{
struct fnic_event *fevt = NULL;
struct fnic_event *next = NULL;
unsigned long flags;
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (list_empty(&fnic->evlist)) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
list_del(&fevt->list);
kfree(fevt);
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
void fnic_handle_event(struct work_struct *work)
{
struct fnic *fnic = container_of(work, struct fnic, event_work);
struct fnic_event *fevt = NULL;
struct fnic_event *next = NULL;
unsigned long flags;
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (list_empty(&fnic->evlist)) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
if (fnic->stop_rx_link_events) {
list_del(&fevt->list);
kfree(fevt);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
/*
* If we're in a transitional state, just re-queue and return.
* The queue will be serviced when we get to a stable state.
*/
if (fnic->state != FNIC_IN_FC_MODE &&
fnic->state != FNIC_IN_ETH_MODE) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
list_del(&fevt->list);
switch (fevt->event) {
case FNIC_EVT_START_VLAN_DISC:
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
fnic_fcoe_send_vlan_req(fnic);
spin_lock_irqsave(&fnic->fnic_lock, flags);
break;
case FNIC_EVT_START_FCF_DISC:
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"Start FCF Discovery\n");
fnic_fcoe_start_fcf_disc(fnic);
break;
default:
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"Unknown event 0x%x\n", fevt->event);
break;
}
kfree(fevt);
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
/**
* is_fnic_fip_flogi_reject() - check if the received FIP FLOGI frame is rejected
* @fip: The FCoE controller that received the frame
* @skb: The received FIP frame
*
* Returns non-zero if the encapsulated FLOGI response is an ELS reject
* (LS_RJT) from the switch.
*/
static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
struct sk_buff *skb)
{
struct fc_lport *lport = fip->lp;
struct fip_header *fiph;
struct fc_frame_header *fh = NULL;
struct fip_desc *desc;
struct fip_encaps *els;
enum fip_desc_type els_dtype = 0;
u16 op;
u8 els_op;
u8 sub;
size_t els_len = 0;
size_t rlen;
size_t dlen = 0;
if (skb_linearize(skb))
return 0;
if (skb->len < sizeof(*fiph))
return 0;
fiph = (struct fip_header *)skb->data;
op = ntohs(fiph->fip_op);
sub = fiph->fip_subcode;
if (op != FIP_OP_LS)
return 0;
if (sub != FIP_SC_REP)
return 0;
rlen = ntohs(fiph->fip_dl_len) * 4;
if (rlen + sizeof(*fiph) > skb->len)
return 0;
desc = (struct fip_desc *)(fiph + 1);
dlen = desc->fip_dlen * FIP_BPW;
if (desc->fip_dtype == FIP_DT_FLOGI) {
shost_printk(KERN_DEBUG, lport->host,
" FIP TYPE FLOGI: fab name:%llx "
"vfid:%d map:%x\n",
fip->sel_fcf->fabric_name, fip->sel_fcf->vfid,
fip->sel_fcf->fc_map);
if (dlen < sizeof(*els) + sizeof(*fh) + 1)
return 0;
els_len = dlen - sizeof(*els);
els = (struct fip_encaps *)desc;
fh = (struct fc_frame_header *)(els + 1);
els_dtype = desc->fip_dtype;
if (!fh)
return 0;
/*
* ELS command code, reason and explanation should be = Reject,
* unsupported command and insufficient resource
*/
els_op = *(u8 *)(fh + 1);
if (els_op == ELS_LS_RJT) {
shost_printk(KERN_INFO, lport->host,
"Flogi Request Rejected by Switch\n");
return 1;
}
shost_printk(KERN_INFO, lport->host,
"Flogi Request Accepted by Switch\n");
}
return 0;
}
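For orientation, the frame layout walked above, as a descriptive sketch of this code (not a new protocol definition):

/*
 *   struct fip_header                FIP_OP_LS / FIP_SC_REP
 *   struct fip_desc / fip_encaps     fip_dtype == FIP_DT_FLOGI
 *     struct fc_frame_header         encapsulated FC frame
 *       u8 els_op                    ELS_LS_RJT => switch rejected FLOGI
 */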
static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
struct fcoe_ctlr *fip = &fnic->ctlr;
struct sk_buff *skb;
char *eth_fr;
int fr_len;
struct fip_vlan *vlan;
u64 vlan_tov;
fnic_fcoe_reset_vlans(fnic);
fnic->set_vlan(fnic, 0);
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
"Sending VLAN request...\n");
skb = dev_alloc_skb(sizeof(struct fip_vlan));
if (!skb)
return;
fr_len = sizeof(*vlan);
eth_fr = (char *)skb->data;
vlan = (struct fip_vlan *)eth_fr;
memset(vlan, 0, sizeof(*vlan));
memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
vlan->eth.h_proto = htons(ETH_P_FIP);
vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
vlan->fip.fip_op = htons(FIP_OP_VLAN);
vlan->fip.fip_subcode = FIP_SC_VL_REQ;
vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
skb_put(skb, sizeof(*vlan));
skb->protocol = htons(ETH_P_FIP);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
fip->send(fip, skb);
/* set a timer so that we can retry if there is no response */
vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
{
struct fcoe_ctlr *fip = &fnic->ctlr;
struct fip_header *fiph;
struct fip_desc *desc;
u16 vid;
size_t rlen;
size_t dlen;
struct fcoe_vlan *vlan;
u64 sol_time;
unsigned long flags;
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
"Received VLAN response...\n");
fiph = (struct fip_header *) skb->data;
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
"Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
ntohs(fiph->fip_op), fiph->fip_subcode);
rlen = ntohs(fiph->fip_dl_len) * 4;
fnic_fcoe_reset_vlans(fnic);
spin_lock_irqsave(&fnic->vlans_lock, flags);
desc = (struct fip_desc *)(fiph + 1);
while (rlen > 0) {
dlen = desc->fip_dlen * FIP_BPW;
switch (desc->fip_dtype) {
case FIP_DT_VLAN:
vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
shost_printk(KERN_INFO, fnic->lport->host,
"process_vlan_resp: FIP VLAN %d\n", vid);
vlan = kmalloc(sizeof(*vlan),
GFP_ATOMIC);
if (!vlan) {
/* retry from timer */
spin_unlock_irqrestore(&fnic->vlans_lock,
flags);
goto out;
}
memset(vlan, 0, sizeof(struct fcoe_vlan));
vlan->vid = vid & 0x0fff;
vlan->state = FIP_VLAN_AVAIL;
list_add_tail(&vlan->list, &fnic->vlans);
break;
}
desc = (struct fip_desc *)((char *)desc + dlen);
rlen -= dlen;
}
/* any VLAN descriptors present ? */
if (list_empty(&fnic->vlans)) {
/* retry from timer */
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
"No VLAN descriptors in FIP VLAN response\n");
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
goto out;
}
vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
fnic->set_vlan(fnic, vlan->vid);
vlan->state = FIP_VLAN_SENT; /* sent now */
vlan->sol_count++;
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
/* start the solicitation */
fcoe_ctlr_link_up(fip);
sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
out:
return;
}
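One thing worth flagging in the descriptor walk above: dlen comes straight off the wire, and a descriptor advertising fip_dlen == 0 would keep the while (rlen > 0) loop spinning on the same descriptor. A defensive shape for the loop, assuming the same locals (a sketch, not part of the patch):

	while (rlen > 0) {
		dlen = desc->fip_dlen * FIP_BPW;
		if (dlen < sizeof(*desc) || dlen > rlen)
			break;		/* malformed descriptor, stop parsing */
		/* ... handle FIP_DT_VLAN as above ... */
		desc = (struct fip_desc *)((char *)desc + dlen);
		rlen -= dlen;
	}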
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
{
unsigned long flags;
struct fcoe_vlan *vlan;
u64 sol_time;
spin_lock_irqsave(&fnic->vlans_lock, flags);
vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
fnic->set_vlan(fnic, vlan->vid);
vlan->state = FIP_VLAN_SENT; /* sent now */
vlan->sol_count = 1;
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
/* start the solicitation */
fcoe_ctlr_link_up(&fnic->ctlr);
sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
}
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
{
unsigned long flags;
struct fcoe_vlan *fvlan;
spin_lock_irqsave(&fnic->vlans_lock, flags);
if (list_empty(&fnic->vlans)) {
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
return -EINVAL;
}
fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
if (fvlan->state == FIP_VLAN_USED) {
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
return 0;
}
if (fvlan->state == FIP_VLAN_SENT) {
fvlan->state = FIP_VLAN_USED;
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
return 0;
}
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
return -EINVAL;
}
static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
{
struct fnic_event *fevt;
unsigned long flags;
fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
if (!fevt)
return;
fevt->fnic = fnic;
fevt->event = ev;
spin_lock_irqsave(&fnic->fnic_lock, flags);
list_add_tail(&fevt->list, &fnic->evlist);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
schedule_work(&fnic->event_work);
}
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
{
struct fip_header *fiph;
int ret = 1;
u16 op;
u8 sub;
if (!skb || !(skb->data))
return -1;
if (skb_linearize(skb))
goto drop;
fiph = (struct fip_header *)skb->data;
op = ntohs(fiph->fip_op);
sub = fiph->fip_subcode;
if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
goto drop;
if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
goto drop;
if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
goto drop;
/* pass it on to fcoe */
ret = 1;
} else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) {
/* set the vlan as used */
fnic_fcoe_process_vlan_resp(fnic, skb);
ret = 0;
} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
/* received CVL request, restart vlan disc */
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
/* pass it on to fcoe */
ret = 1;
}
drop:
return ret;
}
void fnic_handle_fip_frame(struct work_struct *work)
{
struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
unsigned long flags;
struct sk_buff *skb;
struct ethhdr *eh;
while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
dev_kfree_skb(skb);
return;
}
/*
* If we're in a transitional state, just re-queue and return.
* The queue will be serviced when we get to a stable state.
*/
if (fnic->state != FNIC_IN_FC_MODE &&
fnic->state != FNIC_IN_ETH_MODE) {
skb_queue_head(&fnic->fip_frame_queue, skb);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
eh = (struct ethhdr *)skb->data;
if (eh->h_proto == htons(ETH_P_FIP)) {
skb_pull(skb, sizeof(*eh));
if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
dev_kfree_skb(skb);
continue;
}
/*
* If there are FLOGI rejects, clear all
* FCFs and restart from scratch
*/
if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
shost_printk(KERN_INFO, fnic->lport->host,
"Trigger a Link down - VLAN Disc\n");
fcoe_ctlr_link_down(&fnic->ctlr);
/* start FCoE VLAN discovery */
fnic_fcoe_send_vlan_req(fnic);
dev_kfree_skb(skb);
continue;
}
fcoe_ctlr_recv(&fnic->ctlr, skb);
continue;
}
}
}
/**
* fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
* @fnic: fnic instance.
@ -150,8 +603,14 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
skb_reset_mac_header(skb);
}
if (eh->h_proto == htons(ETH_P_FIP)) {
skb_pull(skb, sizeof(*eh));
fcoe_ctlr_recv(&fnic->ctlr, skb);
if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
printk(KERN_ERR "Dropped FIP frame, as firmware "
"uses non-FIP mode, Enable FIP "
"using UCSM\n");
goto drop;
}
skb_queue_tail(&fnic->fip_frame_queue, skb);
queue_work(fnic_fip_queue, &fnic->fip_frame_work);
return 1; /* let caller know packet was used */
}
if (eh->h_proto != htons(ETH_P_FCOE))
@ -720,3 +1179,104 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
dev_kfree_skb(fp_skb(fp));
buf->os_buf = NULL;
}
void fnic_fcoe_reset_vlans(struct fnic *fnic)
{
unsigned long flags;
struct fcoe_vlan *vlan;
struct fcoe_vlan *next;
/*
* indicate a link down to fcoe so that all fcfs are freed;
* this might not be required since we already did it before
* sending the vlan discovery request
*/
spin_lock_irqsave(&fnic->vlans_lock, flags);
if (!list_empty(&fnic->vlans)) {
list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
list_del(&vlan->list);
kfree(vlan);
}
}
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}
void fnic_handle_fip_timer(struct fnic *fnic)
{
unsigned long flags;
struct fcoe_vlan *vlan;
u64 sol_time;
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (fnic->ctlr.mode == FIP_ST_NON_FIP)
return;
spin_lock_irqsave(&fnic->vlans_lock, flags);
if (list_empty(&fnic->vlans)) {
/* no vlans available, try again */
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"Start VLAN Discovery\n");
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
return;
}
vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
shost_printk(KERN_DEBUG, fnic->lport->host,
"fip_timer: vlan %d state %d sol_count %d\n",
vlan->vid, vlan->state, vlan->sol_count);
switch (vlan->state) {
case FIP_VLAN_USED:
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"FIP VLAN is selected for FC transaction\n");
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
break;
case FIP_VLAN_FAILED:
/* if all vlans are in failed state, restart vlan disc */
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"Start VLAN Discovery\n");
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
break;
case FIP_VLAN_SENT:
if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
/*
* no response on this vlan, remove from the list.
* Try the next vlan
*/
shost_printk(KERN_INFO, fnic->lport->host,
"Dequeue this VLAN ID %d from list\n",
vlan->vid);
list_del(&vlan->list);
kfree(vlan);
vlan = NULL;
if (list_empty(&fnic->vlans)) {
/* we exhausted all vlans, restart vlan disc */
spin_unlock_irqrestore(&fnic->vlans_lock,
flags);
shost_printk(KERN_INFO, fnic->lport->host,
"fip_timer: vlan list empty, "
"trigger vlan disc\n");
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
return;
}
/* check the next vlan */
vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
list);
fnic->set_vlan(fnic, vlan->vid);
vlan->state = FIP_VLAN_SENT; /* sent now */
}
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
vlan->sol_count++;
sol_time = jiffies + msecs_to_jiffies
(FCOE_CTLR_START_DELAY);
mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
break;
}
}
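Pulling the timer, fnic_fcoe_vlan_check() and fnic_fcoe_process_vlan_resp() paths together, the per-VLAN retry machinery above behaves as follows:

/*
 *   FIP_VLAN_AVAIL  --set_vlan + solicit------------->  FIP_VLAN_SENT
 *   FIP_VLAN_SENT   --FCF advertisement seen--------->  FIP_VLAN_USED
 *   FIP_VLAN_SENT   --FCOE_CTLR_MAX_SOL (8) timeouts->  dropped, next VLAN tried
 *   FIP_VLAN_FAILED or list empty ------------------->  FNIC_EVT_START_VLAN_DISC
 */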


@ -0,0 +1,68 @@
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _FNIC_FIP_H_
#define _FNIC_FIP_H_
#define FCOE_CTLR_START_DELAY 2000 /* ms after first adv. to choose FCF */
#define FCOE_CTLR_FIPVLAN_TOV 2000 /* ms after FIP VLAN disc */
#define FCOE_CTLR_MAX_SOL 8
#define FINC_MAX_FLOGI_REJECTS 8
/*
* FIP_DT_VLAN descriptor.
*/
struct fip_vlan_desc {
struct fip_desc fd_desc;
__be16 fd_vlan;
} __attribute__((packed));
struct vlan {
__be16 vid;
__be16 type;
};
/*
* VLAN entry.
*/
struct fcoe_vlan {
struct list_head list;
u16 vid; /* vlan ID */
u16 sol_count; /* no. of sols sent */
u16 state; /* state */
};
enum fip_vlan_state {
FIP_VLAN_AVAIL = 0, /* don't do anything */
FIP_VLAN_SENT = 1, /* sent */
FIP_VLAN_USED = 2, /* succeed */
FIP_VLAN_FAILED = 3, /* failed to respond */
};
struct fip_vlan {
struct ethhdr eth;
struct fip_header fip;
struct {
struct fip_mac_desc mac;
struct fip_wwn_desc wwnn;
} desc;
};
#endif /* _FNIC_FIP_H_ */


@ -39,6 +39,7 @@
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "fnic_io.h"
#include "fnic_fip.h"
#include "fnic.h"
#define PCI_DEVICE_ID_CISCO_FNIC 0x0045
@ -292,6 +293,13 @@ static void fnic_notify_timer(unsigned long data)
round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
}
static void fnic_fip_notify_timer(unsigned long data)
{
struct fnic *fnic = (struct fnic *)data;
fnic_handle_fip_timer(fnic);
}
static void fnic_notify_timer_start(struct fnic *fnic)
{
switch (vnic_dev_get_intr_mode(fnic->vdev)) {
@ -403,6 +411,12 @@ static u8 *fnic_get_mac(struct fc_lport *lport)
return fnic->data_src_addr;
}
static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
{
u16 old_vlan;
old_vlan = vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
}
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct Scsi_Host *host;
@ -620,7 +634,29 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
fnic->set_vlan = fnic_set_vlan;
fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
(unsigned long)fnic);
spin_lock_init(&fnic->vlans_lock);
INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
INIT_WORK(&fnic->event_work, fnic_handle_event);
skb_queue_head_init(&fnic->fip_frame_queue);
spin_lock_irqsave(&fnic_list_lock, flags);
if (!fnic_fip_queue) {
fnic_fip_queue =
create_singlethread_workqueue("fnic_fip_q");
if (!fnic_fip_queue) {
spin_unlock_irqrestore(&fnic_list_lock, flags);
printk(KERN_ERR PFX "fnic FIP work queue "
"create failed\n");
err = -ENOMEM;
goto err_out_free_max_pool;
}
}
spin_unlock_irqrestore(&fnic_list_lock, flags);
INIT_LIST_HEAD(&fnic->evlist);
INIT_LIST_HEAD(&fnic->vlans);
} else {
shost_printk(KERN_INFO, fnic->lport->host,
"firmware uses non-FIP mode\n");
@ -807,6 +843,13 @@ static void fnic_remove(struct pci_dev *pdev)
skb_queue_purge(&fnic->frame_queue);
skb_queue_purge(&fnic->tx_queue);
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
del_timer_sync(&fnic->fip_timer);
skb_queue_purge(&fnic->fip_frame_queue);
fnic_fcoe_reset_vlans(fnic);
fnic_fcoe_evlist_free(fnic);
}
/*
* Log off the fabric. This stops all remote ports, dns port,
* logs off the fabric. This flushes all rport, disc, lport work
@ -889,8 +932,8 @@ static int __init fnic_init_module(void)
len = sizeof(struct fnic_sgl_list);
fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
SLAB_HWCACHE_ALIGN,
NULL);
SLAB_HWCACHE_ALIGN,
NULL);
if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
err = -ENOMEM;
@ -951,6 +994,10 @@ static void __exit fnic_cleanup_module(void)
{
pci_unregister_driver(&fnic_driver);
destroy_workqueue(fnic_event_queue);
if (fnic_fip_queue) {
flush_workqueue(fnic_fip_queue);
destroy_workqueue(fnic_fip_queue);
}
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
kmem_cache_destroy(fnic_io_req_cache);


@ -584,6 +584,16 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
}
u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan)
{
u64 a0 = new_default_vlan, a1 = 0;
int wait = 1000;
int old_vlan = 0;
old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait);
return (u16)old_vlan;
}
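vnic_dev_set_default_vlan() follows the usual devcmd convention visible above: inputs in a0/a1, a 1000 ms wait, and the firmware reply back in a0, which here carries the previous default VLAN. A caller can therefore save and restore the setting (usage sketch; variable names are illustrative):

	u16 old_vid;

	old_vid = vnic_dev_set_default_vlan(vdev, new_vid);	/* set, remember old */
	/* ... */
	vnic_dev_set_default_vlan(vdev, old_vid);		/* restore on teardown */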
int vnic_dev_link_status(struct vnic_dev *vdev)
{
if (vdev->linkstatus)


@ -148,6 +148,8 @@ int vnic_dev_disable(struct vnic_dev *vdev);
int vnic_dev_open(struct vnic_dev *vdev, int arg);
int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
int vnic_dev_init(struct vnic_dev *vdev, int arg);
u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev,
u16 new_default_vlan);
int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,


@ -196,6 +196,73 @@ enum vnic_devcmd_cmd {
/* undo initialize of virtual link */
CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
/* check fw capability of a cmd:
* in: (u32)a0=cmd
* out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
/* persistent binding info
* in: (u64)a0=paddr of arg
* (u32)a1=CMD_PERBI_XXX */
CMD_PERBI = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37),
/* Interrupt Assert Register functionality
* in: (u16)a0=interrupt number to assert
*/
CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38),
/* initiate hangreset, like softreset after hang detected */
CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39),
/* hangreset status:
* out: a0=0 reset complete, a0=1 reset in progress */
CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40),
/*
* Set hw ingress packet vlan rewrite mode:
* in: (u32)a0=new vlan rewrite mode
* out: (u32)a0=old vlan rewrite mode */
CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41),
/*
* in: (u16)a0=bdf of target vnic
* (u32)a1=cmd to proxy
* a2-a15=args to cmd in a1
* out: (u32)a0=status of proxied cmd
* a1-a15=out args of proxied cmd */
CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
/*
* As for BY_BDF except a0 is index of hvnlink subordinate vnic
* or SR-IOV virtual vnic
*/
CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
/*
* For HPP toggle:
* adapter-info-get
* in: (u64)a0=physical address of buffer passed in from caller.
* (u16)a1=size of buffer specified in a0.
* out: (u64)a0=physical address of buffer passed in from caller.
* (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or
* 0 if no VIF-CONFIG-INFO TLV was ever received. */
CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
/*
* INT13 API: (u64)a0=paddr to vnic_int13_params struct
* (u32)a1=INT13_CMD_xxx
*/
CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
/*
* Set default vlan:
* in: (u16)a0=new default vlan
* (u16)a1=zero for overriding vlan with param a0,
* non-zero for resetting vlan to the default
* out: (u16)a0=old default vlan
*/
CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46)
};
/* flags for CMD_OPEN */


@ -2179,7 +2179,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
return 0;
}
if (vhost->state == IBMVFC_ACTIVE) {
if (vhost->logged_in) {
evt = ibmvfc_get_event(vhost);
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
@ -2190,7 +2190,12 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
tmf->common.length = sizeof(*tmf);
tmf->scsi_id = rport->port_id;
int_to_scsilun(sdev->lun, &tmf->lun);
tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
if (!(vhost->login_buf->resp.capabilities & IBMVFC_CAN_SUPPRESS_ABTS))
type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
if (vhost->state == IBMVFC_ACTIVE)
tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
else
tmf->flags = ((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID);
tmf->cancel_key = (unsigned long)sdev->hostdata;
tmf->my_cancel_key = (unsigned long)starget->hostdata;
@ -2327,7 +2332,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
timeout = wait_for_completion_timeout(&evt->comp, timeout);
if (!timeout) {
rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
rc = ibmvfc_cancel_all(sdev, 0);
if (!rc) {
rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
if (rc == SUCCESS)
@ -2383,24 +2388,30 @@ out:
* @cmd: scsi command to abort
*
* Returns:
* SUCCESS / FAILED
* SUCCESS / FAST_IO_FAIL / FAILED
**/
static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct ibmvfc_host *vhost = shost_priv(sdev->host);
int cancel_rc, abort_rc;
int cancel_rc, block_rc;
int rc = FAILED;
ENTER;
fc_block_scsi_eh(cmd);
block_rc = fc_block_scsi_eh(cmd);
ibmvfc_wait_while_resetting(vhost);
cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
abort_rc = ibmvfc_abort_task_set(sdev);
if (block_rc != FAST_IO_FAIL) {
cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
ibmvfc_abort_task_set(sdev);
} else
cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
if (!cancel_rc && !abort_rc)
if (!cancel_rc)
rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
if (block_rc == FAST_IO_FAIL && rc != FAILED)
rc = FAST_IO_FAIL;
LEAVE;
return rc;
}
@ -2410,28 +2421,46 @@ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
* @cmd: scsi command struct
*
* Returns:
* SUCCESS / FAILED
* SUCCESS / FAST_IO_FAIL / FAILED
**/
static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct ibmvfc_host *vhost = shost_priv(sdev->host);
int cancel_rc, reset_rc;
int cancel_rc, block_rc, reset_rc = 0;
int rc = FAILED;
ENTER;
fc_block_scsi_eh(cmd);
block_rc = fc_block_scsi_eh(cmd);
ibmvfc_wait_while_resetting(vhost);
cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
if (block_rc != FAST_IO_FAIL) {
cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
} else
cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
if (!cancel_rc && !reset_rc)
rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
if (block_rc == FAST_IO_FAIL && rc != FAILED)
rc = FAST_IO_FAIL;
LEAVE;
return rc;
}
/**
* ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
* @sdev: scsi device struct
* @data: return code
*
**/
static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
{
unsigned long *rc = data;
*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
}
/**
* ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
* @sdev: scsi device struct
@ -2449,26 +2478,33 @@ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
* @cmd: scsi command struct
*
* Returns:
* SUCCESS / FAILED
* SUCCESS / FAST_IO_FAIL / FAILED
**/
static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct scsi_target *starget = scsi_target(sdev);
int reset_rc;
int block_rc;
int reset_rc = 0;
int rc = FAILED;
unsigned long cancel_rc = 0;
ENTER;
fc_block_scsi_eh(cmd);
block_rc = fc_block_scsi_eh(cmd);
ibmvfc_wait_while_resetting(vhost);
starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
if (block_rc != FAST_IO_FAIL) {
starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
} else
starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
if (!cancel_rc && !reset_rc)
rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
if (block_rc == FAST_IO_FAIL && rc != FAILED)
rc = FAST_IO_FAIL;
LEAVE;
return rc;
}
@ -2480,12 +2516,16 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
**/
static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
int rc;
int rc, block_rc;
struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
fc_block_scsi_eh(cmd);
block_rc = fc_block_scsi_eh(cmd);
dev_err(vhost->dev, "Resetting connection due to error recovery\n");
rc = ibmvfc_issue_fc_host_lip(vhost->host);
if (block_rc == FAST_IO_FAIL)
return FAST_IO_FAIL;
return rc ? FAILED : SUCCESS;
}
@ -2509,8 +2549,7 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
dev_rport = starget_to_rport(scsi_target(sdev));
if (dev_rport != rport)
continue;
ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
ibmvfc_abort_task_set(sdev);
ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
}
rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);


@ -29,8 +29,8 @@
#include "viosrp.h"
#define IBMVFC_NAME "ibmvfc"
#define IBMVFC_DRIVER_VERSION "1.0.10"
#define IBMVFC_DRIVER_DATE "(August 24, 2012)"
#define IBMVFC_DRIVER_VERSION "1.0.11"
#define IBMVFC_DRIVER_DATE "(April 12, 2013)"
#define IBMVFC_DEFAULT_TIMEOUT 60
#define IBMVFC_ADISC_CANCEL_TIMEOUT 45
@ -208,10 +208,10 @@ struct ibmvfc_npiv_login_resp {
u16 error;
u32 flags;
#define IBMVFC_NATIVE_FC 0x01
#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
u32 reserved;
u64 capabilities;
#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
#define IBMVFC_CAN_SUPPRESS_ABTS 0x10
u32 max_cmds;
u32 scsi_id_sz;
u64 max_dma_len;
@ -351,6 +351,7 @@ struct ibmvfc_tmf {
#define IBMVFC_TMF_LUN_RESET 0x10
#define IBMVFC_TMF_TGT_RESET 0x20
#define IBMVFC_TMF_LUA_VALID 0x40
#define IBMVFC_TMF_SUPPRESS_ABTS 0x80
u32 cancel_key;
u32 my_cancel_key;
u32 pad;


@ -4777,7 +4777,7 @@ static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
if (!ioa_cfg->in_reset_reload) {
if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
dev_err(&ioa_cfg->pdev->dev,
"Adapter being reset as a result of error recovery.\n");
@ -6421,7 +6421,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
{
u32 ioadl_flags = 0;
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
struct ipr_ioadl64_desc *last_ioadl64 = NULL;
int len = qc->nbytes;
struct scatterlist *sg;
@ -6441,7 +6441,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
ioarcb->ioadl_len =
cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
ioarcb->u.sis64_addr_data.data_ioadl_addr =
cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
for_each_sg(qc->sg, sg, qc->n_elem, si) {
ioadl64->flags = cpu_to_be32(ioadl_flags);
@ -6739,6 +6739,7 @@ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
int i;
ENTER;
if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
@ -6750,6 +6751,13 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
ioa_cfg->in_reset_reload = 0;
ioa_cfg->reset_retries = 0;
for (i = 0; i < ioa_cfg->hrrq_num; i++) {
spin_lock(&ioa_cfg->hrrq[i]._lock);
ioa_cfg->hrrq[i].ioa_is_dead = 1;
spin_unlock(&ioa_cfg->hrrq[i]._lock);
}
wmb();
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wake_up_all(&ioa_cfg->reset_wait_q);
LEAVE;
@ -8651,7 +8659,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
ioa_cfg->sdt_state = ABORT_DUMP;
ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
ioa_cfg->in_ioa_bringdown = 1;
for (i = 0; i < ioa_cfg->hrrq_num; i++) {
spin_lock(&ioa_cfg->hrrq[i]._lock);


@ -552,7 +552,7 @@ struct ipr_ioarcb_ata_regs { /* 22 bytes */
u8 hob_lbam;
u8 hob_lbah;
u8 ctl;
}__attribute__ ((packed, aligned(4)));
}__attribute__ ((packed, aligned(2)));
struct ipr_ioadl_desc {
__be32 flags_and_data_len;


@ -1085,7 +1085,7 @@ static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *s
struct isci_host *ihost = idev->owning_port->owning_controller;
struct domain_device *dev = idev->domain_dev;
if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
} else if (dev_is_expander(dev)) {
sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
@ -1098,7 +1098,7 @@ static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
struct domain_device *dev = idev->domain_dev;
if (dev->dev_type == SAS_END_DEV) {
if (dev->dev_type == SAS_END_DEVICE) {
struct isci_host *ihost = idev->owning_port->owning_controller;
isci_remote_device_not_ready(ihost, idev,


@ -297,7 +297,7 @@ static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_conte
static inline bool dev_is_expander(struct domain_device *dev)
{
return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV;
return dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE;
}
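These isci and libsas hunks, and everything below, apply one mechanical tree-wide rename: enum sas_dev_type becomes enum sas_device_type and its enumerators take the SAS_ prefix, matching the names the SAS transport class already exports. The mapping, as visible in the hunks (the numeric values are assumptions taken from scsi_transport_sas.h / sas.h):

enum sas_device_type {
	SAS_PHY_UNUSED             = 0,	/* was NO_DEVICE    */
	SAS_END_DEVICE             = 1,	/* was SAS_END_DEV  */
	SAS_EDGE_EXPANDER_DEVICE   = 2,	/* was EDGE_DEV     */
	SAS_FANOUT_EXPANDER_DEVICE = 3,	/* was FANOUT_DEV   */
	SAS_SATA_DEV               = 5,	/* was SATA_DEV     */
	SAS_SATA_PM                = 7,	/* was SATA_PM      */
	SAS_SATA_PM_PORT           = 8,	/* was SATA_PM_PORT */
	SAS_SATA_PENDING           = 9,	/* was SATA_PENDING */
};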
static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)


@ -2978,7 +2978,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
/* all unaccelerated request types (non ssp or ncq) handled with
* substates
*/
if (!task && dev->dev_type == SAS_END_DEV) {
if (!task && dev->dev_type == SAS_END_DEVICE) {
state = SCI_REQ_TASK_WAIT_TC_COMP;
} else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
state = SCI_REQ_SMP_WAIT_RESP;
@ -3101,7 +3101,7 @@ sci_io_request_construct(struct isci_host *ihost,
if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
return SCI_FAILURE_INVALID_REMOTE_DEVICE;
if (dev->dev_type == SAS_END_DEV)
if (dev->dev_type == SAS_END_DEVICE)
/* pass */;
else if (dev_is_sata(dev))
memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
@ -3125,7 +3125,7 @@ enum sci_status sci_task_request_construct(struct isci_host *ihost,
/* Build the common part of the request */
sci_general_request_construct(ihost, idev, ireq);
if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) {
if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) {
set_bit(IREQ_TMF, &ireq->flags);
memset(ireq->tc, 0, sizeof(struct scu_task_context));


@ -250,7 +250,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
}
/* XXX convert to get this from task->tproto like other drivers */
if (dev->dev_type == SAS_END_DEV) {
if (dev->dev_type == SAS_END_DEVICE) {
isci_tmf->proto = SAS_PROTOCOL_SSP;
status = sci_task_request_construct_ssp(ireq);
if (status != SCI_SUCCESS)


@ -285,14 +285,14 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
if (phy->attached_tproto & SAS_PROTOCOL_STP)
dev->tproto = phy->attached_tproto;
if (phy->attached_sata_dev)
dev->tproto |= SATA_DEV;
dev->tproto |= SAS_SATA_DEV;
if (phy->attached_dev_type == SATA_PENDING)
dev->dev_type = SATA_PENDING;
if (phy->attached_dev_type == SAS_SATA_PENDING)
dev->dev_type = SAS_SATA_PENDING;
else {
int res;
dev->dev_type = SATA_DEV;
dev->dev_type = SAS_SATA_DEV;
res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
&dev->sata_dev.rps_resp);
if (res) {
@ -314,7 +314,7 @@ static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy)
int res;
/* we weren't pending, so successfully end the reset sequence now */
if (dev->dev_type != SATA_PENDING)
if (dev->dev_type != SAS_SATA_PENDING)
return 1;
/* hmmm, if this succeeds do we need to repost the domain_device to the
@ -348,9 +348,9 @@ static int smp_ata_check_ready(struct ata_link *link)
return 0;
switch (ex_phy->attached_dev_type) {
case SATA_PENDING:
case SAS_SATA_PENDING:
return 0;
case SAS_END_DEV:
case SAS_END_DEVICE:
if (ex_phy->attached_sata_dev)
return sas_ata_clear_pending(dev, ex_phy);
default:
@ -631,7 +631,7 @@ static void sas_get_ata_command_set(struct domain_device *dev)
struct dev_to_host_fis *fis =
(struct dev_to_host_fis *) dev->frame_rcvd;
if (dev->dev_type == SATA_PENDING)
if (dev->dev_type == SAS_SATA_PENDING)
return;
if ((fis->sector_count == 1 && /* ATA */
@ -797,7 +797,7 @@ int sas_discover_sata(struct domain_device *dev)
{
int res;
if (dev->dev_type == SATA_PM)
if (dev->dev_type == SAS_SATA_PM)
return -ENODEV;
sas_get_ata_command_set(dev);


@ -39,11 +39,11 @@
void sas_init_dev(struct domain_device *dev)
{
switch (dev->dev_type) {
case SAS_END_DEV:
case SAS_END_DEVICE:
INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node);
break;
case EDGE_DEV:
case FANOUT_DEV:
case SAS_EDGE_EXPANDER_DEVICE:
case SAS_FANOUT_EXPANDER_DEVICE:
INIT_LIST_HEAD(&dev->ex_dev.children);
mutex_init(&dev->ex_dev.cmd_mutex);
break;
@ -93,9 +93,9 @@ static int sas_get_port_device(struct asd_sas_port *port)
if (fis->interrupt_reason == 1 && fis->lbal == 1 &&
fis->byte_count_low==0x69 && fis->byte_count_high == 0x96
&& (fis->device & ~0x10) == 0)
dev->dev_type = SATA_PM;
dev->dev_type = SAS_SATA_PM;
else
dev->dev_type = SATA_DEV;
dev->dev_type = SAS_SATA_DEV;
dev->tproto = SAS_PROTOCOL_SATA;
} else {
struct sas_identify_frame *id =
@ -109,21 +109,21 @@ static int sas_get_port_device(struct asd_sas_port *port)
dev->port = port;
switch (dev->dev_type) {
case SATA_DEV:
case SAS_SATA_DEV:
rc = sas_ata_init(dev);
if (rc) {
rphy = NULL;
break;
}
/* fall through */
case SAS_END_DEV:
case SAS_END_DEVICE:
rphy = sas_end_device_alloc(port->port);
break;
case EDGE_DEV:
case SAS_EDGE_EXPANDER_DEVICE:
rphy = sas_expander_alloc(port->port,
SAS_EDGE_EXPANDER_DEVICE);
break;
case FANOUT_DEV:
case SAS_FANOUT_EXPANDER_DEVICE:
rphy = sas_expander_alloc(port->port,
SAS_FANOUT_EXPANDER_DEVICE);
break;
@ -156,7 +156,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
dev->rphy = rphy;
get_device(&dev->rphy->dev);
if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV)
if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEVICE)
list_add_tail(&dev->disco_list_node, &port->disco_list);
else {
spin_lock_irq(&port->dev_list_lock);
@ -315,7 +315,7 @@ void sas_free_device(struct kref *kref)
dev->phy = NULL;
/* remove the phys and ports, everything else should be gone */
if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV)
if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
kfree(dev->ex_dev.ex_phy);
if (dev_is_sata(dev) && dev->sata_dev.ap) {
@ -343,7 +343,7 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
spin_unlock_irq(&port->dev_list_lock);
spin_lock_irq(&ha->lock);
if (dev->dev_type == SAS_END_DEV &&
if (dev->dev_type == SAS_END_DEVICE &&
!list_empty(&dev->ssp_dev.eh_list_node)) {
list_del_init(&dev->ssp_dev.eh_list_node);
ha->eh_active--;
@ -457,15 +457,15 @@ static void sas_discover_domain(struct work_struct *work)
task_pid_nr(current));
switch (dev->dev_type) {
case SAS_END_DEV:
case SAS_END_DEVICE:
error = sas_discover_end_dev(dev);
break;
case EDGE_DEV:
case FANOUT_DEV:
case SAS_EDGE_EXPANDER_DEVICE:
case SAS_FANOUT_EXPANDER_DEVICE:
error = sas_discover_root_expander(dev);
break;
case SATA_DEV:
case SATA_PM:
case SAS_SATA_DEV:
case SAS_SATA_PM:
#ifdef CONFIG_SCSI_SAS_ATA
error = sas_discover_sata(dev);
break;


@ -183,21 +183,21 @@ static char sas_route_char(struct domain_device *dev, struct ex_phy *phy)
}
}
static enum sas_dev_type to_dev_type(struct discover_resp *dr)
static enum sas_device_type to_dev_type(struct discover_resp *dr)
{
/* This is detecting a failure to transmit initial dev to host
* FIS as described in section J.5 of sas-2 r16
*/
if (dr->attached_dev_type == NO_DEVICE && dr->attached_sata_dev &&
if (dr->attached_dev_type == SAS_PHY_UNUSED && dr->attached_sata_dev &&
dr->linkrate >= SAS_LINK_RATE_1_5_GBPS)
return SATA_PENDING;
return SAS_SATA_PENDING;
else
return dr->attached_dev_type;
}
static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
{
enum sas_dev_type dev_type;
enum sas_device_type dev_type;
enum sas_linkrate linkrate;
u8 sas_addr[SAS_ADDR_SIZE];
struct smp_resp *resp = rsp;
@ -238,7 +238,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
/* Handle vacant phy - rest of dr data is not valid so skip it */
if (phy->phy_state == PHY_VACANT) {
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
phy->attached_dev_type = NO_DEVICE;
phy->attached_dev_type = SAS_PHY_UNUSED;
if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
phy->phy_id = phy_id;
goto skip;
@ -259,7 +259,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
/* help some expanders that fail to zero sas_address in the 'no
* device' case
*/
if (phy->attached_dev_type == NO_DEVICE ||
if (phy->attached_dev_type == SAS_PHY_UNUSED ||
phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
else
@ -292,13 +292,13 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
out:
switch (phy->attached_dev_type) {
case SATA_PENDING:
case SAS_SATA_PENDING:
type = "stp pending";
break;
case NO_DEVICE:
case SAS_PHY_UNUSED:
type = "no device";
break;
case SAS_END_DEV:
case SAS_END_DEVICE:
if (phy->attached_iproto) {
if (phy->attached_tproto)
type = "host+target";
@ -311,8 +311,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
type = "ssp";
}
break;
case EDGE_DEV:
case FANOUT_DEV:
case SAS_EDGE_EXPANDER_DEVICE:
case SAS_FANOUT_EXPANDER_DEVICE:
type = "smp";
break;
default:
@ -833,7 +833,7 @@ static struct domain_device *sas_ex_discover_end_dev(
} else
#endif
if (phy->attached_tproto & SAS_PROTOCOL_SSP) {
child->dev_type = SAS_END_DEV;
child->dev_type = SAS_END_DEVICE;
rphy = sas_end_device_alloc(phy->port);
/* FIXME: error handling */
if (unlikely(!rphy))
@ -932,11 +932,11 @@ static struct domain_device *sas_ex_discover_expander(
switch (phy->attached_dev_type) {
case EDGE_DEV:
case SAS_EDGE_EXPANDER_DEVICE:
rphy = sas_expander_alloc(phy->port,
SAS_EDGE_EXPANDER_DEVICE);
break;
case FANOUT_DEV:
case SAS_FANOUT_EXPANDER_DEVICE:
rphy = sas_expander_alloc(phy->port,
SAS_FANOUT_EXPANDER_DEVICE);
break;
@ -1013,7 +1013,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr))
sas_ex_disable_port(dev, ex_phy->attached_sas_addr);
if (ex_phy->attached_dev_type == NO_DEVICE) {
if (ex_phy->attached_dev_type == SAS_PHY_UNUSED) {
if (ex_phy->routing_attr == DIRECT_ROUTING) {
memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
sas_configure_routing(dev, ex_phy->attached_sas_addr);
@ -1022,10 +1022,10 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
} else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN)
return 0;
if (ex_phy->attached_dev_type != SAS_END_DEV &&
ex_phy->attached_dev_type != FANOUT_DEV &&
ex_phy->attached_dev_type != EDGE_DEV &&
ex_phy->attached_dev_type != SATA_PENDING) {
if (ex_phy->attached_dev_type != SAS_END_DEVICE &&
ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE &&
ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE &&
ex_phy->attached_dev_type != SAS_SATA_PENDING) {
SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx "
"phy 0x%x\n", ex_phy->attached_dev_type,
SAS_ADDR(dev->sas_addr),
@ -1049,11 +1049,11 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
}
switch (ex_phy->attached_dev_type) {
case SAS_END_DEV:
case SATA_PENDING:
case SAS_END_DEVICE:
case SAS_SATA_PENDING:
child = sas_ex_discover_end_dev(dev, phy_id);
break;
case FANOUT_DEV:
case SAS_FANOUT_EXPANDER_DEVICE:
if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) {
SAS_DPRINTK("second fanout expander %016llx phy 0x%x "
"attached to ex %016llx phy 0x%x\n",
@ -1067,7 +1067,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
memcpy(dev->port->disc.fanout_sas_addr,
ex_phy->attached_sas_addr, SAS_ADDR_SIZE);
/* fallthrough */
case EDGE_DEV:
case SAS_EDGE_EXPANDER_DEVICE:
child = sas_ex_discover_expander(dev, phy_id);
break;
default:
@ -1111,8 +1111,8 @@ static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr)
phy->phy_state == PHY_NOT_PRESENT)
continue;
if ((phy->attached_dev_type == EDGE_DEV ||
phy->attached_dev_type == FANOUT_DEV) &&
if ((phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE ||
phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) &&
phy->routing_attr == SUBTRACTIVE_ROUTING) {
memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE);
@ -1130,8 +1130,8 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev)
u8 sub_addr[8] = {0, };
list_for_each_entry(child, &ex->children, siblings) {
if (child->dev_type != EDGE_DEV &&
child->dev_type != FANOUT_DEV)
if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
child->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
continue;
if (sub_addr[0] == 0) {
sas_find_sub_addr(child, sub_addr);
@ -1208,7 +1208,7 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
int i;
u8 *sub_sas_addr = NULL;
if (dev->dev_type != EDGE_DEV)
if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE)
return 0;
for (i = 0; i < ex->num_phys; i++) {
@ -1218,8 +1218,8 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
phy->phy_state == PHY_NOT_PRESENT)
continue;
if ((phy->attached_dev_type == FANOUT_DEV ||
phy->attached_dev_type == EDGE_DEV) &&
if ((phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE) &&
phy->routing_attr == SUBTRACTIVE_ROUTING) {
if (!sub_sas_addr)
@ -1245,8 +1245,8 @@ static void sas_print_parent_topology_bug(struct domain_device *child,
struct ex_phy *child_phy)
{
static const char *ex_type[] = {
[EDGE_DEV] = "edge",
[FANOUT_DEV] = "fanout",
[SAS_EDGE_EXPANDER_DEVICE] = "edge",
[SAS_FANOUT_EXPANDER_DEVICE] = "fanout",
};
struct domain_device *parent = child->parent;
@ -1321,8 +1321,8 @@ static int sas_check_parent_topology(struct domain_device *child)
if (!child->parent)
return 0;
if (child->parent->dev_type != EDGE_DEV &&
child->parent->dev_type != FANOUT_DEV)
if (child->parent->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
child->parent->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
return 0;
parent_ex = &child->parent->ex_dev;
@ -1341,8 +1341,8 @@ static int sas_check_parent_topology(struct domain_device *child)
child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
switch (child->parent->dev_type) {
case EDGE_DEV:
if (child->dev_type == FANOUT_DEV) {
case SAS_EDGE_EXPANDER_DEVICE:
if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING ||
child_phy->routing_attr != TABLE_ROUTING) {
sas_print_parent_topology_bug(child, parent_phy, child_phy);
@ -1366,7 +1366,7 @@ static int sas_check_parent_topology(struct domain_device *child)
}
}
break;
case FANOUT_DEV:
case SAS_FANOUT_EXPANDER_DEVICE:
if (parent_phy->routing_attr != TABLE_ROUTING ||
child_phy->routing_attr != SUBTRACTIVE_ROUTING) {
sas_print_parent_topology_bug(child, parent_phy, child_phy);
@ -1619,8 +1619,8 @@ static int sas_ex_level_discovery(struct asd_sas_port *port, const int level)
struct domain_device *dev;
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
if (dev->dev_type == EDGE_DEV ||
dev->dev_type == FANOUT_DEV) {
if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
struct sas_expander_device *ex =
rphy_to_expander_device(dev->rphy);
@ -1720,7 +1720,7 @@ static int sas_get_phy_change_count(struct domain_device *dev,
}
static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
u8 *sas_addr, enum sas_dev_type *type)
u8 *sas_addr, enum sas_device_type *type)
{
int res;
struct smp_resp *disc_resp;
@ -1849,7 +1849,7 @@ static int sas_find_bcast_dev(struct domain_device *dev,
SAS_DPRINTK("Expander phys DID NOT change\n");
}
list_for_each_entry(ch, &ex->children, siblings) {
if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) {
if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
res = sas_find_bcast_dev(ch, src_dev);
if (*src_dev)
return res;
@ -1866,8 +1866,8 @@ static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_devi
list_for_each_entry_safe(child, n, &ex->children, siblings) {
set_bit(SAS_DEV_GONE, &child->state);
if (child->dev_type == EDGE_DEV ||
child->dev_type == FANOUT_DEV)
if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
sas_unregister_ex_tree(port, child);
else
sas_unregister_dev(port, child);
@ -1887,8 +1887,8 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
if (SAS_ADDR(child->sas_addr) ==
SAS_ADDR(phy->attached_sas_addr)) {
set_bit(SAS_DEV_GONE, &child->state);
if (child->dev_type == EDGE_DEV ||
child->dev_type == FANOUT_DEV)
if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
sas_unregister_ex_tree(parent->port, child);
else
sas_unregister_dev(parent->port, child);
@ -1916,8 +1916,8 @@ static int sas_discover_bfs_by_root_level(struct domain_device *root,
int res = 0;
list_for_each_entry(child, &ex_root->children, siblings) {
if (child->dev_type == EDGE_DEV ||
child->dev_type == FANOUT_DEV) {
if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
struct sas_expander_device *ex =
rphy_to_expander_device(child->rphy);
@ -1970,8 +1970,8 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
list_for_each_entry(child, &dev->ex_dev.children, siblings) {
if (SAS_ADDR(child->sas_addr) ==
SAS_ADDR(ex_phy->attached_sas_addr)) {
if (child->dev_type == EDGE_DEV ||
child->dev_type == FANOUT_DEV)
if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
res = sas_discover_bfs_by_root(child);
break;
}
@ -1979,16 +1979,16 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
return res;
}
static bool dev_type_flutter(enum sas_dev_type new, enum sas_dev_type old)
static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old)
{
if (old == new)
return true;
/* treat device directed resets as flutter, if we went
* SAS_END_DEV to SATA_PENDING the link needs recovery
* SAS_END_DEVICE to SAS_SATA_PENDING the link needs recovery
*/
if ((old == SATA_PENDING && new == SAS_END_DEV) ||
(old == SAS_END_DEV && new == SATA_PENDING))
if ((old == SAS_SATA_PENDING && new == SAS_END_DEVICE) ||
(old == SAS_END_DEVICE && new == SAS_SATA_PENDING))
return true;
return false;
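Concretely, a disk that was reported as SAS_END_DEVICE, took a device-directed reset, and now reads back as SAS_SATA_PENDING (or vice versa) is treated as flutter and keeps its domain_device, while the comment above flags that the link still needs recovery.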
@ -1998,7 +1998,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
{
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *phy = &ex->ex_phy[phy_id];
enum sas_dev_type type = NO_DEVICE;
enum sas_device_type type = SAS_PHY_UNUSED;
u8 sas_addr[8];
int res;
@ -2032,7 +2032,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
sas_ex_phy_discover(dev, phy_id);
if (ata_dev && phy->attached_dev_type == SATA_PENDING)
if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING)
action = ", needs recovery";
SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n",
SAS_ADDR(dev->sas_addr), phy_id, action);
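All of the libsas changes in this commit are one mechanical rename: the driver-private sas_dev_type enumerators gain a SAS_ prefix so they line up with the transport-class names the old code had to translate to (compare the sas_fill_in_rphy switch below, where EDGE_DEV used to map onto SAS_EDGE_EXPANDER_DEVICE). Read off the substitutions above, the mapping is as follows (a sketch; the defining header and the numeric values are not shown in this view):

/* enum sas_dev_type  ->  enum sas_device_type
 *
 * NO_DEVICE          ->  SAS_PHY_UNUSED
 * SAS_END_DEV        ->  SAS_END_DEVICE
 * EDGE_DEV           ->  SAS_EDGE_EXPANDER_DEVICE
 * FANOUT_DEV         ->  SAS_FANOUT_EXPANDER_DEVICE
 * SATA_DEV           ->  SAS_SATA_DEV
 * SATA_PENDING       ->  SAS_SATA_PENDING
 */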

View File

@ -131,16 +131,16 @@ static inline void sas_fill_in_rphy(struct domain_device *dev,
rphy->identify.initiator_port_protocols = dev->iproto;
rphy->identify.target_port_protocols = dev->tproto;
switch (dev->dev_type) {
case SATA_DEV:
case SAS_SATA_DEV:
/* FIXME: need sata device type */
case SAS_END_DEV:
case SATA_PENDING:
case SAS_END_DEVICE:
case SAS_SATA_PENDING:
rphy->identify.device_type = SAS_END_DEVICE;
break;
case EDGE_DEV:
case SAS_EDGE_EXPANDER_DEVICE:
rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE;
break;
case FANOUT_DEV:
case SAS_FANOUT_EXPANDER_DEVICE:
rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE;
break;
default:

View File

@ -69,7 +69,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
continue;
}
if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) {
if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
dev->ex_dev.ex_change_count = -1;
for (i = 0; i < dev->ex_dev.num_phys; i++) {
struct ex_phy *phy = &dev->ex_dev.ex_phy[i];

View File

@ -46,10 +46,15 @@ struct lpfc_sli2_slim;
#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi
cmnd for menlo needs nearly twice as many as for firmware
downloads using bsg */
#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
#define LPFC_MIN_SG_SLI4_BUF_SZ 0x800 /* based on LPFC_DEFAULT_SG_SEG_CNT */
#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */
#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */
#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
#define LPFC_VNAME_LEN 100 /* vport symbolic name length */
@ -66,8 +71,10 @@ struct lpfc_sli2_slim;
* queue depths when there are driver resource error or Firmware
* resource error.
*/
#define QUEUE_RAMP_DOWN_INTERVAL (1 * HZ) /* 1 Second */
#define QUEUE_RAMP_UP_INTERVAL (300 * HZ) /* 5 minutes */
/* 1 Second */
#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1))
/* 5 minutes */
#define QUEUE_RAMP_UP_INTERVAL (msecs_to_jiffies(1000 * 300))
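The HZ-to-msecs_to_jiffies() conversion seen here repeats throughout the commit. A minimal sketch of the idiom on a hypothetical timer (not from the patch):

#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list demo_tmo;	/* hypothetical */

static void demo_arm_timeout(void)
{
	/* Old style: the unit is implicit and HZ is baked in. */
	mod_timer(&demo_tmo, jiffies + 2 * HZ);

	/* New style: the millisecond unit is explicit and the result
	 * is correct for any CONFIG_HZ. */
	mod_timer(&demo_tmo, jiffies + msecs_to_jiffies(2000));
}

For whole seconds the two forms compute the same jiffies value; the gain is readability and unit safety, not behavior.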
/* Number of exchanges reserved for discovery to complete */
#define LPFC_DISC_IOCB_BUFF_COUNT 20
@ -671,6 +678,7 @@ struct lpfc_hba {
uint32_t lmt;
uint32_t fc_topology; /* link topology, from LINK INIT */
uint32_t fc_topology_changed; /* link topology, from LINK INIT */
struct lpfc_stats fc_stat;
@ -701,9 +709,11 @@ struct lpfc_hba {
uint32_t cfg_poll_tmo;
uint32_t cfg_use_msi;
uint32_t cfg_fcp_imax;
uint32_t cfg_fcp_cpu_map;
uint32_t cfg_fcp_wq_count;
uint32_t cfg_fcp_eq_count;
uint32_t cfg_fcp_io_channel;
uint32_t cfg_total_seg_cnt;
uint32_t cfg_sg_seg_cnt;
uint32_t cfg_prot_sg_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
@ -804,8 +814,10 @@ struct lpfc_hba {
uint64_t bg_reftag_err_cnt;
/* fastpath list. */
spinlock_t scsi_buf_list_lock;
struct list_head lpfc_scsi_buf_list;
spinlock_t scsi_buf_list_get_lock; /* SCSI buf alloc list lock */
spinlock_t scsi_buf_list_put_lock; /* SCSI buf free list lock */
struct list_head lpfc_scsi_buf_list_get;
struct list_head lpfc_scsi_buf_list_put;
uint32_t total_scsi_bufs;
struct list_head lpfc_iocb_list;
uint32_t total_iocbq_bufs;

View File

@ -674,6 +674,9 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
int i;
int rc;
if (phba->pport->fc_flag & FC_OFFLINE_MODE)
return 0;
init_completion(&online_compl);
rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_OFFLINE_PREP);
@ -741,7 +744,8 @@ lpfc_selective_reset(struct lpfc_hba *phba)
int status = 0;
int rc;
if (!phba->cfg_enable_hba_reset)
if ((!phba->cfg_enable_hba_reset) ||
(phba->pport->fc_flag & FC_OFFLINE_MODE))
return -EACCES;
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
@ -895,6 +899,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
pci_disable_sriov(pdev);
phba->cfg_sriov_nr_virtfn = 0;
}
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
if (status != 0)
@ -2801,6 +2806,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"3054 lpfc_topology changed from %d to %d\n",
prev_val, val);
if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
phba->fc_topology_changed = 1;
err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
if (err) {
phba->cfg_topology = prev_val;
@ -3792,6 +3799,141 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
lpfc_fcp_imax_show, lpfc_fcp_imax_store);
/**
* lpfc_fcp_cpu_map_show - Display current driver CPU affinity
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains text describing the CPU mapping.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_vector_map_info *cpup;
int idx, len = 0;
if ((phba->sli_rev != LPFC_SLI_REV4) ||
(phba->intr_type != MSIX))
return len;
switch (phba->cfg_fcp_cpu_map) {
case 0:
len += snprintf(buf + len, PAGE_SIZE-len,
"fcp_cpu_map: No mapping (%d)\n",
phba->cfg_fcp_cpu_map);
return len;
case 1:
len += snprintf(buf + len, PAGE_SIZE-len,
"fcp_cpu_map: HBA centric mapping (%d): "
"%d online CPUs\n",
phba->cfg_fcp_cpu_map,
phba->sli4_hba.num_online_cpu);
break;
case 2:
len += snprintf(buf + len, PAGE_SIZE-len,
"fcp_cpu_map: Driver centric mapping (%d): "
"%d online CPUs\n",
phba->cfg_fcp_cpu_map,
phba->sli4_hba.num_online_cpu);
break;
}
cpup = phba->sli4_hba.cpu_map;
for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
len += snprintf(buf + len, PAGE_SIZE-len,
"CPU %02d io_chan %02d "
"physid %d coreid %d\n",
idx, cpup->channel_id, cpup->phys_id,
cpup->core_id);
else
len += snprintf(buf + len, PAGE_SIZE-len,
"CPU %02d io_chan %02d "
"physid %d coreid %d IRQ %d\n",
idx, cpup->channel_id, cpup->phys_id,
cpup->core_id, cpup->irq);
cpup++;
}
return len;
}
/**
* lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
* @buf: requested fcp_cpu_map value; runtime changes are not supported.
* @count: not used.
*
* Returns:
* -EINVAL - Not implemented yet.
**/
static ssize_t
lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int status = -EINVAL;
return status;
}
/*
# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
# for the HBA.
#
# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2).
# 0 - Do not affinitize IRQ vectors
# 1 - Affinitize HBA vectors with respect to each HBA
# (start with CPU0 for each HBA)
# 2 - Affinitize HBA vectors with respect to the entire driver
# (round robin thru all CPUs across all HBAs)
*/
static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(lpfc_fcp_cpu_map,
"Defines how to map CPUs to IRQ vectors per HBA");
/**
* lpfc_fcp_cpu_map_init - Set the initial CPU to IRQ vector mapping mode
* @phba: lpfc_hba pointer.
* @val: CPU mapping mode value.
*
* Description:
* If val is in a valid range [0-2], then affinitize the adapter's
* MSIX vectors.
*
* Returns:
* zero if val saved.
* -EINVAL val out of range
**/
static int
lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
{
if (phba->sli_rev != LPFC_SLI_REV4) {
phba->cfg_fcp_cpu_map = 0;
return 0;
}
if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
phba->cfg_fcp_cpu_map = val;
return 0;
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3326 fcp_cpu_map: %d out of range, using default\n",
val);
phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
return 0;
}
static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR,
lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store);
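Assuming this attribute is wired into lpfc_hba_attrs[] like its siblings (it is added there further down), the mapping should be readable at runtime via sysfs, e.g. "cat /sys/class/scsi_host/hostN/lpfc_fcp_cpu_map" with a hypothetical host number, while the initial mode comes from the lpfc_fcp_cpu_map module parameter above; the store handler deliberately returns -EINVAL, so runtime changes are not supported yet.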
/*
# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
# Value range is [2,3]. Default value is 3.
@ -4009,12 +4151,11 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
# 0 = disabled (default)
# 1 = enabled
# Value range is [0,1]. Default value is 0.
#
# This feature is under investigation and may be supported in the future.
*/
unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
module_param(lpfc_fcp_look_ahead, uint, S_IRUGO);
MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions");
/*
# lpfc_prot_mask: i
# - Bit mask of host protection capabilities used to register with the
@ -4071,16 +4212,23 @@ MODULE_PARM_DESC(lpfc_delay_discovery,
/*
* lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
* This value can be set to values between 64 and 256. The default value is
* This value can be set to values between 64 and 4096. The default value is
* 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
* will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
* Because of the additional overhead involved in setting up T10-DIF,
* this parameter will be limited to 128 if BlockGuard is enabled under SLI4
* and will be limited to 512 if BlockGuard is enabled under SLI3.
*/
LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT,
LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT,
"Max Protection Scatter Gather Segment Count");
/*
* This parameter will be deprecated; the driver cannot limit the
* protection data s/g list.
*/
LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT,
LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT,
"Max Protection Scatter Gather Segment Count");
struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_bg_info,
@ -4141,6 +4289,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_poll_tmo,
&dev_attr_lpfc_use_msi,
&dev_attr_lpfc_fcp_imax,
&dev_attr_lpfc_fcp_cpu_map,
&dev_attr_lpfc_fcp_wq_count,
&dev_attr_lpfc_fcp_eq_count,
&dev_attr_lpfc_fcp_io_channel,
@ -5123,6 +5272,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);

View File

@ -219,26 +219,35 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
unsigned int transfer_bytes, bytes_copied = 0;
unsigned int sg_offset, dma_offset;
unsigned char *dma_address, *sg_address;
struct scatterlist *sgel;
LIST_HEAD(temp_list);
struct sg_mapping_iter miter;
unsigned long flags;
unsigned int sg_flags = SG_MITER_ATOMIC;
bool sg_valid;
list_splice_init(&dma_buffers->list, &temp_list);
list_add(&dma_buffers->list, &temp_list);
sg_offset = 0;
sgel = bsg_buffers->sg_list;
if (to_buffers)
sg_flags |= SG_MITER_FROM_SG;
else
sg_flags |= SG_MITER_TO_SG;
sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
sg_flags);
local_irq_save(flags);
sg_valid = sg_miter_next(&miter);
list_for_each_entry(mp, &temp_list, list) {
dma_offset = 0;
while (bytes_to_transfer && sgel &&
while (bytes_to_transfer && sg_valid &&
(dma_offset < LPFC_BPL_SIZE)) {
dma_address = mp->virt + dma_offset;
if (sg_offset) {
/* Continue previous partial transfer of sg */
sg_address = sg_virt(sgel) + sg_offset;
transfer_bytes = sgel->length - sg_offset;
sg_address = miter.addr + sg_offset;
transfer_bytes = miter.length - sg_offset;
} else {
sg_address = sg_virt(sgel);
transfer_bytes = sgel->length;
sg_address = miter.addr;
transfer_bytes = miter.length;
}
if (bytes_to_transfer < transfer_bytes)
transfer_bytes = bytes_to_transfer;
@ -252,12 +261,14 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
sg_offset += transfer_bytes;
bytes_to_transfer -= transfer_bytes;
bytes_copied += transfer_bytes;
if (sg_offset >= sgel->length) {
if (sg_offset >= miter.length) {
sg_offset = 0;
sgel = sg_next(sgel);
sg_valid = sg_miter_next(&miter);
}
}
}
sg_miter_stop(&miter);
local_irq_restore(flags);
list_del_init(&dma_buffers->list);
list_splice(&temp_list, &dma_buffers->list);
return bytes_copied;
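The rework above swaps a bare sgel walk for the kernel's sg_mapping_iter, which maps each scatterlist segment as it is visited and so copes with chained and highmem s/g lists. A self-contained sketch of the API shape, with a hypothetical destination buffer (not the driver's actual helper):

#include <linux/scatterlist.h>
#include <linux/string.h>

/* Copy up to len bytes out of a scatterlist into dst (sketch). */
static void demo_copy_from_sg(struct scatterlist *sgl, unsigned int nents,
			      unsigned char *dst, size_t len)
{
	struct sg_mapping_iter miter;
	unsigned long flags;

	sg_miter_start(&miter, sgl, nents,
		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	local_irq_save(flags);		/* SG_MITER_ATOMIC maps atomically */
	while (len && sg_miter_next(&miter)) {
		size_t n = min(len, miter.length);

		memcpy(dst, miter.addr, n);	/* segment mapped at miter.addr */
		dst += n;
		len -= n;
	}
	sg_miter_stop(&miter);		/* unmaps the last segment */
	local_irq_restore(flags);
}

This mirrors the flag selection in the hunk: SG_MITER_FROM_SG when reading out of the bsg s/g list, SG_MITER_TO_SG when writing back into it.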
@ -471,6 +482,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
cmdiocbq->context1 = dd_data;
cmdiocbq->context2 = cmp;
cmdiocbq->context3 = bmp;
cmdiocbq->context_un.ndlp = ndlp;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
@ -1508,6 +1520,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
ctiocb->context1 = dd_data;
ctiocb->context2 = cmp;
ctiocb->context3 = bmp;
ctiocb->context_un.ndlp = ndlp;
ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
dd_data->type = TYPE_IOCB;
@ -2576,7 +2589,8 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
evt->wait_time_stamp = jiffies;
time_left = wait_event_interruptible_timeout(
evt->wq, !list_empty(&evt->events_to_see),
((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
msecs_to_jiffies(1000 *
((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
if (list_empty(&evt->events_to_see))
ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
else {
@ -3151,7 +3165,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
evt->waiting = 1;
time_left = wait_event_interruptible_timeout(
evt->wq, !list_empty(&evt->events_to_see),
((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
msecs_to_jiffies(1000 *
((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
evt->waiting = 0;
if (list_empty(&evt->events_to_see)) {
rc = (time_left) ? -EINTR : -ETIMEDOUT;

View File

@ -470,3 +470,4 @@ int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
void lpfc_sli4_offline_eratt(struct lpfc_hba *);

View File

@ -1811,7 +1811,8 @@ lpfc_fdmi_timeout_handler(struct lpfc_vport *vport)
if (init_utsname()->nodename[0] != '\0')
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
else
mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
mod_timer(&vport->fc_fdmitmo, jiffies +
msecs_to_jiffies(1000 * 60));
}
return;
}

View File

@ -29,6 +29,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
@ -238,7 +239,10 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
icmd->un.elsreq64.remoteID = did; /* DID */
icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
icmd->ulpTimeout = phba->fc_ratov * 2;
if (elscmd == ELS_CMD_FLOGI)
icmd->ulpTimeout = FF_DEF_RATOV * 2;
else
icmd->ulpTimeout = phba->fc_ratov * 2;
} else {
icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
@ -308,16 +312,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
/* Xmit ELS command <elsCmd> to remote NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0116 Xmit ELS command x%x to remote "
"NPORT x%x I/O tag: x%x, port state: x%x\n",
"NPORT x%x I/O tag: x%x, port state:x%x"
" fc_flag:x%x\n",
elscmd, did, elsiocb->iotag,
vport->port_state);
vport->port_state,
vport->fc_flag);
} else {
/* Xmit ELS response <elsCmd> to remote NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0117 Xmit ELS response x%x to remote "
"NPORT x%x I/O tag: x%x, size: x%x\n",
"NPORT x%x I/O tag: x%x, size: x%x "
"port_state x%x fc_flag x%x\n",
elscmd, ndlp->nlp_DID, elsiocb->iotag,
cmdSize);
cmdSize, vport->port_state,
vport->fc_flag);
}
return elsiocb;
@ -909,6 +917,23 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_PT2PT;
spin_unlock_irq(shost->host_lock);
/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
lpfc_unregister_fcf_prep(phba);
/* The FC_VFI_REGISTERED flag will get clear in the cmpl
* handler for unreg_vfi, but if we don't force the
* FC_VFI_REGISTERED flag then the reg_vfi mailbox could be
* built with the update bit set instead of just the vp bit to
* change the Nport ID. We need to have the vp set and the
* Upd cleared on topology changes.
*/
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_VFI_REGISTERED;
spin_unlock_irq(shost->host_lock);
phba->fc_topology_changed = 0;
lpfc_issue_reg_vfi(vport);
}
/* Start discovery - this should just do CLEAR_LA */
lpfc_disc_start(vport);
@ -1030,9 +1055,19 @@ stop_rr_fcf_flogi:
vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
if ((phba->sli_rev == LPFC_SLI_REV4) &&
(!(vport->fc_flag & FC_VFI_REGISTERED) ||
(vport->fc_prevDID != vport->fc_myDID))) {
if (vport->fc_flag & FC_VFI_REGISTERED)
lpfc_sli4_unreg_all_rpis(vport);
(vport->fc_prevDID != vport->fc_myDID) ||
phba->fc_topology_changed)) {
if (vport->fc_flag & FC_VFI_REGISTERED) {
if (phba->fc_topology_changed) {
lpfc_unregister_fcf_prep(phba);
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_VFI_REGISTERED;
spin_unlock_irq(shost->host_lock);
phba->fc_topology_changed = 0;
} else {
lpfc_sli4_unreg_all_rpis(vport);
}
}
lpfc_issue_reg_vfi(vport);
lpfc_nlp_put(ndlp);
goto out;
@ -1054,10 +1089,11 @@ stop_rr_fcf_flogi:
/* FLOGI completes successfully */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0101 FLOGI completes successfully "
"Data: x%x x%x x%x x%x\n",
"0101 FLOGI completes successfully, I/O tag:x%x, "
"Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag,
irsp->un.ulpWord[4], sp->cmn.e_d_tov,
sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
vport->port_state, vport->fc_flag);
if (vport->port_state == LPFC_FLOGI) {
/*
@ -5047,6 +5083,8 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct ls_rjt stat;
uint32_t cmd, did;
int rc;
uint32_t fc_flag = 0;
uint32_t port_state = 0;
cmd = *lp++;
sp = (struct serv_parm *) lp;
@ -5113,16 +5151,25 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
* will be.
*/
vport->fc_myDID = PT2PT_LocalID;
}
} else
vport->fc_myDID = PT2PT_RemoteID;
/*
* The vport state should go to LPFC_FLOGI only
* AFTER we issue a FLOGI, not receive one.
*/
spin_lock_irq(shost->host_lock);
fc_flag = vport->fc_flag;
port_state = vport->port_state;
vport->fc_flag |= FC_PT2PT;
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
vport->port_state = LPFC_FLOGI;
spin_unlock_irq(shost->host_lock);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3311 Rcv Flogi PS x%x new PS x%x "
"fc_flag x%x new fc_flag x%x\n",
port_state, vport->port_state,
fc_flag, vport->fc_flag);
/*
* We temporarily set fc_myDID to make it look like we are
@ -6241,7 +6288,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
}
if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
mod_timer(&vport->els_tmofunc,
jiffies + msecs_to_jiffies(1000 * timeout));
}
/**
@ -6612,7 +6660,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* ELS command <elsCmd> received from NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0112 ELS command x%x received from NPORT x%x "
"Data: x%x\n", cmd, did, vport->port_state);
"Data: x%x x%x x%x x%x\n",
cmd, did, vport->port_state, vport->fc_flag,
vport->fc_myDID, vport->fc_prevDID);
switch (cmd) {
case ELS_CMD_PLOGI:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@ -6621,6 +6671,19 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvPLOGI++;
ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
if (phba->sli_rev == LPFC_SLI_REV4 &&
(phba->pport->fc_flag & FC_PT2PT)) {
vport->fc_prevDID = vport->fc_myDID;
/* Our DID needs to be updated before registering
* the vfi. This is done in lpfc_rcv_plogi but
* that is called after the reg_vfi.
*/
vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3312 Remote port assigned DID x%x "
"%x\n", vport->fc_myDID,
vport->fc_prevDID);
}
lpfc_send_els_event(vport, ndlp, payload);
@ -6630,6 +6693,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_exp = LSEXP_NOTHING_MORE;
break;
}
shost = lpfc_shost_from_vport(vport);
if (vport->port_state < LPFC_DISC_AUTH) {
if (!(phba->pport->fc_flag & FC_PT2PT) ||
(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@ -6641,9 +6705,18 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* another NPort and the other side has initiated
* the PLOGI before responding to our FLOGI.
*/
if (phba->sli_rev == LPFC_SLI_REV4 &&
(phba->fc_topology_changed ||
vport->fc_myDID != vport->fc_prevDID)) {
lpfc_unregister_fcf_prep(phba);
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_VFI_REGISTERED;
spin_unlock_irq(shost->host_lock);
phba->fc_topology_changed = 0;
lpfc_issue_reg_vfi(vport);
}
}
shost = lpfc_shost_from_vport(vport);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
spin_unlock_irq(shost->host_lock);
@ -7002,8 +7075,11 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
spin_lock_irq(shost->host_lock);
if (vport->fc_flag & FC_DISC_DELAYED) {
spin_unlock_irq(shost->host_lock);
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"3334 Delay fc port discovery for %d seconds\n",
phba->fc_ratov);
mod_timer(&vport->delayed_disc_tmo,
jiffies + HZ * phba->fc_ratov);
jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
return;
}
spin_unlock_irq(shost->host_lock);
@ -7287,7 +7363,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
return;
shost = lpfc_shost_from_vport(phba->pport);
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
spin_unlock_irq(shost->host_lock);
@ -7791,7 +7867,8 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
/* Start a timer to unblock fabric iocbs after 100ms */
if (!blocked)
mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
mod_timer(&phba->fabric_block_timer,
jiffies + msecs_to_jiffies(100));
return;
}

View File

@ -160,11 +160,12 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
if (!list_empty(&evtp->evt_listp))
return;
evtp->evt_arg1 = lpfc_nlp_get(ndlp);
spin_lock_irq(&phba->hbalock);
/* We need to hold the node by incrementing the reference
* count until this queued work is done
*/
evtp->evt_arg1 = lpfc_nlp_get(ndlp);
if (evtp->evt_arg1) {
evtp->evt = LPFC_EVT_DEV_LOSS;
list_add_tail(&evtp->evt_listp, &phba->work_list);
@ -1008,9 +1009,6 @@ lpfc_linkup(struct lpfc_hba *phba)
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
lpfc_linkup_port(vports[i]);
lpfc_destroy_vport_work_array(phba, vports);
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
(phba->sli_rev < LPFC_SLI_REV4))
lpfc_issue_clear_la(phba, phba->pport);
return 0;
}
@ -1436,7 +1434,8 @@ lpfc_register_fcf(struct lpfc_hba *phba)
if (phba->fcf.fcf_flag & FCF_REGISTERED) {
phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
phba->hba_flag &= ~FCF_TS_INPROG;
if (phba->pport->port_state != LPFC_FLOGI) {
if (phba->pport->port_state != LPFC_FLOGI &&
phba->pport->fc_flag & FC_FABRIC) {
phba->hba_flag |= FCF_RR_INPROG;
spin_unlock_irq(&phba->hbalock);
lpfc_initial_flogi(phba->pport);
@ -2270,8 +2269,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2836 New FCF matches in-use "
"FCF (x%x)\n",
phba->fcf.current_rec.fcf_indx);
"FCF (x%x), port_state:x%x, "
"fc_flag:x%x\n",
phba->fcf.current_rec.fcf_indx,
phba->pport->port_state,
phba->pport->fc_flag);
goto out;
} else
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
@ -2796,7 +2798,19 @@ void
lpfc_issue_init_vpi(struct lpfc_vport *vport)
{
LPFC_MBOXQ_t *mboxq;
int rc;
int rc, vpi;
if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
vpi = lpfc_alloc_vpi(vport->phba);
if (!vpi) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_MBOX,
"3303 Failed to obtain vport vpi\n");
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
return;
}
vport->vpi = vpi;
}
mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
@ -2894,9 +2908,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
goto out_free_mem;
}
/* If the VFI is already registered, there is nothing else to do */
/* If the VFI is already registered, there is nothing else to do
* Unless this was a VFI update and we are in PT2PT mode, then
* we should drop through to set the port state to ready.
*/
if (vport->fc_flag & FC_VFI_REGISTERED)
goto out_free_mem;
if (!(phba->sli_rev == LPFC_SLI_REV4 &&
vport->fc_flag & FC_PT2PT))
goto out_free_mem;
/* The VPI is implicitly registered when the VFI is registered */
spin_lock_irq(shost->host_lock);
@ -2913,6 +2932,13 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
goto out_free_mem;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x "
"alpacnt:%d LinkState:%x topology:%x\n",
vport->port_state, vport->fc_flag, vport->fc_myDID,
vport->phba->alpa_map[0],
phba->link_state, phba->fc_topology);
if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
/*
* For private loop or for NPort pt2pt,
@ -2925,7 +2951,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
/* Use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
/* Start discovery */
lpfc_disc_start(vport);
if (vport->fc_flag & FC_PT2PT)
vport->port_state = LPFC_VPORT_READY;
else
lpfc_disc_start(vport);
} else {
lpfc_start_fdiscs(phba);
lpfc_do_scr_ns_plogi(phba, vport);
@ -3007,6 +3036,15 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
break;
}
if (phba->fc_topology &&
phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"3314 Toplogy changed was 0x%x is 0x%x\n",
phba->fc_topology,
bf_get(lpfc_mbx_read_top_topology, la));
phba->fc_topology_changed = 1;
}
phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
@ -4235,7 +4273,7 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
tmo, vport->port_state, vport->fc_flag);
}
mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_DISC_TMO;
spin_unlock_irq(shost->host_lock);
@ -4949,8 +4987,12 @@ lpfc_disc_start(struct lpfc_vport *vport)
uint32_t clear_la_pending;
int did_changed;
if (!lpfc_is_link_up(phba))
if (!lpfc_is_link_up(phba)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"3315 Link is not up %x\n",
phba->link_state);
return;
}
if (phba->link_state == LPFC_CLEAR_LA)
clear_la_pending = 1;
@ -4983,11 +5025,13 @@ lpfc_disc_start(struct lpfc_vport *vport)
if (num_sent)
return;
/* Register the VPI for SLI3, NON-NPIV only. */
/* Register the VPI for SLI3, NPIV only. */
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
!(vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_RSCN_MODE) &&
(phba->sli_rev < LPFC_SLI_REV4)) {
if (vport->port_type == LPFC_PHYSICAL_PORT)
lpfc_issue_clear_la(phba, vport);
lpfc_issue_reg_vpi(phba, vport);
return;
}
@ -5410,7 +5454,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (vport->cfg_fdmi_on == 1)
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
else
mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
mod_timer(&vport->fc_fdmitmo,
jiffies + msecs_to_jiffies(1000 * 60));
/* decrement the node reference count held for this callback
* function.
@ -5855,7 +5900,7 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
struct lpfc_vport **vports;
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost;
int i, rc;
int i = 0, rc;
/* Unregister RPIs */
if (lpfc_fcf_inuse(phba))
@ -5883,6 +5928,20 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
spin_unlock_irq(shost->host_lock);
}
lpfc_destroy_vport_work_array(phba, vports);
if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
if (ndlp)
lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
lpfc_cleanup_pending_mbox(phba->pport);
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_sli4_unreg_all_rpis(phba->pport);
lpfc_mbx_unreg_vpi(phba->pport);
shost = lpfc_shost_from_vport(phba->pport);
spin_lock_irq(shost->host_lock);
phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
spin_unlock_irq(shost->host_lock);
}
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_all_cmd(phba);

View File

@ -1667,6 +1667,7 @@ enum lpfc_protgrp_type {
#define BG_OP_IN_CSUM_OUT_CSUM 0x5
#define BG_OP_IN_CRC_OUT_CSUM 0x6
#define BG_OP_IN_CSUM_OUT_CRC 0x7
#define BG_OP_RAW_MODE 0x8
struct lpfc_pde5 {
uint32_t word0;

View File

@ -200,6 +200,11 @@ struct lpfc_sli_intf {
#define LPFC_MAX_IMAX 5000000
#define LPFC_DEF_IMAX 50000
#define LPFC_MIN_CPU_MAP 0
#define LPFC_MAX_CPU_MAP 2
#define LPFC_HBA_CPU_MAP 1
#define LPFC_DRIVER_CPU_MAP 2 /* Default */
/* PORT_CAPABILITIES constants. */
#define LPFC_MAX_SUPPORTED_PAGES 8
@ -621,7 +626,7 @@ struct lpfc_register {
#define lpfc_sliport_status_rdy_SHIFT 23
#define lpfc_sliport_status_rdy_MASK 0x1
#define lpfc_sliport_status_rdy_WORD word0
#define MAX_IF_TYPE_2_RESETS 1000
#define MAX_IF_TYPE_2_RESETS 6
#define LPFC_CTL_PORT_CTL_OFFSET 0x408
#define lpfc_sliport_ctrl_end_SHIFT 30

View File

@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@ -58,6 +59,9 @@ char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;
/* Used when mapping IRQ vectors in a driver centric manner */
uint16_t lpfc_used_cpu[LPFC_MAX_CPU];
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
@ -541,13 +545,16 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Set up ring-0 (ELS) timer */
timeout = phba->fc_ratov * 2;
mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
mod_timer(&vport->els_tmofunc,
jiffies + msecs_to_jiffies(1000 * timeout));
/* Set up heart beat (HB) timer */
mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
mod_timer(&phba->hb_tmofunc,
jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
phba->hb_outstanding = 0;
phba->last_completion_time = jiffies;
/* Set up error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
mod_timer(&phba->eratt_poll,
jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
if (phba->hba_flag & LINK_DISABLED) {
lpfc_printf_log(phba,
@ -908,9 +915,9 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
psb->pCmd = NULL;
psb->status = IOSTAT_SUCCESS;
}
spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
list_splice(&aborts, &phba->lpfc_scsi_buf_list);
spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
return 0;
}
@ -1021,7 +1028,8 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
!(phba->link_state == LPFC_HBA_ERROR) &&
!(phba->pport->load_flag & FC_UNLOADING))
mod_timer(&phba->hb_tmofunc,
jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
jiffies +
msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
return;
}
@ -1064,15 +1072,18 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
spin_lock_irq(&phba->pport->work_port_lock);
if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
jiffies)) {
if (time_after(phba->last_completion_time +
msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
jiffies)) {
spin_unlock_irq(&phba->pport->work_port_lock);
if (!phba->hb_outstanding)
mod_timer(&phba->hb_tmofunc,
jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
jiffies +
msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
else
mod_timer(&phba->hb_tmofunc,
jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
jiffies +
msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
return;
}
spin_unlock_irq(&phba->pport->work_port_lock);
@ -1104,7 +1115,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
if (!pmboxq) {
mod_timer(&phba->hb_tmofunc,
jiffies +
HZ * LPFC_HB_MBOX_INTERVAL);
msecs_to_jiffies(1000 *
LPFC_HB_MBOX_INTERVAL));
return;
}
@ -1120,7 +1132,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
phba->mbox_mem_pool);
mod_timer(&phba->hb_tmofunc,
jiffies +
HZ * LPFC_HB_MBOX_INTERVAL);
msecs_to_jiffies(1000 *
LPFC_HB_MBOX_INTERVAL));
return;
}
phba->skipped_hb = 0;
@ -1136,7 +1149,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
phba->skipped_hb = jiffies;
mod_timer(&phba->hb_tmofunc,
jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
jiffies +
msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
return;
} else {
/*
@ -1150,7 +1164,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
jiffies_to_msecs(jiffies
- phba->last_completion_time));
mod_timer(&phba->hb_tmofunc,
jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
jiffies +
msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
}
}
}
@ -1191,7 +1206,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
* This routine is called to bring a SLI4 HBA offline when HBA hardware error
* other than Port Error 6 has been detected.
**/
static void
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
@ -2633,6 +2648,7 @@ lpfc_online(struct lpfc_hba *phba)
struct lpfc_vport *vport;
struct lpfc_vport **vports;
int i;
bool vpis_cleared = false;
if (!phba)
return 0;
@ -2656,6 +2672,10 @@ lpfc_online(struct lpfc_hba *phba)
lpfc_unblock_mgmt_io(phba);
return 1;
}
spin_lock_irq(&phba->hbalock);
if (!phba->sli4_hba.max_cfg_param.vpi_used)
vpis_cleared = true;
spin_unlock_irq(&phba->hbalock);
} else {
if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
lpfc_unblock_mgmt_io(phba);
@ -2672,8 +2692,13 @@ lpfc_online(struct lpfc_hba *phba)
vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
if (phba->sli_rev == LPFC_SLI_REV4)
if (phba->sli_rev == LPFC_SLI_REV4) {
vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
if ((vpis_cleared) &&
(vports[i]->port_type !=
LPFC_PHYSICAL_PORT))
vports[i]->vpi = 0;
}
spin_unlock_irq(shost->host_lock);
}
lpfc_destroy_vport_work_array(phba, vports);
@ -2833,16 +2858,30 @@ lpfc_scsi_free(struct lpfc_hba *phba)
struct lpfc_iocbq *io, *io_next;
spin_lock_irq(&phba->hbalock);
/* Release all the lpfc_scsi_bufs maintained by this host. */
spin_lock(&phba->scsi_buf_list_lock);
list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
spin_lock(&phba->scsi_buf_list_put_lock);
list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
list) {
list_del(&sb->list);
pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
sb->dma_handle);
kfree(sb);
phba->total_scsi_bufs--;
}
spin_unlock(&phba->scsi_buf_list_lock);
spin_unlock(&phba->scsi_buf_list_put_lock);
spin_lock(&phba->scsi_buf_list_get_lock);
list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
list) {
list_del(&sb->list);
pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
sb->dma_handle);
kfree(sb);
phba->total_scsi_bufs--;
}
spin_unlock(&phba->scsi_buf_list_get_lock);
/* Release all the lpfc_iocbq entries maintained by this host. */
list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
@ -2978,9 +3017,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
phba->sli4_hba.scsi_xri_cnt,
phba->sli4_hba.scsi_xri_max);
spin_lock_irq(&phba->scsi_buf_list_lock);
list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list);
spin_unlock_irq(&phba->scsi_buf_list_lock);
spin_lock_irq(&phba->scsi_buf_list_get_lock);
spin_lock_irq(&phba->scsi_buf_list_put_lock);
list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
spin_unlock_irq(&phba->scsi_buf_list_put_lock);
spin_unlock_irq(&phba->scsi_buf_list_get_lock);
if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
/* max scsi xri shrinked below the allocated scsi buffers */
@ -2994,9 +3036,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
psb->dma_handle);
kfree(psb);
}
spin_lock_irq(&phba->scsi_buf_list_lock);
spin_lock_irq(&phba->scsi_buf_list_get_lock);
phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
spin_unlock_irq(&phba->scsi_buf_list_lock);
spin_unlock_irq(&phba->scsi_buf_list_get_lock);
}
/* update xris associated to remaining allocated scsi buffers */
@ -3014,9 +3056,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
psb->cur_iocbq.sli4_lxritag = lxri;
psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
}
spin_lock_irq(&phba->scsi_buf_list_lock);
list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list);
spin_unlock_irq(&phba->scsi_buf_list_lock);
spin_lock_irq(&phba->scsi_buf_list_get_lock);
spin_lock_irq(&phba->scsi_buf_list_put_lock);
list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
spin_unlock_irq(&phba->scsi_buf_list_put_lock);
spin_unlock_irq(&phba->scsi_buf_list_get_lock);
return 0;
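These hunks split the single scsi_buf free list into a get list and a put list with independent locks, so I/O submission and completion no longer serialize on one spinlock. The consumer side of such a scheme would look roughly like this (a sketch; the matching lpfc_get_scsi_buf()/lpfc_release_scsi_buf() changes are not visible in this view):

static struct lpfc_scsi_buf *
demo_get_scsi_buf(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb = NULL;
	unsigned long iflag;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	if (list_empty(&phba->lpfc_scsi_buf_list_get)) {
		/* Get list ran dry: pull over everything the completion
		 * path has freed onto the put list. */
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice_init(&phba->lpfc_scsi_buf_list_put,
				 &phba->lpfc_scsi_buf_list_get);
		spin_unlock(&phba->scsi_buf_list_put_lock);
	}
	if (!list_empty(&phba->lpfc_scsi_buf_list_get)) {
		psb = list_first_entry(&phba->lpfc_scsi_buf_list_get,
				       struct lpfc_scsi_buf, list);
		list_del(&psb->list);
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
	return psb;
}

Frees then only ever take scsi_buf_list_put_lock, which is why lpfc_hba_down_post_s4 above splices its aborted buffers onto the put list.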
@ -3197,14 +3242,15 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
stat = 1;
goto finished;
}
if (time >= 30 * HZ) {
if (time >= msecs_to_jiffies(30 * 1000)) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0461 Scanning longer than 30 "
"seconds. Continuing initialization\n");
stat = 1;
goto finished;
}
if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
if (time >= msecs_to_jiffies(15 * 1000) &&
phba->link_state <= LPFC_LINK_DOWN) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0465 Link down longer than 15 "
"seconds. Continuing initialization\n");
@ -3216,7 +3262,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
goto finished;
if (vport->num_disc_nodes || vport->fc_prli_sent)
goto finished;
if (vport->fc_map_cnt == 0 && time < 2 * HZ)
if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
goto finished;
if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
goto finished;
@ -4215,7 +4261,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
* If there are other active VLinks present,
* re-instantiate the Vlink using FDISC.
*/
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000));
shost = lpfc_shost_from_vport(vport);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
@ -4707,24 +4754,53 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
return -ENOMEM;
/*
* Since the sg_tablesize is module parameter, the sg_dma_buf_size
* Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
* used to create the sg_dma_buf_pool must be dynamically calculated.
* 2 segments are added since the IOCB needs a command and response bde.
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) +
((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
if (phba->cfg_enable_bg) {
phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
phba->cfg_sg_dma_buf_size +=
phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
}
/* Also reinitialize the host templates with new values. */
/* Initialize the host templates with the configured values. */
lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
if (phba->cfg_enable_bg) {
/*
* The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
* the FCP rsp, and a BDE for each. Since we have no control
* over how many protection data segments the SCSI Layer
* will hand us (ie: there could be one for every block
* in the IO), we just allocate enough BDEs to accommodate
* our max amount and we need to limit lpfc_sg_seg_cnt to
* minimize the risk of running out.
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) +
(LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
} else {
/*
* The scsi_buf for a regular I/O will hold the FCP cmnd,
* the FCP rsp, a BDE for each, and a BDE for up to
* cfg_sg_seg_cnt data segments.
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) +
((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
/* Total BDEs in BPL for scsi_sg_list */
phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
"9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
phba->cfg_total_seg_cnt);
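To make the sizing concrete, a worked example for the non-BlockGuard branch (struct sizes left symbolic, since they depend on arch and padding):

/* Sketch: default lpfc_sg_seg_cnt == 64, BlockGuard disabled.
 *
 * cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
 *                     + sizeof(struct fcp_rsp)
 *                     + (64 + 2) * sizeof(struct ulp_bde64);
 * cfg_total_seg_cnt   = 64 + 2;  (64 data BDEs + cmnd BDE + rsp BDE)
 */

With BlockGuard on, the buffer is instead sized for the full LPFC_MAX_SG_SEG_CNT worth of BDEs and lpfc_sg_seg_cnt is clamped to LPFC_MAX_SG_SEG_CNT_DIF, exactly as the branch above reads.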
phba->max_vpi = LPFC_MAX_VPI;
/* This will be set to correct value after config_port mbox */
phba->max_vports = 0;
@ -4789,13 +4865,13 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
struct lpfc_vector_map_info *cpup;
struct lpfc_sli *psli;
LPFC_MBOXQ_t *mboxq;
int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
int rc, i, hbq_count, max_buf_size;
uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
struct lpfc_mqe *mqe;
int longs, sli_family;
int sges_per_segment;
int longs;
/* Before proceed, wait for POST done and device ready */
rc = lpfc_sli4_post_status_check(phba);
@ -4863,11 +4939,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
/* With BlockGuard we can have multiple SGEs per Data Segemnt */
sges_per_segment = 1;
if (phba->cfg_enable_bg)
sges_per_segment = 2;
/*
* For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
* we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
@ -4878,43 +4949,71 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
sizeof(struct lpfc_sli_ring), GFP_KERNEL);
if (!phba->sli.ring)
return -ENOMEM;
/*
* Since the sg_tablesize is module parameter, the sg_dma_buf_size
* used to create the sg_dma_buf_pool must be dynamically calculated.
* 2 segments are added since the IOCB needs a command and response bde.
* To insure that the scsi sgl does not cross a 4k page boundary only
* sgl sizes of must be a power of 2.
*/
buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
(((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) *
sizeof(struct sli4_sge)));
sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
switch (sli_family) {
case LPFC_SLI_INTF_FAMILY_BE2:
case LPFC_SLI_INTF_FAMILY_BE3:
/* There is a single hint for BE - 2 pages per BPL. */
if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_SLI_HINT1_1)
max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
break;
case LPFC_SLI_INTF_FAMILY_LNCR_A0:
case LPFC_SLI_INTF_FAMILY_LNCR_B0:
default:
break;
/*
* It doesn't matter what family our adapter is in, we are
* limited to 2 Pages, 512 SGEs, for our SGL.
* There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
*/
max_buf_size = (2 * SLI4_PAGE_SIZE);
if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
/*
* Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
* used to create the sg_dma_buf_pool must be dynamically calculated.
*/
if (phba->cfg_enable_bg) {
/*
* The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
* the FCP rsp, and a SGE for each. Since we have no control
* over how many protection data segments the SCSI Layer
* will hand us (ie: there could be one for every block
* in the IO), we just allocate enough SGEs to accommodate
* our max amount and we need to limit lpfc_sg_seg_cnt to
* minimize the risk of running out.
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) + max_buf_size;
/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
} else {
/*
* The scsi_buf for a regular I/O will hold the FCP cmnd,
* the FCP rsp, a SGE for each, and a SGE for up to
* cfg_sg_seg_cnt data segments.
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) +
((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
/* Total SGEs for scsi_sg_list */
phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
/*
* NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
* to post 1 page for the SGL.
*/
}
for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
dma_buf_size < max_buf_size && buf_size > dma_buf_size;
dma_buf_size = dma_buf_size << 1)
;
if (dma_buf_size == max_buf_size)
phba->cfg_sg_seg_cnt = (dma_buf_size -
sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
(2 * sizeof(struct sli4_sge))) /
sizeof(struct sli4_sge);
phba->cfg_sg_dma_buf_size = dma_buf_size;
/* Initialize the host templates with the updated values. */
lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
else
phba->cfg_sg_dma_buf_size =
SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
"9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
phba->cfg_total_seg_cnt);
/* Initialize buffer queue management fields */
hbq_count = lpfc_sli_hbq_count();
@ -5104,6 +5203,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_free_fcp_eq_hdl;
}
phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
phba->sli4_hba.num_present_cpu),
GFP_KERNEL);
if (!phba->sli4_hba.cpu_map) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3327 Failed allocate memory for msi-x "
"interrupt vector mapping\n");
rc = -ENOMEM;
goto out_free_msix;
}
/* Initialize io channels for round robin */
cpup = phba->sli4_hba.cpu_map;
rc = 0;
for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
cpup->channel_id = rc;
rc++;
if (rc >= phba->cfg_fcp_io_channel)
rc = 0;
}
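For illustration, with hypothetical values of 8 present CPUs and cfg_fcp_io_channel == 4, the seeding loop above produces:

/* CPU:     0  1  2  3  4  5  6  7
 * io_chan: 0  1  2  3  0  1  2  3   (wraps at cfg_fcp_io_channel)
 */

lpfc_sli4_set_affinity() later refines these defaults when the MSI-X vectors are actually mapped to CPUs.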
/*
* Enable sr-iov virtual functions if supported and configured
* through the module parameter.
@ -5123,6 +5242,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
return 0;
out_free_msix:
kfree(phba->sli4_hba.msix_entries);
out_free_fcp_eq_hdl:
kfree(phba->sli4_hba.fcp_eq_hdl);
out_free_fcf_rr_bmask:
@ -5152,6 +5273,11 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
/* Free memory allocated for msi-x interrupt vector to CPU mapping */
kfree(phba->sli4_hba.cpu_map);
phba->sli4_hba.num_present_cpu = 0;
phba->sli4_hba.num_online_cpu = 0;
/* Free memory allocated for msi-x interrupt vector entries */
kfree(phba->sli4_hba.msix_entries);
@ -5260,8 +5386,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
init_waitqueue_head(&phba->work_waitq);
/* Initialize the scsi buffer list used by driver for scsi IO */
spin_lock_init(&phba->scsi_buf_list_lock);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
spin_lock_init(&phba->scsi_buf_list_get_lock);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
spin_lock_init(&phba->scsi_buf_list_put_lock);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
/* Initialize the fabric iocb list */
INIT_LIST_HEAD(&phba->fabric_iocb_list);
@ -6696,6 +6824,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
int cfg_fcp_io_channel;
uint32_t cpu;
uint32_t i = 0;
uint32_t j = 0;
/*
@ -6706,15 +6835,21 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
/* Sanity check on HBA EQ parameters */
cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
/* It doesn't make sense to have more io channels then CPUs */
for_each_online_cpu(cpu) {
i++;
/* It doesn't make sense to have more io channels than online CPUs */
for_each_present_cpu(cpu) {
if (cpu_online(cpu))
i++;
j++;
}
phba->sli4_hba.num_online_cpu = i;
phba->sli4_hba.num_present_cpu = j;
if (i < cfg_fcp_io_channel) {
lpfc_printf_log(phba,
KERN_ERR, LOG_INIT,
"3188 Reducing IO channels to match number of "
"CPUs: from %d to %d\n", cfg_fcp_io_channel, i);
"online CPUs: from %d to %d\n",
cfg_fcp_io_channel, i);
cfg_fcp_io_channel = i;
}
@ -7743,8 +7878,13 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
out:
/* Catch the not-ready port failure after a port reset. */
if (num_resets >= MAX_IF_TYPE_2_RESETS)
if (num_resets >= MAX_IF_TYPE_2_RESETS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3317 HBA not functional: IP Reset Failed "
"after (%d) retries, try: "
"echo fw_reset > board_mode\n", num_resets);
rc = -ENODEV;
}
return rc;
}
@ -8208,6 +8348,269 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)
return;
}
/**
* lpfc_find_next_cpu - Find next available CPU that matches the phys_id
* @phba: pointer to lpfc hba data structure.
*
* Find next available CPU to use for IRQ to CPU affinity.
*/
static int
lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id)
{
struct lpfc_vector_map_info *cpup;
int cpu;
cpup = phba->sli4_hba.cpu_map;
for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
/* CPU must be online */
if (cpu_online(cpu)) {
if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
(lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) &&
(cpup->phys_id == phys_id)) {
return cpu;
}
}
cpup++;
}
/*
* If we get here, we have used ALL CPUs for the specific
* phys_id. Now we need to clear out lpfc_used_cpu and start
* reusing CPUs.
*/
for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
if (lpfc_used_cpu[cpu] == phys_id)
lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
}
cpup = phba->sli4_hba.cpu_map;
for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
/* CPU must be online */
if (cpu_online(cpu)) {
if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
(cpup->phys_id == phys_id)) {
return cpu;
}
}
cpup++;
}
return LPFC_VECTOR_MAP_EMPTY;
}
/**
* lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors
* @phba: pointer to lpfc hba data structure.
* @vectors: number of HBA vectors
*
* Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector
* affinitization across multiple physical CPUs (NUMA nodes).
* In addition, this routine will assign an IO channel for each CPU
* to use when issuing I/Os.
*/
static int
lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
{
int i, idx, saved_chann, used_chann, cpu, phys_id;
int max_phys_id, num_io_channel, first_cpu;
struct lpfc_vector_map_info *cpup;
#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo;
#endif
struct cpumask *mask;
uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
/* If there is no mapping, just return */
if (!phba->cfg_fcp_cpu_map)
return 1;
/* Init cpu_map array */
memset(phba->sli4_hba.cpu_map, 0xff,
(sizeof(struct lpfc_vector_map_info) *
phba->sli4_hba.num_present_cpu));
max_phys_id = 0;
phys_id = 0;
num_io_channel = 0;
first_cpu = LPFC_VECTOR_MAP_EMPTY;
/* Update CPU map with physical id and core id of each CPU */
cpup = phba->sli4_hba.cpu_map;
for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
#ifdef CONFIG_X86
cpuinfo = &cpu_data(cpu);
cpup->phys_id = cpuinfo->phys_proc_id;
cpup->core_id = cpuinfo->cpu_core_id;
#else
/* No distinction between CPUs for other platforms */
cpup->phys_id = 0;
cpup->core_id = 0;
#endif
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3328 CPU physid %d coreid %d\n",
cpup->phys_id, cpup->core_id);
if (cpup->phys_id > max_phys_id)
max_phys_id = cpup->phys_id;
cpup++;
}
/* Now associate the HBA vectors with specific CPUs */
for (idx = 0; idx < vectors; idx++) {
cpup = phba->sli4_hba.cpu_map;
cpu = lpfc_find_next_cpu(phba, phys_id);
if (cpu == LPFC_VECTOR_MAP_EMPTY) {
/* Try for all phys_id's */
for (i = 1; i < max_phys_id; i++) {
phys_id++;
if (phys_id > max_phys_id)
phys_id = 0;
cpu = lpfc_find_next_cpu(phba, phys_id);
if (cpu == LPFC_VECTOR_MAP_EMPTY)
continue;
goto found;
}
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3329 Cannot set affinity:"
"Error mapping vector %d (%d)\n",
idx, vectors);
return 0;
}
found:
cpup += cpu;
if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
lpfc_used_cpu[cpu] = phys_id;
/* Associate vector with selected CPU */
cpup->irq = phba->sli4_hba.msix_entries[idx].vector;
/* Associate IO channel with selected CPU */
cpup->channel_id = idx;
num_io_channel++;
if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
first_cpu = cpu;
/* Now affinitize to the selected CPU */
mask = &cpup->maskbits;
cpumask_clear(mask);
cpumask_set_cpu(cpu, mask);
i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
vector, mask);
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3330 Set Affinity: CPU %d channel %d "
"irq %d (%x)\n",
cpu, cpup->channel_id,
phba->sli4_hba.msix_entries[idx].vector, i);
/* Spread vector mapping across multiple physical CPU nodes */
phys_id++;
if (phys_id > max_phys_id)
phys_id = 0;
}
/*
* Finally fill in the IO channel for any remaining CPUs.
* At this point, all IO channels have been assigned to a specific
* MSIx vector, mapped to a specific CPU.
* Base the remaining IO channel assignments on the IO channels
* already assigned to other CPUs on the same phys_id.
*/
for (i = 0; i <= max_phys_id; i++) {
/*
* If there are no io channels already mapped to
* this phys_id, just round robin thru the io_channels.
* Setup chann[] for round robin.
*/
for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
chann[idx] = idx;
saved_chann = 0;
used_chann = 0;
/*
* First build a list of IO channels already assigned
* to this phys_id before reassigning the same IO
* channels to the remaining CPUs.
*/
cpup = phba->sli4_hba.cpu_map;
cpu = first_cpu;
cpup += cpu;
for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
idx++) {
if (cpup->phys_id == i) {
/*
* Save any IO channels that are
* already mapped to this phys_id.
*/
if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
chann[saved_chann] =
cpup->channel_id;
saved_chann++;
goto out;
}
/* See if we are using round-robin */
if (saved_chann == 0)
saved_chann =
phba->cfg_fcp_io_channel;
/* Associate next IO channel with CPU */
cpup->channel_id = chann[used_chann];
num_io_channel++;
used_chann++;
if (used_chann == saved_chann)
used_chann = 0;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3331 Set IO_CHANN "
"CPU %d channel %d\n",
idx, cpup->channel_id);
}
out:
cpu++;
if (cpu >= phba->sli4_hba.num_present_cpu) {
cpup = phba->sli4_hba.cpu_map;
cpu = 0;
} else {
cpup++;
}
}
}
if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
cpup = phba->sli4_hba.cpu_map;
for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
cpup->channel_id = 0;
num_io_channel++;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3332 Assign IO_CHANN "
"CPU %d channel %d\n",
idx, cpup->channel_id);
}
cpup++;
}
}
/* Sanity check */
if (num_io_channel != phba->sli4_hba.num_present_cpu)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3333 Set affinity mismatch:"
"%d chann != %d cpus: %d vactors\n",
num_io_channel, phba->sli4_hba.num_present_cpu,
vectors);
phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
return 1;
}
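/*
 * Editor's sketch (not driver code): a minimal user-space model of the
 * spreading policy above. The topology array, CPU and vector counts are
 * hypothetical; the point is only that each vector advances phys_id so
 * consecutive vectors land on different physical packages, wrapping at
 * max_phys_id just as lpfc_sli4_set_affinity() does.
 */
#include <stdio.h>

#define NR_CPUS_MODEL 8
#define NR_VECTORS_MODEL 4

int main(void)
{
	/* hypothetical topology: CPUs 0-3 on package 0, CPUs 4-7 on package 1 */
	int phys_id_of[NR_CPUS_MODEL] = { 0, 0, 0, 0, 1, 1, 1, 1 };
	int used[NR_CPUS_MODEL] = { 0 };
	int max_phys_id = 1, phys_id = 0;
	int idx, cpu;

	for (idx = 0; idx < NR_VECTORS_MODEL; idx++) {
		/* find the next unused CPU on the current package */
		for (cpu = 0; cpu < NR_CPUS_MODEL; cpu++)
			if (!used[cpu] && phys_id_of[cpu] == phys_id)
				break;
		if (cpu == NR_CPUS_MODEL)
			break;	/* package exhausted; the driver retries others */
		used[cpu] = 1;
		printf("vector %d -> cpu %d (package %d)\n", idx, cpu, phys_id);
		/* advance to the next package, wrapping like the code above */
		if (++phys_id > max_phys_id)
			phys_id = 0;
	}
	return 0;
}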
/**
* lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
* @phba: pointer to lpfc hba data structure.
@ -8259,9 +8662,7 @@ enable_msix_vectors:
phba->sli4_hba.msix_entries[index].vector,
phba->sli4_hba.msix_entries[index].entry);
/*
* Assign MSI-X vectors to interrupt handlers
*/
/* Assign MSI-X vectors to interrupt handlers */
for (index = 0; index < vectors; index++) {
memset(&phba->sli4_hba.handler_name[index], 0, 16);
sprintf((char *)&phba->sli4_hba.handler_name[index],
@ -8289,6 +8690,8 @@ enable_msix_vectors:
phba->cfg_fcp_io_channel, vectors);
phba->cfg_fcp_io_channel = vectors;
}
lpfc_sli4_set_affinity(phba, vectors);
return rc;
cfg_fail_out:
@ -9213,15 +9616,15 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
/* Block all SCSI devices' I/Os on the host */
lpfc_scsi_dev_block(phba);
/* Flush all driver's outstanding SCSI I/Os as we are to reset */
lpfc_sli_flush_fcp_rings(phba);
/* stop all timers */
lpfc_stop_hba_timers(phba);
/* Disable interrupt and pci device */
lpfc_sli_disable_intr(phba);
pci_disable_device(phba->pcidev);
/* Flush all driver's outstanding SCSI I/Os as we are to reset */
lpfc_sli_flush_fcp_rings(phba);
}
/**
@ -9966,6 +10369,9 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
/* Block all SCSI devices' I/Os on the host */
lpfc_scsi_dev_block(phba);
/* Flush all driver's outstanding SCSI I/Os as we are to reset */
lpfc_sli_flush_fcp_rings(phba);
/* stop all timers */
lpfc_stop_hba_timers(phba);
@ -9973,9 +10379,6 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
lpfc_sli4_disable_intr(phba);
lpfc_sli4_queue_destroy(phba);
pci_disable_device(phba->pcidev);
/* Flush all driver's outstanding SCSI I/Os as we are to reset */
lpfc_sli_flush_fcp_rings(phba);
}
/**
@ -10535,6 +10938,7 @@ static struct miscdevice lpfc_mgmt_dev = {
static int __init
lpfc_init(void)
{
int cpu;
int error = 0;
printk(LPFC_MODULE_DESC "\n");
@ -10561,6 +10965,11 @@ lpfc_init(void)
return -ENOMEM;
}
}
/* Initialize in case vector mapping is needed */
for (cpu = 0; cpu < LPFC_MAX_CPU; cpu++)
lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
error = pci_register_driver(&lpfc_driver);
if (error) {
fc_release_transport(lpfc_transport_template);

View File

@ -37,6 +37,7 @@
#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
#define LOG_FIP 0x00020000 /* FIP events */
#define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */
#define LOG_SCSI_CMD 0x00080000 /* ALL SCSI commands */
#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \

View File

@ -2149,18 +2149,21 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
/* Only FC supports upd bit */
if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
(vport->fc_flag & FC_VFI_REGISTERED)) {
(vport->fc_flag & FC_VFI_REGISTERED) &&
(!phba->fc_topology_changed)) {
bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
"3134 Register VFI, mydid:x%x, fcfi:%d, "
" vfi:%d, vpi:%d, fc_pname:%x%x\n",
" vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
" port_state:x%x topology chg:%d\n",
vport->fc_myDID,
phba->fcf.fcfi,
phba->sli4_hba.vfi_ids[vport->vfi],
phba->vpi_ids[vport->vpi],
reg_vfi->wwn[0], reg_vfi->wwn[1]);
reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag,
vport->port_state, phba->fc_topology_changed);
}
/**

View File

@ -64,18 +64,26 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
int i;
if (phba->sli_rev == LPFC_SLI_REV4)
if (phba->sli_rev == LPFC_SLI_REV4) {
/* Calculate alignment */
if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
i = phba->cfg_sg_dma_buf_size;
else
i = SLI4_PAGE_SIZE;
phba->lpfc_scsi_dma_buf_pool =
pci_pool_create("lpfc_scsi_dma_buf_pool",
phba->pcidev,
phba->cfg_sg_dma_buf_size,
phba->cfg_sg_dma_buf_size,
i,
0);
else
} else {
phba->lpfc_scsi_dma_buf_pool =
pci_pool_create("lpfc_scsi_dma_buf_pool",
phba->pcidev, phba->cfg_sg_dma_buf_size,
align, 0);
}
if (!phba->lpfc_scsi_dma_buf_pool)
goto fail;
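/*
 * Editor's sketch (not driver code): the SLI-4 alignment choice above in
 * isolation. The 4096-byte page size here is illustrative; lpfc defines
 * SLI4_PAGE_SIZE itself. The pool alignment tracks the buffer size until
 * it would exceed one SLI4 page, then it is capped at the page size.
 */
#include <stdio.h>

#define SLI4_PAGE_SIZE_MODEL 4096

static int sli4_pool_align(int sg_dma_buf_size)
{
	if (sg_dma_buf_size < SLI4_PAGE_SIZE_MODEL)
		return sg_dma_buf_size;
	return SLI4_PAGE_SIZE_MODEL;
}

int main(void)
{
	/* prints "2048 4096": small buffers keep their size as alignment */
	printf("%d %d\n", sli4_pool_align(2048), sli4_pool_align(16384));
	return 0;
}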

View File

@ -332,9 +332,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* PLOGI chkparm OK */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
"0114 PLOGI chkparm OK Data: x%x x%x x%x "
"x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
ndlp->nlp_rpi);
ndlp->nlp_rpi, vport->port_state,
vport->fc_flag);
if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
ndlp->nlp_fcp_info |= CLASS2;
@ -574,7 +576,7 @@ out:
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
/* 1 sec timeout */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
@ -631,7 +633,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* If there are other active VLinks present,
* re-instantiate the Vlink using FDISC.
*/
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000));
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
spin_unlock_irq(shost->host_lock);
@ -648,7 +651,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
/* Only try to re-login if this is NOT a Fabric Node */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000 * 1));
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
spin_unlock_irq(shost->host_lock);
@ -969,7 +973,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
/* Put ndlp in npr state set plogi timer for 1 sec */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
spin_unlock_irq(shost->host_lock);
@ -1303,7 +1307,8 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
if ((irsp->ulpStatus) ||
(!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
/* 1 sec timeout */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000));
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
spin_unlock_irq(shost->host_lock);
@ -1509,7 +1514,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
}
/* Put ndlp in npr state set plogi timer for 1 sec */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000 * 1));
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
spin_unlock_irq(shost->host_lock);
@ -2145,7 +2151,8 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000 * 1));
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
ndlp->nlp_flag &= ~NLP_NPR_ADISC;

File diff suppressed because it is too large

View File

@ -667,7 +667,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
next_time = jiffies + HZ * (phba->fc_ratov + 1);
next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
list_for_each_entry_safe(rrq, nextrrq,
&phba->active_rrq_list, list) {
if (time_after(jiffies, rrq->rrq_stop_time))
@ -782,7 +782,7 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
return;
spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
next_time = jiffies + HZ * (phba->fc_ratov * 2);
next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2));
list_splice_init(&phba->active_rrq_list, &rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflags);
@ -878,7 +878,8 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
else
rrq->send_rrq = 0;
rrq->xritag = xritag;
rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
rrq->rrq_stop_time = jiffies +
msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
rrq->ndlp = ndlp;
rrq->nlp_DID = ndlp->nlp_DID;
rrq->vport = ndlp->vport;
@ -926,8 +927,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
!(piocbq->iocb_flag & LPFC_IO_LIBDFC))
ndlp = piocbq->context_un.ndlp;
else if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) &&
(piocbq->iocb_flag & LPFC_IO_LIBDFC))
else if (piocbq->iocb_flag & LPFC_IO_LIBDFC)
ndlp = piocbq->context_un.ndlp;
else
ndlp = piocbq->context1;
@ -1339,7 +1339,8 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
BUG();
else
mod_timer(&piocb->vport->els_tmofunc,
jiffies + HZ * (phba->fc_ratov << 1));
jiffies +
msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
}
@ -2340,7 +2341,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
/* Mailbox cmd <cmd> Cmpl <cmpl> */
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
"x%x x%x x%x\n",
pmb->vport ? pmb->vport->vpi : 0,
pmbox->mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba, pmb),
@ -2354,7 +2356,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
pmbox->un.varWords[4],
pmbox->un.varWords[5],
pmbox->un.varWords[6],
pmbox->un.varWords[7]);
pmbox->un.varWords[7],
pmbox->un.varWords[8],
pmbox->un.varWords[9],
pmbox->un.varWords[10]);
if (pmb->mbox_cmpl)
pmb->mbox_cmpl(phba,pmb);
@ -2908,8 +2913,9 @@ void lpfc_poll_eratt(unsigned long ptr)
lpfc_worker_wake_up(phba);
else
/* Restart the timer for next eratt poll */
mod_timer(&phba->eratt_poll, jiffies +
HZ * LPFC_ERATT_POLL_INTERVAL);
mod_timer(&phba->eratt_poll,
jiffies +
msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
return;
}
@ -5511,6 +5517,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
list_del_init(&rsrc_blk->list);
kfree(rsrc_blk);
}
phba->sli4_hba.max_cfg_param.vpi_used = 0;
break;
case LPFC_RSC_TYPE_FCOE_XRI:
kfree(phba->sli4_hba.xri_bmask);
@ -5811,6 +5818,7 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
} else {
kfree(phba->vpi_bmask);
phba->sli4_hba.max_cfg_param.vpi_used = 0;
kfree(phba->vpi_ids);
bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
kfree(phba->sli4_hba.xri_bmask);
@ -5992,7 +6000,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
struct lpfc_sglq *sglq_entry = NULL;
struct lpfc_sglq *sglq_entry_next = NULL;
struct lpfc_sglq *sglq_entry_first = NULL;
int status, post_cnt = 0, num_posted = 0, block_cnt = 0;
int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
int last_xritag = NO_XRI;
LIST_HEAD(prep_sgl_list);
LIST_HEAD(blck_sgl_list);
@ -6004,6 +6012,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
spin_unlock_irq(&phba->hbalock);
total_cnt = phba->sli4_hba.els_xri_cnt;
list_for_each_entry_safe(sglq_entry, sglq_entry_next,
&allc_sgl_list, list) {
list_del_init(&sglq_entry->list);
@ -6055,9 +6064,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
sglq_entry->sli4_xritag);
list_add_tail(&sglq_entry->list,
&free_sgl_list);
spin_lock_irq(&phba->hbalock);
phba->sli4_hba.els_xri_cnt--;
spin_unlock_irq(&phba->hbalock);
total_cnt--;
}
}
}
@ -6085,9 +6092,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
(sglq_entry_first->sli4_xritag +
post_cnt - 1));
list_splice_init(&blck_sgl_list, &free_sgl_list);
spin_lock_irq(&phba->hbalock);
phba->sli4_hba.els_xri_cnt -= post_cnt;
spin_unlock_irq(&phba->hbalock);
total_cnt -= post_cnt;
}
/* don't reset xritag due to hole in xri block */
@ -6097,6 +6102,8 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
/* reset els sgl post count for next round of posting */
post_cnt = 0;
}
/* update the number of XRIs posted for ELS */
phba->sli4_hba.els_xri_cnt = total_cnt;
/* free the els sgls failed to post */
lpfc_free_sgl_list(phba, &free_sgl_list);
@ -6446,16 +6453,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Start the ELS watchdog timer */
mod_timer(&vport->els_tmofunc,
jiffies + HZ * (phba->fc_ratov * 2));
jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
/* Start heart beat timer */
mod_timer(&phba->hb_tmofunc,
jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
phba->hb_outstanding = 0;
phba->last_completion_time = jiffies;
/* Start error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
mod_timer(&phba->eratt_poll,
jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
/* Enable PCIe device Advanced Error Reporting (AER) if configured */
if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
@ -6822,8 +6830,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
goto out_not_finished;
}
/* timeout active mbox command */
mod_timer(&psli->mbox_tmo, (jiffies +
(HZ * lpfc_mbox_tmo_val(phba, pmbox))));
timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
1000);
mod_timer(&psli->mbox_tmo, jiffies + timeout);
}
/* Mailbox cmd <cmd> issue */
@ -7496,7 +7505,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
/* Start timer for the mbox_tmo and log some mailbox post messages */
mod_timer(&psli->mbox_tmo, (jiffies +
(HZ * lpfc_mbox_tmo_val(phba, mboxq))));
msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
@ -7914,15 +7923,21 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
static inline uint32_t
lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
{
int i;
struct lpfc_vector_map_info *cpup;
int chann, cpu;
if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
i = smp_processor_id();
else
i = atomic_add_return(1, &phba->fcp_qidx);
i = (i % phba->cfg_fcp_io_channel);
return i;
if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) {
cpu = smp_processor_id();
if (cpu < phba->sli4_hba.num_present_cpu) {
cpup = phba->sli4_hba.cpu_map;
cpup += cpu;
return cpup->channel_id;
}
chann = cpu;
}
chann = atomic_add_return(1, &phba->fcp_qidx);
chann = (chann % phba->cfg_fcp_io_channel);
return chann;
}
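/*
 * Editor's sketch (not driver code): the channel-selection logic above in
 * isolation, with a hypothetical per-CPU map and a plain atomic counter
 * standing in for phba->fcp_qidx. A mapped CPU gets its own channel;
 * anything else falls back to round-robin modulo the channel count.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NUM_CPU_MODEL 4
#define NUM_CHANNEL_MODEL 2

static atomic_uint fcp_qidx_model;

static unsigned int pick_channel(unsigned int cpu,
				 const unsigned int map[NUM_CPU_MODEL],
				 int sched_by_cpu)
{
	if (sched_by_cpu && cpu < NUM_CPU_MODEL)
		return map[cpu];	/* direct CPU -> channel lookup */
	/* fallback: round-robin distribution across all channels */
	return atomic_fetch_add(&fcp_qidx_model, 1) % NUM_CHANNEL_MODEL;
}

int main(void)
{
	const unsigned int map[NUM_CPU_MODEL] = { 0, 1, 0, 1 };
	unsigned int cpu;

	/* CPUs 4 and 5 have no mapping, so they take the round-robin path */
	for (cpu = 0; cpu < NUM_CPU_MODEL + 2; cpu++)
		printf("cpu %u -> channel %u\n", cpu,
		       pick_channel(cpu, map, 1));
	return 0;
}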
/**
@ -8444,10 +8459,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
if ((piocb->iocb_flag & LPFC_IO_FCP) ||
(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
if (unlikely(!phba->sli4_hba.fcp_wq))
return IOCB_ERROR;
if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
&wqe))
return IOCB_ERROR;
} else {
if (unlikely(!phba->sli4_hba.els_wq))
return IOCB_ERROR;
if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
return IOCB_ERROR;
}
@ -10003,7 +10022,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
SLI_IOCB_RET_IOCB);
if (retval == IOCB_SUCCESS) {
timeout_req = timeout * HZ;
timeout_req = msecs_to_jiffies(timeout * 1000);
timeleft = wait_event_timeout(done_q,
lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
timeout_req);
@ -10108,7 +10127,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
wait_event_interruptible_timeout(done_q,
pmboxq->mbox_flag & LPFC_MBX_WAKE,
timeout * HZ);
msecs_to_jiffies(timeout * 1000));
spin_lock_irqsave(&phba->hbalock, flag);
pmboxq->context1 = NULL;
@ -12899,8 +12918,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
}
wq->db_regaddr = bar_memmap_p + db_offset;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3264 WQ[%d]: barset:x%x, offset:x%x\n",
wq->queue_id, pci_barset, db_offset);
"3264 WQ[%d]: barset:x%x, offset:x%x, "
"format:x%x\n", wq->queue_id, pci_barset,
db_offset, wq->db_format);
} else {
wq->db_format = LPFC_DB_LIST_FORMAT;
wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
@ -13120,8 +13140,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
}
hrq->db_regaddr = bar_memmap_p + db_offset;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3266 RQ[qid:%d]: barset:x%x, offset:x%x\n",
hrq->queue_id, pci_barset, db_offset);
"3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
"format:x%x\n", hrq->queue_id, pci_barset,
db_offset, hrq->db_format);
} else {
hrq->db_format = LPFC_DB_RING_FORMAT;
hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
@ -13971,13 +13992,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
}
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"2538 Received frame rctl:%s type:%s "
"Frame Data:%08x %08x %08x %08x %08x %08x\n",
rctl_names[fc_hdr->fh_r_ctl],
type_names[fc_hdr->fh_type],
"2538 Received frame rctl:%s (x%x), type:%s (x%x), "
"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
type_names[fc_hdr->fh_type], fc_hdr->fh_type,
be32_to_cpu(header[0]), be32_to_cpu(header[1]),
be32_to_cpu(header[2]), be32_to_cpu(header[3]),
be32_to_cpu(header[4]), be32_to_cpu(header[5]));
be32_to_cpu(header[4]), be32_to_cpu(header[5]),
be32_to_cpu(header[6]));
return 0;
drop:
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,

View File

@ -346,11 +346,6 @@ struct lpfc_bmbx {
#define SLI4_CT_VFI 2
#define SLI4_CT_FCFI 3
#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE 0x10000
#define LPFC_SLI4_FL1_MAX_BUF_SIZE 0X2000
#define LPFC_SLI4_MIN_BUF_SIZE 0x400
#define LPFC_SLI4_MAX_BUF_SIZE 0x20000
/*
* SLI4 specific data structures
*/
@ -440,6 +435,17 @@ struct lpfc_sli4_lnk_info {
#define LPFC_SLI4_HANDLER_NAME_SZ 16
/* Used for IRQ vector to CPU mapping */
struct lpfc_vector_map_info {
uint16_t phys_id;
uint16_t core_id;
uint16_t irq;
uint16_t channel_id;
struct cpumask maskbits;
};
#define LPFC_VECTOR_MAP_EMPTY 0xffff
#define LPFC_MAX_CPU 256
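/*
 * Editor's note (sketch, not driver code): lpfc_sli4_set_affinity() clears
 * this map with memset(.., 0xff, ..). Because every id field is a uint16_t,
 * a 0xff fill marks each one LPFC_VECTOR_MAP_EMPTY (0xffff) in one pass
 * (the cpumask member is omitted here for brevity):
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

struct vec_map_model { uint16_t phys_id, core_id, irq, channel_id; };

int main(void)
{
	struct vec_map_model map[4];

	memset(map, 0xff, sizeof(map));
	assert(map[2].irq == 0xffff && map[3].channel_id == 0xffff);
	return 0;
}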
/* SLI4 HBA data structure entries */
struct lpfc_sli4_hba {
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@ -573,6 +579,11 @@ struct lpfc_sli4_hba {
struct lpfc_iov iov;
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
/* CPU to vector mapping information */
struct lpfc_vector_map_info *cpu_map;
uint16_t num_online_cpu;
uint16_t num_present_cpu;
};
enum lpfc_sge_type {

View File

@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "8.3.38"
#define LPFC_DRIVER_VERSION "8.3.39"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */

View File

@ -80,7 +80,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport,
}
}
static int
int
lpfc_alloc_vpi(struct lpfc_hba *phba)
{
unsigned long vpi;
@ -568,6 +568,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct lpfc_hba *phba = vport->phba;
long timeout;
bool ns_ndlp_referenced = false;
if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
@ -628,6 +629,18 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
lpfc_debugfs_terminate(vport);
/*
* The call to fc_remove_host might release the NameServer ndlp. Since
* we might need to use the ndlp to send the DA_ID CT command,
* increment the reference for the NameServer ndlp to prevent it from
* being released.
*/
ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
lpfc_nlp_get(ndlp);
ns_ndlp_referenced = true;
}
/* Remove FC host and then SCSI host with the vport */
fc_remove_host(lpfc_shost_from_vport(vport));
scsi_remove_host(lpfc_shost_from_vport(vport));
@ -734,6 +747,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
lpfc_discovery_wait(vport);
skip_logo:
/*
* If the NameServer ndlp has been incremented to allow the DA_ID CT
* command to be sent, decrement the ndlp now.
*/
if (ns_ndlp_referenced) {
ndlp = lpfc_findnode_did(vport, NameServer_DID);
lpfc_nlp_put(ndlp);
}
lpfc_cleanup(vport);
lpfc_sli_host_down(vport);

View File

@ -90,6 +90,7 @@ int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *);
void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **);
int lpfc_alloc_vpi(struct lpfc_hba *phba);
/*
* queuecommand VPORT-specific return codes. Specified in the host byte code.

View File

@ -4931,11 +4931,12 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
printk(KERN_ERR "megaraid_sas: timed out while"
"waiting for HBA to recover\n");
error = -ENODEV;
goto out_kfree_ioc;
goto out_up;
}
spin_unlock_irqrestore(&instance->hba_lock, flags);
error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
out_up:
up(&instance->ioctl_sem);
out_kfree_ioc:

View File

@ -254,7 +254,7 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
}
for (i = 0; i < MVS_MAX_DEVICES; i++) {
mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
mvi->devices[i].dev_type = NO_DEVICE;
mvi->devices[i].dev_type = SAS_PHY_UNUSED;
mvi->devices[i].device_id = i;
mvi->devices[i].dev_status = MVS_DEV_NORMAL;
init_timer(&mvi->devices[i].timer);

View File

@ -706,7 +706,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
return 0;
}
#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED)))
static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
struct mvs_tmf_task *tmf, int *pass)
{
@ -726,7 +726,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
* libsas will use dev->port, should
* not call task_done for sata
*/
if (dev->dev_type != SATA_DEV)
if (dev->dev_type != SAS_SATA_DEV)
task->task_done(task);
return rc;
}
@ -1159,10 +1159,10 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
phy->identify.device_type =
phy->att_dev_info & PORT_DEV_TYPE_MASK;
if (phy->identify.device_type == SAS_END_DEV)
if (phy->identify.device_type == SAS_END_DEVICE)
phy->identify.target_port_protocols =
SAS_PROTOCOL_SSP;
else if (phy->identify.device_type != NO_DEVICE)
else if (phy->identify.device_type != SAS_PHY_UNUSED)
phy->identify.target_port_protocols =
SAS_PROTOCOL_SMP;
if (oob_done)
@ -1260,7 +1260,7 @@ struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
{
u32 dev;
for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
if (mvi->devices[dev].dev_type == NO_DEVICE) {
if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) {
mvi->devices[dev].device_id = dev;
return &mvi->devices[dev];
}
@ -1278,7 +1278,7 @@ void mvs_free_dev(struct mvs_device *mvi_dev)
u32 id = mvi_dev->device_id;
memset(mvi_dev, 0, sizeof(*mvi_dev));
mvi_dev->device_id = id;
mvi_dev->dev_type = NO_DEVICE;
mvi_dev->dev_type = SAS_PHY_UNUSED;
mvi_dev->dev_status = MVS_DEV_NORMAL;
mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
}
@ -1480,7 +1480,7 @@ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
{
int rc;
struct sas_phy *phy = sas_get_local_phy(dev);
int reset_type = (dev->dev_type == SATA_DEV ||
int reset_type = (dev->dev_type == SAS_SATA_DEV ||
(dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
rc = sas_phy_reset(phy, reset_type);
sas_put_local_phy(phy);
@ -1629,7 +1629,7 @@ int mvs_abort_task(struct sas_task *task)
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
if (SATA_DEV == dev->dev_type) {
if (SAS_SATA_DEV == dev->dev_type) {
struct mvs_slot_info *slot = task->lldd_task;
u32 slot_idx = (u32)(slot - mvi->slot_info);
mv_dprintk("mvs_abort_task() mvi=%p task=%p "

View File

@ -67,7 +67,7 @@ extern const struct mvs_dispatch mvs_94xx_dispatch;
extern struct kmem_cache *mvs_task_list_cache;
#define DEV_IS_EXPANDER(type) \
((type == EDGE_DEV) || (type == FANOUT_DEV))
((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
#define bit(n) ((u64)1 << n)
@ -241,7 +241,7 @@ struct mvs_phy {
struct mvs_device {
struct list_head dev_entry;
enum sas_dev_type dev_type;
enum sas_device_type dev_type;
struct mvs_info *mvi_info;
struct domain_device *sas_device;
struct timer_list timer;

View File

@ -4,9 +4,10 @@
# Copyright (C) 2008-2009 USI Co., Ltd.
obj-$(CONFIG_SCSI_PM8001) += pm8001.o
pm8001-y += pm8001_init.o \
obj-$(CONFIG_SCSI_PM8001) += pm80xx.o
pm80xx-y += pm8001_init.o \
pm8001_sas.o \
pm8001_ctl.o \
pm8001_hwi.o
pm8001_hwi.o \
pm80xx_hwi.o

View File

@ -1,5 +1,5 @@
/*
* PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
* PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver
*
* Copyright (c) 2008-2009 USI Co., Ltd.
* All rights reserved.
@ -58,8 +58,13 @@ static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev,
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
return snprintf(buf, PAGE_SIZE, "%d\n",
pm8001_ha->main_cfg_tbl.interface_rev);
if (pm8001_ha->chip_id == chip_8001) {
return snprintf(buf, PAGE_SIZE, "%d\n",
pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev);
} else {
return snprintf(buf, PAGE_SIZE, "%d\n",
pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev);
}
}
static
DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);
@ -78,11 +83,19 @@ static ssize_t pm8001_ctl_fw_version_show(struct device *cdev,
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
(u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 24),
(u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 16),
(u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 8),
(u8)(pm8001_ha->main_cfg_tbl.firmware_rev));
if (pm8001_ha->chip_id == chip_8001) {
return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 24),
(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 16),
(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 8),
(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev));
} else {
return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 24),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 16),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 8),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev));
}
}
static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL);
/**
@ -99,8 +112,13 @@ static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev,
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
return snprintf(buf, PAGE_SIZE, "%d\n",
pm8001_ha->main_cfg_tbl.max_out_io);
if (pm8001_ha->chip_id == chip_8001) {
return snprintf(buf, PAGE_SIZE, "%d\n",
pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io);
} else {
return snprintf(buf, PAGE_SIZE, "%d\n",
pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io);
}
}
static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL);
/**
@ -117,8 +135,15 @@ static ssize_t pm8001_ctl_max_devices_show(struct device *cdev,
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
return snprintf(buf, PAGE_SIZE, "%04d\n",
(u16)(pm8001_ha->main_cfg_tbl.max_sgl >> 16));
if (pm8001_ha->chip_id == chip_8001) {
return snprintf(buf, PAGE_SIZE, "%04d\n",
(u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16)
);
} else {
return snprintf(buf, PAGE_SIZE, "%04d\n",
(u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16)
);
}
}
static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL);
/**
@ -136,8 +161,15 @@ static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev,
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
return snprintf(buf, PAGE_SIZE, "%04d\n",
pm8001_ha->main_cfg_tbl.max_sgl & 0x0000FFFF);
if (pm8001_ha->chip_id == chip_8001) {
return snprintf(buf, PAGE_SIZE, "%04d\n",
pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF
);
} else {
return snprintf(buf, PAGE_SIZE, "%04d\n",
pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 0x0000FFFF
);
}
}
static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL);
@ -173,7 +205,14 @@ static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev,
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
mode = (pm8001_ha->main_cfg_tbl.ctrl_cap_flag & 0xfe000000)>>25;
/* fe000000 means supports SAS2.1 */
if (pm8001_ha->chip_id == chip_8001)
mode = (pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag &
0xfe000000)>>25;
else
/* fe000000 means supports SAS2.1 */
mode = (pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag &
0xfe000000)>>25;
return show_sas_spec_support_status(mode, buf);
}
static DEVICE_ATTR(sas_spec_support, S_IRUGO,
@ -361,10 +400,11 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha)
goto out;
}
payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
memcpy((u8 *)payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
pm8001_ha->fw_image->size);
payload->length = pm8001_ha->fw_image->size;
payload->id = 0;
payload->minor_function = 0x1;
pm8001_ha->nvmd_completion = &completion;
ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload);
wait_for_completion(&completion);
@ -411,7 +451,7 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
payload->length = 1024*16;
payload->id = 0;
fwControl =
(struct fw_control_info *)payload->func_specific;
(struct fw_control_info *)&payload->func_specific;
fwControl->len = IOCTL_BUF_SIZE; /* IN */
fwControl->size = partitionSize + HEADER_LEN;/* IN */
fwControl->retcode = 0;/* OUT */

View File

@ -1,5 +1,5 @@
/*
* PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
* PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver
*
* Copyright (c) 2008-2009 USI Co., Ltd.
* All rights reserved.
@ -43,9 +43,12 @@
enum chip_flavors {
chip_8001,
chip_8008,
chip_8009,
chip_8018,
chip_8019
};
#define USI_MAX_MEMCNT 9
#define PM8001_MAX_DMA_SG SG_ALL
enum phy_speed {
PHY_SPEED_15 = 0x01,
PHY_SPEED_30 = 0x02,
@ -69,23 +72,34 @@ enum port_type {
#define PM8001_MPI_QUEUE 1024 /* maximum mpi queue entries */
#define PM8001_MAX_INB_NUM 1
#define PM8001_MAX_OUTB_NUM 1
#define PM8001_MAX_SPCV_INB_NUM 1
#define PM8001_MAX_SPCV_OUTB_NUM 4
#define PM8001_CAN_QUEUE 508 /* SCSI Queue depth */
/* unchangeable hardware details */
#define PM8001_MAX_PHYS 8 /* max. possible phys */
#define PM8001_MAX_PORTS 8 /* max. possible ports */
#define PM8001_MAX_DEVICES 1024 /* max supported device */
/* Inbound/Outbound queue size */
#define IOMB_SIZE_SPC 64
#define IOMB_SIZE_SPCV 128
/* unchangeable hardware details */
#define PM8001_MAX_PHYS 16 /* max. possible phys */
#define PM8001_MAX_PORTS 16 /* max. possible ports */
#define PM8001_MAX_DEVICES 2048 /* max supported device */
#define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */
#define USI_MAX_MEMCNT_BASE 5
#define IB (USI_MAX_MEMCNT_BASE + 1)
#define CI (IB + PM8001_MAX_SPCV_INB_NUM)
#define OB (CI + PM8001_MAX_SPCV_INB_NUM)
#define PI (OB + PM8001_MAX_SPCV_OUTB_NUM)
#define USI_MAX_MEMCNT (PI + PM8001_MAX_SPCV_OUTB_NUM)
#define PM8001_MAX_DMA_SG SG_ALL
enum memory_region_num {
AAP1 = 0x0, /* application acceleration processor */
IOP, /* IO processor */
CI, /* consumer index */
PI, /* producer index */
IB, /* inbound queue */
OB, /* outbound queue */
NVMD, /* NVM device */
DEV_MEM, /* memory for devices */
CCB_MEM, /* memory for command control block */
FW_FLASH /* memory for fw flash update */
};
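/*
 * Editor's sketch (not driver code): evaluating the new IB/CI/OB/PI region
 * macros above with the stated queue counts (1 SPCv inbound, 4 outbound).
 * This only prints the resulting layout so the index arithmetic that
 * replaces the old fixed enum is easy to check.
 */
#include <stdio.h>

#define SPCV_INB_NUM_MODEL  1
#define SPCV_OUTB_NUM_MODEL 4
#define MEMCNT_BASE_MODEL   5
#define IB_MODEL  (MEMCNT_BASE_MODEL + 1)
#define CI_MODEL  (IB_MODEL + SPCV_INB_NUM_MODEL)
#define OB_MODEL  (CI_MODEL + SPCV_INB_NUM_MODEL)
#define PI_MODEL  (OB_MODEL + SPCV_OUTB_NUM_MODEL)
#define MEMCNT_MODEL (PI_MODEL + SPCV_OUTB_NUM_MODEL)

int main(void)
{
	/* prints "IB=6 CI=7 OB=8 PI=12 total=16" */
	printf("IB=%d CI=%d OB=%d PI=%d total=%d\n",
	       IB_MODEL, CI_MODEL, OB_MODEL, PI_MODEL, MEMCNT_MODEL);
	return 0;
}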
#define PM8001_EVENT_LOG_SIZE (128 * 1024)

File diff suppressed because it is too large

View File

@ -131,6 +131,8 @@
#define LINKRATE_30 (0x02 << 8)
#define LINKRATE_60 (0x04 << 8)
/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */
#define GSM_SM_BASE 0x4F0000
struct mpi_msg_hdr{
__le32 header; /* Bits [11:0] - Message operation code */
/* Bits [15:12] - Message Category */
@ -298,7 +300,7 @@ struct local_phy_ctl_resp {
#define OP_BITS 0x0000FF00
#define ID_BITS 0x0000000F
#define ID_BITS 0x000000FF
/*
* brief the data structure of PORT Control Command

View File

@ -1,5 +1,5 @@
/*
* PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
* PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
*
* Copyright (c) 2008-2009 USI Co., Ltd.
* All rights reserved.
@ -44,8 +44,16 @@
static struct scsi_transport_template *pm8001_stt;
/**
 * chip info structure to identify chip key functionality:
 * encryption available or not, number of ports, HW-specific function refs
*/
static const struct pm8001_chip_info pm8001_chips[] = {
[chip_8001] = { 8, &pm8001_8001_dispatch,},
[chip_8001] = {0, 8, &pm8001_8001_dispatch,},
[chip_8008] = {0, 8, &pm8001_80xx_dispatch,},
[chip_8009] = {1, 8, &pm8001_80xx_dispatch,},
[chip_8018] = {0, 16, &pm8001_80xx_dispatch,},
[chip_8019] = {1, 16, &pm8001_80xx_dispatch,},
};
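/*
 * Editor's sketch (not driver code): the designated-initializer pattern the
 * chip table above uses, with enum values as array indices so each PCI id's
 * driver_data can select its row directly. Names here are illustrative.
 */
#include <stdio.h>

enum chip_model { CHIP_A, CHIP_B, CHIP_C };

struct chip_info_model {
	int encrypt;	/* encryption engine present */
	int n_phy;	/* number of phys on the part */
};

static const struct chip_info_model chips_model[] = {
	[CHIP_A] = { 0, 8 },
	[CHIP_B] = { 1, 8 },
	[CHIP_C] = { 1, 16 },
};

int main(void)
{
	enum chip_model id = CHIP_C;	/* e.g. taken from ent->driver_data */

	printf("encrypt=%d n_phy=%d\n", chips_model[id].encrypt,
	       chips_model[id].n_phy);
	return 0;
}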
static int pm8001_id;
@ -155,37 +163,75 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
}
#ifdef PM8001_USE_TASKLET
/**
* tasklet for 64 msi-x interrupt handler
* @opaque: the passed general host adapter struct
* Note: pm8001_tasklet is common for pm8001 & pm80xx
*/
static void pm8001_tasklet(unsigned long opaque)
{
struct pm8001_hba_info *pm8001_ha;
u32 vec;
pm8001_ha = (struct pm8001_hba_info *)opaque;
if (unlikely(!pm8001_ha))
BUG_ON(1);
PM8001_CHIP_DISP->isr(pm8001_ha);
vec = pm8001_ha->int_vector;
PM8001_CHIP_DISP->isr(pm8001_ha, vec);
}
#endif
static struct pm8001_hba_info *outq_to_hba(u8 *outq)
{
return container_of((outq - *outq), struct pm8001_hba_info, outq[0]);
}
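/*
 * Editor's sketch (not driver code): the pointer arithmetic behind
 * outq_to_hba() above, reproduced in user space. pm8001_setup_msix()
 * stores i in outq[i], so subtracting the stored value from the element's
 * address recovers &outq[0], and offsetof() then yields the containing
 * structure (the struct name and fields here are illustrative).
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct hba_model {
	int id;
	uint8_t outq[4];
};

static struct hba_model *outq_to_hba_model(uint8_t *outq)
{
	return (struct hba_model *)((char *)(outq - *outq) -
				    offsetof(struct hba_model, outq));
}

int main(void)
{
	struct hba_model h = { .id = 7 };
	uint8_t i;

	for (i = 0; i < 4; i++)
		h.outq[i] = i;	/* mirrors outq[i] = i in pm8001_setup_msix() */
	assert(outq_to_hba_model(&h.outq[2])->id == 7);
	return 0;
}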
/**
* pm8001_interrupt - when the HBA originates an interrupt, we invoke this
* dispatcher to handle each case.
* @irq: irq number.
* @opaque: the passed general host adapter struct
*/
static irqreturn_t pm8001_interrupt(int irq, void *opaque)
/**
* pm8001_interrupt_handler_msix - main MSIX interrupt handler.
* It obtains the vector number and calls the equivalent bottom
* half or services directly.
* @opaque: the passed outbound queue/vector. Host structure is
* retrieved from the same.
*/
static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
{
struct pm8001_hba_info *pm8001_ha = outq_to_hba(opaque);
u8 outq = *(u8 *)opaque;
irqreturn_t ret = IRQ_HANDLED;
if (unlikely(!pm8001_ha))
return IRQ_NONE;
if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
return IRQ_NONE;
pm8001_ha->int_vector = outq;
#ifdef PM8001_USE_TASKLET
tasklet_schedule(&pm8001_ha->tasklet);
#else
ret = PM8001_CHIP_DISP->isr(pm8001_ha, outq);
#endif
return ret;
}
/**
* pm8001_interrupt_handler_intx - main INTx interrupt handler.
* @dev_id: sas_ha structure. The HBA is retrieved from sas_has structure.
*/
static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
{
struct pm8001_hba_info *pm8001_ha;
irqreturn_t ret = IRQ_HANDLED;
struct sas_ha_struct *sha = opaque;
struct sas_ha_struct *sha = dev_id;
pm8001_ha = sha->lldd_ha;
if (unlikely(!pm8001_ha))
return IRQ_NONE;
if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
return IRQ_NONE;
pm8001_ha->int_vector = 0;
#ifdef PM8001_USE_TASKLET
tasklet_schedule(&pm8001_ha->tasklet);
#else
ret = PM8001_CHIP_DISP->isr(pm8001_ha);
ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0);
#endif
return ret;
}
@ -195,10 +241,14 @@ static irqreturn_t pm8001_interrupt(int irq, void *opaque)
* @pm8001_ha:our hba structure.
*
*/
static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
const struct pci_device_id *ent)
{
int i;
spin_lock_init(&pm8001_ha->lock);
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("pm8001_alloc: PHY:%x\n",
pm8001_ha->chip->n_phy));
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
pm8001_phy_init(pm8001_ha, i);
pm8001_ha->port[i].wide_port_phymap = 0;
@ -222,30 +272,57 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE;
pm8001_ha->memoryMap.region[IOP].alignment = 32;
/* MPI Memory region 3 for consumer Index of inbound queues */
pm8001_ha->memoryMap.region[CI].num_elements = 1;
pm8001_ha->memoryMap.region[CI].element_size = 4;
pm8001_ha->memoryMap.region[CI].total_len = 4;
pm8001_ha->memoryMap.region[CI].alignment = 4;
for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
/* MPI Memory region 3 for consumer Index of inbound queues */
pm8001_ha->memoryMap.region[CI+i].num_elements = 1;
pm8001_ha->memoryMap.region[CI+i].element_size = 4;
pm8001_ha->memoryMap.region[CI+i].total_len = 4;
pm8001_ha->memoryMap.region[CI+i].alignment = 4;
/* MPI Memory region 4 for producer Index of outbound queues */
pm8001_ha->memoryMap.region[PI].num_elements = 1;
pm8001_ha->memoryMap.region[PI].element_size = 4;
pm8001_ha->memoryMap.region[PI].total_len = 4;
pm8001_ha->memoryMap.region[PI].alignment = 4;
if ((ent->driver_data) != chip_8001) {
/* MPI Memory region 5 inbound queues */
pm8001_ha->memoryMap.region[IB+i].num_elements =
PM8001_MPI_QUEUE;
pm8001_ha->memoryMap.region[IB+i].element_size = 128;
pm8001_ha->memoryMap.region[IB+i].total_len =
PM8001_MPI_QUEUE * 128;
pm8001_ha->memoryMap.region[IB+i].alignment = 128;
} else {
pm8001_ha->memoryMap.region[IB+i].num_elements =
PM8001_MPI_QUEUE;
pm8001_ha->memoryMap.region[IB+i].element_size = 64;
pm8001_ha->memoryMap.region[IB+i].total_len =
PM8001_MPI_QUEUE * 64;
pm8001_ha->memoryMap.region[IB+i].alignment = 64;
}
}
/* MPI Memory region 5 inbound queues */
pm8001_ha->memoryMap.region[IB].num_elements = PM8001_MPI_QUEUE;
pm8001_ha->memoryMap.region[IB].element_size = 64;
pm8001_ha->memoryMap.region[IB].total_len = PM8001_MPI_QUEUE * 64;
pm8001_ha->memoryMap.region[IB].alignment = 64;
for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
/* MPI Memory region 4 for producer Index of outbound queues */
pm8001_ha->memoryMap.region[PI+i].num_elements = 1;
pm8001_ha->memoryMap.region[PI+i].element_size = 4;
pm8001_ha->memoryMap.region[PI+i].total_len = 4;
pm8001_ha->memoryMap.region[PI+i].alignment = 4;
/* MPI Memory region 6 outbound queues */
pm8001_ha->memoryMap.region[OB].num_elements = PM8001_MPI_QUEUE;
pm8001_ha->memoryMap.region[OB].element_size = 64;
pm8001_ha->memoryMap.region[OB].total_len = PM8001_MPI_QUEUE * 64;
pm8001_ha->memoryMap.region[OB].alignment = 64;
if (ent->driver_data != chip_8001) {
/* MPI Memory region 6 Outbound queues */
pm8001_ha->memoryMap.region[OB+i].num_elements =
PM8001_MPI_QUEUE;
pm8001_ha->memoryMap.region[OB+i].element_size = 128;
pm8001_ha->memoryMap.region[OB+i].total_len =
PM8001_MPI_QUEUE * 128;
pm8001_ha->memoryMap.region[OB+i].alignment = 128;
} else {
/* MPI Memory region 6 Outbound queues */
pm8001_ha->memoryMap.region[OB+i].num_elements =
PM8001_MPI_QUEUE;
pm8001_ha->memoryMap.region[OB+i].element_size = 64;
pm8001_ha->memoryMap.region[OB+i].total_len =
PM8001_MPI_QUEUE * 64;
pm8001_ha->memoryMap.region[OB+i].alignment = 64;
}
}
/* Memory region write DMA*/
pm8001_ha->memoryMap.region[NVMD].num_elements = 1;
pm8001_ha->memoryMap.region[NVMD].element_size = 4096;
@ -264,6 +341,9 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB *
sizeof(struct pm8001_ccb_info);
/* Memory region for fw flash */
pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096;
for (i = 0; i < USI_MAX_MEMCNT; i++) {
if (pm8001_mem_alloc(pm8001_ha->pdev,
&pm8001_ha->memoryMap.region[i].virt_ptr,
@ -281,7 +361,7 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr;
for (i = 0; i < PM8001_MAX_DEVICES; i++) {
pm8001_ha->devices[i].dev_type = NO_DEVICE;
pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED;
pm8001_ha->devices[i].id = i;
pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
pm8001_ha->devices[i].running_req = 0;
@ -339,10 +419,12 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
ioremap(pm8001_ha->io_mem[logicalBar].membase,
pm8001_ha->io_mem[logicalBar].memsize);
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("PCI: bar %d, logicalBar %d "
"virt_addr=%lx,len=%d\n", bar, logicalBar,
(unsigned long)
pm8001_ha->io_mem[logicalBar].memvirtaddr,
pm8001_printk("PCI: bar %d, logicalBar %d ",
bar, logicalBar));
PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
"base addr %llx virt_addr=%llx len=%d\n",
(u64)pm8001_ha->io_mem[logicalBar].membase,
(u64)pm8001_ha->io_mem[logicalBar].memvirtaddr,
pm8001_ha->io_mem[logicalBar].memsize));
} else {
pm8001_ha->io_mem[logicalBar].membase = 0;
@ -361,8 +443,9 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
* @shost: scsi host struct which has been initialized before.
*/
static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
u32 chip_id,
struct Scsi_Host *shost)
const struct pci_device_id *ent,
struct Scsi_Host *shost)
{
struct pm8001_hba_info *pm8001_ha;
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
@ -374,7 +457,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
pm8001_ha->pdev = pdev;
pm8001_ha->dev = &pdev->dev;
pm8001_ha->chip_id = chip_id;
pm8001_ha->chip_id = ent->driver_data;
pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id];
pm8001_ha->irq = pdev->irq;
pm8001_ha->sas = sha;
@ -382,12 +465,22 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
pm8001_ha->id = pm8001_id++;
pm8001_ha->logging_level = 0x01;
sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
/* IOMB size is 128 for 8088/89 controllers */
if (pm8001_ha->chip_id != chip_8001)
pm8001_ha->iomb_size = IOMB_SIZE_SPCV;
else
pm8001_ha->iomb_size = IOMB_SIZE_SPC;
#ifdef PM8001_USE_TASKLET
/**
* default tasklet for non msi-x interrupt handler/first msi-x
* interrupt handler
**/
tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
(unsigned long)pm8001_ha);
(unsigned long)pm8001_ha);
#endif
pm8001_ioremap(pm8001_ha);
if (!pm8001_alloc(pm8001_ha))
if (!pm8001_alloc(pm8001_ha, ent))
return pm8001_ha;
pm8001_free(pm8001_ha);
return NULL;
@ -512,21 +605,50 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
*/
static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
{
u8 i;
u8 i, j;
#ifdef PM8001_READ_VPD
/* For new SPC controllers WWN is stored in flash vpd
* For SPC/SPCve controllers WWN is stored in EEPROM
* For Older SPC WWN is stored in NVMD
*/
DECLARE_COMPLETION_ONSTACK(completion);
struct pm8001_ioctl_payload payload;
u16 deviceid;
pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
pm8001_ha->nvmd_completion = &completion;
payload.minor_function = 0;
payload.length = 128;
payload.func_specific = kzalloc(128, GFP_KERNEL);
if (pm8001_ha->chip_id == chip_8001) {
if (deviceid == 0x8081) {
payload.minor_function = 4;
payload.length = 4096;
} else {
payload.minor_function = 0;
payload.length = 128;
}
} else {
payload.minor_function = 1;
payload.length = 4096;
}
payload.offset = 0;
payload.func_specific = kzalloc(payload.length, GFP_KERNEL);
PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
wait_for_completion(&completion);
for (i = 0, j = 0; i <= 7; i++, j++) {
if (pm8001_ha->chip_id == chip_8001) {
if (deviceid == 0x8081)
pm8001_ha->sas_addr[j] =
payload.func_specific[0x704 + i];
} else
pm8001_ha->sas_addr[j] =
payload.func_specific[0x804 + i];
}
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr,
SAS_ADDR_SIZE);
memcpy(&pm8001_ha->phy[i].dev_sas_addr,
pm8001_ha->sas_addr, SAS_ADDR_SIZE);
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("phy %d sas_addr = %016llx \n", i,
pm8001_printk("phy %d sas_addr = %016llx\n", i,
pm8001_ha->phy[i].dev_sas_addr));
}
#else
@ -547,31 +669,50 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
* @chip_info: our ha struct.
* @irq_handler: irq_handler
*/
static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha,
irq_handler_t irq_handler)
static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
{
u32 i = 0, j = 0;
u32 number_of_intr = 1;
u32 number_of_intr;
int flag = 0;
u32 max_entry;
int rc;
static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3];
/* SPCv controllers supports 64 msi-x */
if (pm8001_ha->chip_id == chip_8001) {
number_of_intr = 1;
flag |= IRQF_DISABLED;
} else {
number_of_intr = PM8001_MAX_MSIX_VEC;
flag &= ~IRQF_SHARED;
flag |= IRQF_DISABLED;
}
max_entry = sizeof(pm8001_ha->msix_entries) /
sizeof(pm8001_ha->msix_entries[0]);
flag |= IRQF_DISABLED;
for (i = 0; i < max_entry ; i++)
pm8001_ha->msix_entries[i].entry = i;
rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries,
number_of_intr);
pm8001_ha->number_of_intr = number_of_intr;
if (!rc) {
PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
"pci_enable_msix request ret:%d no of intr %d\n",
rc, pm8001_ha->number_of_intr));
for (i = 0; i < number_of_intr; i++)
pm8001_ha->outq[i] = i;
for (i = 0; i < number_of_intr; i++) {
snprintf(intr_drvname[i], sizeof(intr_drvname[0]),
DRV_NAME"%d", i);
if (request_irq(pm8001_ha->msix_entries[i].vector,
irq_handler, flag, DRV_NAME,
SHOST_TO_SAS_HA(pm8001_ha->shost))) {
pm8001_interrupt_handler_msix, flag,
intr_drvname[i], &pm8001_ha->outq[i])) {
for (j = 0; j < i; j++)
free_irq(
pm8001_ha->msix_entries[j].vector,
SHOST_TO_SAS_HA(pm8001_ha->shost));
&pm8001_ha->outq[j]);
pci_disable_msix(pm8001_ha->pdev);
break;
}
@ -588,22 +729,24 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha,
static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
{
struct pci_dev *pdev;
irq_handler_t irq_handler = pm8001_interrupt;
int rc;
pdev = pm8001_ha->pdev;
#ifdef PM8001_USE_MSIX
if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
return pm8001_setup_msix(pm8001_ha, irq_handler);
else
return pm8001_setup_msix(pm8001_ha);
else {
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("MSIX not supported!!!\n"));
goto intx;
}
#endif
intx:
/* initialize the INT-X interrupt */
rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME,
SHOST_TO_SAS_HA(pm8001_ha->shost));
rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost));
return rc;
}
@ -621,12 +764,13 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
{
unsigned int rc;
u32 pci_reg;
u8 i = 0;
struct pm8001_hba_info *pm8001_ha;
struct Scsi_Host *shost = NULL;
const struct pm8001_chip_info *chip;
dev_printk(KERN_INFO, &pdev->dev,
"pm8001: driver version %s\n", DRV_VERSION);
"pm80xx: driver version %s\n", DRV_VERSION);
rc = pci_enable_device(pdev);
if (rc)
goto err_out_enable;
@ -665,25 +809,39 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
goto err_out_free;
}
pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost);
/* ent->driver_data is used to differentiate between controllers */
pm8001_ha = pm8001_pci_alloc(pdev, ent, shost);
if (!pm8001_ha) {
rc = -ENOMEM;
goto err_out_free;
}
list_add_tail(&pm8001_ha->list, &hba_list);
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
if (rc)
if (rc) {
PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
"chip_init failed [ret: %d]\n", rc));
goto err_out_ha_free;
}
rc = scsi_add_host(shost, &pdev->dev);
if (rc)
goto err_out_ha_free;
rc = pm8001_request_irq(pm8001_ha);
if (rc)
if (rc) {
PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
"pm8001_request_irq failed [ret: %d]\n", rc));
goto err_out_shost;
}
PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
if (pm8001_ha->chip_id != chip_8001) {
for (i = 1; i < pm8001_ha->number_of_intr; i++)
PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
/* setup thermal configuration. */
pm80xx_set_thermal_config(pm8001_ha);
}
PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
pm8001_init_sas_add(pm8001_ha);
pm8001_post_sas_ha_init(shost, chip);
rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
@ -719,14 +877,15 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
sas_remove_host(pm8001_ha->shost);
list_del(&pm8001_ha->list);
scsi_remove_host(pm8001_ha->shost);
PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
#ifdef PM8001_USE_MSIX
for (i = 0; i < pm8001_ha->number_of_intr; i++)
synchronize_irq(pm8001_ha->msix_entries[i].vector);
for (i = 0; i < pm8001_ha->number_of_intr; i++)
free_irq(pm8001_ha->msix_entries[i].vector, sha);
free_irq(pm8001_ha->msix_entries[i].vector,
&pm8001_ha->outq[i]);
pci_disable_msix(pdev);
#else
free_irq(pm8001_ha->irq, sha);
@ -763,13 +922,14 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
printk(KERN_ERR " PCI PM not supported\n");
return -ENODEV;
}
PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
#ifdef PM8001_USE_MSIX
for (i = 0; i < pm8001_ha->number_of_intr; i++)
synchronize_irq(pm8001_ha->msix_entries[i].vector);
for (i = 0; i < pm8001_ha->number_of_intr; i++)
free_irq(pm8001_ha->msix_entries[i].vector, sha);
free_irq(pm8001_ha->msix_entries[i].vector,
&pm8001_ha->outq[i]);
pci_disable_msix(pdev);
#else
free_irq(pm8001_ha->irq, sha);
@ -798,6 +958,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct pm8001_hba_info *pm8001_ha;
int rc;
u8 i = 0;
u32 device_state;
pm8001_ha = sha->lldd_ha;
device_state = pdev->current_state;
@ -820,19 +981,33 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
if (rc)
goto err_out_disable;
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
/* chip soft rst only for spc */
if (pm8001_ha->chip_id == chip_8001) {
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("chip soft reset successful\n"));
}
rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
if (rc)
goto err_out_disable;
PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
/* disable all the interrupt bits */
PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
rc = pm8001_request_irq(pm8001_ha);
if (rc)
goto err_out_disable;
#ifdef PM8001_USE_TASKLET
#ifdef PM8001_USE_TASKLET
/* default tasklet for non msi-x interrupt handler/first msi-x
* interrupt handler */
tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
(unsigned long)pm8001_ha);
#endif
PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
(unsigned long)pm8001_ha);
#endif
PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
if (pm8001_ha->chip_id != chip_8001) {
for (i = 1; i < pm8001_ha->number_of_intr; i++)
PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
}
scsi_unblock_requests(pm8001_ha->shost);
return 0;
@ -843,14 +1018,45 @@ err_out_enable:
return rc;
}
/* PCI device and vendor ids, with driver_data carrying a
 * unique chip id for each controller
*/
static struct pci_device_id pm8001_pci_table[] = {
{
PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001
},
{ PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
{
PCI_DEVICE(0x117c, 0x0042),
.driver_data = chip_8001
},
/* Support for SPC/SPCv/SPCve controllers */
{ PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },
{ PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 },
{ PCI_VDEVICE(ADAPTEC2, 0x8008), chip_8008 },
{ PCI_VDEVICE(PMC_Sierra, 0x8018), chip_8018 },
{ PCI_VDEVICE(ADAPTEC2, 0x8018), chip_8018 },
{ PCI_VDEVICE(PMC_Sierra, 0x8009), chip_8009 },
{ PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 },
{ PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 },
{ PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8081,
PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8081,
PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8001 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8088,
PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8008 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8088,
PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8008 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8089,
PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8009 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8089,
PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8009 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8088,
PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8018 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8088,
PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8018 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8089,
PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 },
{ PCI_VENDOR_ID_ADAPTEC2, 0x8089,
PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 },
{} /* terminate list */
};
@ -870,7 +1076,7 @@ static int __init pm8001_init(void)
{
int rc = -ENOMEM;
pm8001_wq = alloc_workqueue("pm8001", 0, 0);
pm8001_wq = alloc_workqueue("pm80xx", 0, 0);
if (!pm8001_wq)
goto err;
@ -902,7 +1108,8 @@ module_init(pm8001_init);
module_exit(pm8001_exit);
MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver");
MODULE_DESCRIPTION(
"PMC-Sierra PM8001/8081/8088/8089 SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pm8001_pci_table);

View File

@ -1,5 +1,5 @@
/*
* PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
* PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
*
* Copyright (c) 2008-2009 USI Co., Ltd.
* All rights reserved.
@ -68,7 +68,7 @@ static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag)
clear_bit(tag, bitmap);
}
static void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
{
pm8001_tag_clear(pm8001_ha, tag);
}
@ -212,10 +212,12 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
break;
case PHY_FUNC_GET_EVENTS:
spin_lock_irqsave(&pm8001_ha->lock, flags);
if (-1 == pm8001_bar4_shift(pm8001_ha,
if (pm8001_ha->chip_id == chip_8001) {
if (-1 == pm8001_bar4_shift(pm8001_ha,
(phy_id < 4) ? 0x30000 : 0x40000)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return -EINVAL;
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return -EINVAL;
}
}
{
struct sas_phy *phy = sas_phy->phy;
@ -228,7 +230,8 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
phy->loss_of_dword_sync_count = qp[3];
phy->phy_reset_problem_count = qp[4];
}
pm8001_bar4_shift(pm8001_ha, 0);
if (pm8001_ha->chip_id == chip_8001)
pm8001_bar4_shift(pm8001_ha, 0);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return 0;
default:
@ -249,7 +252,9 @@ void pm8001_scan_start(struct Scsi_Host *shost)
struct pm8001_hba_info *pm8001_ha;
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
pm8001_ha = sha->lldd_ha;
PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
/* SAS_RE_INITIALIZATION not available in SPCv/ve */
if (pm8001_ha->chip_id == chip_8001)
PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
}
@ -352,7 +357,7 @@ static int sas_find_local_port_id(struct domain_device *dev)
* @tmf: the task management IU
*/
#define DEV_IS_GONE(pm8001_dev) \
((!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE)))
((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
static int pm8001_task_exec(struct sas_task *task, const int num,
gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
{
@ -370,7 +375,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
struct task_status_struct *tsm = &t->task_status;
tsm->resp = SAS_TASK_UNDELIVERED;
tsm->stat = SAS_PHY_DOWN;
if (dev->dev_type != SATA_DEV)
if (dev->dev_type != SAS_SATA_DEV)
t->task_done(t);
return 0;
}
@ -548,7 +553,7 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
{
u32 dev;
for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
if (pm8001_ha->devices[dev].dev_type == NO_DEVICE) {
if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
pm8001_ha->devices[dev].id = dev;
return &pm8001_ha->devices[dev];
}
@ -560,13 +565,31 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
}
return NULL;
}
/**
* pm8001_find_dev - find a matching pm8001_device
* @pm8001_ha: our hba card information
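* @device_id: device id (as assigned by the firmware) to match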
*/
struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
u32 device_id)
{
u32 dev;
for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
if (pm8001_ha->devices[dev].device_id == device_id)
return &pm8001_ha->devices[dev];
}
if (dev == PM8001_MAX_DEVICES) {
PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("NO MATCHING "
"DEVICE FOUND !!!\n"));
}
return NULL;
}
static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
{
u32 id = pm8001_dev->id;
memset(pm8001_dev, 0, sizeof(*pm8001_dev));
pm8001_dev->id = id;
pm8001_dev->dev_type = NO_DEVICE;
pm8001_dev->dev_type = SAS_PHY_UNUSED;
pm8001_dev->device_id = PM8001_MAX_DEVICES;
pm8001_dev->sas_device = NULL;
}
@ -624,7 +647,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
res = -1;
}
} else {
if (dev->dev_type == SATA_DEV) {
if (dev->dev_type == SAS_SATA_DEV) {
pm8001_device->attached_phy =
dev->rphy->identify.phy_identifier;
flag = 1; /* directly sata*/
@ -634,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
wait_for_completion(&completion);
if (dev->dev_type == SAS_END_DEV)
if (dev->dev_type == SAS_END_DEVICE)
msleep(50);
pm8001_ha->flags = PM8001F_RUN_TIME;
return 0;
@ -648,7 +671,7 @@ int pm8001_dev_found(struct domain_device *dev)
return pm8001_dev_found_notify(dev);
}
static void pm8001_task_done(struct sas_task *task)
void pm8001_task_done(struct sas_task *task)
{
if (!del_timer(&task->slow_task->timer))
return;
@ -904,7 +927,7 @@ void pm8001_open_reject_retry(
struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
pm8001_dev = ccb->device;
if (!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))
if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
continue;
if (!device_to_close) {
uintptr_t d = (uintptr_t)pm8001_dev
@ -995,6 +1018,72 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
return rc;
}
/*
* This function handles the IT_NEXUS_XXX event or completion
* status code for SSP/SATA/SMP I/O requests.
*/
int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
{
int rc = TMF_RESP_FUNC_FAILED;
struct pm8001_device *pm8001_dev;
struct pm8001_hba_info *pm8001_ha;
struct sas_phy *phy;
u32 device_id = 0;
if (!dev || !dev->lldd_dev)
return -1;
pm8001_dev = dev->lldd_dev;
device_id = pm8001_dev->device_id;
pm8001_ha = pm8001_find_ha_by_dev(dev);
PM8001_EH_DBG(pm8001_ha,
pm8001_printk("I_T_Nexus handler invoked !!"));
phy = sas_get_local_phy(dev);
if (dev_is_sata(dev)) {
DECLARE_COMPLETION_ONSTACK(completion_setstate);
if (scsi_is_sas_phy_local(phy)) {
rc = 0;
goto out;
}
/* send internal ssp/sata/smp abort command to FW */
rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
dev, 1, 0);
msleep(100);
/* deregister the target device */
pm8001_dev_gone_notify(dev);
msleep(200);
/* send phy reset to hard reset the target */
rc = sas_phy_reset(phy, 1);
msleep(2000);
pm8001_dev->setds_completion = &completion_setstate;
wait_for_completion(&completion_setstate);
} else {
/* send internal ssp/sata/smp abort command to FW */
rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
dev, 1, 0);
msleep(100);
/* deregister the target device */
pm8001_dev_gone_notify(dev);
msleep(200);
/* send phy reset to hard reset the target */
rc = sas_phy_reset(phy, 1);
msleep(2000);
}
PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
pm8001_dev->device_id, rc));
out:
sas_put_local_phy(phy);
return rc;
}
/* mandatory SAM-3, the task reset the specified LUN*/
int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
{

View File

@ -1,5 +1,5 @@
/*
* PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
* PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
*
* Copyright (c) 2008-2009 USI Co., Ltd.
* All rights reserved.
@ -57,8 +57,8 @@
#include <linux/atomic.h>
#include "pm8001_defs.h"
#define DRV_NAME "pm8001"
#define DRV_VERSION "0.1.36"
#define DRV_NAME "pm80xx"
#define DRV_VERSION "0.1.37"
#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */
#define PM8001_INIT_LOGGING 0x02 /* driver init logging */
#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */
@ -66,8 +66,8 @@
#define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/
#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */
#define PM8001_MSG_LOGGING 0x40 /* misc message logging */
#define pm8001_printk(format, arg...) printk(KERN_INFO "%s %d:" format,\
__func__, __LINE__, ## arg)
#define pm8001_printk(format, arg...) printk(KERN_INFO "pm80xx %s %d:" \
format, __func__, __LINE__, ## arg)
#define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD) \
do { \
if (unlikely(HBA->logging_level & LEVEL)) \
@ -103,11 +103,12 @@ do { \
#define PM8001_READ_VPD
#define DEV_IS_EXPANDER(type) ((type == EDGE_DEV) || (type == FANOUT_DEV))
#define DEV_IS_EXPANDER(type) ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
#define PM8001_NAME_LENGTH 32 /* generic length of strings */
extern struct list_head hba_list;
extern const struct pm8001_dispatch pm8001_8001_dispatch;
extern const struct pm8001_dispatch pm8001_80xx_dispatch;
struct pm8001_hba_info;
struct pm8001_ccb_info;
@ -131,15 +132,15 @@ struct pm8001_ioctl_payload {
struct pm8001_dispatch {
char *name;
int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha, u32 signature);
int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha);
void (*chip_rst)(struct pm8001_hba_info *pm8001_ha);
int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha);
irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha);
irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha, u8 vec);
u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha);
int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha);
void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha);
void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha);
int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha, u8 vec);
void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
int (*smp_req)(struct pm8001_hba_info *pm8001_ha,
struct pm8001_ccb_info *ccb);
@ -173,6 +174,7 @@ struct pm8001_dispatch {
};
struct pm8001_chip_info {
u32 encrypt;
u32 n_phy;
const struct pm8001_dispatch *dispatch;
};
@ -204,7 +206,7 @@ struct pm8001_phy {
};
struct pm8001_device {
enum sas_dev_type dev_type;
enum sas_device_type dev_type;
struct domain_device *sas_device;
u32 attached_phy;
u32 id;
@ -256,7 +258,20 @@ struct mpi_mem_req {
struct mpi_mem region[USI_MAX_MEMCNT];
};
struct main_cfg_table {
struct encrypt {
u32 cipher_mode;
u32 sec_mode;
u32 status;
u32 flag;
};
struct sas_phy_attribute_table {
u32 phystart1_16[16];
u32 outbound_hw_event_pid1_16[16];
};
union main_cfg_table {
struct {
u32 signature;
u32 interface_rev;
u32 firmware_rev;
@ -292,19 +307,69 @@ struct main_cfg_table {
u32 fatal_err_dump_length1;
u32 hda_mode_flag;
u32 anolog_setup_table_offset;
u32 rsvd[4];
} pm8001_tbl;
struct {
u32 signature;
u32 interface_rev;
u32 firmware_rev;
u32 max_out_io;
u32 max_sgl;
u32 ctrl_cap_flag;
u32 gst_offset;
u32 inbound_queue_offset;
u32 outbound_queue_offset;
u32 inbound_q_nppd_hppd;
u32 rsvd[8];
u32 crc_core_dump;
u32 rsvd1;
u32 upper_event_log_addr;
u32 lower_event_log_addr;
u32 event_log_size;
u32 event_log_severity;
u32 upper_pcs_event_log_addr;
u32 lower_pcs_event_log_addr;
u32 pcs_event_log_size;
u32 pcs_event_log_severity;
u32 fatal_err_interrupt;
u32 fatal_err_dump_offset0;
u32 fatal_err_dump_length0;
u32 fatal_err_dump_offset1;
u32 fatal_err_dump_length1;
u32 gpio_led_mapping;
u32 analog_setup_table_offset;
u32 int_vec_table_offset;
u32 phy_attr_table_offset;
u32 port_recovery_timer;
u32 interrupt_reassertion_delay;
} pm80xx_tbl;
};
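
Turning main_cfg_table into a union keeps one pm8001_hba_info field serving both layouts, so every table access has to pick the member matching the chip family, using the same chip_8001 test seen in pm8001_phy_control above. A minimal sketch of the access pattern against the structures in this header (the helper name is illustrative and this is kernel-context code, not a standalone program):

static u32 cfg_tbl_interface_rev(struct pm8001_hba_info *pm8001_ha)
{
	/* both layouts carry signature/interface_rev/firmware_rev up front */
	if (pm8001_ha->chip_id == chip_8001)
		return pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev;
	return pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev;
}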
struct general_status_table {
union general_status_table {
struct {
u32 gst_len_mpistate;
u32 iq_freeze_state0;
u32 iq_freeze_state1;
u32 msgu_tcnt;
u32 iop_tcnt;
u32 reserved;
u32 rsvd;
u32 phy_state[8];
u32 reserved1;
u32 reserved2;
u32 reserved3;
u32 gpio_input_val;
u32 rsvd1[2];
u32 recover_err_info[8];
} pm8001_tbl;
struct {
u32 gst_len_mpistate;
u32 iq_freeze_state0;
u32 iq_freeze_state1;
u32 msgu_tcnt;
u32 iop_tcnt;
u32 rsvd[9];
u32 gpio_input_val;
u32 rsvd1[2];
u32 recover_err_info[8];
} pm80xx_tbl;
};
struct inbound_queue_table {
u32 element_pri_size_cnt;
@ -351,15 +416,21 @@ struct pm8001_hba_info {
struct device *dev;
struct pm8001_hba_memspace io_mem[6];
struct mpi_mem_req memoryMap;
struct encrypt encrypt_info; /* support encryption */
void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/
void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/
void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/
void __iomem *inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/
void __iomem *outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/
struct main_cfg_table main_cfg_tbl;
struct general_status_table gs_tbl;
struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_INB_NUM];
struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_OUTB_NUM];
void __iomem *pspa_q_tbl_addr;
/*MPI SAS PHY attributes Queue Config Table Addr*/
void __iomem *ivt_tbl_addr; /*MPI IVT Table Addr */
union main_cfg_table main_cfg_tbl;
union general_status_table gs_tbl;
struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM];
struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_SPCV_OUTB_NUM];
struct sas_phy_attribute_table phy_attr_table;
/* MPI SAS PHY attributes */
u8 sas_addr[SAS_ADDR_SIZE];
struct sas_ha_struct *sas;/* SCSI/SAS glue */
struct Scsi_Host *shost;
@ -372,10 +443,12 @@ struct pm8001_hba_info {
struct pm8001_port port[PM8001_MAX_PHYS];
u32 id;
u32 irq;
u32 iomb_size; /* SPC and SPCV IOMB size */
struct pm8001_device *devices;
struct pm8001_ccb_info *ccb_info;
#ifdef PM8001_USE_MSIX
struct msix_entry msix_entries[16];/*for msi-x interrupt*/
struct msix_entry msix_entries[PM8001_MAX_MSIX_VEC];
/*for msi-x interrupt*/
int number_of_intr;/*will be used in remove()*/
#endif
#ifdef PM8001_USE_TASKLET
@ -383,7 +456,10 @@ struct pm8001_hba_info {
#endif
u32 logging_level;
u32 fw_status;
u32 smp_exp_mode;
u32 int_vector;
const struct firmware *fw_image;
u8 outq[PM8001_MAX_MSIX_VEC];
};
struct pm8001_work {
@ -419,6 +495,9 @@ struct pm8001_fw_image_header {
#define FLASH_UPDATE_DNLD_NOT_SUPPORTED 0x10
#define FLASH_UPDATE_DISABLED 0x11
#define NCQ_READ_LOG_FLAG 0x80000000
#define NCQ_ABORT_ALL_FLAG 0x40000000
#define NCQ_2ND_RLE_FLAG 0x20000000
/**
* Param structure for firmware flash update.
*/
@ -484,6 +563,7 @@ int pm8001_dev_found(struct domain_device *dev);
void pm8001_dev_gone(struct domain_device *dev);
int pm8001_lu_reset(struct domain_device *dev, u8 *lun);
int pm8001_I_T_nexus_reset(struct domain_device *dev);
int pm8001_I_T_nexus_event_handler(struct domain_device *dev);
int pm8001_query_task(struct sas_task *task);
void pm8001_open_reject_retry(
struct pm8001_hba_info *pm8001_ha,
@ -493,6 +573,61 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo,
u32 mem_size, u32 align);
void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha);
int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
struct inbound_queue_table *circularQ,
u32 opCode, void *payload, u32 responseQueue);
int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
u16 messageSize, void **messagePtr);
u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
struct outbound_queue_table *circularQ, u8 bc);
u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
struct outbound_queue_table *circularQ,
void **messagePtr1, u8 *pBC);
int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_dev, u32 state);
int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
void *payload);
int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
void *fw_flash_updata_info, u32 tag);
int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
struct pm8001_ccb_info *ccb,
struct pm8001_tmf_task *tmf);
int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_dev,
u8 flag, u32 task_tag, u32 cmd_tag);
int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id);
void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd);
void pm8001_work_fn(struct work_struct *work);
int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha,
void *data, int handler);
void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb);
void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb);
void pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb);
int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha,
void *piomb);
void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate);
void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr);
void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i);
int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb);
int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb);
int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
struct sas_task *pm8001_alloc_task(void);
void pm8001_task_done(struct sas_task *task);
void pm8001_free_task(struct sas_task *task);
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag);
struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
u32 device_id);
int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha);
int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
/* ctl shared API */

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -24,7 +24,9 @@ config SCSI_QLA_FC
Firmware images can be retrieved from:
ftp://ftp.qlogic.com/outgoing/linux/firmware/
http://ldriver.qlogic.com/firmware/
They are also included in the linux-firmware tree.
config TCM_QLA2XXX
tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"

View File

@ -3003,12 +3003,10 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
lcmd_pkt->cntrl_flags =
__constant_cpu_to_le16(TMF_WRITE_DATA);
lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
vha->qla_stats.output_bytes += scsi_bufflen(cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
lcmd_pkt->cntrl_flags =
__constant_cpu_to_le16(TMF_READ_DATA);
lcmd_pkt->cntrl_flags = TMF_READ_DATA;
vha->qla_stats.input_bytes += scsi_bufflen(cmd);
}

View File

@ -644,7 +644,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
qla2x00_rel_sp(sp->fcport->vha, sp);
}
void
static void
qla2x00_sp_compl(void *data, void *ptr, int res)
{
struct qla_hw_data *ha = (struct qla_hw_data *)data;

View File

@ -507,6 +507,7 @@ static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
mrb->mbox_cmd = in_mbox[0];
wmb();
ha->iocb_cnt += mrb->iocb_cnt;
ha->isp_ops->queue_iocb(ha);
exit_mbox_iocb:
spin_unlock_irqrestore(&ha->hardware_lock, flags);

View File

@ -2216,14 +2216,14 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
fw_ddb_entry->tcp_xmt_wsf = cpu_to_le16(conn->tcp_xmit_wsf);
fw_ddb_entry->tcp_rcv_wsf = cpu_to_le16(conn->tcp_recv_wsf);
fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
fw_ddb_entry->stat_sn = cpu_to_le16(conn->statsn);
fw_ddb_entry->exp_stat_sn = cpu_to_le16(conn->exp_statsn);
fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type);
fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
@ -5504,9 +5504,9 @@ static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
* If this is invoked as a result of a userspace call then the entry is marked
* as nonpersistent using flash_state field.
**/
int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
uint16_t *idx, int user)
static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
uint16_t *idx, int user)
{
struct iscsi_bus_flash_session *fnode_sess = NULL;
struct iscsi_bus_flash_conn *fnode_conn = NULL;
@ -5605,6 +5605,7 @@ static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
ql4_printk(KERN_ERR, ha,
"%s: A non-persistent entry %s found\n",
__func__, dev->kobj.name);
put_device(dev);
goto exit_ddb_add;
}
@ -6112,8 +6113,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
int parent_type, parent_index = 0xffff;
int rc = 0;
dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
iscsi_is_flashnode_conn_dev);
dev = iscsi_find_flashnode_conn(fnode_sess);
if (!dev)
return -EIO;
@ -6276,8 +6276,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
rc = sprintf(buf, "\n");
break;
case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
if ((fnode_sess->discovery_parent_idx) >= 0 &&
(fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES))
if (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES)
parent_index = fnode_sess->discovery_parent_idx;
rc = sprintf(buf, "%u\n", parent_index);
@ -6287,8 +6286,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
parent_type = ISCSI_DISC_PARENT_ISNS;
else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
parent_type = ISCSI_DISC_PARENT_UNKNOWN;
else if (fnode_sess->discovery_parent_type >= 0 &&
fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
parent_type = ISCSI_DISC_PARENT_SENDTGT;
else
parent_type = ISCSI_DISC_PARENT_UNKNOWN;
@ -6349,6 +6347,8 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
rc = -ENOSYS;
break;
}
put_device(dev);
return rc;
}
@ -6368,20 +6368,11 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
{
struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
struct scsi_qla_host *ha = to_qla_host(shost);
struct dev_db_entry *fw_ddb_entry = NULL;
struct iscsi_flashnode_param_info *fnode_param;
struct nlattr *attr;
int rc = QLA_ERROR;
uint32_t rem = len;
fw_ddb_entry = kzalloc(sizeof(*fw_ddb_entry), GFP_KERNEL);
if (!fw_ddb_entry) {
DEBUG2(ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate ddb buffer\n",
__func__));
return -ENOMEM;
}
nla_for_each_attr(attr, data, len, rem) {
fnode_param = nla_data(attr);
@ -6590,16 +6581,11 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
struct dev_db_entry *fw_ddb_entry = NULL;
dma_addr_t fw_ddb_entry_dma;
uint16_t *ddb_cookie = NULL;
size_t ddb_size;
size_t ddb_size = 0;
void *pddb = NULL;
int target_id;
int rc = 0;
if (!fnode_sess) {
rc = -EINVAL;
goto exit_ddb_del;
}
if (fnode_sess->is_boot_target) {
rc = -EPERM;
DEBUG2(ql4_printk(KERN_ERR, ha,
@ -6631,8 +6617,7 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
dev_db_start_offset += (fnode_sess->target_id *
sizeof(*fw_ddb_entry));
dev_db_start_offset += (void *)&(fw_ddb_entry->cookie) -
(void *)fw_ddb_entry;
dev_db_start_offset += offsetof(struct dev_db_entry, cookie);
ddb_size = sizeof(*ddb_cookie);
}
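
The replaced line computed the offset of the cookie field by subtracting two pointers through a template object; offsetof() yields the same byte count without the fake dereference. A standalone sketch with a stand-in layout (the real dev_db_entry is much larger and is not reproduced here):

#include <stdio.h>
#include <stddef.h>

struct dev_db_entry_demo {	/* stand-in; not the real qla4xxx layout */
	unsigned char options;
	unsigned char control;
	unsigned short cookie;
};

int main(void)
{
	struct dev_db_entry_demo e;

	/* the old-style pointer arithmetic and offsetof() agree */
	printf("%zu == %zu\n",
	       (size_t)((char *)&e.cookie - (char *)&e),
	       offsetof(struct dev_db_entry_demo, cookie));
	return 0;
}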

View File

@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#define QLA4XXX_DRIVER_VERSION "5.03.00-k8"
#define QLA4XXX_DRIVER_VERSION "5.03.00-k9"

View File

@ -1997,24 +1997,39 @@ out:
return ret;
}
static unsigned long lba_to_map_index(sector_t lba)
{
if (scsi_debug_unmap_alignment) {
lba += scsi_debug_unmap_granularity -
scsi_debug_unmap_alignment;
}
do_div(lba, scsi_debug_unmap_granularity);
return lba;
}
static sector_t map_index_to_lba(unsigned long index)
{
return index * scsi_debug_unmap_granularity -
scsi_debug_unmap_alignment;
}
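
Read together, the two helpers define the bitmap geometry: each bit covers one granularity-sized run of LBAs, shifted by the unmap alignment. A standalone sketch of the round trip, taking unmap_alignment as 0 so the math reduces to a divide and a multiply (the granularity value is arbitrary):

#include <stdio.h>

#define GRANULARITY 4ULL	/* stand-in for scsi_debug_unmap_granularity */

static unsigned long lba_to_map_index(unsigned long long lba)
{
	return (unsigned long)(lba / GRANULARITY);	/* alignment == 0 case */
}

static unsigned long long map_index_to_lba(unsigned long index)
{
	return index * GRANULARITY;
}

int main(void)
{
	unsigned long long lba;

	/* lbas 0-3 share bit 0, lbas 4-7 share bit 1, and so on */
	for (lba = 0; lba < 12; lba++)
		printf("lba %llu -> bit %lu (run starts at lba %llu)\n",
		       lba, lba_to_map_index(lba),
		       map_index_to_lba(lba_to_map_index(lba)));
	return 0;
}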
static unsigned int map_state(sector_t lba, unsigned int *num)
{
unsigned int granularity, alignment, mapped;
sector_t block, next, end;
sector_t end;
unsigned int mapped;
unsigned long index;
unsigned long next;
granularity = scsi_debug_unmap_granularity;
alignment = granularity - scsi_debug_unmap_alignment;
block = lba + alignment;
do_div(block, granularity);
mapped = test_bit(block, map_storep);
index = lba_to_map_index(lba);
mapped = test_bit(index, map_storep);
if (mapped)
next = find_next_zero_bit(map_storep, map_size, block);
next = find_next_zero_bit(map_storep, map_size, index);
else
next = find_next_bit(map_storep, map_size, block);
next = find_next_bit(map_storep, map_size, index);
end = next * granularity - scsi_debug_unmap_alignment;
end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
*num = end - lba;
return mapped;
@ -2022,47 +2037,37 @@ static unsigned int map_state(sector_t lba, unsigned int *num)
static void map_region(sector_t lba, unsigned int len)
{
unsigned int granularity, alignment;
sector_t end = lba + len;
granularity = scsi_debug_unmap_granularity;
alignment = granularity - scsi_debug_unmap_alignment;
while (lba < end) {
sector_t block, rem;
unsigned long index = lba_to_map_index(lba);
block = lba + alignment;
rem = do_div(block, granularity);
if (index < map_size)
set_bit(index, map_storep);
if (block < map_size)
set_bit(block, map_storep);
lba += granularity - rem;
lba = map_index_to_lba(index + 1);
}
}
static void unmap_region(sector_t lba, unsigned int len)
{
unsigned int granularity, alignment;
sector_t end = lba + len;
granularity = scsi_debug_unmap_granularity;
alignment = granularity - scsi_debug_unmap_alignment;
while (lba < end) {
sector_t block, rem;
unsigned long index = lba_to_map_index(lba);
block = lba + alignment;
rem = do_div(block, granularity);
if (rem == 0 && lba + granularity < end && block < map_size) {
clear_bit(block, map_storep);
if (scsi_debug_lbprz)
if (lba == map_index_to_lba(index) &&
lba + scsi_debug_unmap_granularity <= end &&
index < map_size) {
clear_bit(index, map_storep);
if (scsi_debug_lbprz) {
memset(fake_storep +
block * scsi_debug_sector_size, 0,
scsi_debug_sector_size);
lba * scsi_debug_sector_size, 0,
scsi_debug_sector_size *
scsi_debug_unmap_granularity);
}
}
lba += granularity - rem;
lba = map_index_to_lba(index + 1);
}
}
@ -2089,7 +2094,7 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
write_lock_irqsave(&atomic_rw, iflags);
ret = do_device_access(SCpnt, devip, lba, num, 1);
if (scsi_debug_unmap_granularity)
if (scsi_debug_lbp())
map_region(lba, num);
write_unlock_irqrestore(&atomic_rw, iflags);
if (-1 == ret)
@ -2122,7 +2127,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
write_lock_irqsave(&atomic_rw, iflags);
if (unmap && scsi_debug_unmap_granularity) {
if (unmap && scsi_debug_lbp()) {
unmap_region(lba, num);
goto out;
}
@ -2146,7 +2151,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
fake_storep + (lba * scsi_debug_sector_size),
scsi_debug_sector_size);
if (scsi_debug_unmap_granularity)
if (scsi_debug_lbp())
map_region(lba, num);
out:
write_unlock_irqrestore(&atomic_rw, iflags);
@ -3389,8 +3394,6 @@ static int __init scsi_debug_init(void)
/* Logical Block Provisioning */
if (scsi_debug_lbp()) {
unsigned int map_bytes;
scsi_debug_unmap_max_blocks =
clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
@ -3401,16 +3404,16 @@ static int __init scsi_debug_init(void)
clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
if (scsi_debug_unmap_alignment &&
scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
scsi_debug_unmap_granularity <=
scsi_debug_unmap_alignment) {
printk(KERN_ERR
"%s: ERR: unmap_granularity < unmap_alignment\n",
"%s: ERR: unmap_granularity <= unmap_alignment\n",
__func__);
return -EINVAL;
}
map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
map_bytes = map_size >> 3;
map_storep = vmalloc(map_bytes);
map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
map_size);
@ -3421,7 +3424,7 @@ static int __init scsi_debug_init(void)
goto free_vm;
}
memset(map_storep, 0x0, map_bytes);
bitmap_zero(map_storep, map_size);
/* Map first 1KB for partition table */
if (scsi_debug_num_parts)

View File

@ -25,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@ -791,32 +792,48 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
struct scsi_device *sdev = scmd->device;
struct Scsi_Host *shost = sdev->host;
DECLARE_COMPLETION_ONSTACK(done);
unsigned long timeleft;
unsigned long timeleft = timeout;
struct scsi_eh_save ses;
const unsigned long stall_for = msecs_to_jiffies(100);
int rtn;
retry:
scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
shost->eh_action = &done;
scsi_log_send(scmd);
scmd->scsi_done = scsi_eh_done;
shost->hostt->queuecommand(shost, scmd);
timeleft = wait_for_completion_timeout(&done, timeout);
rtn = shost->hostt->queuecommand(shost, scmd);
if (rtn) {
if (timeleft > stall_for) {
scsi_eh_restore_cmnd(scmd, &ses);
timeleft -= stall_for;
msleep(jiffies_to_msecs(stall_for));
goto retry;
}
/* signal not to enter either branch of the if () below */
timeleft = 0;
rtn = NEEDS_RETRY;
} else {
timeleft = wait_for_completion_timeout(&done, timeout);
}
shost->eh_action = NULL;
scsi_log_completion(scmd, SUCCESS);
scsi_log_completion(scmd, rtn);
SCSI_LOG_ERROR_RECOVERY(3,
printk("%s: scmd: %p, timeleft: %ld\n",
__func__, scmd, timeleft));
/*
* If there is time left scsi_eh_done got called, and we will
* examine the actual status codes to see whether the command
* actually did complete normally, else tell the host to forget
* about this command.
* If there is time left scsi_eh_done got called, and we will examine
* the actual status codes to see whether the command actually did
* complete normally, else if we have a zero return and no time left,
* the command must still be pending, so abort it and return FAILED.
* If we never actually managed to issue the command, because
* ->queuecommand() kept returning non zero, use the rtn = NEEDS_RETRY
* value set above (so don't execute either branch of the if)
*/
if (timeleft) {
rtn = scsi_eh_completed_normally(scmd);
@ -837,7 +854,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
rtn = FAILED;
break;
}
} else {
} else if (!rtn) {
scsi_abort_eh_cmnd(scmd);
rtn = FAILED;
}
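
The effect of the retry loop is a simple budget: every rejected submit burns one 100 ms stall out of the caller's timeout, and the command is only given up once the budget is spent. A standalone sketch of that accounting in plain milliseconds (the fake queuecommand and its busy count are illustrative; the driver code above works in jiffies):

#include <stdio.h>

#define STALL_FOR 100	/* ms, mirrors msecs_to_jiffies(100) above */

static int busy_submits = 3;	/* pretend the host rejects 3 submits */

static int fake_queuecommand(void)
{
	return busy_submits-- > 0;	/* non-zero: host busy, try later */
}

int main(void)
{
	long timeleft = 1000;	/* ms, plays the role of the timeout arg */

	while (fake_queuecommand()) {
		if (timeleft <= STALL_FOR) {
			puts("budget spent: give up, NEEDS_RETRY");
			return 1;
		}
		timeleft -= STALL_FOR;
		printf("host busy, stalled %d ms, %ld ms left\n",
		       STALL_FOR, timeleft);
	}
	printf("issued; %ld ms left to wait for completion\n", timeleft);
	return 0;
}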

View File

@ -1019,8 +1019,7 @@ exit_match_index:
/**
* iscsi_get_flashnode_by_index -finds flashnode session entry by index
* @shost: pointer to host data
* @data: pointer to data containing value to use for comparison
* @fn: function pointer that does actual comparison
* @idx: index to match
*
* Finds the flashnode session object for the passed index
*
@ -1029,13 +1028,13 @@ exit_match_index:
* %NULL on failure
*/
static struct iscsi_bus_flash_session *
iscsi_get_flashnode_by_index(struct Scsi_Host *shost, void *data,
int (*fn)(struct device *dev, void *data))
iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx)
{
struct iscsi_bus_flash_session *fnode_sess = NULL;
struct device *dev;
dev = device_find_child(&shost->shost_gendev, data, fn);
dev = device_find_child(&shost->shost_gendev, &idx,
flashnode_match_index);
if (dev)
fnode_sess = iscsi_dev_to_flash_session(dev);
@ -1059,18 +1058,13 @@ struct device *
iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
int (*fn)(struct device *dev, void *data))
{
struct device *dev;
dev = device_find_child(&shost->shost_gendev, data, fn);
return dev;
return device_find_child(&shost->shost_gendev, data, fn);
}
EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
/**
* iscsi_find_flashnode_conn - finds flashnode connection entry
* @fnode_sess: pointer to parent flashnode session entry
* @data: pointer to data containing value to use for comparison
* @fn: function pointer that does actual comparison
*
* Finds the flashnode connection object for the passed flashnode
* session entry
@ -1080,14 +1074,10 @@ EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
* %NULL on failure
*/
struct device *
iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess,
void *data,
int (*fn)(struct device *dev, void *data))
iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess)
{
struct device *dev;
dev = device_find_child(&fnode_sess->dev, data, fn);
return dev;
return device_find_child(&fnode_sess->dev, NULL,
iscsi_is_flashnode_conn_dev);
}
EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn);
@ -2808,7 +2798,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
struct iscsi_bus_flash_session *fnode_sess;
struct iscsi_bus_flash_conn *fnode_conn;
struct device *dev;
uint32_t *idx;
uint32_t idx;
int err = 0;
if (!transport->set_flashnode_param) {
@ -2824,25 +2814,27 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
goto put_host;
}
idx = &ev->u.set_flashnode.flashnode_idx;
fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
flashnode_match_index);
idx = ev->u.set_flashnode.flashnode_idx;
fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
if (!fnode_sess) {
pr_err("%s could not find flashnode %u for host no %u\n",
__func__, *idx, ev->u.set_flashnode.host_no);
__func__, idx, ev->u.set_flashnode.host_no);
err = -ENODEV;
goto put_host;
}
dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
iscsi_is_flashnode_conn_dev);
dev = iscsi_find_flashnode_conn(fnode_sess);
if (!dev) {
err = -ENODEV;
goto put_host;
goto put_sess;
}
fnode_conn = iscsi_dev_to_flash_conn(dev);
err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len);
put_device(dev);
put_sess:
put_device(&fnode_sess->dev);
put_host:
scsi_host_put(shost);
@ -2891,7 +2883,7 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport,
{
struct Scsi_Host *shost;
struct iscsi_bus_flash_session *fnode_sess;
uint32_t *idx;
uint32_t idx;
int err = 0;
if (!transport->del_flashnode) {
@ -2907,17 +2899,17 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport,
goto put_host;
}
idx = &ev->u.del_flashnode.flashnode_idx;
fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
flashnode_match_index);
idx = ev->u.del_flashnode.flashnode_idx;
fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
if (!fnode_sess) {
pr_err("%s could not find flashnode %u for host no %u\n",
__func__, *idx, ev->u.del_flashnode.host_no);
__func__, idx, ev->u.del_flashnode.host_no);
err = -ENODEV;
goto put_host;
}
err = transport->del_flashnode(fnode_sess);
put_device(&fnode_sess->dev);
put_host:
scsi_host_put(shost);
@ -2933,7 +2925,7 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport,
struct iscsi_bus_flash_session *fnode_sess;
struct iscsi_bus_flash_conn *fnode_conn;
struct device *dev;
uint32_t *idx;
uint32_t idx;
int err = 0;
if (!transport->login_flashnode) {
@ -2949,25 +2941,27 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport,
goto put_host;
}
idx = &ev->u.login_flashnode.flashnode_idx;
fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
flashnode_match_index);
idx = ev->u.login_flashnode.flashnode_idx;
fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
if (!fnode_sess) {
pr_err("%s could not find flashnode %u for host no %u\n",
__func__, *idx, ev->u.login_flashnode.host_no);
__func__, idx, ev->u.login_flashnode.host_no);
err = -ENODEV;
goto put_host;
}
dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
iscsi_is_flashnode_conn_dev);
dev = iscsi_find_flashnode_conn(fnode_sess);
if (!dev) {
err = -ENODEV;
goto put_host;
goto put_sess;
}
fnode_conn = iscsi_dev_to_flash_conn(dev);
err = transport->login_flashnode(fnode_sess, fnode_conn);
put_device(dev);
put_sess:
put_device(&fnode_sess->dev);
put_host:
scsi_host_put(shost);
@ -2983,7 +2977,7 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport,
struct iscsi_bus_flash_session *fnode_sess;
struct iscsi_bus_flash_conn *fnode_conn;
struct device *dev;
uint32_t *idx;
uint32_t idx;
int err = 0;
if (!transport->logout_flashnode) {
@ -2999,26 +2993,28 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport,
goto put_host;
}
idx = &ev->u.logout_flashnode.flashnode_idx;
fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
flashnode_match_index);
idx = ev->u.logout_flashnode.flashnode_idx;
fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
if (!fnode_sess) {
pr_err("%s could not find flashnode %u for host no %u\n",
__func__, *idx, ev->u.logout_flashnode.host_no);
__func__, idx, ev->u.logout_flashnode.host_no);
err = -ENODEV;
goto put_host;
}
dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
iscsi_is_flashnode_conn_dev);
dev = iscsi_find_flashnode_conn(fnode_sess);
if (!dev) {
err = -ENODEV;
goto put_host;
goto put_sess;
}
fnode_conn = iscsi_dev_to_flash_conn(dev);
err = transport->logout_flashnode(fnode_sess, fnode_conn);
put_device(dev);
put_sess:
put_device(&fnode_sess->dev);
put_host:
scsi_host_put(shost);
@ -3985,8 +3981,10 @@ static __init int iscsi_transport_init(void)
}
iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
if (!iscsi_eh_timer_workq)
if (!iscsi_eh_timer_workq) {
err = -ENOMEM;
goto release_nls;
}
return 0;

View File

@ -142,6 +142,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
char *buffer_data;
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
static const char temp[] = "temporary ";
int len;
if (sdp->type != TYPE_DISK)
@ -150,6 +151,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
* it's not worth the risk */
return -EINVAL;
if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
buf += sizeof(temp) - 1;
sdkp->cache_override = 1;
} else {
sdkp->cache_override = 0;
}
for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
len = strlen(sd_cache_types[i]);
if (strncmp(sd_cache_types[i], buf, len) == 0 &&
@ -162,6 +170,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
return -EINVAL;
rcd = ct & 0x01 ? 1 : 0;
wce = ct & 0x02 ? 1 : 0;
if (sdkp->cache_override) {
sdkp->WCE = wce;
sdkp->RCD = rcd;
return count;
}
if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
SD_MAX_RETRIES, &data, NULL))
return -EINVAL;
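
In use, the existing cache_type strings simply gain a "temporary " prefix (trailing space included in the strncmp match above): writing something like "temporary write back" to a disk's sysfs cache_type attribute flips WCE/RCD in the driver state only, skipping the MODE SELECT, and sd_read_cache_type below returns early while cache_override is set so a revalidate does not clobber the override; sd_probe_async clears the flag again at probe time.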
@ -2318,6 +2333,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
int old_rcd = sdkp->RCD;
int old_dpofua = sdkp->DPOFUA;
if (sdkp->cache_override)
return;
first_len = 4;
if (sdp->skip_ms_page_8) {
if (sdp->type == TYPE_RBC)
@ -2811,6 +2830,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
sdkp->capacity = 0;
sdkp->media_present = 1;
sdkp->write_prot = 0;
sdkp->cache_override = 0;
sdkp->WCE = 0;
sdkp->RCD = 0;
sdkp->ATO = 0;

View File

@ -73,6 +73,7 @@ struct scsi_disk {
u8 protection_type;/* Data Integrity Field */
u8 provisioning_mode;
unsigned ATO : 1; /* state of disk ATO bit */
unsigned cache_override : 1; /* temp override of WCE,RCD */
unsigned WCE : 1; /* state of disk WCE bit */
unsigned RCD : 1; /* state of disk RCD bit, unused */
unsigned DPOFUA : 1; /* state of disk DPOFUA bit */

View File

@ -93,14 +93,6 @@ static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
if (sdt->app_tag == 0xffff)
return 0;
/* Bad ref tag received from disk */
if (sdt->ref_tag == 0xffffffff) {
printk(KERN_ERR
"%s: bad phys ref tag on sector %lu\n",
bix->disk_name, (unsigned long)sector);
return -EIO;
}
if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
printk(KERN_ERR
"%s: ref tag error on sector %lu (rcvd %u)\n",

View File

@ -57,3 +57,14 @@ config SCSI_UFSHCD_PCI
If you have a controller with this interface, say Y or M here.
If unsure, say N.
config SCSI_UFSHCD_PLATFORM
tristate "Platform bus based UFS Controller support"
depends on SCSI_UFSHCD
---help---
This selects the UFS host controller support. Select this if
you have a UFS controller on the platform bus.
If you have a controller with this interface, say Y or M here.
If unsure, say N.

View File

@ -1,3 +1,4 @@
# UFSHCD makefile
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o

View File

@ -0,0 +1,217 @@
/*
* Universal Flash Storage Host controller Platform bus based glue driver
*
* This code is based on drivers/scsi/ufs/ufshcd-pltfrm.c
* Copyright (C) 2011-2013 Samsung India Software Operations
*
* Authors:
* Santosh Yaraganavi <santosh.sy@samsung.com>
* Vinayak Holikatti <h.vinayak@samsung.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* See the COPYING file in the top-level directory or visit
* <http://www.gnu.org/licenses/gpl-2.0.html>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* This program is provided "AS IS" and "WITH ALL FAULTS" and
* without warranty of any kind. You are solely responsible for
* determining the appropriateness of using and distributing
* the program and assume all risks associated with your exercise
* of rights with respect to the program, including but not limited
* to infringement of third party rights, the risks and costs of
* program errors, damage to or loss of data, programs or equipment,
* and unavailability or interruption of operations. Under no
* circumstances will the contributor of this Program be liable for
* any damages of any kind arising from your use or distribution of
* this program.
*/
#include "ufshcd.h"
#include <linux/platform_device.h>
#ifdef CONFIG_PM
/**
* ufshcd_pltfrm_suspend - suspend power management function
* @dev: pointer to device handle
*
* Returns 0
*/
static int ufshcd_pltfrm_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct ufs_hba *hba = platform_get_drvdata(pdev);
/*
* TODO:
* 1. Call ufshcd_suspend
* 2. Do bus specific power management
*/
disable_irq(hba->irq);
return 0;
}
/**
* ufshcd_pltfrm_resume - resume power management function
* @dev: pointer to device handle
*
* Returns 0
*/
static int ufshcd_pltfrm_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct ufs_hba *hba = platform_get_drvdata(pdev);
/*
* TODO:
* 1. Call ufshcd_resume.
* 2. Do bus specific wake up
*/
enable_irq(hba->irq);
return 0;
}
#else
#define ufshcd_pltfrm_suspend NULL
#define ufshcd_pltfrm_resume NULL
#endif
/**
* ufshcd_pltfrm_probe - probe routine of the driver
* @pdev: pointer to Platform device handle
*
* Returns 0 on success, non-zero value on failure
*/
static int ufshcd_pltfrm_probe(struct platform_device *pdev)
{
struct ufs_hba *hba;
void __iomem *mmio_base;
struct resource *mem_res;
struct resource *irq_res;
resource_size_t mem_size;
int err;
struct device *dev = &pdev->dev;
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem_res) {
dev_err(&pdev->dev,
"Memory resource not available\n");
err = -ENODEV;
goto out_error;
}
mem_size = resource_size(mem_res);
if (!request_mem_region(mem_res->start, mem_size, "ufshcd")) {
dev_err(&pdev->dev,
"Cannot reserve the memory resource\n");
err = -EBUSY;
goto out_error;
}
mmio_base = ioremap_nocache(mem_res->start, mem_size);
if (!mmio_base) {
dev_err(&pdev->dev, "memory map failed\n");
err = -ENOMEM;
goto out_release_regions;
}
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq_res) {
dev_err(&pdev->dev, "IRQ resource not available\n");
err = -ENODEV;
goto out_iounmap;
}
err = dma_set_coherent_mask(dev, dev->coherent_dma_mask);
if (err) {
dev_err(&pdev->dev, "set dma mask failed\n");
goto out_iounmap;
}
err = ufshcd_init(&pdev->dev, &hba, mmio_base, irq_res->start);
if (err) {
dev_err(&pdev->dev, "Intialization failed\n");
goto out_iounmap;
}
platform_set_drvdata(pdev, hba);
return 0;
out_iounmap:
iounmap(mmio_base);
out_release_regions:
release_mem_region(mem_res->start, mem_size);
out_error:
return err;
}
/**
* ufshcd_pltfrm_remove - remove platform driver routine
* @pdev: pointer to platform device handle
*
* Returns 0 on success, non-zero value on failure
*/
static int ufshcd_pltfrm_remove(struct platform_device *pdev)
{
struct resource *mem_res;
resource_size_t mem_size;
struct ufs_hba *hba = platform_get_drvdata(pdev);
disable_irq(hba->irq);
/* Some buggy controllers raise an interrupt after
* the resources are removed. So first unregister the
* irq handler and then release the resources used by the driver
*/
free_irq(hba->irq, hba);
ufshcd_remove(hba);
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem_res)
dev_err(&pdev->dev, "ufshcd: Memory resource not available\n");
else {
mem_size = resource_size(mem_res);
release_mem_region(mem_res->start, mem_size);
}
platform_set_drvdata(pdev, NULL);
return 0;
}
static const struct of_device_id ufs_of_match[] = {
{ .compatible = "jedec,ufs-1.1"},
};
static const struct dev_pm_ops ufshcd_dev_pm_ops = {
.suspend = ufshcd_pltfrm_suspend,
.resume = ufshcd_pltfrm_resume,
};
static struct platform_driver ufshcd_pltfrm_driver = {
.probe = ufshcd_pltfrm_probe,
.remove = ufshcd_pltfrm_remove,
.driver = {
.name = "ufshcd",
.owner = THIS_MODULE,
.pm = &ufshcd_dev_pm_ops,
.of_match_table = ufs_of_match,
},
};
module_platform_driver(ufshcd_pltfrm_driver);
MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("UFS host controller Pltform bus based glue driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);

View File

@ -478,7 +478,7 @@ static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp)
ucd_cmd_ptr->header.dword_2 = 0;
ucd_cmd_ptr->exp_data_transfer_len =
cpu_to_be32(lrbp->cmd->transfersize);
cpu_to_be32(lrbp->cmd->sdb.length);
memcpy(ucd_cmd_ptr->cdb,
lrbp->cmd->cmnd,

View File

@ -2147,11 +2147,13 @@
#define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e
#define PCI_DEVICE_ID_NX2_57712 0x1662
#define PCI_DEVICE_ID_NX2_57712E 0x1663
#define PCI_DEVICE_ID_NX2_57712_MF 0x1663
#define PCI_DEVICE_ID_TIGON3_5714 0x1668
#define PCI_DEVICE_ID_TIGON3_5714S 0x1669
#define PCI_DEVICE_ID_TIGON3_5780 0x166a
#define PCI_DEVICE_ID_TIGON3_5780S 0x166b
#define PCI_DEVICE_ID_TIGON3_5705F 0x166e
#define PCI_DEVICE_ID_NX2_57712_VF 0x166f
#define PCI_DEVICE_ID_TIGON3_5754M 0x1672
#define PCI_DEVICE_ID_TIGON3_5755M 0x1673
#define PCI_DEVICE_ID_TIGON3_5756 0x1674
@ -2177,13 +2179,15 @@
#define PCI_DEVICE_ID_TIGON3_5787 0x169b
#define PCI_DEVICE_ID_TIGON3_5788 0x169c
#define PCI_DEVICE_ID_TIGON3_5789 0x169d
#define PCI_DEVICE_ID_NX2_57840_4_10 0x16a1
#define PCI_DEVICE_ID_NX2_57840_2_20 0x16a2
#define PCI_DEVICE_ID_NX2_57840_MF 0x16a4
#define PCI_DEVICE_ID_NX2_57800_MF 0x16a5
#define PCI_DEVICE_ID_TIGON3_5702X 0x16a6
#define PCI_DEVICE_ID_TIGON3_5703X 0x16a7
#define PCI_DEVICE_ID_TIGON3_5704S 0x16a8
#define PCI_DEVICE_ID_NX2_57800_VF 0x16a9
#define PCI_DEVICE_ID_NX2_5706S 0x16aa
#define PCI_DEVICE_ID_NX2_57840_MF 0x16a4
#define PCI_DEVICE_ID_NX2_5708S 0x16ac
#define PCI_DEVICE_ID_NX2_57840_VF 0x16ad
#define PCI_DEVICE_ID_NX2_57810_MF 0x16ae

View File

@ -118,7 +118,7 @@ struct ex_phy {
enum ex_phy_state phy_state;
enum sas_dev_type attached_dev_type;
enum sas_device_type attached_dev_type;
enum sas_linkrate linkrate;
u8 attached_sata_host:1;
@ -195,7 +195,7 @@ enum {
struct domain_device {
spinlock_t done_lock;
enum sas_dev_type dev_type;
enum sas_device_type dev_type;
enum sas_linkrate linkrate;
enum sas_linkrate min_linkrate;

View File

@ -107,7 +107,7 @@ enum osd_attributes_mode {
* int exponent: 04;
* }
*/
typedef __be32 __bitwise osd_cdb_offset;
typedef __be32 osd_cdb_offset;
enum {
OSD_OFFSET_UNUSED = 0xFFFFFFFF,

View File

@ -90,16 +90,18 @@ enum sas_oob_mode {
};
/* See sas_discover.c if you plan on changing these */
enum sas_dev_type {
NO_DEVICE = 0, /* protocol */
SAS_END_DEV = 1, /* protocol */
EDGE_DEV = 2, /* protocol */
FANOUT_DEV = 3, /* protocol */
SAS_HA = 4,
SATA_DEV = 5,
SATA_PM = 7,
SATA_PM_PORT= 8,
SATA_PENDING = 9,
enum sas_device_type {
/* these are SAS protocol defined (attached device type field) */
SAS_PHY_UNUSED = 0,
SAS_END_DEVICE = 1,
SAS_EDGE_EXPANDER_DEVICE = 2,
SAS_FANOUT_EXPANDER_DEVICE = 3,
/* these are internal to libsas */
SAS_HA = 4,
SAS_SATA_DEV = 5,
SAS_SATA_PM = 7,
SAS_SATA_PM_PORT = 8,
SAS_SATA_PENDING = 9,
};
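
With the protocol-defined codes (0-3) now shared between libsas and the transport class, an attached-device-type field decoded from an SMP response can be stored directly, and expander tests stay a two-value comparison, as the pm8001 DEV_IS_EXPANDER macro earlier in this merge does. A standalone sketch of that predicate (the helper name is illustrative):

#include <stdio.h>

enum sas_device_type {
	/* SAS protocol defined (attached device type field) */
	SAS_PHY_UNUSED = 0,
	SAS_END_DEVICE = 1,
	SAS_EDGE_EXPANDER_DEVICE = 2,
	SAS_FANOUT_EXPANDER_DEVICE = 3,
	/* internal to libsas */
	SAS_HA = 4,
	SAS_SATA_DEV = 5,
	SAS_SATA_PM = 7,
	SAS_SATA_PM_PORT = 8,
	SAS_SATA_PENDING = 9,
};

static int dev_type_is_expander(enum sas_device_type t)
{
	return t == SAS_EDGE_EXPANDER_DEVICE ||
	       t == SAS_FANOUT_EXPANDER_DEVICE;
}

int main(void)
{
	printf("edge: %d, sata: %d\n",
	       dev_type_is_expander(SAS_EDGE_EXPANDER_DEVICE),
	       dev_type_is_expander(SAS_SATA_DEV));
	return 0;
}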
enum sas_protocol {

View File

@ -32,8 +32,8 @@
static inline int dev_is_sata(struct domain_device *dev)
{
return dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
dev->dev_type == SATA_PM_PORT || dev->dev_type == SATA_PENDING;
return dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM ||
dev->dev_type == SAS_SATA_PM_PORT || dev->dev_type == SAS_SATA_PENDING;
}
int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy);

View File

@ -471,14 +471,10 @@ iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess);
extern void iscsi_destroy_all_flashnode(struct Scsi_Host *shost);
extern int iscsi_flashnode_bus_match(struct device *dev,
struct device_driver *drv);
extern int iscsi_is_flashnode_conn_dev(struct device *dev, void *data);
extern struct device *
iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
int (*fn)(struct device *dev, void *data));
extern struct device *
iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess,
void *data,
int (*fn)(struct device *dev, void *data));
iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess);
#endif

View File

@ -10,13 +10,6 @@ struct scsi_transport_template;
struct sas_rphy;
struct request;
enum sas_device_type {
SAS_PHY_UNUSED = 0,
SAS_END_DEVICE = 1,
SAS_EDGE_EXPANDER_DEVICE = 2,
SAS_FANOUT_EXPANDER_DEVICE = 3,
};
static inline int sas_protocol_ata(enum sas_protocol proto)
{
return ((proto & SAS_PROTOCOL_SATA) ||