mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-12-03 00:54:09 +08:00)
Merge branch '5.17/scsi-fixes' into 5.18/scsi-staging
Pull 5.17 fixes branch into 5.18 tree to resolve a few pm8001 driver merge conflicts.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit ac2beb4e3b
@@ -521,6 +521,8 @@ static void zfcp_fc_adisc_handler(void *data)
 		goto out;
 	}
 
+	/* re-init to undo drop from zfcp_fc_adisc() */
+	port->d_id = ntoh24(adisc_resp->adisc_port_id);
 	/* port is good, unblock rport without going through erp */
 	zfcp_scsi_schedule_rport_register(port);
 out:
@@ -534,6 +536,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
 	struct zfcp_fc_req *fc_req;
 	struct zfcp_adapter *adapter = port->adapter;
 	struct Scsi_Host *shost = adapter->scsi_host;
+	u32 d_id;
 	int ret;
 
 	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
@@ -558,7 +561,15 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
 	fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
 	hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
 
-	ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
+	d_id = port->d_id; /* remember as destination for send els below */
+	/*
+	 * Force fresh GID_PN lookup on next port recovery.
+	 * Must happen after request setup and before sending request,
+	 * to prevent race with port->d_id re-init in zfcp_fc_adisc_handler().
+	 */
+	port->d_id = 0;
+
+	ret = zfcp_fsf_send_els(adapter, d_id, &fc_req->ct_els,
 				ZFCP_FC_CTELS_TMO);
 	if (ret)
 		kmem_cache_free(zfcp_fc_req_cache, fc_req);
@@ -1567,8 +1567,6 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 	pci_try_set_mwi(pdev);
 
 	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-	if (retval)
-		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (retval) {
 		TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
 		retval = -ENODEV;
@@ -1786,8 +1784,6 @@ static int __maybe_unused twl_resume(struct device *dev)
 	pci_try_set_mwi(pdev);
 
 	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-	if (retval)
-		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (retval) {
 		TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
 		retval = -ENODEV;
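Note: the two 3w-sas hunks above, and the bfad and hisi_sas hunks further down, all drop the same dead fallback: on kernels of this vintage, if dma_set_mask_and_coherent() rejects a 64-bit mask there is no usable DMA addressing at all, so the retry with DMA_BIT_MASK(32) could never succeed. A minimal illustrative sketch of the resulting probe-time pattern (the function name here is hypothetical, not from the patch):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_setup_dma(struct pci_dev *pdev)
{
	/* Ask for 64-bit; if this fails, a 32-bit mask would fail too. */
	int ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

	if (ret)
		return -ENODEV;	/* no usable DMA addressing method */
	return 0;
}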
@@ -1507,7 +1507,6 @@ NCR_700_intr(int irq, void *dev_id)
 		struct scsi_cmnd *SCp = hostdata->cmd;
 
 		handled = 1;
-		SCp = hostdata->cmd;
 
 		if(istat & SCSI_INT_PENDING) {
 			udelay(10);
@@ -732,9 +732,6 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
 	pci_set_master(pdev);
 
 	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-	if (rc)
-		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-
 	if (rc) {
 		rc = -ENODEV;
 		printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev);
@@ -1559,9 +1556,6 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
 	pci_set_master(pdev);
 
 	rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64));
-	if (rc)
-		rc = dma_set_mask_and_coherent(&bfad->pcidev->dev,
-					       DMA_BIT_MASK(32));
 	if (rc)
 		goto out_disable_device;
 
@@ -82,7 +82,7 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
 static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
 static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
 					 struct device *parent, int npiv);
-static void bnx2fc_destroy_work(struct work_struct *work);
+static void bnx2fc_port_destroy(struct fcoe_port *port);
 
 static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
 static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
@@ -508,7 +508,8 @@ static int bnx2fc_l2_rcv_thread(void *arg)
 
 static void bnx2fc_recv_frame(struct sk_buff *skb)
 {
-	u32 fr_len;
+	u64 crc_err;
+	u32 fr_len, fr_crc;
 	struct fc_lport *lport;
 	struct fcoe_rcv_info *fr;
 	struct fc_stats *stats;
@@ -542,6 +543,11 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
 	skb_pull(skb, sizeof(struct fcoe_hdr));
 	fr_len = skb->len - sizeof(struct fcoe_crc_eof);
 
+	stats = per_cpu_ptr(lport->stats, get_cpu());
+	stats->RxFrames++;
+	stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+	put_cpu();
+
 	fp = (struct fc_frame *)skb;
 	fc_frame_init(fp);
 	fr_dev(fp) = lport;
@@ -624,16 +630,15 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
 		return;
 	}
 
-	stats = per_cpu_ptr(lport->stats, smp_processor_id());
-	stats->RxFrames++;
-	stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+	fr_crc = le32_to_cpu(fr_crc(fp));
 
-	if (le32_to_cpu(fr_crc(fp)) !=
-	    ~crc32(~0, skb->data, fr_len)) {
-		if (stats->InvalidCRCCount < 5)
+	if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) {
+		stats = per_cpu_ptr(lport->stats, get_cpu());
+		crc_err = (stats->InvalidCRCCount++);
+		put_cpu();
+		if (crc_err < 5)
 			printk(KERN_WARNING PFX "dropping frame with "
 			       "CRC error\n");
-		stats->InvalidCRCCount++;
 		kfree_skb(skb);
 		return;
 	}
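Note: the bnx2fc_recv_frame() hunks above move the per-CPU statistics updates under a get_cpu()/put_cpu() pair; the old smp_processor_id() lookup is not safe once this receive path can run preemptibly, and InvalidCRCCount is now sampled and incremented inside one pinned section. An illustrative, self-contained sketch of that pattern (names are hypothetical):

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/types.h>

struct example_stats {
	u64 rx_frames;
};

/* stats would come from alloc_percpu(struct example_stats) */
static void example_count_rx(struct example_stats __percpu *stats)
{
	struct example_stats *s = per_cpu_ptr(stats, get_cpu()); /* pins CPU */

	s->rx_frames++;
	put_cpu();	/* re-enables preemption */
}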
@@ -907,9 +912,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
 				__bnx2fc_destroy(interface);
 		}
 		mutex_unlock(&bnx2fc_dev_lock);
-
-		/* Ensure ALL destroy work has been completed before return */
-		flush_workqueue(bnx2fc_wq);
 		return;
 
 	default:
@@ -1215,8 +1217,8 @@ static int bnx2fc_vport_destroy(struct fc_vport *vport)
 	mutex_unlock(&n_port->lp_mutex);
 	bnx2fc_free_vport(interface->hba, port->lport);
 	bnx2fc_port_shutdown(port->lport);
+	bnx2fc_port_destroy(port);
 	bnx2fc_interface_put(interface);
-	queue_work(bnx2fc_wq, &port->destroy_work);
 	return 0;
 }
 
@@ -1525,7 +1527,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
 	port->lport = lport;
 	port->priv = interface;
 	port->get_netdev = bnx2fc_netdev;
-	INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
 
 	/* Configure fcoe_port */
 	rc = bnx2fc_lport_config(lport);
@@ -1653,8 +1654,8 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
 	bnx2fc_interface_cleanup(interface);
 	bnx2fc_stop(interface);
 	list_del(&interface->list);
+	bnx2fc_port_destroy(port);
 	bnx2fc_interface_put(interface);
-	queue_work(bnx2fc_wq, &port->destroy_work);
 }
 
 /**
@@ -1694,15 +1695,12 @@ netdev_err:
 	return rc;
 }
 
-static void bnx2fc_destroy_work(struct work_struct *work)
+static void bnx2fc_port_destroy(struct fcoe_port *port)
 {
-	struct fcoe_port *port;
 	struct fc_lport *lport;
 
-	port = container_of(work, struct fcoe_port, destroy_work);
 	lport = port->lport;
-
-	BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
+	BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport);
 
 	bnx2fc_if_destroy(lport);
 }
@@ -2556,9 +2554,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
 			__bnx2fc_destroy(interface);
 	mutex_unlock(&bnx2fc_dev_lock);
 
-	/* Ensure ALL destroy work has been completed before return */
-	flush_workqueue(bnx2fc_wq);
-
 	bnx2fc_ulp_stop(hba);
 	/* unregister cnic device */
 	if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
@@ -46,18 +46,14 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
 
 	efc = node->efc;
 
-	spin_lock_irqsave(&node->els_ios_lock, flags);
-
 	if (!node->els_io_enabled) {
 		efc_log_err(efc, "els io alloc disabled\n");
-		spin_unlock_irqrestore(&node->els_ios_lock, flags);
 		return NULL;
 	}
 
 	els = mempool_alloc(efc->els_io_pool, GFP_ATOMIC);
 	if (!els) {
 		atomic_add_return(1, &efc->els_io_alloc_failed_count);
-		spin_unlock_irqrestore(&node->els_ios_lock, flags);
 		return NULL;
 	}
 
@@ -74,7 +70,6 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
 					       &els->io.req.phys, GFP_KERNEL);
 	if (!els->io.req.virt) {
 		mempool_free(els, efc->els_io_pool);
-		spin_unlock_irqrestore(&node->els_ios_lock, flags);
 		return NULL;
 	}
 
@@ -94,10 +89,11 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
 
 		/* add els structure to ELS IO list */
 		INIT_LIST_HEAD(&els->list_entry);
+		spin_lock_irqsave(&node->els_ios_lock, flags);
 		list_add_tail(&els->list_entry, &node->els_ios_list);
+		spin_unlock_irqrestore(&node->els_ios_lock, flags);
 	}
 
-	spin_unlock_irqrestore(&node->els_ios_lock, flags);
 	return els;
 }
 
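Note: taken together, the three efc_els_io_alloc_size() hunks narrow the els_ios_lock critical section: allocation (including a GFP_KERNEL DMA allocation that must not run under a spinlock) happens first, and only the shared-list insertion stays locked. A schematic sketch of the resulting shape, with hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_node {
	spinlock_t lock;
	struct list_head ios;
};

/* Allocate outside the lock; lock only around the shared-list update. */
static void example_track_io(struct example_node *node, struct list_head *entry)
{
	unsigned long flags;

	INIT_LIST_HEAD(entry);
	spin_lock_irqsave(&node->lock, flags);
	list_add_tail(entry, &node->ios);
	spin_unlock_irqrestore(&node->lock, flags);
}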
@@ -400,8 +400,7 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
 			   struct hisi_sas_slot *slot,
 			   struct hisi_sas_dq *dq,
 			   struct hisi_sas_device *sas_dev,
-			   struct hisi_sas_internal_abort *abort,
-			   struct hisi_sas_tmf_task *tmf)
+			   struct hisi_sas_internal_abort *abort)
 {
 	struct hisi_sas_cmd_hdr *cmd_hdr_base;
 	int dlvry_queue_slot, dlvry_queue;
@@ -426,8 +425,6 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
 	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
 	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
 
-	slot->tmf = tmf;
-	slot->is_internal = tmf;
 	task->lldd_task = slot;
 
 	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
@@ -582,7 +579,7 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
 	slot->is_internal = tmf;
 
 	/* protect task_prep and start_delivery sequence */
-	hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, NULL, tmf);
+	hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, NULL);
 
 	return 0;
 
@@ -1374,12 +1371,13 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
 	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
 	struct device *dev = hisi_hba->dev;
 	int s = sizeof(struct host_to_dev_fis);
+	struct hisi_sas_tmf_task tmf = {};
 
 	ata_for_each_link(link, ap, EDGE) {
 		int pmp = sata_srst_pmp(link);
 
 		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
-		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
+		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, &tmf);
 		if (rc != TMF_RESP_FUNC_COMPLETE)
 			break;
 	}
@@ -1390,7 +1388,7 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
 
 			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
 			rc = hisi_sas_exec_internal_tmf_task(device, fis,
-							     s, NULL);
+							     s, &tmf);
 			if (rc != TMF_RESP_FUNC_COMPLETE)
 				dev_err(dev, "ata disk %016llx de-reset failed\n",
 					SAS_ADDR(device->sas_addr));
@@ -2061,7 +2059,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
 	slot->port = port;
 	slot->is_internal = true;
 
-	hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, abort, NULL);
+	hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, abort);
 
 	return 0;
 
@@ -2660,9 +2658,6 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
 		goto err_out;
 
 	error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
-	if (error)
-		error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-
 	if (error) {
 		dev_err(dev, "No usable DMA addressing method\n");
 		goto err_out;
@@ -4694,8 +4694,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_out;
 
 	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-	if (rc)
-		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (rc) {
 		dev_err(dev, "No usable DMA addressing method\n");
 		rc = -ENODEV;
@@ -1161,6 +1161,16 @@ struct lpfc_hba {
 	uint32_t cfg_hostmem_hgp;
 	uint32_t cfg_log_verbose;
 	uint32_t cfg_enable_fc4_type;
+#define LPFC_ENABLE_FCP  1
+#define LPFC_ENABLE_NVME 2
+#define LPFC_ENABLE_BOTH 3
+#if (IS_ENABLED(CONFIG_NVME_FC))
+#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
+#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
+#else
+#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_FCP
+#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_FCP
+#endif
 	uint32_t cfg_aer_support;
 	uint32_t cfg_sriov_nr_virtfn;
 	uint32_t cfg_request_firmware_upgrade;
@@ -1182,9 +1192,6 @@ struct lpfc_hba {
 	uint32_t cfg_ras_fwlog_func;
 	uint32_t cfg_enable_bbcr;	/* Enable BB Credit Recovery */
 	uint32_t cfg_enable_dpp;	/* Enable Direct Packet Push */
-#define LPFC_ENABLE_FCP  1
-#define LPFC_ENABLE_NVME 2
-#define LPFC_ENABLE_BOTH 3
 	uint32_t cfg_enable_pbde;
 	uint32_t cfg_enable_mi;
 	struct nvmet_fc_target_port *targetport;
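Note: with the constants moved next to cfg_enable_fc4_type, the header can derive both the default and the upper bound for the attribute from the kernel configuration; IS_ENABLED(CONFIG_NVME_FC) evaluates to 1 whether the NVMe FC transport is built in or modular, and to 0 otherwise, so NVMe-less builds cap the attribute at FCP only. A self-contained illustration of the construct (the EX_* names are hypothetical):

#include <linux/kconfig.h>

#define EX_ENABLE_FCP	1
#define EX_ENABLE_BOTH	3

#if (IS_ENABLED(CONFIG_NVME_FC))
#define EX_MAX_FC4_TYPE	EX_ENABLE_BOTH	/* NVMe transport available */
#else
#define EX_MAX_FC4_TYPE	EX_ENABLE_FCP	/* FCP only */
#endif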
@@ -3978,8 +3978,8 @@ LPFC_ATTR_R(nvmet_mrq_post,
  * 3 - register both FCP and NVME
  * Supported values are [1,3]. Default value is 3
  */
-LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
-	    LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
+LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE,
+	    LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE,
 	    "Enable FC4 Protocol support - FCP / NVME");
 
 /*
@@ -2104,7 +2104,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
 	}
 	if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
 	    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"3143 Port Down: Firmware Update "
 				"Detected\n");
 		en_rn_msg = false;
@@ -13363,6 +13363,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
 	uint32_t uerr_sta_hi, uerr_sta_lo;
 	uint32_t if_type, portsmphr;
 	struct lpfc_register portstat_reg;
+	u32 logmask;
 
 	/*
 	 * For now, use the SLI4 device internal unrecoverable error
@@ -13413,7 +13414,12 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
 				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
 			phba->work_status[1] =
 				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
-			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+			logmask = LOG_TRACE_EVENT;
+			if (phba->work_status[0] ==
+				SLIPORT_ERR1_REG_ERR_CODE_2 &&
+			    phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
+				logmask = LOG_SLI;
+			lpfc_printf_log(phba, KERN_ERR, logmask,
 					"2885 Port Status Event: "
 					"port status reg 0x%x, "
 					"port smphr reg 0x%x, "
@@ -2267,7 +2267,8 @@ static void myrs_cleanup(struct myrs_hba *cs)
 	myrs_unmap(cs);
 
 	if (cs->mmio_base) {
-		cs->disable_intr(cs);
+		if (cs->disable_intr)
+			cs->disable_intr(cs);
 		iounmap(cs->mmio_base);
 		cs->mmio_base = NULL;
 	}
@@ -2688,7 +2688,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
 	u32 tag = le32_to_cpu(psataPayload->tag);
 	u32 port_id = le32_to_cpu(psataPayload->port_id);
 	u32 dev_id = le32_to_cpu(psataPayload->device_id);
-	unsigned long flags;
 
 	if (event)
 		pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
|
||||
ts->resp = SAS_TASK_COMPLETE;
|
||||
ts->stat = SAS_DATA_OVERRUN;
|
||||
ts->residual = 0;
|
||||
if (pm8001_dev)
|
||||
atomic_dec(&pm8001_dev->running_req);
|
||||
break;
|
||||
case IO_XFER_ERROR_BREAK:
|
||||
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
|
||||
@ -2763,7 +2760,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
|
||||
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
|
||||
ts->resp = SAS_TASK_COMPLETE;
|
||||
ts->stat = SAS_QUEUE_FULL;
|
||||
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
|
||||
return;
|
||||
}
|
||||
break;
|
||||
@ -2849,19 +2845,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
|
||||
ts->stat = SAS_OPEN_TO;
|
||||
break;
|
||||
}
|
||||
spin_lock_irqsave(&t->task_state_lock, flags);
|
||||
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
|
||||
t->task_state_flags |= SAS_TASK_STATE_DONE;
|
||||
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
|
||||
spin_unlock_irqrestore(&t->task_state_lock, flags);
|
||||
pm8001_dbg(pm8001_ha, FAIL,
|
||||
"task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
|
||||
t, event, ts->resp, ts->stat);
|
||||
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
|
||||
} else {
|
||||
spin_unlock_irqrestore(&t->task_state_lock, flags);
|
||||
pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
|
||||
}
|
||||
}
|
||||
|
||||
/*See the comments for mpi_ssp_completion */
|
||||
|
@@ -766,8 +766,13 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
 		res = -TMF_RESP_FUNC_FAILED;
 		/* Even TMF timed out, return direct. */
 		if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+			struct pm8001_ccb_info *ccb = task->lldd_task;
+
 			pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
 				   tmf->tmf);
+
+			if (ccb)
+				ccb->task = NULL;
 			goto ex_err;
 		}
 
@@ -2184,9 +2184,9 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		pm8001_dbg(pm8001_ha, FAIL,
 			   "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			   t, status, ts->resp, ts->stat);
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 		if (t->slow_task)
 			complete(&t->slow_task->completion);
-		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 	} else {
 		spin_unlock_irqrestore(&t->task_state_lock, flags);
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
@@ -2791,9 +2791,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
 		pm8001_dbg(pm8001_ha, FAIL,
 			   "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			   t, status, ts->resp, ts->stat);
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 		if (t->slow_task)
 			complete(&t->slow_task->completion);
-		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 	} else {
 		spin_unlock_irqrestore(&t->task_state_lock, flags);
 		spin_unlock_irqrestore(&circularQ->oq_lock,
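Note: both completion hunks above are pure ordering fixes: the CCB is released before complete() wakes the submitter, because once the waiter runs it may free the sas_task immediately, leaving the interrupt path touching freed memory through the CCB. A condensed sketch of the rule, with hypothetical driver types:

#include <scsi/libsas.h>

struct example_hba;
struct example_ccb;

/* hypothetical helper mirroring pm8001_ccb_task_free() */
void example_ccb_free(struct example_hba *hba, struct sas_task *task,
		      struct example_ccb *ccb, u32 tag);

static void example_finish_aborted(struct example_hba *hba, struct sas_task *task,
				   struct example_ccb *ccb, u32 tag)
{
	/* Drop driver references first ... */
	example_ccb_free(hba, task, ccb, tag);
	/* ... then signal; the waiter may free 'task' right away. */
	if (task->slow_task)
		complete(&task->slow_task->completion);
}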
@@ -2818,7 +2818,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
 	u32 tag = le32_to_cpu(psataPayload->tag);
 	u32 port_id = le32_to_cpu(psataPayload->port_id);
 	u32 dev_id = le32_to_cpu(psataPayload->device_id);
-	unsigned long flags;
 
 	if (event)
 		pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
@@ -2851,8 +2850,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
 		ts->resp = SAS_TASK_COMPLETE;
 		ts->stat = SAS_DATA_OVERRUN;
 		ts->residual = 0;
-		if (pm8001_dev)
-			atomic_dec(&pm8001_dev->running_req);
 		break;
 	case IO_XFER_ERROR_BREAK:
 		pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
@@ -2901,11 +2898,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
 				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
 		ts->resp = SAS_TASK_COMPLETE;
 		ts->stat = SAS_QUEUE_FULL;
-		spin_unlock_irqrestore(&circularQ->oq_lock,
-				       circularQ->lock_flags);
-		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
-		spin_lock_irqsave(&circularQ->oq_lock,
-				  circularQ->lock_flags);
 		return;
 	}
 	break;
@@ -3005,23 +2997,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
 		ts->stat = SAS_OPEN_TO;
 		break;
 	}
-	spin_lock_irqsave(&t->task_state_lock, flags);
-	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
-	t->task_state_flags |= SAS_TASK_STATE_DONE;
-	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
-		pm8001_dbg(pm8001_ha, FAIL,
-			   "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
-			   t, event, ts->resp, ts->stat);
-		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-	} else {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
-		spin_unlock_irqrestore(&circularQ->oq_lock,
-				       circularQ->lock_flags);
-		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
-		spin_lock_irqsave(&circularQ->oq_lock,
-				  circularQ->lock_flags);
-	}
 }
 
 /*See the comments for mpi_ssp_completion */
@@ -3926,6 +3901,7 @@ static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha,
 /**
  * process_one_iomb - process one outbound Queue memory block
  * @pm8001_ha: our hba card information
+ * @circularQ: outbound circular queue
  * @piomb: IO message buffer
  */
 static void process_one_iomb(struct pm8001_hba_info *pm8001_ha,
@@ -4146,10 +4122,22 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
 	u32 ret = MPI_IO_STATUS_FAIL;
 	u32 regval;
 
+	/*
+	 * Fatal errors are programmed to be signalled in irq vector
+	 * pm8001_ha->max_q_num - 1 through pm8001_ha->main_cfg_tbl.pm80xx_tbl.
+	 * fatal_err_interrupt
+	 */
 	if (vec == (pm8001_ha->max_q_num - 1)) {
+		u32 mipsall_ready;
+
+		if (pm8001_ha->chip_id == chip_8008 ||
+		    pm8001_ha->chip_id == chip_8009)
+			mipsall_ready = SCRATCH_PAD_MIPSALL_READY_8PORT;
+		else
+			mipsall_ready = SCRATCH_PAD_MIPSALL_READY_16PORT;
+
 		regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
-		if ((regval & SCRATCH_PAD_MIPSALL_READY) !=
-				SCRATCH_PAD_MIPSALL_READY) {
+		if ((regval & mipsall_ready) != mipsall_ready) {
 			pm8001_ha->controller_fatal_error = true;
 			pm8001_dbg(pm8001_ha, FAIL,
 				   "Firmware Fatal error! Regval:0x%x\n",
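Note: the process_oq() change exists because 8-port controllers (chip_8008/chip_8009) have no IOP1 processor, so testing the combined 16-port ready mask could misreport a firmware fatal error. The readiness test itself is simply "all bits of the per-chip mask set", as in this trivial sketch:

#include <linux/types.h>

/* All bits of the chip-specific ready mask must be present in regval. */
static bool example_all_ready(u32 regval, u32 mask)
{
	return (regval & mask) == mask;
}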
@@ -1405,8 +1405,12 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
 #define SCRATCH_PAD_BOOT_LOAD_SUCCESS	0x0
 #define SCRATCH_PAD_IOP0_READY		0xC00
 #define SCRATCH_PAD_IOP1_READY		0x3000
-#define SCRATCH_PAD_MIPSALL_READY	(SCRATCH_PAD_IOP1_READY | \
+#define SCRATCH_PAD_MIPSALL_READY_16PORT	(SCRATCH_PAD_IOP1_READY | \
 					 SCRATCH_PAD_IOP0_READY | \
 					 SCRATCH_PAD_ILA_READY | \
 					 SCRATCH_PAD_RAAE_READY)
+#define SCRATCH_PAD_MIPSALL_READY_8PORT	(SCRATCH_PAD_IOP0_READY | \
+					 SCRATCH_PAD_ILA_READY | \
+					 SCRATCH_PAD_RAAE_READY)
+
 /* boot loader state */
@@ -2250,6 +2250,7 @@ process_els:
 	    io_req->tm_flags == FCP_TMF_TGT_RESET) {
 		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
 		io_req->sc_cmd = NULL;
+		kref_put(&io_req->refcount, qedf_release_cmd);
 		complete(&io_req->tm_done);
 	}
 
@@ -911,7 +911,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
 	struct qed_link_output if_link;
 
 	if (lport->vport) {
-		QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n");
+		printk_ratelimited("Cannot issue host reset on NPIV port.\n");
 		return;
 	}
 
@@ -1864,6 +1864,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
 	vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
 	init_completion(&vport_qedf->flogi_compl);
 	INIT_LIST_HEAD(&vport_qedf->fcports);
+	INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work);
 
 	rc = qedf_vport_libfc_config(vport, vn_port);
 	if (rc) {
|
||||
struct qedf_ctx *qedf =
|
||||
container_of(work, struct qedf_ctx, stag_work.work);
|
||||
|
||||
QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
|
||||
printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.",
|
||||
dev_name(&qedf->pdev->dev), __func__, __LINE__,
|
||||
qedf->dbg_ctx.host_no);
|
||||
qedf_ctx_soft_reset(qedf->lport);
|
||||
}
|
||||
|
||||
|
@@ -214,6 +214,48 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
 				 SCSI_TIMEOUT, 3, NULL);
 }
 
+static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
+					unsigned int depth)
+{
+	int new_shift = sbitmap_calculate_shift(depth);
+	bool need_alloc = !sdev->budget_map.map;
+	bool need_free = false;
+	int ret;
+	struct sbitmap sb_backup;
+
+	/*
+	 * realloc if new shift is calculated, which is caused by setting
+	 * up one new default queue depth after calling ->slave_configure
+	 */
+	if (!need_alloc && new_shift != sdev->budget_map.shift)
+		need_alloc = need_free = true;
+
+	if (!need_alloc)
+		return 0;
+
+	/*
+	 * Request queue has to be frozen for reallocating budget map,
+	 * and here disk isn't added yet, so freezing is pretty fast
+	 */
+	if (need_free) {
+		blk_mq_freeze_queue(sdev->request_queue);
+		sb_backup = sdev->budget_map;
+	}
+	ret = sbitmap_init_node(&sdev->budget_map,
+				scsi_device_max_queue_depth(sdev),
+				new_shift, GFP_KERNEL,
+				sdev->request_queue->node, false, true);
+	if (need_free) {
+		if (ret)
+			sdev->budget_map = sb_backup;
+		else
+			sbitmap_free(&sb_backup);
+		ret = 0;
+		blk_mq_unfreeze_queue(sdev->request_queue);
+	}
+	return ret;
+}
+
 /**
  * scsi_alloc_sdev - allocate and setup a scsi_Device
  * @starget: which target to allocate a &scsi_device for
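Note: scsi_realloc_sdev_budget_map() above follows a freeze/backup/swap shape worth naming: quiesce the consumer, keep the old sbitmap as a backup (a plain struct copy keeps the old allocation alive), attempt the new allocation, then either free the backup or restore it. A stripped-down sketch using the same sbitmap API (the wrapper itself is hypothetical; the kernel function above additionally returns 0 on failure since the still-valid old map can keep serving):

#include <linux/sbitmap.h>

static int example_resize_map(struct sbitmap *map, unsigned int depth, int node)
{
	struct sbitmap backup = *map;	/* struct copy keeps old allocation */
	int ret = sbitmap_init_node(map, depth, sbitmap_calculate_shift(depth),
				    GFP_KERNEL, node, false, true);

	if (ret)
		*map = backup;		/* roll back to the old map */
	else
		sbitmap_free(&backup);	/* success: drop the old words */
	return ret;
}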
@@ -306,11 +348,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
 	 * default device queue depth to figure out sbitmap shift
 	 * since we use this queue depth most of times.
 	 */
-	if (sbitmap_init_node(&sdev->budget_map,
-			      scsi_device_max_queue_depth(sdev),
-			      sbitmap_calculate_shift(depth),
-			      GFP_KERNEL, sdev->request_queue->node,
-			      false, true)) {
+	if (scsi_realloc_sdev_budget_map(sdev, depth)) {
 		put_device(&starget->dev);
 		kfree(sdev);
 		goto out;
@@ -1017,6 +1055,13 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
 			}
 			return SCSI_SCAN_NO_RESPONSE;
 		}
+
+		/*
+		 * The queue_depth is often changed in ->slave_configure.
+		 * Set up budget map again since memory consumption of
+		 * the map depends on actual queue depth.
+		 */
+		scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
 	}
 
 	if (sdev->scsi_level >= SCSI_3)
@@ -92,6 +92,11 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
 		clki->min_freq = clkfreq[i];
 		clki->max_freq = clkfreq[i+1];
 		clki->name = devm_kstrdup(dev, name, GFP_KERNEL);
+		if (!clki->name) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
 		if (!strcmp(name, "ref_clk"))
 			clki->keep_link_active = true;
 		dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
@@ -127,6 +132,8 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
 		return -ENOMEM;
 
 	vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
+	if (!vreg->name)
+		return -ENOMEM;
 
 	snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
 	if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
@@ -8665,7 +8665,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
 * @pwr_mode: device power mode to set
 *
 * Returns 0 if requested power mode is set successfully
- * Returns non-zero if failed to set the requested power mode
+ * Returns < 0 if failed to set the requested power mode
 */
 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
 				   enum ufs_dev_pwr_mode pwr_mode)
@@ -8719,8 +8719,11 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
 		sdev_printk(KERN_WARNING, sdp,
 			    "START_STOP failed for power mode: %d, result %x\n",
 			    pwr_mode, ret);
-		if (ret > 0 && scsi_sense_valid(&sshdr))
-			scsi_print_sense_hdr(sdp, NULL, &sshdr);
+		if (ret > 0) {
+			if (scsi_sense_valid(&sshdr))
+				scsi_print_sense_hdr(sdp, NULL, &sshdr);
+			ret = -EIO;
+		}
 	}
 
 	if (!ret)
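Note: the SCSI midlayer hands back a positive result word when the device itself fails the START_STOP command, so without the added branch a positive value could escape to callers that only test for negative errnos; folding every device failure into -EIO is what lets the corrected kerneldoc above promise "Returns < 0 if failed". The mapping in isolation (hypothetical helper name):

#include <linux/errno.h>

/* Normalize a SCSI-command result: 0 on success, -errno on failure. */
static int example_map_scsi_result(int ret)
{
	if (ret > 0)		/* device-reported SCSI failure */
		ret = -EIO;
	return ret;		/* 0, -EIO, or a submission-path -errno */
}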
@@ -142,7 +142,8 @@ static inline u32 ufshci_version(u32 major, u32 minor)
 #define INT_FATAL_ERRORS	(DEVICE_FATAL_ERROR |\
 				CONTROLLER_FATAL_ERROR |\
 				SYSTEM_BUS_FATAL_ERROR |\
-				CRYPTO_ENGINE_FATAL_ERROR)
+				CRYPTO_ENGINE_FATAL_ERROR |\
+				UIC_LINK_LOST)
 
 /* HCS - Host Controller Status 30h */
 #define DEVICE_PRESENT 0x1
@@ -443,6 +443,9 @@ static bool iscsit_tpg_check_network_portal(
 				break;
 			}
 		spin_unlock(&tpg->tpg_np_lock);
+
+		if (match)
+			break;
 	}
 	spin_unlock(&tiqn->tiqn_tpg_lock);
 
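Note: the iscsit_tpg_check_network_portal() hunk is the classic nested-loop escape: break only leaves the inner tpg_np loop, so the match flag has to be re-tested to stop walking the remaining tpgs (each guarded by its own spinlock). In miniature, with hypothetical names:

#include <linux/types.h>

#define EX_OUTER 4
#define EX_INNER 4

static bool example_hit(int i, int j);	/* hypothetical predicate */

static bool example_find_match(void)
{
	bool match = false;
	int i, j;

	for (i = 0; i < EX_OUTER && !match; i++) {
		for (j = 0; j < EX_INNER; j++) {
			if (example_hit(i, j)) {
				match = true;
				break;	/* leaves the inner loop only */
			}
		}
	}
	return match;
}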