scsi: lpfc: Replace io_channels for nvme and fcp with general hdw_queues per cpu
Currently, nvme and fcp each have their own concept of an io_channel: a WQ/CQ pair with an associated MSI-X vector, with several CPUs sharing each io_channel. The driver is now moving to per-CPU WQ/CQ pairs and MSI-X vectors. The driver will still use separate WQ/CQ pairs per protocol on each CPU, but the protocols will share the MSI-X vector.

With the nvme and fcp io_channels eliminated, their module parameters (lpfc_fcp_io_channel and lpfc_nvme_io_channel) are removed. A new parameter, lpfc_hdw_queue, is added which allows the per-CPU WQ/CQ pair allocation to be overridden and set to a lesser value. If lpfc_hdw_queue is zero, the number of pairs allocated is based on the number of CPUs. If non-zero, the parameter specifies the number of queues to allocate. At this time, the maximum non-zero value is 64.

To manage this new paradigm, a new hardware queue structure is created to track queue activity and relationships. As the MSI-X vector allocation must be known before setting up the relationships, MSI-X allocation now occurs before the queue data structures are allocated. If the number of vectors allocated is less than the desired hardware queues, the hardware queue count is reduced to the number of vectors.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit cdb42becdd
parent 7370d10ac9
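For orientation before the diff: a minimal sketch of the per-CPU hardware queue structure this patch introduces, reconstructed only from the fields referenced in the hunks below. The actual lpfc_sli4.h definition is not part of this excerpt, so the exact layout and any additional members are assumptions.

```c
#include <stdint.h>			/* for a standalone build of this sketch */

struct lpfc_queue;			/* opaque SLI4 queue object */

/* Sketch reconstructed from the hunks below; not the authoritative definition. */
struct lpfc_sli4_hdw_queue {
	struct lpfc_queue *hba_eq;	/* EQ shared by both protocols (one MSI-X vector) */
	struct lpfc_queue *fcp_cq;	/* FCP/SCSI completion queue */
	struct lpfc_queue *fcp_wq;	/* FCP/SCSI work queue */
	struct lpfc_queue *nvme_cq;	/* NVME completion queue */
	struct lpfc_queue *nvme_wq;	/* NVME work queue */
	uint16_t fcp_cq_map;		/* CQ ids cached for fast EQ->CQ lookup */
	uint16_t nvme_cq_map;
};
```

One such element exists per hardware queue (nominally per CPU), replacing the separate hba_eq[], fcp_cq[]/fcp_wq[] and nvme_cq[]/nvme_wq[] arrays that the hunks below remove from the SLI4 structures.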
@@ -810,11 +810,10 @@ struct lpfc_hba {
uint32_t cfg_auto_imax;
uint32_t cfg_fcp_imax;
uint32_t cfg_fcp_cpu_map;
uint32_t cfg_fcp_io_channel;
uint32_t cfg_hdw_queue;
uint32_t cfg_suppress_rsp;
uint32_t cfg_nvme_oas;
uint32_t cfg_nvme_embed_cmd;
uint32_t cfg_nvme_io_channel;
uint32_t cfg_nvmet_mrq_post;
uint32_t cfg_nvmet_mrq;
uint32_t cfg_enable_nvmet;

@@ -877,7 +876,6 @@ struct lpfc_hba {
#define LPFC_ENABLE_NVME 2
#define LPFC_ENABLE_BOTH 3
uint32_t cfg_enable_pbde;
uint32_t io_channel_irqs; /* number of irqs for io channels */
struct nvmet_fc_target_port *targetport;
lpfc_vpd_t vpd; /* vital product data */
@@ -456,7 +456,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,

totin = 0;
totout = 0;
for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
for (i = 0; i < phba->cfg_hdw_queue; i++) {
cstat = &lport->cstat[i];
tot = atomic_read(&cstat->fc4NvmeIoCmpls);
totin += tot;

@@ -4909,7 +4909,7 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
phba->cfg_fcp_imax = (uint32_t)val;
phba->initial_imax = phba->cfg_fcp_imax;

for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
for (i = 0; i < phba->cfg_hdw_queue; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
val);
@@ -5398,41 +5398,23 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
"Embed NVME Command in WQE");

/*
* lpfc_fcp_io_channel: Set the number of FCP IO channels the driver
* will advertise it supports to the SCSI layer. This also will map to
* the number of WQs the driver will create.
*
* 0 = Configure the number of io channels to the number of active CPUs.
* 1,32 = Manually specify how many io channels to use.
*
* Value range is [0,32]. Default value is 4.
*/
LPFC_ATTR_R(fcp_io_channel,
LPFC_FCP_IO_CHAN_DEF,
LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX,
"Set the number of FCP I/O channels");

/*
* lpfc_nvme_io_channel: Set the number of IO hardware queues the driver
* will advertise it supports to the NVME layer. This also will map to
* the number of WQs the driver will create.
*
* This module parameter is valid when lpfc_enable_fc4_type is set
* to support NVME.
* lpfc_hdw_queue: Set the number of IO channels the driver
* will advertise it supports to the NVME and SCSI layers. This also
* will map to the number of EQ/CQ/WQs the driver will create.
*
* The NVME Layer will try to create this many, plus 1 administrative
* hardware queue. The administrative queue will always map to WQ 0
* A hardware IO queue maps (qidx) to a specific driver WQ.
*
* 0 = Configure the number of io channels to the number of active CPUs.
* 1,32 = Manually specify how many io channels to use.
* 0 = Configure the number of hdw queues to the number of active CPUs.
* 1,64 = Manually specify how many hdw queues to use.
*
* Value range is [0,32]. Default value is 0.
* Value range is [0,64]. Default value is 0.
*/
LPFC_ATTR_R(nvme_io_channel,
LPFC_NVME_IO_CHAN_DEF,
LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX,
"Set the number of NVME I/O channels");
LPFC_ATTR_R(hdw_queue,
LPFC_HBA_HDWQ_DEF,
LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
"Set the number of I/O Hardware Queues");

/*
# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
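As a simplified illustration of how the new parameter is expected to resolve, pulled together from the lpfc_get_cfgparam, lpfc_sli4_queue_verify and MSI-X hunks later in this patch: the helper below is hypothetical, written only to summarize the clamping visible in the diff, and is not driver code.

```c
/* Hypothetical helper; mirrors the clamping order shown in this patch. */
static unsigned int lpfc_resolve_hdw_queue_count(unsigned int cfg_hdw_queue,
						 unsigned int num_present_cpu,
						 unsigned int max_eq,
						 unsigned int msix_vectors)
{
	/* 0 means "one WQ/CQ pair per CPU" */
	unsigned int n = cfg_hdw_queue ? cfg_hdw_queue : num_present_cpu;

	if (n > num_present_cpu)	/* never more hardware queues than CPUs */
		n = num_present_cpu;
	if (n > max_eq)			/* bounded by EQs the adapter exposes */
		n = max_eq;
	if (n > msix_vectors)		/* bounded by MSI-X vectors actually granted */
		n = msix_vectors;
	return n;
}
```

For example, lpfc_hdw_queue=0 on a 16-CPU host with enough EQs and MSI-X vectors would yield 16 WQ/CQ pairs per protocol, each sharing one vector per CPU.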
@@ -5727,9 +5709,8 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_auto_imax,
&dev_attr_lpfc_fcp_imax,
&dev_attr_lpfc_fcp_cpu_map,
&dev_attr_lpfc_fcp_io_channel,
&dev_attr_lpfc_hdw_queue,
&dev_attr_lpfc_suppress_rsp,
&dev_attr_lpfc_nvme_io_channel,
&dev_attr_lpfc_nvmet_mrq,
&dev_attr_lpfc_nvmet_mrq_post,
&dev_attr_lpfc_nvme_enable_fb,
@@ -6806,8 +6787,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
/* Initialize first burst. Target vs Initiator are different. */
lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
lpfc_nvme_io_channel_init(phba, lpfc_nvme_io_channel);
lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
lpfc_enable_dpp_init(phba, lpfc_enable_dpp);

@@ -6829,21 +6809,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_enable_pbde = 0;

/* A value of 0 means use the number of CPUs found in the system */
if (phba->cfg_fcp_io_channel == 0)
phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
if (phba->cfg_nvme_io_channel == 0)
phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu;

if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
phba->cfg_fcp_io_channel = 0;

if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)
phba->cfg_nvme_io_channel = 0;

if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
phba->io_channel_irqs = phba->cfg_fcp_io_channel;
else
phba->io_channel_irqs = phba->cfg_nvme_io_channel;
if (phba->cfg_hdw_queue == 0)
phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;

phba->cfg_soft_wwnn = 0L;
phba->cfg_soft_wwpn = 0L;
@@ -6884,16 +6851,12 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
void
lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
{
if (phba->cfg_nvme_io_channel > phba->sli4_hba.num_present_cpu)
phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu;

if (phba->cfg_fcp_io_channel > phba->sli4_hba.num_present_cpu)
phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu)
phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;

if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
phba->nvmet_support) {
phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;
phba->cfg_fcp_io_channel = 0;

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6013 %s x%x fb_size x%x, fb_max x%x\n",

@@ -6910,11 +6873,11 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
}

if (!phba->cfg_nvmet_mrq)
phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;

/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) {
phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6018 Adjust lpfc_nvmet_mrq to %d\n",
phba->cfg_nvmet_mrq);

@@ -6928,11 +6891,6 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
phba->cfg_nvmet_fb_size = 0;
}

if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
phba->io_channel_irqs = phba->cfg_fcp_io_channel;
else
phba->io_channel_irqs = phba->cfg_nvme_io_channel;
}

/**
@ -919,13 +919,13 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
|
||||
atomic_read(&lport->fc4NvmeLsRequests),
|
||||
atomic_read(&lport->fc4NvmeLsCmpls));
|
||||
|
||||
if (phba->cfg_nvme_io_channel < 32)
|
||||
maxch = phba->cfg_nvme_io_channel;
|
||||
if (phba->cfg_hdw_queue < LPFC_HBA_HDWQ_MAX)
|
||||
maxch = phba->cfg_hdw_queue;
|
||||
else
|
||||
maxch = 32;
|
||||
maxch = LPFC_HBA_HDWQ_MAX;
|
||||
totin = 0;
|
||||
totout = 0;
|
||||
for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
|
||||
for (i = 0; i < phba->cfg_hdw_queue; i++) {
|
||||
cstat = &lport->cstat[i];
|
||||
tot = atomic_read(&cstat->fc4NvmeIoCmpls);
|
||||
totin += tot;
|
||||
@ -3182,21 +3182,23 @@ lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer,
|
||||
struct lpfc_queue *qp;
|
||||
int qidx;
|
||||
|
||||
for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
|
||||
qp = phba->sli4_hba.fcp_wq[qidx];
|
||||
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
|
||||
qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
|
||||
if (qp->assoc_qid != cq_id)
|
||||
continue;
|
||||
*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
|
||||
if (*len >= max_cnt)
|
||||
return 1;
|
||||
}
|
||||
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
|
||||
qp = phba->sli4_hba.nvme_wq[qidx];
|
||||
if (qp->assoc_qid != cq_id)
|
||||
continue;
|
||||
*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
|
||||
if (*len >= max_cnt)
|
||||
return 1;
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
|
||||
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
|
||||
qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
|
||||
if (qp->assoc_qid != cq_id)
|
||||
continue;
|
||||
*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
|
||||
if (*len >= max_cnt)
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@ -3262,8 +3264,8 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
|
||||
struct lpfc_queue *qp;
|
||||
int qidx, rc;
|
||||
|
||||
for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
|
||||
qp = phba->sli4_hba.fcp_cq[qidx];
|
||||
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
|
||||
qp = phba->sli4_hba.hdwq[qidx].fcp_cq;
|
||||
if (qp->assoc_qid != eq_id)
|
||||
continue;
|
||||
|
||||
@ -3281,23 +3283,25 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
|
||||
return 1;
|
||||
}
|
||||
|
||||
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
|
||||
qp = phba->sli4_hba.nvme_cq[qidx];
|
||||
if (qp->assoc_qid != eq_id)
|
||||
continue;
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
|
||||
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
|
||||
qp = phba->sli4_hba.hdwq[qidx].nvme_cq;
|
||||
if (qp->assoc_qid != eq_id)
|
||||
continue;
|
||||
|
||||
*len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
|
||||
*len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
|
||||
|
||||
/* Reset max counter */
|
||||
qp->CQ_max_cqe = 0;
|
||||
/* Reset max counter */
|
||||
qp->CQ_max_cqe = 0;
|
||||
|
||||
if (*len >= max_cnt)
|
||||
return 1;
|
||||
if (*len >= max_cnt)
|
||||
return 1;
|
||||
|
||||
rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
|
||||
max_cnt, qp->queue_id);
|
||||
if (rc)
|
||||
return 1;
|
||||
rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
|
||||
max_cnt, qp->queue_id);
|
||||
if (rc)
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
|
||||
@ -3387,19 +3391,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
|
||||
/* Fast-path event queue */
|
||||
if (phba->sli4_hba.hba_eq && phba->io_channel_irqs) {
|
||||
if (phba->sli4_hba.hdwq && phba->cfg_hdw_queue) {
|
||||
|
||||
x = phba->lpfc_idiag_last_eq;
|
||||
phba->lpfc_idiag_last_eq++;
|
||||
if (phba->lpfc_idiag_last_eq >= phba->io_channel_irqs)
|
||||
if (phba->lpfc_idiag_last_eq >= phba->cfg_hdw_queue)
|
||||
phba->lpfc_idiag_last_eq = 0;
|
||||
|
||||
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
|
||||
"EQ %d out of %d HBA EQs\n",
|
||||
x, phba->io_channel_irqs);
|
||||
x, phba->cfg_hdw_queue);
|
||||
|
||||
/* Fast-path EQ */
|
||||
qp = phba->sli4_hba.hba_eq[x];
|
||||
qp = phba->sli4_hba.hdwq[x].hba_eq;
|
||||
if (!qp)
|
||||
goto out;
|
||||
|
||||
@ -3691,9 +3695,9 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
|
||||
switch (quetp) {
|
||||
case LPFC_IDIAG_EQ:
|
||||
/* HBA event queue */
|
||||
if (phba->sli4_hba.hba_eq) {
|
||||
for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
|
||||
qp = phba->sli4_hba.hba_eq[qidx];
|
||||
if (phba->sli4_hba.hdwq) {
|
||||
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
|
||||
qp = phba->sli4_hba.hdwq[qidx].hba_eq;
|
||||
if (qp && qp->queue_id == queid) {
|
||||
/* Sanity check */
|
||||
rc = lpfc_idiag_que_param_check(qp,
|
||||
@ -3742,10 +3746,10 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
|
||||
goto pass_check;
|
||||
}
|
||||
/* FCP complete queue */
|
||||
if (phba->sli4_hba.fcp_cq) {
|
||||
for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
|
||||
if (phba->sli4_hba.hdwq) {
|
||||
for (qidx = 0; qidx < phba->cfg_hdw_queue;
|
||||
qidx++) {
|
||||
qp = phba->sli4_hba.fcp_cq[qidx];
|
||||
qp = phba->sli4_hba.hdwq[qidx].fcp_cq;
|
||||
if (qp && qp->queue_id == queid) {
|
||||
/* Sanity check */
|
||||
rc = lpfc_idiag_que_param_check(
|
||||
@ -3758,23 +3762,20 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
|
||||
}
|
||||
}
|
||||
/* NVME complete queue */
|
||||
if (phba->sli4_hba.nvme_cq) {
|
||||
if (phba->sli4_hba.hdwq) {
|
||||
qidx = 0;
|
||||
do {
|
||||
if (phba->sli4_hba.nvme_cq[qidx] &&
|
||||
phba->sli4_hba.nvme_cq[qidx]->queue_id ==
|
||||
queid) {
|
||||
qp = phba->sli4_hba.hdwq[qidx].nvme_cq;
|
||||
if (qp && qp->queue_id == queid) {
|
||||
/* Sanity check */
|
||||
rc = lpfc_idiag_que_param_check(
|
||||
phba->sli4_hba.nvme_cq[qidx],
|
||||
index, count);
|
||||
qp, index, count);
|
||||
if (rc)
|
||||
goto error_out;
|
||||
idiag.ptr_private =
|
||||
phba->sli4_hba.nvme_cq[qidx];
|
||||
idiag.ptr_private = qp;
|
||||
goto pass_check;
|
||||
}
|
||||
} while (++qidx < phba->cfg_nvme_io_channel);
|
||||
} while (++qidx < phba->cfg_hdw_queue);
|
||||
}
|
||||
goto error_out;
|
||||
break;
|
||||
@ -3815,11 +3816,11 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
|
||||
idiag.ptr_private = phba->sli4_hba.nvmels_wq;
|
||||
goto pass_check;
|
||||
}
|
||||
/* FCP work queue */
|
||||
if (phba->sli4_hba.fcp_wq) {
|
||||
for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
|
||||
qidx++) {
|
||||
qp = phba->sli4_hba.fcp_wq[qidx];
|
||||
|
||||
if (phba->sli4_hba.hdwq) {
|
||||
/* FCP/SCSI work queue */
|
||||
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
|
||||
qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
|
||||
if (qp && qp->queue_id == queid) {
|
||||
/* Sanity check */
|
||||
rc = lpfc_idiag_que_param_check(
|
||||
@ -3830,12 +3831,9 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
|
||||
goto pass_check;
|
||||
}
|
||||
}
|
||||
}
|
||||
/* NVME work queue */
|
||||
if (phba->sli4_hba.nvme_wq) {
|
||||
for (qidx = 0; qidx < phba->cfg_nvme_io_channel;
|
||||
qidx++) {
|
||||
qp = phba->sli4_hba.nvme_wq[qidx];
|
||||
/* NVME work queue */
|
||||
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
|
||||
qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
|
||||
if (qp && qp->queue_id == queid) {
|
||||
/* Sanity check */
|
||||
rc = lpfc_idiag_que_param_check(
|
||||
@ -3848,26 +3846,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
|
||||
}
|
||||
}
|
||||
|
||||
/* NVME work queues */
|
||||
if (phba->sli4_hba.nvme_wq) {
|
||||
for (qidx = 0; qidx < phba->cfg_nvme_io_channel;
|
||||
qidx++) {
|
||||
if (!phba->sli4_hba.nvme_wq[qidx])
|
||||
continue;
|
||||
if (phba->sli4_hba.nvme_wq[qidx]->queue_id ==
|
||||
queid) {
|
||||
/* Sanity check */
|
||||
rc = lpfc_idiag_que_param_check(
|
||||
phba->sli4_hba.nvme_wq[qidx],
|
||||
index, count);
|
||||
if (rc)
|
||||
goto error_out;
|
||||
idiag.ptr_private =
|
||||
phba->sli4_hba.nvme_wq[qidx];
|
||||
goto pass_check;
|
||||
}
|
||||
}
|
||||
}
|
||||
goto error_out;
|
||||
break;
|
||||
case LPFC_IDIAG_RQ:
|
||||
@ -5784,11 +5762,13 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
|
||||
lpfc_debug_dump_wq(phba, DUMP_ELS, 0);
|
||||
lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0);
|
||||
|
||||
for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
|
||||
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
|
||||
lpfc_debug_dump_wq(phba, DUMP_FCP, idx);
|
||||
|
||||
for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
|
||||
lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
|
||||
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
|
||||
lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
|
||||
}
|
||||
|
||||
lpfc_debug_dump_hdr_rq(phba);
|
||||
lpfc_debug_dump_dat_rq(phba);
|
||||
@ -5799,15 +5779,17 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
|
||||
lpfc_debug_dump_cq(phba, DUMP_ELS, 0);
|
||||
lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0);
|
||||
|
||||
for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
|
||||
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
|
||||
lpfc_debug_dump_cq(phba, DUMP_FCP, idx);
|
||||
|
||||
for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
|
||||
lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
|
||||
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
|
||||
lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
|
||||
}
|
||||
|
||||
/*
|
||||
* Dump Event Queues (EQs)
|
||||
*/
|
||||
for (idx = 0; idx < phba->io_channel_irqs; idx++)
|
||||
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
|
||||
lpfc_debug_dump_hba_eq(phba, idx);
|
||||
}
|
||||
|
@ -410,10 +410,10 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
|
||||
char *qtypestr;
|
||||
|
||||
if (qtype == DUMP_FCP) {
|
||||
wq = phba->sli4_hba.fcp_wq[wqidx];
|
||||
wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
|
||||
qtypestr = "FCP";
|
||||
} else if (qtype == DUMP_NVME) {
|
||||
wq = phba->sli4_hba.nvme_wq[wqidx];
|
||||
wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
|
||||
qtypestr = "NVME";
|
||||
} else if (qtype == DUMP_MBX) {
|
||||
wq = phba->sli4_hba.mbx_wq;
|
||||
@ -454,14 +454,15 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
|
||||
int eqidx;
|
||||
|
||||
/* fcp/nvme wq and cq are 1:1, thus same indexes */
|
||||
eq = NULL;
|
||||
|
||||
if (qtype == DUMP_FCP) {
|
||||
wq = phba->sli4_hba.fcp_wq[wqidx];
|
||||
cq = phba->sli4_hba.fcp_cq[wqidx];
|
||||
wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
|
||||
cq = phba->sli4_hba.hdwq[wqidx].fcp_cq;
|
||||
qtypestr = "FCP";
|
||||
} else if (qtype == DUMP_NVME) {
|
||||
wq = phba->sli4_hba.nvme_wq[wqidx];
|
||||
cq = phba->sli4_hba.nvme_cq[wqidx];
|
||||
wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
|
||||
cq = phba->sli4_hba.hdwq[wqidx].nvme_cq;
|
||||
qtypestr = "NVME";
|
||||
} else if (qtype == DUMP_MBX) {
|
||||
wq = phba->sli4_hba.mbx_wq;
|
||||
@ -478,17 +479,17 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
|
||||
} else
|
||||
return;
|
||||
|
||||
for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) {
|
||||
if (cq->assoc_qid == phba->sli4_hba.hba_eq[eqidx]->queue_id)
|
||||
for (eqidx = 0; eqidx < phba->cfg_hdw_queue; eqidx++) {
|
||||
eq = phba->sli4_hba.hdwq[eqidx].hba_eq;
|
||||
if (cq->assoc_qid == eq->queue_id)
|
||||
break;
|
||||
}
|
||||
if (eqidx == phba->io_channel_irqs) {
|
||||
if (eqidx == phba->cfg_hdw_queue) {
|
||||
pr_err("Couldn't find EQ for CQ. Using EQ[0]\n");
|
||||
eqidx = 0;
|
||||
eq = phba->sli4_hba.hdwq[0].hba_eq;
|
||||
}
|
||||
|
||||
eq = phba->sli4_hba.hba_eq[eqidx];
|
||||
|
||||
if (qtype == DUMP_FCP || qtype == DUMP_NVME)
|
||||
pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
|
||||
"->EQ[Idx:%d|Qid:%d]:\n",
|
||||
@ -516,7 +517,7 @@ lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int qidx)
|
||||
{
|
||||
struct lpfc_queue *qp;
|
||||
|
||||
qp = phba->sli4_hba.hba_eq[qidx];
|
||||
qp = phba->sli4_hba.hdwq[qidx].hba_eq;
|
||||
|
||||
pr_err("EQ[Idx:%d|Qid:%d]\n", qidx, qp->queue_id);
|
||||
|
||||
@ -564,21 +565,21 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
|
||||
{
|
||||
int wq_idx;
|
||||
|
||||
for (wq_idx = 0; wq_idx < phba->cfg_fcp_io_channel; wq_idx++)
|
||||
if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid)
|
||||
for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
|
||||
if (phba->sli4_hba.hdwq[wq_idx].fcp_wq->queue_id == qid)
|
||||
break;
|
||||
if (wq_idx < phba->cfg_fcp_io_channel) {
|
||||
if (wq_idx < phba->cfg_hdw_queue) {
|
||||
pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
|
||||
lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]);
|
||||
lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].fcp_wq);
|
||||
return;
|
||||
}
|
||||
|
||||
for (wq_idx = 0; wq_idx < phba->cfg_nvme_io_channel; wq_idx++)
|
||||
if (phba->sli4_hba.nvme_wq[wq_idx]->queue_id == qid)
|
||||
for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
|
||||
if (phba->sli4_hba.hdwq[wq_idx].nvme_wq->queue_id == qid)
|
||||
break;
|
||||
if (wq_idx < phba->cfg_nvme_io_channel) {
|
||||
if (wq_idx < phba->cfg_hdw_queue) {
|
||||
pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
|
||||
lpfc_debug_dump_q(phba->sli4_hba.nvme_wq[wq_idx]);
|
||||
lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].nvme_wq);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -646,23 +647,23 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
|
||||
{
|
||||
int cq_idx;
|
||||
|
||||
for (cq_idx = 0; cq_idx < phba->cfg_fcp_io_channel; cq_idx++)
|
||||
if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid)
|
||||
for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
|
||||
if (phba->sli4_hba.hdwq[cq_idx].fcp_cq->queue_id == qid)
|
||||
break;
|
||||
|
||||
if (cq_idx < phba->cfg_fcp_io_channel) {
|
||||
if (cq_idx < phba->cfg_hdw_queue) {
|
||||
pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
|
||||
lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]);
|
||||
lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].fcp_cq);
|
||||
return;
|
||||
}
|
||||
|
||||
for (cq_idx = 0; cq_idx < phba->cfg_nvme_io_channel; cq_idx++)
|
||||
if (phba->sli4_hba.nvme_cq[cq_idx]->queue_id == qid)
|
||||
for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
|
||||
if (phba->sli4_hba.hdwq[cq_idx].nvme_cq->queue_id == qid)
|
||||
break;
|
||||
|
||||
if (cq_idx < phba->cfg_nvme_io_channel) {
|
||||
if (cq_idx < phba->cfg_hdw_queue) {
|
||||
pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
|
||||
lpfc_debug_dump_q(phba->sli4_hba.nvme_cq[cq_idx]);
|
||||
lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].nvme_cq);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -697,13 +698,13 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
|
||||
{
|
||||
int eq_idx;
|
||||
|
||||
for (eq_idx = 0; eq_idx < phba->io_channel_irqs; eq_idx++)
|
||||
if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid)
|
||||
for (eq_idx = 0; eq_idx < phba->cfg_hdw_queue; eq_idx++)
|
||||
if (phba->sli4_hba.hdwq[eq_idx].hba_eq->queue_id == qid)
|
||||
break;
|
||||
|
||||
if (eq_idx < phba->io_channel_irqs) {
|
||||
if (eq_idx < phba->cfg_hdw_queue) {
|
||||
printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
|
||||
lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]);
|
||||
lpfc_debug_dump_q(phba->sli4_hba.hdwq[eq_idx].hba_eq);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
@ -1315,7 +1315,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
|
||||
localport->private;
|
||||
tot = 0;
|
||||
for (i = 0;
|
||||
i < phba->cfg_nvme_io_channel; i++) {
|
||||
i < phba->cfg_hdw_queue; i++) {
|
||||
cstat = &lport->cstat[i];
|
||||
data1 = atomic_read(
|
||||
&cstat->fc4NvmeInputRequests);
|
||||
@ -1331,15 +1331,15 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
|
||||
}
|
||||
|
||||
/* Interrupts per sec per EQ */
|
||||
val = phba->cfg_fcp_imax / phba->io_channel_irqs;
|
||||
val = phba->cfg_fcp_imax / phba->cfg_hdw_queue;
|
||||
tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */
|
||||
|
||||
/* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */
|
||||
max_cqe = time_elapsed * tick_cqe;
|
||||
|
||||
for (i = 0; i < phba->io_channel_irqs; i++) {
|
||||
for (i = 0; i < phba->cfg_hdw_queue; i++) {
|
||||
/* Fast-path EQ */
|
||||
qp = phba->sli4_hba.hba_eq[i];
|
||||
qp = phba->sli4_hba.hdwq[i].hba_eq;
|
||||
if (!qp)
|
||||
continue;
|
||||
|
||||
@ -1361,7 +1361,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
|
||||
if (val) {
|
||||
/* First, interrupts per sec per EQ */
|
||||
val = phba->cfg_fcp_imax /
|
||||
phba->io_channel_irqs;
|
||||
phba->cfg_hdw_queue;
|
||||
|
||||
/* us delay between each interrupt */
|
||||
val = LPFC_SEC_TO_USEC / val;
|
||||
@ -2945,7 +2945,8 @@ lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
|
||||
void
|
||||
lpfc_stop_hba_timers(struct lpfc_hba *phba)
|
||||
{
|
||||
lpfc_stop_vport_timers(phba->pport);
|
||||
if (phba->pport)
|
||||
lpfc_stop_vport_timers(phba->pport);
|
||||
del_timer_sync(&phba->sli.mbox_tmo);
|
||||
del_timer_sync(&phba->fabric_block_timer);
|
||||
del_timer_sync(&phba->eratt_poll);
|
||||
@ -3989,7 +3990,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
|
||||
shost->max_lun = vport->cfg_max_luns;
|
||||
shost->this_id = -1;
|
||||
shost->max_cmd_len = 16;
|
||||
shost->nr_hw_queues = phba->cfg_fcp_io_channel;
|
||||
shost->nr_hw_queues = phba->cfg_hdw_queue;
|
||||
if (phba->sli_rev == LPFC_SLI_REV4) {
|
||||
shost->dma_boundary =
|
||||
phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
|
||||
@ -4248,7 +4249,8 @@ lpfc_stop_port_s4(struct lpfc_hba *phba)
|
||||
{
|
||||
/* Reset some HBA SLI4 setup states */
|
||||
lpfc_stop_hba_timers(phba);
|
||||
phba->pport->work_port_events = 0;
|
||||
if (phba->pport)
|
||||
phba->pport->work_port_events = 0;
|
||||
phba->sli4_hba.intr_enable = 0;
|
||||
}
|
||||
|
||||
@ -6475,9 +6477,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
|
||||
goto out_remove_rpi_hdrs;
|
||||
}
|
||||
|
||||
phba->sli4_hba.hba_eq_hdl = kcalloc(phba->io_channel_irqs,
|
||||
sizeof(struct lpfc_hba_eq_hdl),
|
||||
GFP_KERNEL);
|
||||
phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_hdw_queue,
|
||||
sizeof(struct lpfc_hba_eq_hdl),
|
||||
GFP_KERNEL);
|
||||
if (!phba->sli4_hba.hba_eq_hdl) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"2572 Failed allocate memory for "
|
||||
@ -8049,21 +8051,23 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
|
||||
* NVMET, FCP io channel WQs are not created.
|
||||
*/
|
||||
length -= 6;
|
||||
if (!phba->nvmet_support)
|
||||
length -= phba->cfg_fcp_io_channel;
|
||||
|
||||
if (phba->cfg_nvme_io_channel > length) {
|
||||
/* Take off FCP queues */
|
||||
if (!phba->nvmet_support)
|
||||
length -= phba->cfg_hdw_queue;
|
||||
|
||||
/* Check to see if there is enough for NVME */
|
||||
if (phba->cfg_hdw_queue > length) {
|
||||
lpfc_printf_log(
|
||||
phba, KERN_ERR, LOG_SLI,
|
||||
"2005 Reducing NVME IO channel to %d: "
|
||||
"WQ %d CQ %d NVMEIO %d FCPIO %d\n",
|
||||
"WQ %d CQ %d CommonIO %d\n",
|
||||
length,
|
||||
phba->sli4_hba.max_cfg_param.max_wq,
|
||||
phba->sli4_hba.max_cfg_param.max_cq,
|
||||
phba->cfg_nvme_io_channel,
|
||||
phba->cfg_fcp_io_channel);
|
||||
phba->cfg_hdw_queue);
|
||||
|
||||
phba->cfg_nvme_io_channel = length;
|
||||
phba->cfg_hdw_queue = length;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -8276,52 +8280,30 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
|
||||
static int
|
||||
lpfc_sli4_queue_verify(struct lpfc_hba *phba)
|
||||
{
|
||||
int io_channel;
|
||||
|
||||
/*
|
||||
* Sanity check for configured queue parameters against the run-time
|
||||
* device parameters
|
||||
*/
|
||||
|
||||
/* Sanity check on HBA EQ parameters */
|
||||
io_channel = phba->io_channel_irqs;
|
||||
|
||||
if (phba->sli4_hba.num_online_cpu < io_channel) {
|
||||
lpfc_printf_log(phba,
|
||||
KERN_ERR, LOG_INIT,
|
||||
"3188 Reducing IO channels to match number of "
|
||||
"online CPUs: from %d to %d\n",
|
||||
io_channel, phba->sli4_hba.num_online_cpu);
|
||||
io_channel = phba->sli4_hba.num_online_cpu;
|
||||
}
|
||||
|
||||
if (io_channel > phba->sli4_hba.max_cfg_param.max_eq) {
|
||||
if (phba->cfg_hdw_queue > phba->sli4_hba.max_cfg_param.max_eq) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"2575 Reducing IO channels to match number of "
|
||||
"available EQs: from %d to %d\n",
|
||||
io_channel,
|
||||
phba->cfg_hdw_queue,
|
||||
phba->sli4_hba.max_cfg_param.max_eq);
|
||||
io_channel = phba->sli4_hba.max_cfg_param.max_eq;
|
||||
phba->cfg_hdw_queue = phba->sli4_hba.max_cfg_param.max_eq;
|
||||
}
|
||||
|
||||
/* The actual number of FCP / NVME event queues adopted */
|
||||
if (io_channel != phba->io_channel_irqs)
|
||||
phba->io_channel_irqs = io_channel;
|
||||
if (phba->cfg_fcp_io_channel > io_channel)
|
||||
phba->cfg_fcp_io_channel = io_channel;
|
||||
if (phba->cfg_nvme_io_channel > io_channel)
|
||||
phba->cfg_nvme_io_channel = io_channel;
|
||||
if (phba->nvmet_support) {
|
||||
if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
|
||||
phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
|
||||
if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
|
||||
phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
|
||||
}
|
||||
if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
|
||||
phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
|
||||
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
|
||||
phba->io_channel_irqs, phba->cfg_fcp_io_channel,
|
||||
phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq);
|
||||
"2574 IO channels: hdwQ %d MRQ: %d\n",
|
||||
phba->cfg_hdw_queue, phba->cfg_nvmet_mrq);
|
||||
|
||||
/* Get EQ depth from module parameter, fake the default for now */
|
||||
phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
|
||||
@ -8348,7 +8330,7 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
|
||||
return 1;
|
||||
}
|
||||
qdesc->qe_valid = 1;
|
||||
phba->sli4_hba.nvme_cq[wqidx] = qdesc;
|
||||
phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc;
|
||||
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
|
||||
LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
|
||||
@ -8358,7 +8340,7 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
|
||||
wqidx);
|
||||
return 1;
|
||||
}
|
||||
phba->sli4_hba.nvme_wq[wqidx] = qdesc;
|
||||
phba->sli4_hba.hdwq[wqidx].nvme_wq = qdesc;
|
||||
list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
|
||||
return 0;
|
||||
}
|
||||
@ -8386,7 +8368,7 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
|
||||
return 1;
|
||||
}
|
||||
qdesc->qe_valid = 1;
|
||||
phba->sli4_hba.fcp_cq[wqidx] = qdesc;
|
||||
phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc;
|
||||
|
||||
/* Create Fast Path FCP WQs */
|
||||
if (phba->enab_exp_wqcq_pages) {
|
||||
@ -8407,7 +8389,7 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
|
||||
wqidx);
|
||||
return 1;
|
||||
}
|
||||
phba->sli4_hba.fcp_wq[wqidx] = qdesc;
|
||||
phba->sli4_hba.hdwq[wqidx].fcp_wq = qdesc;
|
||||
list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
|
||||
return 0;
|
||||
}
|
||||
@ -8430,16 +8412,12 @@ int
|
||||
lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
||||
{
|
||||
struct lpfc_queue *qdesc;
|
||||
int idx, io_channel;
|
||||
int idx;
|
||||
|
||||
/*
|
||||
* Create HBA Record arrays.
|
||||
* Both NVME and FCP will share that same vectors / EQs
|
||||
*/
|
||||
io_channel = phba->io_channel_irqs;
|
||||
if (!io_channel)
|
||||
return -ERANGE;
|
||||
|
||||
phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
|
||||
phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
|
||||
phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
|
||||
@ -8451,87 +8429,17 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
||||
phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
|
||||
phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
|
||||
|
||||
phba->sli4_hba.hba_eq = kcalloc(io_channel,
|
||||
sizeof(struct lpfc_queue *),
|
||||
GFP_KERNEL);
|
||||
if (!phba->sli4_hba.hba_eq) {
|
||||
phba->sli4_hba.hdwq = kcalloc(phba->cfg_hdw_queue,
|
||||
sizeof(struct lpfc_sli4_hdw_queue),
|
||||
GFP_KERNEL);
|
||||
if (!phba->sli4_hba.hdwq) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"2576 Failed allocate memory for "
|
||||
"fast-path EQ record array\n");
|
||||
"6427 Failed allocate memory for "
|
||||
"fast-path Hardware Queue array\n");
|
||||
goto out_error;
|
||||
}
|
||||
|
||||
if (phba->cfg_fcp_io_channel) {
|
||||
phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel,
|
||||
sizeof(struct lpfc_queue *),
|
||||
GFP_KERNEL);
|
||||
if (!phba->sli4_hba.fcp_cq) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"2577 Failed allocate memory for "
|
||||
"fast-path CQ record array\n");
|
||||
goto out_error;
|
||||
}
|
||||
phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel,
|
||||
sizeof(struct lpfc_queue *),
|
||||
GFP_KERNEL);
|
||||
if (!phba->sli4_hba.fcp_wq) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"2578 Failed allocate memory for "
|
||||
"fast-path FCP WQ record array\n");
|
||||
goto out_error;
|
||||
}
|
||||
/*
|
||||
* Since the first EQ can have multiple CQs associated with it,
|
||||
* this array is used to quickly see if we have a FCP fast-path
|
||||
* CQ match.
|
||||
*/
|
||||
phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel,
|
||||
sizeof(uint16_t),
|
||||
GFP_KERNEL);
|
||||
if (!phba->sli4_hba.fcp_cq_map) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"2545 Failed allocate memory for "
|
||||
"fast-path CQ map\n");
|
||||
goto out_error;
|
||||
}
|
||||
}
|
||||
|
||||
if (phba->cfg_nvme_io_channel) {
|
||||
phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel,
|
||||
sizeof(struct lpfc_queue *),
|
||||
GFP_KERNEL);
|
||||
if (!phba->sli4_hba.nvme_cq) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"6077 Failed allocate memory for "
|
||||
"fast-path CQ record array\n");
|
||||
goto out_error;
|
||||
}
|
||||
|
||||
phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel,
|
||||
sizeof(struct lpfc_queue *),
|
||||
GFP_KERNEL);
|
||||
if (!phba->sli4_hba.nvme_wq) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"2581 Failed allocate memory for "
|
||||
"fast-path NVME WQ record array\n");
|
||||
goto out_error;
|
||||
}
|
||||
|
||||
/*
|
||||
* Since the first EQ can have multiple CQs associated with it,
|
||||
* this array is used to quickly see if we have a NVME fast-path
|
||||
* CQ match.
|
||||
*/
|
||||
phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel,
|
||||
sizeof(uint16_t),
|
||||
GFP_KERNEL);
|
||||
if (!phba->sli4_hba.nvme_cq_map) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"6078 Failed allocate memory for "
|
||||
"fast-path CQ map\n");
|
||||
goto out_error;
|
||||
}
|
||||
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
|
||||
if (phba->nvmet_support) {
|
||||
phba->sli4_hba.nvmet_cqset = kcalloc(
|
||||
phba->cfg_nvmet_mrq,
|
||||
@ -8569,7 +8477,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
||||
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
|
||||
|
||||
/* Create HBA Event Queues (EQs) */
|
||||
for (idx = 0; idx < io_channel; idx++) {
|
||||
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
|
||||
/* Create EQs */
|
||||
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
|
||||
phba->sli4_hba.eq_esize,
|
||||
@ -8580,33 +8488,38 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
||||
goto out_error;
|
||||
}
|
||||
qdesc->qe_valid = 1;
|
||||
phba->sli4_hba.hba_eq[idx] = qdesc;
|
||||
phba->sli4_hba.hdwq[idx].hba_eq = qdesc;
|
||||
}
|
||||
|
||||
/* FCP and NVME io channels are not required to be balanced */
|
||||
|
||||
for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
|
||||
/* Allocate SCSI SLI4 CQ/WQs */
|
||||
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
|
||||
if (lpfc_alloc_fcp_wq_cq(phba, idx))
|
||||
goto out_error;
|
||||
|
||||
for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
|
||||
if (lpfc_alloc_nvme_wq_cq(phba, idx))
|
||||
goto out_error;
|
||||
/* Allocate NVME SLI4 CQ/WQs */
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
|
||||
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
|
||||
if (lpfc_alloc_nvme_wq_cq(phba, idx))
|
||||
goto out_error;
|
||||
|
||||
if (phba->nvmet_support) {
|
||||
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
|
||||
qdesc = lpfc_sli4_queue_alloc(phba,
|
||||
if (phba->nvmet_support) {
|
||||
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
|
||||
qdesc = lpfc_sli4_queue_alloc(
|
||||
phba,
|
||||
LPFC_DEFAULT_PAGE_SIZE,
|
||||
phba->sli4_hba.cq_esize,
|
||||
phba->sli4_hba.cq_ecount);
|
||||
if (!qdesc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3142 Failed allocate NVME "
|
||||
"CQ Set (%d)\n", idx);
|
||||
goto out_error;
|
||||
if (!qdesc) {
|
||||
lpfc_printf_log(
|
||||
phba, KERN_ERR, LOG_INIT,
|
||||
"3142 Failed allocate NVME "
|
||||
"CQ Set (%d)\n", idx);
|
||||
goto out_error;
|
||||
}
|
||||
qdesc->qe_valid = 1;
|
||||
phba->sli4_hba.nvmet_cqset[idx] = qdesc;
|
||||
}
|
||||
qdesc->qe_valid = 1;
|
||||
phba->sli4_hba.nvmet_cqset[idx] = qdesc;
|
||||
}
|
||||
}
|
||||
|
||||
@ -8723,7 +8636,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
||||
}
|
||||
phba->sli4_hba.dat_rq = qdesc;
|
||||
|
||||
if (phba->nvmet_support) {
|
||||
if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
|
||||
phba->nvmet_support) {
|
||||
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
|
||||
/* Create NVMET Receive Queue for header */
|
||||
qdesc = lpfc_sli4_queue_alloc(phba,
|
||||
@ -8797,12 +8711,23 @@ lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
|
||||
}
|
||||
|
||||
static inline void
|
||||
lpfc_sli4_release_queue_map(uint16_t **qmap)
|
||||
lpfc_sli4_release_hdwq(struct lpfc_sli4_hdw_queue *hdwq, int max)
|
||||
{
|
||||
if (*qmap != NULL) {
|
||||
kfree(*qmap);
|
||||
*qmap = NULL;
|
||||
uint32_t idx;
|
||||
|
||||
for (idx = 0; idx < max; idx++) {
|
||||
lpfc_sli4_queue_free(hdwq[idx].hba_eq);
|
||||
lpfc_sli4_queue_free(hdwq[idx].fcp_cq);
|
||||
lpfc_sli4_queue_free(hdwq[idx].nvme_cq);
|
||||
lpfc_sli4_queue_free(hdwq[idx].fcp_wq);
|
||||
lpfc_sli4_queue_free(hdwq[idx].nvme_wq);
|
||||
hdwq[idx].hba_eq = NULL;
|
||||
hdwq[idx].fcp_cq = NULL;
|
||||
hdwq[idx].nvme_cq = NULL;
|
||||
hdwq[idx].fcp_wq = NULL;
|
||||
hdwq[idx].nvme_wq = NULL;
|
||||
}
|
||||
kfree(hdwq);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -8821,29 +8746,10 @@ void
|
||||
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
|
||||
{
|
||||
/* Release HBA eqs */
|
||||
lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs);
|
||||
|
||||
/* Release FCP cqs */
|
||||
lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq,
|
||||
phba->cfg_fcp_io_channel);
|
||||
|
||||
/* Release FCP wqs */
|
||||
lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq,
|
||||
phba->cfg_fcp_io_channel);
|
||||
|
||||
/* Release FCP CQ mapping array */
|
||||
lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map);
|
||||
|
||||
/* Release NVME cqs */
|
||||
lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq,
|
||||
phba->cfg_nvme_io_channel);
|
||||
|
||||
/* Release NVME wqs */
|
||||
lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq,
|
||||
phba->cfg_nvme_io_channel);
|
||||
|
||||
/* Release NVME CQ mapping array */
|
||||
lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
|
||||
if (phba->sli4_hba.hdwq)
|
||||
lpfc_sli4_release_hdwq(phba->sli4_hba.hdwq,
|
||||
phba->cfg_hdw_queue);
|
||||
phba->sli4_hba.hdwq = NULL;
|
||||
|
||||
if (phba->nvmet_support) {
|
||||
lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
|
||||
@ -8927,7 +8833,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
|
||||
cq->chann = qidx;
|
||||
|
||||
if (qtype != LPFC_MBOX) {
|
||||
/* Setup nvme_cq_map for fast lookup */
|
||||
/* Setup cq_map for fast lookup */
|
||||
if (cq_map)
|
||||
*cq_map = cq->queue_id;
|
||||
|
||||
@ -8990,9 +8896,10 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
||||
{
|
||||
uint32_t shdr_status, shdr_add_status;
|
||||
union lpfc_sli4_cfg_shdr *shdr;
|
||||
struct lpfc_sli4_hdw_queue *qp;
|
||||
LPFC_MBOXQ_t *mboxq;
|
||||
int qidx;
|
||||
uint32_t length, io_channel;
|
||||
uint32_t length;
|
||||
int rc = -ENOMEM;
|
||||
|
||||
/* Check for dual-ULP support */
|
||||
@ -9043,25 +8950,25 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
||||
/*
|
||||
* Set up HBA Event Queues (EQs)
|
||||
*/
|
||||
io_channel = phba->io_channel_irqs;
|
||||
qp = phba->sli4_hba.hdwq;
|
||||
|
||||
/* Set up HBA event queue */
|
||||
if (io_channel && !phba->sli4_hba.hba_eq) {
|
||||
if (!qp) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3147 Fast-path EQs not allocated\n");
|
||||
rc = -ENOMEM;
|
||||
goto out_error;
|
||||
}
|
||||
for (qidx = 0; qidx < io_channel; qidx++) {
|
||||
if (!phba->sli4_hba.hba_eq[qidx]) {
|
||||
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
|
||||
if (!qp[qidx].hba_eq) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"0522 Fast-path EQ (%d) not "
|
||||
"allocated\n", qidx);
|
||||
rc = -ENOMEM;
|
||||
goto out_destroy;
|
||||
}
|
||||
rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx],
|
||||
phba->cfg_fcp_imax);
|
||||
rc = lpfc_eq_create(phba, qp[qidx].hba_eq,
|
||||
phba->cfg_fcp_imax);
|
||||
if (rc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"0523 Failed setup of fast-path EQ "
|
||||
@ -9070,26 +8977,17 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
||||
goto out_destroy;
|
||||
}
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
||||
"2584 HBA EQ setup: queue[%d]-id=%d\n",
|
||||
qidx, phba->sli4_hba.hba_eq[qidx]->queue_id);
|
||||
"2584 HBA EQ setup: queue[%d]-id=%d\n", qidx,
|
||||
qp[qidx].hba_eq->queue_id);
|
||||
}
|
||||
|
||||
if (phba->cfg_nvme_io_channel) {
|
||||
if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"6084 Fast-path NVME %s array not allocated\n",
|
||||
(phba->sli4_hba.nvme_cq) ? "CQ" : "WQ");
|
||||
rc = -ENOMEM;
|
||||
goto out_destroy;
|
||||
}
|
||||
|
||||
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
|
||||
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
|
||||
rc = lpfc_create_wq_cq(phba,
|
||||
phba->sli4_hba.hba_eq[
|
||||
qidx % io_channel],
|
||||
phba->sli4_hba.nvme_cq[qidx],
|
||||
phba->sli4_hba.nvme_wq[qidx],
|
||||
&phba->sli4_hba.nvme_cq_map[qidx],
|
||||
qp[qidx].hba_eq,
|
||||
qp[qidx].nvme_cq,
|
||||
qp[qidx].nvme_wq,
|
||||
&phba->sli4_hba.hdwq[qidx].nvme_cq_map,
|
||||
qidx, LPFC_NVME);
|
||||
if (rc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
@ -9101,31 +8999,19 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
||||
}
|
||||
}
|
||||
|
||||
if (phba->cfg_fcp_io_channel) {
|
||||
/* Set up fast-path FCP Response Complete Queue */
|
||||
if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) {
|
||||
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
|
||||
rc = lpfc_create_wq_cq(phba,
|
||||
qp[qidx].hba_eq,
|
||||
qp[qidx].fcp_cq,
|
||||
qp[qidx].fcp_wq,
|
||||
&phba->sli4_hba.hdwq[qidx].fcp_cq_map,
|
||||
qidx, LPFC_FCP);
|
||||
if (rc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3148 Fast-path FCP %s array not allocated\n",
|
||||
phba->sli4_hba.fcp_cq ? "WQ" : "CQ");
|
||||
rc = -ENOMEM;
|
||||
goto out_destroy;
|
||||
}
|
||||
|
||||
for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
|
||||
rc = lpfc_create_wq_cq(phba,
|
||||
phba->sli4_hba.hba_eq[
|
||||
qidx % io_channel],
|
||||
phba->sli4_hba.fcp_cq[qidx],
|
||||
phba->sli4_hba.fcp_wq[qidx],
|
||||
&phba->sli4_hba.fcp_cq_map[qidx],
|
||||
qidx, LPFC_FCP);
|
||||
if (rc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"0535 Failed to setup fastpath "
|
||||
"FCP WQ/CQ (%d), rc = 0x%x\n",
|
||||
qidx, (uint32_t)rc);
|
||||
goto out_destroy;
|
||||
}
|
||||
goto out_destroy;
|
||||
}
|
||||
}
|
||||
|
||||
@ -9144,7 +9030,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
||||
goto out_destroy;
|
||||
}
|
||||
|
||||
rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
|
||||
rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
|
||||
phba->sli4_hba.mbx_cq,
|
||||
phba->sli4_hba.mbx_wq,
|
||||
NULL, 0, LPFC_MBOX);
|
||||
@ -9165,7 +9051,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
||||
if (phba->cfg_nvmet_mrq > 1) {
|
||||
rc = lpfc_cq_create_set(phba,
|
||||
phba->sli4_hba.nvmet_cqset,
|
||||
phba->sli4_hba.hba_eq,
|
||||
qp,
|
||||
LPFC_WCQ, LPFC_NVMET);
|
||||
if (rc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
@ -9177,7 +9063,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
||||
} else {
|
||||
/* Set up NVMET Receive Complete Queue */
|
||||
rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
|
||||
phba->sli4_hba.hba_eq[0],
|
||||
qp[0].hba_eq,
|
||||
LPFC_WCQ, LPFC_NVMET);
|
||||
if (rc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
@ -9191,7 +9077,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
||||
"6090 NVMET CQ setup: cq-id=%d, "
|
||||
"parent eq-id=%d\n",
|
||||
phba->sli4_hba.nvmet_cqset[0]->queue_id,
|
||||
phba->sli4_hba.hba_eq[0]->queue_id);
|
||||
qp[0].hba_eq->queue_id);
|
||||
}
|
||||
}
|
||||
|
||||
@ -9203,14 +9089,14 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
||||
rc = -ENOMEM;
|
||||
goto out_destroy;
|
||||
}
|
||||
rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
|
||||
phba->sli4_hba.els_cq,
|
||||
phba->sli4_hba.els_wq,
|
||||
NULL, 0, LPFC_ELS);
|
||||
rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
|
||||
phba->sli4_hba.els_cq,
|
||||
phba->sli4_hba.els_wq,
|
||||
NULL, 0, LPFC_ELS);
|
||||
if (rc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
|
||||
(uint32_t)rc);
|
||||
"0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
|
||||
(uint32_t)rc);
|
||||
goto out_destroy;
|
||||
}
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
||||
@ -9218,7 +9104,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
||||
phba->sli4_hba.els_wq->queue_id,
|
||||
phba->sli4_hba.els_cq->queue_id);
|
||||
|
||||
if (phba->cfg_nvme_io_channel) {
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
|
||||
/* Set up NVME LS Complete Queue */
|
||||
if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
@ -9227,14 +9113,14 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
||||
rc = -ENOMEM;
|
||||
goto out_destroy;
|
||||
}
|
||||
rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
|
||||
phba->sli4_hba.nvmels_cq,
|
||||
phba->sli4_hba.nvmels_wq,
|
||||
NULL, 0, LPFC_NVME_LS);
|
||||
rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
|
||||
phba->sli4_hba.nvmels_cq,
|
||||
phba->sli4_hba.nvmels_wq,
|
||||
NULL, 0, LPFC_NVME_LS);
|
||||
if (rc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"0529 Failed setup of NVVME LS WQ/CQ: "
|
||||
"rc = 0x%x\n", (uint32_t)rc);
|
||||
"0526 Failed setup of NVVME LS WQ/CQ: "
|
||||
"rc = 0x%x\n", (uint32_t)rc);
|
||||
goto out_destroy;
|
||||
}
|
||||
|
||||
@ -9320,7 +9206,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
||||
phba->sli4_hba.dat_rq->queue_id,
|
||||
phba->sli4_hba.els_cq->queue_id);
|
||||
|
||||
for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
|
||||
for (qidx = 0; qidx < phba->cfg_hdw_queue;
|
||||
qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
|
||||
lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
|
||||
phba->cfg_fcp_imax);
|
||||
|
||||
@ -9347,6 +9234,7 @@ out_error:
|
||||
void
|
||||
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
|
||||
{
|
||||
struct lpfc_sli4_hdw_queue *qp;
|
||||
int qidx;
|
||||
|
||||
/* Unset mailbox command work queue */
|
||||
@ -9366,17 +9254,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
|
||||
lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
|
||||
phba->sli4_hba.dat_rq);
|
||||
|
||||
/* Unset FCP work queue */
|
||||
if (phba->sli4_hba.fcp_wq)
|
||||
for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
|
||||
lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]);
|
||||
|
||||
/* Unset NVME work queue */
|
||||
if (phba->sli4_hba.nvme_wq) {
|
||||
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
|
||||
lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]);
|
||||
}
|
||||
|
||||
/* Unset mailbox command complete queue */
|
||||
if (phba->sli4_hba.mbx_cq)
|
||||
lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
|
||||
@ -9389,11 +9266,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
|
||||
if (phba->sli4_hba.nvmels_cq)
|
||||
lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
|
||||
|
||||
/* Unset NVME response complete queue */
|
||||
if (phba->sli4_hba.nvme_cq)
|
||||
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
|
||||
lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
|
||||
|
||||
if (phba->nvmet_support) {
|
||||
/* Unset NVMET MRQ queue */
|
||||
if (phba->sli4_hba.nvmet_mrq_hdr) {
|
||||
@ -9412,15 +9284,17 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
|
||||
}
|
||||
}
|
||||
|
||||
/* Unset FCP response complete queue */
|
||||
if (phba->sli4_hba.fcp_cq)
|
||||
for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
|
||||
lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]);
|
||||
|
||||
/* Unset fast-path event queue */
|
||||
if (phba->sli4_hba.hba_eq)
|
||||
for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
|
||||
lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]);
|
||||
/* Unset fast-path SLI4 queues */
|
||||
if (phba->sli4_hba.hdwq) {
|
||||
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
|
||||
qp = &phba->sli4_hba.hdwq[qidx];
|
||||
lpfc_wq_destroy(phba, qp->fcp_wq);
|
||||
lpfc_wq_destroy(phba, qp->nvme_wq);
|
||||
lpfc_cq_destroy(phba, qp->fcp_cq);
|
||||
lpfc_cq_destroy(phba, qp->nvme_cq);
|
||||
lpfc_eq_destroy(phba, qp->hba_eq);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -10246,7 +10120,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
|
||||
if (vec >= vectors)
|
||||
vec = 0;
|
||||
index++;
|
||||
if (index >= phba->cfg_fcp_io_channel)
|
||||
if (index >= phba->cfg_hdw_queue)
|
||||
index = 0;
|
||||
cpup++;
|
||||
}
|
||||
@ -10271,7 +10145,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
|
||||
char *name;
|
||||
|
||||
/* Set up MSI-X multi-message vectors */
|
||||
vectors = phba->io_channel_irqs;
|
||||
vectors = phba->cfg_hdw_queue;
|
||||
|
||||
rc = pci_alloc_irq_vectors(phba->pcidev,
|
||||
(phba->nvmet_support) ? 1 : 2,
|
||||
@ -10305,19 +10179,15 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
|
||||
}
|
||||
}
|
||||
|
||||
if (vectors != phba->io_channel_irqs) {
|
||||
if (vectors != phba->cfg_hdw_queue) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3238 Reducing IO channels to match number of "
|
||||
"MSI-X vectors, requested %d got %d\n",
|
||||
phba->io_channel_irqs, vectors);
|
||||
if (phba->cfg_fcp_io_channel > vectors)
|
||||
phba->cfg_fcp_io_channel = vectors;
|
||||
if (phba->cfg_nvme_io_channel > vectors)
|
||||
phba->cfg_nvme_io_channel = vectors;
|
||||
if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
|
||||
phba->io_channel_irqs = phba->cfg_fcp_io_channel;
|
||||
else
|
||||
phba->io_channel_irqs = phba->cfg_nvme_io_channel;
|
||||
phba->cfg_hdw_queue, vectors);
|
||||
if (phba->cfg_hdw_queue > vectors)
|
||||
phba->cfg_hdw_queue = vectors;
|
||||
if (phba->cfg_nvmet_mrq > vectors)
|
||||
phba->cfg_nvmet_mrq = vectors;
|
||||
}
|
||||
lpfc_cpu_affinity_check(phba, vectors);
|
||||
|
||||
@ -10374,7 +10244,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
|
||||
return rc;
|
||||
}
|
||||
|
||||
for (index = 0; index < phba->io_channel_irqs; index++) {
|
||||
for (index = 0; index < phba->cfg_hdw_queue; index++) {
|
||||
phba->sli4_hba.hba_eq_hdl[index].idx = index;
|
||||
phba->sli4_hba.hba_eq_hdl[index].phba = phba;
|
||||
}
|
||||
@ -10439,7 +10309,7 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
|
||||
phba->intr_type = INTx;
|
||||
intr_mode = 0;
|
||||
|
||||
for (idx = 0; idx < phba->io_channel_irqs; idx++) {
|
||||
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
|
||||
eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
|
||||
eqhdl->idx = idx;
|
||||
eqhdl->phba = phba;
|
||||
@ -10467,7 +10337,7 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba)
|
||||
int index;
|
||||
|
||||
/* Free up MSI-X multi-message vectors */
|
||||
for (index = 0; index < phba->io_channel_irqs; index++)
|
||||
for (index = 0; index < phba->cfg_hdw_queue; index++)
|
||||
free_irq(pci_irq_vector(phba->pcidev, index),
|
||||
&phba->sli4_hba.hba_eq_hdl[index]);
|
||||
} else {
|
||||
@ -10620,7 +10490,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
|
||||
struct pci_dev *pdev = phba->pcidev;
|
||||
|
||||
lpfc_stop_hba_timers(phba);
|
||||
phba->sli4_hba.intr_enable = 0;
|
||||
if (phba->pport)
|
||||
phba->sli4_hba.intr_enable = 0;
|
||||
|
||||
/*
|
||||
* Gracefully wait out the potential current outstanding asynchronous
|
||||
@ -10839,8 +10710,6 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
||||
phba->nvme_support = 0;
|
||||
phba->nvmet_support = 0;
|
||||
phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
|
||||
phba->cfg_nvme_io_channel = 0;
|
||||
phba->io_channel_irqs = phba->cfg_fcp_io_channel;
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
|
||||
"6101 Disabling NVME support: "
|
||||
"Not supported by firmware: %d %d\n",
|
||||
@ -11792,28 +11661,11 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Get the default values for Model Name and Description */
lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

/* Create SCSI host to the physical port */
error = lpfc_create_shost(phba);
if (error) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1415 Failed to create scsi host.\n");
goto out_unset_driver_resource;
}

/* Configure sysfs attributes */
vport = phba->pport;
error = lpfc_alloc_sysfs_attr(vport);
if (error) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1416 Failed to allocate sysfs attr\n");
goto out_destroy_shost;
}

shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
/* Now, trying to enable interrupt and bring up the device */
cfg_mode = phba->cfg_use_msi;

/* Put device to a known state before enabling interrupt */
phba->pport = NULL;
lpfc_stop_port(phba);

/* Configure and enable interrupt */
@ -11822,18 +11674,33 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0426 Failed to enable interrupt.\n");
error = -ENODEV;
goto out_free_sysfs_attr;
goto out_unset_driver_resource;
}
/* Default to single EQ for non-MSI-X */
if (phba->intr_type != MSIX) {
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
phba->cfg_fcp_io_channel = 1;
phba->cfg_hdw_queue = 1;
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
phba->cfg_nvme_io_channel = 1;
if (phba->nvmet_support)
phba->cfg_nvmet_mrq = 1;
}
phba->io_channel_irqs = 1;
}

/* Create SCSI host to the physical port */
error = lpfc_create_shost(phba);
if (error) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1415 Failed to create scsi host.\n");
goto out_disable_intr;
}
vport = phba->pport;
shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */

/* Configure sysfs attributes */
error = lpfc_alloc_sysfs_attr(vport);
if (error) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1416 Failed to allocate sysfs attr\n");
goto out_destroy_shost;
}

/* Set up SLI-4 HBA */
@ -11841,7 +11708,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1421 Failed to set up hba\n");
error = -ENODEV;
goto out_disable_intr;
goto out_free_sysfs_attr;
}

/* Log the current active interrupt mode */
@ -11877,7 +11744,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
phba, phba->sli4_hba.common_xri_max);
if (len == 0) {
error = -ENOMEM;
goto out_disable_intr;
goto out_free_sysfs_attr;
}
phba->total_common_bufs += len;
}
@ -11895,12 +11762,12 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)

return 0;

out_disable_intr:
lpfc_sli4_disable_intr(phba);
out_free_sysfs_attr:
lpfc_free_sysfs_attr(vport);
out_destroy_shost:
lpfc_destroy_shost(phba);
out_disable_intr:
lpfc_sli4_disable_intr(phba);
out_unset_driver_resource:
lpfc_unset_driver_resource_phase2(phba);
out_unset_driver_resource_s4:
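
The probe-path hunks above also introduce a fallback: when MSI-X is not available there is only one interrupt vector, so the driver drops to a single hardware queue (and a single NVMET MRQ when target mode is enabled). Below is a minimal, self-contained sketch of that decision; the types, names and values are illustrative stand-ins, not the driver's own.

#include <stdio.h>
#include <stdbool.h>

/* Stand-in types so the sketch compiles on its own (assumptions). */
enum intr_type { INTX, MSI, MSIX };

struct hba_cfg {
	enum intr_type intr_type;
	bool nvme_enabled;
	bool nvmet_support;
	unsigned int cfg_hdw_queue;
	unsigned int cfg_nvmet_mrq;
};

/* Mirrors the "Default to single EQ for non-MSI-X" hunk: without MSI-X
 * the driver cannot spread work over multiple vectors, so it falls back
 * to one hardware queue, and to one NVMET MRQ when acting as a target. */
static void default_to_single_eq(struct hba_cfg *cfg)
{
	if (cfg->intr_type != MSIX) {
		cfg->cfg_hdw_queue = 1;
		if (cfg->nvme_enabled && cfg->nvmet_support)
			cfg->cfg_nvmet_mrq = 1;
	}
}

int main(void)
{
	struct hba_cfg cfg = { INTX, true, true, 16, 4 };

	default_to_single_eq(&cfg);
	printf("hdw_queue=%u nvmet_mrq=%u\n", cfg.cfg_hdw_queue, cfg.cfg_nvmet_mrq);
	return 0;
}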

@ -239,7 +239,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
if (qidx) {
str = "IO "; /* IO queue */
qhandle->index = ((qidx - 1) %
vport->phba->cfg_nvme_io_channel);
vport->phba->cfg_hdw_queue);
} else {
str = "ADM"; /* Admin queue */
qhandle->index = qidx;
@ -247,7 +247,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,

lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6073 Binding %s HdwQueue %d (cpu %d) to "
"io_channel %d qhandle %p\n", str,
"hdw_queue %d qhandle %p\n", str,
qidx, qhandle->cpu_id, qhandle->index, qhandle);
*handle = (void *)qhandle;
return 0;
@ -2083,10 +2083,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
* allocate + 3, one for cmd, one for rsp and one for this alignment
*/
lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;

cstat = kmalloc((sizeof(struct lpfc_nvme_ctrl_stat) *
phba->cfg_nvme_io_channel), GFP_KERNEL);
phba->cfg_hdw_queue), GFP_KERNEL);
if (!cstat)
return -ENOMEM;

@ -2130,7 +2130,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
atomic_set(&lport->fc4NvmeLsRequests, 0);
atomic_set(&lport->fc4NvmeLsCmpls, 0);

for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
for (i = 0; i < phba->cfg_hdw_queue; i++) {
cstat = &lport->cstat[i];
atomic_set(&cstat->fc4NvmeInputRequests, 0);
atomic_set(&cstat->fc4NvmeOutputRequests, 0);
@ -2587,14 +2587,14 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
struct lpfc_sli_ring *pring;
u32 i, wait_cnt = 0;

if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.nvme_wq)
if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
return;

/* Cycle through all NVME rings and make sure all outstanding
* WQEs have been removed from the txcmplqs.
*/
for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
pring = phba->sli4_hba.nvme_wq[i]->pring;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;

if (!pring)
continue;

@ -973,7 +973,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
* WQE release CQE
*/
ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
wq = phba->sli4_hba.nvme_wq[rsp->hwqid];
wq = phba->sli4_hba.hdwq[rsp->hwqid].nvme_wq;
pring = wq->pring;
spin_lock_irqsave(&pring->ring_lock, iflags);
list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
@ -1047,7 +1047,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
wq = phba->sli4_hba.nvme_wq[ctxp->wqeq->hba_wqidx];
wq = phba->sli4_hba.hdwq[ctxp->wqeq->hba_wqidx].nvme_wq;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
return;
@ -1377,7 +1377,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
* allocate + 3, one for cmd, one for rsp and one for this alignment
*/
lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
@ -1697,8 +1697,8 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
return;
if (phba->targetport) {
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
wq = phba->sli4_hba.nvme_wq[qidx];
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
lpfc_nvmet_wqfull_flush(phba, wq, NULL);
}
init_completion(&tgtp->tport_unreg_done);

@ -3661,8 +3661,8 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
return hwq;
}

if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
&& phba->cfg_fcp_io_channel > 1) {
if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU &&
phba->cfg_hdw_queue > 1) {
cpu = lpfc_cmd->cpu;
if (cpu < phba->sli4_hba.num_present_cpu) {
cpup = phba->sli4_hba.cpu_map;
@ -3671,7 +3671,7 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
}
}
chann = atomic_add_return(1, &phba->fcp_qidx);
chann = chann % phba->cfg_fcp_io_channel;
chann = chann % phba->cfg_hdw_queue;
return chann;
}
@ -4598,7 +4598,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)

iocb = &lpfc_cmd->cur_iocbq;
if (phba->sli_rev == LPFC_SLI_REV4) {
pring_s4 = phba->sli4_hba.fcp_wq[iocb->hba_wqidx]->pring;
pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].fcp_wq->pring;
if (!pring_s4) {
ret = FAILED;
goto out_unlock;

@ -3981,8 +3981,8 @@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)

/* Look on all the FCP Rings for the iotag */
if (phba->sli_rev >= LPFC_SLI_REV4) {
for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
pring = phba->sli4_hba.fcp_wq[i]->pring;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
lpfc_sli_abort_iocb_ring(phba, pring);
}
} else {
@ -4006,12 +4006,13 @@ lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
struct lpfc_sli_ring *pring;
uint32_t i;

if (phba->sli_rev < LPFC_SLI_REV4)
if ((phba->sli_rev < LPFC_SLI_REV4) ||
!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return;

/* Abort all IO on each NVME ring. */
for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
pring = phba->sli4_hba.nvme_wq[i]->pring;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
lpfc_sli_abort_wqe_ring(phba, pring);
}
}
@ -4044,8 +4045,8 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)

/* Look on all the FCP Rings for the iotag */
if (phba->sli_rev >= LPFC_SLI_REV4) {
for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
pring = phba->sli4_hba.fcp_wq[i]->pring;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;

spin_lock_irq(&pring->ring_lock);
/* Retrieve everything on txq */
@ -4110,7 +4111,8 @@ lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
uint32_t i;
struct lpfc_iocbq *piocb, *next_iocb;

if (phba->sli_rev < LPFC_SLI_REV4)
if ((phba->sli_rev < LPFC_SLI_REV4) ||
!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return;

/* Hint to other driver operations that a flush is in progress. */
@ -4122,8 +4124,8 @@ lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
* a local driver reason code. This is a flush so no
* abort exchange to FW.
*/
for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
pring = phba->sli4_hba.nvme_wq[i]->pring;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;

spin_lock_irq(&pring->ring_lock);
list_for_each_entry_safe(piocb, next_iocb,
@ -5564,6 +5566,7 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
int qidx;
struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
struct lpfc_sli4_hdw_queue *qp;

sli4_hba->sli4_cq_release(sli4_hba->mbx_cq, LPFC_QUEUE_REARM);
sli4_hba->sli4_cq_release(sli4_hba->els_cq, LPFC_QUEUE_REARM);
@ -5571,20 +5574,19 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
sli4_hba->sli4_cq_release(sli4_hba->nvmels_cq,
LPFC_QUEUE_REARM);

if (sli4_hba->fcp_cq)
for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
sli4_hba->sli4_cq_release(sli4_hba->fcp_cq[qidx],
qp = sli4_hba->hdwq;
if (sli4_hba->hdwq) {
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
sli4_hba->sli4_cq_release(qp[qidx].fcp_cq,
LPFC_QUEUE_REARM);

if (sli4_hba->nvme_cq)
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
sli4_hba->sli4_cq_release(sli4_hba->nvme_cq[qidx],
sli4_hba->sli4_cq_release(qp[qidx].nvme_cq,
LPFC_QUEUE_REARM);
}

if (sli4_hba->hba_eq)
for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
sli4_hba->sli4_eq_release(sli4_hba->hba_eq[qidx],
LPFC_QUEUE_REARM);
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++)
sli4_hba->sli4_eq_release(qp[qidx].hba_eq,
LPFC_QUEUE_REARM);
}

if (phba->nvmet_support) {
for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
@ -7857,11 +7859,11 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)

/* Find the eq associated with the mcq */

if (sli4_hba->hba_eq)
for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++)
if (sli4_hba->hba_eq[eqidx]->queue_id ==
if (sli4_hba->hdwq)
for (eqidx = 0; eqidx < phba->cfg_hdw_queue; eqidx++)
if (sli4_hba->hdwq[eqidx].hba_eq->queue_id ==
sli4_hba->mbx_cq->assoc_qid) {
fpeq = sli4_hba->hba_eq[eqidx];
fpeq = sli4_hba->hdwq[eqidx].hba_eq;
break;
}
if (!fpeq)
@ -9866,7 +9868,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
/* Get the WQ */
if ((piocb->iocb_flag & LPFC_IO_FCP) ||
(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq;
} else {
wq = phba->sli4_hba.els_wq;
}
@ -10001,7 +10003,7 @@ struct lpfc_sli_ring *
lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
if (unlikely(!phba->sli4_hba.fcp_wq))
if (unlikely(!phba->sli4_hba.hdwq))
return NULL;
/*
* for abort iocb hba_wqidx should already
@ -10012,9 +10014,9 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
lpfc_sli4_scmd_to_wqidx_distr(
phba, piocb->context1);
piocb->hba_wqidx = piocb->hba_wqidx %
phba->cfg_fcp_io_channel;
phba->cfg_hdw_queue;
}
return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring;
} else {
if (unlikely(!phba->sli4_hba.els_wq))
return NULL;
@ -10063,7 +10065,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) {

/* Get associated EQ with this index */
fpeq = phba->sli4_hba.hba_eq[idx];
fpeq = phba->sli4_hba.hdwq[idx].hba_eq;

/* Turn off interrupts from this EQ */
phba->sli4_hba.sli4_eq_clr_intr(fpeq);
@ -10497,17 +10499,8 @@ lpfc_sli4_queue_init(struct lpfc_hba *phba)
INIT_LIST_HEAD(&psli->mboxq);
INIT_LIST_HEAD(&psli->mboxq_cmpl);
/* Initialize list headers for txq and txcmplq as double linked lists */
for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
pring = phba->sli4_hba.fcp_wq[i]->pring;
pring->flag = 0;
pring->ringno = LPFC_FCP_RING;
INIT_LIST_HEAD(&pring->txq);
INIT_LIST_HEAD(&pring->txcmplq);
INIT_LIST_HEAD(&pring->iocb_continueq);
spin_lock_init(&pring->ring_lock);
}
for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
pring = phba->sli4_hba.nvme_wq[i]->pring;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
pring->flag = 0;
pring->ringno = LPFC_FCP_RING;
INIT_LIST_HEAD(&pring->txq);
@ -10523,7 +10516,16 @@ lpfc_sli4_queue_init(struct lpfc_hba *phba)
INIT_LIST_HEAD(&pring->iocb_continueq);
spin_lock_init(&pring->ring_lock);

if (phba->cfg_nvme_io_channel) {
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
for (i = 0; i < phba->cfg_hdw_queue; i++) {
pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
pring->flag = 0;
pring->ringno = LPFC_FCP_RING;
INIT_LIST_HEAD(&pring->txq);
INIT_LIST_HEAD(&pring->txcmplq);
INIT_LIST_HEAD(&pring->iocb_continueq);
spin_lock_init(&pring->ring_lock);
}
pring = phba->sli4_hba.nvmels_wq->pring;
pring->flag = 0;
pring->ringno = LPFC_ELS_RING;
@ -14070,6 +14072,20 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
/* Get the reference to the corresponding CQ */
cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

/* First check for NVME/SCSI completion */
if (cqid == phba->sli4_hba.hdwq[qidx].nvme_cq_map) {
/* Process NVME / NVMET command completion */
cq = phba->sli4_hba.hdwq[qidx].nvme_cq;
goto process_cq;
}

if (cqid == phba->sli4_hba.hdwq[qidx].fcp_cq_map) {
/* Process FCP command completion */
cq = phba->sli4_hba.hdwq[qidx].fcp_cq;
goto process_cq;
}

/* Next check for NVMET completion */
if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
@ -14079,20 +14095,6 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
}
}

if (phba->sli4_hba.nvme_cq_map &&
(cqid == phba->sli4_hba.nvme_cq_map[qidx])) {
/* Process NVME / NVMET command completion */
cq = phba->sli4_hba.nvme_cq[qidx];
goto process_cq;
}

if (phba->sli4_hba.fcp_cq_map &&
(cqid == phba->sli4_hba.fcp_cq_map[qidx])) {
/* Process FCP command completion */
cq = phba->sli4_hba.fcp_cq[qidx];
goto process_cq;
}

if (phba->sli4_hba.nvmels_cq &&
(cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
/* Process NVME unsol rcv */
@ -14101,7 +14103,8 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,

/* Otherwise this is a Slow path event */
if (cq == NULL) {
lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
lpfc_sli4_sp_handle_eqe(phba, eqe,
phba->sli4_hba.hdwq[qidx].hba_eq);
return;
}

@ -14115,7 +14118,7 @@ process_cq:
}

/* Save EQ associated with this CQ */
cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
cq->assoc_qp = phba->sli4_hba.hdwq[qidx].hba_eq;

if (!queue_work(phba->wq, &cq->irqwork))
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@ -14236,11 +14239,11 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)

if (unlikely(!phba))
return IRQ_NONE;
if (unlikely(!phba->sli4_hba.hba_eq))
if (unlikely(!phba->sli4_hba.hdwq))
return IRQ_NONE;

/* Get to the EQ struct associated with this vector */
fpeq = phba->sli4_hba.hba_eq[hba_eqidx];
fpeq = phba->sli4_hba.hdwq[hba_eqidx].hba_eq;
if (unlikely(!fpeq))
return IRQ_NONE;

@ -14340,7 +14343,7 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
/*
* Invoke fast-path host attention interrupt handling as appropriate.
*/
for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
&phba->sli4_hba.hba_eq_hdl[qidx]);
if (hba_irq_rc == IRQ_HANDLED)
@ -14527,7 +14530,7 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
union lpfc_sli4_cfg_shdr *shdr;
uint16_t dmult;

if (startq >= phba->io_channel_irqs)
if (startq >= phba->cfg_hdw_queue)
return 0;

mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@ -14541,7 +14544,7 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
eq_delay = &mbox->u.mqe.un.eq_delay;

/* Calculate delay multiper from maximum interrupt per second */
result = imax / phba->io_channel_irqs;
result = imax / phba->cfg_hdw_queue;
if (result > LPFC_DMULT_CONST || result == 0)
dmult = 0;
else
@ -14550,8 +14553,8 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
dmult = LPFC_DMULT_MAX;

cnt = 0;
for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
eq = phba->sli4_hba.hba_eq[qidx];
for (qidx = startq; qidx < phba->cfg_hdw_queue; qidx++) {
eq = phba->sli4_hba.hdwq[qidx].hba_eq;
if (!eq)
continue;
eq->q_mode = imax;
@ -14568,8 +14571,7 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
val = phba->cfg_fcp_imax;
if (val) {
/* First, interrupts per sec per EQ */
val = phba->cfg_fcp_imax /
phba->io_channel_irqs;
val = phba->cfg_fcp_imax / phba->cfg_hdw_queue;

/* us delay between each interrupt */
val = LPFC_SEC_TO_USEC / val;
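
The interrupt-coalescing hunks above now divide the configured interrupt budget by cfg_hdw_queue instead of io_channel_irqs. A small, self-contained sketch of that arithmetic follows; LPFC_SEC_TO_USEC and the sample numbers are assumed stand-ins, not values taken from the patch.

#include <stdio.h>

/* Stand-in for the driver's constant (assumed to mean 1 second in usec). */
#define LPFC_SEC_TO_USEC 1000000

/* Mirrors the reworked division: the interrupt budget (imax, in
 * interrupts per second) is spread across cfg_hdw_queue EQs, and the
 * per-EQ rate is turned into a microsecond gap between interrupts. */
static void eq_delay_example(unsigned int imax, unsigned int cfg_hdw_queue)
{
	unsigned int per_eq, usdelay;

	if (imax == 0 || cfg_hdw_queue == 0)
		return;
	per_eq = imax / cfg_hdw_queue;		/* interrupts/sec per EQ */
	if (per_eq == 0)
		return;
	usdelay = LPFC_SEC_TO_USEC / per_eq;	/* usec between interrupts */

	printf("imax=%u hdwq=%u -> %u intr/s per EQ, ~%u us delay\n",
	       imax, cfg_hdw_queue, per_eq, usdelay);
}

int main(void)
{
	/* e.g. 150000 interrupts/sec over 8 queues: 18750 intr/s per EQ, ~53 us */
	eq_delay_example(150000, 8);
	return 0;
}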

@ -14877,7 +14879,7 @@ out:
* lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
* @phba: HBA structure that indicates port to create a queue on.
* @cqp: The queue structure array to use to create the completion queues.
* @eqp: The event queue array to bind these completion queues to.
* @hdwq: The hardware queue array with the EQ to bind completion queues to.
*
* This function creates a set of completion queue, s to support MRQ
* as detailed in @cqp, on a port,
@ -14897,7 +14899,8 @@ out:
**/
int
lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
struct lpfc_queue **eqp, uint32_t type, uint32_t subtype)
struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
uint32_t subtype)
{
struct lpfc_queue *cq;
struct lpfc_queue *eq;
@ -14912,7 +14915,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,

/* sanity check on queue memory */
numcq = phba->cfg_nvmet_mrq;
if (!cqp || !eqp || !numcq)
if (!cqp || !hdwq || !numcq)
return -ENODEV;

mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@ -14939,7 +14942,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,

for (idx = 0; idx < numcq; idx++) {
cq = cqp[idx];
eq = eqp[idx];
eq = hdwq[idx].hba_eq;
if (!cq || !eq) {
status = -ENOMEM;
goto out;
@ -19462,7 +19465,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)

if (phba->link_flag & LS_MDS_LOOPBACK) {
/* MDS WQE are posted only to first WQ*/
wq = phba->sli4_hba.fcp_wq[0];
wq = phba->sli4_hba.hdwq[0].fcp_wq;
if (unlikely(!wq))
return 0;
pring = wq->pring;
@ -19712,12 +19715,12 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
/* NVME_FCREQ and NVME_ABTS requests */
if (pwqe->iocb_flag & LPFC_IO_NVME) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
pring = phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_wq->pring;

spin_lock_irqsave(&pring->ring_lock, iflags);
wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
wq = phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_wq;
bf_set(wqe_cqid, &wqe->generic.wqe_com,
phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_cq->queue_id);
ret = lpfc_sli4_wq_put(wq, wqe);
if (ret) {
spin_unlock_irqrestore(&pring->ring_lock, iflags);
@ -19731,7 +19734,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
/* NVMET requests */
if (pwqe->iocb_flag & LPFC_IO_NVMET) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
pring = phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_wq->pring;

spin_lock_irqsave(&pring->ring_lock, iflags);
ctxp = pwqe->context2;
@ -19742,9 +19745,9 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
}
bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
pwqe->sli4_xritag);
wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
wq = phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_wq;
bf_set(wqe_cqid, &wqe->generic.wqe_com,
phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_cq->queue_id);
ret = lpfc_sli4_wq_put(wq, wqe);
if (ret) {
spin_unlock_irqrestore(&pring->ring_lock, iflags);

@ -36,18 +36,13 @@
#define LPFC_NEMBED_MBOX_SGL_CNT 254

/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
#define LPFC_HBA_IO_CHAN_MIN 0
#define LPFC_HBA_IO_CHAN_MAX 32
#define LPFC_FCP_IO_CHAN_DEF 4
#define LPFC_NVME_IO_CHAN_DEF 0
#define LPFC_HBA_HDWQ_MIN 0
#define LPFC_HBA_HDWQ_MAX 64
#define LPFC_HBA_HDWQ_DEF 0

/* Common buffer size to accomidate SCSI and NVME IO buffers */
#define LPFC_COMMON_IO_BUF_SZ 768

/* Number of channels used for Flash Optimized Fabric (FOF) operations */

#define LPFC_FOF_IO_CHAN_NUM 1

/*
* Provide the default FCF Record attributes used by the driver
* when nonFIP mode is configured and there is no other default
@ -534,6 +529,17 @@ struct lpfc_vector_map_info {
#define LPFC_VECTOR_MAP_EMPTY 0xffff

/* SLI4 HBA data structure entries */
struct lpfc_sli4_hdw_queue {
/* Pointers to the constructed SLI4 queues */
struct lpfc_queue *hba_eq; /* Event queues for HBA */
struct lpfc_queue *fcp_cq; /* Fast-path FCP compl queue */
struct lpfc_queue *nvme_cq; /* Fast-path NVME compl queue */
struct lpfc_queue *fcp_wq; /* Fast-path FCP work queue */
struct lpfc_queue *nvme_wq; /* Fast-path NVME work queue */
uint16_t fcp_cq_map;
uint16_t nvme_cq_map;
};

struct lpfc_sli4_hba {
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
* config space registers
@ -606,17 +612,13 @@ struct lpfc_sli4_hba {
uint32_t (*sli4_cq_release)(struct lpfc_queue *q, bool arm);

/* Pointers to the constructed SLI4 queues */
struct lpfc_queue **hba_eq; /* Event queues for HBA */
struct lpfc_queue **fcp_cq; /* Fast-path FCP compl queue */
struct lpfc_queue **nvme_cq; /* Fast-path NVME compl queue */
struct lpfc_sli4_hdw_queue *hdwq;
struct list_head lpfc_wq_list;

/* Pointers to the constructed SLI4 queues for NVMET */
struct lpfc_queue **nvmet_cqset; /* Fast-path NVMET CQ Set queues */
struct lpfc_queue **nvmet_mrq_hdr; /* Fast-path NVMET hdr MRQs */
struct lpfc_queue **nvmet_mrq_data; /* Fast-path NVMET data MRQs */
struct lpfc_queue **fcp_wq; /* Fast-path FCP work queue */
struct lpfc_queue **nvme_wq; /* Fast-path NVME work queue */
uint16_t *fcp_cq_map;
uint16_t *nvme_cq_map;
struct list_head lpfc_wq_list;

struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
@ -817,7 +819,7 @@ int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t, uint32_t);
int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
struct lpfc_queue **eqp, uint32_t type,
struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
uint32_t subtype);
int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t);
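
Taken together, the lpfc_sli4.h hunks replace the per-protocol pointer arrays (hba_eq, fcp_cq/fcp_wq, nvme_cq/nvme_wq and their cq_map arrays) with one lpfc_sli4_hdw_queue entry per hardware queue. The following self-contained sketch models that consolidation with simplified stand-in types; it illustrates the indexing change only and is not the driver's real structures.

#include <stdio.h>
#include <stdlib.h>

#define NUM_HDWQ 4	/* e.g. one per CPU; the patch caps the override at 64 */

/* Stand-in queue object; the driver uses struct lpfc_queue. */
struct queue { int id; };

/* Simplified mirror of the new grouping: one entry holds the EQ plus both
 * protocols' CQ/WQ pairs for a given hardware queue, replacing the old
 * parallel arrays (hba_eq[], fcp_cq[], nvme_wq[], ...). */
struct hdw_queue {
	struct queue *hba_eq;
	struct queue *fcp_cq;
	struct queue *nvme_cq;
	struct queue *fcp_wq;
	struct queue *nvme_wq;
	unsigned short fcp_cq_map;
	unsigned short nvme_cq_map;
};

int main(void)
{
	static struct queue eq[NUM_HDWQ], fcp_wq[NUM_HDWQ], nvme_wq[NUM_HDWQ];
	struct hdw_queue *hdwq = calloc(NUM_HDWQ, sizeof(*hdwq));
	int i;

	if (!hdwq)
		return 1;

	/* Everything that belongs to hardware queue i hangs off hdwq[i]. */
	for (i = 0; i < NUM_HDWQ; i++) {
		eq[i].id = 100 + i;
		fcp_wq[i].id = 200 + i;
		nvme_wq[i].id = 300 + i;
		hdwq[i].hba_eq = &eq[i];
		hdwq[i].fcp_wq = &fcp_wq[i];
		hdwq[i].nvme_wq = &nvme_wq[i];
	}

	/* So lookups such as fcp_wq[idx]->pring become hdwq[idx].fcp_wq->pring. */
	for (i = 0; i < NUM_HDWQ; i++)
		printf("hdwq[%d]: eq=%d fcp_wq=%d nvme_wq=%d\n", i,
		       hdwq[i].hba_eq->id, hdwq[i].fcp_wq->id, hdwq[i].nvme_wq->id);

	free(hdwq);
	return 0;
}

Grouping the queues this way means a single index lookup replaces several parallel-array lookups on the I/O fast path.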