2017-02-13 05:52:37 +08:00
|
|
|
/*******************************************************************
|
|
|
|
* This file is part of the Emulex Linux Device Driver for *
|
2017-02-13 05:52:39 +08:00
|
|
|
* Fibre Channel Host Bus Adapters. *
|
2019-01-29 03:14:41 +08:00
|
|
|
* Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
|
2018-06-26 23:24:31 +08:00
|
|
|
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
|
2017-02-13 05:52:37 +08:00
|
|
|
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
|
|
|
|
* EMULEX and SLI are trademarks of Emulex. *
|
2017-02-13 05:52:39 +08:00
|
|
|
* www.broadcom.com *
|
2017-02-13 05:52:37 +08:00
|
|
|
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
|
|
|
|
* *
|
|
|
|
* This program is free software; you can redistribute it and/or *
|
|
|
|
* modify it under the terms of version 2 of the GNU General *
|
|
|
|
* Public License as published by the Free Software Foundation. *
|
|
|
|
* This program is distributed in the hope that it will be useful. *
|
|
|
|
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
|
|
|
|
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
|
|
|
|
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
|
|
|
|
* TO BE LEGALLY INVALID. See the GNU General Public License for *
|
|
|
|
* more details, a copy of which can be found in the file COPYING *
|
|
|
|
* included with this package. *
|
|
|
|
********************************************************************/
|
|
|
|
#include <linux/pci.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <asm/unaligned.h>
|
|
|
|
#include <linux/crc-t10dif.h>
|
|
|
|
#include <net/checksum.h>
|
|
|
|
|
|
|
|
#include <scsi/scsi.h>
|
|
|
|
#include <scsi/scsi_device.h>
|
|
|
|
#include <scsi/scsi_eh.h>
|
|
|
|
#include <scsi/scsi_host.h>
|
|
|
|
#include <scsi/scsi_tcq.h>
|
|
|
|
#include <scsi/scsi_transport_fc.h>
|
|
|
|
#include <scsi/fc/fc_fs.h>
|
|
|
|
|
2018-01-31 07:58:59 +08:00
|
|
|
#include <linux/nvme.h>
|
2017-02-13 05:52:37 +08:00
|
|
|
#include <linux/nvme-fc-driver.h>
|
2017-12-09 09:18:04 +08:00
|
|
|
#include <linux/nvme-fc.h>
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
#include "lpfc_version.h"
|
|
|
|
#include "lpfc_hw4.h"
|
|
|
|
#include "lpfc_hw.h"
|
|
|
|
#include "lpfc_sli.h"
|
|
|
|
#include "lpfc_sli4.h"
|
|
|
|
#include "lpfc_nl.h"
|
|
|
|
#include "lpfc_disc.h"
|
|
|
|
#include "lpfc.h"
|
|
|
|
#include "lpfc_scsi.h"
|
|
|
|
#include "lpfc_nvme.h"
|
|
|
|
#include "lpfc_nvmet.h"
|
|
|
|
#include "lpfc_logmsg.h"
|
|
|
|
#include "lpfc_crtn.h"
|
|
|
|
#include "lpfc_vport.h"
|
2017-02-13 05:52:38 +08:00
|
|
|
#include "lpfc_debugfs.h"
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
|
|
|
|
struct lpfc_nvmet_rcv_ctx *,
|
|
|
|
dma_addr_t rspbuf,
|
|
|
|
uint16_t rspsize);
|
|
|
|
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
|
|
|
|
struct lpfc_nvmet_rcv_ctx *);
|
|
|
|
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
|
|
|
|
struct lpfc_nvmet_rcv_ctx *,
|
|
|
|
uint32_t, uint16_t);
|
|
|
|
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
|
|
|
|
struct lpfc_nvmet_rcv_ctx *,
|
|
|
|
uint32_t, uint16_t);
|
|
|
|
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
|
|
|
|
struct lpfc_nvmet_rcv_ctx *,
|
|
|
|
uint32_t, uint16_t);
|
2018-01-31 07:58:49 +08:00
|
|
|
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
|
|
|
|
struct lpfc_nvmet_rcv_ctx *);
|
2019-01-29 03:14:39 +08:00
|
|
|
static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);
|
|
|
|
|
|
|
|
static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);
|
2017-02-13 05:52:37 +08:00
|
|
|
|
2018-03-13 20:08:49 +08:00
|
|
|
static union lpfc_wqe128 lpfc_tsend_cmd_template;
|
|
|
|
static union lpfc_wqe128 lpfc_treceive_cmd_template;
|
|
|
|
static union lpfc_wqe128 lpfc_trsp_cmd_template;
|
2018-03-06 04:04:05 +08:00
|
|
|
|
|
|
|
/* Setup WQE templates for NVME IOs */
|
|
|
|
void
|
2018-03-13 20:08:49 +08:00
|
|
|
lpfc_nvmet_cmd_template(void)
|
2018-03-06 04:04:05 +08:00
|
|
|
{
|
|
|
|
union lpfc_wqe128 *wqe;
|
|
|
|
|
|
|
|
/* TSEND template */
|
|
|
|
wqe = &lpfc_tsend_cmd_template;
|
|
|
|
memset(wqe, 0, sizeof(union lpfc_wqe128));
|
|
|
|
|
|
|
|
/* Word 0, 1, 2 - BDE is variable */
|
|
|
|
|
|
|
|
/* Word 3 - payload_offset_len is zero */
|
|
|
|
|
|
|
|
/* Word 4 - relative_offset is variable */
|
|
|
|
|
|
|
|
/* Word 5 - is zero */
|
|
|
|
|
|
|
|
/* Word 6 - ctxt_tag, xri_tag is variable */
|
|
|
|
|
|
|
|
/* Word 7 - wqe_ar is variable */
|
|
|
|
bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
|
|
|
|
bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
|
|
|
|
bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
|
|
|
|
bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
|
|
|
|
bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
|
|
|
|
|
|
|
|
/* Word 8 - abort_tag is variable */
|
|
|
|
|
|
|
|
/* Word 9 - reqtag, rcvoxid is variable */
|
|
|
|
|
|
|
|
/* Word 10 - wqes, xc is variable */
|
|
|
|
bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
|
|
|
|
bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
|
|
|
|
bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
|
|
|
|
bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
|
|
|
|
bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
|
|
|
|
bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);
|
|
|
|
|
|
|
|
/* Word 11 - sup, irsp, irsplen is variable */
|
|
|
|
bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
|
|
|
|
bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
|
|
|
|
bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
|
|
|
|
bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
|
|
|
|
bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
|
|
|
|
bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);
|
|
|
|
|
|
|
|
/* Word 12 - fcp_data_len is variable */
|
|
|
|
|
|
|
|
/* Word 13, 14, 15 - PBDE is zero */
|
|
|
|
|
|
|
|
/* TRECEIVE template */
|
|
|
|
wqe = &lpfc_treceive_cmd_template;
|
|
|
|
memset(wqe, 0, sizeof(union lpfc_wqe128));
|
|
|
|
|
|
|
|
/* Word 0, 1, 2 - BDE is variable */
|
|
|
|
|
|
|
|
/* Word 3 */
|
|
|
|
wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
|
|
|
|
|
|
|
|
/* Word 4 - relative_offset is variable */
|
|
|
|
|
|
|
|
/* Word 5 - is zero */
|
|
|
|
|
|
|
|
/* Word 6 - ctxt_tag, xri_tag is variable */
|
|
|
|
|
|
|
|
/* Word 7 */
|
|
|
|
bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
|
|
|
|
bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
|
|
|
|
bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
|
|
|
|
bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
|
|
|
|
bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
|
|
|
|
|
|
|
|
/* Word 8 - abort_tag is variable */
|
|
|
|
|
|
|
|
/* Word 9 - reqtag, rcvoxid is variable */
|
|
|
|
|
|
|
|
/* Word 10 - xc is variable */
|
|
|
|
bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
|
|
|
|
bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
|
|
|
|
bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
|
|
|
|
bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
|
|
|
|
bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
|
|
|
|
bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
|
|
|
|
|
|
|
|
/* Word 11 - pbde is variable */
|
|
|
|
bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
|
|
|
|
bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
|
|
|
|
bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
|
|
|
|
bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
|
|
|
|
bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
|
|
|
|
bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);
|
|
|
|
|
|
|
|
/* Word 12 - fcp_data_len is variable */
|
|
|
|
|
|
|
|
/* Word 13, 14, 15 - PBDE is variable */
|
|
|
|
|
|
|
|
/* TRSP template */
|
|
|
|
wqe = &lpfc_trsp_cmd_template;
|
|
|
|
memset(wqe, 0, sizeof(union lpfc_wqe128));
|
|
|
|
|
|
|
|
/* Word 0, 1, 2 - BDE is variable */
|
|
|
|
|
|
|
|
/* Word 3 - response_len is variable */
|
|
|
|
|
|
|
|
/* Word 4, 5 - is zero */
|
|
|
|
|
|
|
|
/* Word 6 - ctxt_tag, xri_tag is variable */
|
|
|
|
|
|
|
|
/* Word 7 */
|
|
|
|
bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
|
|
|
|
bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
|
|
|
|
bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
|
|
|
|
bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
|
|
|
|
bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */
|
|
|
|
|
|
|
|
/* Word 8 - abort_tag is variable */
|
|
|
|
|
|
|
|
/* Word 9 - reqtag is variable */
|
|
|
|
|
|
|
|
/* Word 10 wqes, xc is variable */
|
|
|
|
bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
|
|
|
|
bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
|
|
|
|
bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
|
|
|
|
bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
|
|
|
|
bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
|
|
|
|
bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);
|
|
|
|
|
|
|
|
/* Word 11 irsp, irsplen is variable */
|
|
|
|
bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
|
|
|
|
bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
|
|
|
|
bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
|
|
|
|
bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
|
|
|
|
bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
|
|
|
|
bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);
|
|
|
|
|
|
|
|
/* Word 12, 13, 14, 15 - is zero */
|
|
|
|
}
|
|
|
|
|
2019-05-31 23:28:41 +08:00
|
|
|
static struct lpfc_nvmet_rcv_ctx *
|
2019-05-22 08:48:56 +08:00
|
|
|
lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
|
|
|
|
{
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp;
|
|
|
|
unsigned long iflag;
|
|
|
|
bool found = false;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
|
|
|
|
list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
|
|
|
|
if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
|
|
|
|
if (found)
|
|
|
|
return ctxp;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-05-31 23:28:41 +08:00
|
|
|
static struct lpfc_nvmet_rcv_ctx *
|
2019-05-22 08:48:56 +08:00
|
|
|
lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
|
|
|
|
{
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp;
|
|
|
|
unsigned long iflag;
|
|
|
|
bool found = false;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
|
|
|
|
list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
|
|
|
|
if (ctxp->oxid != oxid || ctxp->sid != sid)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
|
|
|
|
if (found)
|
|
|
|
return ctxp;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2019-03-29 02:06:16 +08:00
|
|
|
static void
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
|
|
|
|
{
|
2019-01-29 03:14:39 +08:00
|
|
|
lockdep_assert_held(&ctxp->ctxlock);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
|
2017-09-30 08:34:36 +08:00
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
|
2019-05-22 08:48:56 +08:00
|
|
|
"6313 NVMET Defer ctx release oxid x%x flg x%x\n",
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
ctxp->oxid, ctxp->flag);
|
|
|
|
|
2019-01-29 03:14:39 +08:00
|
|
|
if (ctxp->flag & LPFC_NVMET_CTX_RLS)
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
return;
|
2019-01-29 03:14:39 +08:00
|
|
|
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
ctxp->flag |= LPFC_NVMET_CTX_RLS;
|
2019-05-22 08:48:56 +08:00
|
|
|
spin_lock(&phba->sli4_hba.t_active_list_lock);
|
|
|
|
list_del(&ctxp->list);
|
|
|
|
spin_unlock(&phba->sli4_hba.t_active_list_lock);
|
2019-01-29 03:14:39 +08:00
|
|
|
spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - this flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_release routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can have both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is invoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenario, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
|
2019-01-29 03:14:39 +08:00
|
|
|
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicited ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - this flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_release routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can have both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is invoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenario, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
}
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
/**
|
|
|
|
* lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
|
|
|
|
* @phba: Pointer to HBA context object.
|
|
|
|
* @cmdwqe: Pointer to driver command WQE object.
|
|
|
|
* @wcqe: Pointer to driver response CQE object.
|
|
|
|
*
|
|
|
|
* The function is called from SLI ring event handler with no
|
|
|
|
* lock held. This function is the completion handler for NVME LS commands
|
|
|
|
* The function frees memory resources used for the NVME commands.
|
|
|
|
**/
|
|
|
|
static void
|
|
|
|
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
|
|
|
|
struct lpfc_wcqe_complete *wcqe)
|
|
|
|
{
|
|
|
|
struct lpfc_nvmet_tgtport *tgtp;
|
|
|
|
struct nvmefc_tgt_ls_req *rsp;
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp;
|
|
|
|
uint32_t status, result;
|
|
|
|
|
|
|
|
status = bf_get(lpfc_wcqe_c_status, wcqe);
|
|
|
|
result = wcqe->parameter;
|
2017-06-02 12:06:58 +08:00
|
|
|
ctxp = cmdwqe->context2;
|
|
|
|
|
|
|
|
if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
|
|
|
"6410 NVMET LS cmpl state mismatch IO x%x: "
|
|
|
|
"%d %d\n",
|
|
|
|
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
|
|
|
|
}
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
if (!phba->targetport)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
|
|
|
|
|
2017-12-09 09:18:10 +08:00
|
|
|
if (tgtp) {
|
|
|
|
if (status) {
|
|
|
|
atomic_inc(&tgtp->xmt_ls_rsp_error);
|
2018-01-31 07:58:48 +08:00
|
|
|
if (result == IOERR_ABORT_REQUESTED)
|
2017-12-09 09:18:10 +08:00
|
|
|
atomic_inc(&tgtp->xmt_ls_rsp_aborted);
|
|
|
|
if (bf_get(lpfc_wcqe_c_xb, wcqe))
|
|
|
|
atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
|
|
|
|
} else {
|
|
|
|
atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
|
|
|
|
}
|
|
|
|
}
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
out:
|
|
|
|
rsp = &ctxp->ctx.ls_req;
|
|
|
|
|
2017-02-13 05:52:38 +08:00
|
|
|
lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
|
|
|
|
ctxp->oxid, status, result);
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
|
2017-06-02 12:06:58 +08:00
|
|
|
"6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
|
|
|
|
status, result, ctxp->oxid);
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
lpfc_nlp_put(cmdwqe->context1);
|
|
|
|
cmdwqe->context2 = NULL;
|
|
|
|
cmdwqe->context3 = NULL;
|
|
|
|
lpfc_sli_release_iocbq(phba, cmdwqe);
|
|
|
|
rsp->done(rsp);
|
|
|
|
kfree(ctxp);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA the buffer/context is associated with.
 * @ctx_buf: NVMET context buffer being returned; carries the receive
 *           context (ctx_buf->context) and deferred work item.
 *
 * Description: Releases per-IO resources held by @ctx_buf's receive context
 * and either (a) immediately reuses the context for a command waiting on
 * the nvmet_io_wait list, scheduling its deferred processing, or (b)
 * returns the context to the per-CPU free list for later reuse.
 *
 * Locking: takes ctxp->ctxlock and the sli4_hba nvmet_io_wait /
 * t_active_list / per-CPU context-list locks as needed; callers may hold
 * other locks.
 *
 * Returns: None
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct rqb_dmabuf *nvmebuf;
	struct lpfc_nvmet_ctx_info *infop;
	uint32_t size, oxid, sid;
	int cpu;
	unsigned long iflag;

	/* Return the TXRDY payload buffer, if one was allocated for this IO. */
	if (ctxp->txrdy) {
		dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
			      ctxp->txrdy_phys);
		ctxp->txrdy = NULL;
		ctxp->txrdy_phys = 0;
	}

	/* Log (but tolerate) a context that is already marked free. */
	if (ctxp->state == LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6411 NVMET free, already free IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	/*
	 * Dispose of the RQ buffer still attached to this context. If
	 * LPFC_NVMET_CTX_REUSE_WQ is set, a replacement buffer was already
	 * posted, so this one is freed outright; otherwise it is reposted
	 * to its RQ.
	 */
	if (ctxp->rqb_buffer) {
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		nvmebuf = ctxp->rqb_buffer;
		/* check if freed in another path whilst acquiring lock */
		if (nvmebuf) {
			ctxp->rqb_buffer = NULL;
			if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
				ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
								    nvmebuf);
			} else {
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				/* repost */
				lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
			}
		} else {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		}
	}
	ctxp->state = LPFC_NVMET_STE_FREE;

	/*
	 * If a received command is waiting for a free context, reuse this
	 * one for it right away instead of returning it to the free pool.
	 */
	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
				 nvmebuf, struct rqb_dmabuf,
				 hbuf.list);
		phba->sli4_hba.nvmet_io_wait_cnt--;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		size = nvmebuf->bytes_recv;
		sid = sli4_sid_from_fc_hdr(fc_hdr);

		/* Re-initialize the context for the newly received command. */
		ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
		ctxp->wqeq = NULL;
		ctxp->txrdy = NULL;
		ctxp->offset = 0;
		ctxp->phba = phba;
		ctxp->size = size;
		ctxp->oxid = oxid;
		ctxp->sid = sid;
		ctxp->state = LPFC_NVMET_STE_RCV;
		ctxp->entry_cnt = 1;
		ctxp->flag = 0;
		ctxp->ctxbuf = ctx_buf;
		ctxp->rqb_buffer = (void *)nvmebuf;
		spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		/* NOTE: isr time stamp is stale when context is re-assigned*/
		if (ctxp->ts_isr_cmd) {
			ctxp->ts_cmd_nvme = 0;
			ctxp->ts_nvme_data = 0;
			ctxp->ts_data_wqput = 0;
			ctxp->ts_isr_data = 0;
			ctxp->ts_data_nvme = 0;
			ctxp->ts_nvme_status = 0;
			ctxp->ts_status_wqput = 0;
			ctxp->ts_isr_status = 0;
			ctxp->ts_status_nvme = 0;
		}
#endif
		atomic_inc(&tgtp->rcv_fcp_cmd_in);

		/* Indicate that a replacement buffer has been posted */
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		/* Deliver the command via deferred work; on failure, abort
		 * the exchange and arrange for the context to be released.
		 */
		if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
			atomic_inc(&tgtp->rcv_fcp_cmd_drop);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6181 Unable to queue deferred work "
					"for oxid x%x. "
					"FCP Drop IO [x%x x%x x%x]\n",
					ctxp->oxid,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));

			spin_lock_irqsave(&ctxp->ctxlock, iflag);
			lpfc_nvmet_defer_release(phba, ctxp);
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
			lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		}
		return;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

	/*
	 * Use the CPU context list, from the MRQ the IO was received on
	 * (ctxp->idx), to save context structure.
	 */
	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_del_init(&ctxp->list);
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	cpu = raw_smp_processor_id();
	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
	infop->nvmet_ctx_list_cnt++;
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}
|
|
|
|
|
2017-02-13 05:52:38 +08:00
|
|
|
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
|
|
|
|
static void
|
|
|
|
lpfc_nvmet_ktime(struct lpfc_hba *phba,
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp)
|
|
|
|
{
|
|
|
|
uint64_t seg1, seg2, seg3, seg4, seg5;
|
|
|
|
uint64_t seg6, seg7, seg8, seg9, seg10;
|
2017-09-30 08:34:33 +08:00
|
|
|
uint64_t segsum;
|
2017-02-13 05:52:38 +08:00
|
|
|
|
|
|
|
if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
|
|
|
|
!ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
|
|
|
|
!ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
|
|
|
|
!ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
|
|
|
|
!ctxp->ts_isr_status || !ctxp->ts_status_nvme)
|
|
|
|
return;
|
|
|
|
|
2017-09-30 08:34:33 +08:00
|
|
|
if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
|
|
|
|
return;
|
2017-02-13 05:52:38 +08:00
|
|
|
if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
|
|
|
|
return;
|
|
|
|
if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
|
|
|
|
return;
|
|
|
|
if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
|
|
|
|
return;
|
|
|
|
if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
|
|
|
|
return;
|
|
|
|
if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
|
|
|
|
return;
|
|
|
|
if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
|
|
|
|
return;
|
|
|
|
if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
|
|
|
|
return;
|
|
|
|
if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
|
|
|
|
return;
|
|
|
|
if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
|
|
|
|
return;
|
|
|
|
/*
|
|
|
|
* Segment 1 - Time from FCP command received by MSI-X ISR
|
|
|
|
* to FCP command is passed to NVME Layer.
|
|
|
|
* Segment 2 - Time from FCP command payload handed
|
|
|
|
* off to NVME Layer to Driver receives a Command op
|
|
|
|
* from NVME Layer.
|
|
|
|
* Segment 3 - Time from Driver receives a Command op
|
|
|
|
* from NVME Layer to Command is put on WQ.
|
|
|
|
* Segment 4 - Time from Driver WQ put is done
|
|
|
|
* to MSI-X ISR for Command cmpl.
|
|
|
|
* Segment 5 - Time from MSI-X ISR for Command cmpl to
|
|
|
|
* Command cmpl is passed to NVME Layer.
|
|
|
|
* Segment 6 - Time from Command cmpl is passed to NVME
|
|
|
|
* Layer to Driver receives a RSP op from NVME Layer.
|
|
|
|
* Segment 7 - Time from Driver receives a RSP op from
|
|
|
|
* NVME Layer to WQ put is done on TRSP FCP Status.
|
|
|
|
* Segment 8 - Time from Driver WQ put is done on TRSP
|
|
|
|
* FCP Status to MSI-X ISR for TRSP cmpl.
|
|
|
|
* Segment 9 - Time from MSI-X ISR for TRSP cmpl to
|
|
|
|
* TRSP cmpl is passed to NVME Layer.
|
|
|
|
* Segment 10 - Time from FCP command received by
|
|
|
|
* MSI-X ISR to command is completed on wire.
|
|
|
|
* (Segments 1 thru 8) for READDATA / WRITEDATA
|
|
|
|
* (Segments 1 thru 4) for READDATA_RSP
|
|
|
|
*/
|
|
|
|
seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
|
2017-09-30 08:34:33 +08:00
|
|
|
segsum = seg1;
|
|
|
|
|
|
|
|
seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
|
|
|
|
if (segsum > seg2)
|
|
|
|
return;
|
|
|
|
seg2 -= segsum;
|
|
|
|
segsum += seg2;
|
|
|
|
|
|
|
|
seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
|
|
|
|
if (segsum > seg3)
|
|
|
|
return;
|
|
|
|
seg3 -= segsum;
|
|
|
|
segsum += seg3;
|
|
|
|
|
|
|
|
seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
|
|
|
|
if (segsum > seg4)
|
|
|
|
return;
|
|
|
|
seg4 -= segsum;
|
|
|
|
segsum += seg4;
|
|
|
|
|
|
|
|
seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
|
|
|
|
if (segsum > seg5)
|
|
|
|
return;
|
|
|
|
seg5 -= segsum;
|
|
|
|
segsum += seg5;
|
|
|
|
|
2017-02-13 05:52:38 +08:00
|
|
|
|
|
|
|
/* For auto rsp commands seg6 thru seg10 will be 0 */
|
|
|
|
if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
|
2017-09-30 08:34:33 +08:00
|
|
|
seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
|
|
|
|
if (segsum > seg6)
|
|
|
|
return;
|
|
|
|
seg6 -= segsum;
|
|
|
|
segsum += seg6;
|
|
|
|
|
|
|
|
seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
|
|
|
|
if (segsum > seg7)
|
|
|
|
return;
|
|
|
|
seg7 -= segsum;
|
|
|
|
segsum += seg7;
|
|
|
|
|
|
|
|
seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
|
|
|
|
if (segsum > seg8)
|
|
|
|
return;
|
|
|
|
seg8 -= segsum;
|
|
|
|
segsum += seg8;
|
|
|
|
|
|
|
|
seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
|
|
|
|
if (segsum > seg9)
|
|
|
|
return;
|
|
|
|
seg9 -= segsum;
|
|
|
|
segsum += seg9;
|
|
|
|
|
|
|
|
if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
|
|
|
|
return;
|
2017-02-13 05:52:38 +08:00
|
|
|
seg10 = (ctxp->ts_isr_status -
|
|
|
|
ctxp->ts_isr_cmd);
|
|
|
|
} else {
|
2017-09-30 08:34:33 +08:00
|
|
|
if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
|
|
|
|
return;
|
2017-02-13 05:52:38 +08:00
|
|
|
seg6 = 0;
|
|
|
|
seg7 = 0;
|
|
|
|
seg8 = 0;
|
|
|
|
seg9 = 0;
|
|
|
|
seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
|
|
|
|
}
|
|
|
|
|
|
|
|
phba->ktime_seg1_total += seg1;
|
|
|
|
if (seg1 < phba->ktime_seg1_min)
|
|
|
|
phba->ktime_seg1_min = seg1;
|
|
|
|
else if (seg1 > phba->ktime_seg1_max)
|
|
|
|
phba->ktime_seg1_max = seg1;
|
|
|
|
|
|
|
|
phba->ktime_seg2_total += seg2;
|
|
|
|
if (seg2 < phba->ktime_seg2_min)
|
|
|
|
phba->ktime_seg2_min = seg2;
|
|
|
|
else if (seg2 > phba->ktime_seg2_max)
|
|
|
|
phba->ktime_seg2_max = seg2;
|
|
|
|
|
|
|
|
phba->ktime_seg3_total += seg3;
|
|
|
|
if (seg3 < phba->ktime_seg3_min)
|
|
|
|
phba->ktime_seg3_min = seg3;
|
|
|
|
else if (seg3 > phba->ktime_seg3_max)
|
|
|
|
phba->ktime_seg3_max = seg3;
|
|
|
|
|
|
|
|
phba->ktime_seg4_total += seg4;
|
|
|
|
if (seg4 < phba->ktime_seg4_min)
|
|
|
|
phba->ktime_seg4_min = seg4;
|
|
|
|
else if (seg4 > phba->ktime_seg4_max)
|
|
|
|
phba->ktime_seg4_max = seg4;
|
|
|
|
|
|
|
|
phba->ktime_seg5_total += seg5;
|
|
|
|
if (seg5 < phba->ktime_seg5_min)
|
|
|
|
phba->ktime_seg5_min = seg5;
|
|
|
|
else if (seg5 > phba->ktime_seg5_max)
|
|
|
|
phba->ktime_seg5_max = seg5;
|
|
|
|
|
|
|
|
phba->ktime_data_samples++;
|
|
|
|
if (!seg6)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
phba->ktime_seg6_total += seg6;
|
|
|
|
if (seg6 < phba->ktime_seg6_min)
|
|
|
|
phba->ktime_seg6_min = seg6;
|
|
|
|
else if (seg6 > phba->ktime_seg6_max)
|
|
|
|
phba->ktime_seg6_max = seg6;
|
|
|
|
|
|
|
|
phba->ktime_seg7_total += seg7;
|
|
|
|
if (seg7 < phba->ktime_seg7_min)
|
|
|
|
phba->ktime_seg7_min = seg7;
|
|
|
|
else if (seg7 > phba->ktime_seg7_max)
|
|
|
|
phba->ktime_seg7_max = seg7;
|
|
|
|
|
|
|
|
phba->ktime_seg8_total += seg8;
|
|
|
|
if (seg8 < phba->ktime_seg8_min)
|
|
|
|
phba->ktime_seg8_min = seg8;
|
|
|
|
else if (seg8 > phba->ktime_seg8_max)
|
|
|
|
phba->ktime_seg8_max = seg8;
|
|
|
|
|
|
|
|
phba->ktime_seg9_total += seg9;
|
|
|
|
if (seg9 < phba->ktime_seg9_min)
|
|
|
|
phba->ktime_seg9_min = seg9;
|
|
|
|
else if (seg9 > phba->ktime_seg9_max)
|
|
|
|
phba->ktime_seg9_max = seg9;
|
|
|
|
out:
|
|
|
|
phba->ktime_seg10_total += seg10;
|
|
|
|
if (seg10 < phba->ktime_seg10_min)
|
|
|
|
phba->ktime_seg10_min = seg10;
|
|
|
|
else if (seg10 > phba->ktime_seg10_max)
|
|
|
|
phba->ktime_seg10_max = seg10;
|
|
|
|
phba->ktime_status_samples++;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
/**
|
|
|
|
* lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
|
|
|
|
* @phba: Pointer to HBA context object.
|
|
|
|
* @cmdwqe: Pointer to driver command WQE object.
|
|
|
|
* @wcqe: Pointer to driver response CQE object.
|
|
|
|
*
|
|
|
|
* The function is called from SLI ring event handler with no
|
|
|
|
* lock held. This function is the completion handler for NVME FCP commands
|
|
|
|
* The function frees memory resources used for the NVME commands.
|
|
|
|
**/
|
|
|
|
static void
|
|
|
|
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
|
|
|
|
struct lpfc_wcqe_complete *wcqe)
|
|
|
|
{
|
|
|
|
struct lpfc_nvmet_tgtport *tgtp;
|
|
|
|
struct nvmefc_tgt_fcp_req *rsp;
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp;
|
2017-09-30 08:34:36 +08:00
|
|
|
uint32_t status, result, op, start_clean, logerr;
|
2017-02-13 05:52:38 +08:00
|
|
|
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
|
|
|
|
uint32_t id;
|
|
|
|
#endif
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
ctxp = cmdwqe->context2;
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicited ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - this flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_release routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can have both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is invoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenario, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
ctxp->flag &= ~LPFC_NVMET_IO_INP;
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
rsp = &ctxp->ctx.fcp_req;
|
|
|
|
op = rsp->op;
|
|
|
|
|
|
|
|
status = bf_get(lpfc_wcqe_c_status, wcqe);
|
|
|
|
result = wcqe->parameter;
|
|
|
|
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicited ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - this flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_release routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can have both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is invoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenario, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
if (phba->targetport)
|
|
|
|
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
|
|
|
|
else
|
|
|
|
tgtp = NULL;
|
2017-02-13 05:52:37 +08:00
|
|
|
|
2017-02-13 05:52:38 +08:00
|
|
|
lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
|
|
|
|
ctxp->oxid, op, status);
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
if (status) {
|
|
|
|
rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
|
|
|
|
rsp->transferred_length = 0;
|
2017-12-09 09:18:10 +08:00
|
|
|
if (tgtp) {
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicited ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - this flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_release routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can have both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is invoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenario, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
atomic_inc(&tgtp->xmt_fcp_rsp_error);
|
2018-01-31 07:58:48 +08:00
|
|
|
if (result == IOERR_ABORT_REQUESTED)
|
2017-12-09 09:18:10 +08:00
|
|
|
atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
|
|
|
|
}
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
|
2017-09-30 08:34:36 +08:00
|
|
|
logerr = LOG_NVME_IOERR;
|
|
|
|
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicited ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - this flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_release routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can have both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is invoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenario, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
/* pick up SLI4 exhange busy condition */
|
|
|
|
if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
|
|
|
|
ctxp->flag |= LPFC_NVMET_XBUSY;
|
2017-09-30 08:34:36 +08:00
|
|
|
logerr |= LOG_NVME_ABTS;
|
2017-12-09 09:18:10 +08:00
|
|
|
if (tgtp)
|
|
|
|
atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicited ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - this flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_release routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can have both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is invoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenario, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
|
|
|
|
} else {
|
|
|
|
ctxp->flag &= ~LPFC_NVMET_XBUSY;
|
|
|
|
}
|
|
|
|
|
2017-09-30 08:34:36 +08:00
|
|
|
lpfc_printf_log(phba, KERN_INFO, logerr,
|
2019-05-22 08:48:56 +08:00
|
|
|
"6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
|
|
|
|
"XBUSY:x%x\n",
|
|
|
|
ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
|
|
|
|
status, result, ctxp->flag);
|
2017-09-30 08:34:36 +08:00
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
} else {
|
|
|
|
rsp->fcp_error = NVME_SC_SUCCESS;
|
|
|
|
if (op == NVMET_FCOP_RSP)
|
|
|
|
rsp->transferred_length = rsp->rsplen;
|
|
|
|
else
|
|
|
|
rsp->transferred_length = rsp->transfer_length;
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicited ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - this flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_release routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can have both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is invoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenario, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
if (tgtp)
|
|
|
|
atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
|
2017-02-13 05:52:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if ((op == NVMET_FCOP_READDATA_RSP) ||
|
|
|
|
(op == NVMET_FCOP_RSP)) {
|
|
|
|
/* Sanity check */
|
|
|
|
ctxp->state = LPFC_NVMET_STE_DONE;
|
|
|
|
ctxp->entry_cnt++;
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicited ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - this flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_release routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can have both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is invoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenario, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
|
2017-02-13 05:52:38 +08:00
|
|
|
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
|
2017-09-30 08:34:33 +08:00
|
|
|
if (ctxp->ts_cmd_nvme) {
|
2017-02-13 05:52:38 +08:00
|
|
|
if (rsp->op == NVMET_FCOP_READDATA_RSP) {
|
|
|
|
ctxp->ts_isr_data =
|
|
|
|
cmdwqe->isr_timestamp;
|
|
|
|
ctxp->ts_data_nvme =
|
|
|
|
ktime_get_ns();
|
|
|
|
ctxp->ts_nvme_status =
|
|
|
|
ctxp->ts_data_nvme;
|
|
|
|
ctxp->ts_status_wqput =
|
|
|
|
ctxp->ts_data_nvme;
|
|
|
|
ctxp->ts_isr_status =
|
|
|
|
ctxp->ts_data_nvme;
|
|
|
|
ctxp->ts_status_nvme =
|
|
|
|
ctxp->ts_data_nvme;
|
|
|
|
} else {
|
|
|
|
ctxp->ts_isr_status =
|
|
|
|
cmdwqe->isr_timestamp;
|
|
|
|
ctxp->ts_status_nvme =
|
|
|
|
ktime_get_ns();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
2017-02-13 05:52:37 +08:00
|
|
|
rsp->done(rsp);
|
2017-02-13 05:52:38 +08:00
|
|
|
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
|
2017-09-30 08:34:33 +08:00
|
|
|
if (ctxp->ts_cmd_nvme)
|
2017-02-13 05:52:38 +08:00
|
|
|
lpfc_nvmet_ktime(phba, ctxp);
|
|
|
|
#endif
|
2017-04-12 02:32:29 +08:00
|
|
|
/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
|
2017-02-13 05:52:37 +08:00
|
|
|
} else {
|
|
|
|
ctxp->entry_cnt++;
|
2017-08-24 07:55:43 +08:00
|
|
|
start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
|
2017-02-13 05:52:37 +08:00
|
|
|
memset(((char *)cmdwqe) + start_clean, 0,
|
|
|
|
(sizeof(struct lpfc_iocbq) - start_clean));
|
2017-02-13 05:52:38 +08:00
|
|
|
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
|
2017-09-30 08:34:33 +08:00
|
|
|
if (ctxp->ts_cmd_nvme) {
|
2017-02-13 05:52:38 +08:00
|
|
|
ctxp->ts_isr_data = cmdwqe->isr_timestamp;
|
|
|
|
ctxp->ts_data_nvme = ktime_get_ns();
|
|
|
|
}
|
2019-01-29 03:14:24 +08:00
|
|
|
#endif
|
|
|
|
rsp->done(rsp);
|
|
|
|
}
|
|
|
|
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
|
|
|
|
if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
|
2019-03-29 02:06:22 +08:00
|
|
|
id = raw_smp_processor_id();
|
2019-01-29 03:14:24 +08:00
|
|
|
if (id < LPFC_CHECK_CPU_CNT) {
|
2017-02-13 05:52:38 +08:00
|
|
|
if (ctxp->cpu != id)
|
2019-01-29 03:14:24 +08:00
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
|
2017-02-13 05:52:38 +08:00
|
|
|
"6704 CPU Check cmdcmpl: "
|
|
|
|
"cpu %d expect %d\n",
|
|
|
|
id, ctxp->cpu);
|
2019-01-29 03:14:24 +08:00
|
|
|
phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++;
|
2017-02-13 05:52:38 +08:00
|
|
|
}
|
2017-02-13 05:52:37 +08:00
|
|
|
}
|
2019-01-29 03:14:24 +08:00
|
|
|
#endif
|
2017-02-13 05:52:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
|
|
|
|
struct nvmefc_tgt_ls_req *rsp)
|
|
|
|
{
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp =
|
|
|
|
container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
|
|
|
|
struct lpfc_hba *phba = ctxp->phba;
|
|
|
|
struct hbq_dmabuf *nvmebuf =
|
|
|
|
(struct hbq_dmabuf *)ctxp->rqb_buffer;
|
|
|
|
struct lpfc_iocbq *nvmewqeq;
|
|
|
|
struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
|
|
|
|
struct lpfc_dmabuf dmabuf;
|
|
|
|
struct ulp_bde64 bpl;
|
|
|
|
int rc;
|
|
|
|
|
2017-11-21 08:00:41 +08:00
|
|
|
if (phba->pport->load_flag & FC_UNLOADING)
|
|
|
|
return -ENODEV;
|
|
|
|
|
2017-09-30 08:34:45 +08:00
|
|
|
if (phba->pport->load_flag & FC_UNLOADING)
|
|
|
|
return -ENODEV;
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
|
2017-06-02 12:06:58 +08:00
|
|
|
"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
|
|
|
|
|
|
|
|
if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
|
|
|
|
(ctxp->entry_cnt != 1)) {
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
|
|
|
"6412 NVMET LS rsp state mismatch "
|
|
|
|
"oxid x%x: %d %d\n",
|
|
|
|
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
|
|
|
|
}
|
|
|
|
ctxp->state = LPFC_NVMET_STE_LS_RSP;
|
|
|
|
ctxp->entry_cnt++;
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
|
|
|
|
rsp->rsplen);
|
|
|
|
if (nvmewqeq == NULL) {
|
|
|
|
atomic_inc(&nvmep->xmt_ls_drop);
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
|
|
|
"6150 LS Drop IO x%x: Prep\n",
|
|
|
|
ctxp->oxid);
|
|
|
|
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
|
2017-05-16 06:20:40 +08:00
|
|
|
atomic_inc(&nvmep->xmt_ls_abort);
|
2017-02-13 05:52:37 +08:00
|
|
|
lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
|
|
|
|
ctxp->sid, ctxp->oxid);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Save numBdes for bpl2sgl */
|
|
|
|
nvmewqeq->rsvd2 = 1;
|
|
|
|
nvmewqeq->hba_wqidx = 0;
|
|
|
|
nvmewqeq->context3 = &dmabuf;
|
|
|
|
dmabuf.virt = &bpl;
|
|
|
|
bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
|
|
|
|
bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
|
|
|
|
bpl.tus.f.bdeSize = rsp->rsplen;
|
|
|
|
bpl.tus.f.bdeFlags = 0;
|
|
|
|
bpl.tus.w = le32_to_cpu(bpl.tus.w);
|
|
|
|
|
|
|
|
nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
|
|
|
|
nvmewqeq->iocb_cmpl = NULL;
|
|
|
|
nvmewqeq->context2 = ctxp;
|
|
|
|
|
2017-02-13 05:52:38 +08:00
|
|
|
lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
|
|
|
|
ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
|
|
|
|
|
2019-01-29 03:14:26 +08:00
|
|
|
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
|
2017-02-13 05:52:37 +08:00
|
|
|
if (rc == WQE_SUCCESS) {
|
|
|
|
/*
|
|
|
|
* Okay to repost buffer here, but wait till cmpl
|
|
|
|
* before freeing ctxp and iocbq.
|
|
|
|
*/
|
|
|
|
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
|
|
|
|
atomic_inc(&nvmep->xmt_ls_rsp);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
/* Give back resources */
|
|
|
|
atomic_inc(&nvmep->xmt_ls_drop);
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
|
|
|
"6151 LS Drop IO x%x: Issue %d\n",
|
|
|
|
ctxp->oxid, rc);
|
|
|
|
|
|
|
|
lpfc_nlp_put(nvmewqeq->context1);
|
|
|
|
|
|
|
|
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
|
2017-05-16 06:20:40 +08:00
|
|
|
atomic_inc(&nvmep->xmt_ls_abort);
|
2017-02-13 05:52:37 +08:00
|
|
|
lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
|
|
|
|
return -ENXIO;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
|
|
|
|
struct nvmefc_tgt_fcp_req *rsp)
|
|
|
|
{
|
|
|
|
struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp =
|
|
|
|
container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
|
|
|
|
struct lpfc_hba *phba = ctxp->phba;
|
2018-01-31 07:58:49 +08:00
|
|
|
struct lpfc_queue *wq;
|
2017-02-13 05:52:37 +08:00
|
|
|
struct lpfc_iocbq *nvmewqeq;
|
2018-01-31 07:58:49 +08:00
|
|
|
struct lpfc_sli_ring *pring;
|
|
|
|
unsigned long iflags;
|
2017-03-23 22:53:45 +08:00
|
|
|
int rc;
|
2017-02-13 05:52:38 +08:00
|
|
|
|
2017-09-30 08:34:45 +08:00
|
|
|
if (phba->pport->load_flag & FC_UNLOADING) {
|
|
|
|
rc = -ENODEV;
|
|
|
|
goto aerr;
|
|
|
|
}
|
|
|
|
|
2017-11-21 08:00:41 +08:00
|
|
|
if (phba->pport->load_flag & FC_UNLOADING) {
|
|
|
|
rc = -ENODEV;
|
|
|
|
goto aerr;
|
|
|
|
}
|
|
|
|
|
2017-02-13 05:52:38 +08:00
|
|
|
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
|
2017-09-30 08:34:33 +08:00
|
|
|
if (ctxp->ts_cmd_nvme) {
|
2017-02-13 05:52:38 +08:00
|
|
|
if (rsp->op == NVMET_FCOP_RSP)
|
|
|
|
ctxp->ts_nvme_status = ktime_get_ns();
|
|
|
|
else
|
|
|
|
ctxp->ts_nvme_data = ktime_get_ns();
|
|
|
|
}
|
2019-01-29 03:14:26 +08:00
|
|
|
|
|
|
|
/* Setup the hdw queue if not already set */
|
|
|
|
if (!ctxp->hdwq)
|
|
|
|
ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
|
|
|
|
|
2017-02-13 05:52:38 +08:00
|
|
|
if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
|
2019-03-29 02:06:22 +08:00
|
|
|
int id = raw_smp_processor_id();
|
2019-01-29 03:14:24 +08:00
|
|
|
if (id < LPFC_CHECK_CPU_CNT) {
|
|
|
|
if (rsp->hwqid != id)
|
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
|
|
|
|
"6705 CPU Check OP: "
|
|
|
|
"cpu %d expect %d\n",
|
|
|
|
id, rsp->hwqid);
|
|
|
|
phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++;
|
2017-02-13 05:52:38 +08:00
|
|
|
}
|
2019-01-29 03:14:24 +08:00
|
|
|
ctxp->cpu = id; /* Setup cpu for cmpl check */
|
2017-02-13 05:52:38 +08:00
|
|
|
}
|
|
|
|
#endif
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
/* Sanity check */
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
|
|
|
|
(ctxp->state == LPFC_NVMET_STE_ABORT)) {
|
2017-02-13 05:52:37 +08:00
|
|
|
atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
2019-05-22 08:48:56 +08:00
|
|
|
"6102 IO oxid x%x aborted\n",
|
2017-02-13 05:52:37 +08:00
|
|
|
ctxp->oxid);
|
2017-03-05 01:30:28 +08:00
|
|
|
rc = -ENXIO;
|
2017-02-13 05:52:37 +08:00
|
|
|
goto aerr;
|
|
|
|
}
|
|
|
|
|
|
|
|
nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
|
|
|
|
if (nvmewqeq == NULL) {
|
|
|
|
atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
|
|
|
"6152 FCP Drop IO x%x: Prep\n",
|
|
|
|
ctxp->oxid);
|
2017-03-05 01:30:28 +08:00
|
|
|
rc = -ENXIO;
|
2017-02-13 05:52:37 +08:00
|
|
|
goto aerr;
|
|
|
|
}
|
|
|
|
|
|
|
|
nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
|
|
|
|
nvmewqeq->iocb_cmpl = NULL;
|
|
|
|
nvmewqeq->context2 = ctxp;
|
|
|
|
nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
|
|
|
|
ctxp->wqeq->hba_wqidx = rsp->hwqid;
|
|
|
|
|
2017-02-13 05:52:38 +08:00
|
|
|
lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
|
|
|
|
ctxp->oxid, rsp->op, rsp->rsplen);
|
|
|
|
|
2017-05-16 06:20:41 +08:00
|
|
|
ctxp->flag |= LPFC_NVMET_IO_INP;
|
2019-01-29 03:14:26 +08:00
|
|
|
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
|
2017-02-13 05:52:37 +08:00
|
|
|
if (rc == WQE_SUCCESS) {
|
2017-02-13 05:52:38 +08:00
|
|
|
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
|
2017-09-30 08:34:33 +08:00
|
|
|
if (!ctxp->ts_cmd_nvme)
|
2017-02-13 05:52:38 +08:00
|
|
|
return 0;
|
|
|
|
if (rsp->op == NVMET_FCOP_RSP)
|
|
|
|
ctxp->ts_status_wqput = ktime_get_ns();
|
|
|
|
else
|
|
|
|
ctxp->ts_data_wqput = ktime_get_ns();
|
|
|
|
#endif
|
2017-02-13 05:52:37 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-01-31 07:58:49 +08:00
|
|
|
if (rc == -EBUSY) {
|
|
|
|
/*
|
|
|
|
* WQ was full, so queue nvmewqeq to be sent after
|
|
|
|
* WQE release CQE
|
|
|
|
*/
|
|
|
|
ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
|
2019-01-29 03:14:26 +08:00
|
|
|
wq = ctxp->hdwq->nvme_wq;
|
2018-01-31 07:58:49 +08:00
|
|
|
pring = wq->pring;
|
|
|
|
spin_lock_irqsave(&pring->ring_lock, iflags);
|
|
|
|
list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
|
|
|
|
wq->q_flag |= HBA_NVMET_WQFULL;
|
|
|
|
spin_unlock_irqrestore(&pring->ring_lock, iflags);
|
2018-01-31 07:58:52 +08:00
|
|
|
atomic_inc(&lpfc_nvmep->defer_wqfull);
|
2018-01-31 07:58:49 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
/* Give back resources */
|
|
|
|
atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
|
|
|
"6153 FCP Drop IO x%x: Issue: %d\n",
|
|
|
|
ctxp->oxid, rc);
|
|
|
|
|
|
|
|
ctxp->wqeq->hba_wqidx = 0;
|
|
|
|
nvmewqeq->context2 = NULL;
|
|
|
|
nvmewqeq->context3 = NULL;
|
2017-03-05 01:30:28 +08:00
|
|
|
rc = -EBUSY;
|
2017-02-13 05:52:37 +08:00
|
|
|
aerr:
|
2017-03-05 01:30:28 +08:00
|
|
|
return rc;
|
2017-02-13 05:52:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
|
|
|
|
{
|
|
|
|
struct lpfc_nvmet_tgtport *tport = targetport->private;
|
|
|
|
|
|
|
|
/* release any threads waiting for the unreg to complete */
|
2019-01-18 00:14:45 +08:00
|
|
|
if (tport->phba->targetport)
|
|
|
|
complete(tport->tport_unreg_cmp);
|
2017-02-13 05:52:37 +08:00
|
|
|
}
|
|
|
|
|
nvmet_fc: Rework target side abort handling
target transport:
----------------------
There are cases when there is a need to abort in-progress target
operations (writedata) so that controller termination or errors can
clean up. That can't happen currently as the abort is another target
op type, so it can't be used till the running one finishes (and it may
not). Solve by removing the abort op type and creating a separate
downcall from the transport to the lldd to request an io to be aborted.
The transport will abort ios on queue teardown or io errors. In general
the transport tries to call the lldd abort only when the io state is
idle. Meaning: ops that transmit data (readdata or rsp) will always
finish their transmit (or the lldd will see a state on the
link or initiator port that fails the transmit) and the done call for
the operation will occur. The transport will wait for the op done
upcall before calling the abort function, and as the io is idle, the
io can be cleaned up immediately after the abort call; Similarly, ios
that are not waiting for data or transmitting data must be in the nvmet
layer being processed. The transport will wait for the nvmet layer
completion before calling the abort function, and as the io is idle,
the io can be cleaned up immediately after the abort call; As for ops
that are waiting for data (writedata), they may be outstanding
indefinitely if the lldd doesn't see a condition where the initiator
port or link is bad. In those cases, the transport will call the abort
function and wait for the lldd's op done upcall for the operation, where
it will then clean up the io.
Additionally, if a lldd receives an ABTS and matches it to an outstanding
request in the transport, a new transport upcall was created to abort
the outstanding request in the transport. The transport expects any
outstanding op call (readdata or writedata) will be completed by the lldd and
the operation upcall made. The transport doesn't act on the reported
abort (e.g. clean up the io) until an op done upcall occurs, a new op is
attempted, or the nvmet layer completes the io processing.
fcloop:
----------------------
Updated to support the new target apis.
On fcp io aborts from the initiator, the loopback context is updated to
NULL out the half that has completed. The initiator side is immediately
called after the abort request with an io completion (abort status).
On fcp io aborts from the target, the io is stopped and the initiator side
sees it as an aborted io. Target side ops, perhaps in progress while the
initiator side is done, continue but noop the data movement as there's no
structure on the initiator side to reference.
patch also contains:
----------------------
Revised lpfc to support the new abort api
commonized rsp buffer syncing and nulling of private data based on
calling paths.
errors in op done calls don't take action on the fod. They're bad
operations which implies the fod may be bad.
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
2017-04-12 02:32:31 +08:00
|
|
|
static void
|
|
|
|
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
|
|
|
|
struct nvmefc_tgt_fcp_req *req)
|
|
|
|
{
|
|
|
|
struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp =
|
|
|
|
container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
|
|
|
|
struct lpfc_hba *phba = ctxp->phba;
|
2018-01-31 07:58:49 +08:00
|
|
|
struct lpfc_queue *wq;
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
unsigned long flags;
|
nvmet_fc: Rework target side abort handling
target transport:
----------------------
There are cases when there is a need to abort in-progress target
operations (writedata) so that controller termination or errors can
clean up. That can't happen currently as the abort is another target
op type, so it can't be used till the running one finishes (and it may
not). Solve by removing the abort op type and creating a separate
downcall from the transport to the lldd to request an io to be aborted.
The transport will abort ios on queue teardown or io errors. In general
the transport tries to call the lldd abort only when the io state is
idle. Meaning: ops that transmit data (readdata or rsp) will always
finish their transmit (or the lldd will see a state on the
link or initiator port that fails the transmit) and the done call for
the operation will occur. The transport will wait for the op done
upcall before calling the abort function, and as the io is idle, the
io can be cleaned up immediately after the abort call; Similarly, ios
that are not waiting for data or transmitting data must be in the nvmet
layer being processed. The transport will wait for the nvmet layer
completion before calling the abort function, and as the io is idle,
the io can be cleaned up immediately after the abort call; As for ops
that are waiting for data (writedata), they may be outstanding
indefinitely if the lldd doesn't see a condition where the initiatior
port or link is bad. In those cases, the transport will call the abort
function and wait for the lldd's op done upcall for the operation, where
it will then clean up the io.
Additionally, if a lldd receives an ABTS and matches it to an outstanding
request in the transport, A new new transport upcall was created to abort
the outstanding request in the transport. The transport expects any
outstanding op call (readdata or writedata) will completed by the lldd and
the operation upcall made. The transport doesn't act on the reported
abort (e.g. clean up the io) until an op done upcall occurs, a new op is
attempted, or the nvmet layer completes the io processing.
fcloop:
----------------------
Updated to support the new target apis.
On fcp io aborts from the initiator, the loopback context is updated to
NULL out the half that has completed. The initiator side is immediately
called after the abort request with an io completion (abort status).
On fcp io aborts from the target, the io is stopped and the initiator side
sees it as an aborted io. Target side ops, perhaps in progress while the
initiator side is done, continue but noop the data movement as there's no
structure on the initiator side to reference.
patch also contains:
----------------------
Revised lpfc to support the new abort api
commonized rsp buffer syncing and nulling of private data based on
calling paths.
errors in op done calls don't take action on the fod. They're bad
operations which implies the fod may be bad.
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
2017-04-12 02:32:31 +08:00
|
|
|
|
2017-11-21 08:00:41 +08:00
|
|
|
if (phba->pport->load_flag & FC_UNLOADING)
|
|
|
|
return;
|
|
|
|
|
2017-09-30 08:34:45 +08:00
|
|
|
if (phba->pport->load_flag & FC_UNLOADING)
|
|
|
|
return;
|
|
|
|
|
2019-01-29 03:14:26 +08:00
|
|
|
if (!ctxp->hdwq)
|
|
|
|
ctxp->hdwq = &phba->sli4_hba.hdwq[0];
|
|
|
|
|
nvmet_fc: Rework target side abort handling
target transport:
----------------------
There are cases when there is a need to abort in-progress target
operations (writedata) so that controller termination or errors can
clean up. That can't happen currently as the abort is another target
op type, so it can't be used till the running one finishes (and it may
not). Solve by removing the abort op type and creating a separate
downcall from the transport to the lldd to request an io to be aborted.
The transport will abort ios on queue teardown or io errors. In general
the transport tries to call the lldd abort only when the io state is
idle. Meaning: ops that transmit data (readdata or rsp) will always
finish their transmit (or the lldd will see a state on the
link or initiator port that fails the transmit) and the done call for
the operation will occur. The transport will wait for the op done
upcall before calling the abort function, and as the io is idle, the
io can be cleaned up immediately after the abort call; Similarly, ios
that are not waiting for data or transmitting data must be in the nvmet
layer being processed. The transport will wait for the nvmet layer
completion before calling the abort function, and as the io is idle,
the io can be cleaned up immediately after the abort call; As for ops
that are waiting for data (writedata), they may be outstanding
indefinitely if the lldd doesn't see a condition where the initiatior
port or link is bad. In those cases, the transport will call the abort
function and wait for the lldd's op done upcall for the operation, where
it will then clean up the io.
Additionally, if a lldd receives an ABTS and matches it to an outstanding
request in the transport, A new new transport upcall was created to abort
the outstanding request in the transport. The transport expects any
outstanding op call (readdata or writedata) will completed by the lldd and
the operation upcall made. The transport doesn't act on the reported
abort (e.g. clean up the io) until an op done upcall occurs, a new op is
attempted, or the nvmet layer completes the io processing.
fcloop:
----------------------
Updated to support the new target apis.
On fcp io aborts from the initiator, the loopback context is updated to
NULL out the half that has completed. The initiator side is immediately
called after the abort request with an io completion (abort status).
On fcp io aborts from the target, the io is stopped and the initiator side
sees it as an aborted io. Target side ops, perhaps in progress while the
initiator side is done, continue but noop the data movement as there's no
structure on the initiator side to reference.
patch also contains:
----------------------
Revised lpfc to support the new abort api
commonized rsp buffer syncing and nulling of private data based on
calling paths.
errors in op done calls don't take action on the fod. They're bad
operations which implies the fod may be bad.
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
2017-04-12 02:32:31 +08:00
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
|
2019-05-22 08:48:56 +08:00
|
|
|
"6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
|
2017-06-02 12:06:58 +08:00
|
|
|
ctxp->oxid, ctxp->flag, ctxp->state);
|
nvmet_fc: Rework target side abort handling
target transport:
----------------------
There are cases when there is a need to abort in-progress target
operations (writedata) so that controller termination or errors can
clean up. That can't happen currently as the abort is another target
op type, so it can't be used till the running one finishes (and it may
not). Solve by removing the abort op type and creating a separate
downcall from the transport to the lldd to request an io to be aborted.
The transport will abort ios on queue teardown or io errors. In general
the transport tries to call the lldd abort only when the io state is
idle. Meaning: ops that transmit data (readdata or rsp) will always
finish their transmit (or the lldd will see a state on the
link or initiator port that fails the transmit) and the done call for
the operation will occur. The transport will wait for the op done
upcall before calling the abort function, and as the io is idle, the
io can be cleaned up immediately after the abort call; Similarly, ios
that are not waiting for data or transmitting data must be in the nvmet
layer being processed. The transport will wait for the nvmet layer
completion before calling the abort function, and as the io is idle,
the io can be cleaned up immediately after the abort call; As for ops
that are waiting for data (writedata), they may be outstanding
indefinitely if the lldd doesn't see a condition where the initiatior
port or link is bad. In those cases, the transport will call the abort
function and wait for the lldd's op done upcall for the operation, where
it will then clean up the io.
Additionally, if a lldd receives an ABTS and matches it to an outstanding
request in the transport, A new new transport upcall was created to abort
the outstanding request in the transport. The transport expects any
outstanding op call (readdata or writedata) will completed by the lldd and
the operation upcall made. The transport doesn't act on the reported
abort (e.g. clean up the io) until an op done upcall occurs, a new op is
attempted, or the nvmet layer completes the io processing.
fcloop:
----------------------
Updated to support the new target apis.
On fcp io aborts from the initiator, the loopback context is updated to
NULL out the half that has completed. The initiator side is immediately
called after the abort request with an io completion (abort status).
On fcp io aborts from the target, the io is stopped and the initiator side
sees it as an aborted io. Target side ops, perhaps in progress while the
initiator side is done, continue but noop the data movement as there's no
structure on the initiator side to reference.
patch also contains:
----------------------
Revised lpfc to support the new abort api
commonized rsp buffer syncing and nulling of private data based on
calling paths.
errors in op done calls don't take action on the fod. They're bad
operations which implies the fod may be bad.
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
2017-04-12 02:32:31 +08:00
|
|
|
|
2017-06-02 12:06:58 +08:00
|
|
|
lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
|
|
|
|
ctxp->oxid, ctxp->flag, ctxp->state);
|
nvmet_fc: Rework target side abort handling
target transport:
----------------------
There are cases when there is a need to abort in-progress target
operations (writedata) so that controller termination or errors can
clean up. That can't happen currently as the abort is another target
op type, so it can't be used till the running one finishes (and it may
not). Solve by removing the abort op type and creating a separate
downcall from the transport to the lldd to request an io to be aborted.
The transport will abort ios on queue teardown or io errors. In general
the transport tries to call the lldd abort only when the io state is
idle. Meaning: ops that transmit data (readdata or rsp) will always
finish their transmit (or the lldd will see a state on the
link or initiator port that fails the transmit) and the done call for
the operation will occur. The transport will wait for the op done
upcall before calling the abort function, and as the io is idle, the
io can be cleaned up immediately after the abort call; Similarly, ios
that are not waiting for data or transmitting data must be in the nvmet
layer being processed. The transport will wait for the nvmet layer
completion before calling the abort function, and as the io is idle,
the io can be cleaned up immediately after the abort call; As for ops
that are waiting for data (writedata), they may be outstanding
indefinitely if the lldd doesn't see a condition where the initiatior
port or link is bad. In those cases, the transport will call the abort
function and wait for the lldd's op done upcall for the operation, where
it will then clean up the io.
Additionally, if a lldd receives an ABTS and matches it to an outstanding
request in the transport, A new new transport upcall was created to abort
the outstanding request in the transport. The transport expects any
outstanding op call (readdata or writedata) will completed by the lldd and
the operation upcall made. The transport doesn't act on the reported
abort (e.g. clean up the io) until an op done upcall occurs, a new op is
attempted, or the nvmet layer completes the io processing.
fcloop:
----------------------
Updated to support the new target apis.
On fcp io aborts from the initiator, the loopback context is updated to
NULL out the half that has completed. The initiator side is immediately
called after the abort request with an io completion (abort status).
On fcp io aborts from the target, the io is stopped and the initiator side
sees it as an aborted io. Target side ops, perhaps in progress while the
initiator side is done, continue but noop the data movement as there's no
structure on the initiator side to reference.
patch also contains:
----------------------
Revised lpfc to support the new abort api
commonized rsp buffer syncing and nulling of private data based on
calling paths.
errors in op done calls don't take action on the fod. They're bad
operations which implies the fod may be bad.
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
2017-04-12 02:32:31 +08:00
|
|
|
|
|
|
|
atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
|
2017-06-02 12:06:58 +08:00
|
|
|
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
spin_lock_irqsave(&ctxp->ctxlock, flags);
|
|
|
|
|
|
|
|
/* Since iaab/iaar are NOT set, we need to check
|
|
|
|
* if the firmware is in process of aborting IO
|
|
|
|
*/
|
2019-05-22 08:48:56 +08:00
|
|
|
if (ctxp->flag & (LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP)) {
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
|
|
|
|
return;
|
|
|
|
}
|
nvmet_fc: Rework target side abort handling
target transport:
----------------------
There are cases when there is a need to abort in-progress target
operations (writedata) so that controller termination or errors can
clean up. That can't happen currently as the abort is another target
op type, so it can't be used till the running one finishes (and it may
not). Solve by removing the abort op type and creating a separate
downcall from the transport to the lldd to request an io to be aborted.
The transport will abort ios on queue teardown or io errors. In general
the transport tries to call the lldd abort only when the io state is
idle. Meaning: ops that transmit data (readdata or rsp) will always
finish their transmit (or the lldd will see a state on the
link or initiator port that fails the transmit) and the done call for
the operation will occur. The transport will wait for the op done
upcall before calling the abort function, and as the io is idle, the
io can be cleaned up immediately after the abort call; Similarly, ios
that are not waiting for data or transmitting data must be in the nvmet
layer being processed. The transport will wait for the nvmet layer
completion before calling the abort function, and as the io is idle,
the io can be cleaned up immediately after the abort call; As for ops
that are waiting for data (writedata), they may be outstanding
indefinitely if the lldd doesn't see a condition where the initiatior
port or link is bad. In those cases, the transport will call the abort
function and wait for the lldd's op done upcall for the operation, where
it will then clean up the io.
Additionally, if a lldd receives an ABTS and matches it to an outstanding
request in the transport, A new new transport upcall was created to abort
the outstanding request in the transport. The transport expects any
outstanding op call (readdata or writedata) will completed by the lldd and
the operation upcall made. The transport doesn't act on the reported
abort (e.g. clean up the io) until an op done upcall occurs, a new op is
attempted, or the nvmet layer completes the io processing.
fcloop:
----------------------
Updated to support the new target apis.
On fcp io aborts from the initiator, the loopback context is updated to
NULL out the half that has completed. The initiator side is immediately
called after the abort request with an io completion (abort status).
On fcp io aborts from the target, the io is stopped and the initiator side
sees it as an aborted io. Target side ops, perhaps in progress while the
initiator side is done, continue but noop the data movement as there's no
structure on the initiator side to reference.
patch also contains:
----------------------
Revised lpfc to support the new abort api
commonized rsp buffer syncing and nulling of private data based on
calling paths.
errors in op done calls don't take action on the fod. They're bad
operations which implies the fod may be bad.
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
2017-04-12 02:32:31 +08:00
|
|
|
ctxp->flag |= LPFC_NVMET_ABORT_OP;
|
2017-06-02 12:06:58 +08:00
|
|
|
|
2018-01-31 07:58:49 +08:00
|
|
|
if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
|
2019-01-29 03:14:40 +08:00
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
|
2018-01-31 07:58:49 +08:00
|
|
|
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
|
|
|
|
ctxp->oxid);
|
2019-01-29 03:14:26 +08:00
|
|
|
wq = ctxp->hdwq->nvme_wq;
|
2018-01-31 07:58:49 +08:00
|
|
|
lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
|
|
|
|
return;
|
|
|
|
}
|
2019-01-29 03:14:40 +08:00
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
|
2018-01-31 07:58:49 +08:00
|
|
|
|
2017-06-02 12:06:58 +08:00
|
|
|
/* An state of LPFC_NVMET_STE_RCV means we have just received
|
|
|
|
* the NVME command and have not started processing it.
|
|
|
|
* (by issuing any IO WQEs on this exchange yet)
|
|
|
|
*/
|
|
|
|
if (ctxp->state == LPFC_NVMET_STE_RCV)
|
nvmet_fc: Rework target side abort handling
target transport:
----------------------
There are cases when there is a need to abort in-progress target
operations (writedata) so that controller termination or errors can
clean up. That can't happen currently as the abort is another target
op type, so it can't be used till the running one finishes (and it may
not). Solve by removing the abort op type and creating a separate
downcall from the transport to the lldd to request an io to be aborted.
The transport will abort ios on queue teardown or io errors. In general
the transport tries to call the lldd abort only when the io state is
idle. Meaning: ops that transmit data (readdata or rsp) will always
finish their transmit (or the lldd will see a state on the
link or initiator port that fails the transmit) and the done call for
the operation will occur. The transport will wait for the op done
upcall before calling the abort function, and as the io is idle, the
io can be cleaned up immediately after the abort call; Similarly, ios
that are not waiting for data or transmitting data must be in the nvmet
layer being processed. The transport will wait for the nvmet layer
completion before calling the abort function, and as the io is idle,
the io can be cleaned up immediately after the abort call; As for ops
that are waiting for data (writedata), they may be outstanding
indefinitely if the lldd doesn't see a condition where the initiatior
port or link is bad. In those cases, the transport will call the abort
function and wait for the lldd's op done upcall for the operation, where
it will then clean up the io.
Additionally, if a lldd receives an ABTS and matches it to an outstanding
request in the transport, A new new transport upcall was created to abort
the outstanding request in the transport. The transport expects any
outstanding op call (readdata or writedata) will completed by the lldd and
the operation upcall made. The transport doesn't act on the reported
abort (e.g. clean up the io) until an op done upcall occurs, a new op is
attempted, or the nvmet layer completes the io processing.
fcloop:
----------------------
Updated to support the new target apis.
On fcp io aborts from the initiator, the loopback context is updated to
NULL out the half that has completed. The initiator side is immediately
called after the abort request with an io completion (abort status).
On fcp io aborts from the target, the io is stopped and the initiator side
sees it as an aborted io. Target side ops, perhaps in progress while the
initiator side is done, continue but noop the data movement as there's no
structure on the initiator side to reference.
patch also contains:
----------------------
Revised lpfc to support the new abort api
commonized rsp buffer syncing and nulling of private data based on
calling paths.
errors in op done calls don't take action on the fod. They're bad
operations which implies the fod may be bad.
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
2017-04-12 02:32:31 +08:00
|
|
|
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
|
|
|
|
ctxp->oxid);
|
2017-06-02 12:06:58 +08:00
|
|
|
else
|
|
|
|
lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
|
|
|
|
ctxp->oxid);
|
nvmet_fc: Rework target side abort handling
target transport:
----------------------
There are cases when there is a need to abort in-progress target
operations (writedata) so that controller termination or errors can
clean up. That can't happen currently as the abort is another target
op type, so it can't be used till the running one finishes (and it may
not). Solve by removing the abort op type and creating a separate
downcall from the transport to the lldd to request an io to be aborted.
The transport will abort ios on queue teardown or io errors. In general
the transport tries to call the lldd abort only when the io state is
idle. Meaning: ops that transmit data (readdata or rsp) will always
finish their transmit (or the lldd will see a state on the
link or initiator port that fails the transmit) and the done call for
the operation will occur. The transport will wait for the op done
upcall before calling the abort function, and as the io is idle, the
io can be cleaned up immediately after the abort call; Similarly, ios
that are not waiting for data or transmitting data must be in the nvmet
layer being processed. The transport will wait for the nvmet layer
completion before calling the abort function, and as the io is idle,
the io can be cleaned up immediately after the abort call; As for ops
that are waiting for data (writedata), they may be outstanding
indefinitely if the lldd doesn't see a condition where the initiatior
port or link is bad. In those cases, the transport will call the abort
function and wait for the lldd's op done upcall for the operation, where
it will then clean up the io.
Additionally, if a lldd receives an ABTS and matches it to an outstanding
request in the transport, A new new transport upcall was created to abort
the outstanding request in the transport. The transport expects any
outstanding op call (readdata or writedata) will completed by the lldd and
the operation upcall made. The transport doesn't act on the reported
abort (e.g. clean up the io) until an op done upcall occurs, a new op is
attempted, or the nvmet layer completes the io processing.
fcloop:
----------------------
Updated to support the new target apis.
On fcp io aborts from the initiator, the loopback context is updated to
NULL out the half that has completed. The initiator side is immediately
called after the abort request with an io completion (abort status).
On fcp io aborts from the target, the io is stopped and the initiator side
sees it as an aborted io. Target side ops, perhaps in progress while the
initiator side is done, continue but noop the data movement as there's no
structure on the initiator side to reference.
patch also contains:
----------------------
Revised lpfc to support the new abort api
commonized rsp buffer syncing and nulling of private data based on
calling paths.
errors in op done calls don't take action on the fod. They're bad
operations which implies the fod may be bad.
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
2017-04-12 02:32:31 +08:00
|
|
|
}
|
|
|
|
|
2017-04-12 02:32:29 +08:00
|
|
|
static void
|
|
|
|
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
|
|
|
|
struct nvmefc_tgt_fcp_req *rsp)
|
|
|
|
{
|
2017-05-16 06:20:40 +08:00
|
|
|
struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
|
2017-04-12 02:32:29 +08:00
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp =
|
|
|
|
container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
|
|
|
|
struct lpfc_hba *phba = ctxp->phba;
|
|
|
|
unsigned long flags;
|
|
|
|
bool aborting = false;
|
|
|
|
|
2019-01-29 03:14:40 +08:00
|
|
|
spin_lock_irqsave(&ctxp->ctxlock, flags);
|
|
|
|
if (ctxp->flag & LPFC_NVMET_XBUSY)
|
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
|
|
|
|
"6027 NVMET release with XBUSY flag x%x"
|
|
|
|
" oxid x%x\n",
|
|
|
|
ctxp->flag, ctxp->oxid);
|
|
|
|
else if (ctxp->state != LPFC_NVMET_STE_DONE &&
|
|
|
|
ctxp->state != LPFC_NVMET_STE_ABORT)
|
2017-06-02 12:06:58 +08:00
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
|
|
|
"6413 NVMET release bad state %d %d oxid x%x\n",
|
|
|
|
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
|
|
|
|
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
|
|
|
|
(ctxp->flag & LPFC_NVMET_XBUSY)) {
|
2017-04-12 02:32:29 +08:00
|
|
|
aborting = true;
|
|
|
|
/* let the abort path do the real release */
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
lpfc_nvmet_defer_release(phba, ctxp);
|
2017-04-12 02:32:29 +08:00
|
|
|
}
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicited ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - this flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_release routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can have both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is invoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenario, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send an
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
|
2017-04-12 02:32:29 +08:00
|
|
|
|
2017-05-19 16:04:31 +08:00
|
|
|
lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
|
|
|
|
ctxp->state, aborting);
|
2017-04-12 02:32:29 +08:00
|
|
|
|
2017-05-16 06:20:40 +08:00
|
|
|
atomic_inc(&lpfc_nvmep->xmt_fcp_release);
|
2019-05-22 08:48:56 +08:00
|
|
|
ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
|
2017-05-16 06:20:40 +08:00
|
|
|
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
if (aborting)
|
|
|
|
return;
|
|
|
|
|
2017-05-16 06:20:45 +08:00
|
|
|
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
|
2017-04-12 02:32:29 +08:00
|
|
|
}
|
|
|
|
|
2017-08-02 06:12:40 +08:00
|
|
|
static void
|
|
|
|
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
|
|
|
|
struct nvmefc_tgt_fcp_req *rsp)
|
|
|
|
{
|
|
|
|
struct lpfc_nvmet_tgtport *tgtp;
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp =
|
|
|
|
container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
|
|
|
|
struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
|
|
|
|
struct lpfc_hba *phba = ctxp->phba;
|
2019-01-29 03:14:39 +08:00
|
|
|
unsigned long iflag;
|
|
|
|
|
2017-08-02 06:12:40 +08:00
|
|
|
|
|
|
|
lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
|
2019-03-29 02:06:22 +08:00
|
|
|
ctxp->oxid, ctxp->size, raw_smp_processor_id());
|
2017-08-02 06:12:40 +08:00
|
|
|
|
2018-06-26 23:24:28 +08:00
|
|
|
if (!nvmebuf) {
|
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
|
2019-05-22 08:48:56 +08:00
|
|
|
"6425 Defer rcv: no buffer oxid x%x: "
|
2018-06-26 23:24:28 +08:00
|
|
|
"flg %x ste %x\n",
|
|
|
|
ctxp->oxid, ctxp->flag, ctxp->state);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-08-02 06:12:40 +08:00
|
|
|
tgtp = phba->targetport->private;
|
2018-06-26 23:24:28 +08:00
|
|
|
if (tgtp)
|
|
|
|
atomic_inc(&tgtp->rcv_fcp_cmd_defer);
|
2018-01-31 07:58:52 +08:00
|
|
|
|
|
|
|
/* Free the nvmebuf since a new buffer already replaced it */
|
|
|
|
nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
|
2019-01-29 03:14:39 +08:00
|
|
|
spin_lock_irqsave(&ctxp->ctxlock, iflag);
|
|
|
|
ctxp->rqb_buffer = NULL;
|
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
|
2017-08-02 06:12:40 +08:00
|
|
|
}
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
static struct nvmet_fc_target_template lpfc_tgttemplate = {
|
|
|
|
.targetport_delete = lpfc_nvmet_targetport_delete,
|
|
|
|
.xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
|
|
|
|
.fcp_op = lpfc_nvmet_xmt_fcp_op,
|
nvmet_fc: Rework target side abort handling
target transport:
----------------------
There are cases when there is a need to abort in-progress target
operations (writedata) so that controller termination or errors can
clean up. That can't happen currently as the abort is another target
op type, so it can't be used till the running one finishes (and it may
not). Solve by removing the abort op type and creating a separate
downcall from the transport to the lldd to request an io to be aborted.
The transport will abort ios on queue teardown or io errors. In general
the transport tries to call the lldd abort only when the io state is
idle. Meaning: ops that transmit data (readdata or rsp) will always
finish their transmit (or the lldd will see a state on the
link or initiator port that fails the transmit) and the done call for
the operation will occur. The transport will wait for the op done
upcall before calling the abort function, and as the io is idle, the
io can be cleaned up immediately after the abort call; Similarly, ios
that are not waiting for data or transmitting data must be in the nvmet
layer being processed. The transport will wait for the nvmet layer
completion before calling the abort function, and as the io is idle,
the io can be cleaned up immediately after the abort call; As for ops
that are waiting for data (writedata), they may be outstanding
indefinitely if the lldd doesn't see a condition where the initiatior
port or link is bad. In those cases, the transport will call the abort
function and wait for the lldd's op done upcall for the operation, where
it will then clean up the io.
Additionally, if a lldd receives an ABTS and matches it to an outstanding
request in the transport, A new new transport upcall was created to abort
the outstanding request in the transport. The transport expects any
outstanding op call (readdata or writedata) will completed by the lldd and
the operation upcall made. The transport doesn't act on the reported
abort (e.g. clean up the io) until an op done upcall occurs, a new op is
attempted, or the nvmet layer completes the io processing.
fcloop:
----------------------
Updated to support the new target apis.
On fcp io aborts from the initiator, the loopback context is updated to
NULL out the half that has completed. The initiator side is immediately
called after the abort request with an io completion (abort status).
On fcp io aborts from the target, the io is stopped and the initiator side
sees it as an aborted io. Target side ops, perhaps in progress while the
initiator side is done, continue but noop the data movement as there's no
structure on the initiator side to reference.
patch also contains:
----------------------
Revised lpfc to support the new abort api
commonized rsp buffer syncing and nulling of private data based on
calling paths.
errors in op done calls don't take action on the fod. They're bad
operations which implies the fod may be bad.
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
2017-04-12 02:32:31 +08:00
|
|
|
.fcp_abort = lpfc_nvmet_xmt_fcp_abort,
|
2017-04-12 02:32:29 +08:00
|
|
|
.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
|
2017-08-02 06:12:40 +08:00
|
|
|
.defer_rcv = lpfc_nvmet_defer_rcv,
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
.max_hw_queues = 1,
|
|
|
|
.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
|
|
|
|
.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
|
|
|
|
.dma_boundary = 0xFFFFFFFF,
|
|
|
|
|
|
|
|
/* optional features */
|
|
|
|
.target_features = 0,
|
|
|
|
/* sizes of additional private data for data structures */
|
|
|
|
.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
|
|
|
|
};
|
|
|
|
|
2017-05-18 17:35:24 +08:00
|
|
|
static void
|
2017-08-24 07:55:42 +08:00
|
|
|
__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
|
|
|
|
struct lpfc_nvmet_ctx_info *infop)
|
2017-05-16 06:20:45 +08:00
|
|
|
{
|
|
|
|
struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
|
|
|
|
unsigned long flags;
|
|
|
|
|
2017-08-24 07:55:42 +08:00
|
|
|
spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
|
2017-06-16 13:56:45 +08:00
|
|
|
list_for_each_entry_safe(ctx_buf, next_ctx_buf,
|
2017-08-24 07:55:42 +08:00
|
|
|
&infop->nvmet_ctx_list, list) {
|
2019-01-29 03:14:22 +08:00
|
|
|
spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
|
2017-05-16 06:20:45 +08:00
|
|
|
list_del_init(&ctx_buf->list);
|
2019-01-29 03:14:22 +08:00
|
|
|
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
|
2017-08-24 07:55:42 +08:00
|
|
|
|
|
|
|
__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
|
2017-05-16 06:20:45 +08:00
|
|
|
ctx_buf->sglq->state = SGL_FREED;
|
|
|
|
ctx_buf->sglq->ndlp = NULL;
|
|
|
|
|
2017-06-30 16:02:51 +08:00
|
|
|
spin_lock(&phba->sli4_hba.sgl_list_lock);
|
2017-05-16 06:20:45 +08:00
|
|
|
list_add_tail(&ctx_buf->sglq->list,
|
2017-08-24 07:55:42 +08:00
|
|
|
&phba->sli4_hba.lpfc_nvmet_sgl_list);
|
2017-06-30 16:02:51 +08:00
|
|
|
spin_unlock(&phba->sli4_hba.sgl_list_lock);
|
2017-05-16 06:20:45 +08:00
|
|
|
|
|
|
|
lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
|
|
|
|
kfree(ctx_buf->context);
|
|
|
|
}
|
2017-08-24 07:55:42 +08:00
|
|
|
spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
|
|
|
|
}
|
2017-06-16 13:56:45 +08:00
|
|
|
|
2017-08-24 07:55:42 +08:00
|
|
|
static void
|
|
|
|
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
|
|
|
|
{
|
|
|
|
struct lpfc_nvmet_ctx_info *infop;
|
|
|
|
int i, j;
|
2017-06-16 13:56:45 +08:00
|
|
|
|
2017-08-24 07:55:42 +08:00
|
|
|
/* The first context list, MRQ 0 CPU 0 */
|
|
|
|
infop = phba->sli4_hba.nvmet_ctx_info;
|
|
|
|
if (!infop)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* Cycle the the entire CPU context list for every MRQ */
|
|
|
|
for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
|
2019-01-29 03:14:35 +08:00
|
|
|
for_each_present_cpu(j) {
|
|
|
|
infop = lpfc_get_ctx_list(phba, j, i);
|
2017-08-24 07:55:42 +08:00
|
|
|
__lpfc_nvmet_clean_io_for_cpu(phba, infop);
|
|
|
|
}
|
2017-06-16 13:56:45 +08:00
|
|
|
}
|
2017-08-24 07:55:42 +08:00
|
|
|
kfree(phba->sli4_hba.nvmet_ctx_info);
|
|
|
|
phba->sli4_hba.nvmet_ctx_info = NULL;
|
2017-05-16 06:20:45 +08:00
|
|
|
}
|
|
|
|
|
2017-05-18 17:35:24 +08:00
|
|
|
static int
|
2017-05-16 06:20:45 +08:00
|
|
|
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
|
|
|
|
{
|
|
|
|
struct lpfc_nvmet_ctxbuf *ctx_buf;
|
|
|
|
struct lpfc_iocbq *nvmewqe;
|
|
|
|
union lpfc_wqe128 *wqe;
|
2017-08-24 07:55:42 +08:00
|
|
|
struct lpfc_nvmet_ctx_info *last_infop;
|
|
|
|
struct lpfc_nvmet_ctx_info *infop;
|
2019-01-29 03:14:35 +08:00
|
|
|
int i, j, idx, cpu;
|
2017-05-16 06:20:45 +08:00
|
|
|
|
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
|
|
|
|
"6403 Allocate NVMET resources for %d XRIs\n",
|
|
|
|
phba->sli4_hba.nvmet_xri_cnt);
|
|
|
|
|
2017-08-24 07:55:42 +08:00
|
|
|
phba->sli4_hba.nvmet_ctx_info = kcalloc(
|
2019-01-29 03:14:35 +08:00
|
|
|
phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
|
2017-08-24 07:55:42 +08:00
|
|
|
sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
|
|
|
|
if (!phba->sli4_hba.nvmet_ctx_info) {
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
|
"6419 Failed allocate memory for "
|
|
|
|
"nvmet context lists\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Assuming X CPUs in the system, and Y MRQs, allocate some
|
|
|
|
* lpfc_nvmet_ctx_info structures as follows:
|
|
|
|
*
|
|
|
|
* cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
|
|
|
|
* cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
|
|
|
|
* ...
|
|
|
|
* cpuX/mrqY cpuX/mrqY ... cpuX/mrqY
|
|
|
|
*
|
|
|
|
* Each line represents a MRQ "silo" containing an entry for
|
|
|
|
* every CPU.
|
|
|
|
*
|
|
|
|
* MRQ X is initially assumed to be associated with CPU X, thus
|
|
|
|
* contexts are initially distributed across all MRQs using
|
|
|
|
* the MRQ index (N) as follows cpuN/mrqN. When contexts are
|
|
|
|
* freed, the are freed to the MRQ silo based on the CPU number
|
|
|
|
* of the IO completion. Thus a context that was allocated for MRQ A
|
|
|
|
* whose IO completed on CPU B will be freed to cpuB/mrqA.
|
|
|
|
*/
|
2019-01-29 03:14:35 +08:00
|
|
|
for_each_possible_cpu(i) {
|
2017-08-24 07:55:42 +08:00
|
|
|
for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
|
2019-01-29 03:14:35 +08:00
|
|
|
infop = lpfc_get_ctx_list(phba, i, j);
|
2017-08-24 07:55:42 +08:00
|
|
|
INIT_LIST_HEAD(&infop->nvmet_ctx_list);
|
|
|
|
spin_lock_init(&infop->nvmet_ctx_list_lock);
|
|
|
|
infop->nvmet_ctx_list_cnt = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Setup the next CPU context info ptr for each MRQ.
|
|
|
|
* MRQ 0 will cycle thru CPUs 0 - X separately from
|
|
|
|
* MRQ 1 cycling thru CPUs 0 - X, and so on.
|
|
|
|
*/
|
|
|
|
for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
|
2019-01-29 03:14:35 +08:00
|
|
|
last_infop = lpfc_get_ctx_list(phba,
|
|
|
|
cpumask_first(cpu_present_mask),
|
|
|
|
j);
|
|
|
|
for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
|
2017-08-24 07:55:42 +08:00
|
|
|
infop = lpfc_get_ctx_list(phba, i, j);
|
|
|
|
infop->nvmet_ctx_next_cpu = last_infop;
|
|
|
|
last_infop = infop;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-16 06:20:45 +08:00
|
|
|
/* For all nvmet xris, allocate resources needed to process a
|
|
|
|
* received command on a per xri basis.
|
|
|
|
*/
|
2017-08-24 07:55:42 +08:00
|
|
|
idx = 0;
|
2019-01-29 03:14:35 +08:00
|
|
|
cpu = cpumask_first(cpu_present_mask);
|
2017-05-16 06:20:45 +08:00
|
|
|
for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
|
|
|
|
ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
|
|
|
|
if (!ctx_buf) {
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
|
|
|
|
"6404 Ran out of memory for NVMET\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!ctx_buf->context) {
|
|
|
|
kfree(ctx_buf);
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
|
|
|
|
"6405 Ran out of NVMET "
|
|
|
|
"context memory\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
ctx_buf->context->ctxbuf = ctx_buf;
|
2017-06-02 12:06:58 +08:00
|
|
|
ctx_buf->context->state = LPFC_NVMET_STE_FREE;
|
2017-05-16 06:20:45 +08:00
|
|
|
|
|
|
|
ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
|
|
|
|
if (!ctx_buf->iocbq) {
|
|
|
|
kfree(ctx_buf->context);
|
|
|
|
kfree(ctx_buf);
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
|
|
|
|
"6406 Ran out of NVMET iocb/WQEs\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
|
|
|
|
nvmewqe = ctx_buf->iocbq;
|
2018-03-06 04:04:03 +08:00
|
|
|
wqe = &nvmewqe->wqe;
|
2018-03-06 04:04:05 +08:00
|
|
|
|
2017-05-16 06:20:45 +08:00
|
|
|
/* Initialize WQE */
|
|
|
|
memset(wqe, 0, sizeof(union lpfc_wqe));
|
|
|
|
|
|
|
|
ctx_buf->iocbq->context1 = NULL;
|
|
|
|
spin_lock(&phba->sli4_hba.sgl_list_lock);
|
|
|
|
ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
|
|
|
|
spin_unlock(&phba->sli4_hba.sgl_list_lock);
|
|
|
|
if (!ctx_buf->sglq) {
|
|
|
|
lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
|
|
|
|
kfree(ctx_buf->context);
|
|
|
|
kfree(ctx_buf);
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
|
|
|
|
"6407 Ran out of NVMET XRIs\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2019-01-29 03:14:39 +08:00
|
|
|
INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
|
2017-08-24 07:55:42 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Add ctx to MRQidx context list. Our initial assumption
|
|
|
|
* is MRQidx will be associated with CPUidx. This association
|
|
|
|
* can change on the fly.
|
|
|
|
*/
|
2019-01-29 03:14:35 +08:00
|
|
|
infop = lpfc_get_ctx_list(phba, cpu, idx);
|
2017-08-24 07:55:42 +08:00
|
|
|
spin_lock(&infop->nvmet_ctx_list_lock);
|
|
|
|
list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
|
|
|
|
infop->nvmet_ctx_list_cnt++;
|
|
|
|
spin_unlock(&infop->nvmet_ctx_list_lock);
|
|
|
|
|
|
|
|
/* Spread ctx structures evenly across all MRQs */
|
|
|
|
idx++;
|
2019-01-29 03:14:35 +08:00
|
|
|
if (idx >= phba->cfg_nvmet_mrq) {
|
2017-08-24 07:55:42 +08:00
|
|
|
idx = 0;
|
2019-01-29 03:14:35 +08:00
|
|
|
cpu = cpumask_first(cpu_present_mask);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
cpu = cpumask_next(cpu, cpu_present_mask);
|
|
|
|
if (cpu == nr_cpu_ids)
|
|
|
|
cpu = cpumask_first(cpu_present_mask);
|
|
|
|
|
2017-08-24 07:55:42 +08:00
|
|
|
}
|
|
|
|
|
2019-01-29 03:14:35 +08:00
|
|
|
for_each_present_cpu(i) {
|
2018-09-11 01:30:48 +08:00
|
|
|
for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
|
|
|
|
infop = lpfc_get_ctx_list(phba, i, j);
|
2017-08-24 07:55:42 +08:00
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
|
|
|
|
"6408 TOTAL NVMET ctx for CPU %d "
|
|
|
|
"MRQ %d: cnt %d nextcpu %p\n",
|
|
|
|
i, j, infop->nvmet_ctx_list_cnt,
|
|
|
|
infop->nvmet_ctx_next_cpu);
|
|
|
|
}
|
2017-05-16 06:20:45 +08:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
int
|
|
|
|
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
|
|
|
|
{
|
|
|
|
struct lpfc_vport *vport = phba->pport;
|
|
|
|
struct lpfc_nvmet_tgtport *tgtp;
|
|
|
|
struct nvmet_fc_port_info pinfo;
|
2017-05-16 06:20:45 +08:00
|
|
|
int error;
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
if (phba->targetport)
|
|
|
|
return 0;
|
|
|
|
|
2017-05-16 06:20:45 +08:00
|
|
|
error = lpfc_nvmet_setup_io_context(phba);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
|
|
|
|
pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
|
|
|
|
pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
|
|
|
|
pinfo.port_id = vport->fc_myDID;
|
|
|
|
|
2018-09-11 01:30:42 +08:00
|
|
|
/* We need to tell the transport layer + 1 because it takes page
|
|
|
|
* alignment into account. When space for the SGL is allocated we
|
|
|
|
* allocate + 3, one for cmd, one for rsp and one for this alignment
|
2017-04-22 07:05:01 +08:00
|
|
|
*/
|
|
|
|
lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
|
scsi: lpfc: Replace io_channels for nvme and fcp with general hdw_queues per cpu
Currently, both nvme and fcp each have their own concept of an io_channel,
which is a combination wq/cq and associated msix. Different cpus would
share an io_channel.
The driver is now moving to per-cpu wq/cq pairs and msix vectors. The
driver will still use separate wq/cq pairs per protocol on each cpu, but
the protocols will share the msix vector.
Given the elimination of the nvme and fcp io channels, the module
parameters will be removed. A new parameter, lpfc_hdw_queue is added which
allows the wq/cq pair allocation per cpu to be overridden and allocated to
lesser value. If lpfc_hdw_queue is zero, the number of pairs allocated will
be based on the number of cpus. If non-zero, the parameter specifies the
number of queues to allocate. At this time, the maximum non-zero value is
64.
To manage this new paradigm, a new hardware queue structure is created to
track queue activity and relationships.
As MSIX vector allocation must be known before setting up the
relationships, msix allocation now occurs before queue datastructures are
allocated. If the number of vectors allocated is less than the desired
hardware queues, the hardware queue counts will be reduced to the number of
vectors
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2019-01-29 03:14:21 +08:00
|
|
|
lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
|
2017-09-30 08:34:34 +08:00
|
|
|
lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
|
2017-02-13 05:52:37 +08:00
|
|
|
|
2017-03-09 06:36:01 +08:00
|
|
|
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
|
2017-02-13 05:52:37 +08:00
|
|
|
error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
|
|
|
|
&phba->pcidev->dev,
|
|
|
|
&phba->targetport);
|
2017-03-05 01:30:33 +08:00
|
|
|
#else
|
2017-05-16 06:20:45 +08:00
|
|
|
error = -ENOENT;
|
2017-03-05 01:30:33 +08:00
|
|
|
#endif
|
2017-02-13 05:52:37 +08:00
|
|
|
if (error) {
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
|
2017-09-30 08:34:39 +08:00
|
|
|
"6025 Cannot register NVME targetport x%x: "
|
|
|
|
"portnm %llx nodenm %llx segs %d qs %d\n",
|
|
|
|
error,
|
|
|
|
pinfo.port_name, pinfo.node_name,
|
|
|
|
lpfc_tgttemplate.max_sgl_segments,
|
|
|
|
lpfc_tgttemplate.max_hw_queues);
|
2017-02-13 05:52:37 +08:00
|
|
|
phba->targetport = NULL;
|
2017-09-30 08:34:39 +08:00
|
|
|
phba->nvmet_support = 0;
|
2017-05-16 06:20:45 +08:00
|
|
|
|
|
|
|
lpfc_nvmet_cleanup_io_context(phba);
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
} else {
|
|
|
|
tgtp = (struct lpfc_nvmet_tgtport *)
|
|
|
|
phba->targetport->private;
|
|
|
|
tgtp->phba = phba;
|
|
|
|
|
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
|
|
|
|
"6026 Registered NVME "
|
|
|
|
"targetport: %p, private %p "
|
2017-09-30 08:34:39 +08:00
|
|
|
"portnm %llx nodenm %llx segs %d qs %d\n",
|
2017-02-13 05:52:37 +08:00
|
|
|
phba->targetport, tgtp,
|
2017-09-30 08:34:39 +08:00
|
|
|
pinfo.port_name, pinfo.node_name,
|
|
|
|
lpfc_tgttemplate.max_sgl_segments,
|
|
|
|
lpfc_tgttemplate.max_hw_queues);
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
atomic_set(&tgtp->rcv_ls_req_in, 0);
|
|
|
|
atomic_set(&tgtp->rcv_ls_req_out, 0);
|
|
|
|
atomic_set(&tgtp->rcv_ls_req_drop, 0);
|
|
|
|
atomic_set(&tgtp->xmt_ls_abort, 0);
|
2017-05-16 06:20:40 +08:00
|
|
|
atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
|
2017-02-13 05:52:37 +08:00
|
|
|
atomic_set(&tgtp->xmt_ls_rsp, 0);
|
|
|
|
atomic_set(&tgtp->xmt_ls_drop, 0);
|
|
|
|
atomic_set(&tgtp->xmt_ls_rsp_error, 0);
|
2017-12-09 09:18:10 +08:00
|
|
|
atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
|
|
|
|
atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
|
2017-02-13 05:52:37 +08:00
|
|
|
atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
|
|
|
|
atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
|
|
|
|
atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
|
|
|
|
atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
|
|
|
|
atomic_set(&tgtp->xmt_fcp_drop, 0);
|
|
|
|
atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
|
|
|
|
atomic_set(&tgtp->xmt_fcp_read, 0);
|
|
|
|
atomic_set(&tgtp->xmt_fcp_write, 0);
|
|
|
|
atomic_set(&tgtp->xmt_fcp_rsp, 0);
|
2017-05-16 06:20:40 +08:00
|
|
|
atomic_set(&tgtp->xmt_fcp_release, 0);
|
2017-02-13 05:52:37 +08:00
|
|
|
atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
|
|
|
|
atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
|
2017-12-09 09:18:10 +08:00
|
|
|
atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
|
|
|
|
atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
|
2017-02-13 05:52:37 +08:00
|
|
|
atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
|
2017-12-09 09:18:10 +08:00
|
|
|
atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
|
2017-05-16 06:20:40 +08:00
|
|
|
atomic_set(&tgtp->xmt_fcp_abort, 0);
|
|
|
|
atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
|
|
|
|
atomic_set(&tgtp->xmt_abort_unsol, 0);
|
|
|
|
atomic_set(&tgtp->xmt_abort_sol, 0);
|
2017-02-13 05:52:37 +08:00
|
|
|
atomic_set(&tgtp->xmt_abort_rsp, 0);
|
|
|
|
atomic_set(&tgtp->xmt_abort_rsp_error, 0);
|
2018-01-31 07:58:52 +08:00
|
|
|
atomic_set(&tgtp->defer_ctx, 0);
|
|
|
|
atomic_set(&tgtp->defer_fod, 0);
|
|
|
|
atomic_set(&tgtp->defer_wqfull, 0);
|
2017-02-13 05:52:37 +08:00
|
|
|
}
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
|
|
|
|
{
|
|
|
|
struct lpfc_vport *vport = phba->pport;
|
|
|
|
|
|
|
|
if (!phba->targetport)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
|
|
|
|
"6007 Update NVMET port %p did x%x\n",
|
|
|
|
phba->targetport, vport->fc_myDID);
|
|
|
|
|
|
|
|
phba->targetport->port_id = vport->fc_myDID;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-03-05 01:30:30 +08:00
|
|
|
/**
|
|
|
|
* lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
|
|
|
|
* @phba: pointer to lpfc hba data structure.
|
|
|
|
* @axri: pointer to the nvmet xri abort wcqe structure.
|
|
|
|
*
|
|
|
|
* This routine is invoked by the worker thread to process a SLI4 fast-path
|
|
|
|
* NVMET aborted xri.
|
|
|
|
**/
|
|
|
|
void
|
|
|
|
lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
|
|
|
|
struct sli4_wcqe_xri_aborted *axri)
|
|
|
|
{
|
2019-05-22 08:48:52 +08:00
|
|
|
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
|
|
|
|
uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
|
2017-12-09 09:18:10 +08:00
|
|
|
struct lpfc_nvmet_tgtport *tgtp;
|
2019-05-22 08:48:56 +08:00
|
|
|
struct nvmefc_tgt_fcp_req *req = NULL;
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
struct lpfc_nodelist *ndlp;
|
|
|
|
unsigned long iflag = 0;
|
|
|
|
int rrq_empty = 0;
|
|
|
|
bool released = false;
|
|
|
|
|
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
|
|
|
|
"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
|
|
|
|
|
|
|
|
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
|
|
|
|
return;
|
2017-12-09 09:18:10 +08:00
|
|
|
|
|
|
|
if (phba->targetport) {
|
|
|
|
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
|
|
|
|
atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
|
|
|
|
}
|
|
|
|
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
spin_lock_irqsave(&phba->hbalock, iflag);
|
2019-01-29 03:14:22 +08:00
|
|
|
spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
list_for_each_entry_safe(ctxp, next_ctxp,
|
|
|
|
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
|
|
|
|
list) {
|
2017-05-16 06:20:45 +08:00
|
|
|
if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
continue;
|
|
|
|
|
2019-01-29 03:14:40 +08:00
|
|
|
spin_lock(&ctxp->ctxlock);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
/* Check if we already received a free context call
|
|
|
|
* and we have completed processing an abort situation.
|
|
|
|
*/
|
|
|
|
if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
|
|
|
|
!(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
|
2019-05-22 08:48:56 +08:00
|
|
|
list_del_init(&ctxp->list);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
released = true;
|
|
|
|
}
|
|
|
|
ctxp->flag &= ~LPFC_NVMET_XBUSY;
|
2019-01-29 03:14:40 +08:00
|
|
|
spin_unlock(&ctxp->ctxlock);
|
2019-01-29 03:14:22 +08:00
|
|
|
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
|
|
|
|
rrq_empty = list_empty(&phba->active_rrq_list);
|
|
|
|
spin_unlock_irqrestore(&phba->hbalock, iflag);
|
|
|
|
ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
|
|
|
|
if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
|
|
|
|
(ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
|
|
|
|
ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
|
|
|
|
lpfc_set_rrq_active(phba, ndlp,
|
2017-05-16 06:20:45 +08:00
|
|
|
ctxp->ctxbuf->sglq->sli4_lxritag,
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
rxid, 1);
|
|
|
|
lpfc_sli4_abts_err_handler(phba, ndlp, axri);
|
|
|
|
}
|
|
|
|
|
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
|
2019-05-22 08:48:56 +08:00
|
|
|
"6318 XB aborted oxid x%x flg x%x (%x)\n",
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicited ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - this flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_release routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can have both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is invoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenario, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
ctxp->oxid, ctxp->flag, released);
|
|
|
|
if (released)
|
2017-05-16 06:20:45 +08:00
|
|
|
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
|
|
|
|
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicited ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - this flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_release routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can have both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is invoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenario, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
if (rrq_empty)
|
|
|
|
lpfc_worker_wake_up(phba);
|
|
|
|
return;
|
|
|
|
}
|
2019-01-29 03:14:22 +08:00
|
|
|
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - this flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_release routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can have both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is invoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenario, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
spin_unlock_irqrestore(&phba->hbalock, iflag);
|
2019-05-22 08:48:56 +08:00
|
|
|
|
|
|
|
ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
|
|
|
|
if (ctxp) {
|
|
|
|
/*
|
|
|
|
* Abort already done by FW, so BA_ACC sent.
|
|
|
|
* However, the transport may be unaware.
|
|
|
|
*/
|
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
|
|
|
|
"6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
|
|
|
|
"flag x%x oxid x%x rxid x%x\n",
|
|
|
|
xri, ctxp->state, ctxp->flag, ctxp->oxid,
|
|
|
|
rxid);
|
|
|
|
|
|
|
|
spin_lock_irqsave(&ctxp->ctxlock, iflag);
|
|
|
|
ctxp->flag |= LPFC_NVMET_ABTS_RCV;
|
|
|
|
ctxp->state = LPFC_NVMET_STE_ABORT;
|
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
|
|
|
|
|
|
|
|
lpfc_nvmeio_data(phba,
|
|
|
|
"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
|
2019-05-22 08:49:10 +08:00
|
|
|
xri, raw_smp_processor_id(), 0);
|
2019-05-22 08:48:56 +08:00
|
|
|
|
|
|
|
req = &ctxp->ctx.fcp_req;
|
|
|
|
if (req)
|
|
|
|
nvmet_fc_rcv_fcp_abort(phba->targetport, req);
|
|
|
|
}
|
2019-05-22 08:48:52 +08:00
|
|
|
#endif
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicited ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - this flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_release routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can have both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is invoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenario, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
|
|
|
|
struct fc_frame_header *fc_hdr)
|
|
|
|
{
|
|
|
|
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
|
|
|
|
struct lpfc_hba *phba = vport->phba;
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
|
|
|
|
struct nvmefc_tgt_fcp_req *rsp;
|
2019-05-22 08:48:52 +08:00
|
|
|
uint32_t sid;
|
2019-05-22 08:48:56 +08:00
|
|
|
uint16_t oxid, xri;
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicited ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - this flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_release routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can have both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is invoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenario, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
unsigned long iflag = 0;
|
|
|
|
|
2019-05-22 08:48:52 +08:00
|
|
|
sid = sli4_sid_from_fc_hdr(fc_hdr);
|
2019-05-22 08:48:56 +08:00
|
|
|
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
|
|
|
|
spin_lock_irqsave(&phba->hbalock, iflag);
|
2019-01-29 03:14:22 +08:00
|
|
|
spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
list_for_each_entry_safe(ctxp, next_ctxp,
|
|
|
|
&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
|
|
|
|
list) {
|
2019-05-22 08:48:56 +08:00
|
|
|
if (ctxp->oxid != oxid || ctxp->sid != sid)
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
continue;
|
|
|
|
|
2019-05-22 08:48:52 +08:00
|
|
|
xri = ctxp->ctxbuf->sglq->sli4_xritag;
|
|
|
|
|
2019-01-29 03:14:22 +08:00
|
|
|
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
spin_unlock_irqrestore(&phba->hbalock, iflag);
|
|
|
|
|
|
|
|
spin_lock_irqsave(&ctxp->ctxlock, iflag);
|
|
|
|
ctxp->flag |= LPFC_NVMET_ABTS_RCV;
|
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
|
|
|
|
|
|
|
|
lpfc_nvmeio_data(phba,
|
|
|
|
"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
|
2019-03-29 02:06:22 +08:00
|
|
|
xri, raw_smp_processor_id(), 0);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
|
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
|
|
|
|
"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
|
|
|
|
|
|
|
|
rsp = &ctxp->ctx.fcp_req;
|
|
|
|
nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
|
|
|
|
|
|
|
|
/* Respond with BA_ACC accordingly */
|
|
|
|
lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
|
|
|
|
return 0;
|
|
|
|
}
|
2019-01-29 03:14:22 +08:00
|
|
|
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
spin_unlock_irqrestore(&phba->hbalock, iflag);
|
|
|
|
|
2019-05-22 08:48:56 +08:00
|
|
|
/* check the wait list */
|
|
|
|
if (phba->sli4_hba.nvmet_io_wait_cnt) {
|
|
|
|
struct rqb_dmabuf *nvmebuf;
|
|
|
|
struct fc_frame_header *fc_hdr_tmp;
|
|
|
|
u32 sid_tmp;
|
|
|
|
u16 oxid_tmp;
|
|
|
|
bool found = false;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
|
|
|
|
|
|
|
|
/* match by oxid and s_id */
|
|
|
|
list_for_each_entry(nvmebuf,
|
|
|
|
&phba->sli4_hba.lpfc_nvmet_io_wait_list,
|
|
|
|
hbuf.list) {
|
|
|
|
fc_hdr_tmp = (struct fc_frame_header *)
|
|
|
|
(nvmebuf->hbuf.virt);
|
|
|
|
oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
|
|
|
|
sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
|
|
|
|
if (oxid_tmp != oxid || sid_tmp != sid)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
|
|
|
|
"6321 NVMET Rcv ABTS oxid x%x from x%x "
|
|
|
|
"is waiting for a ctxp\n",
|
|
|
|
oxid, sid);
|
|
|
|
|
|
|
|
list_del_init(&nvmebuf->hbuf.list);
|
|
|
|
phba->sli4_hba.nvmet_io_wait_cnt--;
|
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
|
|
|
|
iflag);
|
|
|
|
|
|
|
|
/* free buffer since already posted a new DMA buffer to RQ */
|
|
|
|
if (found) {
|
|
|
|
nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
|
|
|
|
/* Respond with BA_ACC accordingly */
|
|
|
|
lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* check active list */
|
|
|
|
ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
|
|
|
|
if (ctxp) {
|
|
|
|
xri = ctxp->ctxbuf->sglq->sli4_xritag;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&ctxp->ctxlock, iflag);
|
|
|
|
ctxp->flag |= (LPFC_NVMET_ABTS_RCV | LPFC_NVMET_ABORT_OP);
|
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
|
|
|
|
|
|
|
|
lpfc_nvmeio_data(phba,
|
|
|
|
"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
|
|
|
|
xri, raw_smp_processor_id(), 0);
|
|
|
|
|
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
|
|
|
|
"6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
|
|
|
|
"flag x%x state x%x\n",
|
|
|
|
ctxp->oxid, xri, ctxp->flag, ctxp->state);
|
|
|
|
|
|
|
|
if (ctxp->flag & LPFC_NVMET_TNOTIFY) {
|
|
|
|
/* Notify the transport */
|
|
|
|
nvmet_fc_rcv_fcp_abort(phba->targetport,
|
|
|
|
&ctxp->ctx.fcp_req);
|
|
|
|
} else {
|
2019-05-22 08:49:00 +08:00
|
|
|
cancel_work_sync(&ctxp->ctxbuf->defer_work);
|
2019-05-22 08:48:56 +08:00
|
|
|
spin_lock_irqsave(&ctxp->ctxlock, iflag);
|
|
|
|
lpfc_nvmet_defer_release(phba, ctxp);
|
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
|
|
|
|
}
|
|
|
|
if (ctxp->state == LPFC_NVMET_STE_RCV)
|
|
|
|
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
|
|
|
|
ctxp->oxid);
|
|
|
|
else
|
|
|
|
lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
|
|
|
|
ctxp->oxid);
|
|
|
|
|
2019-05-22 08:49:00 +08:00
|
|
|
lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
|
2019-05-22 08:48:56 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
|
|
|
|
oxid, raw_smp_processor_id(), 1);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
|
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
|
2019-05-22 08:48:56 +08:00
|
|
|
"6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
|
|
|
|
/* Respond with BA_RJT accordingly */
|
|
|
|
lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
|
|
|
|
#endif
|
2017-04-28 01:33:01 +08:00
|
|
|
return 0;
|
2017-03-05 01:30:30 +08:00
|
|
|
}
|
|
|
|
|
2018-01-31 07:58:49 +08:00
|
|
|
static void
|
|
|
|
lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp)
|
|
|
|
{
|
|
|
|
struct lpfc_sli_ring *pring;
|
|
|
|
struct lpfc_iocbq *nvmewqeq;
|
|
|
|
struct lpfc_iocbq *next_nvmewqeq;
|
|
|
|
unsigned long iflags;
|
|
|
|
struct lpfc_wcqe_complete wcqe;
|
|
|
|
struct lpfc_wcqe_complete *wcqep;
|
|
|
|
|
|
|
|
pring = wq->pring;
|
|
|
|
wcqep = &wcqe;
|
|
|
|
|
|
|
|
/* Fake an ABORT error code back to cmpl routine */
|
|
|
|
memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
|
|
|
|
bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
|
|
|
|
wcqep->parameter = IOERR_ABORT_REQUESTED;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&pring->ring_lock, iflags);
|
|
|
|
list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
|
|
|
|
&wq->wqfull_list, list) {
|
|
|
|
if (ctxp) {
|
|
|
|
/* Checking for a specific IO to flush */
|
|
|
|
if (nvmewqeq->context2 == ctxp) {
|
|
|
|
list_del(&nvmewqeq->list);
|
|
|
|
spin_unlock_irqrestore(&pring->ring_lock,
|
|
|
|
iflags);
|
|
|
|
lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
|
|
|
|
wcqep);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
} else {
|
|
|
|
/* Flush all IOs */
|
|
|
|
list_del(&nvmewqeq->list);
|
|
|
|
spin_unlock_irqrestore(&pring->ring_lock, iflags);
|
|
|
|
lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
|
|
|
|
spin_lock_irqsave(&pring->ring_lock, iflags);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!ctxp)
|
|
|
|
wq->q_flag &= ~HBA_NVMET_WQFULL;
|
|
|
|
spin_unlock_irqrestore(&pring->ring_lock, iflags);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
|
|
|
|
struct lpfc_queue *wq)
|
|
|
|
{
|
|
|
|
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
|
|
|
|
struct lpfc_sli_ring *pring;
|
|
|
|
struct lpfc_iocbq *nvmewqeq;
|
2019-01-29 03:14:26 +08:00
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp;
|
2018-01-31 07:58:49 +08:00
|
|
|
unsigned long iflags;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Some WQE slots are available, so try to re-issue anything
|
|
|
|
* on the WQ wqfull_list.
|
|
|
|
*/
|
|
|
|
pring = wq->pring;
|
|
|
|
spin_lock_irqsave(&pring->ring_lock, iflags);
|
|
|
|
while (!list_empty(&wq->wqfull_list)) {
|
|
|
|
list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
|
|
|
|
list);
|
|
|
|
spin_unlock_irqrestore(&pring->ring_lock, iflags);
|
2019-01-29 03:14:26 +08:00
|
|
|
ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2;
|
|
|
|
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
|
2018-01-31 07:58:49 +08:00
|
|
|
spin_lock_irqsave(&pring->ring_lock, iflags);
|
|
|
|
if (rc == -EBUSY) {
|
|
|
|
/* WQ was full again, so put it back on the list */
|
|
|
|
list_add(&nvmewqeq->list, &wq->wqfull_list);
|
|
|
|
spin_unlock_irqrestore(&pring->ring_lock, iflags);
|
|
|
|
return;
|
|
|
|
}
|
2019-05-22 08:48:56 +08:00
|
|
|
if (rc == WQE_SUCCESS) {
|
|
|
|
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
|
|
|
|
if (ctxp->ts_cmd_nvme) {
|
|
|
|
if (ctxp->ctx.fcp_req.op == NVMET_FCOP_RSP)
|
|
|
|
ctxp->ts_status_wqput = ktime_get_ns();
|
|
|
|
else
|
|
|
|
ctxp->ts_data_wqput = ktime_get_ns();
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
} else {
|
|
|
|
WARN_ON(rc);
|
|
|
|
}
|
2018-01-31 07:58:49 +08:00
|
|
|
}
|
|
|
|
wq->q_flag &= ~HBA_NVMET_WQFULL;
|
|
|
|
spin_unlock_irqrestore(&pring->ring_lock, iflags);
|
|
|
|
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
void
|
|
|
|
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
|
|
|
|
{
|
2017-03-09 06:36:01 +08:00
|
|
|
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
|
2017-02-13 05:52:37 +08:00
|
|
|
struct lpfc_nvmet_tgtport *tgtp;
|
2018-01-31 07:58:49 +08:00
|
|
|
struct lpfc_queue *wq;
|
|
|
|
uint32_t qidx;
|
2019-01-18 00:14:45 +08:00
|
|
|
DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
if (phba->nvmet_support == 0)
|
|
|
|
return;
|
|
|
|
if (phba->targetport) {
|
|
|
|
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
|
scsi: lpfc: Replace io_channels for nvme and fcp with general hdw_queues per cpu
Currently, both nvme and fcp each have their own concept of an io_channel,
which is a combination wq/cq and associated msix. Different cpus would
share an io_channel.
The driver is now moving to per-cpu wq/cq pairs and msix vectors. The
driver will still use separate wq/cq pairs per protocol on each cpu, but
the protocols will share the msix vector.
Given the elimination of the nvme and fcp io channels, the module
parameters will be removed. A new parameter, lpfc_hdw_queue is added which
allows the wq/cq pair allocation per cpu to be overridden and allocated to
lesser value. If lpfc_hdw_queue is zero, the number of pairs allocated will
be based on the number of cpus. If non-zero, the parameter specifies the
number of queues to allocate. At this time, the maximum non-zero value is
64.
To manage this new paradigm, a new hardware queue structure is created to
track queue activity and relationships.
As MSIX vector allocation must be known before setting up the
relationships, msix allocation now occurs before queue datastructures are
allocated. If the number of vectors allocated is less than the desired
hardware queues, the hardware queue counts will be reduced to the number of
vectors
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2019-01-29 03:14:21 +08:00
|
|
|
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
|
|
|
|
wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
|
2018-01-31 07:58:49 +08:00
|
|
|
lpfc_nvmet_wqfull_flush(phba, wq, NULL);
|
|
|
|
}
|
2019-01-18 00:14:45 +08:00
|
|
|
tgtp->tport_unreg_cmp = &tport_unreg_cmp;
|
2017-02-13 05:52:37 +08:00
|
|
|
nvmet_fc_unregister_targetport(phba->targetport);
|
2019-03-13 07:30:16 +08:00
|
|
|
if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
|
|
|
|
msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
|
|
|
|
"6179 Unreg targetport %p timeout "
|
|
|
|
"reached.\n", phba->targetport);
|
2017-05-16 06:20:45 +08:00
|
|
|
lpfc_nvmet_cleanup_io_context(phba);
|
2017-02-13 05:52:37 +08:00
|
|
|
}
|
|
|
|
phba->targetport = NULL;
|
2017-03-05 01:30:33 +08:00
|
|
|
#endif
|
2017-02-13 05:52:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
|
|
|
|
* @phba: pointer to lpfc hba data structure.
|
|
|
|
* @pring: pointer to a SLI ring.
|
|
|
|
* @nvmebuf: pointer to lpfc nvme command HBQ data structure.
|
|
|
|
*
|
|
|
|
* This routine is used for processing the WQE associated with a unsolicited
|
|
|
|
* event. It first determines whether there is an existing ndlp that matches
|
|
|
|
* the DID from the unsolicited WQE. If not, it will create a new one with
|
|
|
|
* the DID from the unsolicited WQE. The ELS command from the unsolicited
|
|
|
|
* WQE is then used to invoke the proper routine and to set up proper state
|
|
|
|
* of the discovery state machine.
|
|
|
|
**/
|
|
|
|
static void
|
|
|
|
lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|
|
|
struct hbq_dmabuf *nvmebuf)
|
|
|
|
{
|
2017-03-09 06:36:01 +08:00
|
|
|
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
|
2017-02-13 05:52:37 +08:00
|
|
|
struct lpfc_nvmet_tgtport *tgtp;
|
|
|
|
struct fc_frame_header *fc_hdr;
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp;
|
|
|
|
uint32_t *payload;
|
|
|
|
uint32_t size, oxid, sid, rc;
|
|
|
|
|
2018-06-26 23:24:25 +08:00
|
|
|
fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
|
|
|
|
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
|
|
|
|
|
2018-07-13 16:39:54 +08:00
|
|
|
if (!phba->targetport) {
|
2017-02-13 05:52:37 +08:00
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
2018-06-26 23:24:25 +08:00
|
|
|
"6154 LS Drop IO x%x\n", oxid);
|
2017-02-13 05:52:38 +08:00
|
|
|
oxid = 0;
|
|
|
|
size = 0;
|
|
|
|
sid = 0;
|
2017-05-16 06:20:40 +08:00
|
|
|
ctxp = NULL;
|
2017-02-13 05:52:37 +08:00
|
|
|
goto dropit;
|
|
|
|
}
|
|
|
|
|
|
|
|
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
|
|
|
|
payload = (uint32_t *)(nvmebuf->dbuf.virt);
|
|
|
|
size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
|
|
|
|
sid = sli4_sid_from_fc_hdr(fc_hdr);
|
|
|
|
|
|
|
|
ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
|
|
|
|
if (ctxp == NULL) {
|
|
|
|
atomic_inc(&tgtp->rcv_ls_req_drop);
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
|
|
|
"6155 LS Drop IO x%x: Alloc\n",
|
|
|
|
oxid);
|
|
|
|
dropit:
|
2017-02-13 05:52:38 +08:00
|
|
|
lpfc_nvmeio_data(phba, "NVMET LS DROP: "
|
|
|
|
"xri x%x sz %d from %06x\n",
|
|
|
|
oxid, size, sid);
|
2018-07-13 16:39:54 +08:00
|
|
|
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
|
2017-02-13 05:52:37 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
ctxp->phba = phba;
|
|
|
|
ctxp->size = size;
|
|
|
|
ctxp->oxid = oxid;
|
|
|
|
ctxp->sid = sid;
|
|
|
|
ctxp->wqeq = NULL;
|
2017-06-02 12:06:58 +08:00
|
|
|
ctxp->state = LPFC_NVMET_STE_LS_RCV;
|
|
|
|
ctxp->entry_cnt = 1;
|
2017-02-13 05:52:37 +08:00
|
|
|
ctxp->rqb_buffer = (void *)nvmebuf;
|
2019-01-29 03:14:26 +08:00
|
|
|
ctxp->hdwq = &phba->sli4_hba.hdwq[0];
|
2017-02-13 05:52:38 +08:00
|
|
|
|
|
|
|
lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
|
|
|
|
oxid, size, sid);
|
2017-02-13 05:52:37 +08:00
|
|
|
/*
|
|
|
|
* The calling sequence should be:
|
|
|
|
* nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done
|
|
|
|
* lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
|
|
|
|
*/
|
|
|
|
atomic_inc(&tgtp->rcv_ls_req_in);
|
|
|
|
rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
|
|
|
|
payload, size);
|
|
|
|
|
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
|
2017-06-02 12:06:58 +08:00
|
|
|
"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
|
|
|
|
"%08x %08x %08x\n", size, rc,
|
2017-02-13 05:52:37 +08:00
|
|
|
*payload, *(payload+1), *(payload+2),
|
|
|
|
*(payload+3), *(payload+4), *(payload+5));
|
2017-02-13 05:52:38 +08:00
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
if (rc == 0) {
|
|
|
|
atomic_inc(&tgtp->rcv_ls_req_out);
|
|
|
|
return;
|
|
|
|
}
|
2017-02-13 05:52:38 +08:00
|
|
|
|
|
|
|
lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
|
|
|
|
oxid, size, sid);
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
atomic_inc(&tgtp->rcv_ls_req_drop);
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
|
|
|
"6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
|
|
|
|
ctxp->oxid, rc);
|
|
|
|
|
|
|
|
/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
|
2018-07-13 16:39:54 +08:00
|
|
|
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
atomic_inc(&tgtp->xmt_ls_abort);
|
|
|
|
lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
|
2017-03-05 01:30:33 +08:00
|
|
|
#endif
|
2017-02-13 05:52:37 +08:00
|
|
|
}
|
|
|
|
|
2019-01-29 03:14:39 +08:00
|
|
|
static void
|
|
|
|
lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
|
|
|
|
{
|
|
|
|
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
|
|
|
|
struct lpfc_hba *phba = ctxp->phba;
|
|
|
|
struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
|
|
|
|
struct lpfc_nvmet_tgtport *tgtp;
|
scsi: lpfc: Fix nvmet async receive buffer replenishment
Under circustances with high load, the driver is running out of async
receive buffers which may result in one of the following messages:
0:6401 RQE Error x13, posted 226 err_cnt 0: 925c6050 925c604e 925c5d54
or
0:2885 Port Status Event: port status reg 0x81800000,
port smphr reg 0xc000, error 1=0x52004a01, error 2=0x0
The driver is waiting for full io completion before returning receive
buffers to the adapter. There is no need for such a relationship.
Whenever a new command is received from the wire, the driver will have two
contexts - an io context (ctxp) and a receive buffer context. In current
code, the receive buffer context stays 1:1 with the io and won't be
reposted to the hardware until the io completes. There is no need for such
a relationship.
Change the driver so that up on successful handing of the command to the
transport, where the transport has copied what it needed thus the buffer is
returned to the driver, have the driver immediately repost the buffer to
the hardware. If the command cannot be successfully handed to the transport
as transport resources are temporarily busy, have the driver allocate a new
and separate receive buffer and post it to the hardware so that hardware
can continue while the command is queued for the transport.
When an io is complete, the transport returns the io context to the driver,
and the driver may be waiting for more contexts, thus immediately reuse the
io context. In this path, there was a buffer posted when the receive buffer
was queued waiting for an io context so a replacement is not needed in the
new code additions. Thus, exempt this the context reuse case from the
buffer reposting.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2019-03-13 07:30:09 +08:00
|
|
|
uint32_t *payload, qno;
|
2019-01-29 03:14:39 +08:00
|
|
|
uint32_t rc;
|
|
|
|
unsigned long iflags;
|
|
|
|
|
|
|
|
if (!nvmebuf) {
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
|
|
|
"6159 process_rcv_fcp_req, nvmebuf is NULL, "
|
|
|
|
"oxid: x%x flg: x%x state: x%x\n",
|
|
|
|
ctxp->oxid, ctxp->flag, ctxp->state);
|
|
|
|
spin_lock_irqsave(&ctxp->ctxlock, iflags);
|
|
|
|
lpfc_nvmet_defer_release(phba, ctxp);
|
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
|
|
|
|
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
|
|
|
|
ctxp->oxid);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-05-22 08:48:56 +08:00
|
|
|
if (ctxp->flag & LPFC_NVMET_ABTS_RCV) {
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
|
|
|
"6324 IO oxid x%x aborted\n",
|
|
|
|
ctxp->oxid);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-01-29 03:14:39 +08:00
|
|
|
payload = (uint32_t *)(nvmebuf->dbuf.virt);
|
|
|
|
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
|
2019-05-22 08:48:56 +08:00
|
|
|
ctxp->flag |= LPFC_NVMET_TNOTIFY;
|
scsi: lpfc: Separate CQ processing for nvmet_fc upcalls
Currently the driver is notified of new command frame receipt by CQEs. As
part of the CQE processing, the driver upcalls the nvmet_fc transport to
deliver the command. nvmet_fc, as part of receiving the command builds out
a context for it, where one of the first steps is to allocate memory for
the io.
When running with tests that do large ios (1MB), it was found on some
systems, the total number of outstanding I/O's, at 1MB per, completely
consumed the system's memory. Thus additional ios were getting blocked in
the memory allocator. Given that this blocked the lpfc thread processing
CQEs, there were lots of other commands that were received and which are
then held up, and given CQEs are serially processed, the aggregate delays
for an IO waiting behind the others became cummulative - enough so that the
initiator hit timeouts for the ios.
The basic fix is to avoid the direct upcall and instead schedule a work
item for each io as it is received. This allows the cq processing to
complete very quickly, and each io can then run or block on it's own.
However, this general solution hurts latency when there are few ios. As
such, implemented the fix such that the driver watches how many CQEs it has
processed sequentially in one run. As long as the count is below a
threshold, the direct nvmet_fc upcall will be made. Only when the count is
exceeded will it revert to work scheduling.
Given that debug of this showed a surprisingly long delay in cq processing,
the io timer stats were updated to better reflect the processing of the
different points.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2019-05-22 08:48:55 +08:00
|
|
|
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
|
|
|
|
if (ctxp->ts_isr_cmd)
|
|
|
|
ctxp->ts_cmd_nvme = ktime_get_ns();
|
|
|
|
#endif
|
2019-01-29 03:14:39 +08:00
|
|
|
/*
|
|
|
|
* The calling sequence should be:
|
|
|
|
* nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done
|
|
|
|
* lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
|
|
|
|
* When we return from nvmet_fc_rcv_fcp_req, all relevant info
|
|
|
|
* the NVME command / FC header is stored.
|
|
|
|
* A buffer has already been reposted for this IO, so just free
|
|
|
|
* the nvmebuf.
|
|
|
|
*/
|
|
|
|
rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
|
|
|
|
payload, ctxp->size);
|
|
|
|
/* Process FCP command */
|
|
|
|
if (rc == 0) {
|
|
|
|
atomic_inc(&tgtp->rcv_fcp_cmd_out);
|
scsi: lpfc: Fix nvmet async receive buffer replenishment
Under circustances with high load, the driver is running out of async
receive buffers which may result in one of the following messages:
0:6401 RQE Error x13, posted 226 err_cnt 0: 925c6050 925c604e 925c5d54
or
0:2885 Port Status Event: port status reg 0x81800000,
port smphr reg 0xc000, error 1=0x52004a01, error 2=0x0
The driver is waiting for full io completion before returning receive
buffers to the adapter. There is no need for such a relationship.
Whenever a new command is received from the wire, the driver will have two
contexts - an io context (ctxp) and a receive buffer context. In current
code, the receive buffer context stays 1:1 with the io and won't be
reposted to the hardware until the io completes. There is no need for such
a relationship.
Change the driver so that up on successful handing of the command to the
transport, where the transport has copied what it needed thus the buffer is
returned to the driver, have the driver immediately repost the buffer to
the hardware. If the command cannot be successfully handed to the transport
as transport resources are temporarily busy, have the driver allocate a new
and separate receive buffer and post it to the hardware so that hardware
can continue while the command is queued for the transport.
When an io is complete, the transport returns the io context to the driver,
and the driver may be waiting for more contexts, thus immediately reuse the
io context. In this path, there was a buffer posted when the receive buffer
was queued waiting for an io context so a replacement is not needed in the
new code additions. Thus, exempt this the context reuse case from the
buffer reposting.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2019-03-13 07:30:09 +08:00
|
|
|
spin_lock_irqsave(&ctxp->ctxlock, iflags);
|
|
|
|
if ((ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) ||
|
|
|
|
(nvmebuf != ctxp->rqb_buffer)) {
|
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
ctxp->rqb_buffer = NULL;
|
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
|
|
|
|
lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
|
2019-01-29 03:14:39 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Processing of FCP command is deferred */
|
|
|
|
if (rc == -EOVERFLOW) {
|
|
|
|
lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
|
|
|
|
"from %06x\n",
|
|
|
|
ctxp->oxid, ctxp->size, ctxp->sid);
|
|
|
|
atomic_inc(&tgtp->rcv_fcp_cmd_out);
|
|
|
|
atomic_inc(&tgtp->defer_fod);
|
scsi: lpfc: Fix nvmet async receive buffer replenishment
Under circustances with high load, the driver is running out of async
receive buffers which may result in one of the following messages:
0:6401 RQE Error x13, posted 226 err_cnt 0: 925c6050 925c604e 925c5d54
or
0:2885 Port Status Event: port status reg 0x81800000,
port smphr reg 0xc000, error 1=0x52004a01, error 2=0x0
The driver is waiting for full io completion before returning receive
buffers to the adapter. There is no need for such a relationship.
Whenever a new command is received from the wire, the driver will have two
contexts - an io context (ctxp) and a receive buffer context. In current
code, the receive buffer context stays 1:1 with the io and won't be
reposted to the hardware until the io completes. There is no need for such
a relationship.
Change the driver so that up on successful handing of the command to the
transport, where the transport has copied what it needed thus the buffer is
returned to the driver, have the driver immediately repost the buffer to
the hardware. If the command cannot be successfully handed to the transport
as transport resources are temporarily busy, have the driver allocate a new
and separate receive buffer and post it to the hardware so that hardware
can continue while the command is queued for the transport.
When an io is complete, the transport returns the io context to the driver,
and the driver may be waiting for more contexts, thus immediately reuse the
io context. In this path, there was a buffer posted when the receive buffer
was queued waiting for an io context so a replacement is not needed in the
new code additions. Thus, exempt this the context reuse case from the
buffer reposting.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2019-03-13 07:30:09 +08:00
|
|
|
spin_lock_irqsave(&ctxp->ctxlock, iflags);
|
|
|
|
if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
|
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
|
|
|
|
/*
|
|
|
|
* Post a replacement DMA buffer to RQ and defer
|
|
|
|
* freeing rcv buffer till .defer_rcv callback
|
|
|
|
*/
|
|
|
|
qno = nvmebuf->idx;
|
|
|
|
lpfc_post_rq_buffer(
|
|
|
|
phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
|
|
|
|
phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
|
2019-01-29 03:14:39 +08:00
|
|
|
return;
|
|
|
|
}
|
2019-05-22 08:48:56 +08:00
|
|
|
ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
|
2019-01-29 03:14:39 +08:00
|
|
|
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
|
|
|
"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
|
|
|
|
ctxp->oxid, rc,
|
|
|
|
atomic_read(&tgtp->rcv_fcp_cmd_in),
|
|
|
|
atomic_read(&tgtp->rcv_fcp_cmd_out),
|
|
|
|
atomic_read(&tgtp->xmt_fcp_release));
|
|
|
|
lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
|
|
|
|
ctxp->oxid, ctxp->size, ctxp->sid);
|
|
|
|
spin_lock_irqsave(&ctxp->ctxlock, iflags);
|
|
|
|
lpfc_nvmet_defer_release(phba, ctxp);
|
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
|
|
|
|
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
|
|
|
|
{
|
|
|
|
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
|
|
|
|
struct lpfc_nvmet_ctxbuf *ctx_buf =
|
|
|
|
container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
|
|
|
|
|
|
|
|
lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2017-08-24 07:55:42 +08:00
|
|
|
static struct lpfc_nvmet_ctxbuf *
|
|
|
|
lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
|
|
|
|
struct lpfc_nvmet_ctx_info *current_infop)
|
|
|
|
{
|
2017-09-30 08:34:31 +08:00
|
|
|
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
|
2017-08-24 07:55:42 +08:00
|
|
|
struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
|
|
|
|
struct lpfc_nvmet_ctx_info *get_infop;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The current_infop for the MRQ a NVME command IU was received
|
|
|
|
* on is empty. Our goal is to replenish this MRQs context
|
|
|
|
* list from a another CPUs.
|
|
|
|
*
|
|
|
|
* First we need to pick a context list to start looking on.
|
|
|
|
* nvmet_ctx_start_cpu has available context the last time
|
|
|
|
* we needed to replenish this CPU where nvmet_ctx_next_cpu
|
|
|
|
* is just the next sequential CPU for this MRQ.
|
|
|
|
*/
|
|
|
|
if (current_infop->nvmet_ctx_start_cpu)
|
|
|
|
get_infop = current_infop->nvmet_ctx_start_cpu;
|
|
|
|
else
|
|
|
|
get_infop = current_infop->nvmet_ctx_next_cpu;
|
|
|
|
|
2019-01-29 03:14:35 +08:00
|
|
|
for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
|
2017-08-24 07:55:42 +08:00
|
|
|
if (get_infop == current_infop) {
|
|
|
|
get_infop = get_infop->nvmet_ctx_next_cpu;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
spin_lock(&get_infop->nvmet_ctx_list_lock);
|
|
|
|
|
|
|
|
/* Just take the entire context list, if there are any */
|
|
|
|
if (get_infop->nvmet_ctx_list_cnt) {
|
|
|
|
list_splice_init(&get_infop->nvmet_ctx_list,
|
|
|
|
¤t_infop->nvmet_ctx_list);
|
|
|
|
current_infop->nvmet_ctx_list_cnt =
|
|
|
|
get_infop->nvmet_ctx_list_cnt - 1;
|
|
|
|
get_infop->nvmet_ctx_list_cnt = 0;
|
|
|
|
spin_unlock(&get_infop->nvmet_ctx_list_lock);
|
|
|
|
|
|
|
|
current_infop->nvmet_ctx_start_cpu = get_infop;
|
|
|
|
list_remove_head(¤t_infop->nvmet_ctx_list,
|
|
|
|
ctx_buf, struct lpfc_nvmet_ctxbuf,
|
|
|
|
list);
|
|
|
|
return ctx_buf;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Otherwise, move on to the next CPU for this MRQ */
|
|
|
|
spin_unlock(&get_infop->nvmet_ctx_list_lock);
|
|
|
|
get_infop = get_infop->nvmet_ctx_next_cpu;
|
|
|
|
}
|
|
|
|
|
2017-09-30 08:34:31 +08:00
|
|
|
#endif
|
2017-08-24 07:55:42 +08:00
|
|
|
/* Nothing found, all contexts for the MRQ are in-flight */
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
/**
|
|
|
|
* lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
|
|
|
|
* @phba: pointer to lpfc hba data structure.
|
2017-08-24 07:55:42 +08:00
|
|
|
* @idx: relative index of MRQ vector
|
2017-02-13 05:52:37 +08:00
|
|
|
* @nvmebuf: pointer to lpfc nvme command HBQ data structure.
|
scsi: lpfc: Separate CQ processing for nvmet_fc upcalls
Currently the driver is notified of new command frame receipt by CQEs. As
part of the CQE processing, the driver upcalls the nvmet_fc transport to
deliver the command. nvmet_fc, as part of receiving the command builds out
a context for it, where one of the first steps is to allocate memory for
the io.
When running with tests that do large ios (1MB), it was found on some
systems, the total number of outstanding I/O's, at 1MB per, completely
consumed the system's memory. Thus additional ios were getting blocked in
the memory allocator. Given that this blocked the lpfc thread processing
CQEs, there were lots of other commands that were received and which are
then held up, and given CQEs are serially processed, the aggregate delays
for an IO waiting behind the others became cummulative - enough so that the
initiator hit timeouts for the ios.
The basic fix is to avoid the direct upcall and instead schedule a work
item for each io as it is received. This allows the cq processing to
complete very quickly, and each io can then run or block on it's own.
However, this general solution hurts latency when there are few ios. As
such, implemented the fix such that the driver watches how many CQEs it has
processed sequentially in one run. As long as the count is below a
threshold, the direct nvmet_fc upcall will be made. Only when the count is
exceeded will it revert to work scheduling.
Given that debug of this showed a surprisingly long delay in cq processing,
the io timer stats were updated to better reflect the processing of the
different points.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2019-05-22 08:48:55 +08:00
|
|
|
* @isr_timestamp: in jiffies.
|
|
|
|
* @cqflag: cq processing information regarding workload.
|
2017-02-13 05:52:37 +08:00
|
|
|
*
|
|
|
|
* This routine is used for processing the WQE associated with a unsolicited
|
|
|
|
* event. It first determines whether there is an existing ndlp that matches
|
|
|
|
* the DID from the unsolicited WQE. If not, it will create a new one with
|
|
|
|
* the DID from the unsolicited WQE. The ELS command from the unsolicited
|
|
|
|
* WQE is then used to invoke the proper routine and to set up proper state
|
|
|
|
* of the discovery state machine.
|
|
|
|
**/
|
|
|
|
static void
|
|
|
|
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
|
2017-08-24 07:55:42 +08:00
|
|
|
uint32_t idx,
|
2017-02-13 05:52:37 +08:00
|
|
|
struct rqb_dmabuf *nvmebuf,
|
scsi: lpfc: Separate CQ processing for nvmet_fc upcalls
Currently the driver is notified of new command frame receipt by CQEs. As
part of the CQE processing, the driver upcalls the nvmet_fc transport to
deliver the command. nvmet_fc, as part of receiving the command builds out
a context for it, where one of the first steps is to allocate memory for
the io.
When running with tests that do large ios (1MB), it was found on some
systems, the total number of outstanding I/O's, at 1MB per, completely
consumed the system's memory. Thus additional ios were getting blocked in
the memory allocator. Given that this blocked the lpfc thread processing
CQEs, there were lots of other commands that were received and which are
then held up, and given CQEs are serially processed, the aggregate delays
for an IO waiting behind the others became cummulative - enough so that the
initiator hit timeouts for the ios.
The basic fix is to avoid the direct upcall and instead schedule a work
item for each io as it is received. This allows the cq processing to
complete very quickly, and each io can then run or block on it's own.
However, this general solution hurts latency when there are few ios. As
such, implemented the fix such that the driver watches how many CQEs it has
processed sequentially in one run. As long as the count is below a
threshold, the direct nvmet_fc upcall will be made. Only when the count is
exceeded will it revert to work scheduling.
Given that debug of this showed a surprisingly long delay in cq processing,
the io timer stats were updated to better reflect the processing of the
different points.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2019-05-22 08:48:55 +08:00
|
|
|
uint64_t isr_timestamp,
|
|
|
|
uint8_t cqflag)
|
2017-02-13 05:52:37 +08:00
|
|
|
{
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp;
|
|
|
|
struct lpfc_nvmet_tgtport *tgtp;
|
|
|
|
struct fc_frame_header *fc_hdr;
|
2017-05-16 06:20:45 +08:00
|
|
|
struct lpfc_nvmet_ctxbuf *ctx_buf;
|
2017-08-24 07:55:42 +08:00
|
|
|
struct lpfc_nvmet_ctx_info *current_infop;
|
2019-01-29 03:14:39 +08:00
|
|
|
uint32_t size, oxid, sid, qno;
|
2017-05-16 06:20:45 +08:00
|
|
|
unsigned long iflag;
|
2017-08-24 07:55:42 +08:00
|
|
|
int current_cpu;
|
2017-02-13 05:52:37 +08:00
|
|
|
|
2017-08-25 07:09:59 +08:00
|
|
|
if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
|
|
|
|
return;
|
|
|
|
|
2017-05-16 06:20:45 +08:00
|
|
|
ctx_buf = NULL;
|
2017-02-13 05:52:37 +08:00
|
|
|
if (!nvmebuf || !phba->targetport) {
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
2017-05-16 06:20:45 +08:00
|
|
|
"6157 NVMET FCP Drop IO\n");
|
2019-01-29 03:14:39 +08:00
|
|
|
if (nvmebuf)
|
|
|
|
lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
|
|
|
|
return;
|
2017-02-13 05:52:37 +08:00
|
|
|
}
|
|
|
|
|
2017-08-24 07:55:42 +08:00
|
|
|
/*
|
|
|
|
* Get a pointer to the context list for this MRQ based on
|
|
|
|
* the CPU this MRQ IRQ is associated with. If the CPU association
|
|
|
|
* changes from our initial assumption, the context list could
|
|
|
|
* be empty, thus it would need to be replenished with the
|
|
|
|
* context list from another CPU for this MRQ.
|
|
|
|
*/
|
2019-03-29 02:06:22 +08:00
|
|
|
current_cpu = raw_smp_processor_id();
|
2017-08-24 07:55:42 +08:00
|
|
|
current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
|
|
|
|
spin_lock_irqsave(¤t_infop->nvmet_ctx_list_lock, iflag);
|
|
|
|
if (current_infop->nvmet_ctx_list_cnt) {
|
|
|
|
list_remove_head(¤t_infop->nvmet_ctx_list,
|
2017-05-16 06:20:45 +08:00
|
|
|
ctx_buf, struct lpfc_nvmet_ctxbuf, list);
|
2017-08-24 07:55:42 +08:00
|
|
|
current_infop->nvmet_ctx_list_cnt--;
|
2017-06-16 13:56:45 +08:00
|
|
|
} else {
|
2017-08-24 07:55:42 +08:00
|
|
|
ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
|
2017-05-16 06:20:45 +08:00
|
|
|
}
|
2017-08-24 07:55:42 +08:00
|
|
|
spin_unlock_irqrestore(¤t_infop->nvmet_ctx_list_lock, iflag);
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
|
|
|
|
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
|
2017-05-16 06:20:46 +08:00
|
|
|
size = nvmebuf->bytes_recv;
|
2017-02-13 05:52:37 +08:00
|
|
|
|
2017-05-16 06:20:46 +08:00
|
|
|
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
|
|
|
|
if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
|
2019-01-29 03:14:24 +08:00
|
|
|
if (current_cpu < LPFC_CHECK_CPU_CNT) {
|
|
|
|
if (idx != current_cpu)
|
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
|
|
|
|
"6703 CPU Check rcv: "
|
|
|
|
"cpu %d expect %d\n",
|
|
|
|
current_cpu, idx);
|
|
|
|
phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++;
|
|
|
|
}
|
2017-05-16 06:20:46 +08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
|
2019-03-29 02:06:22 +08:00
|
|
|
oxid, size, raw_smp_processor_id());
|
2017-05-16 06:20:46 +08:00
|
|
|
|
2018-01-31 07:58:52 +08:00
|
|
|
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
|
|
|
|
|
2017-05-16 06:20:45 +08:00
|
|
|
if (!ctx_buf) {
|
2017-05-16 06:20:46 +08:00
|
|
|
/* Queue this NVME IO to process later */
|
|
|
|
spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
|
|
|
|
list_add_tail(&nvmebuf->hbuf.list,
|
|
|
|
&phba->sli4_hba.lpfc_nvmet_io_wait_list);
|
|
|
|
phba->sli4_hba.nvmet_io_wait_cnt++;
|
|
|
|
phba->sli4_hba.nvmet_io_wait_total++;
|
|
|
|
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
|
|
|
|
iflag);
|
|
|
|
|
|
|
|
/* Post a brand new DMA buffer to RQ */
|
|
|
|
qno = nvmebuf->idx;
|
|
|
|
lpfc_post_rq_buffer(
|
|
|
|
phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
|
|
|
|
phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
|
2018-01-31 07:58:52 +08:00
|
|
|
|
|
|
|
atomic_inc(&tgtp->defer_ctx);
|
2017-02-13 05:52:37 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
sid = sli4_sid_from_fc_hdr(fc_hdr);
|
|
|
|
|
2017-05-16 06:20:45 +08:00
|
|
|
ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
|
2019-05-22 08:48:56 +08:00
|
|
|
spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
|
|
|
|
list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
|
|
|
|
spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
|
2017-06-02 12:06:58 +08:00
|
|
|
if (ctxp->state != LPFC_NVMET_STE_FREE) {
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
|
|
|
"6414 NVMET Context corrupt %d %d oxid x%x\n",
|
|
|
|
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
|
|
|
|
}
|
2017-02-13 05:52:37 +08:00
|
|
|
ctxp->wqeq = NULL;
|
|
|
|
ctxp->txrdy = NULL;
|
|
|
|
ctxp->offset = 0;
|
|
|
|
ctxp->phba = phba;
|
|
|
|
ctxp->size = size;
|
|
|
|
ctxp->oxid = oxid;
|
|
|
|
ctxp->sid = sid;
|
2017-08-24 07:55:42 +08:00
|
|
|
ctxp->idx = idx;
|
2017-02-13 05:52:37 +08:00
|
|
|
ctxp->state = LPFC_NVMET_STE_RCV;
|
|
|
|
ctxp->entry_cnt = 1;
|
|
|
|
ctxp->flag = 0;
|
2017-05-16 06:20:45 +08:00
|
|
|
ctxp->ctxbuf = ctx_buf;
|
2017-12-09 09:18:04 +08:00
|
|
|
ctxp->rqb_buffer = (void *)nvmebuf;
|
2019-01-29 03:14:26 +08:00
|
|
|
ctxp->hdwq = NULL;
|
2017-04-22 07:04:59 +08:00
|
|
|
spin_lock_init(&ctxp->ctxlock);
|
2017-02-13 05:52:37 +08:00
|
|
|
|
2017-02-13 05:52:38 +08:00
|
|
|
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
|
scsi: lpfc: Separate CQ processing for nvmet_fc upcalls
Currently the driver is notified of new command frame receipt by CQEs. As
part of the CQE processing, the driver upcalls the nvmet_fc transport to
deliver the command. nvmet_fc, as part of receiving the command builds out
a context for it, where one of the first steps is to allocate memory for
the io.
When running with tests that do large ios (1MB), it was found on some
systems, the total number of outstanding I/O's, at 1MB per, completely
consumed the system's memory. Thus additional ios were getting blocked in
the memory allocator. Given that this blocked the lpfc thread processing
CQEs, there were lots of other commands that were received and which are
then held up, and given CQEs are serially processed, the aggregate delays
for an IO waiting behind the others became cummulative - enough so that the
initiator hit timeouts for the ios.
The basic fix is to avoid the direct upcall and instead schedule a work
item for each io as it is received. This allows the cq processing to
complete very quickly, and each io can then run or block on it's own.
However, this general solution hurts latency when there are few ios. As
such, implemented the fix such that the driver watches how many CQEs it has
processed sequentially in one run. As long as the count is below a
threshold, the direct nvmet_fc upcall will be made. Only when the count is
exceeded will it revert to work scheduling.
Given that debug of this showed a surprisingly long delay in cq processing,
the io timer stats were updated to better reflect the processing of the
different points.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2019-05-22 08:48:55 +08:00
|
|
|
if (isr_timestamp)
|
2017-02-13 05:52:38 +08:00
|
|
|
ctxp->ts_isr_cmd = isr_timestamp;
|
scsi: lpfc: Separate CQ processing for nvmet_fc upcalls
Currently the driver is notified of new command frame receipt by CQEs. As
part of the CQE processing, the driver upcalls the nvmet_fc transport to
deliver the command. nvmet_fc, as part of receiving the command builds out
a context for it, where one of the first steps is to allocate memory for
the io.
When running with tests that do large ios (1MB), it was found on some
systems, the total number of outstanding I/O's, at 1MB per, completely
consumed the system's memory. Thus additional ios were getting blocked in
the memory allocator. Given that this blocked the lpfc thread processing
CQEs, there were lots of other commands that were received and which are
then held up, and given CQEs are serially processed, the aggregate delays
for an IO waiting behind the others became cummulative - enough so that the
initiator hit timeouts for the ios.
The basic fix is to avoid the direct upcall and instead schedule a work
item for each io as it is received. This allows the cq processing to
complete very quickly, and each io can then run or block on it's own.
However, this general solution hurts latency when there are few ios. As
such, implemented the fix such that the driver watches how many CQEs it has
processed sequentially in one run. As long as the count is below a
threshold, the direct nvmet_fc upcall will be made. Only when the count is
exceeded will it revert to work scheduling.
Given that debug of this showed a surprisingly long delay in cq processing,
the io timer stats were updated to better reflect the processing of the
different points.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2019-05-22 08:48:55 +08:00
|
|
|
ctxp->ts_cmd_nvme = 0;
|
|
|
|
ctxp->ts_nvme_data = 0;
|
|
|
|
ctxp->ts_data_wqput = 0;
|
|
|
|
ctxp->ts_isr_data = 0;
|
|
|
|
ctxp->ts_data_nvme = 0;
|
|
|
|
ctxp->ts_nvme_status = 0;
|
|
|
|
ctxp->ts_status_wqput = 0;
|
|
|
|
ctxp->ts_isr_status = 0;
|
|
|
|
ctxp->ts_status_nvme = 0;
|
2017-02-13 05:52:38 +08:00
|
|
|
#endif
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
atomic_inc(&tgtp->rcv_fcp_cmd_in);
|
scsi: lpfc: Separate CQ processing for nvmet_fc upcalls
Currently the driver is notified of new command frame receipt by CQEs. As
part of the CQE processing, the driver upcalls the nvmet_fc transport to
deliver the command. nvmet_fc, as part of receiving the command builds out
a context for it, where one of the first steps is to allocate memory for
the io.
When running with tests that do large ios (1MB), it was found on some
systems, the total number of outstanding I/O's, at 1MB per, completely
consumed the system's memory. Thus additional ios were getting blocked in
the memory allocator. Given that this blocked the lpfc thread processing
CQEs, there were lots of other commands that were received and which are
then held up, and given CQEs are serially processed, the aggregate delays
for an IO waiting behind the others became cummulative - enough so that the
initiator hit timeouts for the ios.
The basic fix is to avoid the direct upcall and instead schedule a work
item for each io as it is received. This allows the cq processing to
complete very quickly, and each io can then run or block on it's own.
However, this general solution hurts latency when there are few ios. As
such, implemented the fix such that the driver watches how many CQEs it has
processed sequentially in one run. As long as the count is below a
threshold, the direct nvmet_fc upcall will be made. Only when the count is
exceeded will it revert to work scheduling.
Given that debug of this showed a surprisingly long delay in cq processing,
the io timer stats were updated to better reflect the processing of the
different points.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2019-05-22 08:48:55 +08:00
|
|
|
/* check for cq processing load */
|
|
|
|
if (!cqflag) {
|
|
|
|
lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
|
|
|
|
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
|
|
|
|
"6325 Unable to queue work for oxid x%x. "
|
|
|
|
"FCP Drop IO [x%x x%x x%x]\n",
|
|
|
|
ctxp->oxid,
|
|
|
|
atomic_read(&tgtp->rcv_fcp_cmd_in),
|
|
|
|
atomic_read(&tgtp->rcv_fcp_cmd_out),
|
|
|
|
atomic_read(&tgtp->xmt_fcp_release));
|
|
|
|
|
|
|
|
spin_lock_irqsave(&ctxp->ctxlock, iflag);
|
|
|
|
lpfc_nvmet_defer_release(phba, ctxp);
|
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
|
|
|
|
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
|
|
|
|
}
|
2017-02-13 05:52:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
|
|
|
|
* @phba: pointer to lpfc hba data structure.
|
|
|
|
* @pring: pointer to a SLI ring.
|
|
|
|
* @nvmebuf: pointer to received nvme data structure.
|
|
|
|
*
|
|
|
|
* This routine is used to process an unsolicited event received from a SLI
|
|
|
|
* (Service Level Interface) ring. The actual processing of the data buffer
|
|
|
|
* associated with the unsolicited event is done by invoking the routine
|
|
|
|
* lpfc_nvmet_unsol_ls_buffer() after properly set up the buffer from the
|
|
|
|
* SLI RQ on which the unsolicited event was received.
|
|
|
|
**/
|
|
|
|
void
|
|
|
|
lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|
|
|
struct lpfc_iocbq *piocb)
|
|
|
|
{
|
|
|
|
struct lpfc_dmabuf *d_buf;
|
|
|
|
struct hbq_dmabuf *nvmebuf;
|
|
|
|
|
|
|
|
d_buf = piocb->context2;
|
|
|
|
nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
|
|
|
|
|
|
|
|
if (phba->nvmet_support == 0) {
|
|
|
|
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
|
|
|
|
* @phba: pointer to lpfc hba data structure.
|
2017-08-24 07:55:42 +08:00
|
|
|
* @idx: relative index of MRQ vector
|
2017-02-13 05:52:37 +08:00
|
|
|
* @nvmebuf: pointer to received nvme data structure.
|
scsi: lpfc: Separate CQ processing for nvmet_fc upcalls
Currently the driver is notified of new command frame receipt by CQEs. As
part of the CQE processing, the driver upcalls the nvmet_fc transport to
deliver the command. nvmet_fc, as part of receiving the command builds out
a context for it, where one of the first steps is to allocate memory for
the io.
When running with tests that do large ios (1MB), it was found on some
systems, the total number of outstanding I/O's, at 1MB per, completely
consumed the system's memory. Thus additional ios were getting blocked in
the memory allocator. Given that this blocked the lpfc thread processing
CQEs, there were lots of other commands that were received and which are
then held up, and given CQEs are serially processed, the aggregate delays
for an IO waiting behind the others became cummulative - enough so that the
initiator hit timeouts for the ios.
The basic fix is to avoid the direct upcall and instead schedule a work
item for each io as it is received. This allows the cq processing to
complete very quickly, and each io can then run or block on it's own.
However, this general solution hurts latency when there are few ios. As
such, implemented the fix such that the driver watches how many CQEs it has
processed sequentially in one run. As long as the count is below a
threshold, the direct nvmet_fc upcall will be made. Only when the count is
exceeded will it revert to work scheduling.
Given that debug of this showed a surprisingly long delay in cq processing,
the io timer stats were updated to better reflect the processing of the
different points.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2019-05-22 08:48:55 +08:00
|
|
|
* @isr_timestamp: in jiffies.
|
|
|
|
* @cqflag: cq processing information regarding workload.
|
2017-02-13 05:52:37 +08:00
|
|
|
*
|
|
|
|
* This routine is used to process an unsolicited event received from a SLI
|
|
|
|
* (Service Level Interface) ring. The actual processing of the data buffer
|
|
|
|
* associated with the unsolicited event is done by invoking the routine
|
|
|
|
* lpfc_nvmet_unsol_fcp_buffer() after properly set up the buffer from the
|
|
|
|
* SLI RQ on which the unsolicited event was received.
|
|
|
|
**/
|
|
|
|
void
|
|
|
|
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
|
2017-08-24 07:55:42 +08:00
|
|
|
uint32_t idx,
|
2017-02-13 05:52:37 +08:00
|
|
|
struct rqb_dmabuf *nvmebuf,
|
scsi: lpfc: Separate CQ processing for nvmet_fc upcalls
Currently the driver is notified of new command frame receipt by CQEs. As
part of the CQE processing, the driver upcalls the nvmet_fc transport to
deliver the command. nvmet_fc, as part of receiving the command builds out
a context for it, where one of the first steps is to allocate memory for
the io.
When running with tests that do large ios (1MB), it was found on some
systems, the total number of outstanding I/O's, at 1MB per, completely
consumed the system's memory. Thus additional ios were getting blocked in
the memory allocator. Given that this blocked the lpfc thread processing
CQEs, there were lots of other commands that were received and which are
then held up, and given CQEs are serially processed, the aggregate delays
for an IO waiting behind the others became cummulative - enough so that the
initiator hit timeouts for the ios.
The basic fix is to avoid the direct upcall and instead schedule a work
item for each io as it is received. This allows the cq processing to
complete very quickly, and each io can then run or block on it's own.
However, this general solution hurts latency when there are few ios. As
such, implemented the fix such that the driver watches how many CQEs it has
processed sequentially in one run. As long as the count is below a
threshold, the direct nvmet_fc upcall will be made. Only when the count is
exceeded will it revert to work scheduling.
Given that debug of this showed a surprisingly long delay in cq processing,
the io timer stats were updated to better reflect the processing of the
different points.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2019-05-22 08:48:55 +08:00
|
|
|
uint64_t isr_timestamp,
|
|
|
|
uint8_t cqflag)
|
2017-02-13 05:52:37 +08:00
|
|
|
{
|
|
|
|
if (phba->nvmet_support == 0) {
|
2017-05-16 06:20:45 +08:00
|
|
|
lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
|
2017-02-13 05:52:37 +08:00
|
|
|
return;
|
|
|
|
}
|
scsi: lpfc: Separate CQ processing for nvmet_fc upcalls
Currently the driver is notified of new command frame receipt by CQEs. As
part of the CQE processing, the driver upcalls the nvmet_fc transport to
deliver the command. nvmet_fc, as part of receiving the command builds out
a context for it, where one of the first steps is to allocate memory for
the io.
When running with tests that do large ios (1MB), it was found on some
systems, the total number of outstanding I/O's, at 1MB per, completely
consumed the system's memory. Thus additional ios were getting blocked in
the memory allocator. Given that this blocked the lpfc thread processing
CQEs, there were lots of other commands that were received and which are
then held up, and given CQEs are serially processed, the aggregate delays
for an IO waiting behind the others became cummulative - enough so that the
initiator hit timeouts for the ios.
The basic fix is to avoid the direct upcall and instead schedule a work
item for each io as it is received. This allows the cq processing to
complete very quickly, and each io can then run or block on it's own.
However, this general solution hurts latency when there are few ios. As
such, implemented the fix such that the driver watches how many CQEs it has
processed sequentially in one run. As long as the count is below a
threshold, the direct nvmet_fc upcall will be made. Only when the count is
exceeded will it revert to work scheduling.
Given that debug of this showed a surprisingly long delay in cq processing,
the io timer stats were updated to better reflect the processing of the
different points.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2019-05-22 08:48:55 +08:00
|
|
|
lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
|
2017-02-13 05:52:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
 * @phba: pointer to a host N_Port data structure.
 * @ctxp: Context info for NVME LS Request
 * @rspbuf: DMA buffer of NVME command.
 * @rspsize: size of the NVME command.
 *
 * Allocates an iocbq from the driver free list and builds an
 * XMIT_SEQUENCE64 WQE that transmits an NVMET LS response of @rspsize
 * bytes at DMA address @rspbuf back to the initiator identified by
 * ctxp->sid / ctxp->oxid. The reference count on the ndlp is incremented
 * by 1 and the reference is stored in context1 of the WQE so the
 * command's completion handler can access it later; context2 points back
 * at @ctxp.
 *
 * Return code
 *   Pointer to the newly allocated/prepared nvme wqe data structure
 *   NULL - when nvme wqe data structure allocation/preparation failed
 *          (link down, no free iocbq, no usable ndlp, or ndlp ref failure)
 **/
static struct lpfc_iocbq *
lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
		       struct lpfc_nvmet_rcv_ctx *ctxp,
		       dma_addr_t rspbuf, uint16_t rspsize)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;

	/* No point building a WQE the port cannot transmit */
	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6104 NVMET prep LS wqe: link err: "
				"NPORT x%x oxid:x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	/* Allocate buffer for command wqe */
	nvmewqe = lpfc_sli_get_iocbq(phba);
	if (nvmewqe == NULL) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6105 NVMET prep LS wqe: No WQE: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	/* The remote node must be active and logged in (un/mapped) */
	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6106 NVMET prep LS wqe: No ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		goto nvme_wqe_free_wqeq_exit;
	}
	ctxp->wqeq = nvmewqe;

	/* prevent preparing wqe with NULL ndlp reference */
	nvmewqe->context1 = lpfc_nlp_get(ndlp);
	if (nvmewqe->context1 == NULL)
		goto nvme_wqe_free_wqeq_exit;
	nvmewqe->context2 = ctxp;

	wqe = &nvmewqe->wqe;
	/* NOTE(review): clears sizeof(union lpfc_wqe) while wqe points at a
	 * union lpfc_wqe128 - presumably intentional since XMIT_SEQUENCE64
	 * only uses the first 64 bytes; confirm against the SLI-4 WQE layout.
	 */
	memset(wqe, 0, sizeof(union lpfc_wqe));

	/* Words 0 - 2 : single 64-bit BDE describing the response payload */
	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));

	/* Word 3 : left zero by the memset above */

	/* Word 4 : left zero by the memset above */

	/* Word 5 : frame header controls - last seq of NVME ELS4 reply */
	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);

	/* Word 6 : RPI context tag and the XRI for this exchange */
	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);

	/* Word 7 : command code and class of service */
	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;

	/* Word 9 : driver request tag and the OX_ID of the original LS */
	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);

	/* Word 10 : BDE in WQE, write I/O, length location in word 12 */
	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 11 : completion queue routing */
	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	/* Word 12 : total transmit length */
	wqe->xmit_sequence.xmit_len = rspsize;

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Xmit NVMET response to remote NPORT <did> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6039 Xmit NVMET LS response to remote "
			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
			rspsize);
	return nvmewqe;

	/* Error exit: undo context linkage and return the iocbq */
nvme_wqe_free_wqeq_exit:
	nvmewqe->context2 = NULL;
	nvmewqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, nvmewqe);
	return NULL;
}
|
|
|
|
|
|
|
|
|
|
|
|
static struct lpfc_iocbq *
|
|
|
|
lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp)
|
|
|
|
{
|
|
|
|
struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
|
|
|
|
struct lpfc_nvmet_tgtport *tgtp;
|
|
|
|
struct sli4_sge *sgl;
|
|
|
|
struct lpfc_nodelist *ndlp;
|
|
|
|
struct lpfc_iocbq *nvmewqe;
|
|
|
|
struct scatterlist *sgel;
|
|
|
|
union lpfc_wqe128 *wqe;
|
2018-02-23 00:18:48 +08:00
|
|
|
struct ulp_bde64 *bde;
|
2017-02-13 05:52:37 +08:00
|
|
|
uint32_t *txrdy;
|
|
|
|
dma_addr_t physaddr;
|
|
|
|
int i, cnt;
|
2018-02-23 00:18:48 +08:00
|
|
|
int do_pbde;
|
2017-02-13 05:52:37 +08:00
|
|
|
int xc = 1;
|
|
|
|
|
|
|
|
if (!lpfc_is_link_up(phba)) {
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
2017-06-02 12:06:58 +08:00
|
|
|
"6107 NVMET prep FCP wqe: link err:"
|
|
|
|
"NPORT x%x oxid x%x ste %d\n",
|
|
|
|
ctxp->sid, ctxp->oxid, ctxp->state);
|
2017-02-13 05:52:37 +08:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
|
|
|
|
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
|
|
|
|
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
|
|
|
|
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
2017-06-02 12:06:58 +08:00
|
|
|
"6108 NVMET prep FCP wqe: no ndlp: "
|
|
|
|
"NPORT x%x oxid x%x ste %d\n",
|
|
|
|
ctxp->sid, ctxp->oxid, ctxp->state);
|
2017-02-13 05:52:37 +08:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-11-21 08:00:43 +08:00
|
|
|
if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
|
2017-02-13 05:52:37 +08:00
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
2017-06-02 12:06:58 +08:00
|
|
|
"6109 NVMET prep FCP wqe: seg cnt err: "
|
|
|
|
"NPORT x%x oxid x%x ste %d cnt %d\n",
|
|
|
|
ctxp->sid, ctxp->oxid, ctxp->state,
|
|
|
|
phba->cfg_nvme_seg_cnt);
|
2017-02-13 05:52:37 +08:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
|
|
|
|
nvmewqe = ctxp->wqeq;
|
|
|
|
if (nvmewqe == NULL) {
|
|
|
|
/* Allocate buffer for command wqe */
|
2017-05-16 06:20:45 +08:00
|
|
|
nvmewqe = ctxp->ctxbuf->iocbq;
|
2017-02-13 05:52:37 +08:00
|
|
|
if (nvmewqe == NULL) {
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
2017-06-02 12:06:58 +08:00
|
|
|
"6110 NVMET prep FCP wqe: No "
|
|
|
|
"WQE: NPORT x%x oxid x%x ste %d\n",
|
|
|
|
ctxp->sid, ctxp->oxid, ctxp->state);
|
2017-02-13 05:52:37 +08:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
ctxp->wqeq = nvmewqe;
|
|
|
|
xc = 0; /* create new XRI */
|
|
|
|
nvmewqe->sli4_lxritag = NO_XRI;
|
|
|
|
nvmewqe->sli4_xritag = NO_XRI;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Sanity check */
|
|
|
|
if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
|
|
|
|
(ctxp->entry_cnt == 1)) ||
|
2017-06-02 12:06:58 +08:00
|
|
|
(ctxp->state == LPFC_NVMET_STE_DATA)) {
|
2018-03-06 04:04:03 +08:00
|
|
|
wqe = &nvmewqe->wqe;
|
2017-02-13 05:52:37 +08:00
|
|
|
} else {
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
2017-06-02 12:06:58 +08:00
|
|
|
"6111 Wrong state NVMET FCP: %d cnt %d\n",
|
|
|
|
ctxp->state, ctxp->entry_cnt);
|
2017-02-13 05:52:37 +08:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-05-16 06:20:45 +08:00
|
|
|
sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
|
2017-02-13 05:52:37 +08:00
|
|
|
switch (rsp->op) {
|
|
|
|
case NVMET_FCOP_READDATA:
|
|
|
|
case NVMET_FCOP_READDATA_RSP:
|
2018-03-06 04:04:05 +08:00
|
|
|
/* From the tsend template, initialize words 7 - 11 */
|
|
|
|
memcpy(&wqe->words[7],
|
|
|
|
&lpfc_tsend_cmd_template.words[7],
|
|
|
|
sizeof(uint32_t) * 5);
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
/* Words 0 - 2 : The first sg segment */
|
|
|
|
sgel = &rsp->sg[0];
|
|
|
|
physaddr = sg_dma_address(sgel);
|
|
|
|
wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
|
|
|
|
wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
|
|
|
|
wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
|
|
|
|
wqe->fcp_tsend.bde.addrHigh =
|
|
|
|
cpu_to_le32(putPaddrHigh(physaddr));
|
|
|
|
|
|
|
|
/* Word 3 */
|
|
|
|
wqe->fcp_tsend.payload_offset_len = 0;
|
|
|
|
|
|
|
|
/* Word 4 */
|
|
|
|
wqe->fcp_tsend.relative_offset = ctxp->offset;
|
|
|
|
|
|
|
|
/* Word 5 */
|
2018-03-06 04:04:05 +08:00
|
|
|
wqe->fcp_tsend.reserved = 0;
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
/* Word 6 */
|
|
|
|
bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
|
|
|
|
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
|
|
|
|
bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
|
|
|
|
nvmewqe->sli4_xritag);
|
|
|
|
|
2018-03-06 04:04:05 +08:00
|
|
|
/* Word 7 - set ar later */
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
/* Word 8 */
|
|
|
|
wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
|
|
|
|
|
|
|
|
/* Word 9 */
|
|
|
|
bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
|
|
|
|
bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
|
|
|
|
|
2018-03-06 04:04:05 +08:00
|
|
|
/* Word 10 - set wqes later, in template xc=1 */
|
|
|
|
if (!xc)
|
|
|
|
bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
|
2017-02-13 05:52:37 +08:00
|
|
|
|
2018-03-06 04:04:05 +08:00
|
|
|
/* Word 11 - set sup, irsp, irsplen later */
|
|
|
|
do_pbde = 0;
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
/* Word 12 */
|
|
|
|
wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
|
|
|
|
|
|
|
|
/* Setup 2 SKIP SGEs */
|
|
|
|
sgl->addr_hi = 0;
|
|
|
|
sgl->addr_lo = 0;
|
|
|
|
sgl->word2 = 0;
|
|
|
|
bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
|
|
|
|
sgl->word2 = cpu_to_le32(sgl->word2);
|
|
|
|
sgl->sge_len = 0;
|
|
|
|
sgl++;
|
|
|
|
sgl->addr_hi = 0;
|
|
|
|
sgl->addr_lo = 0;
|
|
|
|
sgl->word2 = 0;
|
|
|
|
bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
|
|
|
|
sgl->word2 = cpu_to_le32(sgl->word2);
|
|
|
|
sgl->sge_len = 0;
|
|
|
|
sgl++;
|
|
|
|
if (rsp->op == NVMET_FCOP_READDATA_RSP) {
|
|
|
|
atomic_inc(&tgtp->xmt_fcp_read_rsp);
|
2018-03-06 04:04:05 +08:00
|
|
|
|
|
|
|
/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
|
|
|
|
|
2018-01-31 07:58:58 +08:00
|
|
|
if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
|
|
|
|
if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
|
|
|
|
bf_set(wqe_sup,
|
|
|
|
&wqe->fcp_tsend.wqe_com, 1);
|
2017-02-13 05:52:37 +08:00
|
|
|
} else {
|
|
|
|
bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
|
|
|
|
bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
|
|
|
|
bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
|
|
|
|
((rsp->rsplen >> 2) - 1));
|
|
|
|
memcpy(&wqe->words[16], rsp->rspaddr,
|
|
|
|
rsp->rsplen);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
atomic_inc(&tgtp->xmt_fcp_read);
|
|
|
|
|
2018-03-06 04:04:05 +08:00
|
|
|
/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
|
2017-02-13 05:52:37 +08:00
|
|
|
bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case NVMET_FCOP_WRITEDATA:
|
2018-03-06 04:04:05 +08:00
|
|
|
/* From the treceive template, initialize words 3 - 11 */
|
|
|
|
memcpy(&wqe->words[3],
|
|
|
|
&lpfc_treceive_cmd_template.words[3],
|
|
|
|
sizeof(uint32_t) * 9);
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
/* Words 0 - 2 : The first sg segment */
|
2017-07-06 16:13:05 +08:00
|
|
|
txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
|
2017-02-13 05:52:37 +08:00
|
|
|
GFP_KERNEL, &physaddr);
|
|
|
|
if (!txrdy) {
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
|
|
|
"6041 Bad txrdy buffer: oxid x%x\n",
|
|
|
|
ctxp->oxid);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
ctxp->txrdy = txrdy;
|
|
|
|
ctxp->txrdy_phys = physaddr;
|
|
|
|
wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
|
|
|
|
wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
|
|
|
|
wqe->fcp_treceive.bde.addrLow =
|
|
|
|
cpu_to_le32(putPaddrLow(physaddr));
|
|
|
|
wqe->fcp_treceive.bde.addrHigh =
|
|
|
|
cpu_to_le32(putPaddrHigh(physaddr));
|
|
|
|
|
|
|
|
/* Word 4 */
|
|
|
|
wqe->fcp_treceive.relative_offset = ctxp->offset;
|
|
|
|
|
|
|
|
/* Word 6 */
|
|
|
|
bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
|
|
|
|
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
|
|
|
|
bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
|
|
|
|
nvmewqe->sli4_xritag);
|
|
|
|
|
|
|
|
/* Word 7 */
|
|
|
|
|
|
|
|
/* Word 8 */
|
|
|
|
wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
|
|
|
|
|
|
|
|
/* Word 9 */
|
|
|
|
bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
|
|
|
|
bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
|
|
|
|
|
2018-03-06 04:04:05 +08:00
|
|
|
/* Word 10 - in template xc=1 */
|
|
|
|
if (!xc)
|
|
|
|
bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
|
2017-02-13 05:52:37 +08:00
|
|
|
|
2018-03-06 04:04:05 +08:00
|
|
|
/* Word 11 - set pbde later */
|
2018-06-26 23:24:26 +08:00
|
|
|
if (phba->cfg_enable_pbde) {
|
2018-03-06 04:04:05 +08:00
|
|
|
do_pbde = 1;
|
|
|
|
} else {
|
|
|
|
bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
|
|
|
|
do_pbde = 0;
|
|
|
|
}
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
/* Word 12 */
|
|
|
|
wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
|
|
|
|
|
|
|
|
/* Setup 1 TXRDY and 1 SKIP SGE */
|
|
|
|
txrdy[0] = 0;
|
|
|
|
txrdy[1] = cpu_to_be32(rsp->transfer_length);
|
|
|
|
txrdy[2] = 0;
|
|
|
|
|
|
|
|
sgl->addr_hi = putPaddrHigh(physaddr);
|
|
|
|
sgl->addr_lo = putPaddrLow(physaddr);
|
|
|
|
sgl->word2 = 0;
|
|
|
|
bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
|
|
|
|
sgl->word2 = cpu_to_le32(sgl->word2);
|
|
|
|
sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
|
|
|
|
sgl++;
|
|
|
|
sgl->addr_hi = 0;
|
|
|
|
sgl->addr_lo = 0;
|
|
|
|
sgl->word2 = 0;
|
|
|
|
bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
|
|
|
|
sgl->word2 = cpu_to_le32(sgl->word2);
|
|
|
|
sgl->sge_len = 0;
|
|
|
|
sgl++;
|
|
|
|
atomic_inc(&tgtp->xmt_fcp_write);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case NVMET_FCOP_RSP:
|
2018-03-06 04:04:05 +08:00
|
|
|
/* From the treceive template, initialize words 4 - 11 */
|
|
|
|
memcpy(&wqe->words[4],
|
|
|
|
&lpfc_trsp_cmd_template.words[4],
|
|
|
|
sizeof(uint32_t) * 8);
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
/* Words 0 - 2 */
|
|
|
|
physaddr = rsp->rspdma;
|
|
|
|
wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
|
|
|
|
wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
|
|
|
|
wqe->fcp_trsp.bde.addrLow =
|
|
|
|
cpu_to_le32(putPaddrLow(physaddr));
|
|
|
|
wqe->fcp_trsp.bde.addrHigh =
|
|
|
|
cpu_to_le32(putPaddrHigh(physaddr));
|
|
|
|
|
|
|
|
/* Word 3 */
|
|
|
|
wqe->fcp_trsp.response_len = rsp->rsplen;
|
|
|
|
|
|
|
|
/* Word 6 */
|
|
|
|
bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
|
|
|
|
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
|
|
|
|
bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
|
|
|
|
nvmewqe->sli4_xritag);
|
|
|
|
|
|
|
|
/* Word 7 */
|
|
|
|
|
|
|
|
/* Word 8 */
|
|
|
|
wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
|
|
|
|
|
|
|
|
/* Word 9 */
|
|
|
|
bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
|
|
|
|
bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
|
|
|
|
|
|
|
|
/* Word 10 */
|
2018-03-06 04:04:05 +08:00
|
|
|
if (xc)
|
|
|
|
bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
/* Word 11 */
|
2018-03-06 04:04:05 +08:00
|
|
|
/* In template wqes=0 irsp=0 irsplen=0 - good response */
|
|
|
|
if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
|
|
|
|
/* Bad response - embed it */
|
2017-02-13 05:52:37 +08:00
|
|
|
bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
|
|
|
|
bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
|
|
|
|
bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
|
|
|
|
((rsp->rsplen >> 2) - 1));
|
|
|
|
memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
|
|
|
|
}
|
2018-03-06 04:04:05 +08:00
|
|
|
do_pbde = 0;
|
|
|
|
|
|
|
|
/* Word 12 */
|
|
|
|
wqe->fcp_trsp.rsvd_12_15[0] = 0;
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
/* Use rspbuf, NOT sg list */
|
|
|
|
rsp->sg_cnt = 0;
|
|
|
|
sgl->word2 = 0;
|
|
|
|
atomic_inc(&tgtp->xmt_fcp_rsp);
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
|
|
|
|
"6064 Unknown Rsp Op %d\n",
|
|
|
|
rsp->op);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
nvmewqe->retry = 1;
|
|
|
|
nvmewqe->vport = phba->pport;
|
|
|
|
nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
|
|
|
|
nvmewqe->context1 = ndlp;
|
|
|
|
|
|
|
|
for (i = 0; i < rsp->sg_cnt; i++) {
|
|
|
|
sgel = &rsp->sg[i];
|
|
|
|
physaddr = sg_dma_address(sgel);
|
|
|
|
cnt = sg_dma_len(sgel);
|
|
|
|
sgl->addr_hi = putPaddrHigh(physaddr);
|
|
|
|
sgl->addr_lo = putPaddrLow(physaddr);
|
|
|
|
sgl->word2 = 0;
|
|
|
|
bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
|
|
|
|
bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
|
|
|
|
if ((i+1) == rsp->sg_cnt)
|
|
|
|
bf_set(lpfc_sli4_sge_last, sgl, 1);
|
|
|
|
sgl->word2 = cpu_to_le32(sgl->word2);
|
|
|
|
sgl->sge_len = cpu_to_le32(cnt);
|
2018-06-26 23:24:26 +08:00
|
|
|
if (i == 0) {
|
2018-02-23 00:18:48 +08:00
|
|
|
bde = (struct ulp_bde64 *)&wqe->words[13];
|
2018-06-26 23:24:26 +08:00
|
|
|
if (do_pbde) {
|
|
|
|
/* Words 13-15 (PBDE) */
|
|
|
|
bde->addrLow = sgl->addr_lo;
|
|
|
|
bde->addrHigh = sgl->addr_hi;
|
|
|
|
bde->tus.f.bdeSize =
|
|
|
|
le32_to_cpu(sgl->sge_len);
|
|
|
|
bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
|
|
|
|
bde->tus.w = cpu_to_le32(bde->tus.w);
|
|
|
|
} else {
|
|
|
|
memset(bde, 0, sizeof(struct ulp_bde64));
|
|
|
|
}
|
2018-02-23 00:18:48 +08:00
|
|
|
}
|
2017-02-13 05:52:37 +08:00
|
|
|
sgl++;
|
|
|
|
ctxp->offset += cnt;
|
|
|
|
}
|
2017-06-02 12:06:58 +08:00
|
|
|
ctxp->state = LPFC_NVMET_STE_DATA;
|
|
|
|
ctxp->entry_cnt++;
|
2017-02-13 05:52:37 +08:00
|
|
|
return nvmewqe;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for FCP cmds
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t result;
	unsigned long flags;
	bool released = false;	/* true once ctxp is off the ABTS list */

	/* The abort iocbq carries the NVMET receive context in context2 */
	ctxp = cmdwqe->context2;
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	/* Count the ABORT WQE completion only if an abort was issued */
	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	/* ctxlock serializes all reads/writes of ctxp->flag and state */
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->state = LPFC_NVMET_STE_DONE;

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		/* Nested lock: buf-list lock taken inside ctxlock with
		 * interrupts already disabled, hence plain spin_lock().
		 */
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	/* Abort processing for this exchange is now complete */
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6165 ABORT cmpl: oxid x%x flg x%x (%d) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	/* Break the iocbq's references to the context before release */
	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* This is the iocbq for the abort, not the command */
	lpfc_sli_release_iocbq(phba, cmdwqe);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}
|
|
|
|
|
|
|
|
/**
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
* lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
|
2017-02-13 05:52:37 +08:00
|
|
|
* @phba: Pointer to HBA context object.
|
|
|
|
* @cmdwqe: Pointer to driver command WQE object.
|
|
|
|
* @wcqe: Pointer to driver response CQE object.
|
|
|
|
*
|
|
|
|
* The function is called from SLI ring event handler with no
|
|
|
|
* lock held. This function is the completion handler for NVME ABTS for FCP cmds
|
|
|
|
* The function frees memory resources used for the NVME commands.
|
|
|
|
**/
|
|
|
|
static void
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
|
|
|
|
struct lpfc_wcqe_complete *wcqe)
|
2017-02-13 05:52:37 +08:00
|
|
|
{
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp;
|
|
|
|
struct lpfc_nvmet_tgtport *tgtp;
|
2017-04-12 02:32:29 +08:00
|
|
|
unsigned long flags;
|
2019-03-29 02:06:20 +08:00
|
|
|
uint32_t result;
|
2017-04-12 02:32:29 +08:00
|
|
|
bool released = false;
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
ctxp = cmdwqe->context2;
|
|
|
|
result = wcqe->parameter;
|
|
|
|
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
if (!ctxp) {
|
|
|
|
/* if context is clear, related io alrady complete */
|
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
|
|
|
|
"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
|
|
|
|
wcqe->word0, wcqe->total_data_placed,
|
|
|
|
result, wcqe->word3);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-06-02 12:07:09 +08:00
|
|
|
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
|
2019-01-29 03:14:40 +08:00
|
|
|
spin_lock_irqsave(&ctxp->ctxlock, flags);
|
2017-06-02 12:07:09 +08:00
|
|
|
if (ctxp->flag & LPFC_NVMET_ABORT_OP)
|
|
|
|
atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
|
|
|
|
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
/* Sanity check */
|
|
|
|
if (ctxp->state != LPFC_NVMET_STE_ABORT) {
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
|
|
|
|
"6112 ABTS Wrong state:%d oxid x%x\n",
|
|
|
|
ctxp->state, ctxp->oxid);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check if we already received a free context call
|
|
|
|
* and we have completed processing an abort situation.
|
|
|
|
*/
|
|
|
|
ctxp->state = LPFC_NVMET_STE_DONE;
|
|
|
|
if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
|
|
|
|
!(ctxp->flag & LPFC_NVMET_XBUSY)) {
|
2019-01-29 03:14:40 +08:00
|
|
|
spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
|
2019-05-22 08:48:56 +08:00
|
|
|
list_del_init(&ctxp->list);
|
2019-01-29 03:14:40 +08:00
|
|
|
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
released = true;
|
|
|
|
}
|
|
|
|
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
|
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
|
2017-05-16 06:20:40 +08:00
|
|
|
atomic_inc(&tgtp->xmt_abort_rsp);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
|
2019-05-22 08:48:56 +08:00
|
|
|
"6316 ABTS cmpl oxid x%x flg x%x (%x) "
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
"WCQE: %08x %08x %08x %08x\n",
|
|
|
|
ctxp->oxid, ctxp->flag, released,
|
|
|
|
wcqe->word0, wcqe->total_data_placed,
|
2017-02-13 05:52:37 +08:00
|
|
|
result, wcqe->word3);
|
2017-05-16 06:20:45 +08:00
|
|
|
|
|
|
|
cmdwqe->context2 = NULL;
|
|
|
|
cmdwqe->context3 = NULL;
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
/*
|
|
|
|
* if transport has released ctx, then can reuse it. Otherwise,
|
|
|
|
* will be recycled by transport release call.
|
|
|
|
*/
|
|
|
|
if (released)
|
2017-05-16 06:20:45 +08:00
|
|
|
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
|
2017-04-12 02:32:29 +08:00
|
|
|
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
/* Since iaab/iaar are NOT set, there is no work left.
|
|
|
|
* For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
|
|
|
|
* should have been called already.
|
|
|
|
*/
|
2017-02-13 05:52:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
|
|
|
|
* @phba: Pointer to HBA context object.
|
|
|
|
* @cmdwqe: Pointer to driver command WQE object.
|
|
|
|
* @wcqe: Pointer to driver response CQE object.
|
|
|
|
*
|
|
|
|
* The function is called from SLI ring event handler with no
|
|
|
|
* lock held. This function is the completion handler for NVME ABTS for LS cmds
|
|
|
|
* The function frees memory resources used for the NVME commands.
|
|
|
|
**/
|
|
|
|
static void
|
|
|
|
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
|
|
|
|
struct lpfc_wcqe_complete *wcqe)
|
|
|
|
{
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp;
|
|
|
|
struct lpfc_nvmet_tgtport *tgtp;
|
2019-03-29 02:06:20 +08:00
|
|
|
uint32_t result;
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
ctxp = cmdwqe->context2;
|
|
|
|
result = wcqe->parameter;
|
|
|
|
|
|
|
|
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
|
2017-05-16 06:20:40 +08:00
|
|
|
atomic_inc(&tgtp->xmt_ls_abort_cmpl);
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
|
2017-06-02 12:06:58 +08:00
|
|
|
"6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
|
2017-02-13 05:52:37 +08:00
|
|
|
ctxp, wcqe->word0, wcqe->total_data_placed,
|
|
|
|
result, wcqe->word3);
|
|
|
|
|
2017-06-02 12:06:58 +08:00
|
|
|
if (!ctxp) {
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
|
|
|
|
"6415 NVMET LS Abort No ctx: WCQE: "
|
|
|
|
"%08x %08x %08x %08x\n",
|
|
|
|
wcqe->word0, wcqe->total_data_placed,
|
|
|
|
result, wcqe->word3);
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
lpfc_sli_release_iocbq(phba, cmdwqe);
|
2017-06-02 12:06:58 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
|
|
|
"6416 NVMET LS abort cmpl state mismatch: "
|
|
|
|
"oxid x%x: %d %d\n",
|
|
|
|
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
|
|
|
|
}
|
|
|
|
|
|
|
|
cmdwqe->context2 = NULL;
|
|
|
|
cmdwqe->context3 = NULL;
|
|
|
|
lpfc_sli_release_iocbq(phba, cmdwqe);
|
|
|
|
kfree(ctxp);
|
2017-02-13 05:52:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
			     struct lpfc_nvmet_rcv_ctx *ctxp,
			     uint32_t sid, uint16_t xri)
{
	/* Build (but do not submit) an unsolicited ABTS response for the
	 * exchange identified by @xri, as an XMIT_SEQUENCE64 WQE carrying a
	 * BA_ABTS basic link service frame back to the initiator @sid.
	 *
	 * Returns 1 when the WQE in ctxp->wqeq has been fully prepared;
	 * returns 0 when the remote node lookup fails or the node is in the
	 * wrong state (an ABTS is never failed back to the caller).
	 *
	 * NOTE(review): this routine only formats ctxp->wqeq; submission to
	 * the work queue appears to be done by the caller — confirm.
	 */
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe128 *wqe_abts;
	struct lpfc_nodelist *ndlp;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6067 ABTS: sid %x xri x%x/x%x\n",
			sid, xri, ctxp->wqeq->sli4_xritag);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	/* The target may only respond to a node that is logged in
	 * (mapped or unmapped); otherwise drop the ABTS silently.
	 */
	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6134 Drop ABTS - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		return 0;
	}

	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;

	/*
	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
	 * that were initialized in lpfc_sli4_nvmet_alloc.
	 */
	memset(wqe_abts, 0, sizeof(union lpfc_wqe));

	/* Word 5 - frame header: last sequence, BA_ABTS R_CTL, BLS type */
	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);

	/* Word 6 - RPI of the destination node and our XRI for the WQE */
	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
	       abts_wqeq->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;

	/* Word 9 - request tag and the OX_ID of the exchange being aborted */
	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	/* Attach driver context; context3 (BDE list) intentionally unused. */
	abts_wqeq->vport = phba->pport;
	abts_wqeq->context1 = ndlp;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->context3 = NULL;
	abts_wqeq->rsvd2 = 0;
	/* hba_wqidx should already be setup from command we are aborting */
	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	abts_wqeq->iocb.ulpLe = 1;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6069 Issue ABTS to xri x%x reqtag x%x\n",
			xri, abts_wqeq->iotag);
	return 1;
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp,
|
|
|
|
uint32_t sid, uint16_t xri)
|
|
|
|
{
|
|
|
|
struct lpfc_nvmet_tgtport *tgtp;
|
|
|
|
struct lpfc_iocbq *abts_wqeq;
|
2018-03-06 04:04:03 +08:00
|
|
|
union lpfc_wqe128 *abts_wqe;
|
2017-02-13 05:52:37 +08:00
|
|
|
struct lpfc_nodelist *ndlp;
|
|
|
|
unsigned long flags;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
|
|
|
|
if (!ctxp->wqeq) {
|
2017-05-16 06:20:45 +08:00
|
|
|
ctxp->wqeq = ctxp->ctxbuf->iocbq;
|
2017-02-13 05:52:37 +08:00
|
|
|
ctxp->wqeq->hba_wqidx = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
ndlp = lpfc_findnode_did(phba->pport, sid);
|
|
|
|
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
|
|
|
|
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
|
|
|
|
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
|
|
|
|
atomic_inc(&tgtp->xmt_abort_rsp_error);
|
2017-06-02 12:06:58 +08:00
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
"6160 Drop ABORT - wrong NDLP state x%x.\n",
|
2017-03-05 01:30:39 +08:00
|
|
|
(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
/* No failure to an ABTS request. */
|
2019-01-29 03:14:40 +08:00
|
|
|
spin_lock_irqsave(&ctxp->ctxlock, flags);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
|
2019-01-29 03:14:40 +08:00
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
|
2017-02-13 05:52:37 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Issue ABTS for this WQE based on iotag */
|
|
|
|
ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
|
2019-01-29 03:14:40 +08:00
|
|
|
spin_lock_irqsave(&ctxp->ctxlock, flags);
|
2017-02-13 05:52:37 +08:00
|
|
|
if (!ctxp->abort_wqeq) {
|
2017-05-16 06:20:40 +08:00
|
|
|
atomic_inc(&tgtp->xmt_abort_rsp_error);
|
2017-06-02 12:06:58 +08:00
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
"6161 ABORT failed: No wqeqs: "
|
2017-02-13 05:52:37 +08:00
|
|
|
"xri: x%x\n", ctxp->oxid);
|
|
|
|
/* No failure to an ABTS request. */
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
|
2019-01-29 03:14:40 +08:00
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
|
2017-02-13 05:52:37 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
abts_wqeq = ctxp->abort_wqeq;
|
|
|
|
abts_wqe = &abts_wqeq->wqe;
|
|
|
|
ctxp->state = LPFC_NVMET_STE_ABORT;
|
2019-01-29 03:14:40 +08:00
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
/* Announce entry to new IO submit field. */
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
|
|
|
|
"6162 ABORT Request to rport DID x%06x "
|
2017-02-13 05:52:37 +08:00
|
|
|
"for xri x%x x%x\n",
|
|
|
|
ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
|
|
|
|
|
|
|
|
/* If the hba is getting reset, this flag is set. It is
|
|
|
|
* cleared when the reset is complete and rings reestablished.
|
|
|
|
*/
|
|
|
|
spin_lock_irqsave(&phba->hbalock, flags);
|
|
|
|
/* driver queued commands are in process of being flushed */
|
|
|
|
if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
|
|
|
|
spin_unlock_irqrestore(&phba->hbalock, flags);
|
2017-05-16 06:20:40 +08:00
|
|
|
atomic_inc(&tgtp->xmt_abort_rsp_error);
|
2017-02-13 05:52:37 +08:00
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
|
|
|
|
"6163 Driver in reset cleanup - flushing "
|
|
|
|
"NVME Req now. hba_flag x%x oxid x%x\n",
|
|
|
|
phba->hba_flag, ctxp->oxid);
|
|
|
|
lpfc_sli_release_iocbq(phba, abts_wqeq);
|
2019-01-29 03:14:40 +08:00
|
|
|
spin_lock_irqsave(&ctxp->ctxlock, flags);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can have both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is invoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenario, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
|
2019-01-29 03:14:40 +08:00
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
|
2017-02-13 05:52:37 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Outstanding abort is in progress */
|
|
|
|
if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
|
|
|
|
spin_unlock_irqrestore(&phba->hbalock, flags);
|
2017-05-16 06:20:40 +08:00
|
|
|
atomic_inc(&tgtp->xmt_abort_rsp_error);
|
2017-02-13 05:52:37 +08:00
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
|
|
|
|
"6164 Outstanding NVME I/O Abort Request "
|
|
|
|
"still pending on oxid x%x\n",
|
|
|
|
ctxp->oxid);
|
|
|
|
lpfc_sli_release_iocbq(phba, abts_wqeq);
|
2019-01-29 03:14:40 +08:00
|
|
|
spin_lock_irqsave(&ctxp->ctxlock, flags);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - this flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
|
2019-01-29 03:14:40 +08:00
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
|
2017-02-13 05:52:37 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Ready - mark outstanding as aborted by driver. */
|
|
|
|
abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
|
|
|
|
|
|
|
|
/* WQEs are reused. Clear stale data and set key fields to
|
|
|
|
* zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
|
|
|
|
*/
|
|
|
|
memset(abts_wqe, 0, sizeof(union lpfc_wqe));
|
|
|
|
|
|
|
|
/* word 3 */
|
|
|
|
bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
|
|
|
|
|
|
|
|
/* word 7 */
|
|
|
|
bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
|
|
|
|
bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
|
|
|
|
|
|
|
|
/* word 8 - tell the FW to abort the IO associated with this
|
|
|
|
* outstanding exchange ID.
|
|
|
|
*/
|
|
|
|
abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
|
|
|
|
|
|
|
|
/* word 9 - this is the iotag for the abts_wqe completion. */
|
|
|
|
bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
|
|
|
|
abts_wqeq->iotag);
|
|
|
|
|
|
|
|
/* word 10 */
|
|
|
|
bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
|
|
|
|
bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
|
|
|
|
|
|
|
|
/* word 11 */
|
|
|
|
bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
|
|
|
|
bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
|
|
|
|
bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
|
|
|
|
|
|
|
|
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
|
|
|
|
abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
|
|
|
|
abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
|
|
|
|
abts_wqeq->iocb_cmpl = 0;
|
|
|
|
abts_wqeq->iocb_flag |= LPFC_IO_NVME;
|
|
|
|
abts_wqeq->context2 = ctxp;
|
2017-06-16 13:56:49 +08:00
|
|
|
abts_wqeq->vport = phba->pport;
|
2019-01-29 03:14:26 +08:00
|
|
|
if (!ctxp->hdwq)
|
|
|
|
ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
|
|
|
|
|
|
|
|
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
|
2017-02-13 05:52:37 +08:00
|
|
|
spin_unlock_irqrestore(&phba->hbalock, flags);
|
2017-05-16 06:20:40 +08:00
|
|
|
if (rc == WQE_SUCCESS) {
|
|
|
|
atomic_inc(&tgtp->xmt_abort_sol);
|
2017-02-13 05:52:37 +08:00
|
|
|
return 0;
|
2017-05-16 06:20:40 +08:00
|
|
|
}
|
2017-02-13 05:52:37 +08:00
|
|
|
|
2017-05-16 06:20:40 +08:00
|
|
|
atomic_inc(&tgtp->xmt_abort_rsp_error);
|
2019-01-29 03:14:40 +08:00
|
|
|
spin_lock_irqsave(&ctxp->ctxlock, flags);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - this flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
|
2019-01-29 03:14:40 +08:00
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
|
2017-02-13 05:52:37 +08:00
|
|
|
lpfc_sli_release_iocbq(phba, abts_wqeq);
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - this flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
|
|
|
|
"6166 Failed ABORT issue_wqe with status x%x "
|
2017-02-13 05:52:37 +08:00
|
|
|
"for oxid x%x.\n",
|
|
|
|
rc, ctxp->oxid);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp,
|
|
|
|
uint32_t sid, uint16_t xri)
|
|
|
|
{
|
|
|
|
struct lpfc_nvmet_tgtport *tgtp;
|
|
|
|
struct lpfc_iocbq *abts_wqeq;
|
|
|
|
unsigned long flags;
|
2019-01-29 03:14:40 +08:00
|
|
|
bool released = false;
|
2017-02-13 05:52:37 +08:00
|
|
|
int rc;
|
|
|
|
|
|
|
|
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
|
|
|
|
if (!ctxp->wqeq) {
|
2017-05-16 06:20:45 +08:00
|
|
|
ctxp->wqeq = ctxp->ctxbuf->iocbq;
|
2017-02-13 05:52:37 +08:00
|
|
|
ctxp->wqeq->hba_wqidx = 0;
|
|
|
|
}
|
|
|
|
|
2017-06-02 12:06:58 +08:00
|
|
|
if (ctxp->state == LPFC_NVMET_STE_FREE) {
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
|
|
|
"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
|
|
|
|
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
|
|
|
|
rc = WQE_BUSY;
|
|
|
|
goto aerr;
|
|
|
|
}
|
|
|
|
ctxp->state = LPFC_NVMET_STE_ABORT;
|
|
|
|
ctxp->entry_cnt++;
|
2017-02-13 05:52:37 +08:00
|
|
|
rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
|
|
|
|
if (rc == 0)
|
|
|
|
goto aerr;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&phba->hbalock, flags);
|
|
|
|
abts_wqeq = ctxp->wqeq;
|
Update ABORT processing for NVMET.
The driver with nvme had this routine stubbed.
Right now XRI_ABORTED_CQE is not handled and the FC NVMET
Transport has a new API for the driver.
Missing code path, new NVME abort API
Update ABORT processing for NVMET
There are 3 new FC NVMET Transport API/ template routines for NVMET:
lpfc_nvmet_xmt_fcp_release
This NVMET template callback routine called to release context
associated with an IO This routine is ALWAYS called last, even
if the IO was aborted or completed in error.
lpfc_nvmet_xmt_fcp_abort
This NVMET template callback routine called to abort an exchange that
has an IO in progress
nvmet_fc_rcv_fcp_req
When the lpfc driver receives an ABTS, this NVME FC transport layer
callback routine is called. For this case there are 2 paths thru the
driver: the driver either has an outstanding exchange / context for the
XRI to be aborted or not. If not, a BA_RJT is issued otherwise a BA_ACC
NVMET Driver abort paths:
There are 2 paths for aborting an IO. The first one is we receive an IO and
decide not to process it because of lack of resources. An unsolicated ABTS
is immediately sent back to the initiator as a response.
lpfc_nvmet_unsol_fcp_buffer
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
The second one is we sent the IO up to the NVMET transport layer to
process, and for some reason the NVME Transport layer decided to abort the
IO before it completes all its phases. For this case there are 2 paths
thru the driver:
the driver either has an outstanding TSEND/TRECEIVE/TRSP WQE or no
outstanding WQEs are present for the exchange / context.
lpfc_nvmet_xmt_fcp_abort
if (LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort (ABORT_WQE)
lpfc_nvmet_sol_fcp_abort_cmp
else
lpfc_nvmet_unsol_fcp_issue_abort
lpfc_nvmet_unsol_issue_abort (XMIT_SEQUENCE_WQE)
lpfc_nvmet_unsol_fcp_abort_cmp
Context flags:
LPFC_NVMET_IOP - his flag signifies an IO is in progress on the exchange.
LPFC_NVMET_XBUSY - this flag indicates the IO completed but the firmware
is still busy with the corresponding exchange. The exchange should not be
reused until after a XRI_ABORTED_CQE is received for that exchange.
LPFC_NVMET_ABORT_OP - this flag signifies an ABORT_WQE was issued on the
exchange.
LPFC_NVMET_CTX_RLS - this flag signifies a context free was requested,
but we are deferring it due to an XBUSY or ABORT in progress.
A ctxlock is added to the context structure that is used whenever these
flags are set/read within the context of an IO.
The LPFC_NVMET_CTX_RLS flag is only set in the defer_relase routine when
the transport has resolved all IO associated with the buffer. The flag is
cleared when the CTX is associated with a new IO.
An exchange can has both an LPFC_NVMET_XBUSY and a LPFC_NVMET_ABORT_OP
condition active simultaneously. Both conditions must complete before the
exchange is freed.
When the abort callback (lpfc_nvmet_xmt_fcp_abort) is envoked:
If there is an outstanding IO, the driver will issue an ABORT_WQE. This
should result in 3 completions for the exchange:
1) IO cmpl with XB bit set
2) Abort WQE cmpl
3) XRI_ABORTED_CQE cmpl
For this scenerio, after completion #1, the NVMET Transport IO rsp
callback is called. After completion #2, no action is taken with respect
to the exchange / context. After completion #3, the exchange context is
free for re-use on another IO.
If there is no outstanding activity on the exchange, the driver will send a
ABTS to the Initiator. Upon completion of this WQE, the exchange / context
is freed for re-use on another IO.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
2017-04-22 07:05:04 +08:00
|
|
|
abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
|
|
|
|
abts_wqeq->iocb_cmpl = NULL;
|
2017-02-13 05:52:37 +08:00
|
|
|
abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
|
2019-01-29 03:14:26 +08:00
|
|
|
if (!ctxp->hdwq)
|
|
|
|
ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
|
|
|
|
|
|
|
|
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
|
2017-02-13 05:52:37 +08:00
|
|
|
spin_unlock_irqrestore(&phba->hbalock, flags);
|
|
|
|
if (rc == WQE_SUCCESS) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
aerr:
|
2018-06-26 23:24:25 +08:00
|
|
|
spin_lock_irqsave(&ctxp->ctxlock, flags);
|
2019-01-29 03:14:40 +08:00
|
|
|
if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
|
|
|
|
spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
|
2019-05-22 08:48:56 +08:00
|
|
|
list_del_init(&ctxp->list);
|
2019-01-29 03:14:40 +08:00
|
|
|
spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
|
|
|
|
released = true;
|
|
|
|
}
|
2018-06-26 23:24:25 +08:00
|
|
|
ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
|
|
|
|
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
atomic_inc(&tgtp->xmt_abort_rsp_error);
|
2017-06-02 12:06:58 +08:00
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
|
2019-05-22 08:48:56 +08:00
|
|
|
"6135 Failed to Issue ABTS for oxid x%x. Status x%x "
|
|
|
|
"(%x)\n",
|
|
|
|
ctxp->oxid, rc, released);
|
2019-01-29 03:14:40 +08:00
|
|
|
if (released)
|
|
|
|
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
|
2017-02-13 05:52:37 +08:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
|
|
|
|
struct lpfc_nvmet_rcv_ctx *ctxp,
|
|
|
|
uint32_t sid, uint16_t xri)
|
|
|
|
{
|
|
|
|
struct lpfc_nvmet_tgtport *tgtp;
|
|
|
|
struct lpfc_iocbq *abts_wqeq;
|
|
|
|
unsigned long flags;
|
|
|
|
int rc;
|
|
|
|
|
2017-06-02 12:06:58 +08:00
|
|
|
if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
|
|
|
|
(ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
|
|
|
|
ctxp->state = LPFC_NVMET_STE_LS_ABORT;
|
|
|
|
ctxp->entry_cnt++;
|
|
|
|
} else {
|
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
|
|
|
"6418 NVMET LS abort state mismatch "
|
|
|
|
"IO x%x: %d %d\n",
|
|
|
|
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
|
|
|
|
ctxp->state = LPFC_NVMET_STE_LS_ABORT;
|
|
|
|
}
|
|
|
|
|
2017-02-13 05:52:37 +08:00
|
|
|
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
|
|
|
|
if (!ctxp->wqeq) {
|
|
|
|
/* Issue ABTS for this WQE based on iotag */
|
|
|
|
ctxp->wqeq = lpfc_sli_get_iocbq(phba);
|
|
|
|
if (!ctxp->wqeq) {
|
2017-06-02 12:06:58 +08:00
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
|
2017-02-13 05:52:37 +08:00
|
|
|
"6068 Abort failed: No wqeqs: "
|
|
|
|
"xri: x%x\n", xri);
|
|
|
|
/* No failure to an ABTS request. */
|
|
|
|
kfree(ctxp);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
abts_wqeq = ctxp->wqeq;
|
2017-05-16 06:20:45 +08:00
|
|
|
|
2017-06-02 12:06:58 +08:00
|
|
|
if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
|
|
|
|
rc = WQE_BUSY;
|
|
|
|
goto out;
|
|
|
|
}
|
2017-02-13 05:52:37 +08:00
|
|
|
|
|
|
|
spin_lock_irqsave(&phba->hbalock, flags);
|
|
|
|
abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
|
|
|
|
abts_wqeq->iocb_cmpl = 0;
|
|
|
|
abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
|
2019-01-29 03:14:26 +08:00
|
|
|
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
|
2017-02-13 05:52:37 +08:00
|
|
|
spin_unlock_irqrestore(&phba->hbalock, flags);
|
|
|
|
if (rc == WQE_SUCCESS) {
|
2017-05-16 06:20:40 +08:00
|
|
|
atomic_inc(&tgtp->xmt_abort_unsol);
|
2017-02-13 05:52:37 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2017-06-02 12:06:58 +08:00
|
|
|
out:
|
2017-02-13 05:52:37 +08:00
|
|
|
atomic_inc(&tgtp->xmt_abort_rsp_error);
|
|
|
|
abts_wqeq->context2 = NULL;
|
|
|
|
abts_wqeq->context3 = NULL;
|
|
|
|
lpfc_sli_release_iocbq(phba, abts_wqeq);
|
|
|
|
kfree(ctxp);
|
2017-06-02 12:06:58 +08:00
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
|
2017-02-13 05:52:37 +08:00
|
|
|
"6056 Failed to Issue ABTS. Status x%x\n", rc);
|
|
|
|
return 0;
|
|
|
|
}
|