
SCSI misc on 20200410

This is a batch of changes that didn't make it in the initial pull
 request because the lpfc series had to be rebased to redo an incorrect
 split.  It's basically driver updates to lpfc, target, bnx2fc and ufs
 with the rest being minor updates, except the sr_block_release one,
 which fixes a use-after-free introduced by the removal of the global
 mutex in the first patch set.
 
 Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>
 -----BEGIN PGP SIGNATURE-----
 
 iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCXpC3hSYcamFtZXMuYm90
 dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishRTaAP9umhxu
 8rRnJ5hsxXRmxOUzO5BGe403ffcBeAiEKQ2n3gEAjeoxZAaqKuDDDRfXyRnBpt9Z
 QuBrgpm1gdXrJT5DDj4=
 =+4Qg
 -----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull more SCSI updates from James Bottomley:
 "This is a batch of changes that didn't make it in the initial pull
  request because the lpfc series had to be rebased to redo an incorrect
  split.

  It's basically driver updates to lpfc, target, bnx2fc and ufs with the
  rest being minor updates, except the sr_block_release one, which fixes
  a use-after-free introduced by the removal of the global mutex in the
  first patch set"
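
The sr_block_release bug is easiest to see as an ordering pattern: once the
global mutex stopped serializing open/release, dropping the device reference
before the last access let the free race in. A minimal sketch of that bug
class, using hypothetical cd_dev/cd_put names rather than the actual sr
driver code:

    #include <stdlib.h>

    struct cd_dev {                 /* hypothetical stand-in for the sr device */
    	int refcount;
    	int media_present;
    };

    static void cd_put(struct cd_dev *cd)
    {
    	if (--cd->refcount == 0)
    		free(cd);
    }

    /* Buggy ordering: without a lock serializing open/release, cd_put()
     * can free the device while the tail of release still touches it. */
    static void release_buggy(struct cd_dev *cd)
    {
    	cd_put(cd);             /* may free cd */
    	cd->media_present = 0;  /* use-after-free */
    }

    /* Fixed ordering: finish every access first, drop the reference last. */
    static void release_fixed(struct cd_dev *cd)
    {
    	cd->media_present = 0;
    	cd_put(cd);
    }

    int main(void)
    {
    	struct cd_dev *cd = calloc(1, sizeof(*cd));

    	if (!cd)
    		return 1;
    	cd->refcount = 1;
    	release_fixed(cd);      /* release_buggy() would touch freed memory */
    	return 0;
    }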

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (35 commits)
  scsi: core: Add DID_ALLOC_FAILURE and DID_MEDIUM_ERROR to hostbyte_table
  scsi: ufs: Use ufshcd_config_pwr_mode() when scaling gear
  scsi: bnx2fc: fix boolreturn.cocci warnings
  scsi: zfcp: use fallthrough;
  scsi: aacraid: do not overwrite retval in aac_reset_adapter()
  scsi: sr: Fix sr_block_release()
  scsi: aic7xxx: Remove more FreeBSD-specific code
  scsi: mpt3sas: Fix kernel panic observed on soft HBA unplug
  scsi: ufs: set device as active power mode after resetting device
  scsi: iscsi: Report unbind session event when the target has been removed
  scsi: lpfc: Change default SCSI LUN QD to 64
  scsi: libfc: rport state move to PLOGI if all PRLI retry exhausted
  scsi: libfc: If PRLI rejected, move rport to PLOGI state
  scsi: bnx2fc: Update the driver version to 2.12.13
  scsi: bnx2fc: Fix SCSI command completion after cleanup is posted
  scsi: bnx2fc: Process the RQE with CQE in interrupt context
  scsi: target: use the stack for XCOPY passthrough cmds
  scsi: target: increase XCOPY I/O size
  scsi: target: avoid per-loop XCOPY buffer allocations
  scsi: target: drop xcopy DISK BLOCK LENGTH debug
  ...
Linus Torvalds 2020-04-10 12:21:11 -07:00
commit 93f3321f65
33 changed files with 725 additions and 773 deletions


@ -178,12 +178,12 @@ static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
return 0;
if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
need = ZFCP_ERP_ACTION_REOPEN_PORT;
/* fall through */
fallthrough;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
p_status = atomic_read(&port->status);
if (!(p_status & ZFCP_STATUS_COMMON_OPEN))
need = ZFCP_ERP_ACTION_REOPEN_PORT;
/* fall through */
fallthrough;
case ZFCP_ERP_ACTION_REOPEN_PORT:
p_status = atomic_read(&port->status);
if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE)
@ -196,7 +196,7 @@ static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
return need;
if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED))
need = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
/* fall through */
fallthrough;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
a_status = atomic_read(&adapter->status);
if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE)
@ -1086,7 +1086,7 @@ static enum zfcp_erp_act_result zfcp_erp_lun_strategy(
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
return zfcp_erp_lun_strategy_close(erp_action);
/* already closed */
/* fall through */
fallthrough;
case ZFCP_ERP_STEP_LUN_CLOSING:
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
return ZFCP_ERP_FAILED;
@ -1415,7 +1415,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act,
if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
if (result == ZFCP_ERP_SUCCEEDED)
zfcp_erp_try_rport_unblock(port);
/* fall through */
fallthrough;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
put_device(&port->dev);
break;


@ -564,7 +564,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
case FSF_TOPO_AL:
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
fc_host_fabric_name(shost) = 0;
/* fall through */
fallthrough;
default:
fc_host_fabric_name(shost) = 0;
dev_err(&adapter->ccw_device->dev,
@ -1032,7 +1032,7 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
switch (fsq->word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
zfcp_fc_test_link(zfcp_sdev->port);
/* fall through */
fallthrough;
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@ -1127,7 +1127,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
break;
case FSF_PORT_HANDLE_NOT_VALID:
zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
/* fall through */
fallthrough;
case FSF_GENERIC_COMMAND_REJECTED:
case FSF_PAYLOAD_SIZE_MISMATCH:
case FSF_REQUEST_SIZE_TOO_LARGE:
@ -1313,7 +1313,7 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
break;
case FSF_SBAL_MISMATCH:
/* should never occur, avoided in zfcp_fsf_send_els */
/* fall through */
fallthrough;
default:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@ -1736,7 +1736,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
/* no zfcp_fc_test_link() with failed open port */
/* fall through */
fallthrough;
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
case FSF_SQ_NO_RETRY_POSSIBLE:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@ -1909,14 +1909,14 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
dev_warn(&req->adapter->ccw_device->dev,
"Opening WKA port 0x%x failed\n", wka_port->d_id);
/* fall through */
fallthrough;
case FSF_ADAPTER_STATUS_AVAILABLE:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
break;
case FSF_GOOD:
wka_port->handle = header->port_handle;
/* fall through */
fallthrough;
case FSF_PORT_ALREADY_OPEN:
wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
}
@ -2059,7 +2059,6 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (header->fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
/* fall through */
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@ -2144,7 +2143,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
case FSF_PORT_HANDLE_NOT_VALID:
zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
/* fall through */
fallthrough;
case FSF_LUN_ALREADY_OPEN:
break;
case FSF_PORT_BOXED:
@ -2175,7 +2174,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
(unsigned long long)zfcp_scsi_dev_lun(sdev),
(unsigned long long)zfcp_sdev->port->wwpn);
zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
/* fall through */
fallthrough;
case FSF_INVALID_COMMAND_OPTION:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@ -2183,7 +2182,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
zfcp_fc_test_link(zfcp_sdev->port);
/* fall through */
fallthrough;
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@ -2277,7 +2276,7 @@ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
switch (req->qtcb->header.fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
zfcp_fc_test_link(zfcp_sdev->port);
/* fall through */
fallthrough;
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
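
The zfcp hunks above are a mechanical conversion from "/* fall through */"
comments to the fallthrough pseudo-keyword, which compilers can verify under
-Wimplicit-fallthrough. A minimal standalone sketch of the same pattern, with
a stand-in definition for what <linux/compiler_attributes.h> provides inside
the kernel:

    #include <stdio.h>

    /* Stand-in for the kernel's definition in <linux/compiler_attributes.h>. */
    #ifndef __has_attribute
    #define __has_attribute(x) 0
    #endif
    #if __has_attribute(__fallthrough__)
    #define fallthrough __attribute__((__fallthrough__))
    #else
    #define fallthrough do {} while (0)  /* fallthrough */
    #endif

    enum step { STEP_CLOSE, STEP_OPEN };

    static int handle(enum step s)
    {
    	int need = 0;

    	switch (s) {
    	case STEP_OPEN:
    		need |= 1;
    		fallthrough;    /* intentional, and now compiler-checked */
    	case STEP_CLOSE:
    		need |= 2;
    		break;
    	}
    	return need;
    }

    int main(void)
    {
    	printf("%d\n", handle(STEP_OPEN));      /* prints 3 */
    	return 0;
    }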


@ -1626,7 +1626,7 @@ out:
int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
unsigned long flagv = 0;
int retval;
int retval, unblock_retval;
struct Scsi_Host *host = aac->scsi_host_ptr;
int bled;
@ -1656,8 +1656,9 @@ int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
retval = _aac_reset_adapter(aac, bled, reset_type);
spin_unlock_irqrestore(host->host_lock, flagv);
retval = scsi_host_unblock(host, SDEV_RUNNING);
unblock_retval = scsi_host_unblock(host, SDEV_RUNNING);
if (!retval)
retval = unblock_retval;
if ((forced < 2) && (retval == -ENODEV)) {
/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
struct fib * fibctx = aac_fib_alloc(aac);
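
The aacraid hunk is a small error-propagation fix: the reset result was being
overwritten by the unblock result, so a failed reset could be reported as
success, and the retval == -ENODEV unwind check just below the hunk could
never trigger. The rule it restores, sketched with hypothetical
do_reset()/do_unblock() helpers:

    #include <stdio.h>

    static int do_reset(void)   { return -19; } /* stand-in: reset failed (-ENODEV) */
    static int do_unblock(void) { return 0; }

    /* The unblock always runs, but its status is reported only when the
     * reset itself succeeded, so the primary failure is never masked. */
    static int reset_adapter(void)
    {
    	int retval = do_reset();
    	int unblock_retval = do_unblock();

    	if (!retval)
    		retval = unblock_retval;
    	return retval;
    }

    int main(void)
    {
    	printf("%d\n", reset_adapter());        /* prints -19, not 0 */
    	return 0;
    }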


@ -1834,21 +1834,6 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
printerror = 0;
} else if (ahc_sent_msg(ahc, AHCMSG_1B,
MSG_BUS_DEV_RESET, TRUE)) {
#ifdef __FreeBSD__
/*
* Don't mark the user's request for this BDR
* as completing with CAM_BDR_SENT. CAM3
* specifies CAM_REQ_CMP.
*/
if (scb != NULL
&& scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
&& ahc_match_scb(ahc, scb, target, channel,
CAM_LUN_WILDCARD,
SCB_LIST_NULL,
ROLE_INITIATOR)) {
ahc_set_transaction_status(scb, CAM_REQ_CMP);
}
#endif
ahc_compile_devinfo(&devinfo,
initiator_role_id,
target,
@ -4399,22 +4384,16 @@ ahc_alloc(void *platform_arg, char *name)
struct ahc_softc *ahc;
int i;
#ifndef __FreeBSD__
ahc = kmalloc(sizeof(*ahc), GFP_ATOMIC);
if (!ahc) {
printk("aic7xxx: cannot malloc softc!\n");
kfree(name);
return NULL;
}
#else
ahc = device_get_softc((device_t)platform_arg);
#endif
memset(ahc, 0, sizeof(*ahc));
ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC);
if (ahc->seep_config == NULL) {
#ifndef __FreeBSD__
kfree(ahc);
#endif
kfree(name);
return (NULL);
}
@ -4540,9 +4519,7 @@ ahc_free(struct ahc_softc *ahc)
kfree(ahc->name);
if (ahc->seep_config != NULL)
kfree(ahc->seep_config);
#ifndef __FreeBSD__
kfree(ahc);
#endif
return;
}


@ -66,7 +66,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
#define BNX2FC_VERSION "2.12.10"
#define BNX2FC_VERSION "2.12.13"
#define PFX "bnx2fc: "
@ -482,7 +482,10 @@ struct io_bdt {
struct bnx2fc_work {
struct list_head list;
struct bnx2fc_rport *tgt;
struct fcoe_task_ctx_entry *task;
unsigned char rq_data[BNX2FC_RQ_BUF_SZ];
u16 wqe;
u8 num_rq;
};
struct bnx2fc_unsol_els {
struct fc_lport *lport;
@ -550,7 +553,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
enum fc_rport_event event);
void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
u8 num_rq);
u8 num_rq, unsigned char *rq_data);
void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
u8 num_rq);
@ -559,7 +562,7 @@ void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
u8 num_rq);
void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
u8 num_rq);
u8 num_rq, unsigned char *rq_data);
void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
struct fcoe_task_ctx_entry *task,
u8 num_rq);
@ -577,7 +580,9 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
void *arg, u32 timeout);
void bnx2fc_arm_cq(struct bnx2fc_rport *tgt);
int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt);
void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);
void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe,
unsigned char *rq_data, u8 num_rq,
struct fcoe_task_ctx_entry *task);
struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
u32 port_id);
void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,


@ -660,7 +660,10 @@ static int bnx2fc_percpu_io_thread(void *arg)
list_for_each_entry_safe(work, tmp, &work_list, list) {
list_del_init(&work->list);
bnx2fc_process_cq_compl(work->tgt, work->wqe);
bnx2fc_process_cq_compl(work->tgt, work->wqe,
work->rq_data,
work->num_rq,
work->task);
kfree(work);
}
@ -2655,7 +2658,8 @@ static int bnx2fc_cpu_offline(unsigned int cpu)
/* Free all work in the list */
list_for_each_entry_safe(work, tmp, &p->work_list, list) {
list_del_init(&work->list);
bnx2fc_process_cq_compl(work->tgt, work->wqe);
bnx2fc_process_cq_compl(work->tgt, work->wqe, work->rq_data,
work->num_rq, work->task);
kfree(work);
}


@ -863,36 +863,22 @@ ret_warn_rqe:
}
}
void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe,
unsigned char *rq_data, u8 num_rq,
struct fcoe_task_ctx_entry *task)
{
struct fcoe_task_ctx_entry *task;
struct fcoe_task_ctx_entry *task_page;
struct fcoe_port *port = tgt->port;
struct bnx2fc_interface *interface = port->priv;
struct bnx2fc_hba *hba = interface->hba;
struct bnx2fc_cmd *io_req;
int task_idx, index;
u16 xid;
u8 cmd_type;
u8 rx_state = 0;
u8 num_rq;
spin_lock_bh(&tgt->tgt_lock);
xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
if (xid >= hba->max_tasks) {
printk(KERN_ERR PFX "ERROR:xid out of range\n");
spin_unlock_bh(&tgt->tgt_lock);
return;
}
task_idx = xid / BNX2FC_TASKS_PER_PAGE;
index = xid % BNX2FC_TASKS_PER_PAGE;
task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
task = &(task_page[index]);
num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
if (io_req == NULL) {
@ -912,7 +898,8 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
switch (cmd_type) {
case BNX2FC_SCSI_CMD:
if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq,
rq_data);
spin_unlock_bh(&tgt->tgt_lock);
return;
}
@ -929,7 +916,7 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
case BNX2FC_TASK_MGMT_CMD:
BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
bnx2fc_process_tm_compl(io_req, task, num_rq);
bnx2fc_process_tm_compl(io_req, task, num_rq, rq_data);
break;
case BNX2FC_ABTS:
@ -987,7 +974,9 @@ void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
}
static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe,
unsigned char *rq_data, u8 num_rq,
struct fcoe_task_ctx_entry *task)
{
struct bnx2fc_work *work;
work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
@ -997,29 +986,87 @@ static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
INIT_LIST_HEAD(&work->list);
work->tgt = tgt;
work->wqe = wqe;
work->num_rq = num_rq;
work->task = task;
if (rq_data)
memcpy(work->rq_data, rq_data, BNX2FC_RQ_BUF_SZ);
return work;
}
/* Pending work request completion */
static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
static bool bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
{
unsigned int cpu = wqe % num_possible_cpus();
struct bnx2fc_percpu_s *fps;
struct bnx2fc_work *work;
struct fcoe_task_ctx_entry *task;
struct fcoe_task_ctx_entry *task_page;
struct fcoe_port *port = tgt->port;
struct bnx2fc_interface *interface = port->priv;
struct bnx2fc_hba *hba = interface->hba;
unsigned char *rq_data = NULL;
unsigned char rq_data_buff[BNX2FC_RQ_BUF_SZ];
int task_idx, index;
unsigned char *dummy;
u16 xid;
u8 num_rq;
int i;
xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
if (xid >= hba->max_tasks) {
pr_err(PFX "ERROR:xid out of range\n");
return false;
}
task_idx = xid / BNX2FC_TASKS_PER_PAGE;
index = xid % BNX2FC_TASKS_PER_PAGE;
task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
task = &task_page[index];
num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
memset(rq_data_buff, 0, BNX2FC_RQ_BUF_SZ);
if (!num_rq)
goto num_rq_zero;
rq_data = bnx2fc_get_next_rqe(tgt, 1);
if (num_rq > 1) {
/* We do not need extra sense data */
for (i = 1; i < num_rq; i++)
dummy = bnx2fc_get_next_rqe(tgt, 1);
}
if (rq_data)
memcpy(rq_data_buff, rq_data, BNX2FC_RQ_BUF_SZ);
/* return RQ entries */
for (i = 0; i < num_rq; i++)
bnx2fc_return_rqe(tgt, 1);
num_rq_zero:
fps = &per_cpu(bnx2fc_percpu, cpu);
spin_lock_bh(&fps->fp_work_lock);
if (fps->iothread) {
work = bnx2fc_alloc_work(tgt, wqe);
work = bnx2fc_alloc_work(tgt, wqe, rq_data_buff,
num_rq, task);
if (work) {
list_add_tail(&work->list, &fps->work_list);
wake_up_process(fps->iothread);
spin_unlock_bh(&fps->fp_work_lock);
return;
return true;
}
}
spin_unlock_bh(&fps->fp_work_lock);
bnx2fc_process_cq_compl(tgt, wqe);
bnx2fc_process_cq_compl(tgt, wqe,
rq_data_buff, num_rq, task);
return true;
}
int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
@ -1056,8 +1103,8 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
/* Unsolicited event notification */
bnx2fc_process_unsol_compl(tgt, wqe);
} else {
bnx2fc_pending_work(tgt, wqe);
num_free_sqes++;
if (bnx2fc_pending_work(tgt, wqe))
num_free_sqes++;
}
cqe++;
tgt->cq_cons_idx++;
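
The bnx2fc series moves RQE handling into the interrupt path: the
receive-queue payload is copied into the queued work item while the ring
entry is still valid, instead of letting the deferred thread chase a pointer
into a ring that may already have been recycled. The shape of that change,
sketched with hypothetical names alongside a stand-in for the
BNX2FC_RQ_BUF_SZ-sized copy embedded in struct bnx2fc_work:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define RQ_BUF_SZ 256                   /* stand-in for BNX2FC_RQ_BUF_SZ */

    struct work_item {
    	unsigned char rq_data[RQ_BUF_SZ];   /* embedded copy, not a pointer */
    	unsigned short wqe;
    	unsigned char num_rq;
    };

    /* Called in (what would be) interrupt context: snapshot the RQE now,
     * before the ring slot is returned and can be overwritten. */
    static struct work_item *alloc_work(unsigned short wqe,
    				    const unsigned char *rqe,
    				    unsigned char num_rq)
    {
    	struct work_item *w = calloc(1, sizeof(*w));

    	if (!w)
    		return NULL;
    	w->wqe = wqe;
    	w->num_rq = num_rq;
    	if (rqe)
    		memcpy(w->rq_data, rqe, RQ_BUF_SZ);
    	return w;
    }

    int main(void)
    {
    	unsigned char ring_slot[RQ_BUF_SZ] = "sense data";
    	struct work_item *w = alloc_work(7, ring_slot, 1);

    	memset(ring_slot, 0, sizeof(ring_slot));    /* ring recycled */
    	if (w) {
    		printf("%s\n", w->rq_data);         /* the copy survives */
    		free(w);
    	}
    	return 0;
    }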


@ -24,7 +24,7 @@ static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
struct fcoe_fcp_rsp_payload *fcp_rsp,
u8 num_rq);
u8 num_rq, unsigned char *rq_data);
void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
unsigned int timer_msec)
@ -1518,7 +1518,8 @@ static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
}
void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task, u8 num_rq)
struct fcoe_task_ctx_entry *task, u8 num_rq,
unsigned char *rq_data)
{
struct bnx2fc_mp_req *tm_req;
struct fc_frame_header *fc_hdr;
@ -1557,7 +1558,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
bnx2fc_parse_fcp_rsp(io_req,
(struct fcoe_fcp_rsp_payload *)
rsp_buf, num_rq);
rsp_buf, num_rq, rq_data);
if (io_req->fcp_rsp_code == 0) {
/* TM successful */
if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
@ -1755,15 +1756,11 @@ void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
struct fcoe_fcp_rsp_payload *fcp_rsp,
u8 num_rq)
u8 num_rq, unsigned char *rq_data)
{
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
struct bnx2fc_rport *tgt = io_req->tgt;
u8 rsp_flags = fcp_rsp->fcp_flags.flags;
u32 rq_buff_len = 0;
int i;
unsigned char *rq_data;
unsigned char *dummy;
int fcp_sns_len = 0;
int fcp_rsp_len = 0;
@ -1809,14 +1806,6 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
}
rq_data = bnx2fc_get_next_rqe(tgt, 1);
if (num_rq > 1) {
/* We do not need extra sense data */
for (i = 1; i < num_rq; i++)
dummy = bnx2fc_get_next_rqe(tgt, 1);
}
/* fetch fcp_rsp_code */
if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
/* Only for task management function */
@ -1837,9 +1826,6 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
if (fcp_sns_len)
memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
/* return RQ entries */
for (i = 0; i < num_rq; i++)
bnx2fc_return_rqe(tgt, 1);
}
}
@ -1918,7 +1904,7 @@ exit_qcmd:
void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
u8 num_rq)
u8 num_rq, unsigned char *rq_data)
{
struct fcoe_fcp_rsp_payload *fcp_rsp;
struct bnx2fc_rport *tgt = io_req->tgt;
@ -1931,6 +1917,12 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
/* we will not receive ABTS response for this IO */
BNX2FC_IO_DBG(io_req, "Timer context finished processing "
"this scsi cmd\n");
if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
&io_req->req_flags)) {
BNX2FC_IO_DBG(io_req,
"Actual completion after cleanup request cleaning up\n");
bnx2fc_process_cleanup_compl(io_req, task, num_rq);
}
return;
}
@ -1950,7 +1942,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
&(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);
/* parse fcp_rsp and obtain sense data from RQ if available */
bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq, rq_data);
if (!sc_cmd->SCp.ptr) {
printk(KERN_ERR PFX "SCp.ptr is NULL\n");


@ -404,7 +404,7 @@ static const char * const hostbyte_table[]={
"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE",
"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST", "DID_TARGET_FAILURE",
"DID_NEXUS_FAILURE" };
"DID_NEXUS_FAILURE", "DID_ALLOC_FAILURE", "DID_MEDIUM_ERROR" };
static const char * const driverbyte_table[]={
"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR",


@ -632,6 +632,8 @@ static void fc_rport_error(struct fc_rport_priv *rdata, int err)
fc_rport_enter_ready(rdata);
break;
case RPORT_ST_PRLI:
fc_rport_enter_plogi(rdata);
break;
case RPORT_ST_ADISC:
fc_rport_enter_logo(rdata);
break;
@ -1208,9 +1210,15 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
if (!rjt)
FC_RPORT_DBG(rdata, "PRLI bad response\n");
else
else {
FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
rjt->er_reason, rjt->er_explan);
if (rjt->er_reason == ELS_RJT_UNAB &&
rjt->er_explan == ELS_EXPL_PLOGI_REQD) {
fc_rport_enter_plogi(rdata);
goto out;
}
}
fc_rport_error_retry(rdata, FC_EX_ELS_RJT);
}
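
The fc_rport change keys off the ELS reject code: "unable, PLOGI required"
means the port login is gone, so retrying PRLI can never succeed and the
state machine drops back to PLOGI instead. The decision in isolation, with
stand-in constants mirroring ELS_RJT_UNAB and ELS_EXPL_PLOGI_REQD:

    #include <stdio.h>

    #define RJT_UNAB        0x09    /* stand-in for ELS_RJT_UNAB */
    #define EXPL_PLOGI_REQD 0x1e    /* stand-in for ELS_EXPL_PLOGI_REQD */

    enum next_step { RETRY_PRLI, REDO_PLOGI };

    /* "Unable, PLOGI required" means the login is gone: restart from PLOGI
     * instead of burning PRLI retries that can never succeed. */
    static enum next_step on_prli_reject(unsigned char reason,
    				     unsigned char explan)
    {
    	if (reason == RJT_UNAB && explan == EXPL_PLOGI_REQD)
    		return REDO_PLOGI;
    	return RETRY_PRLI;
    }

    int main(void)
    {
    	printf("%d\n", on_prli_reject(RJT_UNAB, EXPL_PLOGI_REQD)); /* 1 */
    	return 0;
    }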


@ -207,8 +207,7 @@ typedef struct lpfc_vpd {
} rev;
struct {
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd3 :19; /* Reserved */
uint32_t cdss : 1; /* Configure Data Security SLI */
uint32_t rsvd3 :20; /* Reserved */
uint32_t rsvd2 : 3; /* Reserved */
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t cmv : 1; /* Configure Max VPIs */
@ -230,8 +229,7 @@ typedef struct lpfc_vpd {
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t rsvd2 : 3; /* Reserved */
uint32_t cdss : 1; /* Configure Data Security SLI */
uint32_t rsvd3 :19; /* Reserved */
uint32_t rsvd3 :20; /* Reserved */
#endif
} sli3Feat;
} lpfc_vpd_t;
@ -480,8 +478,8 @@ struct lpfc_vport {
struct dentry *debug_nodelist;
struct dentry *debug_nvmestat;
struct dentry *debug_scsistat;
struct dentry *debug_nvmektime;
struct dentry *debug_cpucheck;
struct dentry *debug_ioktime;
struct dentry *debug_hdwqstat;
struct dentry *vport_debugfs_root;
struct lpfc_debugfs_trc *disc_trc;
atomic_t disc_trc_cnt;
@ -887,7 +885,6 @@ struct lpfc_hba {
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */
uint32_t cfg_enable_dss;
uint32_t cfg_fdmi_on;
#define LPFC_FDMI_NO_SUPPORT 0 /* FDMI not supported */
#define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */
@ -1156,8 +1153,6 @@ struct lpfc_hba {
uint32_t iocb_cnt;
uint32_t iocb_max;
atomic_t sdev_cnt;
uint8_t fips_spec_rev;
uint8_t fips_level;
spinlock_t devicelock; /* lock for luns list */
mempool_t *device_data_mem_pool;
struct list_head luns;
@ -1175,12 +1170,11 @@ struct lpfc_hba {
uint16_t sfp_warning;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint16_t cpucheck_on;
uint16_t hdwqstat_on;
#define LPFC_CHECK_OFF 0
#define LPFC_CHECK_NVME_IO 1
#define LPFC_CHECK_NVMET_RCV 2
#define LPFC_CHECK_NVMET_IO 4
#define LPFC_CHECK_SCSI_IO 8
#define LPFC_CHECK_NVMET_IO 2
#define LPFC_CHECK_SCSI_IO 4
uint16_t ktime_on;
uint64_t ktime_data_samples;
uint64_t ktime_status_samples;
@ -1225,6 +1219,11 @@ struct lpfc_hba {
#define LPFC_POLL_SLOWPATH 1 /* called from slowpath */
char os_host_name[MAXHOSTNAMELEN];
/* SCSI host template information - for physical port */
struct scsi_host_template port_template;
/* SCSI host template information - for all vports */
struct scsi_host_template vport_template;
};
static inline struct Scsi_Host *


@ -2230,66 +2230,6 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
return strlen(buf);
}
/**
* lpfc_fips_level_show - Return the current FIPS level for the HBA
* @dev: class unused variable.
* @attr: device attribute, not used.
* @buf: on return contains the module description text.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_fips_level_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
}
/**
* lpfc_fips_rev_show - Return the FIPS Spec revision for the HBA
* @dev: class unused variable.
* @attr: device attribute, not used.
* @buf: on return contains the module description text.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
}
/**
* lpfc_dss_show - Return the current state of dss and the configured state
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
* @buf: on return contains the formatted text.
*
* Returns: size of formatted string.
**/
static ssize_t
lpfc_dss_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
return scnprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
(phba->cfg_enable_dss) ? "Enabled" : "Disabled",
(phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
"" : "Not ");
}
/**
* lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
* @dev: class converted to a Scsi_host structure.
@ -2705,9 +2645,6 @@ static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
static DEVICE_ATTR_RO(lpfc_temp_sensor);
static DEVICE_ATTR_RO(lpfc_fips_level);
static DEVICE_ATTR_RO(lpfc_fips_rev);
static DEVICE_ATTR_RO(lpfc_dss);
static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn);
static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
@ -3868,9 +3805,9 @@ LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
/*
# lun_queue_depth: This parameter is used to limit the number of outstanding
# commands per FCP LUN. Value range is [1,512]. Default value is 30.
# commands per FCP LUN.
*/
LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512,
LPFC_VPORT_ATTR_R(lun_queue_depth, 64, 1, 512,
"Max number of FCP commands we can queue to a specific LUN");
/*
@ -6251,9 +6188,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_pt,
&dev_attr_txq_hw,
&dev_attr_txcmplq_hw,
&dev_attr_lpfc_fips_level,
&dev_attr_lpfc_fips_rev,
&dev_attr_lpfc_dss,
&dev_attr_lpfc_sriov_hw_max_virtfn,
&dev_attr_protocol,
&dev_attr_lpfc_xlane_supported,
@ -6289,8 +6223,6 @@ struct device_attribute *lpfc_vport_attrs[] = {
&dev_attr_lpfc_max_scsicmpl_time,
&dev_attr_lpfc_stat_data_ctrl,
&dev_attr_lpfc_static_vport,
&dev_attr_lpfc_fips_level,
&dev_attr_lpfc_fips_rev,
NULL,
};
@ -7399,7 +7331,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
lpfc_sli_mode_init(phba, lpfc_sli_mode);
phba->cfg_enable_dss = 1;
lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);


@ -404,9 +404,7 @@ void lpfc_free_sysfs_attr(struct lpfc_vport *);
extern struct device_attribute *lpfc_hba_attrs[];
extern struct device_attribute *lpfc_vport_attrs[];
extern struct scsi_host_template lpfc_template;
extern struct scsi_host_template lpfc_template_no_hr;
extern struct scsi_host_template lpfc_template_nvme;
extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;
@ -590,6 +588,7 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
int);
void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd,
struct lpfc_sli4_hdw_queue *qp);
void lpfc_io_ktime(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd);
void lpfc_nvme_cmd_template(void);
void lpfc_nvmet_cmd_template(void);
void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn);


@ -1300,8 +1300,88 @@ buffer_done:
return len;
}
void
lpfc_io_ktime(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
uint64_t seg1, seg2, seg3, seg4;
uint64_t segsum;
if (!lpfc_cmd->ts_last_cmd ||
!lpfc_cmd->ts_cmd_start ||
!lpfc_cmd->ts_cmd_wqput ||
!lpfc_cmd->ts_isr_cmpl ||
!lpfc_cmd->ts_data_io)
return;
if (lpfc_cmd->ts_data_io < lpfc_cmd->ts_cmd_start)
return;
if (lpfc_cmd->ts_cmd_start < lpfc_cmd->ts_last_cmd)
return;
if (lpfc_cmd->ts_cmd_wqput < lpfc_cmd->ts_cmd_start)
return;
if (lpfc_cmd->ts_isr_cmpl < lpfc_cmd->ts_cmd_wqput)
return;
if (lpfc_cmd->ts_data_io < lpfc_cmd->ts_isr_cmpl)
return;
/*
* Segment 1 - Time from Last FCP command cmpl is handed
* off to NVME Layer to start of next command.
* Segment 2 - Time from Driver receives a IO cmd start
* from NVME Layer to WQ put is done on IO cmd.
* Segment 3 - Time from Driver WQ put is done on IO cmd
* to MSI-X ISR for IO cmpl.
* Segment 4 - Time from MSI-X ISR for IO cmpl to when
* cmpl is handled off to the NVME Layer.
*/
seg1 = lpfc_cmd->ts_cmd_start - lpfc_cmd->ts_last_cmd;
if (seg1 > 5000000) /* 5 ms - for sequential IOs only */
seg1 = 0;
/* Calculate times relative to start of IO */
seg2 = (lpfc_cmd->ts_cmd_wqput - lpfc_cmd->ts_cmd_start);
segsum = seg2;
seg3 = lpfc_cmd->ts_isr_cmpl - lpfc_cmd->ts_cmd_start;
if (segsum > seg3)
return;
seg3 -= segsum;
segsum += seg3;
seg4 = lpfc_cmd->ts_data_io - lpfc_cmd->ts_cmd_start;
if (segsum > seg4)
return;
seg4 -= segsum;
phba->ktime_data_samples++;
phba->ktime_seg1_total += seg1;
if (seg1 < phba->ktime_seg1_min)
phba->ktime_seg1_min = seg1;
else if (seg1 > phba->ktime_seg1_max)
phba->ktime_seg1_max = seg1;
phba->ktime_seg2_total += seg2;
if (seg2 < phba->ktime_seg2_min)
phba->ktime_seg2_min = seg2;
else if (seg2 > phba->ktime_seg2_max)
phba->ktime_seg2_max = seg2;
phba->ktime_seg3_total += seg3;
if (seg3 < phba->ktime_seg3_min)
phba->ktime_seg3_min = seg3;
else if (seg3 > phba->ktime_seg3_max)
phba->ktime_seg3_max = seg3;
phba->ktime_seg4_total += seg4;
if (seg4 < phba->ktime_seg4_min)
phba->ktime_seg4_min = seg4;
else if (seg4 > phba->ktime_seg4_max)
phba->ktime_seg4_max = seg4;
lpfc_cmd->ts_last_cmd = 0;
lpfc_cmd->ts_cmd_start = 0;
lpfc_cmd->ts_cmd_wqput = 0;
lpfc_cmd->ts_isr_cmpl = 0;
lpfc_cmd->ts_data_io = 0;
}
/**
* lpfc_debugfs_nvmektime_data - Dump target node list to a buffer
* lpfc_debugfs_ioktime_data - Dump target node list to a buffer
* @vport: The vport to gather target node info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
@ -1314,13 +1394,13 @@ buffer_done:
* not exceed @size.
**/
static int
lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
lpfc_debugfs_ioktime_data(struct lpfc_vport *vport, char *buf, int size)
{
struct lpfc_hba *phba = vport->phba;
int len = 0;
if (phba->nvmet_support == 0) {
/* NVME Initiator */
/* Initiator */
len += scnprintf(buf + len, PAGE_SIZE - len,
"ktime %s: Total Samples: %lld\n",
(phba->ktime_on ? "Enabled" : "Disabled"),
@ -1330,8 +1410,8 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
len += scnprintf(
buf + len, PAGE_SIZE - len,
"Segment 1: Last NVME Cmd cmpl "
"done -to- Start of next NVME cnd (in driver)\n");
"Segment 1: Last Cmd cmpl "
"done -to- Start of next Cmd (in driver)\n");
len += scnprintf(
buf + len, PAGE_SIZE - len,
"avg:%08lld min:%08lld max %08lld\n",
@ -1341,7 +1421,7 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
phba->ktime_seg1_max);
len += scnprintf(
buf + len, PAGE_SIZE - len,
"Segment 2: Driver start of NVME cmd "
"Segment 2: Driver start of Cmd "
"-to- Firmware WQ doorbell\n");
len += scnprintf(
buf + len, PAGE_SIZE - len,
@ -1364,7 +1444,7 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
len += scnprintf(
buf + len, PAGE_SIZE - len,
"Segment 4: MSI-X ISR cmpl -to- "
"NVME cmpl done\n");
"Cmd cmpl done\n");
len += scnprintf(
buf + len, PAGE_SIZE - len,
"avg:%08lld min:%08lld max %08lld\n",
@ -1603,42 +1683,50 @@ out:
}
/**
* lpfc_debugfs_cpucheck_data - Dump target node list to a buffer
* lpfc_debugfs_hdwqstat_data - Dump I/O stats to a buffer
* @vport: The vport to gather target node info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
*
* Description:
* This routine dumps the NVME statistics associated with @vport
* This routine dumps the NVME + SCSI statistics associated with @vport
*
* Return Value:
* This routine returns the amount of bytes that were dumped into @buf and will
* not exceed @size.
**/
static int
lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
lpfc_debugfs_hdwqstat_data(struct lpfc_vport *vport, char *buf, int size)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli4_hdw_queue *qp;
int i, j, max_cnt;
int len = 0;
struct lpfc_hdwq_stat *c_stat;
int i, j, len;
uint32_t tot_xmt;
uint32_t tot_rcv;
uint32_t tot_cmpl;
char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
len += scnprintf(buf + len, PAGE_SIZE - len,
"CPUcheck %s ",
(phba->cpucheck_on & LPFC_CHECK_NVME_IO ?
"Enabled" : "Disabled"));
if (phba->nvmet_support) {
len += scnprintf(buf + len, PAGE_SIZE - len,
"%s\n",
(phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ?
"Rcv Enabled\n" : "Rcv Disabled\n"));
} else {
len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
}
max_cnt = size - LPFC_DEBUG_OUT_LINE_SZ;
scnprintf(tmp, sizeof(tmp), "HDWQ Stats:\n\n");
if (strlcat(buf, tmp, size) >= size)
goto buffer_done;
scnprintf(tmp, sizeof(tmp), "(NVME Accounting: %s) ",
(phba->hdwqstat_on &
(LPFC_CHECK_NVME_IO | LPFC_CHECK_NVMET_IO) ?
"Enabled" : "Disabled"));
if (strlcat(buf, tmp, size) >= size)
goto buffer_done;
scnprintf(tmp, sizeof(tmp), "(SCSI Accounting: %s) ",
(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO ?
"Enabled" : "Disabled"));
if (strlcat(buf, tmp, size) >= size)
goto buffer_done;
scnprintf(tmp, sizeof(tmp), "\n\n");
if (strlcat(buf, tmp, size) >= size)
goto buffer_done;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
qp = &phba->sli4_hba.hdwq[i];
@ -1646,46 +1734,76 @@ lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
tot_rcv = 0;
tot_xmt = 0;
tot_cmpl = 0;
for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
tot_xmt += qp->cpucheck_xmt_io[j];
tot_cmpl += qp->cpucheck_cmpl_io[j];
for_each_present_cpu(j) {
c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, j);
/* Only display for this HDWQ */
if (i != c_stat->hdwq_no)
continue;
/* Only display non-zero counters */
if (!c_stat->xmt_io && !c_stat->cmpl_io &&
!c_stat->rcv_io)
continue;
if (!tot_xmt && !tot_cmpl && !tot_rcv) {
/* Print HDWQ string only the first time */
scnprintf(tmp, sizeof(tmp), "[HDWQ %d]:\t", i);
if (strlcat(buf, tmp, size) >= size)
goto buffer_done;
}
tot_xmt += c_stat->xmt_io;
tot_cmpl += c_stat->cmpl_io;
if (phba->nvmet_support)
tot_rcv += qp->cpucheck_rcv_io[j];
tot_rcv += c_stat->rcv_io;
scnprintf(tmp, sizeof(tmp), "| [CPU %d]: ", j);
if (strlcat(buf, tmp, size) >= size)
goto buffer_done;
if (phba->nvmet_support) {
scnprintf(tmp, sizeof(tmp),
"XMT 0x%x CMPL 0x%x RCV 0x%x |",
c_stat->xmt_io, c_stat->cmpl_io,
c_stat->rcv_io);
if (strlcat(buf, tmp, size) >= size)
goto buffer_done;
} else {
scnprintf(tmp, sizeof(tmp),
"XMT 0x%x CMPL 0x%x |",
c_stat->xmt_io, c_stat->cmpl_io);
if (strlcat(buf, tmp, size) >= size)
goto buffer_done;
}
}
/* Only display Hardware Qs with something */
/* Check if nothing to display */
if (!tot_xmt && !tot_cmpl && !tot_rcv)
continue;
len += scnprintf(buf + len, PAGE_SIZE - len,
"HDWQ %03d: ", i);
for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
/* Only display non-zero counters */
if (!qp->cpucheck_xmt_io[j] &&
!qp->cpucheck_cmpl_io[j] &&
!qp->cpucheck_rcv_io[j])
continue;
if (phba->nvmet_support) {
len += scnprintf(buf + len, PAGE_SIZE - len,
"CPU %03d: %x/%x/%x ", j,
qp->cpucheck_rcv_io[j],
qp->cpucheck_xmt_io[j],
qp->cpucheck_cmpl_io[j]);
} else {
len += scnprintf(buf + len, PAGE_SIZE - len,
"CPU %03d: %x/%x ", j,
qp->cpucheck_xmt_io[j],
qp->cpucheck_cmpl_io[j]);
}
}
len += scnprintf(buf + len, PAGE_SIZE - len,
"Total: %x\n", tot_xmt);
if (len >= max_cnt) {
len += scnprintf(buf + len, PAGE_SIZE - len,
"Truncated ...\n");
return len;
scnprintf(tmp, sizeof(tmp), "\t->\t[HDWQ Total: ");
if (strlcat(buf, tmp, size) >= size)
goto buffer_done;
if (phba->nvmet_support) {
scnprintf(tmp, sizeof(tmp),
"XMT 0x%x CMPL 0x%x RCV 0x%x]\n\n",
tot_xmt, tot_cmpl, tot_rcv);
if (strlcat(buf, tmp, size) >= size)
goto buffer_done;
} else {
scnprintf(tmp, sizeof(tmp),
"XMT 0x%x CMPL 0x%x]\n\n",
tot_xmt, tot_cmpl);
if (strlcat(buf, tmp, size) >= size)
goto buffer_done;
}
}
buffer_done:
len = strnlen(buf, size);
return len;
}
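
The hdwqstat rewrite replaces fixed per-queue arrays capped at
LPFC_CHECK_CPU_CNT with true per-cpu counters: the hot paths do
this_cpu_inc() on private storage, and only the debugfs reader walks every
CPU with per_cpu_ptr(). The kernel percpu API has no userspace equivalent,
but its shape can be sketched with C11 thread-local counters (assuming a
<threads.h> implementation); unlike percpu areas, thread-local storage dies
with its thread, so each worker publishes a final count instead of exposing
a live pointer:

    #include <stdio.h>
    #include <threads.h>

    #define NWORKERS 4

    /* Analogue of the new per-cpu lpfc_hdwq_stat: each thread bumps its own
     * private counter (the kernel's this_cpu_inc), and totals are computed
     * only when someone reads the stats (the kernel's per_cpu_ptr walk). */
    static _Thread_local unsigned long xmt_io;
    static unsigned long final_count[NWORKERS];

    static int worker(void *arg)
    {
    	long id = (long)arg;

    	for (int i = 0; i < 1000000; i++)
    		xmt_io++;               /* hot path: no shared line, no lock */
    	final_count[id] = xmt_io;       /* publish once, at the end */
    	return 0;
    }

    int main(void)
    {
    	thrd_t t[NWORKERS];
    	unsigned long sum = 0;

    	for (long i = 0; i < NWORKERS; i++)
    		thrd_create(&t[i], worker, (void *)i);
    	for (int i = 0; i < NWORKERS; i++)
    		thrd_join(t[i], NULL);
    	for (int i = 0; i < NWORKERS; i++)      /* aggregate on read */
    		sum += final_count[i];
    	printf("total xmt_io: %lu\n", sum);     /* 4000000 */
    	return 0;
    }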
@ -2689,7 +2807,7 @@ lpfc_debugfs_scsistat_write(struct file *file, const char __user *buf,
}
static int
lpfc_debugfs_nvmektime_open(struct inode *inode, struct file *file)
lpfc_debugfs_ioktime_open(struct inode *inode, struct file *file)
{
struct lpfc_vport *vport = inode->i_private;
struct lpfc_debug *debug;
@ -2700,14 +2818,14 @@ lpfc_debugfs_nvmektime_open(struct inode *inode, struct file *file)
goto out;
/* Round to page boundary */
debug->buffer = kmalloc(LPFC_NVMEKTIME_SIZE, GFP_KERNEL);
debug->buffer = kmalloc(LPFC_IOKTIME_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
goto out;
}
debug->len = lpfc_debugfs_nvmektime_data(vport, debug->buffer,
LPFC_NVMEKTIME_SIZE);
debug->len = lpfc_debugfs_ioktime_data(vport, debug->buffer,
LPFC_IOKTIME_SIZE);
debug->i_private = inode->i_private;
file->private_data = debug;
@ -2718,8 +2836,8 @@ out:
}
static ssize_t
lpfc_debugfs_nvmektime_write(struct file *file, const char __user *buf,
size_t nbytes, loff_t *ppos)
lpfc_debugfs_ioktime_write(struct file *file, const char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct lpfc_debug *debug = file->private_data;
struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
@ -2921,7 +3039,7 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
}
static int
lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file)
lpfc_debugfs_hdwqstat_open(struct inode *inode, struct file *file)
{
struct lpfc_vport *vport = inode->i_private;
struct lpfc_debug *debug;
@ -2932,14 +3050,14 @@ lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file)
goto out;
/* Round to page boundary */
debug->buffer = kmalloc(LPFC_CPUCHECK_SIZE, GFP_KERNEL);
debug->buffer = kcalloc(1, LPFC_SCSISTAT_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
goto out;
}
debug->len = lpfc_debugfs_cpucheck_data(vport, debug->buffer,
LPFC_CPUCHECK_SIZE);
debug->len = lpfc_debugfs_hdwqstat_data(vport, debug->buffer,
LPFC_SCSISTAT_SIZE);
debug->i_private = inode->i_private;
file->private_data = debug;
@ -2950,16 +3068,16 @@ out:
}
static ssize_t
lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf,
lpfc_debugfs_hdwqstat_write(struct file *file, const char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct lpfc_debug *debug = file->private_data;
struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli4_hdw_queue *qp;
struct lpfc_hdwq_stat *c_stat;
char mybuf[64];
char *pbuf;
int i, j;
int i;
if (nbytes > 64)
nbytes = 64;
@ -2972,41 +3090,39 @@ lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf,
if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) {
if (phba->nvmet_support)
phba->cpucheck_on |= LPFC_CHECK_NVMET_IO;
phba->hdwqstat_on |= LPFC_CHECK_NVMET_IO;
else
phba->cpucheck_on |= (LPFC_CHECK_NVME_IO |
phba->hdwqstat_on |= (LPFC_CHECK_NVME_IO |
LPFC_CHECK_SCSI_IO);
return strlen(pbuf);
} else if ((strncmp(pbuf, "nvme_on", sizeof("nvme_on") - 1) == 0)) {
if (phba->nvmet_support)
phba->cpucheck_on |= LPFC_CHECK_NVMET_IO;
phba->hdwqstat_on |= LPFC_CHECK_NVMET_IO;
else
phba->cpucheck_on |= LPFC_CHECK_NVME_IO;
phba->hdwqstat_on |= LPFC_CHECK_NVME_IO;
return strlen(pbuf);
} else if ((strncmp(pbuf, "scsi_on", sizeof("scsi_on") - 1) == 0)) {
phba->cpucheck_on |= LPFC_CHECK_SCSI_IO;
if (!phba->nvmet_support)
phba->hdwqstat_on |= LPFC_CHECK_SCSI_IO;
return strlen(pbuf);
} else if ((strncmp(pbuf, "rcv",
sizeof("rcv") - 1) == 0)) {
if (phba->nvmet_support)
phba->cpucheck_on |= LPFC_CHECK_NVMET_RCV;
else
return -EINVAL;
} else if ((strncmp(pbuf, "nvme_off", sizeof("nvme_off") - 1) == 0)) {
phba->hdwqstat_on &= ~(LPFC_CHECK_NVME_IO |
LPFC_CHECK_NVMET_IO);
return strlen(pbuf);
} else if ((strncmp(pbuf, "scsi_off", sizeof("scsi_off") - 1) == 0)) {
phba->hdwqstat_on &= ~LPFC_CHECK_SCSI_IO;
return strlen(pbuf);
} else if ((strncmp(pbuf, "off",
sizeof("off") - 1) == 0)) {
phba->cpucheck_on = LPFC_CHECK_OFF;
phba->hdwqstat_on = LPFC_CHECK_OFF;
return strlen(pbuf);
} else if ((strncmp(pbuf, "zero",
sizeof("zero") - 1) == 0)) {
for (i = 0; i < phba->cfg_hdw_queue; i++) {
qp = &phba->sli4_hba.hdwq[i];
for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
qp->cpucheck_rcv_io[j] = 0;
qp->cpucheck_xmt_io[j] = 0;
qp->cpucheck_cmpl_io[j] = 0;
}
for_each_present_cpu(i) {
c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, i);
c_stat->xmt_io = 0;
c_stat->cmpl_io = 0;
c_stat->rcv_io = 0;
}
return strlen(pbuf);
}
@ -5431,13 +5547,13 @@ static const struct file_operations lpfc_debugfs_op_scsistat = {
.release = lpfc_debugfs_release,
};
#undef lpfc_debugfs_op_nvmektime
static const struct file_operations lpfc_debugfs_op_nvmektime = {
#undef lpfc_debugfs_op_ioktime
static const struct file_operations lpfc_debugfs_op_ioktime = {
.owner = THIS_MODULE,
.open = lpfc_debugfs_nvmektime_open,
.open = lpfc_debugfs_ioktime_open,
.llseek = lpfc_debugfs_lseek,
.read = lpfc_debugfs_read,
.write = lpfc_debugfs_nvmektime_write,
.write = lpfc_debugfs_ioktime_write,
.release = lpfc_debugfs_release,
};
@ -5451,13 +5567,13 @@ static const struct file_operations lpfc_debugfs_op_nvmeio_trc = {
.release = lpfc_debugfs_release,
};
#undef lpfc_debugfs_op_cpucheck
static const struct file_operations lpfc_debugfs_op_cpucheck = {
#undef lpfc_debugfs_op_hdwqstat
static const struct file_operations lpfc_debugfs_op_hdwqstat = {
.owner = THIS_MODULE,
.open = lpfc_debugfs_cpucheck_open,
.open = lpfc_debugfs_hdwqstat_open,
.llseek = lpfc_debugfs_lseek,
.read = lpfc_debugfs_read,
.write = lpfc_debugfs_cpucheck_write,
.write = lpfc_debugfs_hdwqstat_write,
.release = lpfc_debugfs_release,
};
@ -6075,17 +6191,22 @@ nvmeio_off:
goto debug_failed;
}
snprintf(name, sizeof(name), "nvmektime");
vport->debug_nvmektime =
snprintf(name, sizeof(name), "ioktime");
vport->debug_ioktime =
debugfs_create_file(name, 0644,
vport->vport_debugfs_root,
vport, &lpfc_debugfs_op_nvmektime);
vport, &lpfc_debugfs_op_ioktime);
if (!vport->debug_ioktime) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0815 Cannot create debugfs ioktime\n");
goto debug_failed;
}
snprintf(name, sizeof(name), "cpucheck");
vport->debug_cpucheck =
snprintf(name, sizeof(name), "hdwqstat");
vport->debug_hdwqstat =
debugfs_create_file(name, 0644,
vport->vport_debugfs_root,
vport, &lpfc_debugfs_op_cpucheck);
vport, &lpfc_debugfs_op_hdwqstat);
/*
* The following section is for additional directories/files for the
@ -6216,11 +6337,11 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(vport->debug_scsistat); /* scsistat */
vport->debug_scsistat = NULL;
debugfs_remove(vport->debug_nvmektime); /* nvmektime */
vport->debug_nvmektime = NULL;
debugfs_remove(vport->debug_ioktime); /* ioktime */
vport->debug_ioktime = NULL;
debugfs_remove(vport->debug_cpucheck); /* cpucheck */
vport->debug_cpucheck = NULL;
debugfs_remove(vport->debug_hdwqstat); /* hdwqstat */
vport->debug_hdwqstat = NULL;
if (vport->vport_debugfs_root) {
debugfs_remove(vport->vport_debugfs_root); /* vportX */


@ -46,8 +46,7 @@
/* nvmestat output buffer size */
#define LPFC_NVMESTAT_SIZE 8192
#define LPFC_NVMEKTIME_SIZE 8192
#define LPFC_CPUCHECK_SIZE 8192
#define LPFC_IOKTIME_SIZE 8192
#define LPFC_NVMEIO_TRC_SIZE 8192
/* scsistat output buffer size */


@ -3262,8 +3262,7 @@ typedef struct {
#endif
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd1 : 19; /* Reserved */
uint32_t cdss : 1; /* Configure Data Security SLI */
uint32_t rsvd1 : 20; /* Reserved */
uint32_t casabt : 1; /* Configure async abts status notice */
uint32_t rsvd2 : 2; /* Reserved */
uint32_t cbg : 1; /* Configure BlockGuard */
@ -3287,12 +3286,10 @@ typedef struct {
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t rsvd2 : 2; /* Reserved */
uint32_t casabt : 1; /* Configure async abts status notice */
uint32_t cdss : 1; /* Configure Data Security SLI */
uint32_t rsvd1 : 19; /* Reserved */
uint32_t rsvd1 : 20; /* Reserved */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd3 : 19; /* Reserved */
uint32_t gdss : 1; /* Configure Data Security SLI */
uint32_t rsvd3 : 20; /* Reserved */
uint32_t gasabt : 1; /* Grant async abts status notice */
uint32_t rsvd4 : 2; /* Reserved */
uint32_t gbg : 1; /* Grant BlockGuard */
@ -3316,8 +3313,7 @@ typedef struct {
uint32_t gbg : 1; /* Grant BlockGuard */
uint32_t rsvd4 : 2; /* Reserved */
uint32_t gasabt : 1; /* Grant async abts status notice */
uint32_t gdss : 1; /* Configure Data Security SLI */
uint32_t rsvd3 : 19; /* Reserved */
uint32_t rsvd3 : 20; /* Reserved */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
@ -3339,15 +3335,11 @@ typedef struct {
uint32_t rsvd6; /* Reserved */
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t fips_rev : 3; /* FIPS Spec Revision */
uint32_t fips_level : 4; /* FIPS Level */
uint32_t sec_err : 9; /* security crypto error */
uint32_t rsvd7 : 16;
uint32_t max_vpi : 16; /* Max number of virt N-Ports */
#else /* __LITTLE_ENDIAN */
uint32_t max_vpi : 16; /* Max number of virt N-Ports */
uint32_t sec_err : 9; /* security crypto error */
uint32_t fips_level : 4; /* FIPS Level */
uint32_t fips_rev : 3; /* FIPS Spec Revision */
uint32_t rsvd7 : 16;
#endif
} CONFIG_PORT_VAR;


@ -4231,6 +4231,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
struct lpfc_vport *vport;
struct Scsi_Host *shost = NULL;
struct scsi_host_template *template;
int error = 0;
int i;
uint64_t wwn;
@ -4259,22 +4260,50 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
}
}
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
if (dev != &phba->pcidev->dev) {
shost = scsi_host_alloc(&lpfc_vport_template,
sizeof(struct lpfc_vport));
/* Seed template for SCSI host registration */
if (dev == &phba->pcidev->dev) {
template = &phba->port_template;
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
/* Seed physical port template */
memcpy(template, &lpfc_template, sizeof(*template));
if (use_no_reset_hba) {
/* template is for a no reset SCSI Host */
template->max_sectors = 0xffff;
template->eh_host_reset_handler = NULL;
}
/* Template for all vports this physical port creates */
memcpy(&phba->vport_template, &lpfc_template,
sizeof(*template));
phba->vport_template.max_sectors = 0xffff;
phba->vport_template.shost_attrs = lpfc_vport_attrs;
phba->vport_template.eh_bus_reset_handler = NULL;
phba->vport_template.eh_host_reset_handler = NULL;
phba->vport_template.vendor_id = 0;
/* Initialize the host templates with updated value */
if (phba->sli_rev == LPFC_SLI_REV4) {
template->sg_tablesize = phba->cfg_scsi_seg_cnt;
phba->vport_template.sg_tablesize =
phba->cfg_scsi_seg_cnt;
} else {
template->sg_tablesize = phba->cfg_sg_seg_cnt;
phba->vport_template.sg_tablesize =
phba->cfg_sg_seg_cnt;
}
} else {
if (!use_no_reset_hba)
shost = scsi_host_alloc(&lpfc_template,
sizeof(struct lpfc_vport));
else
shost = scsi_host_alloc(&lpfc_template_no_hr,
sizeof(struct lpfc_vport));
/* NVMET is for physical port only */
memcpy(template, &lpfc_template_nvme,
sizeof(*template));
}
} else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
shost = scsi_host_alloc(&lpfc_template_nvme,
sizeof(struct lpfc_vport));
} else {
template = &phba->vport_template;
}
shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
if (!shost)
goto out;
@ -4329,6 +4358,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
vport->port_type = LPFC_PHYSICAL_PORT;
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
"9081 CreatePort TMPLATE type %x TBLsize %d "
"SEGcnt %d/%d\n",
vport->port_type, shost->sg_tablesize,
phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
/* Initialize all internally managed lists. */
INIT_LIST_HEAD(&vport->fc_nodes);
INIT_LIST_HEAD(&vport->rcv_buffer_list);
@ -6301,11 +6336,6 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
* used to create the sg_dma_buf_pool must be dynamically calculated.
*/
/* Initialize the host templates the configured values. */
lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
if (phba->sli_rev == LPFC_SLI_REV4)
entry_sz = sizeof(struct sli4_sge);
else
@ -6346,7 +6376,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
"9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
"9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
phba->cfg_total_seg_cnt);
@ -6816,11 +6846,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
}
/* Initialize the host templates with the updated values. */
lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
"9087 sg_seg_cnt:%d dmabuf_size:%d "
"total:%d scsi:%d nvme:%d\n",
@ -6926,6 +6951,17 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
rc = -ENOMEM;
goto out_free_hba_cpu_map;
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
if (!phba->sli4_hba.c_stat) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3332 Failed allocating per cpu hdwq stats\n");
rc = -ENOMEM;
goto out_free_hba_eq_info;
}
#endif
/*
* Enable sr-iov virtual functions if supported and configured
* through the module parameter.
@ -6945,6 +6981,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
return 0;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
out_free_hba_eq_info:
free_percpu(phba->sli4_hba.eq_info);
#endif
out_free_hba_cpu_map:
kfree(phba->sli4_hba.cpu_map);
out_free_hba_eq_hdl:
@ -6983,6 +7023,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
free_percpu(phba->sli4_hba.eq_info);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
free_percpu(phba->sli4_hba.c_stat);
#endif
/* Free memory allocated for msi-x interrupt vector to CPU mapping */
kfree(phba->sli4_hba.cpu_map);
@ -10823,6 +10866,9 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo;
#endif
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct lpfc_hdwq_stat *c_stat;
#endif
max_phys_id = 0;
min_phys_id = LPFC_VECTOR_MAP_EMPTY;
@ -11074,10 +11120,17 @@ found_any:
idx = 0;
for_each_possible_cpu(cpu) {
cpup = &phba->sli4_hba.cpu_map[cpu];
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
c_stat->hdwq_no = cpup->hdwq;
#endif
if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
continue;
cpup->hdwq = idx++ % phba->cfg_hdw_queue;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
c_stat->hdwq_no = cpup->hdwq;
#endif
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3340 Set Affinity: not present "
"CPU %d hdwq %d\n",
@ -11173,11 +11226,9 @@ static void lpfc_cpuhp_add(struct lpfc_hba *phba)
rcu_read_lock();
if (!list_empty(&phba->poll_list)) {
timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
if (!list_empty(&phba->poll_list))
mod_timer(&phba->cpuhp_poll_timer,
jiffies + msecs_to_jiffies(LPFC_POLL_HB));
}
rcu_read_unlock();
@ -13145,6 +13196,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
lpfc_sli4_ras_setup(phba);
INIT_LIST_HEAD(&phba->poll_list);
timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
return 0;


@ -1299,8 +1299,6 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
if (phba->cfg_enable_bg)
mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
if (phba->cfg_enable_dss)
mb->un.varCfgPort.cdss = 1; /* Configure Security */
mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();


@ -382,13 +382,15 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
ndlp->nrport = NULL;
ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
}
spin_unlock_irq(&vport->phba->hbalock);
spin_unlock_irq(&vport->phba->hbalock);
/* Remove original register reference. The host transport
* won't reference this rport/remoteport any further.
*/
lpfc_nlp_put(ndlp);
/* Remove original register reference. The host transport
* won't reference this rport/remoteport any further.
*/
lpfc_nlp_put(ndlp);
} else {
spin_unlock_irq(&vport->phba->hbalock);
}
rport_err:
return;
@ -897,88 +899,6 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
sgl->sge_len = cpu_to_le32(nCmd->rsplen);
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvme_ktime(struct lpfc_hba *phba,
struct lpfc_io_buf *lpfc_ncmd)
{
uint64_t seg1, seg2, seg3, seg4;
uint64_t segsum;
if (!lpfc_ncmd->ts_last_cmd ||
!lpfc_ncmd->ts_cmd_start ||
!lpfc_ncmd->ts_cmd_wqput ||
!lpfc_ncmd->ts_isr_cmpl ||
!lpfc_ncmd->ts_data_nvme)
return;
if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start)
return;
if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
return;
if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
return;
if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
return;
if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
return;
/*
* Segment 1 - Time from Last FCP command cmpl is handed
* off to NVME Layer to start of next command.
* Segment 2 - Time from Driver receives a IO cmd start
* from NVME Layer to WQ put is done on IO cmd.
* Segment 3 - Time from Driver WQ put is done on IO cmd
* to MSI-X ISR for IO cmpl.
* Segment 4 - Time from MSI-X ISR for IO cmpl to when
* cmpl is handled off to the NVME Layer.
*/
seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
if (seg1 > 5000000) /* 5 ms - for sequential IOs only */
seg1 = 0;
/* Calculate times relative to start of IO */
seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
segsum = seg2;
seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start;
if (segsum > seg3)
return;
seg3 -= segsum;
segsum += seg3;
seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start;
if (segsum > seg4)
return;
seg4 -= segsum;
phba->ktime_data_samples++;
phba->ktime_seg1_total += seg1;
if (seg1 < phba->ktime_seg1_min)
phba->ktime_seg1_min = seg1;
else if (seg1 > phba->ktime_seg1_max)
phba->ktime_seg1_max = seg1;
phba->ktime_seg2_total += seg2;
if (seg2 < phba->ktime_seg2_min)
phba->ktime_seg2_min = seg2;
else if (seg2 > phba->ktime_seg2_max)
phba->ktime_seg2_max = seg2;
phba->ktime_seg3_total += seg3;
if (seg3 < phba->ktime_seg3_min)
phba->ktime_seg3_min = seg3;
else if (seg3 > phba->ktime_seg3_max)
phba->ktime_seg3_max = seg3;
phba->ktime_seg4_total += seg4;
if (seg4 < phba->ktime_seg4_min)
phba->ktime_seg4_min = seg4;
else if (seg4 > phba->ktime_seg4_max)
phba->ktime_seg4_max = seg4;
lpfc_ncmd->ts_last_cmd = 0;
lpfc_ncmd->ts_cmd_start = 0;
lpfc_ncmd->ts_cmd_wqput = 0;
lpfc_ncmd->ts_isr_cmpl = 0;
lpfc_ncmd->ts_data_nvme = 0;
}
#endif
/**
* lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
@ -1010,6 +930,9 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
uint32_t code, status, idx;
uint16_t cid, sqhd, data;
uint32_t *ptr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
int cpu;
#endif
/* Sanity check on return of outstanding command */
if (!lpfc_ncmd) {
@ -1178,23 +1101,19 @@ out_err:
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (lpfc_ncmd->ts_cmd_start) {
lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
lpfc_ncmd->ts_data_nvme = ktime_get_ns();
phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
lpfc_nvme_ktime(phba, lpfc_ncmd);
lpfc_ncmd->ts_data_io = ktime_get_ns();
phba->ktime_last_cmd = lpfc_ncmd->ts_data_io;
lpfc_io_ktime(phba, lpfc_ncmd);
}
if (unlikely(phba->cpucheck_on & LPFC_CHECK_NVME_IO)) {
uint32_t cpu;
idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) {
cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT) {
if (lpfc_ncmd->cpu != cpu)
lpfc_printf_vlog(vport,
KERN_INFO, LOG_NVME_IOERR,
"6701 CPU Check cmpl: "
"cpu %d expect %d\n",
cpu, lpfc_ncmd->cpu);
phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
}
this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
if (lpfc_ncmd->cpu != cpu)
lpfc_printf_vlog(vport,
KERN_INFO, LOG_NVME_IOERR,
"6701 CPU Check cmpl: "
"cpu %d expect %d\n",
cpu, lpfc_ncmd->cpu);
}
#endif
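
The statistics hunks above drop the fixed cpucheck_* arrays, which were bounded by LPFC_CHECK_CPU_CNT and indexed by raw CPU id, in favor of this_cpu_inc() on per-CPU counters. A self-contained kernel-module sketch of that pattern, under hypothetical names (io_stat mirrors the shape of lpfc_hdwq_stat):

#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

struct io_stat {		/* hypothetical, not the driver's struct */
	u32 xmt_io;
	u32 cmpl_io;
};

static struct io_stat __percpu *io_stat;

/* Fast path: lockless, preemption-safe increment of this CPU's copy. */
static void note_cmpl(void)
{
	this_cpu_inc(io_stat->cmpl_io);
}

/* Slow path: fold every CPU's copy when reporting. */
static u64 total_cmpl(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(io_stat, cpu)->cmpl_io;
	return sum;
}

static int __init stat_init(void)
{
	io_stat = alloc_percpu(struct io_stat);
	if (!io_stat)
		return -ENOMEM;
	note_cmpl();
	pr_info("completions so far: %llu\n", (unsigned long long)total_cmpl());
	return 0;
}

static void __exit stat_exit(void)
{
	free_percpu(io_stat);
}

module_init(stat_init);
module_exit(stat_exit);
MODULE_LICENSE("GPL");
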
@ -1743,19 +1662,17 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
if (lpfc_ncmd->ts_cmd_start)
lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) {
cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT) {
lpfc_ncmd->cpu = cpu;
if (idx != cpu)
lpfc_printf_vlog(vport,
KERN_INFO, LOG_NVME_IOERR,
"6702 CPU Check cmd: "
"cpu %d wq %d\n",
lpfc_ncmd->cpu,
lpfc_queue_info->index);
phba->sli4_hba.hdwq[idx].cpucheck_xmt_io[cpu]++;
}
this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
lpfc_ncmd->cpu = cpu;
if (idx != cpu)
lpfc_printf_vlog(vport,
KERN_INFO, LOG_NVME_IOERR,
"6702 CPU Check cmd: "
"cpu %d wq %d\n",
lpfc_ncmd->cpu,
lpfc_queue_info->index);
}
#endif
return 0;


@ -707,7 +707,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_nvmet_rcv_ctx *ctxp;
uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t id;
int id;
#endif
ctxp = cmdwqe->context2;
@ -814,16 +814,14 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
rsp->done(rsp);
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
id = raw_smp_processor_id();
if (id < LPFC_CHECK_CPU_CNT) {
if (ctxp->cpu != id)
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
"6704 CPU Check cmdcmpl: "
"cpu %d expect %d\n",
id, ctxp->cpu);
phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++;
}
this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
if (ctxp->cpu != id)
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
"6704 CPU Check cmdcmpl: "
"cpu %d expect %d\n",
id, ctxp->cpu);
}
#endif
}
@ -931,6 +929,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
struct lpfc_sli_ring *pring;
unsigned long iflags;
int rc;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
int id;
#endif
if (phba->pport->load_flag & FC_UNLOADING) {
rc = -ENODEV;
@ -954,16 +955,14 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
int id = raw_smp_processor_id();
if (id < LPFC_CHECK_CPU_CNT) {
if (rsp->hwqid != id)
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
"6705 CPU Check OP: "
"cpu %d expect %d\n",
id, rsp->hwqid);
phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++;
}
if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
id = raw_smp_processor_id();
this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
if (rsp->hwqid != id)
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
"6705 CPU Check OP: "
"cpu %d expect %d\n",
id, rsp->hwqid);
ctxp->cpu = id; /* Setup cpu for cmpl check */
}
#endif
@ -2270,15 +2269,13 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
size = nvmebuf->bytes_recv;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
if (current_cpu < LPFC_CHECK_CPU_CNT) {
if (idx != current_cpu)
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
"6703 CPU Check rcv: "
"cpu %d expect %d\n",
current_cpu, idx);
phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++;
}
if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
if (idx != current_cpu)
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
"6703 CPU Check rcv: "
"cpu %d expect %d\n",
current_cpu, idx);
}
#endif
@ -2598,7 +2595,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
union lpfc_wqe128 *wqe;
struct ulp_bde64 *bde;
dma_addr_t physaddr;
int i, cnt;
int i, cnt, nsegs;
int do_pbde;
int xc = 1;
@ -2629,6 +2626,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
phba->cfg_nvme_seg_cnt);
return NULL;
}
nsegs = rsp->sg_cnt;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
nvmewqe = ctxp->wqeq;
@ -2868,7 +2866,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
wqe->fcp_trsp.rsvd_12_15[0] = 0;
/* Use rspbuf, NOT sg list */
rsp->sg_cnt = 0;
nsegs = 0;
sgl->word2 = 0;
atomic_inc(&tgtp->xmt_fcp_rsp);
break;
@ -2885,7 +2883,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
nvmewqe->context1 = ndlp;
for_each_sg(rsp->sg, sgel, rsp->sg_cnt, i) {
for_each_sg(rsp->sg, sgel, nsegs, i) {
physaddr = sg_dma_address(sgel);
cnt = sg_dma_len(sgel);
sgl->addr_hi = putPaddrHigh(physaddr);


@ -3805,9 +3805,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct Scsi_Host *shost;
int idx;
uint32_t logit = LOG_FCP;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
int cpu;
#endif
/* Guard against abort handler being called at same time */
spin_lock(&lpfc_cmd->buf_lock);
@ -3826,11 +3823,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
}
if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
#endif
shost = cmd->device->host;
@ -4031,6 +4025,14 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->pCmd = NULL;
spin_unlock(&lpfc_cmd->buf_lock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (lpfc_cmd->ts_cmd_start) {
lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp;
lpfc_cmd->ts_data_io = ktime_get_ns();
phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
lpfc_io_ktime(phba, lpfc_cmd);
}
#endif
/* The sdev is not guaranteed to be valid post scsi_done upcall. */
cmd->scsi_done(cmd);
@ -4504,7 +4506,10 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
int err, idx;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
int cpu;
uint64_t start = 0L;
if (phba->ktime_on)
start = ktime_get_ns();
#endif
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
@ -4626,17 +4631,20 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT) {
struct lpfc_sli4_hdw_queue *hdwq =
&phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no];
hdwq->cpucheck_xmt_io[cpu]++;
}
}
if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
#endif
err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (start) {
lpfc_cmd->ts_cmd_start = start;
lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd;
lpfc_cmd->ts_cmd_wqput = ktime_get_ns();
} else {
lpfc_cmd->ts_cmd_start = 0;
}
#endif
if (err) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"3376 FCP could not issue IOCB err %x"
@ -6023,31 +6031,6 @@ struct scsi_host_template lpfc_template_nvme = {
.track_queue_depth = 0,
};
struct scsi_host_template lpfc_template_no_hr = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
.proc_name = LPFC_DRIVER_NAME,
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
.eh_bus_reset_handler = lpfc_bus_reset_handler,
.slave_alloc = lpfc_slave_alloc,
.slave_configure = lpfc_slave_configure,
.slave_destroy = lpfc_slave_destroy,
.scan_finished = lpfc_scan_finished,
.this_id = -1,
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
.shost_attrs = lpfc_hba_attrs,
.max_sectors = 0xFFFFFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
.change_queue_depth = scsi_change_queue_depth,
.track_queue_depth = 1,
};
struct scsi_host_template lpfc_template = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
@ -6073,26 +6056,3 @@ struct scsi_host_template lpfc_template = {
.change_queue_depth = scsi_change_queue_depth,
.track_queue_depth = 1,
};
struct scsi_host_template lpfc_vport_template = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
.proc_name = LPFC_DRIVER_NAME,
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
.slave_alloc = lpfc_slave_alloc,
.slave_configure = lpfc_slave_configure,
.slave_destroy = lpfc_slave_destroy,
.scan_finished = lpfc_scan_finished,
.this_id = -1,
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
.shost_attrs = lpfc_vport_attrs,
.max_sectors = 0xFFFF,
.change_queue_depth = scsi_change_queue_depth,
.track_queue_depth = 1,
};


@ -230,25 +230,16 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
* This routine will update the HBA index of a queue to reflect consumption of
* Work Queue Entries by the HBA. When the HBA indicates that it has consumed
* an entry the host calls this function to update the queue's internal
* pointers. This routine returns the number of entries that were consumed by
* the HBA.
* pointers.
**/
static uint32_t
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
uint32_t released = 0;
/* sanity check on queue memory */
if (unlikely(!q))
return 0;
return;
if (q->hba_index == index)
return 0;
do {
q->hba_index = ((q->hba_index + 1) % q->entry_count);
released++;
} while (q->hba_index != index);
return released;
q->hba_index = index;
}
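
The loop that lpfc_sli4_wq_release() ran advanced hba_index one slot at a time solely to count released entries, and no caller consumed that count, so the update collapses to a single store. A sketch with a hypothetical ring type:

struct ring {
	unsigned int hba_index;		/* last entry the HBA has consumed */
	unsigned int entry_count;
};

static void ring_release(struct ring *q, unsigned int index)
{
	/* was: while (q->hba_index != index)
	 *              q->hba_index = (q->hba_index + 1) % q->entry_count; */
	q->hba_index = index;
}
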
/**
@ -2511,6 +2502,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
!pmb->u.mb.mbxStatus) {
rpi = pmb->u.mb.un.varWords[0];
vpi = pmb->u.mb.un.varRegLogin.vpi;
if (phba->sli_rev == LPFC_SLI_REV4)
vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
lpfc_unreg_login(phba, vpi, rpi, pmb);
pmb->vport = vport;
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@ -4044,6 +4037,11 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
struct lpfc_iocbq *piocb, *next_iocb;
spin_lock_irq(&phba->hbalock);
if (phba->hba_flag & HBA_IOQ_FLUSH ||
!phba->sli4_hba.hdwq) {
spin_unlock_irq(&phba->hbalock);
return;
}
/* Indicate the I/O queues are flushed */
phba->hba_flag |= HBA_IOQ_FLUSH;
spin_unlock_irq(&phba->hbalock);
@ -5034,23 +5032,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
} else
phba->max_vpi = 0;
phba->fips_level = 0;
phba->fips_spec_rev = 0;
if (pmb->u.mb.un.varCfgPort.gdss) {
phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2850 Security Crypto Active. FIPS x%d "
"(Spec Rev: x%d)",
phba->fips_level, phba->fips_spec_rev);
}
if (pmb->u.mb.un.varCfgPort.sec_err) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2856 Config Port Security Crypto "
"Error: x%x ",
pmb->u.mb.un.varCfgPort.sec_err);
}
if (pmb->u.mb.un.varCfgPort.gerbm)
phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
if (pmb->u.mb.un.varCfgPort.gcrp)
@ -14442,12 +14423,10 @@ static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
{
struct lpfc_hba *phba = eq->phba;
if (list_empty(&phba->poll_list)) {
timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
/* kickstart slowpath processing for this eq */
/* kickstart slowpath processing if needed */
if (list_empty(&phba->poll_list))
mod_timer(&phba->cpuhp_poll_timer,
jiffies + msecs_to_jiffies(LPFC_POLL_HB));
}
list_add_rcu(&eq->_poll_list, &phba->poll_list);
synchronize_rcu();


@ -446,6 +446,6 @@ struct lpfc_io_buf {
uint64_t ts_last_cmd;
uint64_t ts_cmd_wqput;
uint64_t ts_isr_cmpl;
uint64_t ts_data_nvme;
uint64_t ts_data_io;
#endif
};


@ -697,13 +697,6 @@ struct lpfc_sli4_hdw_queue {
struct lpfc_lock_stat lock_conflict;
#endif
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
#define LPFC_CHECK_CPU_CNT 128
uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT];
uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT];
uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT];
#endif
/* Per HDWQ pool resources */
struct list_head sgl_list;
struct list_head cmd_rsp_buf_list;
@ -740,6 +733,15 @@ struct lpfc_sli4_hdw_queue {
#define lpfc_qp_spin_lock(lock, qp, lstat) spin_lock(lock)
#endif
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct lpfc_hdwq_stat {
u32 hdwq_no;
u32 rcv_io;
u32 xmt_io;
u32 cmpl_io;
};
#endif
struct lpfc_sli4_hba {
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
* config space registers
@ -921,6 +923,9 @@ struct lpfc_sli4_hba {
struct cpumask numa_mask;
uint16_t curr_disp_cpu;
struct lpfc_eq_intr_info __percpu *eq_info;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct lpfc_hdwq_stat __percpu *c_stat;
#endif
uint32_t conf_trunk;
#define lpfc_conf_trunk_port0_WORD conf_trunk
#define lpfc_conf_trunk_port0_SHIFT 0


@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "12.6.0.4"
#define LPFC_DRIVER_VERSION "12.8.0.0"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */


@ -9908,8 +9908,8 @@ static void scsih_remove(struct pci_dev *pdev)
ioc->remove_host = 1;
mpt3sas_wait_for_commands_to_complete(ioc);
_scsih_flush_running_cmds(ioc);
if (!pci_device_is_present(pdev))
_scsih_flush_running_cmds(ioc);
_scsih_fw_event_cleanup_queue(ioc);
@ -9992,8 +9992,8 @@ scsih_shutdown(struct pci_dev *pdev)
ioc->remove_host = 1;
mpt3sas_wait_for_commands_to_complete(ioc);
_scsih_flush_running_cmds(ioc);
if (!pci_device_is_present(pdev))
_scsih_flush_running_cmds(ioc);
_scsih_fw_event_cleanup_queue(ioc);


@ -2022,7 +2022,7 @@ static void __iscsi_unbind_session(struct work_struct *work)
if (session->target_id == ISCSI_MAX_TARGET) {
spin_unlock_irqrestore(&session->lock, flags);
mutex_unlock(&ihost->mutex);
return;
goto unbind_session_exit;
}
target_id = session->target_id;
@ -2034,6 +2034,8 @@ static void __iscsi_unbind_session(struct work_struct *work)
ida_simple_remove(&iscsi_sess_ida, target_id);
scsi_remove_target(&session->dev);
unbind_session_exit:
iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
}


@ -550,10 +550,12 @@ out:
static void sr_block_release(struct gendisk *disk, fmode_t mode)
{
struct scsi_cd *cd = scsi_cd(disk);
mutex_lock(&cd->lock);
cdrom_release(&cd->cdi, mode);
scsi_cd_put(cd);
mutex_unlock(&cd->lock);
scsi_cd_put(cd);
}
static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
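
The sr_block_release() fix above moves the final reference drop to after the object's own mutex is released: dropping the last reference frees the scsi_cd, embedded mutex included, so unlocking afterwards is a use-after-free. A userspace sketch of the ordering rule, with illustrative names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct cd {
	pthread_mutex_t lock;
	atomic_int refs;
};

static void cd_put(struct cd *cd)
{
	/* Dropping the last reference frees the object and its mutex. */
	if (atomic_fetch_sub(&cd->refs, 1) == 1) {
		pthread_mutex_destroy(&cd->lock);
		free(cd);
	}
}

static void cd_release(struct cd *cd)
{
	pthread_mutex_lock(&cd->lock);
	/* ... release work that needs the lock ... */
	pthread_mutex_unlock(&cd->lock);
	cd_put(cd);	/* must come after unlock: put while the lock is
			 * held can free cd before the unlock touches it */
}
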


@ -499,8 +499,15 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_lpm(hba);
if (err)
if (err) {
/*
* Force the link into the off state so that
* ufshcd_host_reset_and_restore() in ufshcd_suspend()
* performs a complete host reset.
*/
ufshcd_set_link_off(hba);
return -EAGAIN;
}
}
if (!ufshcd_is_link_active(hba))
@ -519,8 +526,10 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_hpm(hba);
if (err)
if (err) {
err = ufshcd_link_recovery(hba);
return err;
}
}
return 0;


@ -172,19 +172,6 @@ enum {
#define ufshcd_clear_eh_in_progress(h) \
((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
#define ufshcd_set_ufs_dev_active(h) \
((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
@ -868,28 +855,29 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
return false;
}
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
/**
* ufshcd_set_clk_freq - set UFS controller clock frequencies
* @hba: per adapter instance
* @scale_up: If true, set the maximum possible frequency, otherwise set the low frequency
*
* Returns 0 if successful
* Returns < 0 for any other errors
*/
static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
ktime_t start = ktime_get();
bool clk_state_changed = false;
if (list_empty(head))
goto out;
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
if (ret)
return ret;
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
if (scale_up && clki->max_freq) {
if (clki->curr_freq == clki->max_freq)
continue;
clk_state_changed = true;
ret = clk_set_rate(clki->clk, clki->max_freq);
if (ret) {
dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
@ -908,7 +896,6 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
if (clki->curr_freq == clki->min_freq)
continue;
clk_state_changed = true;
ret = clk_set_rate(clki->clk, clki->min_freq);
if (ret) {
dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
@ -927,11 +914,37 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
clki->name, clk_get_rate(clki->clk));
}
out:
return ret;
}
/**
* ufshcd_scale_clks - scale up or scale down UFS controller clocks
* @hba: per adapter instance
* @scale_up: True if scaling up and false if scaling down
*
* Returns 0 if successful
* Returns < 0 for any other errors
*/
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
ktime_t start = ktime_get();
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
if (ret)
goto out;
ret = ufshcd_set_clk_freq(hba, scale_up);
if (ret)
goto out;
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
if (ret)
ufshcd_set_clk_freq(hba, !scale_up);
out:
if (clk_state_changed)
trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
(scale_up ? "up" : "down"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
return ret;
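
After the split, ufshcd_scale_clks() reads as a notify/act/rollback sequence: the PRE_CHANGE vops hook, the actual frequency change in ufshcd_set_clk_freq(), then the POST_CHANGE hook, with the frequency change undone if the post hook fails. A compact sketch of that shape under hypothetical callback names:

enum notify { PRE_CHANGE, POST_CHANGE };

static int scale(int (*notify_cb)(enum notify, int up),
		 int (*set_freq)(int up), int up)
{
	int ret;

	ret = notify_cb(PRE_CHANGE, up);
	if (ret)
		return ret;

	ret = set_freq(up);
	if (ret)
		return ret;

	ret = notify_cb(POST_CHANGE, up);
	if (ret)
		set_freq(!up);	/* undo on post-change failure */
	return ret;
}
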
@ -1065,8 +1078,7 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
}
/* check if the power mode needs to be changed or not? */
ret = ufshcd_change_power_mode(hba, &new_pwr_info);
ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
if (ret)
dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
__func__, ret,
@ -1119,35 +1131,32 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
ret = ufshcd_clock_scaling_prepare(hba);
if (ret)
return ret;
goto out;
/* scale down the gear before scaling down clocks */
if (!scale_up) {
ret = ufshcd_scale_gear(hba, false);
if (ret)
goto out;
goto out_unprepare;
}
ret = ufshcd_scale_clks(hba, scale_up);
if (ret) {
if (!scale_up)
ufshcd_scale_gear(hba, true);
goto out;
goto out_unprepare;
}
/* scale up the gear after scaling up clocks */
if (scale_up) {
ret = ufshcd_scale_gear(hba, true);
if (ret) {
if (ret)
ufshcd_scale_clks(hba, false);
goto out;
}
}
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
out:
out_unprepare:
ufshcd_clock_scaling_unprepare(hba);
out:
ufshcd_release(hba);
return ret;
}
@ -3785,7 +3794,7 @@ out:
return ret;
}
static int ufshcd_link_recovery(struct ufs_hba *hba)
int ufshcd_link_recovery(struct ufs_hba *hba)
{
int ret;
unsigned long flags;
@ -3812,6 +3821,7 @@ static int ufshcd_link_recovery(struct ufs_hba *hba)
return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
@ -4112,8 +4122,6 @@ int ufshcd_config_pwr_mode(struct ufs_hba *hba,
memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
ret = ufshcd_change_power_mode(hba, &final_params);
if (!ret)
ufshcd_print_pwr_info(hba);
return ret;
}
@ -6299,7 +6307,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* scale up clocks to max frequency before full reinitialization */
ufshcd_scale_clks(hba, true);
ufshcd_set_clk_freq(hba, true);
err = ufshcd_hba_enable(hba);
if (err)
@ -7127,6 +7135,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
__func__, ret);
goto out;
}
ufshcd_print_pwr_info(hba);
}
/*


@ -130,6 +130,19 @@ enum uic_link_state {
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
UIC_LINK_HIBERN8_STATE)
#define ufshcd_set_ufs_dev_active(h) \
((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
/*
* UFS Power management levels.
* Each level is in increasing order of power savings.
@ -788,6 +801,7 @@ int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
int ufshcd_link_recovery(struct ufs_hba *hba);
int ufshcd_make_hba_operational(struct ufs_hba *hba);
void ufshcd_remove(struct ufs_hba *);
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
@ -1083,6 +1097,7 @@ static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
{
if (hba->vops && hba->vops->device_reset) {
hba->vops->device_reset(hba);
ufshcd_set_ufs_dev_active(hba);
ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, 0);
}
}


@ -134,7 +134,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
* Assigned designator
*/
desig_len = desc[7];
if (desig_len != 16) {
if (desig_len != XCOPY_NAA_IEEE_REGEX_LEN) {
pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
return -EINVAL;
}
@ -315,11 +315,6 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
xop->nolb, (unsigned long long)xop->src_lba,
(unsigned long long)xop->dst_lba);
if (dc != 0) {
xop->dbl = get_unaligned_be24(&desc[29]);
pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
}
return 0;
}
@ -415,7 +410,8 @@ static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
struct xcopy_pt_cmd, se_cmd);
kfree(xpt_cmd);
/* xpt_cmd is on the stack, nothing to free here */
pr_debug("xpt_cmd done: %p\n", xpt_cmd);
}
static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
@ -504,7 +500,6 @@ void target_xcopy_release_pt(void)
* @cdb: SCSI CDB to be copied into @xpt_cmd.
* @remote_port: If false, use the LUN through which the XCOPY command has
* been received. If true, use @se_dev->xcopy_lun.
* @alloc_mem: Whether or not to allocate an SGL list.
*
* Set up a SCSI command (READ or WRITE) that will be used to execute an
* XCOPY command.
@ -514,12 +509,9 @@ static int target_xcopy_setup_pt_cmd(
struct xcopy_op *xop,
struct se_device *se_dev,
unsigned char *cdb,
bool remote_port,
bool alloc_mem)
bool remote_port)
{
struct se_cmd *cmd = &xpt_cmd->se_cmd;
sense_reason_t sense_rc;
int ret = 0, rc;
/*
* Setup LUN+port to honor reservations based upon xop->op_origin for
@ -535,46 +527,17 @@ static int target_xcopy_setup_pt_cmd(
cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
cmd->tag = 0;
sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
if (sense_rc) {
ret = -EINVAL;
goto out;
}
if (target_setup_cmd_from_cdb(cmd, cdb))
return -EINVAL;
if (alloc_mem) {
rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
cmd->data_length, false, false);
if (rc < 0) {
ret = rc;
goto out;
}
/*
* Set this bit so that transport_free_pages() allows the
* caller to release SGLs + physical memory allocated by
* transport_generic_get_mem()..
*/
cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
} else {
/*
* Here the previously allocated SGLs for the internal READ
* are mapped zero-copy to the internal WRITE.
*/
sense_rc = transport_generic_map_mem_to_cmd(cmd,
xop->xop_data_sg, xop->xop_data_nents,
NULL, 0);
if (sense_rc) {
ret = -EINVAL;
goto out;
}
if (transport_generic_map_mem_to_cmd(cmd, xop->xop_data_sg,
xop->xop_data_nents, NULL, 0))
return -EINVAL;
pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
" %u\n", cmd->t_data_sg, cmd->t_data_nents);
}
pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
" %u\n", cmd->t_data_sg, cmd->t_data_nents);
return 0;
out:
return ret;
}
static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
@ -604,20 +567,15 @@ static int target_xcopy_read_source(
sector_t src_lba,
u32 src_sectors)
{
struct xcopy_pt_cmd *xpt_cmd;
struct se_cmd *se_cmd;
struct xcopy_pt_cmd xpt_cmd;
struct se_cmd *se_cmd = &xpt_cmd.se_cmd;
u32 length = (src_sectors * src_dev->dev_attrib.block_size);
int rc;
unsigned char cdb[16];
bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);
xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
if (!xpt_cmd) {
pr_err("Unable to allocate xcopy_pt_cmd\n");
return -ENOMEM;
}
init_completion(&xpt_cmd->xpt_passthrough_sem);
se_cmd = &xpt_cmd->se_cmd;
memset(&xpt_cmd, 0, sizeof(xpt_cmd));
init_completion(&xpt_cmd.xpt_passthrough_sem);
memset(&cdb[0], 0, 16);
cdb[0] = READ_16;
@ -627,36 +585,24 @@ static int target_xcopy_read_source(
(unsigned long long)src_lba, src_sectors, length);
transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
xop->src_pt_cmd = xpt_cmd;
DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0]);
rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
remote_port, true);
rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, src_dev, &cdb[0],
remote_port);
if (rc < 0) {
ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
transport_generic_free_cmd(se_cmd, 0);
return rc;
ec_cmd->scsi_status = se_cmd->scsi_status;
goto out;
}
xop->xop_data_sg = se_cmd->t_data_sg;
xop->xop_data_nents = se_cmd->t_data_nents;
pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
" memory\n", xop->xop_data_sg, xop->xop_data_nents);
rc = target_xcopy_issue_pt_cmd(xpt_cmd);
if (rc < 0) {
ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
transport_generic_free_cmd(se_cmd, 0);
return rc;
}
/*
* Clear off the allocated t_data_sg, that has been saved for
* zero-copy WRITE submission reuse in struct xcopy_op..
*/
se_cmd->t_data_sg = NULL;
se_cmd->t_data_nents = 0;
return 0;
rc = target_xcopy_issue_pt_cmd(&xpt_cmd);
if (rc < 0)
ec_cmd->scsi_status = se_cmd->scsi_status;
out:
transport_generic_free_cmd(se_cmd, 0);
return rc;
}
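
target_xcopy_read_source() (and target_xcopy_write_destination() below) now place the short-lived passthrough command on the stack instead of kzalloc()/kfree() per I/O, removing an allocation and its failure path. A sketch of the idea with hypothetical types; the requirement is that the function waits for the command to complete before returning, so the stack storage outlives every reference to it:

struct pt_cmd {			/* hypothetical stand-in for xcopy_pt_cmd */
	unsigned char sense[96];
	int status;
};

static int issue_read(void)
{
	struct pt_cmd cmd = { 0 };	/* was: kzalloc() + kfree() per I/O */

	/* ... set up, submit, and synchronously wait for completion here,
	 * so cmd is never referenced after this function returns ... */
	return cmd.status;
}
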
static int target_xcopy_write_destination(
@ -666,20 +612,15 @@ static int target_xcopy_write_destination(
sector_t dst_lba,
u32 dst_sectors)
{
struct xcopy_pt_cmd *xpt_cmd;
struct se_cmd *se_cmd;
struct xcopy_pt_cmd xpt_cmd;
struct se_cmd *se_cmd = &xpt_cmd.se_cmd;
u32 length = (dst_sectors * dst_dev->dev_attrib.block_size);
int rc;
unsigned char cdb[16];
bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);
xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
if (!xpt_cmd) {
pr_err("Unable to allocate xcopy_pt_cmd\n");
return -ENOMEM;
}
init_completion(&xpt_cmd->xpt_passthrough_sem);
se_cmd = &xpt_cmd->se_cmd;
memset(&xpt_cmd, 0, sizeof(xpt_cmd));
init_completion(&xpt_cmd.xpt_passthrough_sem);
memset(&cdb[0], 0, 16);
cdb[0] = WRITE_16;
@ -689,36 +630,21 @@ static int target_xcopy_write_destination(
(unsigned long long)dst_lba, dst_sectors, length);
transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
xop->dst_pt_cmd = xpt_cmd;
DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0]);
rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
remote_port, false);
rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, dst_dev, &cdb[0],
remote_port);
if (rc < 0) {
struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
/*
* If the failure happened before the t_mem_list hand-off in
* target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that
* core releases this memory on error during X-COPY WRITE I/O.
*/
src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
src_cmd->t_data_sg = xop->xop_data_sg;
src_cmd->t_data_nents = xop->xop_data_nents;
transport_generic_free_cmd(se_cmd, 0);
return rc;
ec_cmd->scsi_status = se_cmd->scsi_status;
goto out;
}
rc = target_xcopy_issue_pt_cmd(xpt_cmd);
if (rc < 0) {
ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
transport_generic_free_cmd(se_cmd, 0);
return rc;
}
return 0;
rc = target_xcopy_issue_pt_cmd(&xpt_cmd);
if (rc < 0)
ec_cmd->scsi_status = se_cmd->scsi_status;
out:
transport_generic_free_cmd(se_cmd, 0);
return rc;
}
static void target_xcopy_do_work(struct work_struct *work)
@ -729,7 +655,7 @@ static void target_xcopy_do_work(struct work_struct *work)
sector_t src_lba, dst_lba, end_lba;
unsigned int max_sectors;
int rc = 0;
unsigned short nolb, cur_nolb, max_nolb, copied_nolb = 0;
unsigned short nolb, max_nolb, copied_nolb = 0;
if (target_parse_xcopy_cmd(xop) != TCM_NO_SENSE)
goto err_free;
@ -759,7 +685,23 @@ static void target_xcopy_do_work(struct work_struct *work)
(unsigned long long)src_lba, (unsigned long long)dst_lba);
while (src_lba < end_lba) {
cur_nolb = min(nolb, max_nolb);
unsigned short cur_nolb = min(nolb, max_nolb);
u32 cur_bytes = cur_nolb * src_dev->dev_attrib.block_size;
if (cur_bytes != xop->xop_data_bytes) {
/*
* (Re)allocate a buffer large enough to hold the XCOPY
* I/O size; the buffer is reused across each read / write loop.
*/
target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
rc = target_alloc_sgl(&xop->xop_data_sg,
&xop->xop_data_nents,
cur_bytes,
false, false);
if (rc < 0)
goto out;
xop->xop_data_bytes = cur_bytes;
}
pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
" cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);
@ -777,10 +719,8 @@ static void target_xcopy_do_work(struct work_struct *work)
rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
dst_lba, cur_nolb);
if (rc < 0) {
transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
if (rc < 0)
goto out;
}
dst_lba += cur_nolb;
pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
@ -788,14 +728,10 @@ static void target_xcopy_do_work(struct work_struct *work)
copied_nolb += cur_nolb;
nolb -= cur_nolb;
transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0);
}
xcopy_pt_undepend_remotedev(xop);
target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
kfree(xop);
pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
@ -809,6 +745,7 @@ static void target_xcopy_do_work(struct work_struct *work)
out:
xcopy_pt_undepend_remotedev(xop);
target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
err_free:
kfree(xop);


@ -5,7 +5,7 @@
#define XCOPY_TARGET_DESC_LEN 32
#define XCOPY_SEGMENT_DESC_LEN 28
#define XCOPY_NAA_IEEE_REGEX_LEN 16
#define XCOPY_MAX_SECTORS 1024
#define XCOPY_MAX_SECTORS 4096
/*
* SPC4r37 6.4.6.1
@ -18,8 +18,6 @@ enum xcopy_origin_list {
XCOL_DEST_RECV_OP = 0x02,
};
struct xcopy_pt_cmd;
struct xcopy_op {
int op_origin;
@ -35,11 +33,8 @@ struct xcopy_op {
unsigned short stdi;
unsigned short dtdi;
unsigned short nolb;
unsigned int dbl;
struct xcopy_pt_cmd *src_pt_cmd;
struct xcopy_pt_cmd *dst_pt_cmd;
u32 xop_data_bytes;
u32 xop_data_nents;
struct scatterlist *xop_data_sg;
struct work_struct xop_work;