target: Convert ->transport_wait_for_tasks usage to transport_generic_free_cmd

This patch converts se_cmd->transport_wait_for_tasks(se_cmd, 1) usage to use
transport_generic_free_cmd() directly in target-core and iscsi-target fabric
usage.  This includes:

*) Removal of the optional transport_generic_free_cmd() call from within
   transport_generic_wait_for_tasks()
*) Usage of existing SCF_SUPPORTED_SAM_OPCODE to determine when
   transport_generic_wait_for_tasks() processing may occur instead of
   checking se_cmd->transport_wait_for_tasks()
*) Move transport_generic_wait_for_tasks() call ahead of core_dec_lacl_count()
   and transport_lun_remove_cmd() in transport_generic_free_cmd() to follow
   existing logic for iscsi-target w/ se_cmd->transport_wait_for_tasks(se_cmd, 1)
   (see the sketch after this list)
*) Removal of se_cmd->transport_wait_for_tasks() function pointer
*) Rename transport_generic_wait_for_tasks() -> transport_wait_for_tasks(), and
   add a docbook comment.
*) Add EXPORT_SYMBOL for transport_wait_for_tasks()
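
For reference, the resulting teardown ordering in transport_generic_free_cmd()
reads roughly as follows.  This is a sketch of the new code path distilled from
the target-core hunk below, not a complete listing:

	void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
	{
		if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
			/* TMRs still hit wait_for_tasks before release */
			if (wait_for_tasks && cmd->se_tmr_req)
				transport_wait_for_tasks(cmd);

			transport_release_cmd(cmd);
		} else {
			/* Wait first, then drop LACL and LUN references */
			if (wait_for_tasks)
				transport_wait_for_tasks(cmd);

			core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);

			if (cmd->se_lun)
				transport_lun_remove_cmd(cmd);

			transport_free_dev_tasks(cmd);
			transport_put_cmd(cmd);
		}
	}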

For the case in iscsi_target_erl2.c:iscsit_prepare_cmds_for_realligance()
where se_cmd->transport_wait_for_tasks(se_cmd, 0) is called, this patch
adds a direct call to transport_wait_for_tasks().
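
The caller-side conversion in the iscsi-target release/recovery paths then
reduces to the following before/after fragment, distilled from the
iscsi_target_erl2.c hunks below (locking and list handling omitted):

	/* before: indirect call through the se_cmd function pointer */
	if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
	    !(cmd->se_cmd.transport_wait_for_tasks))
		iscsit_release_cmd(cmd);
	else
		cmd->se_cmd.transport_wait_for_tasks(&cmd->se_cmd, 1);

	/* after: wait-for-tasks and release folded into one call */
	if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD))
		iscsit_release_cmd(cmd);
	else
		transport_generic_free_cmd(&cmd->se_cmd, 1);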

(hch: Fix transport_generic_free_cmd() usage in iscsit_release_commands_from_conn)
(nab: Add patch: Ensure that TMRs hit wait_for_tasks logic during release)

Reported-by: Christoph Hellwig <hch@lst.de>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
commit d14921d6ad
parent dd503a5fcc
Author: Nicholas Bellinger
Date:   2011-10-09 01:00:58 -07:00

5 changed files with 51 additions and 86 deletions


@@ -3940,7 +3940,6 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
{
struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
struct iscsi_session *sess = conn->sess;
struct se_cmd *se_cmd;
/*
* We expect this function to only ever be called from either RX or TX
* thread context via iscsit_close_connection() once the other context
@@ -3953,16 +3952,13 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
list_del(&cmd->i_list);
spin_unlock_bh(&conn->cmd_lock);
iscsit_increment_maxcmdsn(cmd, sess);
se_cmd = &cmd->se_cmd;
/*
* Special cases for active iSCSI TMR, and
* transport_lookup_cmd_lun() failing from
* iscsit_get_lun_for_cmd() in iscsit_handle_scsi_cmd().
*/
if (cmd->tmr_req && se_cmd->transport_wait_for_tasks)
se_cmd->transport_wait_for_tasks(se_cmd, 1);
else if (cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)
transport_release_cmd(se_cmd);
if (cmd->tmr_req)
transport_generic_free_cmd(&cmd->se_cmd, 1);
else
iscsit_release_cmd(cmd);
@@ -3973,10 +3969,8 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
spin_unlock_bh(&conn->cmd_lock);
iscsit_increment_maxcmdsn(cmd, sess);
se_cmd = &cmd->se_cmd;
if (se_cmd->transport_wait_for_tasks)
se_cmd->transport_wait_for_tasks(se_cmd, 1);
transport_generic_free_cmd(&cmd->se_cmd, 1);
spin_lock_bh(&conn->cmd_lock);
}


@@ -143,12 +143,10 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
list_del(&cmd->i_list);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
!(cmd->se_cmd.transport_wait_for_tasks))
if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD))
iscsit_release_cmd(cmd);
else
cmd->se_cmd.transport_wait_for_tasks(
&cmd->se_cmd, 1);
transport_generic_free_cmd(&cmd->se_cmd, 1);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -170,12 +168,10 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
list_del(&cmd->i_list);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
!(cmd->se_cmd.transport_wait_for_tasks))
if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD))
iscsit_release_cmd(cmd);
else
cmd->se_cmd.transport_wait_for_tasks(
&cmd->se_cmd, 1);
transport_generic_free_cmd(&cmd->se_cmd, 1);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -260,12 +256,10 @@ void iscsit_discard_cr_cmds_by_expstatsn(
iscsit_remove_cmd_from_connection_recovery(cmd, sess);
spin_unlock(&cr->conn_recovery_cmd_lock);
if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
!(cmd->se_cmd.transport_wait_for_tasks))
if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD))
iscsit_release_cmd(cmd);
else
cmd->se_cmd.transport_wait_for_tasks(
&cmd->se_cmd, 1);
transport_generic_free_cmd(&cmd->se_cmd, 1);
spin_lock(&cr->conn_recovery_cmd_lock);
}
spin_unlock(&cr->conn_recovery_cmd_lock);
@@ -319,12 +313,10 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
list_del(&cmd->i_list);
spin_unlock_bh(&conn->cmd_lock);
if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
!(cmd->se_cmd.transport_wait_for_tasks))
if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD))
iscsit_release_cmd(cmd);
else
cmd->se_cmd.transport_wait_for_tasks(
&cmd->se_cmd, 1);
transport_generic_free_cmd(&cmd->se_cmd, 1);
spin_lock_bh(&conn->cmd_lock);
}
spin_unlock_bh(&conn->cmd_lock);
@@ -378,12 +370,10 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
list_del(&cmd->i_list);
spin_unlock_bh(&conn->cmd_lock);
if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
!(cmd->se_cmd.transport_wait_for_tasks))
if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD))
iscsit_release_cmd(cmd);
else
cmd->se_cmd.transport_wait_for_tasks(
&cmd->se_cmd, 1);
transport_generic_free_cmd(&cmd->se_cmd, 1);
spin_lock_bh(&conn->cmd_lock);
continue;
}
@@ -404,12 +394,10 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
list_del(&cmd->i_list);
spin_unlock_bh(&conn->cmd_lock);
if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
!(cmd->se_cmd.transport_wait_for_tasks))
if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD))
iscsit_release_cmd(cmd);
else
cmd->se_cmd.transport_wait_for_tasks(
&cmd->se_cmd, 1);
transport_generic_free_cmd(&cmd->se_cmd, 1);
spin_lock_bh(&conn->cmd_lock);
continue;
}
@@ -434,9 +422,8 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
iscsit_free_all_datain_reqs(cmd);
if ((cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) &&
cmd->se_cmd.transport_wait_for_tasks)
cmd->se_cmd.transport_wait_for_tasks(&cmd->se_cmd, 0);
if (cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)
transport_wait_for_tasks(&cmd->se_cmd);
/*
* Add the struct iscsi_cmd to the connection recovery cmd list
*/


@@ -1638,8 +1638,6 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
return 0;
}
static void transport_generic_wait_for_tasks(struct se_cmd *, int);
/* transport_generic_allocate_tasks():
*
* Called from fabric RX Thread.
@@ -1651,12 +1649,6 @@ int transport_generic_allocate_tasks(
int ret;
transport_generic_prepare_cdb(cdb);
/*
* This is needed for early exceptions.
*/
cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
/*
* Ensure that the received CDB is less than the max (252 + 8) bytes
* for VARIABLE_LENGTH_CMD
@@ -1739,7 +1731,7 @@ int transport_handle_cdb_direct(
* Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
* transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
* in existing usage to ensure that outstanding descriptors are handled
* correctly during shutdown via transport_generic_wait_for_tasks()
* correctly during shutdown via transport_wait_for_tasks()
*
* Also, we don't take cmd->t_state_lock here as we only expect
* this to be called for initial descriptor submission.
@@ -1819,11 +1811,6 @@ EXPORT_SYMBOL(transport_generic_handle_data);
int transport_generic_handle_tmr(
struct se_cmd *cmd)
{
/*
* This is needed for early exceptions.
*/
cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
return 0;
}
@@ -2504,8 +2491,6 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd)
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}
static void transport_nop_wait_for_tasks(struct se_cmd *, int);
static inline u32 transport_get_sectors_6(
unsigned char *cdb,
struct se_cmd *cmd,
@@ -2780,7 +2765,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
static int
transport_handle_reservation_conflict(struct se_cmd *cmd)
{
cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
@@ -2881,8 +2865,6 @@ static int transport_generic_cmd_sequencer(
* Check for an existing UNIT ATTENTION condition
*/
if (core_scsi3_ua_check(cmd, cdb) < 0) {
cmd->transport_wait_for_tasks =
&transport_nop_wait_for_tasks;
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
return -EINVAL;
@@ -2892,7 +2874,6 @@ static int transport_generic_cmd_sequencer(
*/
ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
if (ret != 0) {
cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
/*
* Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
* The ALUA additional sense code qualifier (ASCQ) is determined
@@ -3396,7 +3377,6 @@ static int transport_generic_cmd_sequencer(
pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
" 0x%02x, sending CHECK_CONDITION.\n",
cmd->se_tfo->get_fabric_name(), cdb[0]);
cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
goto out_unsupported_cdb;
}
@@ -4304,17 +4284,20 @@ EXPORT_SYMBOL(transport_release_cmd);
void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD))
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
if (wait_for_tasks && cmd->se_tmr_req)
transport_wait_for_tasks(cmd);
transport_release_cmd(cmd);
else {
} else {
if (wait_for_tasks)
transport_wait_for_tasks(cmd);
core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
if (cmd->se_lun)
transport_lun_remove_cmd(cmd);
if (wait_for_tasks && cmd->transport_wait_for_tasks)
cmd->transport_wait_for_tasks(cmd, 0);
transport_free_dev_tasks(cmd);
transport_put_cmd(cmd);
@@ -4322,13 +4305,6 @@ void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
}
EXPORT_SYMBOL(transport_generic_free_cmd);
static void transport_nop_wait_for_tasks(
struct se_cmd *cmd,
int remove_cmd)
{
return;
}
/* transport_lun_wait_for_tasks():
*
* Called from ConfigFS context to stop the passed struct se_cmd to allow
@@ -4498,21 +4474,30 @@ int transport_clear_lun_from_sessions(struct se_lun *lun)
return 0;
}
/* transport_generic_wait_for_tasks():
/**
* transport_wait_for_tasks - wait for completion to occur
* @cmd: command to wait
*
* Called from frontend or passthrough context to wait for storage engine
* to pause and/or release frontend generated struct se_cmd.
* Called from frontend fabric context to wait for storage engine
* to pause and/or release frontend generated struct se_cmd.
*/
static void transport_generic_wait_for_tasks(
struct se_cmd *cmd,
int remove_cmd)
void transport_wait_for_tasks(struct se_cmd *cmd)
{
unsigned long flags;
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
return;
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
/*
* Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
* has been set in transport_set_supported_SAM_opcode().
*/
if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
/*
* If we are already stopped due to an external event (ie: LUN shutdown)
* sleep until the connection can have the passed struct se_cmd back.
@@ -4552,8 +4537,10 @@ static void transport_generic_wait_for_tasks(
atomic_set(&cmd->transport_lun_stop, 0);
}
if (!atomic_read(&cmd->t_transport_active) ||
atomic_read(&cmd->t_transport_aborted))
goto remove;
atomic_read(&cmd->t_transport_aborted)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
atomic_set(&cmd->t_transport_stop, 1);
@@ -4576,13 +4563,10 @@ static void transport_generic_wait_for_tasks(
pr_debug("wait_for_tasks: Stopped wait_for_compltion("
"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
cmd->se_tfo->get_task_tag(cmd));
remove:
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
if (!remove_cmd)
return;
transport_generic_free_cmd(cmd, 0);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
EXPORT_SYMBOL(transport_wait_for_tasks);
static int transport_get_sense_codes(
struct se_cmd *cmd,


@@ -480,7 +480,6 @@ struct se_cmd {
struct target_core_fabric_ops *se_tfo;
int (*transport_emulate_cdb)(struct se_cmd *);
void (*transport_split_cdb)(unsigned long long, u32, unsigned char *);
void (*transport_wait_for_tasks)(struct se_cmd *, int);
void (*transport_complete_callback)(struct se_cmd *);
int (*transport_qf_callback)(struct se_cmd *);


@@ -180,6 +180,7 @@ extern void __transport_stop_task_timer(struct se_task *, unsigned long *);
extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
struct scatterlist *, u32);
extern int transport_clear_lun_from_sessions(struct se_lun *);
extern void transport_wait_for_tasks(struct se_cmd *);
extern int transport_check_aborted_status(struct se_cmd *, int);
extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
extern void transport_send_task_abort(struct se_cmd *);