Merge branch '3.2-rc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

* '3.2-rc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (25 commits)
  iscsi-target: Fix hex2bin warn_unused compile message
  target: Don't return an error if disabling unsupported features
  target/rd: fix or rewrite the copy routine
  target/rd: simplify the page/offset computation
  target: remove the unused se_dev_list
  target/file: walk properly over sg list
  target: remove unused struct fields
  target: Fix page length in emulated INQUIRY VPD page 86h
  target: Handle 0 correctly in transport_get_sectors_6()
  target: Don't return an error status for 0-length READ and WRITE
  iscsi-target: Use kmemdup rather than duplicating its implementation
  iscsi-target: Add missing F_BIT for iscsi_tm_rsp
  iscsi-target: Fix residual count handling + remove iscsi_cmd->residual_count
  target: Reject SCSI data overflow for fabrics using transport_generic_map_mem_to_cmd
  target: remove the unused t_task_pt_sgl and t_task_pt_sgl_num se_cmd fields
  target: remove the t_tasks_bidi se_cmd field
  target: remove the t_tasks_fua se_cmd field
  target: remove the se_ordered_node se_cmd field
  target: remove the se_obj_ptr and se_orig_obj_ptr se_cmd fields
  target: Drop config_item_name usage in fabric TFO->free_wwn()
  ...
Linus Torvalds 2011-12-07 18:18:27 -08:00
commit 34a9d2c39a
22 changed files with 425 additions and 659 deletions
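
Most of the hunks below follow a single conversion: the driver-private PYX_TRANSPORT_* status codes are dropped, error paths instead record a TCM_* reason in se_cmd->scsi_sense_reason (which the core later turns into CHECK CONDITION sense data) and return an ordinary negative errno, and the old success value PYX_TRANSPORT_SENT_TO_TRANSPORT simply becomes 0. Condensed from the UNMAP emulation hunk further down (context trimmed), the shape of the change is:

	/* before: positive magic status the caller had to decode */
	if (!dev->transport->do_discard)
		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;

	/* after: record why the command failed and return a plain errno;
	 * the core maps TCM_UNSUPPORTED_SCSI_OPCODE to sense data */
	if (!dev->transport->do_discard) {
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		return -ENOSYS;
	}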

View File

@ -614,13 +614,12 @@ int iscsit_add_reject(
hdr = (struct iscsi_reject *) cmd->pdu;
hdr->reason = reason;
cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
iscsit_release_cmd(cmd);
return -1;
}
memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
@ -661,13 +660,12 @@ int iscsit_add_reject_from_cmd(
hdr = (struct iscsi_reject *) cmd->pdu;
hdr->reason = reason;
cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
iscsit_release_cmd(cmd);
return -1;
}
memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
if (add_to_conn) {
spin_lock_bh(&conn->cmd_lock);
@ -1017,11 +1015,6 @@ done:
" non-existent or non-exported iSCSI LUN:"
" 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
}
if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1, 1, buf, cmd);
send_check_condition = 1;
goto attach_cmd;
}
@ -1044,6 +1037,8 @@ done:
*/
send_check_condition = 1;
} else {
cmd->data_length = cmd->se_cmd.data_length;
if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@ -1123,7 +1118,7 @@ attach_cmd:
* the backend memory allocation.
*/
ret = transport_generic_new_cmd(&cmd->se_cmd);
if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
if (ret < 0) {
immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
dump_immediate_data = 1;
goto after_immediate_data;
@ -1341,7 +1336,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
spin_lock_irqsave(&se_cmd->t_state_lock, flags);
if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
(se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
(se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
dump_unsolicited_data = 1;
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@ -2513,10 +2508,10 @@ static int iscsit_send_data_in(
if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
hdr->residual_count = cpu_to_be32(cmd->residual_count);
hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
hdr->residual_count = cpu_to_be32(cmd->residual_count);
hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
}
}
hton24(hdr->dlength, datain.length);
@ -3018,10 +3013,10 @@ static int iscsit_send_status(
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
hdr->residual_count = cpu_to_be32(cmd->residual_count);
hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
hdr->residual_count = cpu_to_be32(cmd->residual_count);
hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
}
hdr->response = cmd->iscsi_response;
hdr->cmd_status = cmd->se_cmd.scsi_status;
@ -3133,6 +3128,7 @@ static int iscsit_send_task_mgt_rsp(
hdr = (struct iscsi_tm_rsp *) cmd->pdu;
memset(hdr, 0, ISCSI_HDR_LEN);
hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
hdr->flags = ISCSI_FLAG_CMD_FINAL;
hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
hdr->itt = cpu_to_be32(cmd->init_task_tag);
cmd->stat_sn = conn->stat_sn++;
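
The first two hunks above fold a kzalloc() plus memcpy() pair into a single kmemdup() call. The behaviour is unchanged: kmemdup() allocates len bytes with the given GFP flags and copies the source into them, and the zeroing done by kzalloc() was redundant because the memcpy() overwrote the whole allocation anyway. A standalone sketch of the equivalence (hypothetical helper, not the driver code itself):

	#include <linux/slab.h>
	#include <linux/string.h>

	/* Hypothetical helper: duplicate a fixed-size PDU header. */
	static void *dup_hdr(const void *hdr, size_t len)
	{
		/* Open-coded form removed by the patch:
		 *
		 *	p = kzalloc(len, GFP_KERNEL);
		 *	if (!p)
		 *		return NULL;
		 *	memcpy(p, hdr, len);
		 *	return p;
		 *
		 * Equivalent single call: */
		return kmemdup(hdr, len, GFP_KERNEL);
	}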

View File

@ -30,9 +30,11 @@
static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
{
int j = DIV_ROUND_UP(len, 2);
int j = DIV_ROUND_UP(len, 2), rc;
hex2bin(dst, src, j);
rc = hex2bin(dst, src, j);
if (rc < 0)
pr_debug("CHAP string contains non hex digit symbols\n");
dst[j] = '\0';
return j;
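
hex2bin() gained a __must_check int return value in this same kernel cycle: 0 on success, negative if the source contains a non-hex character. The hunk above only needs to consume that return value to silence the warn_unused_result warning, so it logs via pr_debug() and keeps the old always-succeed contract. A sketch of a stricter variant that propagates the failure instead (hypothetical helper, not the in-tree function):

	#include <linux/kernel.h>

	/* Convert a string of hex characters into its binary form,
	 * refusing input that contains non-hex digits. */
	static int chap_hex_to_bin_checked(unsigned char *dst,
					   const char *src, int len)
	{
		int n = DIV_ROUND_UP(len, 2);

		if (hex2bin(dst, src, n) < 0)
			return -EINVAL;
		return n;
	}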

View File

@ -398,7 +398,6 @@ struct iscsi_cmd {
u32 pdu_send_order;
/* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
u32 pdu_start;
u32 residual_count;
/* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
u32 seq_send_order;
/* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
@ -535,7 +534,6 @@ struct iscsi_conn {
atomic_t connection_exit;
atomic_t connection_recovery;
atomic_t connection_reinstatement;
atomic_t connection_wait;
atomic_t connection_wait_rcfr;
atomic_t sleep_on_conn_wait_comp;
atomic_t transport_failed;
@ -643,7 +641,6 @@ struct iscsi_session {
atomic_t session_reinstatement;
atomic_t session_stop_active;
atomic_t sleep_on_sess_wait_comp;
atomic_t transport_wait_cmds;
/* connection list */
struct list_head sess_conn_list;
struct list_head cr_active_list;

View File

@ -938,8 +938,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
* handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
*/
if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
if (se_cmd->se_cmd_flags &
SCF_SCSI_RESERVATION_CONFLICT) {
if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
cmd->i_state = ISTATE_SEND_STATUS;
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,

View File

@ -224,7 +224,7 @@ static int iscsi_login_zero_tsih_s1(
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Could not allocate memory for session\n");
return -1;
return -ENOMEM;
}
iscsi_login_set_conn_values(sess, conn, pdu->cid);
@ -250,7 +250,8 @@ static int iscsi_login_zero_tsih_s1(
pr_err("idr_pre_get() for sess_idr failed\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
return -1;
kfree(sess);
return -ENOMEM;
}
spin_lock(&sess_idr_lock);
idr_get_new(&sess_idr, NULL, &sess->session_index);
@ -270,14 +271,16 @@ static int iscsi_login_zero_tsih_s1(
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Unable to allocate memory for"
" struct iscsi_sess_ops.\n");
return -1;
kfree(sess);
return -ENOMEM;
}
sess->se_sess = transport_init_session();
if (!sess->se_sess) {
if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
return -1;
kfree(sess);
return -ENOMEM;
}
return 0;
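
Besides returning real errno values and freeing the half-initialised session on every early exit, the hunk above switches the transport_init_session() check from !ptr to IS_ERR(ptr), i.e. the function now reports failures by encoding a negative errno in the returned pointer. A minimal, self-contained sketch of that convention (struct widget and both helpers are hypothetical):

	#include <linux/err.h>
	#include <linux/slab.h>

	struct widget {
		int id;
	};

	/* Callee: encode *why* creation failed in the pointer itself. */
	static struct widget *widget_create(void)
	{
		struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

		if (!w)
			return ERR_PTR(-ENOMEM);
		return w;
	}

	/* Caller: IS_ERR() spots an error-encoding pointer and PTR_ERR()
	 * recovers the errno; a plain NULL check would miss the failure. */
	static int widget_user(void)
	{
		struct widget *w = widget_create();

		if (IS_ERR(w))
			return PTR_ERR(w);
		kfree(w);
		return 0;
	}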

View File

@ -981,14 +981,13 @@ struct iscsi_login *iscsi_target_init_negotiation(
return NULL;
}
login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
if (!login->req) {
pr_err("Unable to allocate memory for Login Request.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
goto out;
}
memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
if (!login->req_buf) {

View File

@ -113,11 +113,9 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
&tl_cmd->tl_sense_buf[0]);
/*
* Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
*/
if (scsi_bidi_cmnd(sc))
se_cmd->t_tasks_bidi = 1;
se_cmd->se_cmd_flags |= SCF_BIDI;
/*
* Locate the struct se_lun pointer and attach it to struct se_cmd
*/
@ -148,27 +146,13 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
* Allocate the necessary tasks to complete the received CDB+data
*/
ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
if (ret == -ENOMEM) {
/* Out of Resources */
return PYX_TRANSPORT_LU_COMM_FAILURE;
} else if (ret == -EINVAL) {
/*
* Handle case for SAM_STAT_RESERVATION_CONFLICT
*/
if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
return PYX_TRANSPORT_RESERVATION_CONFLICT;
/*
* Otherwise, return SAM_STAT_CHECK_CONDITION and return
* sense data.
*/
return PYX_TRANSPORT_USE_SENSE_REASON;
}
if (ret != 0)
return ret;
/*
* For BIDI commands, pass in the extra READ buffer
* to transport_generic_map_mem_to_cmd() below..
*/
if (se_cmd->t_tasks_bidi) {
if (se_cmd->se_cmd_flags & SCF_BIDI) {
struct scsi_data_buffer *sdb = scsi_in(sc);
sgl_bidi = sdb->table.sgl;
@ -194,12 +178,8 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
}
/* Tell the core about our preallocated memory */
ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
if (ret < 0)
return PYX_TRANSPORT_LU_COMM_FAILURE;
return 0;
}
/*
@ -1360,17 +1340,16 @@ void tcm_loop_drop_scsi_hba(
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
int host_no = tl_hba->sh->host_no;
pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
" SAS Address: %s at Linux/SCSI Host ID: %d\n",
tl_hba->tl_wwn_address, tl_hba->sh->host_no);
/*
* Call device_unregister() on the original tl_hba->dev.
* tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
* release *tl_hba;
*/
device_unregister(&tl_hba->dev);
pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
" SAS Address: %s at Linux/SCSI Host ID: %d\n",
config_item_name(&wwn->wwn_group.cg_item), host_no);
}
/* Start items for tcm_loop_cit */

View File

@ -191,9 +191,10 @@ int target_emulate_set_target_port_groups(struct se_task *task)
int alua_access_state, primary = 0, rc;
u16 tg_pt_id, rtpi;
if (!l_port)
return PYX_TRANSPORT_LU_COMM_FAILURE;
if (!l_port) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
/*
@ -203,7 +204,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
if (!l_tg_pt_gp_mem) {
pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
rc = -EINVAL;
goto out;
}
spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@ -211,7 +213,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
if (!l_tg_pt_gp) {
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
rc = -EINVAL;
goto out;
}
rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
@ -220,7 +223,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
if (!rc) {
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICT_ALUA is disabled\n");
rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
rc = -EINVAL;
goto out;
}
@ -245,7 +249,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* REQUEST, and the additional sense code set to INVALID
* FIELD IN PARAMETER LIST.
*/
rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
rc = -EINVAL;
goto out;
}
rc = -1;
@ -298,7 +303,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* throw an exception with ASCQ: INVALID_PARAMETER_LIST
*/
if (rc != 0) {
rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
rc = -EINVAL;
goto out;
}
} else {
@ -335,7 +341,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* INVALID_PARAMETER_LIST
*/
if (rc != 0) {
rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
rc = -EINVAL;
goto out;
}
}
@ -1184,7 +1191,6 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
* struct t10_alua_lu_gp.
*/
spin_lock(&lu_gps_lock);
atomic_set(&lu_gp->lu_gp_shutdown, 1);
list_del(&lu_gp->lu_gp_node);
alua_lu_gps_count--;
spin_unlock(&lu_gps_lock);
@ -1438,7 +1444,6 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
tg_pt_gp_mem->tg_pt = port;
port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
atomic_set(&port->sep_tg_pt_gp_active, 1);
return tg_pt_gp_mem;
}

View File

@ -478,7 +478,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
if (cmd->data_length < 60)
return 0;
buf[2] = 0x3c;
buf[3] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
@ -703,6 +703,7 @@ int target_emulate_inquiry(struct se_task *task)
if (cmd->data_length < 4) {
pr_err("SCSI Inquiry payload length: %u"
" too small for EVPD=1\n", cmd->data_length);
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
@ -719,6 +720,7 @@ int target_emulate_inquiry(struct se_task *task)
}
pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
ret = -EINVAL;
out_unmap:
@ -969,7 +971,8 @@ int target_emulate_modesense(struct se_task *task)
default:
pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
cdb[2] & 0x3f, cdb[3]);
return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
return -EINVAL;
}
offset += length;
@ -1027,7 +1030,8 @@ int target_emulate_request_sense(struct se_task *task)
if (cdb[1] & 0x01) {
pr_err("REQUEST_SENSE description emulation not"
" supported\n");
return PYX_TRANSPORT_INVALID_CDB_FIELD;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -ENOSYS;
}
buf = transport_kmap_first_data_page(cmd);
@ -1100,7 +1104,8 @@ int target_emulate_unmap(struct se_task *task)
if (!dev->transport->do_discard) {
pr_err("UNMAP emulation not supported for: %s\n",
dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
return -ENOSYS;
}
/* First UNMAP block descriptor starts at 8 byte offset */
@ -1157,7 +1162,8 @@ int target_emulate_write_same(struct se_task *task)
if (!dev->transport->do_discard) {
pr_err("WRITE_SAME emulation not supported"
" for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
return -ENOSYS;
}
if (cmd->t_task_cdb[0] == WRITE_SAME)
@ -1193,11 +1199,13 @@ int target_emulate_write_same(struct se_task *task)
int target_emulate_synchronize_cache(struct se_task *task)
{
struct se_device *dev = task->task_se_cmd->se_dev;
struct se_cmd *cmd = task->task_se_cmd;
if (!dev->transport->do_sync_cache) {
pr_err("SYNCHRONIZE_CACHE emulation not supported"
" for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
return -ENOSYS;
}
dev->transport->do_sync_cache(task);
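
The one-byte change in target_emulate_evpd_86() above follows from the VPD page header layout in SPC-4: byte 0 carries the peripheral qualifier/device type, byte 1 the page code, and bytes 2-3 a big-endian 16-bit PAGE LENGTH. Writing 0x3c into byte 2 advertised a length of 0x3c00 bytes; storing it in byte 3 (byte 2 left zero) yields the intended 60 bytes. A hedged sketch of filling such a header explicitly (hypothetical helper):

	#include <linux/types.h>
	#include <asm/unaligned.h>

	/* Illustrative only: fill the 4-byte header of an INQUIRY VPD page. */
	static void vpd_fill_header(unsigned char *buf, u8 page_code,
				    u16 payload_len)
	{
		/* buf[0] (qualifier/device type) is left to the caller */
		buf[1] = page_code;			/* 0x86: Extended INQUIRY data */
		put_unaligned_be16(payload_len, &buf[2]);	/* PAGE LENGTH, bytes 2-3 */
	}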

View File

@ -67,9 +67,6 @@ static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;
static DEFINE_SPINLOCK(se_device_lock);
static LIST_HEAD(se_dev_list);
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
@ -2741,7 +2738,6 @@ static struct config_group *target_core_make_subdev(
" struct se_subsystem_dev\n");
goto unlock;
}
INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
@ -2777,9 +2773,6 @@ static struct config_group *target_core_make_subdev(
" from allocate_virtdevice()\n");
goto out;
}
spin_lock(&se_device_lock);
list_add_tail(&se_dev->se_dev_node, &se_dev_list);
spin_unlock(&se_device_lock);
config_group_init_type_name(&se_dev->se_dev_group, name,
&target_core_dev_cit);
@ -2874,10 +2867,6 @@ static void target_core_drop_subdev(
mutex_lock(&hba->hba_access_mutex);
t = hba->transport;
spin_lock(&se_device_lock);
list_del(&se_dev->se_dev_node);
spin_unlock(&se_device_lock);
dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
for (i = 0; dev_stat_grp->default_groups[i]; i++) {
df_item = &dev_stat_grp->default_groups[i]->cg_item;

View File

@ -104,7 +104,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_cmd->se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@ -137,7 +136,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->orig_fe_lun = 0;
se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
/*
@ -200,7 +198,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@ -708,7 +705,7 @@ done:
se_task->task_scsi_status = GOOD;
transport_complete_task(se_task, 1);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
return 0;
}
/* se_release_device_for_hba():
@ -957,8 +954,12 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
return -EINVAL;
}
pr_err("dpo_emulated not supported\n");
return -EINVAL;
if (flag) {
pr_err("dpo_emulated not supported\n");
return -EINVAL;
}
return 0;
}
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
@ -968,7 +969,7 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
return -EINVAL;
}
if (dev->transport->fua_write_emulated == 0) {
if (flag && dev->transport->fua_write_emulated == 0) {
pr_err("fua_write_emulated not supported\n");
return -EINVAL;
}
@ -985,8 +986,12 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
return -EINVAL;
}
pr_err("ua read emulated not supported\n");
return -EINVAL;
if (flag) {
pr_err("ua read emulated not supported\n");
return -EINVAL;
}
return 0;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
@ -995,7 +1000,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
if (dev->transport->write_cache_emulated == 0) {
if (flag && dev->transport->write_cache_emulated == 0) {
pr_err("write_cache_emulated not supported\n");
return -EINVAL;
}
@ -1056,7 +1061,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard supported is detected iblock_create_virtdevice().
*/
if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
@ -1077,7 +1082,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard supported is detected iblock_create_virtdevice().
*/
if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
@ -1587,7 +1592,6 @@ int core_dev_setup_virtual_lun0(void)
ret = -ENOMEM;
goto out;
}
INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
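
The se_dev_set_emulate_*() changes above all apply one rule: writing the attribute is only an error when it tries to enable (flag != 0) an emulation the backend does not provide; writing 0 to an unsupported feature is accepted as a no-op, so userspace tools can blindly push a full set of defaults. The resulting shape of one such setter (dpo shown, reconstructed from the hunk plus its unchanged leading check):

	int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
	{
		if (flag != 0 && flag != 1) {
			pr_err("Illegal value %d\n", flag);
			return -EINVAL;
		}
		/* Enabling an unimplemented emulation is an error; writing 0
		 * to an unsupported feature succeeds as a no-op. */
		if (flag) {
			pr_err("dpo_emulated not supported\n");
			return -EINVAL;
		}
		return 0;
	}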

View File

@ -289,9 +289,9 @@ static int fd_do_readv(struct se_task *task)
return -ENOMEM;
}
for (i = 0; i < task->task_sg_nents; i++) {
iov[i].iov_len = sg[i].length;
iov[i].iov_base = sg_virt(&sg[i]);
for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
iov[i].iov_len = sg->length;
iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
@ -342,9 +342,9 @@ static int fd_do_writev(struct se_task *task)
return -ENOMEM;
}
for (i = 0; i < task->task_sg_nents; i++) {
iov[i].iov_len = sg[i].length;
iov[i].iov_base = sg_virt(&sg[i]);
for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
iov[i].iov_len = sg->length;
iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
@ -438,7 +438,7 @@ static int fd_do_task(struct se_task *task)
if (ret > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
cmd->t_tasks_fua) {
(cmd->se_cmd_flags & SCF_FUA)) {
/*
* We might need to be a bit smarter here
* and return some sense data to let the initiator
@ -449,13 +449,15 @@ static int fd_do_task(struct se_task *task)
}
if (ret < 0)
if (ret < 0) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return ret;
}
if (ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
return 0;
}
/* fd_free_task(): (Part of se_subsystem_api_t template)
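
The fd_do_readv()/fd_do_writev() hunks above replace direct sg[i] indexing with for_each_sg(). A scatterlist is not necessarily a flat array: entries can be chained across allocations, and the last slot of one chunk is a link to the next, so walking it by array index misreads the chain pointers as data. for_each_sg() follows the chain via sg_next(). A minimal sketch of the idiom (hypothetical helper, assuming the iovec array is large enough):

	#include <linux/scatterlist.h>
	#include <linux/uio.h>

	/* Fill an iovec array from a (possibly chained) scatterlist. */
	static void sgl_to_iovec(struct scatterlist *sgl, unsigned int nents,
				 struct iovec *iov)
	{
		struct scatterlist *sg;
		int i;

		/* for_each_sg() advances with sg_next(), so chain entries
		 * are followed instead of being indexed over like sg[i]. */
		for_each_sg(sgl, sg, nents, i) {
			iov[i].iov_len = sg->length;
			iov[i].iov_base = sg_virt(sg);
		}
	}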

View File

@ -531,7 +531,7 @@ static int iblock_do_task(struct se_task *task)
*/
if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
task->task_se_cmd->t_tasks_fua))
(cmd->se_cmd_flags & SCF_FUA)))
rw = WRITE_FUA;
else
rw = WRITE;
@ -554,12 +554,15 @@ static int iblock_do_task(struct se_task *task)
else {
pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
return PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOSYS;
}
bio = iblock_get_bio(task, block_lba, sg_num);
if (!bio)
return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
if (!bio) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
}
bio_list_init(&list);
bio_list_add(&list, bio);
@ -588,12 +591,13 @@ static int iblock_do_task(struct se_task *task)
submit_bio(rw, bio);
blk_finish_plug(&plug);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
return 0;
fail:
while ((bio = bio_list_pop(&list)))
bio_put(bio);
return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
}
static u32 iblock_get_device_rev(struct se_device *dev)
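
The iblock hunk above keys Forced Unit Access off the new SCF_FUA command flag rather than a dedicated t_tasks_fua field. The rw selection reads: when no write cache is advertised (emulate_write_cache == 0) the initiator treats every completed write as stable, so every bio is submitted as WRITE_FUA; when a write cache is advertised, plain WRITE is used and only FUA-marked commands (with FUA emulation enabled) bypass the cache. A sketch of just that decision, with the three inputs abstracted into booleans that mirror the fields used in the hunk:

	#include <linux/types.h>
	#include <linux/fs.h>

	/* writeback ~ emulate_write_cache, fua_emulated ~ emulate_fua_write,
	 * fua_requested ~ (cmd->se_cmd_flags & SCF_FUA). */
	static int pick_write_rw(bool writeback, bool fua_emulated,
				 bool fua_requested)
	{
		/* No advertised write cache: force every write to media. */
		if (!writeback)
			return WRITE_FUA;
		/* Write cache advertised: only FUA-marked commands bypass it. */
		if (fua_emulated && fua_requested)
			return WRITE_FUA;
		return WRITE;
	}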

View File

@ -191,7 +191,7 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
pr_err("Received legacy SPC-2 RESERVE/RELEASE"
" while active SPC-3 registrations exist,"
" returning RESERVATION_CONFLICT\n");
*ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return true;
}
@ -252,7 +252,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
(cmd->t_task_cdb[1] & 0x02)) {
pr_err("LongIO and Obselete Bits set, returning"
" ILLEGAL_REQUEST\n");
ret = PYX_TRANSPORT_ILLEGAL_REQUEST;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
ret = -EINVAL;
goto out;
}
/*
@ -277,7 +278,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
" from %s \n", cmd->se_lun->unpacked_lun,
cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
ret = -EINVAL;
goto out_unlock;
}
@ -1510,7 +1512,8 @@ static int core_scsi3_decode_spec_i_port(
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
if (!tidh_new) {
pr_err("Unable to allocate tidh_new\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -EINVAL;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = tpg;
@ -1522,7 +1525,8 @@ static int core_scsi3_decode_spec_i_port(
sa_res_key, all_tg_pt, aptpl);
if (!local_pr_reg) {
kfree(tidh_new);
return PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
}
tidh_new->dest_pr_reg = local_pr_reg;
/*
@ -1548,7 +1552,8 @@ static int core_scsi3_decode_spec_i_port(
pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
" does not equal CDB data_length: %u\n", tpdl,
cmd->data_length);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
goto out;
}
/*
@ -1598,7 +1603,9 @@ static int core_scsi3_decode_spec_i_port(
" for tmp_tpg\n");
atomic_dec(&tmp_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
ret = PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
ret = -EINVAL;
goto out;
}
/*
@ -1628,7 +1635,9 @@ static int core_scsi3_decode_spec_i_port(
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
core_scsi3_tpg_undepend_item(tmp_tpg);
ret = PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
ret = -EINVAL;
goto out;
}
@ -1646,7 +1655,8 @@ static int core_scsi3_decode_spec_i_port(
if (!dest_tpg) {
pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
" dest_tpg\n");
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
goto out;
}
#if 0
@ -1660,7 +1670,8 @@ static int core_scsi3_decode_spec_i_port(
" %u for Transport ID: %s\n", tid_len, ptr);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
goto out;
}
/*
@ -1678,7 +1689,8 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
goto out;
}
@ -1690,7 +1702,9 @@ static int core_scsi3_decode_spec_i_port(
smp_mb__after_atomic_dec();
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
ret = PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
ret = -EINVAL;
goto out;
}
#if 0
@ -1727,7 +1741,9 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
ret = PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
ret = -ENOMEM;
goto out;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
@ -1759,7 +1775,8 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
kfree(tidh_new);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
goto out;
}
tidh_new->dest_pr_reg = dest_pr_reg;
@ -2098,7 +2115,8 @@ static int core_scsi3_emulate_pro_register(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -EINVAL;
}
se_tpg = se_sess->se_tpg;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@ -2117,13 +2135,14 @@ static int core_scsi3_emulate_pro_register(
if (res_key) {
pr_warn("SPC-3 PR: Reservation Key non-zero"
" for SA REGISTER, returning CONFLICT\n");
return PYX_TRANSPORT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EINVAL;
}
/*
* Do nothing but return GOOD status.
*/
if (!sa_res_key)
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
return 0;
if (!spec_i_pt) {
/*
@ -2138,7 +2157,8 @@ static int core_scsi3_emulate_pro_register(
if (ret != 0) {
pr_err("Unable to allocate"
" struct t10_pr_registration\n");
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
return -EINVAL;
}
} else {
/*
@ -2197,14 +2217,16 @@ static int core_scsi3_emulate_pro_register(
" 0x%016Lx\n", res_key,
pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EINVAL;
}
}
if (spec_i_pt) {
pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
" set while sa_res_key=0\n");
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
return -EINVAL;
}
/*
* An existing ALL_TG_PT=1 registration being released
@ -2215,7 +2237,8 @@ static int core_scsi3_emulate_pro_register(
" registration exists, but ALL_TG_PT=1 bit not"
" present in received PROUT\n");
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
/*
* Allocate APTPL metadata buffer used for UNREGISTER ops
@ -2227,7 +2250,9 @@ static int core_scsi3_emulate_pro_register(
pr_err("Unable to allocate"
" pr_aptpl_buf\n");
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -EINVAL;
}
}
/*
@ -2241,7 +2266,8 @@ static int core_scsi3_emulate_pro_register(
if (pr_holder < 0) {
kfree(pr_aptpl_buf);
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EINVAL;
}
spin_lock(&pr_tmpl->registration_lock);
@ -2405,7 +2431,8 @@ static int core_scsi3_pro_reserve(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -EINVAL;
}
se_tpg = se_sess->se_tpg;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@ -2417,7 +2444,8 @@ static int core_scsi3_pro_reserve(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RESERVE\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@ -2433,7 +2461,8 @@ static int core_scsi3_pro_reserve(
" does not match existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@ -2448,7 +2477,8 @@ static int core_scsi3_pro_reserve(
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
return -EINVAL;
}
/*
* See if we have an existing PR reservation holder pointer at
@ -2480,7 +2510,8 @@ static int core_scsi3_pro_reserve(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@ -2503,7 +2534,8 @@ static int core_scsi3_pro_reserve(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@ -2517,7 +2549,7 @@ static int core_scsi3_pro_reserve(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
return 0;
}
/*
* Otherwise, our *pr_reg becomes the PR reservation holder for said
@ -2574,7 +2606,8 @@ static int core_scsi3_emulate_pro_reserve(
default:
pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
" 0x%02x\n", type);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
return ret;
@ -2630,7 +2663,8 @@ static int core_scsi3_emulate_pro_release(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -EINVAL;
}
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
@ -2639,7 +2673,8 @@ static int core_scsi3_emulate_pro_release(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RELEASE\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -EINVAL;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing:
@ -2661,7 +2696,7 @@ static int core_scsi3_emulate_pro_release(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
return 0;
}
if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
(pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
@ -2675,7 +2710,7 @@ static int core_scsi3_emulate_pro_release(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
return 0;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing:
@ -2697,7 +2732,8 @@ static int core_scsi3_emulate_pro_release(
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EINVAL;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing and above:
@ -2719,7 +2755,8 @@ static int core_scsi3_emulate_pro_release(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EINVAL;
}
/*
* In response to a persistent reservation release request from the
@ -2802,7 +2839,8 @@ static int core_scsi3_emulate_pro_clear(
if (!pr_reg_n) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for CLEAR\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -EINVAL;
}
/*
* From spc4r17 section 5.7.11.6, Clearing:
@ -2821,7 +2859,8 @@ static int core_scsi3_emulate_pro_clear(
" existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
core_scsi3_put_pr_reg(pr_reg_n);
return PYX_TRANSPORT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EINVAL;
}
/*
* a) Release the persistent reservation, if any;
@ -2979,8 +3018,10 @@ static int core_scsi3_pro_preempt(
int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
int prh_type = 0, prh_scope = 0, ret;
if (!se_sess)
return PYX_TRANSPORT_LU_COMM_FAILURE;
if (!se_sess) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -EINVAL;
}
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
@ -2989,16 +3030,19 @@ static int core_scsi3_pro_preempt(
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for PREEMPT%s\n",
(abort) ? "_AND_ABORT" : "");
return PYX_TRANSPORT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EINVAL;
}
if (pr_reg_n->pr_res_key != res_key) {
core_scsi3_put_pr_reg(pr_reg_n);
return PYX_TRANSPORT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EINVAL;
}
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg_n);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
return -EINVAL;
}
INIT_LIST_HEAD(&preempt_and_abort_list);
@ -3012,7 +3056,8 @@ static int core_scsi3_pro_preempt(
if (!all_reg && !sa_res_key) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
return -EINVAL;
}
/*
* From spc4r17, section 5.7.11.4.4 Removing Registrations:
@ -3106,7 +3151,8 @@ static int core_scsi3_pro_preempt(
if (!released_regs) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
return PYX_TRANSPORT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EINVAL;
}
/*
* For an existing all registrants type reservation
@ -3297,7 +3343,8 @@ static int core_scsi3_emulate_pro_preempt(
default:
pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
" Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
return ret;
@ -3331,7 +3378,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -EINVAL;
}
memset(dest_iport, 0, 64);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
@ -3349,7 +3397,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
" *pr_reg for REGISTER_AND_MOVE\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -EINVAL;
}
/*
* The provided reservation key much match the existing reservation key
@ -3360,7 +3409,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" res_key: 0x%016Lx does not match existing SA REGISTER"
" res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EINVAL;
}
/*
* The service active reservation key needs to be non zero
@ -3369,7 +3419,8 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
" sa_res_key\n");
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
return -EINVAL;
}
/*
@ -3392,7 +3443,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" does not equal CDB data_length: %u\n", tid_len,
cmd->data_length);
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
return -EINVAL;
}
spin_lock(&dev->se_port_lock);
@ -3417,7 +3469,8 @@ static int core_scsi3_emulate_pro_register_and_move(
atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -EINVAL;
}
spin_lock(&dev->se_port_lock);
@ -3430,7 +3483,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" fabric ops from Relative Target Port Identifier:"
" %hu\n", rtpi);
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@ -3445,14 +3499,16 @@ static int core_scsi3_emulate_pro_register_and_move(
" from fabric: %s\n", proto_ident,
dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
dest_tf_ops->get_fabric_name());
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
goto out;
}
if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
" containg a valid tpg_parse_pr_out_transport_id"
" function pointer\n");
ret = PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
ret = -EINVAL;
goto out;
}
initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
@ -3460,7 +3516,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!initiator_str) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" initiator_str from Transport ID\n");
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
goto out;
}
@ -3489,7 +3546,8 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
" matches: %s on received I_T Nexus\n", initiator_str,
pr_reg_nacl->initiatorname);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
goto out;
}
if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
@ -3497,7 +3555,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" matches: %s %s on received I_T Nexus\n",
initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
pr_reg->pr_reg_isid);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
goto out;
}
after_iport_check:
@ -3517,7 +3576,8 @@ after_iport_check:
pr_err("Unable to locate %s dest_node_acl for"
" TransportID%s\n", dest_tf_ops->get_fabric_name(),
initiator_str);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
goto out;
}
ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
@ -3527,7 +3587,8 @@ after_iport_check:
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
dest_node_acl = NULL;
ret = PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
goto out;
}
#if 0
@ -3543,7 +3604,8 @@ after_iport_check:
if (!dest_se_deve) {
pr_err("Unable to locate %s dest_se_deve from RTPI:"
" %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
goto out;
}
@ -3553,7 +3615,8 @@ after_iport_check:
atomic_dec(&dest_se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
dest_se_deve = NULL;
ret = PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
ret = -EINVAL;
goto out;
}
#if 0
@ -3572,7 +3635,8 @@ after_iport_check:
pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
" currently held\n");
spin_unlock(&dev->dev_reservation_lock);
ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
ret = -EINVAL;
goto out;
}
/*
@ -3585,7 +3649,8 @@ after_iport_check:
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
" Nexus is not reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
ret = -EINVAL;
goto out;
}
/*
@ -3603,7 +3668,8 @@ after_iport_check:
" reservation for type: %s\n",
core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
spin_unlock(&dev->dev_reservation_lock);
ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
ret = -EINVAL;
goto out;
}
pr_res_nacl = pr_res_holder->pr_reg_nacl;
@ -3640,7 +3706,8 @@ after_iport_check:
sa_res_key, 0, aptpl, 2, 1);
if (ret != 0) {
spin_unlock(&dev->dev_reservation_lock);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
goto out;
}
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
@ -3771,7 +3838,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
ret = EINVAL;
goto out;
}
@ -3779,13 +3847,16 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
* FIXME: A NULL struct se_session pointer means an this is not coming from
* a $FABRIC_MOD's nexus, but from internal passthrough ops.
*/
if (!cmd->se_sess)
return PYX_TRANSPORT_LU_COMM_FAILURE;
if (!cmd->se_sess) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -EINVAL;
}
if (cmd->data_length < 24) {
pr_warn("SPC-PR: Received PR OUT parameter list"
" length too small: %u\n", cmd->data_length);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
goto out;
}
/*
@ -3820,7 +3891,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
* SPEC_I_PT=1 is only valid for Service action: REGISTER
*/
if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
goto out;
}
@ -3837,7 +3909,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
(cmd->data_length != 24)) {
pr_warn("SPC-PR: Received PR OUT illegal parameter"
" list length: %u\n", cmd->data_length);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
goto out;
}
/*
@ -3878,7 +3951,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
default:
pr_err("Unknown PERSISTENT_RESERVE_OUT service"
" action: 0x%02x\n", cdb[1] & 0x1f);
ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
ret = -EINVAL;
break;
}
@ -3906,7 +3980,8 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
" too small\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@ -3965,7 +4040,8 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
" too small\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@ -4047,7 +4123,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
if (cmd->data_length < 6) {
pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
" %u too small\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@ -4108,7 +4185,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
" too small\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@ -4255,7 +4333,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
return PYX_TRANSPORT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EINVAL;
}
switch (cmd->t_task_cdb[1] & 0x1f) {
@ -4274,7 +4353,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
default:
pr_err("Unknown PERSISTENT_RESERVE_IN service"
" action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
ret = -EINVAL;
break;
}

View File

@ -963,6 +963,7 @@ static inline struct bio *pscsi_get_bio(int sg_num)
static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
struct bio **hbio)
{
struct se_cmd *cmd = task->task_se_cmd;
struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
u32 task_sg_num = task->task_sg_nents;
struct bio *bio = NULL, *tbio = NULL;
@ -971,7 +972,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
u32 data_len = task->task_size, i, len, bytes, off;
int nr_pages = (task->task_size + task_sg[0].offset +
PAGE_SIZE - 1) >> PAGE_SHIFT;
int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
int nr_vecs = 0, rc;
int rw = (task->task_data_direction == DMA_TO_DEVICE);
*hbio = NULL;
@ -1058,11 +1059,13 @@ fail:
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
return ret;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
}
static int pscsi_do_task(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct request *req;
@ -1078,7 +1081,9 @@ static int pscsi_do_task(struct se_task *task)
if (!req || IS_ERR(req)) {
pr_err("PSCSI: blk_get_request() failed: %ld\n",
req ? IS_ERR(req) : -ENOMEM);
return PYX_TRANSPORT_LU_COMM_FAILURE;
cmd->scsi_sense_reason =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENODEV;
}
} else {
BUG_ON(!task->task_size);
@ -1087,8 +1092,11 @@ static int pscsi_do_task(struct se_task *task)
* Setup the main struct request for the task->task_sg[] payload
*/
ret = pscsi_map_sg(task, task->task_sg, &hbio);
if (ret < 0)
return PYX_TRANSPORT_LU_COMM_FAILURE;
if (ret < 0) {
cmd->scsi_sense_reason =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return ret;
}
req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
GFP_KERNEL);
@ -1115,7 +1123,7 @@ static int pscsi_do_task(struct se_task *task)
(task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
pscsi_req_done);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
return 0;
fail:
while (hbio) {
@ -1124,7 +1132,8 @@ fail:
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
}
/* pscsi_get_sense_buffer():
@ -1198,9 +1207,8 @@ static inline void pscsi_process_SAM_status(
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pt->pscsi_result);
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
task->task_se_cmd->transport_error_status =
PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
task->task_se_cmd->scsi_sense_reason =
TCM_UNSUPPORTED_SCSI_OPCODE;
transport_complete_task(task, 0);
break;
}

View File

@ -343,235 +343,74 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return NULL;
}
/* rd_MEMCPY_read():
*
*
*/
static int rd_MEMCPY_read(struct rd_request *req)
static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
{
struct se_task *task = &req->rd_task;
struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
struct rd_dev_sg_table *table;
struct scatterlist *sg_d, *sg_s;
void *dst, *src;
u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
u32 length, page_end = 0, table_sg_end;
struct scatterlist *rd_sg;
struct sg_mapping_iter m;
u32 rd_offset = req->rd_offset;
u32 src_len;
table = rd_get_sg_table(dev, req->rd_page);
if (!table)
return -EINVAL;
table_sg_end = (table->page_end_offset - req->rd_page);
sg_d = task->task_sg;
sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
" %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
req->rd_page, req->rd_offset);
src_offset = rd_offset;
pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
dev->rd_dev_id, read_rd ? "Read" : "Write",
task->task_lba, req->rd_size, req->rd_page,
rd_offset);
src_len = PAGE_SIZE - rd_offset;
sg_miter_start(&m, task->task_sg, task->task_sg_nents,
read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
while (req->rd_size) {
if ((sg_d[i].length - dst_offset) <
(sg_s[j].length - src_offset)) {
length = (sg_d[i].length - dst_offset);
u32 len;
void *rd_addr;
pr_debug("Step 1 - sg_d[%d]: %p length: %d"
" offset: %u sg_s[%d].length: %u\n", i,
&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
sg_s[j].length);
pr_debug("Step 1 - length: %u dst_offset: %u"
" src_offset: %u\n", length, dst_offset,
src_offset);
sg_miter_next(&m);
len = min((u32)m.length, src_len);
m.consumed = len;
if (length > req->rd_size)
length = req->rd_size;
rd_addr = sg_virt(rd_sg) + rd_offset;
dst = sg_virt(&sg_d[i++]) + dst_offset;
BUG_ON(!dst);
if (read_rd)
memcpy(m.addr, rd_addr, len);
else
memcpy(rd_addr, m.addr, len);
src = sg_virt(&sg_s[j]) + src_offset;
BUG_ON(!src);
dst_offset = 0;
src_offset = length;
page_end = 0;
} else {
length = (sg_s[j].length - src_offset);
pr_debug("Step 2 - sg_d[%d]: %p length: %d"
" offset: %u sg_s[%d].length: %u\n", i,
&sg_d[i], sg_d[i].length, sg_d[i].offset,
j, sg_s[j].length);
pr_debug("Step 2 - length: %u dst_offset: %u"
" src_offset: %u\n", length, dst_offset,
src_offset);
if (length > req->rd_size)
length = req->rd_size;
dst = sg_virt(&sg_d[i]) + dst_offset;
BUG_ON(!dst);
if (sg_d[i].length == length) {
i++;
dst_offset = 0;
} else
dst_offset = length;
src = sg_virt(&sg_s[j++]) + src_offset;
BUG_ON(!src);
src_offset = 0;
page_end = 1;
}
memcpy(dst, src, length);
pr_debug("page: %u, remaining size: %u, length: %u,"
" i: %u, j: %u\n", req->rd_page,
(req->rd_size - length), length, i, j);
req->rd_size -= length;
req->rd_size -= len;
if (!req->rd_size)
return 0;
if (!page_end)
continue;
if (++req->rd_page <= table->page_end_offset) {
pr_debug("page: %u in same page table\n",
req->rd_page);
src_len -= len;
if (src_len) {
rd_offset += len;
continue;
}
pr_debug("getting new page table for page: %u\n",
req->rd_page);
/* rd page completed, next one please */
req->rd_page++;
rd_offset = 0;
src_len = PAGE_SIZE;
if (req->rd_page <= table->page_end_offset) {
rd_sg++;
continue;
}
table = rd_get_sg_table(dev, req->rd_page);
if (!table)
if (!table) {
sg_miter_stop(&m);
return -EINVAL;
sg_s = &table->sg_table[j = 0];
}
return 0;
}
/* rd_MEMCPY_write():
*
*
*/
static int rd_MEMCPY_write(struct rd_request *req)
{
struct se_task *task = &req->rd_task;
struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
struct rd_dev_sg_table *table;
struct scatterlist *sg_d, *sg_s;
void *dst, *src;
u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
u32 length, page_end = 0, table_sg_end;
u32 rd_offset = req->rd_offset;
table = rd_get_sg_table(dev, req->rd_page);
if (!table)
return -EINVAL;
table_sg_end = (table->page_end_offset - req->rd_page);
sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
sg_s = task->task_sg;
pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
" Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
req->rd_page, req->rd_offset);
dst_offset = rd_offset;
while (req->rd_size) {
if ((sg_s[i].length - src_offset) <
(sg_d[j].length - dst_offset)) {
length = (sg_s[i].length - src_offset);
pr_debug("Step 1 - sg_s[%d]: %p length: %d"
" offset: %d sg_d[%d].length: %u\n", i,
&sg_s[i], sg_s[i].length, sg_s[i].offset,
j, sg_d[j].length);
pr_debug("Step 1 - length: %u src_offset: %u"
" dst_offset: %u\n", length, src_offset,
dst_offset);
if (length > req->rd_size)
length = req->rd_size;
src = sg_virt(&sg_s[i++]) + src_offset;
BUG_ON(!src);
dst = sg_virt(&sg_d[j]) + dst_offset;
BUG_ON(!dst);
src_offset = 0;
dst_offset = length;
page_end = 0;
} else {
length = (sg_d[j].length - dst_offset);
pr_debug("Step 2 - sg_s[%d]: %p length: %d"
" offset: %d sg_d[%d].length: %u\n", i,
&sg_s[i], sg_s[i].length, sg_s[i].offset,
j, sg_d[j].length);
pr_debug("Step 2 - length: %u src_offset: %u"
" dst_offset: %u\n", length, src_offset,
dst_offset);
if (length > req->rd_size)
length = req->rd_size;
src = sg_virt(&sg_s[i]) + src_offset;
BUG_ON(!src);
if (sg_s[i].length == length) {
i++;
src_offset = 0;
} else
src_offset = length;
dst = sg_virt(&sg_d[j++]) + dst_offset;
BUG_ON(!dst);
dst_offset = 0;
page_end = 1;
}
memcpy(dst, src, length);
pr_debug("page: %u, remaining size: %u, length: %u,"
" i: %u, j: %u\n", req->rd_page,
(req->rd_size - length), length, i, j);
req->rd_size -= length;
if (!req->rd_size)
return 0;
if (!page_end)
continue;
if (++req->rd_page <= table->page_end_offset) {
pr_debug("page: %u in same page table\n",
req->rd_page);
continue;
}
pr_debug("getting new page table for page: %u\n",
req->rd_page);
table = rd_get_sg_table(dev, req->rd_page);
if (!table)
return -EINVAL;
sg_d = &table->sg_table[j = 0];
/* since we increment, the first sg entry is correct */
rd_sg = table->sg_table;
}
sg_miter_stop(&m);
return 0;
}
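
The rewritten copy routine above replaces the hand-rolled dual-offset walk with the sg_mapping_iter helpers: sg_miter_start() binds the iterator to the task's scatterlist with a direction flag (SG_MITER_TO_SG when data flows into those pages, SG_MITER_FROM_SG when they are the source), each sg_miter_next() maps the next chunk and exposes it as m.addr/m.length, m.consumed tells the iterator how much of the chunk was actually used, and sg_miter_stop() unmaps and flushes. A self-contained sketch of the idiom, copying a flat buffer into a scatterlist (essentially what the in-tree sg_copy_from_buffer() helper does):

	#include <linux/kernel.h>
	#include <linux/scatterlist.h>
	#include <linux/string.h>

	/* Copy 'len' bytes from a flat kernel buffer into a scatterlist. */
	static size_t copy_buf_to_sgl(struct scatterlist *sgl, unsigned int nents,
				      const void *buf, size_t len)
	{
		struct sg_mapping_iter miter;
		const char *src = buf;
		size_t copied = 0;

		sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
		while (copied < len && sg_miter_next(&miter)) {
			size_t chunk = min(miter.length, len - copied);

			memcpy(miter.addr, src + copied, chunk);
			miter.consumed = chunk;	/* advance only by what was used */
			copied += chunk;
		}
		sg_miter_stop(&miter);		/* kunmap + flush the last chunk */
		return copied;
	}
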
@ -583,28 +422,21 @@ static int rd_MEMCPY_do_task(struct se_task *task)
{
struct se_device *dev = task->task_se_cmd->se_dev;
struct rd_request *req = RD_REQ(task);
unsigned long long lba;
u64 tmp;
int ret;
req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
lba = task->task_lba;
req->rd_offset = (do_div(lba,
(PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
dev->se_sub_dev->se_dev_attrib.block_size;
tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
req->rd_offset = do_div(tmp, PAGE_SIZE);
req->rd_page = tmp;
req->rd_size = task->task_size;
if (task->task_data_direction == DMA_FROM_DEVICE)
ret = rd_MEMCPY_read(req);
else
ret = rd_MEMCPY_write(req);
ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
if (ret != 0)
return ret;
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
return 0;
}
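
The simplified rd_MEMCPY_do_task() setup above reduces to one multiply and one divide. A small sketch, assuming 512-byte logical blocks and 4 KiB pages as user-space stand-ins for dev_attrib.block_size and PAGE_SIZE; plain '/' and '%' replace the kernel's do_div():

    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_SIZE 512u
    #define PAGE_SZ    4096u

    int main(void)
    {
        uint64_t lba      = 1001;                   /* example LBA              */
        uint64_t byte_off = lba * BLOCK_SIZE;       /* task_lba * block_size    */
        uint32_t rd_page   = byte_off / PAGE_SZ;    /* which backing page       */
        uint32_t rd_offset = byte_off % PAGE_SZ;    /* byte offset inside it    */

        printf("lba %llu -> page %u offset %u\n",
               (unsigned long long)lba, rd_page, rd_offset);
        return 0;
    }
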
/* rd_free_task(): (Part of se_subsystem_api_t template)

View File

@ -345,10 +345,6 @@ static void core_tmr_drain_cmd_list(
" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
"Preempt" : "", cmd, cmd->t_state,
atomic_read(&cmd->t_fe_count));
/*
* Signal that the command has failed via cmd->se_cmd_flags,
*/
transport_new_cmd_failure(cmd);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
atomic_read(&cmd->t_fe_count));

View File

@ -61,7 +61,6 @@
static int sub_api_initialized;
static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
@ -82,24 +81,18 @@ static int transport_generic_get_mem(struct se_cmd *cmd);
static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void transport_generic_request_failure(struct se_cmd *, int, int);
static void transport_generic_request_failure(struct se_cmd *);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
{
se_cmd_cache = kmem_cache_create("se_cmd_cache",
sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
if (!se_cmd_cache) {
pr_err("kmem_cache_create for struct se_cmd failed\n");
goto out;
}
se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
0, NULL);
if (!se_tmr_req_cache) {
pr_err("kmem_cache_create() for struct se_tmr_req"
" failed\n");
goto out_free_cmd_cache;
goto out;
}
se_sess_cache = kmem_cache_create("se_sess_cache",
sizeof(struct se_session), __alignof__(struct se_session),
@ -182,8 +175,6 @@ out_free_sess_cache:
kmem_cache_destroy(se_sess_cache);
out_free_tmr_req_cache:
kmem_cache_destroy(se_tmr_req_cache);
out_free_cmd_cache:
kmem_cache_destroy(se_cmd_cache);
out:
return -ENOMEM;
}
@ -191,7 +182,6 @@ out:
void release_se_kmem_caches(void)
{
destroy_workqueue(target_completion_wq);
kmem_cache_destroy(se_cmd_cache);
kmem_cache_destroy(se_tmr_req_cache);
kmem_cache_destroy(se_sess_cache);
kmem_cache_destroy(se_ua_cache);
@ -680,9 +670,9 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
task->task_scsi_status = GOOD;
} else {
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
task->task_se_cmd->transport_error_status =
PYX_TRANSPORT_ILLEGAL_REQUEST;
task->task_se_cmd->scsi_sense_reason =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
transport_complete_task(task, good);
@ -693,7 +683,7 @@ static void target_complete_failure_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
transport_generic_request_failure(cmd, 1, 1);
transport_generic_request_failure(cmd);
}
/* transport_complete_task():
@ -755,10 +745,11 @@ void transport_complete_task(struct se_task *task, int success)
if (cmd->t_tasks_failed) {
if (!task->task_error_status) {
task->task_error_status =
PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
cmd->transport_error_status =
PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
cmd->scsi_sense_reason =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
atomic_set(&cmd->t_transport_complete, 1);
@ -1335,23 +1326,17 @@ struct se_device *transport_add_device_to_core_hba(
dev->se_hba = hba;
dev->se_sub_dev = se_dev;
dev->transport = transport;
atomic_set(&dev->active_cmds, 0);
INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_sep_list);
INIT_LIST_HEAD(&dev->dev_tmr_list);
INIT_LIST_HEAD(&dev->execute_task_list);
INIT_LIST_HEAD(&dev->delayed_cmd_list);
INIT_LIST_HEAD(&dev->ordered_cmd_list);
INIT_LIST_HEAD(&dev->state_task_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
spin_lock_init(&dev->ordered_cmd_lock);
spin_lock_init(&dev->state_task_lock);
spin_lock_init(&dev->dev_alua_lock);
spin_lock_init(&dev->dev_reservation_lock);
spin_lock_init(&dev->dev_status_lock);
spin_lock_init(&dev->dev_status_thr_lock);
spin_lock_init(&dev->se_port_lock);
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
@ -1507,7 +1492,6 @@ void transport_init_se_cmd(
{
INIT_LIST_HEAD(&cmd->se_lun_node);
INIT_LIST_HEAD(&cmd->se_delayed_node);
INIT_LIST_HEAD(&cmd->se_ordered_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
INIT_LIST_HEAD(&cmd->se_queue_node);
INIT_LIST_HEAD(&cmd->se_cmd_list);
@ -1573,6 +1557,8 @@ int transport_generic_allocate_tasks(
pr_err("Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
/*
@ -1588,6 +1574,9 @@ int transport_generic_allocate_tasks(
" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
scsi_command_size(cdb),
(unsigned long)sizeof(cmd->__t_task_cdb));
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
}
} else
@ -1658,11 +1647,9 @@ int transport_handle_cdb_direct(
* and call transport_generic_request_failure() if necessary..
*/
ret = transport_generic_new_cmd(cmd);
if (ret < 0) {
cmd->transport_error_status = ret;
transport_generic_request_failure(cmd, 0,
(cmd->data_direction != DMA_TO_DEVICE));
}
if (ret < 0)
transport_generic_request_failure(cmd);
return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
@ -1798,20 +1785,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
/*
* Handle SAM-esque emulation for generic transport request failures.
*/
static void transport_generic_request_failure(
struct se_cmd *cmd,
int complete,
int sc)
static void transport_generic_request_failure(struct se_cmd *cmd)
{
int ret = 0;
pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
cmd->t_task_cdb[0]);
pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",
pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
cmd->se_tfo->get_cmd_state(cmd),
cmd->t_state,
cmd->transport_error_status);
cmd->t_state, cmd->scsi_sense_reason);
pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
" t_transport_active: %d t_transport_stop: %d"
@ -1829,46 +1812,19 @@ static void transport_generic_request_failure(
if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
transport_complete_task_attr(cmd);
if (complete) {
cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
}
switch (cmd->transport_error_status) {
case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
switch (cmd->scsi_sense_reason) {
case TCM_NON_EXISTENT_LUN:
case TCM_UNSUPPORTED_SCSI_OPCODE:
case TCM_INVALID_CDB_FIELD:
case TCM_INVALID_PARAMETER_LIST:
case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
case TCM_UNKNOWN_MODE_PAGE:
case TCM_WRITE_PROTECTED:
case TCM_CHECK_CONDITION_ABORT_CMD:
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
case TCM_CHECK_CONDITION_NOT_READY:
break;
case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
break;
case PYX_TRANSPORT_INVALID_CDB_FIELD:
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
break;
case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
break;
case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
if (!sc)
transport_new_cmd_failure(cmd);
/*
* Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
* we force this session to fall back to session
* recovery.
*/
cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
goto check_stop;
case PYX_TRANSPORT_LU_COMM_FAILURE:
case PYX_TRANSPORT_ILLEGAL_REQUEST:
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
break;
case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
break;
case PYX_TRANSPORT_WRITE_PROTECTED:
cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
break;
case PYX_TRANSPORT_RESERVATION_CONFLICT:
case TCM_RESERVATION_CONFLICT:
/*
* No SENSE Data payload for this case, set SCSI Status
* and queue the response to $FABRIC_MOD.
@ -1893,15 +1849,9 @@ static void transport_generic_request_failure(
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
goto check_stop;
case PYX_TRANSPORT_USE_SENSE_REASON:
/*
* struct se_cmd->scsi_sense_reason already set
*/
break;
default:
pr_err("Unknown transport error for CDB 0x%02x: %d\n",
cmd->t_task_cdb[0],
cmd->transport_error_status);
cmd->t_task_cdb[0], cmd->scsi_sense_reason);
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
break;
}
@ -1912,14 +1862,10 @@ static void transport_generic_request_failure(
* transport_send_check_condition_and_sense() after handling
* possible unsolicited write data payloads.
*/
if (!sc && !cmd->se_tfo->new_cmd_map)
transport_new_cmd_failure(cmd);
else {
ret = transport_send_check_condition_and_sense(cmd,
cmd->scsi_sense_reason, 0);
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
}
ret = transport_send_check_condition_and_sense(cmd,
cmd->scsi_sense_reason, 0);
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
check_stop:
transport_lun_remove_cmd(cmd);
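
A rough user-space model of the reworked failure path above: the command now carries a single scsi_sense_reason, a reservation conflict is answered with RESERVATION CONFLICT status and no sense payload, and every other reason becomes a CHECK CONDITION. The status values follow SAM; the enum and helper names here are illustrative.

    #include <stdio.h>

    enum tcm_sense_reason { TCM_INVALID_CDB_FIELD, TCM_WRITE_PROTECTED,
                            TCM_RESERVATION_CONFLICT };

    #define SAM_STAT_CHECK_CONDITION      0x02
    #define SAM_STAT_RESERVATION_CONFLICT 0x18

    static int fail_cmd(enum tcm_sense_reason reason)
    {
        if (reason == TCM_RESERVATION_CONFLICT)
            return SAM_STAT_RESERVATION_CONFLICT;  /* no sense data          */
        return SAM_STAT_CHECK_CONDITION;           /* sense built from reason */
    }

    int main(void)
    {
        printf("invalid CDB -> status 0x%02x\n", fail_cmd(TCM_INVALID_CDB_FIELD));
        printf("reservation -> status 0x%02x\n", fail_cmd(TCM_RESERVATION_CONFLICT));
        return 0;
    }
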
@ -2002,19 +1948,12 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
* to allow the passed struct se_cmd list of tasks to the front of the list.
*/
if (cmd->sam_task_attr == MSG_HEAD_TAG) {
atomic_inc(&cmd->se_dev->dev_hoq_count);
smp_mb__after_atomic_inc();
pr_debug("Added HEAD_OF_QUEUE for CDB:"
" 0x%02x, se_ordered_id: %u\n",
cmd->t_task_cdb[0],
cmd->se_ordered_id);
return 1;
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
spin_lock(&cmd->se_dev->ordered_cmd_lock);
list_add_tail(&cmd->se_ordered_node,
&cmd->se_dev->ordered_cmd_list);
spin_unlock(&cmd->se_dev->ordered_cmd_lock);
atomic_inc(&cmd->se_dev->dev_ordered_sync);
smp_mb__after_atomic_inc();
@ -2076,9 +2015,9 @@ static int transport_execute_tasks(struct se_cmd *cmd)
{
int add_tasks;
if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
transport_generic_request_failure(cmd, 0, 1);
if (se_dev_check_online(cmd->se_dev) != 0) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
transport_generic_request_failure(cmd);
return 0;
}
@ -2163,14 +2102,13 @@ check_depth:
else
error = dev->transport->do_task(task);
if (error != 0) {
cmd->transport_error_status = error;
spin_lock_irqsave(&cmd->t_state_lock, flags);
task->task_flags &= ~TF_ACTIVE;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
atomic_set(&cmd->t_transport_sent, 0);
transport_stop_tasks_for_cmd(cmd);
atomic_inc(&dev->depth_left);
transport_generic_request_failure(cmd, 0, 1);
transport_generic_request_failure(cmd);
}
goto check_depth;
@ -2178,19 +2116,6 @@ check_depth:
return 0;
}
void transport_new_cmd_failure(struct se_cmd *se_cmd)
{
unsigned long flags;
/*
* Any unsolicited data will get dumped for failed command inside of
* the fabric plugin
*/
spin_lock_irqsave(&se_cmd->t_state_lock, flags);
se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}
static inline u32 transport_get_sectors_6(
unsigned char *cdb,
struct se_cmd *cmd,
@ -2213,10 +2138,15 @@ static inline u32 transport_get_sectors_6(
/*
* Everything else assume TYPE_DISK Sector CDB location.
* Use 8-bit sector value.
* Use 8-bit sector value. SBC-3 says:
*
* A TRANSFER LENGTH field set to zero specifies that 256
* logical blocks shall be written. Any other value
* specifies the number of logical blocks that shall be
* written.
*/
type_disk:
return (u32)cdb[4];
return cdb[4] ? : 256;
}
static inline u32 transport_get_sectors_10(
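
The SBC-3 wording quoted above is easy to verify in isolation: for 6-byte READ/WRITE CDBs a TRANSFER LENGTH of zero means 256 blocks. The `?:` form used in the diff is a GNU extension; the plain ternary below is equivalent. A small sketch with hand-built WRITE(6) CDBs:

    #include <stdio.h>

    static unsigned int sectors_from_cdb6(const unsigned char *cdb)
    {
        return cdb[4] ? cdb[4] : 256;   /* TRANSFER LENGTH 0 => 256 blocks */
    }

    int main(void)
    {
        unsigned char write6_zero[6] = { 0x0a, 0, 0, 0x10, 0x00, 0 };
        unsigned char write6_one[6]  = { 0x0a, 0, 0, 0x10, 0x01, 0 };

        printf("TL=0x00 -> %u blocks\n", sectors_from_cdb6(write6_zero)); /* 256 */
        printf("TL=0x01 -> %u blocks\n", sectors_from_cdb6(write6_one));  /* 1   */
        return 0;
    }
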
@ -2460,27 +2390,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
return -1;
}
static int
transport_handle_reservation_conflict(struct se_cmd *cmd)
{
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
/*
* For UA Interlock Code 11b, a RESERVATION CONFLICT will
* establish a UNIT ATTENTION with PREVIOUS RESERVATION
* CONFLICT STATUS.
*
* See spc4r17, section 7.4.6 Control Mode Page, Table 349
*/
if (cmd->se_sess &&
cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
return -EINVAL;
}
static inline long long transport_dev_end_lba(struct se_device *dev)
{
return dev->transport->get_blocks(dev) + 1;
@ -2595,8 +2504,12 @@ static int transport_generic_cmd_sequencer(
*/
if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
cmd, cdb, pr_reg_type) != 0)
return transport_handle_reservation_conflict(cmd);
cmd, cdb, pr_reg_type) != 0) {
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EBUSY;
}
/*
* This means the CDB is allowed for the SCSI Initiator port
* when said port is *NOT* holding the legacy SPC-2 or
@ -2658,7 +2571,8 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
cmd->t_tasks_fua = (cdb[1] & 0x8);
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_12:
@ -2667,7 +2581,8 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
cmd->t_tasks_fua = (cdb[1] & 0x8);
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_16:
@ -2676,12 +2591,13 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_64(cdb);
cmd->t_tasks_fua = (cdb[1] & 0x8);
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case XDWRITEREAD_10:
if ((cmd->data_direction != DMA_TO_DEVICE) ||
!(cmd->t_tasks_bidi))
!(cmd->se_cmd_flags & SCF_BIDI))
goto out_invalid_cdb_field;
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
@ -2700,7 +2616,8 @@ static int transport_generic_cmd_sequencer(
* Setup BIDI XOR callback to be run after I/O completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
cmd->t_tasks_fua = (cdb[1] & 0x8);
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
break;
case VARIABLE_LENGTH_CMD:
service_action = get_unaligned_be16(&cdb[8]);
@ -2728,7 +2645,8 @@ static int transport_generic_cmd_sequencer(
* completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
cmd->t_tasks_fua = (cdb[10] & 0x8);
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
break;
case WRITE_SAME_32:
sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
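
The FUA handling repeated in the WRITE(10/12/16) and XDWRITEREAD cases above comes down to one bit: bit 3 of CDB byte 1, now recorded as SCF_FUA in se_cmd_flags rather than a dedicated t_tasks_fua field. A minimal sketch, with the flag value taken from the header change later in this diff and the helper name invented for illustration:

    #include <stdio.h>

    #define SCF_FUA 0x00000200u   /* matches the value added to the header below */

    static unsigned int sequencer_flags(const unsigned char *cdb)
    {
        unsigned int flags = 0;

        if (cdb[1] & 0x08)        /* FUA bit in a WRITE(10/12/16) CDB */
            flags |= SCF_FUA;
        return flags;
    }

    int main(void)
    {
        unsigned char write10_fua[10] = { 0x2a, 0x08, 0, 0, 0, 0, 0, 0, 0x08, 0 };
        unsigned char write10[10]     = { 0x2a, 0x00, 0, 0, 0, 0, 0, 0, 0x08, 0 };

        printf("FUA set:   0x%x\n", sequencer_flags(write10_fua));
        printf("FUA clear: 0x%x\n", sequencer_flags(write10));
        return 0;
    }
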
@ -3171,18 +3089,13 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
atomic_dec(&dev->dev_hoq_count);
smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for"
" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
spin_lock(&dev->ordered_cmd_lock);
list_del(&cmd->se_ordered_node);
atomic_dec(&dev->dev_ordered_sync);
smp_mb__after_atomic_dec();
spin_unlock(&dev->ordered_cmd_lock);
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@ -3495,6 +3408,18 @@ int transport_generic_map_mem_to_cmd(
if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
/*
* Reject SCSI data overflow with map_mem_to_cmd() as incoming
* scatterlists already have been set to follow what the fabric
* passes for the original expected data transfer length.
*/
if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
pr_warn("Rejecting SCSI DATA overflow for fabric using"
" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
cmd->t_data_sg = sgl;
cmd->t_data_nents = sgl_count;
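
A user-space model of the new overflow check for fabrics that pass pre-registered SGLs: if the CDB asks for more data than the fabric's expected transfer length (SCSI data overflow), the mapping is refused rather than accepted with a short SGL. The comparison and names below are illustrative, not the kernel's exact bookkeeping.

    #include <stdio.h>
    #include <errno.h>

    static int map_fabric_sgl(unsigned int cdb_bytes, unsigned int fabric_bytes)
    {
        if (cdb_bytes > fabric_bytes) {
            fprintf(stderr, "rejecting data overflow: CDB wants %u, fabric mapped %u\n",
                    cdb_bytes, fabric_bytes);
            return -EINVAL;
        }
        return 0;   /* SGL accepted as-is */
    }

    int main(void)
    {
        printf("exact fit: %d\n", map_fabric_sgl(4096, 4096));
        printf("overflow:  %d\n", map_fabric_sgl(8192, 4096));
        return 0;
    }
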
@ -3813,7 +3738,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
cmd->data_length) {
ret = transport_generic_get_mem(cmd);
if (ret < 0)
return ret;
goto out_fail;
}
/*
@ -3842,8 +3767,15 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
task_cdbs = transport_allocate_control_task(cmd);
}
if (task_cdbs <= 0)
if (task_cdbs < 0)
goto out_fail;
else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
cmd->t_state = TRANSPORT_COMPLETE;
atomic_set(&cmd->t_transport_active, 1);
INIT_WORK(&cmd->work, target_complete_ok_work);
queue_work(target_completion_wq, &cmd->work);
return 0;
}
if (set_counts) {
atomic_inc(&cmd->t_fe_count);
@ -3929,7 +3861,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
else if (ret < 0)
return ret;
return PYX_TRANSPORT_WRITE_PENDING;
return 1;
queue_full:
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
@ -4602,9 +4534,6 @@ void transport_send_task_abort(struct se_cmd *cmd)
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
atomic_inc(&cmd->t_transport_aborted);
smp_mb__after_atomic_inc();
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
transport_new_cmd_failure(cmd);
return;
}
}
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
@ -4670,8 +4599,6 @@ static int transport_processing_thread(void *param)
struct se_cmd *cmd;
struct se_device *dev = (struct se_device *) param;
set_user_nice(current, -20);
while (!kthread_should_stop()) {
ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
atomic_read(&dev->dev_queue_obj.queue_cnt) ||
@ -4698,18 +4625,13 @@ get_cmd:
}
ret = cmd->se_tfo->new_cmd_map(cmd);
if (ret < 0) {
cmd->transport_error_status = ret;
transport_generic_request_failure(cmd,
0, (cmd->data_direction !=
DMA_TO_DEVICE));
transport_generic_request_failure(cmd);
break;
}
ret = transport_generic_new_cmd(cmd);
if (ret < 0) {
cmd->transport_error_status = ret;
transport_generic_request_failure(cmd,
0, (cmd->data_direction !=
DMA_TO_DEVICE));
transport_generic_request_failure(cmd);
break;
}
break;
case TRANSPORT_PROCESS_WRITE:

View File

@ -200,7 +200,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
lport = ep->lp;
fp = fc_frame_alloc(lport, sizeof(*txrdy));
if (!fp)
return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
return -ENOMEM; /* Signal QUEUE_FULL */
txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
memset(txrdy, 0, sizeof(*txrdy));
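
The -ENOMEM return above replaces PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; the core now treats -EAGAIN/-ENOMEM from fabric callbacks as "queue full, retry later" and any other negative value as a hard failure. A tiny sketch of that classification, with invented helper names:

    #include <stdio.h>
    #include <errno.h>

    enum disposition { DISP_OK, DISP_QUEUE_FULL, DISP_FAIL };

    static enum disposition classify(int ret)
    {
        if (ret == 0)
            return DISP_OK;
        if (ret == -EAGAIN || ret == -ENOMEM)
            return DISP_QUEUE_FULL;   /* requeue and retry later     */
        return DISP_FAIL;             /* generic request failure     */
    }

    int main(void)
    {
        printf("-ENOMEM -> %d (queue full)\n", classify(-ENOMEM));
        printf("-EINVAL -> %d (failure)\n", classify(-EINVAL));
        return 0;
    }
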

View File

@ -436,8 +436,7 @@ static void ft_del_lport(struct se_wwn *wwn)
struct ft_lport_acl *lacl = container_of(wwn,
struct ft_lport_acl, fc_lport_wwn);
pr_debug("del lport %s\n",
config_item_name(&wwn->wwn_group.cg_item));
pr_debug("del lport %s\n", lacl->name);
mutex_lock(&ft_lport_lock);
list_del(&lacl->list);
mutex_unlock(&ft_lport_lock);

View File

@ -103,9 +103,10 @@ enum se_cmd_flags_table {
SCF_SCSI_NON_DATA_CDB = 0x00000040,
SCF_SCSI_CDB_EXCEPTION = 0x00000080,
SCF_SCSI_RESERVATION_CONFLICT = 0x00000100,
SCF_SE_CMD_FAILED = 0x00000400,
SCF_FUA = 0x00000200,
SCF_SE_LUN_CMD = 0x00000800,
SCF_SE_ALLOW_EOO = 0x00001000,
SCF_BIDI = 0x00002000,
SCF_SENT_CHECK_CONDITION = 0x00004000,
SCF_OVERFLOW_BIT = 0x00008000,
SCF_UNDERFLOW_BIT = 0x00010000,
@ -154,6 +155,7 @@ enum tcm_sense_reason_table {
TCM_CHECK_CONDITION_ABORT_CMD = 0x0d,
TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e,
TCM_CHECK_CONDITION_NOT_READY = 0x0f,
TCM_RESERVATION_CONFLICT = 0x10,
};
struct se_obj {
@ -211,7 +213,6 @@ struct t10_alua_lu_gp {
u16 lu_gp_id;
int lu_gp_valid_id;
u32 lu_gp_members;
atomic_t lu_gp_shutdown;
atomic_t lu_gp_ref_cnt;
spinlock_t lu_gp_lock;
struct config_group lu_gp_group;
@ -422,11 +423,9 @@ struct se_cmd {
int sam_task_attr;
/* Transport protocol dependent state, see transport_state_table */
enum transport_state_table t_state;
/* Transport specific error status */
int transport_error_status;
/* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
int check_release:1;
int cmd_wait_set:1;
unsigned check_release:1;
unsigned cmd_wait_set:1;
/* See se_cmd_flags_table */
u32 se_cmd_flags;
u32 se_ordered_id;
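
The bitfield changes above (and the sess_tearing_down change further down) switch from `int x:1` to `unsigned x:1` because a signed one-bit field can only store 0 and -1 on common ABIs, so a test against 1 misbehaves. A stand-alone demonstration:

    #include <stdio.h>

    struct flags_signed   { int      tearing_down:1; };
    struct flags_unsigned { unsigned tearing_down:1; };

    int main(void)
    {
        struct flags_signed   s = { 0 };
        struct flags_unsigned u = { 0 };

        s.tearing_down = 1;
        u.tearing_down = 1;

        printf("signed:   stored %d, (== 1) is %d\n",
               s.tearing_down, s.tearing_down == 1);   /* typically -1, 0 */
        printf("unsigned: stored %d, (== 1) is %d\n",
               u.tearing_down, u.tearing_down == 1);   /* always 1, 1 */
        return 0;
    }
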
@ -441,13 +440,10 @@ struct se_cmd {
/* Used for sense data */
void *sense_buffer;
struct list_head se_delayed_node;
struct list_head se_ordered_node;
struct list_head se_lun_node;
struct list_head se_qf_node;
struct se_device *se_dev;
struct se_dev_entry *se_deve;
struct se_device *se_obj_ptr;
struct se_device *se_orig_obj_ptr;
struct se_lun *se_lun;
/* Only used for internal passthrough and legacy TCM fabric modules */
struct se_session *se_sess;
@ -463,8 +459,6 @@ struct se_cmd {
unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
unsigned long long t_task_lba;
int t_tasks_failed;
int t_tasks_fua;
bool t_tasks_bidi;
u32 t_tasks_sg_chained_no;
atomic_t t_fe_count;
atomic_t t_se_count;
@ -489,14 +483,6 @@ struct se_cmd {
struct work_struct work;
/*
* Used for pre-registered fabric SGL passthrough WRITE and READ
* with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
* and other HW target mode fabric modules.
*/
struct scatterlist *t_task_pt_sgl;
u32 t_task_pt_sgl_num;
struct scatterlist *t_data_sg;
unsigned int t_data_nents;
struct scatterlist *t_bidi_data_sg;
@ -562,7 +548,7 @@ struct se_node_acl {
} ____cacheline_aligned;
struct se_session {
int sess_tearing_down:1;
unsigned sess_tearing_down:1;
u64 sess_bin_isid;
struct se_node_acl *se_node_acl;
struct se_portal_group *se_tpg;
@ -683,7 +669,6 @@ struct se_subsystem_dev {
struct t10_reservation t10_pr;
spinlock_t se_dev_lock;
void *se_dev_su_ptr;
struct list_head se_dev_node;
struct config_group se_dev_group;
/* For T10 Reservations */
struct config_group se_dev_pr_group;
@ -692,9 +677,6 @@ struct se_subsystem_dev {
} ____cacheline_aligned;
struct se_device {
/* Set to 1 if thread is NOT sleeping on thread_sem */
u8 thread_active;
u8 dev_status_timer_flags;
/* RELATIVE TARGET PORT IDENTIFIER Counter */
u16 dev_rpti_counter;
/* Used for SAM Task Attribute ordering */
@ -719,14 +701,10 @@ struct se_device {
u64 write_bytes;
spinlock_t stats_lock;
/* Active commands on this virtual SE device */
atomic_t active_cmds;
atomic_t simple_cmds;
atomic_t depth_left;
atomic_t dev_ordered_id;
atomic_t dev_tur_active;
atomic_t execute_tasks;
atomic_t dev_status_thr_count;
atomic_t dev_hoq_count;
atomic_t dev_ordered_sync;
atomic_t dev_qf_count;
struct se_obj dev_obj;
@ -734,14 +712,9 @@ struct se_device {
struct se_obj dev_export_obj;
struct se_queue_obj dev_queue_obj;
spinlock_t delayed_cmd_lock;
spinlock_t ordered_cmd_lock;
spinlock_t execute_task_lock;
spinlock_t state_task_lock;
spinlock_t dev_alua_lock;
spinlock_t dev_reservation_lock;
spinlock_t dev_state_lock;
spinlock_t dev_status_lock;
spinlock_t dev_status_thr_lock;
spinlock_t se_port_lock;
spinlock_t se_tmr_lock;
spinlock_t qf_cmd_lock;
@ -753,14 +726,10 @@ struct se_device {
struct t10_pr_registration *dev_pr_res_holder;
struct list_head dev_sep_list;
struct list_head dev_tmr_list;
struct timer_list dev_status_timer;
/* Pointer to descriptor for processing thread */
struct task_struct *process_thread;
pid_t process_thread_pid;
struct task_struct *dev_mgmt_thread;
struct work_struct qf_work_queue;
struct list_head delayed_cmd_list;
struct list_head ordered_cmd_list;
struct list_head execute_task_list;
struct list_head state_task_list;
struct list_head qf_cmd_list;
@ -771,8 +740,6 @@ struct se_device {
struct se_subsystem_api *transport;
/* Linked list for struct se_hba struct se_device list */
struct list_head dev_list;
/* Linked list for struct se_global->g_se_dev_list */
struct list_head g_se_dev_list;
} ____cacheline_aligned;
struct se_hba {
@ -834,7 +801,6 @@ struct se_port {
u32 sep_index;
struct scsi_port_stats sep_stats;
/* Used for ALUA Target Port Groups membership */
atomic_t sep_tg_pt_gp_active;
atomic_t sep_tg_pt_secondary_offline;
/* Used for PR ALL_TG_PT=1 */
atomic_t sep_tg_pt_ref_cnt;

View File

@ -10,29 +10,6 @@
#define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */
#define PYX_TRANSPORT_SENT_TO_TRANSPORT 0
#define PYX_TRANSPORT_WRITE_PENDING 1
#define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE -1
#define PYX_TRANSPORT_HBA_QUEUE_FULL -2
#define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS -3
#define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES -4
#define PYX_TRANSPORT_INVALID_CDB_FIELD -5
#define PYX_TRANSPORT_INVALID_PARAMETER_LIST -6
#define PYX_TRANSPORT_LU_COMM_FAILURE -7
#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE -8
#define PYX_TRANSPORT_WRITE_PROTECTED -9
#define PYX_TRANSPORT_RESERVATION_CONFLICT -10
#define PYX_TRANSPORT_ILLEGAL_REQUEST -11
#define PYX_TRANSPORT_USE_SENSE_REASON -12
#ifndef SAM_STAT_RESERVATION_CONFLICT
#define SAM_STAT_RESERVATION_CONFLICT 0x18
#endif
#define TRANSPORT_PLUGIN_FREE 0
#define TRANSPORT_PLUGIN_REGISTERED 1
#define TRANSPORT_PLUGIN_PHBA_PDEV 1
#define TRANSPORT_PLUGIN_VHBA_PDEV 2
#define TRANSPORT_PLUGIN_VHBA_VDEV 3
@ -158,7 +135,6 @@ extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
extern int transport_handle_cdb_direct(struct se_cmd *);
extern int transport_generic_handle_cdb_map(struct se_cmd *);
extern int transport_generic_handle_data(struct se_cmd *);
extern void transport_new_cmd_failure(struct se_cmd *);
extern int transport_generic_handle_tmr(struct se_cmd *);
extern bool target_stop_task(struct se_task *task, unsigned long *flags);
extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,