target: Follow up core updates from AGrover and HCH (round 4)

This patch contains the squashed version of the fourth round series cleanups
from Andy and Christoph following the heavy lifting in the preceding
'Eliminate usage of struct se_mem' and 'Make all control CDBs scatter-gather'
changes.  This also includes a conversion of target core and the v3.0
mainline fabric modules (loopback and tcm_fc) to use pr_debug and the
CONFIG_DYNAMIC_DEBUG infrastructure!
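
As a sketch of the conversion pattern (the pr_fmt define and the wrapper
function below are the usual printk-to-pr_* idiom, shown for illustration
rather than quoted from this patch):

	/*
	 * Prefix all pr_*() output from this file with the module name;
	 * must be defined before <linux/printk.h> is pulled in.
	 */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/printk.h>

	static void tcm_loop_debug_example(void)
	{
		/* Before: printk(KERN_ERR "Unable to allocate ...\n");
		 * real errors stay unconditionally compiled in: */
		pr_err("Unable to allocate struct tcm_loop_cmd\n");

		/* Before: printk(KERN_INFO "TCM_Loop_ConfigFS: ...\n");
		 * pr_debug() compiles away unless DEBUG is defined, and
		 * with CONFIG_DYNAMIC_DEBUG can be toggled at runtime per
		 * call site, file, or module. */
		pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
	}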

These have been squashed into this fourth and final round for v3.1.

target: Remove ifdeffed code in t_g_process_write
target: Remove direct ramdisk code
target: Rename task_sg_num to task_sg_nents
target: Remove custom debug macros for pr_debug. Use pr_err().
target: Remove custom debug macros in mainline fabrics
target: Set WSNZ=1 in block limits VPD. Abort if WRITE_SAME sectors = 0
target: Remove transport do_se_mem_map callback
target: Further simplify transport_free_pages
target: Redo task allocation return value handling
target: Remove extra parentheses
target: change alloc_task call to take *cdb, not *cmd
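
For the last item above, a minimal sketch of the shape of the change (the
struct and member names here are hypothetical stand-ins for the backend
ops table, not lines from the diff):

	struct se_task;
	struct se_cmd;

	/* Hypothetical ops table illustrating the new signature. */
	struct example_backend_ops {
		/* Before: the task allocator received the whole command. */
		struct se_task *(*alloc_task_old)(struct se_cmd *cmd);

		/* After: only the CDB is passed in, which is all the
		 * allocation path needs in order to size the task. */
		struct se_task *(*alloc_task)(unsigned char *cdb);
	};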

(nab: Fix bogus struct file assignments in fd_do_readv and fd_do_writev)

Signed-off-by: Andy Grover <agrover@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Andy Grover, 2011-06-08 10:36:43 -0700, committed by Nicholas Bellinger
commit 6708bb27bb (parent ec98f7825c)
30 changed files with 1388 additions and 2045 deletions

drivers/target/loopback/Kconfig

@ -3,9 +3,3 @@ config LOOPBACK_TARGET
help
Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD
fabric loopback module.
config LOOPBACK_TARGET_CDB_DEBUG
bool "TCM loopback fabric module CDB debug code"
depends on LOOPBACK_TARGET
help
Say Y here to enable the TCM loopback fabric module CDB debug code
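
With the compile-time option above removed, equivalent output is available
at runtime on CONFIG_DYNAMIC_DEBUG kernels through the standard dynamic
debug interface, e.g. by writing 'module tcm_loop +p' to
/sys/kernel/debug/dynamic_debug/control.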

drivers/target/loopback/tcm_loop.c

@ -79,7 +79,7 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
if (!tl_cmd) {
printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n");
pr_err("Unable to allocate struct tcm_loop_cmd\n");
set_host_byte(sc, DID_ERROR);
return NULL;
}
@ -281,7 +281,7 @@ static int tcm_loop_queuecommand(
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
TL_CDB_DEBUG("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
" scsi_buf_len: %u\n", sc->device->host->host_no,
sc->device->id, sc->device->channel, sc->device->lun,
sc->cmnd[0], scsi_bufflen(sc));
@ -331,7 +331,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
*/
tl_nexus = tl_hba->tl_nexus;
if (!tl_nexus) {
printk(KERN_ERR "Unable to perform device reset without"
pr_err("Unable to perform device reset without"
" active I_T Nexus\n");
return FAILED;
}
@ -344,13 +344,13 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
if (!tl_cmd) {
printk(KERN_ERR "Unable to allocate memory for tl_cmd\n");
pr_err("Unable to allocate memory for tl_cmd\n");
return FAILED;
}
tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
if (!tl_tmr) {
printk(KERN_ERR "Unable to allocate memory for tl_tmr\n");
pr_err("Unable to allocate memory for tl_tmr\n");
goto release;
}
init_waitqueue_head(&tl_tmr->tl_tmr_wait);
@ -435,7 +435,7 @@ static int tcm_loop_driver_probe(struct device *dev)
sh = scsi_host_alloc(&tcm_loop_driver_template,
sizeof(struct tcm_loop_hba));
if (!sh) {
printk(KERN_ERR "Unable to allocate struct scsi_host\n");
pr_err("Unable to allocate struct scsi_host\n");
return -ENODEV;
}
tl_hba->sh = sh;
@ -454,7 +454,7 @@ static int tcm_loop_driver_probe(struct device *dev)
error = scsi_add_host(sh, &tl_hba->dev);
if (error) {
printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
pr_err("%s: scsi_add_host failed\n", __func__);
scsi_host_put(sh);
return -ENODEV;
}
@ -495,7 +495,7 @@ static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host
ret = device_register(&tl_hba->dev);
if (ret) {
printk(KERN_ERR "device_register() failed for"
pr_err("device_register() failed for"
" tl_hba->dev: %d\n", ret);
return -ENODEV;
}
@ -513,24 +513,24 @@ static int tcm_loop_alloc_core_bus(void)
tcm_loop_primary = root_device_register("tcm_loop_0");
if (IS_ERR(tcm_loop_primary)) {
printk(KERN_ERR "Unable to allocate tcm_loop_primary\n");
pr_err("Unable to allocate tcm_loop_primary\n");
return PTR_ERR(tcm_loop_primary);
}
ret = bus_register(&tcm_loop_lld_bus);
if (ret) {
printk(KERN_ERR "bus_register() failed for tcm_loop_lld_bus\n");
pr_err("bus_register() failed for tcm_loop_lld_bus\n");
goto dev_unreg;
}
ret = driver_register(&tcm_loop_driverfs);
if (ret) {
printk(KERN_ERR "driver_register() failed for"
pr_err("driver_register() failed for"
"tcm_loop_driverfs\n");
goto bus_unreg;
}
printk(KERN_INFO "Initialized TCM Loop Core Bus\n");
pr_debug("Initialized TCM Loop Core Bus\n");
return ret;
bus_unreg:
@ -546,7 +546,7 @@ static void tcm_loop_release_core_bus(void)
bus_unregister(&tcm_loop_lld_bus);
root_device_unregister(tcm_loop_primary);
printk(KERN_INFO "Releasing TCM Loop Core BUS\n");
pr_debug("Releasing TCM Loop Core BUS\n");
}
static char *tcm_loop_get_fabric_name(void)
@ -574,7 +574,7 @@ static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
case SCSI_PROTOCOL_ISCSI:
return iscsi_get_fabric_proto_ident(se_tpg);
default:
printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
pr_err("Unknown tl_proto_id: 0x%02x, using"
" SAS emulation\n", tl_hba->tl_proto_id);
break;
}
@ -630,7 +630,7 @@ static u32 tcm_loop_get_pr_transport_id(
return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
format_code, buf);
default:
printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
pr_err("Unknown tl_proto_id: 0x%02x, using"
" SAS emulation\n", tl_hba->tl_proto_id);
break;
}
@ -660,7 +660,7 @@ static u32 tcm_loop_get_pr_transport_id_len(
return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
format_code);
default:
printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
pr_err("Unknown tl_proto_id: 0x%02x, using"
" SAS emulation\n", tl_hba->tl_proto_id);
break;
}
@ -694,7 +694,7 @@ static char *tcm_loop_parse_pr_out_transport_id(
return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
port_nexus_ptr);
default:
printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
pr_err("Unknown tl_proto_id: 0x%02x, using"
" SAS emulation\n", tl_hba->tl_proto_id);
break;
}
@ -743,7 +743,7 @@ static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
if (!tl_nacl) {
printk(KERN_ERR "Unable to allocate struct tcm_loop_nacl\n");
pr_err("Unable to allocate struct tcm_loop_nacl\n");
return NULL;
}
@ -853,7 +853,7 @@ static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
TL_CDB_DEBUG("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
" cdb: 0x%02x\n", sc, sc->cmnd[0]);
sc->result = SAM_STAT_GOOD;
@ -868,7 +868,7 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
TL_CDB_DEBUG("tcm_loop_queue_status() called for scsi_cmnd: %p"
pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
" cdb: 0x%02x\n", sc, sc->cmnd[0]);
if (se_cmd->sense_buffer &&
@ -943,7 +943,7 @@ static int tcm_loop_port_link(
*/
scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
printk(KERN_INFO "TCM_Loop_ConfigFS: Port Link Successful\n");
pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
return 0;
}
@ -961,7 +961,7 @@ static void tcm_loop_port_unlink(
sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
se_lun->unpacked_lun);
if (!sd) {
printk(KERN_ERR "Unable to locate struct scsi_device for %d:%d:"
pr_err("Unable to locate struct scsi_device for %d:%d:"
"%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
return;
}
@ -974,7 +974,7 @@ static void tcm_loop_port_unlink(
atomic_dec(&tl_tpg->tl_tpg_port_count);
smp_mb__after_atomic_dec();
printk(KERN_INFO "TCM_Loop_ConfigFS: Port Unlink Successful\n");
pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}
/* End items for tcm_loop_port_cit */
@ -991,14 +991,14 @@ static int tcm_loop_make_nexus(
int ret = -ENOMEM;
if (tl_tpg->tl_hba->tl_nexus) {
printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n");
pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
return -EEXIST;
}
se_tpg = &tl_tpg->tl_se_tpg;
tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
if (!tl_nexus) {
printk(KERN_ERR "Unable to allocate struct tcm_loop_nexus\n");
pr_err("Unable to allocate struct tcm_loop_nexus\n");
return -ENOMEM;
}
/*
@ -1027,7 +1027,7 @@ static int tcm_loop_make_nexus(
__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
tl_nexus->se_sess, tl_nexus);
tl_tpg->tl_hba->tl_nexus = tl_nexus;
printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
name);
return 0;
@ -1053,13 +1053,13 @@ static int tcm_loop_drop_nexus(
return -ENODEV;
if (atomic_read(&tpg->tl_tpg_port_count)) {
printk(KERN_ERR "Unable to remove TCM_Loop I_T Nexus with"
pr_err("Unable to remove TCM_Loop I_T Nexus with"
" active TPG port count: %d\n",
atomic_read(&tpg->tl_tpg_port_count));
return -EPERM;
}
printk(KERN_INFO "TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
tl_nexus->se_sess->se_node_acl->initiatorname);
/*
@ -1115,7 +1115,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
* tcm_loop_make_nexus()
*/
if (strlen(page) >= TL_WWN_ADDR_LEN) {
printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds"
pr_err("Emulated NAA Sas Address: %s, exceeds"
" max: %d\n", page, TL_WWN_ADDR_LEN);
return -EINVAL;
}
@ -1124,7 +1124,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
ptr = strstr(i_port, "naa.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
printk(KERN_ERR "Passed SAS Initiator Port %s does not"
pr_err("Passed SAS Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
@ -1135,7 +1135,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
ptr = strstr(i_port, "fc.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
printk(KERN_ERR "Passed FCP Initiator Port %s does not"
pr_err("Passed FCP Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
@ -1146,7 +1146,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
ptr = strstr(i_port, "iqn.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
printk(KERN_ERR "Passed iSCSI Initiator Port %s does not"
pr_err("Passed iSCSI Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
@ -1154,7 +1154,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
port_ptr = &i_port[0];
goto check_newline;
}
printk(KERN_ERR "Unable to locate prefix for emulated Initiator Port:"
pr_err("Unable to locate prefix for emulated Initiator Port:"
" %s\n", i_port);
return -EINVAL;
/*
@ -1194,7 +1194,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
tpgt_str = strstr(name, "tpgt_");
if (!tpgt_str) {
printk(KERN_ERR "Unable to locate \"tpgt_#\" directory"
pr_err("Unable to locate \"tpgt_#\" directory"
" group\n");
return ERR_PTR(-EINVAL);
}
@ -1202,7 +1202,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
if (tpgt >= TL_TPGS_PER_HBA) {
printk(KERN_ERR "Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
" %u\n", tpgt, TL_TPGS_PER_HBA);
return ERR_PTR(-EINVAL);
}
@ -1218,7 +1218,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
if (ret < 0)
return ERR_PTR(-ENOMEM);
printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated Emulated %s"
pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
config_item_name(&wwn->wwn_group.cg_item), tpgt);
@ -1245,7 +1245,7 @@ void tcm_loop_drop_naa_tpg(
*/
core_tpg_deregister(se_tpg);
printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated Emulated %s"
pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
config_item_name(&wwn->wwn_group.cg_item), tpgt);
}
@ -1266,7 +1266,7 @@ struct se_wwn *tcm_loop_make_scsi_hba(
tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
if (!tl_hba) {
printk(KERN_ERR "Unable to allocate struct tcm_loop_hba\n");
pr_err("Unable to allocate struct tcm_loop_hba\n");
return ERR_PTR(-ENOMEM);
}
/*
@ -1286,7 +1286,7 @@ struct se_wwn *tcm_loop_make_scsi_hba(
}
ptr = strstr(name, "iqn.");
if (!ptr) {
printk(KERN_ERR "Unable to locate prefix for emulated Target "
pr_err("Unable to locate prefix for emulated Target "
"Port: %s\n", name);
ret = -EINVAL;
goto out;
@ -1295,7 +1295,7 @@ struct se_wwn *tcm_loop_make_scsi_hba(
check_len:
if (strlen(name) >= TL_WWN_ADDR_LEN) {
printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds"
pr_err("Emulated NAA %s Address: %s, exceeds"
" max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
TL_WWN_ADDR_LEN);
ret = -EINVAL;
@ -1314,7 +1314,7 @@ check_len:
sh = tl_hba->sh;
tcm_loop_hba_no_cnt++;
printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated emulated Target"
pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
" %s Address: %s at Linux/SCSI Host ID: %d\n",
tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
@ -1337,7 +1337,7 @@ void tcm_loop_drop_scsi_hba(
*/
device_unregister(&tl_hba->dev);
printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated emulated Target"
pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
" SAS Address: %s at Linux/SCSI Host ID: %d\n",
config_item_name(&wwn->wwn_group.cg_item), host_no);
}
@ -1373,7 +1373,7 @@ static int tcm_loop_register_configfs(void)
*/
fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
if (IS_ERR(fabric)) {
printk(KERN_ERR "tcm_loop_register_configfs() failed!\n");
pr_err("tcm_loop_register_configfs() failed!\n");
return PTR_ERR(fabric);
}
/*
@ -1464,7 +1464,7 @@ static int tcm_loop_register_configfs(void)
*/
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
printk(KERN_ERR "target_fabric_configfs_register() for"
pr_err("target_fabric_configfs_register() for"
" TCM_Loop failed!\n");
target_fabric_configfs_free(fabric);
return -1;
@ -1473,7 +1473,7 @@ static int tcm_loop_register_configfs(void)
* Setup our local pointer to *fabric.
*/
tcm_loop_fabric_configfs = fabric;
printk(KERN_INFO "TCM_LOOP[0] - Set fabric ->"
pr_debug("TCM_LOOP[0] - Set fabric ->"
" tcm_loop_fabric_configfs\n");
return 0;
}
@ -1485,7 +1485,7 @@ static void tcm_loop_deregister_configfs(void)
target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
tcm_loop_fabric_configfs = NULL;
printk(KERN_INFO "TCM_LOOP[0] - Cleared"
pr_debug("TCM_LOOP[0] - Cleared"
" tcm_loop_fabric_configfs\n");
}
@ -1498,7 +1498,7 @@ static int __init tcm_loop_fabric_init(void)
__alignof__(struct tcm_loop_cmd),
0, NULL);
if (!tcm_loop_cmd_cache) {
printk(KERN_ERR "kmem_cache_create() for"
pr_debug("kmem_cache_create() for"
" tcm_loop_cmd_cache failed\n");
return -ENOMEM;
}

drivers/target/loopback/tcm_loop.h

@ -16,12 +16,6 @@
*/
#define TL_SCSI_MAX_CMD_LEN 32
#ifdef CONFIG_LOOPBACK_TARGET_CDB_DEBUG
# define TL_CDB_DEBUG(x...) printk(KERN_INFO x)
#else
# define TL_CDB_DEBUG(x...)
#endif
struct tcm_loop_cmd {
/* State of Linux/SCSI CDB+Data descriptor */
u32 sc_cmd_state;

drivers/target/target_core_alua.c

@ -167,7 +167,7 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
int alua_access_state, primary = 0, rc;
u16 tg_pt_id, rtpi;
if (!(l_port))
if (!l_port)
return PYX_TRANSPORT_LU_COMM_FAILURE;
buf = transport_kmap_first_data_page(cmd);
@ -177,24 +177,24 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
* for the local tg_pt_gp.
*/
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
if (!(l_tg_pt_gp_mem)) {
printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
if (!l_tg_pt_gp_mem) {
pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
goto out;
}
spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
if (!(l_tg_pt_gp)) {
if (!l_tg_pt_gp) {
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
goto out;
}
rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
if (!(rc)) {
printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS"
if (!rc) {
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICT_ALUA is disabled\n");
rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
goto out;
@ -249,7 +249,7 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
list_for_each_entry(tg_pt_gp,
&su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (!(tg_pt_gp->tg_pt_gp_valid_id))
if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
@ -498,7 +498,7 @@ static int core_alua_state_check(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
int out_alua_state, nonop_delay_msecs;
if (!(port))
if (!port)
return 0;
/*
* First, check for a struct se_port specific secondary ALUA target port
@ -506,7 +506,7 @@ static int core_alua_state_check(
*/
if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
printk(KERN_INFO "ALUA: Got secondary offline status for local"
pr_debug("ALUA: Got secondary offline status for local"
" target port\n");
*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
return 1;
@ -548,7 +548,7 @@ static int core_alua_state_check(
*/
case ALUA_ACCESS_STATE_OFFLINE:
default:
printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n",
pr_err("Unknown ALUA access state: 0x%02x\n",
out_alua_state);
return -EINVAL;
}
@ -580,7 +580,7 @@ static int core_alua_check_transition(int state, int *primary)
*primary = 0;
break;
default:
printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state);
pr_err("Unknown ALUA access state: 0x%02x\n", state);
return -EINVAL;
}
@ -638,7 +638,7 @@ int core_alua_check_nonop_delay(
* The ALUA Active/NonOptimized access state delay can be disabled
* in via configfs with a value of zero
*/
if (!(cmd->alua_nonop_delay))
if (!cmd->alua_nonop_delay)
return 0;
/*
* struct se_cmd->alua_nonop_delay gets set by a target port group
@ -667,7 +667,7 @@ static int core_alua_write_tpg_metadata(
file = filp_open(path, flags, 0600);
if (IS_ERR(file) || !file || !file->f_dentry) {
printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n",
pr_err("filp_open(%s) for ALUA metadata failed\n",
path);
return -ENODEV;
}
@ -681,7 +681,7 @@ static int core_alua_write_tpg_metadata(
set_fs(old_fs);
if (ret < 0) {
printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path);
pr_err("Error writing ALUA metadata file: %s\n", path);
filp_close(file, NULL);
return -EIO;
}
@ -778,7 +778,7 @@ static int core_alua_do_transition_tg_pt(
* se_deve->se_lun_acl pointer may be NULL for a
* entry created without explict Node+MappedLUN ACLs
*/
if (!(lacl))
if (!lacl)
continue;
if (explict &&
@ -820,7 +820,7 @@ static int core_alua_do_transition_tg_pt(
*/
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
" from primary access state %s to %s\n", (explict) ? "explict" :
"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
@ -851,8 +851,8 @@ int core_alua_do_port_transition(
return -EINVAL;
md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
if (!(md_buf)) {
printk("Unable to allocate buf for ALUA metadata\n");
if (!md_buf) {
pr_err("Unable to allocate buf for ALUA metadata\n");
return -ENOMEM;
}
@ -867,7 +867,7 @@ int core_alua_do_port_transition(
* we only do transition on the passed *l_tp_pt_gp, and not
* on all of the matching target port groups IDs in default_lu_gp.
*/
if (!(lu_gp->lu_gp_id)) {
if (!lu_gp->lu_gp_id) {
/*
* core_alua_do_transition_tg_pt() will always return
* success.
@ -899,7 +899,7 @@ int core_alua_do_port_transition(
&su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (!(tg_pt_gp->tg_pt_gp_valid_id))
if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
/*
* If the target behavior port asymmetric access state
@ -941,7 +941,7 @@ int core_alua_do_port_transition(
}
spin_unlock(&lu_gp->lu_gp_lock);
printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT"
pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
" Group IDs: %hu %s transition to primary state: %s\n",
config_item_name(&lu_gp->lu_gp_group.cg_item),
l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
@ -1001,9 +1001,9 @@ static int core_alua_set_tg_pt_secondary_state(
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
if (!(tg_pt_gp)) {
if (!tg_pt_gp) {
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
printk(KERN_ERR "Unable to complete secondary state"
pr_err("Unable to complete secondary state"
" transition\n");
return -EINVAL;
}
@ -1022,7 +1022,7 @@ static int core_alua_set_tg_pt_secondary_state(
ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
" to secondary access state: %s\n", (explict) ? "explict" :
"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
@ -1040,8 +1040,8 @@ static int core_alua_set_tg_pt_secondary_state(
*/
if (port->sep_tg_pt_secondary_write_md) {
md_buf = kzalloc(md_buf_len, GFP_KERNEL);
if (!(md_buf)) {
printk(KERN_ERR "Unable to allocate md_buf for"
if (!md_buf) {
pr_err("Unable to allocate md_buf for"
" secondary ALUA access metadata\n");
return -ENOMEM;
}
@ -1062,8 +1062,8 @@ core_alua_allocate_lu_gp(const char *name, int def_group)
struct t10_alua_lu_gp *lu_gp;
lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
if (!(lu_gp)) {
printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n");
if (!lu_gp) {
pr_err("Unable to allocate struct t10_alua_lu_gp\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&lu_gp->lu_gp_node);
@ -1088,14 +1088,14 @@ int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
* The lu_gp->lu_gp_id may only be set once..
*/
if (lu_gp->lu_gp_valid_id) {
printk(KERN_WARNING "ALUA LU Group already has a valid ID,"
pr_warn("ALUA LU Group already has a valid ID,"
" ignoring request\n");
return -EINVAL;
}
spin_lock(&lu_gps_lock);
if (alua_lu_gps_count == 0x0000ffff) {
printk(KERN_ERR "Maximum ALUA alua_lu_gps_count:"
pr_err("Maximum ALUA alua_lu_gps_count:"
" 0x0000ffff reached\n");
spin_unlock(&lu_gps_lock);
kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
@ -1107,10 +1107,10 @@ again:
list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
if (!(lu_gp_id))
if (!lu_gp_id)
goto again;
printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu"
pr_warn("ALUA Logical Unit Group ID: %hu"
" already exists, ignoring request\n",
lu_gp_id);
spin_unlock(&lu_gps_lock);
@ -1133,8 +1133,8 @@ core_alua_allocate_lu_gp_mem(struct se_device *dev)
struct t10_alua_lu_gp_member *lu_gp_mem;
lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
if (!(lu_gp_mem)) {
printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n");
if (!lu_gp_mem) {
pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
@ -1218,7 +1218,7 @@ void core_alua_free_lu_gp_mem(struct se_device *dev)
return;
lu_gp_mem = dev->dev_alua_lu_gp_mem;
if (!(lu_gp_mem))
if (!lu_gp_mem)
return;
while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
@ -1226,7 +1226,7 @@ void core_alua_free_lu_gp_mem(struct se_device *dev)
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
if ((lu_gp)) {
if (lu_gp) {
spin_lock(&lu_gp->lu_gp_lock);
if (lu_gp_mem->lu_gp_assoc) {
list_del(&lu_gp_mem->lu_gp_mem_list);
@ -1248,10 +1248,10 @@ struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
spin_lock(&lu_gps_lock);
list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
if (!(lu_gp->lu_gp_valid_id))
if (!lu_gp->lu_gp_valid_id)
continue;
ci = &lu_gp->lu_gp_group.cg_item;
if (!(strcmp(config_item_name(ci), name))) {
if (!strcmp(config_item_name(ci), name)) {
atomic_inc(&lu_gp->lu_gp_ref_cnt);
spin_unlock(&lu_gps_lock);
return lu_gp;
@ -1307,8 +1307,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
struct t10_alua_tg_pt_gp *tg_pt_gp;
tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
if (!(tg_pt_gp)) {
printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n");
if (!tg_pt_gp) {
pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
return NULL;
}
INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
@ -1356,14 +1356,14 @@ int core_alua_set_tg_pt_gp_id(
* The tg_pt_gp->tg_pt_gp_id may only be set once..
*/
if (tg_pt_gp->tg_pt_gp_valid_id) {
printk(KERN_WARNING "ALUA TG PT Group already has a valid ID,"
pr_warn("ALUA TG PT Group already has a valid ID,"
" ignoring request\n");
return -EINVAL;
}
spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:"
pr_err("Maximum ALUA alua_tg_pt_gps_count:"
" 0x0000ffff reached\n");
spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
@ -1376,10 +1376,10 @@ again:
list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
if (!(tg_pt_gp_id))
if (!tg_pt_gp_id)
goto again;
printk(KERN_ERR "ALUA Target Port Group ID: %hu already"
pr_err("ALUA Target Port Group ID: %hu already"
" exists, ignoring request\n", tg_pt_gp_id);
spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
return -EINVAL;
@ -1403,8 +1403,8 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
GFP_KERNEL);
if (!(tg_pt_gp_mem)) {
printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n");
if (!tg_pt_gp_mem) {
pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
@ -1491,7 +1491,7 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port)
return;
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
if (!(tg_pt_gp_mem))
if (!tg_pt_gp_mem)
return;
while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
@ -1499,7 +1499,7 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port)
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
if ((tg_pt_gp)) {
if (tg_pt_gp) {
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
if (tg_pt_gp_mem->tg_pt_gp_assoc) {
list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
@ -1524,10 +1524,10 @@ static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (!(tg_pt_gp->tg_pt_gp_valid_id))
if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
if (!(strcmp(config_item_name(ci), name))) {
if (!strcmp(config_item_name(ci), name)) {
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
return tg_pt_gp;
@ -1592,12 +1592,12 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
return len;
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
if (!(tg_pt_gp_mem))
if (!tg_pt_gp_mem)
return len;
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
if ((tg_pt_gp)) {
if (tg_pt_gp) {
tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
" %hu\nTG Port Primary Access State: %s\nTG Port "
@ -1634,7 +1634,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
lun = port->sep_lun;
if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for"
pr_warn("SPC3_ALUA_EMULATED not enabled for"
" %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
tpg->se_tpg_tfo->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item));
@ -1642,7 +1642,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
}
if (count > TG_PT_GROUP_NAME_BUF) {
printk(KERN_ERR "ALUA Target Port Group alias too large!\n");
pr_err("ALUA Target Port Group alias too large!\n");
return -EINVAL;
}
memset(buf, 0, TG_PT_GROUP_NAME_BUF);
@ -1659,26 +1659,26 @@ ssize_t core_alua_store_tg_pt_gp_info(
*/
tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
strstrip(buf));
if (!(tg_pt_gp_new))
if (!tg_pt_gp_new)
return -ENODEV;
}
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
if (!(tg_pt_gp_mem)) {
if (!tg_pt_gp_mem) {
if (tg_pt_gp_new)
core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
return -EINVAL;
}
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
if ((tg_pt_gp)) {
if (tg_pt_gp) {
/*
* Clearing an existing tg_pt_gp association, and replacing
* with the default_tg_pt_gp.
*/
if (!(tg_pt_gp_new)) {
printk(KERN_INFO "Target_Core_ConfigFS: Moving"
if (!tg_pt_gp_new) {
pr_debug("Target_Core_ConfigFS: Moving"
" %s/tpgt_%hu/%s from ALUA Target Port Group:"
" alua/%s, ID: %hu back to"
" default_tg_pt_gp\n",
@ -1707,7 +1707,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
*/
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
" Target Port Group: alua/%s, ID: %hu\n", (move) ?
"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
tpg->se_tpg_tfo->tpg_get_tag(tpg),
@ -1744,11 +1744,11 @@ ssize_t core_alua_store_access_type(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract alua_access_type\n");
pr_err("Unable to extract alua_access_type\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
printk(KERN_ERR "Illegal value for alua_access_type:"
pr_err("Illegal value for alua_access_type:"
" %lu\n", tmp);
return -EINVAL;
}
@ -1782,11 +1782,11 @@ ssize_t core_alua_store_nonop_delay_msecs(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract nonop_delay_msecs\n");
pr_err("Unable to extract nonop_delay_msecs\n");
return -EINVAL;
}
if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds"
pr_err("Passed nonop_delay_msecs: %lu, exceeds"
" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
ALUA_MAX_NONOP_DELAY_MSECS);
return -EINVAL;
@ -1813,11 +1813,11 @@ ssize_t core_alua_store_trans_delay_msecs(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract trans_delay_msecs\n");
pr_err("Unable to extract trans_delay_msecs\n");
return -EINVAL;
}
if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds"
pr_err("Passed trans_delay_msecs: %lu, exceeds"
" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
ALUA_MAX_TRANS_DELAY_MSECS);
return -EINVAL;
@ -1844,11 +1844,11 @@ ssize_t core_alua_store_preferred_bit(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract preferred ALUA value\n");
pr_err("Unable to extract preferred ALUA value\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp);
pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
return -EINVAL;
}
tg_pt_gp->tg_pt_gp_pref = (int)tmp;
@ -1858,7 +1858,7 @@ ssize_t core_alua_store_preferred_bit(
ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
{
if (!(lun->lun_sep))
if (!lun->lun_sep)
return -ENODEV;
return sprintf(page, "%d\n",
@ -1874,22 +1874,22 @@ ssize_t core_alua_store_offline_bit(
unsigned long tmp;
int ret;
if (!(lun->lun_sep))
if (!lun->lun_sep)
return -ENODEV;
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n");
pr_err("Unable to extract alua_tg_pt_offline value\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n",
pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
tmp);
return -EINVAL;
}
tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
if (!(tg_pt_gp_mem)) {
printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n");
if (!tg_pt_gp_mem) {
pr_err("Unable to locate *tg_pt_gp_mem\n");
return -EINVAL;
}
@ -1918,13 +1918,13 @@ ssize_t core_alua_store_secondary_status(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract alua_tg_pt_status\n");
pr_err("Unable to extract alua_tg_pt_status\n");
return -EINVAL;
}
if ((tmp != ALUA_STATUS_NONE) &&
(tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
(tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n",
pr_err("Illegal value for alua_tg_pt_status: %lu\n",
tmp);
return -EINVAL;
}
@ -1951,11 +1951,11 @@ ssize_t core_alua_store_secondary_write_metadata(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n");
pr_err("Unable to extract alua_tg_pt_write_md\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:"
pr_err("Illegal value for alua_tg_pt_write_md:"
" %lu\n", tmp);
return -EINVAL;
}
@ -1979,7 +1979,7 @@ int core_setup_alua(struct se_device *dev, int force_pt)
!(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
alua->alua_type = SPC_ALUA_PASSTHROUGH;
alua->alua_state_check = &core_alua_state_check_nop;
printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
" emulation\n", dev->transport->name);
return 0;
}
@ -1988,7 +1988,7 @@ int core_setup_alua(struct se_device *dev, int force_pt)
* use emulated ALUA.
*/
if (dev->transport->get_device_rev(dev) >= SCSI_3) {
printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3"
pr_debug("%s: Enabling ALUA Emulation for SPC-3"
" device\n", dev->transport->name);
/*
* Associate this struct se_device with the default ALUA
@ -2005,13 +2005,13 @@ int core_setup_alua(struct se_device *dev, int force_pt)
default_lu_gp);
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
printk(KERN_INFO "%s: Adding to default ALUA LU Group:"
pr_debug("%s: Adding to default ALUA LU Group:"
" core/alua/lu_gps/default_lu_gp\n",
dev->transport->name);
} else {
alua->alua_type = SPC2_ALUA_DISABLED;
alua->alua_state_check = &core_alua_state_check_nop;
printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2"
pr_debug("%s: Disabling ALUA Emulation for SPC-2"
" device\n", dev->transport->name);
}

drivers/target/target_core_cdb.c

@ -73,7 +73,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
* payload going back for EVPD=0
*/
if (cmd->data_length < 6) {
printk(KERN_ERR "SCSI Inquiry payload length: %u"
pr_err("SCSI Inquiry payload length: %u"
" too small for EVPD=0\n", cmd->data_length);
return -EINVAL;
}
@ -327,7 +327,7 @@ check_tpgi:
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
if (!(tg_pt_gp)) {
if (!tg_pt_gp) {
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
goto check_lu_gp;
}
@ -358,12 +358,12 @@ check_lu_gp:
goto check_scsi_name;
}
lu_gp_mem = dev->dev_alua_lu_gp_mem;
if (!(lu_gp_mem))
if (!lu_gp_mem)
goto check_scsi_name;
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
if (!(lu_gp)) {
if (!lu_gp) {
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
goto check_scsi_name;
}
@ -475,14 +475,14 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
have_tp = 1;
if (cmd->data_length < (0x10 + 4)) {
printk(KERN_INFO "Received data_length: %u"
pr_debug("Received data_length: %u"
" too small for EVPD 0xb0\n",
cmd->data_length);
return -EINVAL;
}
if (have_tp && cmd->data_length < (0x3c + 4)) {
printk(KERN_INFO "Received data_length: %u"
pr_debug("Received data_length: %u"
" too small for TPE=1 EVPD 0xb0\n",
cmd->data_length);
have_tp = 0;
@ -491,6 +491,9 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
buf[0] = dev->transport->get_device_type(dev);
buf[3] = have_tp ? 0x3c : 0x10;
/* Set WSNZ to 1 */
buf[4] = 0x01;
/*
* Set OPTIMAL TRANSFER LENGTH GRANULARITY
*/
@ -667,7 +670,7 @@ target_emulate_inquiry(struct se_cmd *cmd)
* payload length left for the next outgoing EVPD metadata
*/
if (cmd->data_length < 4) {
printk(KERN_ERR "SCSI Inquiry payload length: %u"
pr_err("SCSI Inquiry payload length: %u"
" too small for EVPD=1\n", cmd->data_length);
return -EINVAL;
}
@ -685,7 +688,7 @@ target_emulate_inquiry(struct se_cmd *cmd)
}
transport_kunmap_first_data_page(cmd);
printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]);
pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
return -EINVAL;
}
@ -891,7 +894,7 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
length += target_modesense_control(dev, &buf[offset+length]);
break;
default:
printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n",
pr_err("Got Unknown Mode Page: 0x%02x\n",
cdb[2] & 0x3f);
return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
}
@ -947,14 +950,14 @@ target_emulate_request_sense(struct se_cmd *cmd)
int err = 0;
if (cdb[1] & 0x01) {
printk(KERN_ERR "REQUEST_SENSE description emulation not"
pr_err("REQUEST_SENSE description emulation not"
" supported\n");
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
buf = transport_kmap_first_data_page(cmd);
if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) {
if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
/*
* CURRENT ERROR, UNIT ATTENTION
*/
@ -1028,18 +1031,18 @@ target_emulate_unmap(struct se_task *task)
buf = transport_kmap_first_data_page(cmd);
ptr = &buf[offset];
printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
while (size) {
lba = get_unaligned_be64(&ptr[0]);
range = get_unaligned_be32(&ptr[8]);
printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n",
pr_debug("UNMAP: Using lba: %llu and range: %u\n",
(unsigned long long)lba, range);
ret = dev->transport->do_discard(dev, lba, range);
if (ret < 0) {
printk(KERN_ERR "blkdev_issue_discard() failed: %d\n",
pr_err("blkdev_issue_discard() failed: %d\n",
ret);
goto err;
}
@ -1084,12 +1087,12 @@ target_emulate_write_same(struct se_task *task, int write_same32)
else
range = (dev->transport->get_blocks(dev) - lba);
printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
(unsigned long long)lba, (unsigned long long)range);
ret = dev->transport->do_discard(dev, lba, range);
if (ret < 0) {
printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n");
pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
return ret;
}
@ -1125,7 +1128,7 @@ transport_emulate_control_cdb(struct se_task *task)
ret = target_emulate_readcapacity_16(cmd);
break;
default:
printk(KERN_ERR "Unsupported SA: 0x%02x\n",
pr_err("Unsupported SA: 0x%02x\n",
cmd->t_task_cdb[1] & 0x1f);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
@ -1135,7 +1138,7 @@ transport_emulate_control_cdb(struct se_task *task)
break;
case UNMAP:
if (!dev->transport->do_discard) {
printk(KERN_ERR "UNMAP emulation not supported for: %s\n",
pr_err("UNMAP emulation not supported for: %s\n",
dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
@ -1143,7 +1146,7 @@ transport_emulate_control_cdb(struct se_task *task)
break;
case WRITE_SAME_16:
if (!dev->transport->do_discard) {
printk(KERN_ERR "WRITE_SAME_16 emulation not supported"
pr_err("WRITE_SAME_16 emulation not supported"
" for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
@ -1155,7 +1158,7 @@ transport_emulate_control_cdb(struct se_task *task)
switch (service_action) {
case WRITE_SAME_32:
if (!dev->transport->do_discard) {
printk(KERN_ERR "WRITE_SAME_32 SA emulation not"
pr_err("WRITE_SAME_32 SA emulation not"
" supported for: %s\n",
dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
@ -1163,7 +1166,7 @@ transport_emulate_control_cdb(struct se_task *task)
ret = target_emulate_write_same(task, 1);
break;
default:
printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:"
pr_err("Unsupported VARIABLE_LENGTH_CMD SA:"
" 0x%02x\n", service_action);
break;
}
@ -1171,8 +1174,7 @@ transport_emulate_control_cdb(struct se_task *task)
case SYNCHRONIZE_CACHE:
case 0x91: /* SYNCHRONIZE_CACHE_16: */
if (!dev->transport->do_sync_cache) {
printk(KERN_ERR
"SYNCHRONIZE_CACHE emulation not supported"
pr_err("SYNCHRONIZE_CACHE emulation not supported"
" for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
@ -1189,7 +1191,7 @@ transport_emulate_control_cdb(struct se_task *task)
case WRITE_FILEMARKS:
break;
default:
printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n",
pr_err("Unsupported SCSI Opcode: 0x%02x for %s\n",
cmd->t_task_cdb[0], dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
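
On the WSNZ change visible in the 0xb0 hunk above (buf[4] = 0x01 sets the
WSNZ bit in the Block Limits VPD page), a sketch of the behavior that bit
advertises per SBC-3; the helper below is illustrative, not a function
from this patch:

	/*
	 * With WSNZ=1, WRITE_SAME with NUMBER OF LOGICAL BLOCKS == 0 no
	 * longer means "to the last block of the medium" and must be
	 * rejected as an invalid field in the CDB.
	 */
	static int example_check_write_same_sectors(unsigned long long num_blocks)
	{
		if (!num_blocks)
			return -EINVAL;
		return 0;
	}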

File diff suppressed because it is too large.

drivers/target/target_core_device.c

@ -84,7 +84,7 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
(deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
" Access for 0x%08x\n",
se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
@ -117,7 +117,7 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
if (unpacked_lun != 0) {
se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
" Access for 0x%08x\n",
se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
@ -204,7 +204,7 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
if (!se_lun) {
printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
" Access for 0x%08x\n",
se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
@ -255,15 +255,15 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
continue;
lun = deve->se_lun;
if (!(lun)) {
printk(KERN_ERR "%s device entries device pointer is"
if (!lun) {
pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
tpg->se_tpg_tfo->get_fabric_name());
continue;
}
port = lun->lun_sep;
if (!(port)) {
printk(KERN_ERR "%s device entries device pointer is"
if (!port) {
pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
tpg->se_tpg_tfo->get_fabric_name());
continue;
@ -301,7 +301,7 @@ int core_free_device_list_for_node(
continue;
if (!deve->se_lun) {
printk(KERN_ERR "%s device entries device pointer is"
pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
tpg->se_tpg_tfo->get_fabric_name());
continue;
@ -372,7 +372,7 @@ int core_update_device_list_for_node(
* struct se_dev_entry pointers below as logic in
* core_alua_do_transition_tg_pt() depends on these being present.
*/
if (!(enable)) {
if (!enable) {
/*
* deve->se_lun_acl will be NULL for demo-mode created LUNs
* that have not been explicitly concerted to MappedLUNs ->
@ -395,14 +395,14 @@ int core_update_device_list_for_node(
*/
if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
if (deve->se_lun_acl != NULL) {
printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
pr_err("struct se_dev_entry->se_lun_acl"
" already set for demo mode -> explict"
" LUN ACL transition\n");
spin_unlock_irq(&nacl->device_list_lock);
return -EINVAL;
}
if (deve->se_lun != lun) {
printk(KERN_ERR "struct se_dev_entry->se_lun does"
pr_err("struct se_dev_entry->se_lun does"
" match passed struct se_lun for demo mode"
" -> explict LUN ACL transition\n");
spin_unlock_irq(&nacl->device_list_lock);
@ -501,8 +501,8 @@ static struct se_port *core_alloc_port(struct se_device *dev)
struct se_port *port, *port_tmp;
port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
if (!(port)) {
printk(KERN_ERR "Unable to allocate struct se_port\n");
if (!port) {
pr_err("Unable to allocate struct se_port\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&port->sep_alua_list);
@ -513,7 +513,7 @@ static struct se_port *core_alloc_port(struct se_device *dev)
spin_lock(&dev->se_port_lock);
if (dev->dev_port_count == 0x0000ffff) {
printk(KERN_WARNING "Reached dev->dev_port_count =="
pr_warn("Reached dev->dev_port_count =="
" 0x0000ffff\n");
spin_unlock(&dev->se_port_lock);
return ERR_PTR(-ENOSPC);
@ -532,7 +532,7 @@ again:
* 3h to FFFFh Relative port 3 through 65 535
*/
port->sep_rtpi = dev->dev_rpti_counter++;
if (!(port->sep_rtpi))
if (!port->sep_rtpi)
goto again;
list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
@ -570,7 +570,7 @@ static void core_export_port(
if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
pr_err("Unable to allocate t10_alua_tg_pt"
"_gp_member_t\n");
return;
}
@ -578,7 +578,7 @@ static void core_export_port(
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
su_dev->t10_alua.default_tg_pt_gp);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
pr_debug("%s/%s: Adding to default ALUA Target Port"
" Group: alua/default_tg_pt_gp\n",
dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
}
@ -663,8 +663,8 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
list_for_each_entry(se_task, &se_cmd->t_task_list, t_list)
break;
if (!(se_task)) {
printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
if (!se_task) {
pr_err("Unable to locate struct se_task for struct se_cmd\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
@ -675,7 +675,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
* coming via a target_core_mod PASSTHROUGH op, and not through
* a $FABRIC_MOD. In that case, report LUN=0 only.
*/
if (!(se_sess)) {
if (!se_sess) {
int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
lun_count = 1;
goto done;
@ -893,12 +893,12 @@ void se_dev_set_default_attribs(
int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
{
if (task_timeout > DA_TASK_TIMEOUT_MAX) {
printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger then"
pr_err("dev[%p]: Passed task_timeout: %u larger then"
" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
return -EINVAL;
} else {
dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout;
printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
pr_debug("dev[%p]: Set SE Device task_timeout: %u\n",
dev, task_timeout);
}
@ -910,7 +910,7 @@ int se_dev_set_max_unmap_lba_count(
u32 max_unmap_lba_count)
{
dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
return 0;
}
@ -921,7 +921,7 @@ int se_dev_set_max_unmap_block_desc_count(
{
dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
max_unmap_block_desc_count;
printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
return 0;
}
@ -931,7 +931,7 @@ int se_dev_set_unmap_granularity(
u32 unmap_granularity)
{
dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
pr_debug("dev[%p]: Set unmap_granularity: %u\n",
dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
return 0;
}
@ -941,7 +941,7 @@ int se_dev_set_unmap_granularity_alignment(
u32 unmap_granularity_alignment)
{
dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
return 0;
}
@ -949,19 +949,19 @@ int se_dev_set_unmap_granularity_alignment(
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
if (dev->transport->dpo_emulated == NULL) {
printk(KERN_ERR "dev->transport->dpo_emulated is NULL\n");
pr_err("dev->transport->dpo_emulated is NULL\n");
return -EINVAL;
}
if (dev->transport->dpo_emulated(dev) == 0) {
printk(KERN_ERR "dev->transport->dpo_emulated not supported\n");
pr_err("dev->transport->dpo_emulated not supported\n");
return -EINVAL;
}
dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag;
printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation"
" bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo);
return 0;
}
@ -969,19 +969,19 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
if (dev->transport->fua_write_emulated == NULL) {
printk(KERN_ERR "dev->transport->fua_write_emulated is NULL\n");
pr_err("dev->transport->fua_write_emulated is NULL\n");
return -EINVAL;
}
if (dev->transport->fua_write_emulated(dev) == 0) {
printk(KERN_ERR "dev->transport->fua_write_emulated not supported\n");
pr_err("dev->transport->fua_write_emulated not supported\n");
return -EINVAL;
}
dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
return 0;
}
@ -989,19 +989,19 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
if (dev->transport->fua_read_emulated == NULL) {
printk(KERN_ERR "dev->transport->fua_read_emulated is NULL\n");
pr_err("dev->transport->fua_read_emulated is NULL\n");
return -EINVAL;
}
if (dev->transport->fua_read_emulated(dev) == 0) {
printk(KERN_ERR "dev->transport->fua_read_emulated not supported\n");
pr_err("dev->transport->fua_read_emulated not supported\n");
return -EINVAL;
}
dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag;
printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n",
dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read);
return 0;
}
@ -1009,19 +1009,19 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
if (dev->transport->write_cache_emulated == NULL) {
printk(KERN_ERR "dev->transport->write_cache_emulated is NULL\n");
pr_err("dev->transport->write_cache_emulated is NULL\n");
return -EINVAL;
}
if (dev->transport->write_cache_emulated(dev) == 0) {
printk(KERN_ERR "dev->transport->write_cache_emulated not supported\n");
pr_err("dev->transport->write_cache_emulated not supported\n");
return -EINVAL;
}
dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
return 0;
}
@ -1029,19 +1029,19 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1) && (flag != 2)) {
printk(KERN_ERR "Illegal value %d\n", flag);
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
printk(KERN_ERR "dev[%p]: Unable to change SE Device"
pr_err("dev[%p]: Unable to change SE Device"
" UA_INTRLCK_CTRL while dev_export_obj: %d count"
" exists\n", dev,
atomic_read(&dev->dev_export_obj.obj_access_count));
return -EINVAL;
}
dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
return 0;
@ -1050,18 +1050,18 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
pr_err("dev[%p]: Unable to change SE Device TAS while"
" dev_export_obj: %d count exists\n", dev,
atomic_read(&dev->dev_export_obj.obj_access_count));
return -EINVAL;
}
dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
return 0;
@ -1070,20 +1070,20 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag)
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
/*
* We expect this value to be non-zero when generic Block Layer
* Discard supported is detected iblock_create_virtdevice().
*/
if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) {
printk(KERN_ERR "Generic Block Discard not supported\n");
if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
dev, flag);
return 0;
}
@ -1091,20 +1091,20 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
/*
* We expect this value to be non-zero when generic Block Layer
* Discard supported is detected iblock_create_virtdevice().
*/
if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) {
printk(KERN_ERR "Generic Block Discard not supported\n");
if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
dev, flag);
return 0;
}
@ -1112,11 +1112,11 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
(dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
return 0;
}
@ -1141,20 +1141,20 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
u32 orig_queue_depth = dev->queue_depth;
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
pr_err("dev[%p]: Unable to change SE Device TCQ while"
" dev_export_obj: %d count exists\n", dev,
atomic_read(&dev->dev_export_obj.obj_access_count));
return -EINVAL;
}
if (!(queue_depth)) {
printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
if (!queue_depth) {
pr_err("dev[%p]: Illegal ZERO value for queue"
"_depth\n", dev);
return -EINVAL;
}
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
pr_err("dev[%p]: Passed queue_depth: %u"
" exceeds TCM/SE_Device TCQ: %u\n",
dev, queue_depth,
dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
@ -1163,7 +1163,7 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
} else {
if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
printk(KERN_ERR "dev[%p]: Passed queue_depth:"
pr_err("dev[%p]: Passed queue_depth:"
" %u exceeds TCM/SE_Device MAX"
" TCQ: %u\n", dev, queue_depth,
dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
@ -1178,7 +1178,7 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
else if (queue_depth < orig_queue_depth)
atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
dev, queue_depth);
return 0;
}
@ -1188,41 +1188,41 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
int force = 0; /* Force setting for VDEVS */
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
printk(KERN_ERR "dev[%p]: Unable to change SE Device"
pr_err("dev[%p]: Unable to change SE Device"
" max_sectors while dev_export_obj: %d count exists\n",
dev, atomic_read(&dev->dev_export_obj.obj_access_count));
return -EINVAL;
}
if (!(max_sectors)) {
printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
if (!max_sectors) {
pr_err("dev[%p]: Illegal ZERO value for"
" max_sectors\n", dev);
return -EINVAL;
}
if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
pr_err("dev[%p]: Passed max_sectors: %u less than"
" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
DA_STATUS_MAX_SECTORS_MIN);
return -EINVAL;
}
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
pr_err("dev[%p]: Passed max_sectors: %u"
" greater than TCM/SE_Device max_sectors:"
" %u\n", dev, max_sectors,
dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
return -EINVAL;
}
} else {
if (!(force) && (max_sectors >
if (!force && (max_sectors >
dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
pr_err("dev[%p]: Passed max_sectors: %u"
" greater than TCM/SE_Device max_sectors"
": %u, use force=1 to override.\n", dev,
max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
return -EINVAL;
}
if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
pr_err("dev[%p]: Passed max_sectors: %u"
" greater than DA_STATUS_MAX_SECTORS_MAX:"
" %u\n", dev, max_sectors,
DA_STATUS_MAX_SECTORS_MAX);
@ -1231,7 +1231,7 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
}
dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
printk("dev[%p]: SE Device max_sectors changed to %u\n",
pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
dev, max_sectors);
return 0;
}
@ -1239,25 +1239,25 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
printk(KERN_ERR "dev[%p]: Unable to change SE Device"
pr_err("dev[%p]: Unable to change SE Device"
" optimal_sectors while dev_export_obj: %d count exists\n",
dev, atomic_read(&dev->dev_export_obj.obj_access_count));
return -EINVAL;
}
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
pr_err("dev[%p]: Passed optimal_sectors cannot be"
" changed for TCM/pSCSI\n", dev);
return -EINVAL;
}
if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
" greater than max_sectors: %u\n", dev,
optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
return -EINVAL;
}
dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
dev, optimal_sectors);
return 0;
}
@ -1265,7 +1265,7 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
pr_err("dev[%p]: Unable to change SE Device block_size"
" while dev_export_obj: %d count exists\n", dev,
atomic_read(&dev->dev_export_obj.obj_access_count));
return -EINVAL;
@ -1275,21 +1275,21 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
(block_size != 1024) &&
(block_size != 2048) &&
(block_size != 4096)) {
printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u"
pr_err("dev[%p]: Illegal value for block_device: %u"
" for SE device, must be 512, 1024, 2048 or 4096\n",
dev, block_size);
return -EINVAL;
}
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
pr_err("dev[%p]: Not allowed to change block_size for"
" Physical Device, use for Linux/SCSI to change"
" block_size for underlying hardware\n", dev);
return -EINVAL;
}
dev->se_sub_dev->se_dev_attrib.block_size = block_size;
printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
pr_debug("dev[%p]: SE Device block_size changed to %u\n",
dev, block_size);
return 0;
}
@ -1304,13 +1304,13 @@ struct se_lun *core_dev_add_lun(
u32 lun_access = 0;
if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n",
pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
atomic_read(&dev->dev_access_obj.obj_access_count));
return NULL;
}
lun_p = core_tpg_pre_addlun(tpg, lun);
if ((IS_ERR(lun_p)) || !(lun_p))
if (IS_ERR(lun_p) || !lun_p)
return NULL;
if (dev->dev_flags & DF_READ_ONLY)
@ -1321,7 +1321,7 @@ struct se_lun *core_dev_add_lun(
if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
return NULL;
printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
@ -1357,12 +1357,12 @@ int core_dev_del_lun(
int ret = 0;
lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
if (!(lun))
if (!lun)
return ret;
core_tpg_post_dellun(tpg, lun);
printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
tpg->se_tpg_tfo->get_fabric_name());
@ -1376,7 +1376,7 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l
spin_lock(&tpg->tpg_lun_lock);
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
"_PER_TPG-1: %u for Target Portal Group: %hu\n",
tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
TRANSPORT_MAX_LUNS_PER_TPG-1,
@ -1387,7 +1387,7 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l
lun = &tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
pr_err("%s Logical Unit Number: %u is not free on"
" Target Portal Group: %hu, ignoring request.\n",
tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
tpg->se_tpg_tfo->tpg_get_tag(tpg));
@ -1409,7 +1409,7 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
spin_lock(&tpg->tpg_lun_lock);
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
"_TPG-1: %u for Target Portal Group: %hu\n",
tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
TRANSPORT_MAX_LUNS_PER_TPG-1,
@ -1420,7 +1420,7 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
lun = &tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
pr_err("%s Logical Unit Number: %u is not active on"
" Target Portal Group: %hu, ignoring request.\n",
tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
tpg->se_tpg_tfo->tpg_get_tag(tpg));
@ -1442,19 +1442,19 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
struct se_node_acl *nacl;
if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
pr_err("%s InitiatorName exceeds maximum size.\n",
tpg->se_tpg_tfo->get_fabric_name());
*ret = -EOVERFLOW;
return NULL;
}
nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
if (!(nacl)) {
if (!nacl) {
*ret = -EINVAL;
return NULL;
}
lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
if (!(lacl)) {
printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
if (!lacl) {
pr_err("Unable to allocate memory for struct se_lun_acl.\n");
*ret = -ENOMEM;
return NULL;
}
@ -1477,8 +1477,8 @@ int core_dev_add_initiator_node_lun_acl(
struct se_node_acl *nacl;
lun = core_dev_get_lun(tpg, unpacked_lun);
if (!(lun)) {
printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
if (!lun) {
pr_err("%s Logical Unit Number: %u is not active on"
" Target Portal Group: %hu, ignoring request.\n",
tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
tpg->se_tpg_tfo->tpg_get_tag(tpg));
@ -1486,7 +1486,7 @@ int core_dev_add_initiator_node_lun_acl(
}
nacl = lacl->se_lun_nacl;
if (!(nacl))
if (!nacl)
return -EINVAL;
if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
@ -1505,7 +1505,7 @@ int core_dev_add_initiator_node_lun_acl(
smp_mb__after_atomic_inc();
spin_unlock(&lun->lun_acl_lock);
printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
@ -1530,7 +1530,7 @@ int core_dev_del_initiator_node_lun_acl(
struct se_node_acl *nacl;
nacl = lacl->se_lun_nacl;
if (!(nacl))
if (!nacl)
return -EINVAL;
spin_lock(&lun->lun_acl_lock);
@ -1544,7 +1544,7 @@ int core_dev_del_initiator_node_lun_acl(
lacl->se_lun = NULL;
printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
" InitiatorNode: %s Mapped LUN: %u\n",
tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
@ -1557,7 +1557,7 @@ void core_dev_free_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_lun_acl *lacl)
{
printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg),
tpg->se_tpg_tfo->get_fabric_name(),
@ -1575,7 +1575,7 @@ int core_dev_setup_virtual_lun0(void)
char buf[16];
int ret;
hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
if (IS_ERR(hba))
return PTR_ERR(hba);
@ -1583,8 +1583,8 @@ int core_dev_setup_virtual_lun0(void)
t = hba->transport;
se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
if (!(se_dev)) {
printk(KERN_ERR "Unable to allocate memory for"
if (!se_dev) {
pr_err("Unable to allocate memory for"
" struct se_subsystem_dev\n");
ret = -ENOMEM;
goto out;
@ -1606,8 +1606,8 @@ int core_dev_setup_virtual_lun0(void)
se_dev->se_dev_hba = hba;
se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
if (!(se_dev->se_dev_su_ptr)) {
printk(KERN_ERR "Unable to locate subsystem dependent pointer"
if (!se_dev->se_dev_su_ptr) {
pr_err("Unable to locate subsystem dependent pointer"
" from allocate_virtdevice()\n");
ret = -ENOMEM;
goto out;
@ -1643,7 +1643,7 @@ void core_dev_release_virtual_lun0(void)
struct se_hba *hba = lun0_hba;
struct se_subsystem_dev *su_dev = lun0_su_dev;
if (!(hba))
if (!hba)
return;
if (g_lun0_dev)


@ -60,7 +60,7 @@ static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf)
cit->ct_group_ops = _group_ops; \
cit->ct_attrs = _attrs; \
cit->ct_owner = tf->tf_module; \
printk("Setup generic %s\n", __stringify(_name)); \
pr_debug("Setup generic %s\n", __stringify(_name)); \
}
/* Start of tfc_tpg_mappedlun_cit */
@ -80,8 +80,8 @@ static int target_fabric_mappedlun_link(
/*
* Ensure that the source port exists
*/
if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) {
printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep"
if (!lun->lun_sep || !lun->lun_sep->sep_tpg) {
pr_err("Source se_lun->lun_sep or lun->lun_sep->sep"
"_tpg does not exist\n");
return -EINVAL;
}
@ -96,12 +96,12 @@ static int target_fabric_mappedlun_link(
* Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
*/
if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n",
pr_err("Illegal Initiator ACL SymLink outside of %s\n",
config_item_name(wwn_ci));
return -EINVAL;
}
if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s"
pr_err("Illegal Initiator ACL Symlink outside of %s"
" TPGT: %s\n", config_item_name(wwn_ci),
config_item_name(tpg_ci));
return -EINVAL;
@ -147,7 +147,7 @@ static int target_fabric_mappedlun_unlink(
/*
* Determine if the underlying MappedLUN has already been released..
*/
if (!(deve->se_lun))
if (!deve->se_lun)
return 0;
lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
@ -202,7 +202,7 @@ static ssize_t target_fabric_mappedlun_store_write_protect(
TRANSPORT_LUNFLAGS_READ_WRITE,
lacl->se_lun_nacl);
printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s"
pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
" Mapped LUN: %u Write Protect bit to %s\n",
se_tpg->se_tpg_tfo->get_fabric_name(),
lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
@ -327,14 +327,14 @@ static struct config_group *target_fabric_make_mappedlun(
int ret = 0;
acl_ci = &group->cg_item;
if (!(acl_ci)) {
printk(KERN_ERR "Unable to locatel acl_ci\n");
if (!acl_ci) {
pr_err("Unable to locatel acl_ci\n");
return NULL;
}
buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
if (!(buf)) {
printk(KERN_ERR "Unable to allocate memory for name buf\n");
if (!buf) {
pr_err("Unable to allocate memory for name buf\n");
return ERR_PTR(-ENOMEM);
}
snprintf(buf, strlen(name) + 1, "%s", name);
@ -342,7 +342,7 @@ static struct config_group *target_fabric_make_mappedlun(
* Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID.
*/
if (strstr(buf, "lun_") != buf) {
printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s"
pr_err("Unable to locate \"lun_\" from buf: %s"
" name: %s\n", buf, name);
ret = -EINVAL;
goto out;
@ -358,7 +358,7 @@ static struct config_group *target_fabric_make_mappedlun(
lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
config_item_name(acl_ci), &ret);
if (!(lacl)) {
if (!lacl) {
ret = -EINVAL;
goto out;
}
@ -367,7 +367,7 @@ static struct config_group *target_fabric_make_mappedlun(
lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!lacl_cg->default_groups) {
printk(KERN_ERR "Unable to allocate lacl_cg->default_groups\n");
pr_err("Unable to allocate lacl_cg->default_groups\n");
ret = -ENOMEM;
goto out;
}
@ -383,7 +383,7 @@ static struct config_group *target_fabric_make_mappedlun(
ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
GFP_KERNEL);
if (!ml_stat_grp->default_groups) {
printk(KERN_ERR "Unable to allocate ml_stat_grp->default_groups\n");
pr_err("Unable to allocate ml_stat_grp->default_groups\n");
ret = -ENOMEM;
goto out;
}
@ -474,8 +474,8 @@ static struct config_group *target_fabric_make_nodeacl(
struct se_node_acl *se_nacl;
struct config_group *nacl_cg;
if (!(tf->tf_ops.fabric_make_nodeacl)) {
printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n");
if (!tf->tf_ops.fabric_make_nodeacl) {
pr_err("tf->tf_ops.fabric_make_nodeacl is NULL\n");
return ERR_PTR(-ENOSYS);
}
@ -572,13 +572,13 @@ static struct config_group *target_fabric_make_np(
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_tpg_np *se_tpg_np;
if (!(tf->tf_ops.fabric_make_np)) {
printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n");
if (!tf->tf_ops.fabric_make_np) {
pr_err("tf->tf_ops.fabric_make_np is NULL\n");
return ERR_PTR(-ENOSYS);
}
se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
if (!(se_tpg_np) || IS_ERR(se_tpg_np))
if (!se_tpg_np || IS_ERR(se_tpg_np))
return ERR_PTR(-EINVAL);
se_tpg_np->tpg_np_parent = se_tpg;
@ -627,10 +627,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
struct se_lun *lun,
char *page)
{
if (!(lun))
return -ENODEV;
if (!(lun->lun_sep))
if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
@ -641,10 +638,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
const char *page,
size_t count)
{
if (!(lun))
return -ENODEV;
if (!(lun->lun_sep))
if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
@ -659,10 +653,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
struct se_lun *lun,
char *page)
{
if (!(lun))
return -ENODEV;
if (!(lun->lun_sep))
if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_show_offline_bit(lun, page);
@ -673,10 +664,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
const char *page,
size_t count)
{
if (!(lun))
return -ENODEV;
if (!(lun->lun_sep))
if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_store_offline_bit(lun, page, count);
@ -691,10 +679,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
struct se_lun *lun,
char *page)
{
if (!(lun))
return -ENODEV;
if (!(lun->lun_sep))
if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_show_secondary_status(lun, page);
@ -705,10 +690,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
const char *page,
size_t count)
{
if (!(lun))
return -ENODEV;
if (!(lun->lun_sep))
if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_store_secondary_status(lun, page, count);
@ -723,10 +705,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
struct se_lun *lun,
char *page)
{
if (!(lun))
return -ENODEV;
if (!(lun->lun_sep))
if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_show_secondary_write_metadata(lun, page);
@ -737,10 +716,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
const char *page,
size_t count)
{
if (!(lun))
return -ENODEV;
if (!(lun->lun_sep))
if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_store_secondary_write_metadata(lun, page, count);
@ -781,13 +757,13 @@ static int target_fabric_port_link(
tf = se_tpg->se_tpg_wwn->wwn_tf;
if (lun->lun_se_dev != NULL) {
printk(KERN_ERR "Port Symlink already exists\n");
pr_err("Port Symlink already exists\n");
return -EEXIST;
}
dev = se_dev->se_dev_ptr;
if (!(dev)) {
printk(KERN_ERR "Unable to locate struct se_device pointer from"
if (!dev) {
pr_err("Unable to locate struct se_device pointer from"
" %s\n", config_item_name(se_dev_ci));
ret = -ENODEV;
goto out;
@ -795,8 +771,8 @@ static int target_fabric_port_link(
lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
lun->unpacked_lun);
if ((IS_ERR(lun_p)) || !(lun_p)) {
printk(KERN_ERR "core_dev_add_lun() failed\n");
if (IS_ERR(lun_p) || !lun_p) {
pr_err("core_dev_add_lun() failed\n");
ret = -EINVAL;
goto out;
}
@ -888,7 +864,7 @@ static struct config_group *target_fabric_make_lun(
int errno;
if (strstr(name, "lun_") != name) {
printk(KERN_ERR "Unable to locate \'_\" in"
pr_err("Unable to locate \'_\" in"
" \"lun_$LUN_NUMBER\"\n");
return ERR_PTR(-EINVAL);
}
@ -896,14 +872,14 @@ static struct config_group *target_fabric_make_lun(
return ERR_PTR(-EINVAL);
lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
if (!(lun))
if (!lun)
return ERR_PTR(-EINVAL);
lun_cg = &lun->lun_group;
lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!lun_cg->default_groups) {
printk(KERN_ERR "Unable to allocate lun_cg->default_groups\n");
pr_err("Unable to allocate lun_cg->default_groups\n");
return ERR_PTR(-ENOMEM);
}
@ -918,7 +894,7 @@ static struct config_group *target_fabric_make_lun(
port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
GFP_KERNEL);
if (!port_stat_grp->default_groups) {
printk(KERN_ERR "Unable to allocate port_stat_grp->default_groups\n");
pr_err("Unable to allocate port_stat_grp->default_groups\n");
errno = -ENOMEM;
goto out;
}
@ -1031,13 +1007,13 @@ static struct config_group *target_fabric_make_tpg(
struct target_fabric_configfs *tf = wwn->wwn_tf;
struct se_portal_group *se_tpg;
if (!(tf->tf_ops.fabric_make_tpg)) {
printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n");
if (!tf->tf_ops.fabric_make_tpg) {
pr_err("tf->tf_ops.fabric_make_tpg is NULL\n");
return ERR_PTR(-ENOSYS);
}
se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
if (!(se_tpg) || IS_ERR(se_tpg))
if (!se_tpg || IS_ERR(se_tpg))
return ERR_PTR(-EINVAL);
/*
* Setup default groups from pre-allocated se_tpg->tpg_default_groups
@ -1130,13 +1106,13 @@ static struct config_group *target_fabric_make_wwn(
struct target_fabric_configfs, tf_group);
struct se_wwn *wwn;
if (!(tf->tf_ops.fabric_make_wwn)) {
printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n");
if (!tf->tf_ops.fabric_make_wwn) {
pr_err("tf->tf_ops.fabric_make_wwn is NULL\n");
return ERR_PTR(-ENOSYS);
}
wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
if (!(wwn) || IS_ERR(wwn))
if (!wwn || IS_ERR(wwn))
return ERR_PTR(-EINVAL);
wwn->wwn_tf = tf;


@ -172,7 +172,7 @@ u32 fc_get_pr_transport_id(
ptr = &se_nacl->initiatorname[0];
for (i = 0; i < 24; ) {
if (!(strncmp(&ptr[i], ":", 1))) {
if (!strncmp(&ptr[i], ":", 1)) {
i++;
continue;
}
@ -386,7 +386,7 @@ char *iscsi_parse_pr_out_transport_id(
* Reserved
*/
if ((format_code != 0x00) && (format_code != 0x40)) {
printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI"
pr_err("Illegal format code: 0x%02x for iSCSI"
" Initiator Transport ID\n", format_code);
return NULL;
}
@ -406,7 +406,7 @@ char *iscsi_parse_pr_out_transport_id(
tid_len += padding;
if ((add_len + 4) != tid_len) {
printk(KERN_INFO "LIO-Target Extracted add_len: %hu "
pr_debug("LIO-Target Extracted add_len: %hu "
"does not match calculated tid_len: %u,"
" using tid_len instead\n", add_len+4, tid_len);
*out_tid_len = tid_len;
@ -420,8 +420,8 @@ char *iscsi_parse_pr_out_transport_id(
*/
if (format_code == 0x40) {
p = strstr((char *)&buf[4], ",i,0x");
if (!(p)) {
printk(KERN_ERR "Unable to locate \",i,0x\" seperator"
if (!p) {
pr_err("Unable to locate \",i,0x\" seperator"
" for Initiator port identifier: %s\n",
(char *)&buf[4]);
return NULL;


@ -42,18 +42,6 @@
#include "target_core_file.h"
#if 1
#define DEBUG_FD_CACHE(x...) printk(x)
#else
#define DEBUG_FD_CACHE(x...)
#endif
#if 1
#define DEBUG_FD_FUA(x...) printk(x)
#else
#define DEBUG_FD_FUA(x...)
#endif
static struct se_subsystem_api fileio_template;
/* fd_attach_hba(): (Part of se_subsystem_api_t template)
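The DEBUG_FD_CACHE()/DEBUG_FD_FUA() macros removed above were compile-time switches: silencing them meant flipping the "#if 1" to "#if 0" and rebuilding the module. The pr_debug() calls that replace them participate in dynamic debug instead; a sketch of the equivalence (the control-file command in the comment is the standard dynamic-debug toggle syntax):

    /*
     * Was: #define DEBUG_FD_FUA(x...) printk(x), hardwired on.
     * Now togglable at runtime, e.g.:
     *   echo 'file target_core_file.c +p' > \
     *       /sys/kernel/debug/dynamic_debug/control
     */
    pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
             task->task_lba, task->task_size);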
@ -65,8 +53,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
struct fd_host *fd_host;
fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
if (!(fd_host)) {
printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
if (!fd_host) {
pr_err("Unable to allocate memory for struct fd_host\n");
return -ENOMEM;
}
@ -74,10 +62,10 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
hba->hba_ptr = fd_host;
printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
TARGET_CORE_MOD_VERSION);
printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
" MaxSectors: %u\n",
hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
@ -88,7 +76,7 @@ static void fd_detach_hba(struct se_hba *hba)
{
struct fd_host *fd_host = hba->hba_ptr;
printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
" Target Core\n", hba->hba_id, fd_host->fd_host_id);
kfree(fd_host);
@ -101,14 +89,14 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
if (!(fd_dev)) {
printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n");
if (!fd_dev) {
pr_err("Unable to allocate memory for struct fd_dev\n");
return NULL;
}
fd_dev->fd_host = fd_host;
printk(KERN_INFO "FILEIO: Allocated fd_dev for %p\n", name);
pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
return fd_dev;
}
@ -141,7 +129,7 @@ static struct se_device *fd_create_virtdevice(
set_fs(old_fs);
if (IS_ERR(dev_p)) {
printk(KERN_ERR "getname(%s) failed: %lu\n",
pr_err("getname(%s) failed: %lu\n",
fd_dev->fd_dev_name, IS_ERR(dev_p));
ret = PTR_ERR(dev_p);
goto fail;
@ -164,12 +152,12 @@ static struct se_device *fd_create_virtdevice(
file = filp_open(dev_p, flags, 0600);
if (IS_ERR(file)) {
printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
pr_err("filp_open(%s) failed\n", dev_p);
ret = PTR_ERR(file);
goto fail;
}
if (!file || !file->f_dentry) {
printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
pr_err("filp_open(%s) failed\n", dev_p);
goto fail;
}
fd_dev->fd_file = file;
@ -199,14 +187,14 @@ static struct se_device *fd_create_virtdevice(
fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
fd_dev->fd_block_size);
printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct"
pr_debug("FILEIO: Using size: %llu bytes from struct"
" block_device blocks: %llu logical_block_size: %d\n",
fd_dev->fd_dev_size,
div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
} else {
if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
printk(KERN_ERR "FILEIO: Missing fd_dev_size="
pr_err("FILEIO: Missing fd_dev_size="
" parameter, and no backing struct"
" block_device\n");
goto fail;
@ -225,13 +213,13 @@ static struct se_device *fd_create_virtdevice(
dev = transport_add_device_to_core_hba(hba, &fileio_template,
se_dev, dev_flags, fd_dev,
&dev_limits, "FILEIO", FD_VERSION);
if (!(dev))
if (!dev)
goto fail;
fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
fd_dev->fd_queue_depth = dev->queue_depth;
printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
fd_dev->fd_dev_name, fd_dev->fd_dev_size);
@ -269,25 +257,24 @@ static inline struct fd_request *FILE_REQ(struct se_task *task)
static struct se_task *
fd_alloc_task(struct se_cmd *cmd)
fd_alloc_task(unsigned char *cdb)
{
struct fd_request *fd_req;
fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
if (!(fd_req)) {
printk(KERN_ERR "Unable to allocate struct fd_request\n");
if (!fd_req) {
pr_err("Unable to allocate struct fd_request\n");
return NULL;
}
fd_req->fd_dev = cmd->se_dev->dev_ptr;
return &fd_req->fd_task;
}
static int fd_do_readv(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
struct file *fd = req->fd_dev->fd_file;
struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
struct file *fd = dev->fd_file;
struct scatterlist *sg = task->task_sg;
struct iovec *iov;
mm_segment_t old_fs;
@ -295,20 +282,20 @@ static int fd_do_readv(struct se_task *task)
task->se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret = 0, i;
iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
if (!(iov)) {
printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
if (!iov) {
pr_err("Unable to allocate fd_do_readv iov[]\n");
return -ENOMEM;
}
for (i = 0; i < task->task_sg_num; i++) {
for (i = 0; i < task->task_sg_nents; i++) {
iov[i].iov_len = sg[i].length;
iov[i].iov_base = sg_virt(&sg[i]);
}
old_fs = get_fs();
set_fs(get_ds());
ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos);
ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
set_fs(old_fs);
kfree(iov);
@ -319,14 +306,14 @@ static int fd_do_readv(struct se_task *task)
*/
if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
if (ret < 0 || ret != task->task_size) {
printk(KERN_ERR "vfs_readv() returned %d,"
pr_err("vfs_readv() returned %d,"
" expecting %d for S_ISBLK\n", ret,
(int)task->task_size);
return (ret < 0 ? ret : -EINVAL);
}
} else {
if (ret < 0) {
printk(KERN_ERR "vfs_readv() returned %d for non"
pr_err("vfs_readv() returned %d for non"
" S_ISBLK\n", ret);
return ret;
}
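Both fd_do_readv() above and fd_do_writev() below hand a kernel-space iovec array to vfs_readv()/vfs_writev(), whose signatures expect user-space pointers; that is what the get_fs()/set_fs() bracketing is for. A condensed sketch of the idiom, using the names from these hunks:

    mm_segment_t old_fs = get_fs();

    set_fs(get_ds());   /* lift the address-limit check for kernel iovecs */
    ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
    set_fs(old_fs);     /* restore the caller's segment before returning */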
@ -338,7 +325,8 @@ static int fd_do_readv(struct se_task *task)
static int fd_do_writev(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
struct file *fd = req->fd_dev->fd_file;
struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
struct file *fd = dev->fd_file;
struct scatterlist *sg = task->task_sg;
struct iovec *iov;
mm_segment_t old_fs;
@ -346,26 +334,26 @@ static int fd_do_writev(struct se_task *task)
task->se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret, i = 0;
iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
if (!(iov)) {
printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
if (!iov) {
pr_err("Unable to allocate fd_do_writev iov[]\n");
return -ENOMEM;
}
for (i = 0; i < task->task_sg_num; i++) {
for (i = 0; i < task->task_sg_nents; i++) {
iov[i].iov_len = sg[i].length;
iov[i].iov_base = sg_virt(&sg[i]);
}
old_fs = get_fs();
set_fs(get_ds());
ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos);
ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
set_fs(old_fs);
kfree(iov);
if (ret < 0 || ret != task->task_size) {
printk(KERN_ERR "vfs_writev() returned %d\n", ret);
pr_err("vfs_writev() returned %d\n", ret);
return (ret < 0 ? ret : -EINVAL);
}
@ -404,7 +392,7 @@ static void fd_emulate_sync_cache(struct se_task *task)
ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
if (ret != 0)
printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
if (!immed)
transport_complete_sync_cache(cmd, ret == 0);
@ -449,12 +437,12 @@ static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
loff_t end = start + task->task_size;
int ret;
DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
task->task_lba, task->task_size);
ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
if (ret != 0)
printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
}
static int fd_do_task(struct se_task *task)
@ -548,7 +536,7 @@ static ssize_t fd_set_configfs_dev_params(
snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
"%s", arg_p);
kfree(arg_p);
printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
pr_debug("FILEIO: Referencing Path: %s\n",
fd_dev->fd_dev_name);
fd_dev->fbd_flags |= FBDF_HAS_PATH;
break;
@ -561,23 +549,23 @@ static ssize_t fd_set_configfs_dev_params(
ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
kfree(arg_p);
if (ret < 0) {
printk(KERN_ERR "strict_strtoull() failed for"
pr_err("strict_strtoull() failed for"
" fd_dev_size=\n");
goto out;
}
printk(KERN_INFO "FILEIO: Referencing Size: %llu"
pr_debug("FILEIO: Referencing Size: %llu"
" bytes\n", fd_dev->fd_dev_size);
fd_dev->fbd_flags |= FBDF_HAS_SIZE;
break;
case Opt_fd_buffered_io:
match_int(args, &arg);
if (arg != 1) {
printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg);
pr_err("bogus fd_buffered_io=%d value\n", arg);
ret = -EINVAL;
goto out;
}
printk(KERN_INFO "FILEIO: Using buffered I/O"
pr_debug("FILEIO: Using buffered I/O"
" operations for struct fd_dev\n");
fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
@ -597,7 +585,7 @@ static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys
struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
printk(KERN_ERR "Missing fd_dev_name=\n");
pr_err("Missing fd_dev_name=\n");
return -EINVAL;
}


@ -16,8 +16,6 @@ struct fd_request {
struct se_task fd_task;
/* SCSI CDB from iSCSI Command PDU */
unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
/* FILEIO device */
struct fd_dev *fd_dev;
} ____cacheline_aligned;
#define FBDF_HAS_PATH 0x01
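With the cached fd_dev pointer gone from struct fd_request, and fd_alloc_task() now receiving only the CDB, the I/O paths recover the device through the owning task, as the fd_do_readv()/fd_do_writev() hunks above already show. The replacement access path, in sketch form:

    /* Sketch: device context is derived from the task's se_device */
    struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
    struct file *fd = dev->fd_file;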


@ -58,8 +58,8 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api)
mutex_lock(&subsystem_mutex);
list_for_each_entry(s, &subsystem_list, sub_api_list) {
if (!(strcmp(s->name, sub_api->name))) {
printk(KERN_ERR "%p is already registered with"
if (!strcmp(s->name, sub_api->name)) {
pr_err("%p is already registered with"
" duplicate name %s, unable to process"
" request\n", s, s->name);
mutex_unlock(&subsystem_mutex);
@ -69,7 +69,7 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api)
list_add_tail(&sub_api->sub_api_list, &subsystem_list);
mutex_unlock(&subsystem_mutex);
printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:"
pr_debug("TCM: Registered subsystem plugin: %s struct module:"
" %p\n", sub_api->name, sub_api->owner);
return 0;
}
@ -109,7 +109,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
hba = kzalloc(sizeof(*hba), GFP_KERNEL);
if (!hba) {
printk(KERN_ERR "Unable to allocate struct se_hba\n");
pr_err("Unable to allocate struct se_hba\n");
return ERR_PTR(-ENOMEM);
}
@ -135,7 +135,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
list_add_tail(&hba->hba_node, &hba_list);
spin_unlock(&hba_lock);
printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target"
pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target"
" Core\n", hba->hba_id);
return hba;
@ -161,7 +161,7 @@ core_delete_hba(struct se_hba *hba)
list_del(&hba->hba_node);
spin_unlock(&hba_lock);
printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target"
pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
" Core\n", hba->hba_id);
if (hba->transport->owner)


@ -47,12 +47,6 @@
#include "target_core_iblock.h"
#if 0
#define DEBUG_IBLOCK(x...) printk(x)
#else
#define DEBUG_IBLOCK(x...)
#endif
static struct se_subsystem_api iblock_template;
static void iblock_bio_done(struct bio *, int);
@ -66,8 +60,8 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
struct iblock_hba *ib_host;
ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
if (!(ib_host)) {
printk(KERN_ERR "Unable to allocate memory for"
if (!ib_host) {
pr_err("Unable to allocate memory for"
" struct iblock_hba\n");
return -ENOMEM;
}
@ -76,11 +70,11 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
hba->hba_ptr = ib_host;
printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
hba->hba_id, ib_host->iblock_host_id);
return 0;
@ -90,7 +84,7 @@ static void iblock_detach_hba(struct se_hba *hba)
{
struct iblock_hba *ib_host = hba->hba_ptr;
printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
" Target Core\n", hba->hba_id, ib_host->iblock_host_id);
kfree(ib_host);
@ -103,13 +97,13 @@ static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
struct iblock_hba *ib_host = hba->hba_ptr;
ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
if (!(ib_dev)) {
printk(KERN_ERR "Unable to allocate struct iblock_dev\n");
if (!ib_dev) {
pr_err("Unable to allocate struct iblock_dev\n");
return NULL;
}
ib_dev->ibd_host = ib_host;
printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name);
pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
return ib_dev;
}
@ -128,8 +122,8 @@ static struct se_device *iblock_create_virtdevice(
u32 dev_flags = 0;
int ret = -EINVAL;
if (!(ib_dev)) {
printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n");
if (!ib_dev) {
pr_err("Unable to locate struct iblock_dev parameter\n");
return ERR_PTR(ret);
}
memset(&dev_limits, 0, sizeof(struct se_dev_limits));
@ -137,16 +131,16 @@ static struct se_device *iblock_create_virtdevice(
* These settings need to be made tunable..
*/
ib_dev->ibd_bio_set = bioset_create(32, 64);
if (!(ib_dev->ibd_bio_set)) {
printk(KERN_ERR "IBLOCK: Unable to create bioset()\n");
if (!ib_dev->ibd_bio_set) {
pr_err("IBLOCK: Unable to create bioset()\n");
return ERR_PTR(-ENOMEM);
}
printk(KERN_INFO "IBLOCK: Created bio_set()\n");
pr_debug("IBLOCK: Created bio_set()\n");
/*
* iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
* must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
*/
printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n",
pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
ib_dev->ibd_udev_path);
bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
@ -172,7 +166,7 @@ static struct se_device *iblock_create_virtdevice(
dev = transport_add_device_to_core_hba(hba,
&iblock_template, se_dev, dev_flags, ib_dev,
&dev_limits, "IBLOCK", IBLOCK_VERSION);
if (!(dev))
if (!dev)
goto failed;
/*
@ -192,7 +186,7 @@ static struct se_device *iblock_create_virtdevice(
dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
q->limits.discard_alignment;
printk(KERN_INFO "IBLOCK: BLOCK Discard support available,"
pr_debug("IBLOCK: BLOCK Discard support available,"
" disabled by default\n");
}
@ -227,17 +221,16 @@ static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
}
static struct se_task *
iblock_alloc_task(struct se_cmd *cmd)
iblock_alloc_task(unsigned char *cdb)
{
struct iblock_req *ib_req;
ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
if (!(ib_req)) {
printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n");
if (!ib_req) {
pr_err("Unable to allocate memory for struct iblock_req\n");
return NULL;
}
ib_req->ib_dev = cmd->se_dev->dev_ptr;
atomic_set(&ib_req->ib_bio_cnt, 0);
return &ib_req->ib_task;
}
@ -345,7 +338,7 @@ static void iblock_emulate_sync_cache(struct se_task *task)
*/
ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
if (ret != 0) {
printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d "
pr_err("IBLOCK: block_issue_flush() failed: %d "
" error_sector: %llu\n", ret,
(unsigned long long)error_sector);
}
@ -409,8 +402,9 @@ static int iblock_do_task(struct se_task *task)
while (bio) {
nbio = bio->bi_next;
bio->bi_next = NULL;
DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p"
" bio->bi_sector: %llu\n", task, bio, bio->bi_sector);
pr_debug("Calling submit_bio() task: %p bio: %p"
" bio->bi_sector: %llu\n", task, bio,
(unsigned long long)bio->bi_sector);
submit_bio(rw, bio);
bio = nbio;
@ -480,7 +474,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
switch (token) {
case Opt_udev_path:
if (ib_dev->ibd_bd) {
printk(KERN_ERR "Unable to set udev_path= while"
pr_err("Unable to set udev_path= while"
" ib_dev->ibd_bd exists\n");
ret = -EEXIST;
goto out;
@ -493,7 +487,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
"%s", arg_p);
kfree(arg_p);
printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n",
pr_debug("IBLOCK: Referencing UDEV path: %s\n",
ib_dev->ibd_udev_path);
ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
break;
@ -516,7 +510,7 @@ static ssize_t iblock_check_configfs_dev_params(
struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n");
pr_err("Missing udev_path= parameters for IBLOCK\n");
return -EINVAL;
}
@ -574,15 +568,15 @@ static struct bio *iblock_get_bio(
struct bio *bio;
bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
if (!(bio)) {
printk(KERN_ERR "Unable to allocate memory for bio\n");
if (!bio) {
pr_err("Unable to allocate memory for bio\n");
*ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
return NULL;
}
DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:"
" %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set);
DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size);
pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
" %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);
bio->bi_bdev = ib_dev->ibd_bd;
bio->bi_private = task;
@ -591,8 +585,8 @@ static struct bio *iblock_get_bio(
bio->bi_sector = lba;
atomic_inc(&ib_req->ib_bio_cnt);
DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector);
DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n",
pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
pr_debug("Set ib_req->ib_bio_cnt: %d\n",
atomic_read(&ib_req->ib_bio_cnt));
return bio;
}
@ -606,7 +600,7 @@ static int iblock_map_task_SG(struct se_task *task)
struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
struct scatterlist *sg;
int ret = 0;
u32 i, sg_num = task->task_sg_num;
u32 i, sg_num = task->task_sg_nents;
sector_t block_lba;
/*
* Do starting conversion up from non 512-byte blocksize with
@ -621,13 +615,13 @@ static int iblock_map_task_SG(struct se_task *task)
else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
block_lba = task->task_lba;
else {
printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:"
pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
if (!(bio))
if (!bio)
return ret;
ib_req->ib_bio = bio;
@ -636,41 +630,41 @@ static int iblock_map_task_SG(struct se_task *task)
* Use fs/bio.c:bio_add_page() to set up the bio_vec maplist
* from task->task_sg -> struct scatterlist memory.
*/
for_each_sg(task->task_sg, sg, task->task_sg_num, i) {
DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:"
for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
pr_debug("task: %p bio: %p Calling bio_add_page(): page:"
" %p len: %u offset: %u\n", task, bio, sg_page(sg),
sg->length, sg->offset);
again:
ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
if (ret != sg->length) {
DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n",
bio->bi_sector);
DEBUG_IBLOCK("** task->task_size: %u\n",
pr_debug("*** Set bio->bi_sector: %llu\n",
(unsigned long long)bio->bi_sector);
pr_debug("** task->task_size: %u\n",
task->task_size);
DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n",
pr_debug("*** bio->bi_max_vecs: %u\n",
bio->bi_max_vecs);
DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n",
pr_debug("*** bio->bi_vcnt: %u\n",
bio->bi_vcnt);
bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
block_lba, sg_num);
if (!(bio))
if (!bio)
goto fail;
tbio = tbio->bi_next = bio;
DEBUG_IBLOCK("-----------------> Added +1 bio: %p to"
pr_debug("-----------------> Added +1 bio: %p to"
" list, Going to again\n", bio);
goto again;
}
/* Always in 512 byte units for Linux/Block */
block_lba += sg->length >> IBLOCK_LBA_SHIFT;
sg_num--;
DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented"
pr_debug("task: %p bio-add_page() passed!, decremented"
" sg_num to %u\n", task, sg_num);
DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba"
" to %llu\n", task, block_lba);
DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:"
pr_debug("task: %p bio_add_page() passed!, increased lba"
" to %llu\n", task, (unsigned long long)block_lba);
pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:"
" %u\n", task, bio->bi_vcnt);
}
@ -716,11 +710,11 @@ static void iblock_bio_done(struct bio *bio, int err)
/*
* Set -EIO if !BIO_UPTODATE and the passed err is still 0
*/
if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err))
if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
err = -EIO;
if (err != 0) {
printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p,"
pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
" err: %d\n", bio, err);
/*
* Bump the ib_bio_err_cnt and release bio.
@ -731,15 +725,15 @@ static void iblock_bio_done(struct bio *bio, int err)
/*
* Wait to complete the task until the last bio has completed.
*/
if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
return;
ibr->ib_bio = NULL;
transport_complete_task(task, 0);
return;
}
DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
task, bio, task->task_lba, bio->bi_sector, err);
pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err);
/*
* bio_put() will call iblock_bio_destructor() to release the bio back
* to ibr->ib_bio_set.
@ -748,7 +742,7 @@ static void iblock_bio_done(struct bio *bio, int err)
/*
* Wait to complete the task until the last bio has completed.
*/
if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
return;
/*
* Return GOOD status for task if zero ib_bio_err_cnt exists.
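iblock_bio_done() runs once per bio in the chain, so task completion is gated on the shared counter: only the completion that drops ib_bio_cnt to zero finishes the task, and GOOD status depends on no bio having bumped ib_bio_err_cnt. A condensed sketch of the gate, using the names above:

    /* Sketch: each bio completion decrements; only the last one proceeds */
    if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
        return;
    /* GOOD status only if no bio in the chain recorded an error */
    transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));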


@ -12,7 +12,6 @@ struct iblock_req {
atomic_t ib_bio_cnt;
atomic_t ib_bio_err_cnt;
struct bio *ib_bio;
struct iblock_dev *ib_dev;
} ____cacheline_aligned;
#define IBDF_HAS_UDEV_PATH 0x01

File diff suppressed because it is too large


@ -65,8 +65,8 @@ static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
struct pscsi_hba_virt *phv;
phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
if (!(phv)) {
printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n");
if (!phv) {
pr_err("Unable to allocate struct pscsi_hba_virt\n");
return -ENOMEM;
}
phv->phv_host_id = host_id;
@ -74,10 +74,10 @@ static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
hba->hba_ptr = phv;
printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
hba->hba_id);
return 0;
@ -91,12 +91,12 @@ static void pscsi_detach_hba(struct se_hba *hba)
if (scsi_host) {
scsi_host_put(scsi_host);
printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from"
pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from"
" Generic Target Core\n", hba->hba_id,
(scsi_host->hostt->name) ? (scsi_host->hostt->name) :
"Unknown");
} else
printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA"
pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA"
" from Generic Target Core\n", hba->hba_id);
kfree(phv);
@ -110,14 +110,14 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
/*
* Release the struct Scsi_Host
*/
if (!(mode_flag)) {
if (!(sh))
if (!mode_flag) {
if (!sh)
return 0;
phv->phv_lld_host = NULL;
phv->phv_mode = PHV_VIRUTAL_HOST_ID;
printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
" %s\n", hba->hba_id, (sh->hostt->name) ?
(sh->hostt->name) : "Unknown");
@ -130,7 +130,7 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
*/
sh = scsi_host_lookup(phv->phv_host_id);
if (IS_ERR(sh)) {
printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for"
pr_err("pSCSI: Unable to locate SCSI Host for"
" phv_host_id: %d\n", phv->phv_host_id);
return PTR_ERR(sh);
}
@ -138,7 +138,7 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
phv->phv_lld_host = sh;
phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
return 1;
@ -257,15 +257,15 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
page_83 = &buf[off];
ident_len = page_83[3];
if (!ident_len) {
printk(KERN_ERR "page_83[3]: identifier"
pr_err("page_83[3]: identifier"
" length zero!\n");
break;
}
printk(KERN_INFO "T10 VPD Identifer Length: %d\n", ident_len);
pr_debug("T10 VPD Identifer Length: %d\n", ident_len);
vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
if (!vpd) {
printk(KERN_ERR "Unable to allocate memory for"
pr_err("Unable to allocate memory for"
" struct t10_vpd\n");
goto out;
}
@ -317,7 +317,7 @@ static struct se_device *pscsi_add_device_to_list(
if (!sd->queue_depth) {
sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
printk(KERN_ERR "Set broken SCSI Device %d:%d:%d"
pr_err("Set broken SCSI Device %d:%d:%d"
" queue_depth to %d\n", sd->channel, sd->id,
sd->lun, sd->queue_depth);
}
@ -355,7 +355,7 @@ static struct se_device *pscsi_add_device_to_list(
dev = transport_add_device_to_core_hba(hba, &pscsi_template,
se_dev, dev_flags, pdv,
&dev_limits, NULL, NULL);
if (!(dev)) {
if (!dev) {
pdv->pdv_sd = NULL;
return NULL;
}
@ -385,13 +385,13 @@ static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
struct pscsi_dev_virt *pdv;
pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
if (!(pdv)) {
printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n");
if (!pdv) {
pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
return NULL;
}
pdv->pdv_se_hba = hba;
printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name);
pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
return pdv;
}
@ -412,7 +412,7 @@ static struct se_device *pscsi_create_type_disk(
u32 dev_flags = 0;
if (scsi_device_get(sd)) {
printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
return NULL;
@ -425,19 +425,19 @@ static struct se_device *pscsi_create_type_disk(
bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
if (IS_ERR(bd)) {
printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n");
pr_err("pSCSI: blkdev_get_by_path() failed\n");
scsi_device_put(sd);
return NULL;
}
pdv->pdv_bd = bd;
dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
if (!(dev)) {
if (!dev) {
blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
scsi_device_put(sd);
return NULL;
}
printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
return dev;
@ -459,7 +459,7 @@ static struct se_device *pscsi_create_type_rom(
u32 dev_flags = 0;
if (scsi_device_get(sd)) {
printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
return NULL;
@ -467,11 +467,11 @@ static struct se_device *pscsi_create_type_rom(
spin_unlock_irq(sh->host_lock);
dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
if (!(dev)) {
if (!dev) {
scsi_device_put(sd);
return NULL;
}
printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
@ -495,10 +495,10 @@ static struct se_device *pscsi_create_type_other(
spin_unlock_irq(sh->host_lock);
dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
if (!(dev))
if (!dev)
return NULL;
printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
@ -517,8 +517,8 @@ static struct se_device *pscsi_create_virtdevice(
struct Scsi_Host *sh = phv->phv_lld_host;
int legacy_mode_enable = 0;
if (!(pdv)) {
printk(KERN_ERR "Unable to locate struct pscsi_dev_virt"
if (!pdv) {
pr_err("Unable to locate struct pscsi_dev_virt"
" parameter\n");
return ERR_PTR(-EINVAL);
}
@ -526,9 +526,9 @@ static struct se_device *pscsi_create_virtdevice(
* If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
* struct Scsi_Host we will need to bring the TCM/pSCSI object online
*/
if (!(sh)) {
if (!sh) {
if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
printk(KERN_ERR "pSCSI: Unable to locate struct"
pr_err("pSCSI: Unable to locate struct"
" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
return ERR_PTR(-ENODEV);
}
@ -537,7 +537,7 @@ static struct se_device *pscsi_create_virtdevice(
* reference, we enforce that udev_path has been set
*/
if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
printk(KERN_ERR "pSCSI: udev_path attribute has not"
pr_err("pSCSI: udev_path attribute has not"
" been set before ENABLE=1\n");
return ERR_PTR(-EINVAL);
}
@ -548,8 +548,8 @@ static struct se_device *pscsi_create_virtdevice(
*/
if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
spin_lock(&hba->device_lock);
if (!(list_empty(&hba->hba_dev_list))) {
printk(KERN_ERR "pSCSI: Unable to set hba_mode"
if (!list_empty(&hba->hba_dev_list)) {
pr_err("pSCSI: Unable to set hba_mode"
" with active devices\n");
spin_unlock(&hba->device_lock);
return ERR_PTR(-EEXIST);
@ -565,14 +565,14 @@ static struct se_device *pscsi_create_virtdevice(
} else {
sh = scsi_host_lookup(pdv->pdv_host_id);
if (IS_ERR(sh)) {
printk(KERN_ERR "pSCSI: Unable to locate"
pr_err("pSCSI: Unable to locate"
" pdv_host_id: %d\n", pdv->pdv_host_id);
return (struct se_device *) sh;
}
}
} else {
if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while"
pr_err("pSCSI: PHV_VIRUTAL_HOST_ID set while"
" struct Scsi_Host exists\n");
return ERR_PTR(-EEXIST);
}
@ -601,7 +601,7 @@ static struct se_device *pscsi_create_virtdevice(
break;
}
if (!(dev)) {
if (!dev) {
if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
scsi_host_put(sh);
else if (legacy_mode_enable) {
@ -615,7 +615,7 @@ static struct se_device *pscsi_create_virtdevice(
}
spin_unlock_irq(sh->host_lock);
printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id);
if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
@ -729,8 +729,8 @@ after_mode_sense:
u32 blocksize;
buf = sg_virt(&sg[0]);
if (!(buf)) {
printk(KERN_ERR "Unable to get buf for scatterlist\n");
if (!buf) {
pr_err("Unable to get buf for scatterlist\n");
goto after_mode_select;
}
@ -760,33 +760,19 @@ after_mode_select:
}
static struct se_task *
pscsi_alloc_task(struct se_cmd *cmd)
pscsi_alloc_task(unsigned char *cdb)
{
struct pscsi_plugin_task *pt;
unsigned char *cdb = cmd->t_task_cdb;
pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
if (!pt) {
printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n");
return NULL;
}
/*
* If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation,
* allocate the extended CDB buffer for per struct se_task context
* pt->pscsi_cdb now.
* Dynamically alloc cdb space, since it may be larger than
* TCM_MAX_COMMAND_SIZE
*/
if (cmd->t_task_cdb != cmd->__t_task_cdb) {
pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
if (!(pt->pscsi_cdb)) {
printk(KERN_ERR "pSCSI: Unable to allocate extended"
" pt->pscsi_cdb\n");
kfree(pt);
pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL);
if (!pt) {
pr_err("Unable to allocate struct pscsi_plugin_task\n");
return NULL;
}
} else
pt->pscsi_cdb = &pt->__pscsi_cdb[0];
return &pt->pscsi_task;
}
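pscsi_alloc_task() previously kept a fixed __pscsi_cdb[] buffer and kmalloc'd a second buffer only for extended CDBs, which forced pscsi_free_task() to remember which case it was in. Folding the CDB into one variable-length allocation removes both the branch and the matching kfree(); a sketch of what the single allocation relies on, assuming struct pscsi_plugin_task now ends in a flexible CDB array (its header change is not shown here):

    /* Sketch: one allocation sized for the struct plus the actual CDB */
    pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL);

scsi_command_size() derives the length from the opcode's group bits, so variable-length CDBs get exactly the space they need.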
@ -837,8 +823,8 @@ static int pscsi_blk_get_request(struct se_task *task)
pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue,
(task->task_data_direction == DMA_TO_DEVICE),
GFP_KERNEL);
if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) {
printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n",
if (!pt->pscsi_req || IS_ERR(pt->pscsi_req)) {
pr_err("PSCSI: blk_get_request() failed: %ld\n",
PTR_ERR(pt->pscsi_req));
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
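blk_get_request() returns either a valid request or an ERR_PTR-encoded errno, so the failure path should log PTR_ERR() rather than the boolean IS_ERR() (corrected above). A hedged fragment of the convention, assuming a valid struct request_queue *q:

struct request *rq = blk_get_request(q, WRITE, GFP_KERNEL);

if (!rq || IS_ERR(rq)) {
	/* IS_ERR() is only the test; PTR_ERR() recovers the negative
	 * errno that the error pointer encodes. A NULL return carries
	 * no code, so report -ENOMEM for that case. */
	pr_err("blk_get_request() failed: %ld\n",
	       rq ? PTR_ERR(rq) : -ENOMEM);
	return -EINVAL;
}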
@ -883,14 +869,7 @@ static int pscsi_do_task(struct se_task *task)
static void pscsi_free_task(struct se_task *task)
{
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct se_cmd *cmd = task->task_se_cmd;
/*
* Release the extended CDB allocation from pscsi_alloc_task()
* if one exists.
*/
if (cmd->t_task_cdb != cmd->__t_task_cdb)
kfree(pt->pscsi_cdb);
/*
* We do not release the bio(s) here associated with this task, as
* this is handled by bio_put() and pscsi_bi_endio().
@ -936,7 +915,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
switch (token) {
case Opt_scsi_host_id:
if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
printk(KERN_ERR "PSCSI[%d]: Unable to accept"
pr_err("PSCSI[%d]: Unable to accept"
" scsi_host_id while phv_mode =="
" PHV_LLD_SCSI_HOST_NO\n",
phv->phv_host_id);
@ -945,14 +924,14 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
}
match_int(args, &arg);
pdv->pdv_host_id = arg;
printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:"
pr_debug("PSCSI[%d]: Referencing SCSI Host ID:"
" %d\n", phv->phv_host_id, pdv->pdv_host_id);
pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
break;
case Opt_scsi_channel_id:
match_int(args, &arg);
pdv->pdv_channel_id = arg;
printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel"
pr_debug("PSCSI[%d]: Referencing SCSI Channel"
" ID: %d\n", phv->phv_host_id,
pdv->pdv_channel_id);
pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
@ -960,7 +939,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
case Opt_scsi_target_id:
match_int(args, &arg);
pdv->pdv_target_id = arg;
printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target"
pr_debug("PSCSI[%d]: Referencing SCSI Target"
" ID: %d\n", phv->phv_host_id,
pdv->pdv_target_id);
pdv->pdv_flags |= PDF_HAS_TARGET_ID;
@ -968,7 +947,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
case Opt_scsi_lun_id:
match_int(args, &arg);
pdv->pdv_lun_id = arg;
printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:"
pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:"
" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
pdv->pdv_flags |= PDF_HAS_LUN_ID;
break;
@ -991,7 +970,7 @@ static ssize_t pscsi_check_configfs_dev_params(
if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
!(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
!(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and"
pr_err("Missing scsi_channel_id=, scsi_target_id= and"
" scsi_lun_id= parameters\n");
return -EINVAL;
}
@ -1061,8 +1040,8 @@ static inline struct bio *pscsi_get_bio(int sg_num)
* in block/blk-core.c:blk_make_request()
*/
bio = bio_kmalloc(GFP_KERNEL, sg_num);
if (!(bio)) {
printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n");
if (!bio) {
pr_err("PSCSI: bio_kmalloc() failed\n");
return NULL;
}
bio->bi_end_io = pscsi_bi_endio;
@ -1070,12 +1049,6 @@ static inline struct bio *pscsi_get_bio(int sg_num)
return bio;
}
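__pscsi_map_task_SG() below strings the bios returned by pscsi_get_bio() into a singly linked chain that blk_make_request() later consumes. A sketch of just the list bookkeeping, with next_filled_bio() standing in (hypothetically) for the page-filling loop:

struct bio *hbio = NULL, *tbio = NULL, *bio;

while ((bio = next_filled_bio()) != NULL) {	/* hypothetical helper */
	if (!hbio)
		hbio = tbio = bio;		/* first bio heads the chain */
	else
		tbio = tbio->bi_next = bio;	/* O(1) append at the tail */
}
/* hbio is then handed to blk_make_request(); on failure the chain is
 * unwound bio by bio, as in the fail: label below. */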
#if 0
#define DEBUG_PSCSI(x...) printk(x)
#else
#define DEBUG_PSCSI(x...)
#endif
static int __pscsi_map_task_SG(
struct se_task *task,
struct scatterlist *task_sg,
@ -1106,34 +1079,34 @@ static int __pscsi_map_task_SG(
* is ported to upstream SCSI passthrough functionality that accepts
* struct scatterlist->page_link or struct page as a parameter.
*/
DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages);
pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
for_each_sg(task_sg, sg, task_sg_num, i) {
page = sg_page(sg);
off = sg->offset;
len = sg->length;
DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i,
pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i,
page, len, off);
while (len > 0 && data_len > 0) {
bytes = min_t(unsigned int, len, PAGE_SIZE - off);
bytes = min(bytes, data_len);
if (!(bio)) {
if (!bio) {
nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
nr_pages -= nr_vecs;
/*
* Calls bio_kmalloc() and sets bio->bi_end_io()
*/
bio = pscsi_get_bio(nr_vecs);
if (!(bio))
if (!bio)
goto fail;
if (rw)
bio->bi_rw |= REQ_WRITE;
DEBUG_PSCSI("PSCSI: Allocated bio: %p,"
pr_debug("PSCSI: Allocated bio: %p,"
" dir: %s nr_vecs: %d\n", bio,
(rw) ? "rw" : "r", nr_vecs);
/*
@ -1148,7 +1121,7 @@ static int __pscsi_map_task_SG(
tbio = tbio->bi_next = bio;
}
DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d"
pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
" bio: %p page: %p len: %d off: %d\n", i, bio,
page, len, off);
@ -1157,11 +1130,11 @@ static int __pscsi_map_task_SG(
if (rc != bytes)
goto fail;
DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
bio->bi_vcnt, nr_vecs);
if (bio->bi_vcnt > nr_vecs) {
DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:"
pr_debug("PSCSI: Reached bio->bi_vcnt max:"
" %d i: %d bio: %p, allocating another"
" bio\n", bio->bi_vcnt, i, bio);
/*
@ -1183,15 +1156,15 @@ static int __pscsi_map_task_SG(
* Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND
* primary SCSI WRITE payload mapped for struct se_task->task_sg[]
*/
if (!(bidi_read)) {
if (!bidi_read) {
/*
* Starting with v2.6.31, call blk_make_request() passing in *hbio to
* allocate the pSCSI task a struct request.
*/
pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue,
hbio, GFP_KERNEL);
if (!(pt->pscsi_req)) {
printk(KERN_ERR "pSCSI: blk_make_request() failed\n");
if (!pt->pscsi_req) {
pr_err("pSCSI: blk_make_request() failed\n");
goto fail;
}
/*
@ -1200,7 +1173,7 @@ static int __pscsi_map_task_SG(
*/
pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
return task->task_sg_num;
return task->task_sg_nents;
}
/*
* Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND
@ -1208,13 +1181,13 @@ static int __pscsi_map_task_SG(
*/
pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue,
hbio, GFP_KERNEL);
if (!(pt->pscsi_req->next_rq)) {
printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n");
if (!pt->pscsi_req->next_rq) {
pr_err("pSCSI: blk_make_request() failed for BIDI\n");
goto fail;
}
pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);
return task->task_sg_num;
return task->task_sg_nents;
fail:
while (hbio) {
bio = hbio;
@ -1233,14 +1206,14 @@ static int pscsi_map_task_SG(struct se_task *task)
* Setup the main struct request for the task->task_sg[] payload
*/
ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0);
ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_nents, 0);
if (ret >= 0 && task->task_sg_bidi) {
/*
* If present, set up the extra BIDI-COMMAND SCSI READ
* struct request and payload.
*/
ret = __pscsi_map_task_SG(task, task->task_sg_bidi,
task->task_sg_num, 1);
task->task_sg_nents, 1);
}
if (ret < 0)
@ -1319,9 +1292,9 @@ static inline void pscsi_process_SAM_status(
struct pscsi_plugin_task *pt)
{
task->task_scsi_status = status_byte(pt->pscsi_result);
if ((task->task_scsi_status)) {
if (task->task_scsi_status) {
task->task_scsi_status <<= 1;
printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:"
pr_debug("PSCSI Status Byte exception at task: %p CDB:"
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pt->pscsi_result);
}
@ -1331,7 +1304,7 @@ static inline void pscsi_process_SAM_status(
transport_complete_task(task, (!task->task_scsi_status));
break;
default:
printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:"
pr_debug("PSCSI Host Byte exception at task: %p CDB:"
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pt->pscsi_result);
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;


@ -23,13 +23,12 @@
struct pscsi_plugin_task {
struct se_task pscsi_task;
unsigned char *pscsi_cdb;
unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE];
unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
int pscsi_direction;
int pscsi_result;
u32 pscsi_resid;
struct request *pscsi_req;
unsigned char pscsi_cdb[0];
} ____cacheline_aligned;
#define PDF_HAS_CHANNEL_ID 0x01


@ -44,12 +44,8 @@
#include "target_core_rd.h"
static struct se_subsystem_api rd_dr_template;
static struct se_subsystem_api rd_mcp_template;
/* #define DEBUG_RAMDISK_MCP */
/* #define DEBUG_RAMDISK_DR */
/* rd_attach_hba(): (Part of se_subsystem_api_t template)
*
*
@ -59,8 +55,8 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id)
struct rd_host *rd_host;
rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
if (!(rd_host)) {
printk(KERN_ERR "Unable to allocate memory for struct rd_host\n");
if (!rd_host) {
pr_err("Unable to allocate memory for struct rd_host\n");
return -ENOMEM;
}
@ -68,10 +64,10 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id)
hba->hba_ptr = rd_host;
printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
" MaxSectors: %u\n", hba->hba_id,
rd_host->rd_host_id, RD_MAX_SECTORS);
@ -82,7 +78,7 @@ static void rd_detach_hba(struct se_hba *hba)
{
struct rd_host *rd_host = hba->hba_ptr;
printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
kfree(rd_host);
@ -111,7 +107,7 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
for (j = 0; j < sg_per_table; j++) {
pg = sg_page(&sg[j]);
if ((pg)) {
if (pg) {
__free_page(pg);
page_count++;
}
@ -120,7 +116,7 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
kfree(sg);
}
printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
" Device ID: %u, pages %u in %u tables total bytes %lu\n",
rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
@ -145,7 +141,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
struct scatterlist *sg;
if (rd_dev->rd_page_count <= 0) {
printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
pr_err("Illegal page count: %u for Ramdisk device\n",
rd_dev->rd_page_count);
return -EINVAL;
}
@ -154,8 +150,8 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
sg_tables = (total_sg_needed / max_sg_per_table) + 1;
sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
if (!(sg_table)) {
printk(KERN_ERR "Unable to allocate memory for Ramdisk"
if (!sg_table) {
pr_err("Unable to allocate memory for Ramdisk"
" scatterlist tables\n");
return -ENOMEM;
}
@ -169,13 +165,13 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
GFP_KERNEL);
if (!(sg)) {
printk(KERN_ERR "Unable to allocate scatterlist array"
if (!sg) {
pr_err("Unable to allocate scatterlist array"
" for struct rd_dev\n");
return -ENOMEM;
}
sg_init_table((struct scatterlist *)&sg[0], sg_per_table);
sg_init_table(sg, sg_per_table);
sg_table[i].sg_table = sg;
sg_table[i].rd_sg_count = sg_per_table;
@ -185,8 +181,8 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
for (j = 0; j < sg_per_table; j++) {
pg = alloc_pages(GFP_KERNEL, 0);
if (!(pg)) {
printk(KERN_ERR "Unable to allocate scatterlist"
if (!pg) {
pr_err("Unable to allocate scatterlist"
" pages for struct rd_dev_sg_table\n");
return -ENOMEM;
}
@ -198,7 +194,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
total_sg_needed -= sg_per_table;
}
printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
rd_dev->rd_dev_id, rd_dev->rd_page_count,
rd_dev->sg_table_count);
@ -215,8 +211,8 @@ static void *rd_allocate_virtdevice(
struct rd_host *rd_host = hba->hba_ptr;
rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
if (!(rd_dev)) {
printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n");
if (!rd_dev) {
pr_err("Unable to allocate memory for struct rd_dev\n");
return NULL;
}
@ -226,11 +222,6 @@ static void *rd_allocate_virtdevice(
return rd_dev;
}
static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
{
return rd_allocate_virtdevice(hba, name, 1);
}
static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
{
return rd_allocate_virtdevice(hba, name, 0);
@ -270,16 +261,15 @@ static struct se_device *rd_create_virtdevice(
dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
dev = transport_add_device_to_core_hba(hba,
(rd_dev->rd_direct) ? &rd_dr_template :
&rd_mcp_template, se_dev, dev_flags, rd_dev,
&dev_limits, prod, rev);
if (!(dev))
if (!dev)
goto fail;
rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
rd_dev->rd_queue_depth = dev->queue_depth;
printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
" %u pages in %u tables, %lu total bytes\n",
rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
@ -293,14 +283,6 @@ fail:
return ERR_PTR(ret);
}
static struct se_device *rd_DIRECT_create_virtdevice(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
void *p)
{
return rd_create_virtdevice(hba, se_dev, p, 1);
}
static struct se_device *rd_MEMCPY_create_virtdevice(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
@ -327,16 +309,15 @@ static inline struct rd_request *RD_REQ(struct se_task *task)
}
static struct se_task *
rd_alloc_task(struct se_cmd *cmd)
rd_alloc_task(unsigned char *cdb)
{
struct rd_request *rd_req;
rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
if (!rd_req) {
printk(KERN_ERR "Unable to allocate struct rd_request\n");
pr_err("Unable to allocate struct rd_request\n");
return NULL;
}
rd_req->rd_dev = cmd->se_dev->dev_ptr;
return &rd_req->rd_task;
}
@ -357,7 +338,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return sg_table;
}
printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n",
pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
page);
return NULL;
@ -370,7 +351,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
static int rd_MEMCPY_read(struct rd_request *req)
{
struct se_task *task = &req->rd_task;
struct rd_dev *dev = req->rd_dev;
struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
struct rd_dev_sg_table *table;
struct scatterlist *sg_d, *sg_s;
void *dst, *src;
@ -379,32 +360,32 @@ static int rd_MEMCPY_read(struct rd_request *req)
u32 rd_offset = req->rd_offset;
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
if (!table)
return -EINVAL;
table_sg_end = (table->page_end_offset - req->rd_page);
sg_d = task->task_sg;
sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
" %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
req->rd_page, req->rd_offset);
#endif
src_offset = rd_offset;
while (req->rd_size) {
if ((sg_d[i].length - dst_offset) <
(sg_s[j].length - src_offset)) {
length = (sg_d[i].length - dst_offset);
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
pr_debug("Step 1 - sg_d[%d]: %p length: %d"
" offset: %u sg_s[%d].length: %u\n", i,
&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
sg_s[j].length);
printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
pr_debug("Step 1 - length: %u dst_offset: %u"
" src_offset: %u\n", length, dst_offset,
src_offset);
#endif
if (length > req->rd_size)
length = req->rd_size;
@ -421,15 +402,15 @@ static int rd_MEMCPY_read(struct rd_request *req)
page_end = 0;
} else {
length = (sg_s[j].length - src_offset);
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
pr_debug("Step 2 - sg_d[%d]: %p length: %d"
" offset: %u sg_s[%d].length: %u\n", i,
&sg_d[i], sg_d[i].length, sg_d[i].offset,
j, sg_s[j].length);
printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
pr_debug("Step 2 - length: %u dst_offset: %u"
" src_offset: %u\n", length, dst_offset,
src_offset);
#endif
if (length > req->rd_size)
length = req->rd_size;
@ -453,31 +434,28 @@ static int rd_MEMCPY_read(struct rd_request *req)
memcpy(dst, src, length);
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
pr_debug("page: %u, remaining size: %u, length: %u,"
" i: %u, j: %u\n", req->rd_page,
(req->rd_size - length), length, i, j);
#endif
req->rd_size -= length;
if (!(req->rd_size))
if (!req->rd_size)
return 0;
if (!page_end)
continue;
if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "page: %u in same page table\n",
pr_debug("page: %u in same page table\n",
req->rd_page);
#endif
continue;
}
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "getting new page table for page: %u\n",
pr_debug("getting new page table for page: %u\n",
req->rd_page);
#endif
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
if (!table)
return -EINVAL;
sg_s = &table->sg_table[j = 0];
@ -493,7 +471,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
static int rd_MEMCPY_write(struct rd_request *req)
{
struct se_task *task = &req->rd_task;
struct rd_dev *dev = req->rd_dev;
struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
struct rd_dev_sg_table *table;
struct scatterlist *sg_d, *sg_s;
void *dst, *src;
@ -502,32 +480,32 @@ static int rd_MEMCPY_write(struct rd_request *req)
u32 rd_offset = req->rd_offset;
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
if (!table)
return -EINVAL;
table_sg_end = (table->page_end_offset - req->rd_page);
sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
sg_s = task->task_sg;
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
" Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
req->rd_page, req->rd_offset);
#endif
dst_offset = rd_offset;
while (req->rd_size) {
if ((sg_s[i].length - src_offset) <
(sg_d[j].length - dst_offset)) {
length = (sg_s[i].length - src_offset);
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
pr_debug("Step 1 - sg_s[%d]: %p length: %d"
" offset: %d sg_d[%d].length: %u\n", i,
&sg_s[i], sg_s[i].length, sg_s[i].offset,
j, sg_d[j].length);
printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
pr_debug("Step 1 - length: %u src_offset: %u"
" dst_offset: %u\n", length, src_offset,
dst_offset);
#endif
if (length > req->rd_size)
length = req->rd_size;
@ -544,15 +522,15 @@ static int rd_MEMCPY_write(struct rd_request *req)
page_end = 0;
} else {
length = (sg_d[j].length - dst_offset);
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
pr_debug("Step 2 - sg_s[%d]: %p length: %d"
" offset: %d sg_d[%d].length: %u\n", i,
&sg_s[i], sg_s[i].length, sg_s[i].offset,
j, sg_d[j].length);
printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
pr_debug("Step 2 - length: %u src_offset: %u"
" dst_offset: %u\n", length, src_offset,
dst_offset);
#endif
if (length > req->rd_size)
length = req->rd_size;
@ -576,31 +554,28 @@ static int rd_MEMCPY_write(struct rd_request *req)
memcpy(dst, src, length);
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
pr_debug("page: %u, remaining size: %u, length: %u,"
" i: %u, j: %u\n", req->rd_page,
(req->rd_size - length), length, i, j);
#endif
req->rd_size -= length;
if (!(req->rd_size))
if (!req->rd_size)
return 0;
if (!page_end)
continue;
if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "page: %u in same page table\n",
pr_debug("page: %u in same page table\n",
req->rd_page);
#endif
continue;
}
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "getting new page table for page: %u\n",
pr_debug("getting new page table for page: %u\n",
req->rd_page);
#endif
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
if (!table)
return -EINVAL;
sg_d = &table->sg_table[j = 0];
@ -641,273 +616,6 @@ static int rd_MEMCPY_do_task(struct se_task *task)
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
/* rd_DIRECT_with_offset():
*
*
*/
static int rd_DIRECT_with_offset(
struct se_task *task,
struct list_head *se_mem_list,
u32 *se_mem_cnt,
u32 *task_offset)
{
struct rd_request *req = RD_REQ(task);
struct rd_dev *dev = req->rd_dev;
struct rd_dev_sg_table *table;
struct se_mem *se_mem;
struct scatterlist *sg_s;
u32 j = 0, set_offset = 1;
u32 get_next_table = 0, offset_length, table_sg_end;
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -EINVAL;
table_sg_end = (table->page_end_offset - req->rd_page);
sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
(task->task_data_direction == DMA_TO_DEVICE) ?
"Write" : "Read",
task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
#endif
while (req->rd_size) {
se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
if (!(se_mem)) {
printk(KERN_ERR "Unable to allocate struct se_mem\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&se_mem->se_list);
if (set_offset) {
offset_length = sg_s[j].length - req->rd_offset;
if (offset_length > req->rd_size)
offset_length = req->rd_size;
se_mem->se_page = sg_page(&sg_s[j++]);
se_mem->se_off = req->rd_offset;
se_mem->se_len = offset_length;
set_offset = 0;
get_next_table = (j > table_sg_end);
goto check_eot;
}
offset_length = (req->rd_size < req->rd_offset) ?
req->rd_size : req->rd_offset;
se_mem->se_page = sg_page(&sg_s[j]);
se_mem->se_len = offset_length;
set_offset = 1;
check_eot:
#ifdef DEBUG_RAMDISK_DR
printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
" se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
req->rd_page, req->rd_size, offset_length, j, se_mem,
se_mem->se_page, se_mem->se_off, se_mem->se_len);
#endif
list_add_tail(&se_mem->se_list, se_mem_list);
(*se_mem_cnt)++;
req->rd_size -= offset_length;
if (!(req->rd_size))
goto out;
if (!set_offset && !get_next_table)
continue;
if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_DR
printk(KERN_INFO "page: %u in same page table\n",
req->rd_page);
#endif
continue;
}
#ifdef DEBUG_RAMDISK_DR
printk(KERN_INFO "getting new page table for page: %u\n",
req->rd_page);
#endif
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -EINVAL;
sg_s = &table->sg_table[j = 0];
}
out:
task->task_se_cmd->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
*se_mem_cnt);
#endif
return 0;
}
/* rd_DIRECT_without_offset():
*
*
*/
static int rd_DIRECT_without_offset(
struct se_task *task,
struct list_head *se_mem_list,
u32 *se_mem_cnt,
u32 *task_offset)
{
struct rd_request *req = RD_REQ(task);
struct rd_dev *dev = req->rd_dev;
struct rd_dev_sg_table *table;
struct se_mem *se_mem;
struct scatterlist *sg_s;
u32 length, j = 0;
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -EINVAL;
sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
(task->task_data_direction == DMA_TO_DEVICE) ?
"Write" : "Read",
task->task_lba, req->rd_size, req->rd_page);
#endif
while (req->rd_size) {
se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
if (!(se_mem)) {
printk(KERN_ERR "Unable to allocate struct se_mem\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&se_mem->se_list);
length = (req->rd_size < sg_s[j].length) ?
req->rd_size : sg_s[j].length;
se_mem->se_page = sg_page(&sg_s[j++]);
se_mem->se_len = length;
#ifdef DEBUG_RAMDISK_DR
printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
" se_page: %p se_off: %u se_len: %u\n", req->rd_page,
req->rd_size, j, se_mem, se_mem->se_page,
se_mem->se_off, se_mem->se_len);
#endif
list_add_tail(&se_mem->se_list, se_mem_list);
(*se_mem_cnt)++;
req->rd_size -= length;
if (!(req->rd_size))
goto out;
if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_DR
printk("page: %u in same page table\n",
req->rd_page);
#endif
continue;
}
#ifdef DEBUG_RAMDISK_DR
printk(KERN_INFO "getting new page table for page: %u\n",
req->rd_page);
#endif
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -EINVAL;
sg_s = &table->sg_table[j = 0];
}
out:
task->task_se_cmd->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
*se_mem_cnt);
#endif
return 0;
}
/* rd_DIRECT_do_se_mem_map():
*
*
*/
static int rd_DIRECT_do_se_mem_map(
struct se_task *task,
struct list_head *se_mem_list,
void *in_mem,
struct se_mem *in_se_mem,
struct se_mem **out_se_mem,
u32 *se_mem_cnt,
u32 *task_offset_in)
{
struct se_cmd *cmd = task->task_se_cmd;
struct rd_request *req = RD_REQ(task);
u32 task_offset = *task_offset_in;
unsigned long long lba;
int ret;
int block_size = task->se_dev->se_sub_dev->se_dev_attrib.block_size;
lba = task->task_lba;
req->rd_page = ((task->task_lba * block_size) / PAGE_SIZE);
req->rd_offset = (do_div(lba, (PAGE_SIZE / block_size))) * block_size;
req->rd_size = task->task_size;
if (req->rd_offset)
ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
task_offset_in);
else
ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
task_offset_in);
if (ret < 0)
return ret;
if (cmd->se_tfo->task_sg_chaining == 0)
return 0;
/*
* Currently prevent writers from multiple HW fabrics doing
* pci_map_sg() to RD_DR's internal scatterlist memory.
*/
if (cmd->data_direction == DMA_TO_DEVICE) {
printk(KERN_ERR "DMA_TO_DEVICE not supported for"
" RAMDISK_DR with task_sg_chaining=1\n");
return -ENOSYS;
}
/*
* Special case for if task_sg_chaining is enabled, then
* we setup struct se_task->task_sg[], as it will be used by
* transport_do_task_sg_chain() for creating chained SGLs
* across multiple struct se_task->task_sg[].
*/
ret = transport_init_task_sg(task,
list_first_entry(&cmd->t_mem_list,
struct se_mem, se_list),
task_offset);
if (ret <= 0)
return ret;
return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
list_first_entry(&cmd->t_mem_list,
struct se_mem, se_list),
out_se_mem, se_mem_cnt, task_offset_in);
}
/* rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
*
*
*/
static int rd_DIRECT_do_task(struct se_task *task)
{
/*
* At this point the locally allocated RD tables have been mapped
* to struct se_mem elements in rd_DIRECT_do_se_mem_map().
*/
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
/* rd_free_task(): (Part of se_subsystem_api_t template)
*
*
@ -952,7 +660,7 @@ static ssize_t rd_set_configfs_dev_params(
case Opt_rd_pages:
match_int(args, &arg);
rd_dev->rd_page_count = arg;
printk(KERN_INFO "RAMDISK: Referencing Page"
pr_debug("RAMDISK: Referencing Page"
" Count: %u\n", rd_dev->rd_page_count);
rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
break;
@ -970,7 +678,7 @@ static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys
struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
printk(KERN_INFO "Missing rd_pages= parameter\n");
pr_debug("Missing rd_pages= parameter\n");
return -EINVAL;
}
@ -1022,27 +730,6 @@ static sector_t rd_get_blocks(struct se_device *dev)
return blocks_long;
}
static struct se_subsystem_api rd_dr_template = {
.name = "rd_dr",
.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
.attach_hba = rd_attach_hba,
.detach_hba = rd_detach_hba,
.allocate_virtdevice = rd_DIRECT_allocate_virtdevice,
.create_virtdevice = rd_DIRECT_create_virtdevice,
.free_device = rd_free_device,
.alloc_task = rd_alloc_task,
.do_task = rd_DIRECT_do_task,
.free_task = rd_free_task,
.check_configfs_dev_params = rd_check_configfs_dev_params,
.set_configfs_dev_params = rd_set_configfs_dev_params,
.show_configfs_dev_params = rd_show_configfs_dev_params,
.get_cdb = rd_get_cdb,
.get_device_rev = rd_get_device_rev,
.get_device_type = rd_get_device_type,
.get_blocks = rd_get_blocks,
.do_se_mem_map = rd_DIRECT_do_se_mem_map,
};
static struct se_subsystem_api rd_mcp_template = {
.name = "rd_mcp",
.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
@ -1067,13 +754,8 @@ int __init rd_module_init(void)
{
int ret;
ret = transport_subsystem_register(&rd_dr_template);
if (ret < 0)
return ret;
ret = transport_subsystem_register(&rd_mcp_template);
if (ret < 0) {
transport_subsystem_release(&rd_dr_template);
return ret;
}
@ -1082,6 +764,5 @@ int __init rd_module_init(void)
void rd_module_exit(void)
{
transport_subsystem_release(&rd_dr_template);
transport_subsystem_release(&rd_mcp_template);
}
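With the rd_dr template gone, module setup and teardown collapse to the single remaining rd_mcp registration; roughly the resulting code, reconstructed from this hunk:

int __init rd_module_init(void)
{
	int ret;

	ret = transport_subsystem_register(&rd_mcp_template);
	if (ret < 0)
		return ret;

	return 0;
}

void rd_module_exit(void)
{
	transport_subsystem_release(&rd_mcp_template);
}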


@ -32,8 +32,6 @@ struct rd_request {
u32 rd_page_count;
/* Scatterlist count */
u32 rd_size;
/* Ramdisk device */
struct rd_dev *rd_dev;
} ____cacheline_aligned;
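Dropping the cached rd_dev pointer works because the device is always reachable through the embedded se_task; a sketch of the accessor pattern the read/write paths above now use inline:

static inline struct rd_dev *rd_req_to_dev(struct rd_request *req)
{
	/* se_dev is set when the task is queued to the device, so the
	 * removed back-pointer duplicated existing state. */
	return req->rd_task.se_dev->dev_ptr;
}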
struct rd_dev_sg_table {


@ -41,13 +41,6 @@
#include "target_core_alua.h"
#include "target_core_pr.h"
#define DEBUG_LUN_RESET
#ifdef DEBUG_LUN_RESET
#define DEBUG_LR(x...) printk(KERN_INFO x)
#else
#define DEBUG_LR(x...)
#endif
struct se_tmr_req *core_tmr_alloc_req(
struct se_cmd *se_cmd,
void *fabric_tmr_ptr,
@ -57,8 +50,8 @@ struct se_tmr_req *core_tmr_alloc_req(
tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
GFP_ATOMIC : GFP_KERNEL);
if (!(tmr)) {
printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
if (!tmr) {
pr_err("Unable to allocate struct se_tmr_req\n");
return ERR_PTR(-ENOMEM);
}
tmr->task_cmd = se_cmd;
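core_tmr_alloc_req() above picks its GFP flags at run time because TMR requests may be allocated from interrupt context; a minimal sketch of that pattern:

gfp_t gfp = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
struct se_tmr_req *tmr = kmem_cache_zalloc(se_tmr_req_cache, gfp);

/* GFP_ATOMIC may not sleep and can fail under memory pressure, so
 * the caller must handle a NULL return on both paths. */
if (!tmr)
	return ERR_PTR(-ENOMEM);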
@ -93,14 +86,14 @@ static void core_tmr_handle_tas_abort(
int tas,
int fe_count)
{
if (!(fe_count)) {
if (!fe_count) {
transport_cmd_finish_abort(cmd, 1);
return;
}
/*
* TASK ABORTED status (TAS) bit support
*/
if (((tmr_nacl != NULL) &&
if ((tmr_nacl &&
(tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
transport_send_task_abort(cmd);
@ -141,13 +134,13 @@ int core_tmr_lun_reset(
tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
if (tmr_nacl && tmr_tpg) {
DEBUG_LR("LUN_RESET: TMR caller fabric: %s"
pr_debug("LUN_RESET: TMR caller fabric: %s"
" initiator port %s\n",
tmr_tpg->se_tpg_tfo->get_fabric_name(),
tmr_nacl->initiatorname);
}
}
DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n",
pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
dev->transport->name, tas);
/*
@ -163,8 +156,8 @@ int core_tmr_lun_reset(
continue;
cmd = tmr_p->task_cmd;
if (!(cmd)) {
printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n");
if (!cmd) {
pr_err("Unable to locate struct se_cmd for TMR\n");
continue;
}
/*
@ -172,14 +165,14 @@ int core_tmr_lun_reset(
* parameter (e.g. for PROUT PREEMPT_AND_ABORT service action),
* skip non-registration key matching TMRs.
*/
if ((preempt_and_abort_list != NULL) &&
if (preempt_and_abort_list &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
continue;
spin_unlock_irq(&dev->se_tmr_lock);
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (!(atomic_read(&cmd->t_transport_active))) {
if (!atomic_read(&cmd->t_transport_active)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
spin_lock_irq(&dev->se_tmr_lock);
continue;
@ -189,7 +182,7 @@ int core_tmr_lun_reset(
spin_lock_irq(&dev->se_tmr_lock);
continue;
}
DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
" Response: 0x%02x, t_state: %d\n",
(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
tmr_p->function, tmr_p->response, cmd->t_state);
@ -224,7 +217,7 @@ int core_tmr_lun_reset(
list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
t_state_list) {
if (!task->task_se_cmd) {
printk(KERN_ERR "task->task_se_cmd is NULL!\n");
pr_err("task->task_se_cmd is NULL!\n");
continue;
}
cmd = task->task_se_cmd;
@ -233,7 +226,7 @@ int core_tmr_lun_reset(
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
if ((preempt_and_abort_list != NULL) &&
if (preempt_and_abort_list &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
continue;
@ -248,14 +241,14 @@ int core_tmr_lun_reset(
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
spin_lock_irqsave(&cmd->t_state_lock, flags);
DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
pr_debug("LUN_RESET: %s cmd: %p task: %p"
" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
"def_t_state: %d/%d cdb: 0x%02x\n",
(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
cmd->se_tfo->get_task_tag(cmd), 0,
cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
cmd->deferred_t_state, cmd->t_task_cdb[0]);
DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
" t_task_cdbs: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d -- t_transport_active: %d"
" t_transport_stop: %d t_transport_sent: %d\n",
@ -272,10 +265,10 @@ int core_tmr_lun_reset(
spin_unlock_irqrestore(
&cmd->t_state_lock, flags);
DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
pr_debug("LUN_RESET: Waiting for task: %p to shutdown"
" for dev: %p\n", task, dev);
wait_for_completion(&task->task_stop_comp);
DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
pr_debug("LUN_RESET Completed task: %p shutdown for"
" dev: %p\n", task, dev);
spin_lock_irqsave(&cmd->t_state_lock, flags);
atomic_dec(&cmd->t_task_cdbs_left);
@ -288,10 +281,10 @@ int core_tmr_lun_reset(
}
__transport_stop_task_timer(task, &flags);
if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) {
if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
spin_unlock_irqrestore(
&cmd->t_state_lock, flags);
DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
" t_task_cdbs_ex_left: %d\n", task, dev,
atomic_read(&cmd->t_task_cdbs_ex_left));
@ -301,7 +294,7 @@ int core_tmr_lun_reset(
fe_count = atomic_read(&cmd->t_fe_count);
if (atomic_read(&cmd->t_transport_active)) {
DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
pr_debug("LUN_RESET: got t_transport_active = 1 for"
" task: %p, t_fe_count: %d dev: %p\n", task,
fe_count, dev);
atomic_set(&cmd->t_transport_aborted, 1);
@ -312,7 +305,7 @@ int core_tmr_lun_reset(
spin_lock_irqsave(&dev->execute_task_lock, flags);
continue;
}
DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
" t_fe_count: %d dev: %p\n", task, fe_count, dev);
atomic_set(&cmd->t_transport_aborted, 1);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@ -335,7 +328,7 @@ int core_tmr_lun_reset(
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
if ((preempt_and_abort_list != NULL) &&
if (preempt_and_abort_list &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
continue;
@ -350,7 +343,7 @@ int core_tmr_lun_reset(
list_del(&cmd->se_queue_node);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
"Preempt" : "", cmd, cmd->t_state,
atomic_read(&cmd->t_fe_count));
@ -368,20 +361,20 @@ int core_tmr_lun_reset(
* Clear any legacy SPC-2 reservation when called during
* LOGICAL UNIT RESET
*/
if (!(preempt_and_abort_list) &&
if (!preempt_and_abort_list &&
(dev->dev_flags & DF_SPC2_RESERVATIONS)) {
spin_lock(&dev->dev_reservation_lock);
dev->dev_reserved_node_acl = NULL;
dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
spin_unlock(&dev->dev_reservation_lock);
printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
}
spin_lock_irq(&dev->stats_lock);
dev->num_resets++;
spin_unlock_irq(&dev->stats_lock);
DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
pr_debug("LUN_RESET: %s for [%s] Complete\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
dev->transport->name);
return 0;


@ -72,7 +72,7 @@ static void core_clear_initiator_node_from_tpg(
continue;
if (!deve->se_lun) {
printk(KERN_ERR "%s device entries device pointer is"
pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
tpg->se_tpg_tfo->get_fabric_name());
continue;
@ -86,14 +86,13 @@ static void core_clear_initiator_node_from_tpg(
spin_lock(&lun->lun_acl_lock);
list_for_each_entry_safe(acl, acl_tmp,
&lun->lun_acl_list, lacl_list) {
if (!(strcmp(acl->initiatorname,
nacl->initiatorname)) &&
if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
(acl->mapped_lun == deve->mapped_lun))
break;
}
if (!acl) {
printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
pr_err("Unable to locate struct se_lun_acl for %s,"
" mapped_lun: %u\n", nacl->initiatorname,
deve->mapped_lun);
spin_unlock(&lun->lun_acl_lock);
@ -121,7 +120,7 @@ struct se_node_acl *__core_tpg_get_initiator_node_acl(
struct se_node_acl *acl;
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
if (!(strcmp(acl->initiatorname, initiatorname)))
if (!strcmp(acl->initiatorname, initiatorname))
return acl;
}
@ -140,8 +139,8 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
spin_lock_bh(&tpg->acl_node_lock);
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
if (!(strcmp(acl->initiatorname, initiatorname)) &&
(!(acl->dynamic_node_acl))) {
if (!strcmp(acl->initiatorname, initiatorname) &&
!acl->dynamic_node_acl) {
spin_unlock_bh(&tpg->acl_node_lock);
return acl;
}
@ -177,7 +176,7 @@ void core_tpg_add_node_to_devs(
* By default in LIO-Target $FABRIC_MOD,
* demo_mode_write_protect is ON, or READ_ONLY;
*/
if (!(tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg))) {
if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
if (dev->dev_flags & DF_READ_ONLY)
lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
else
@ -193,7 +192,7 @@ void core_tpg_add_node_to_devs(
lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
}
printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
" access for LUN in Demo Mode\n",
tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
@ -216,7 +215,7 @@ static int core_set_queue_depth_for_node(
struct se_node_acl *acl)
{
if (!acl->queue_depth) {
printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
pr_err("Queue depth for %s Initiator Node: %s is 0,"
"defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
acl->initiatorname);
acl->queue_depth = 1;
@ -236,8 +235,8 @@ static int core_create_device_list_for_node(struct se_node_acl *nacl)
nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
if (!(nacl->device_list)) {
printk(KERN_ERR "Unable to allocate memory for"
if (!nacl->device_list) {
pr_err("Unable to allocate memory for"
" struct se_node_acl->device_list\n");
return -ENOMEM;
}
@ -265,14 +264,14 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
struct se_node_acl *acl;
acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
if ((acl))
if (acl)
return acl;
if (!(tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)))
if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
return NULL;
acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
if (!(acl))
if (!acl)
return NULL;
INIT_LIST_HEAD(&acl->acl_list);
@ -307,7 +306,7 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
tpg->num_node_acls++;
spin_unlock_bh(&tpg->acl_node_lock);
printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
@ -357,10 +356,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
spin_lock_bh(&tpg->acl_node_lock);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
if ((acl)) {
if (acl) {
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
spin_unlock_bh(&tpg->acl_node_lock);
@ -375,7 +374,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
goto done;
}
printk(KERN_ERR "ACL entry for %s Initiator"
pr_err("ACL entry for %s Initiator"
" Node %s already exists for TPG %u, ignoring"
" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
@ -384,8 +383,8 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
}
spin_unlock_bh(&tpg->acl_node_lock);
if (!(se_nacl)) {
printk("struct se_node_acl pointer is NULL\n");
if (!se_nacl) {
pr_err("struct se_node_acl pointer is NULL\n");
return ERR_PTR(-EINVAL);
}
/*
@ -425,7 +424,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
spin_unlock_bh(&tpg->acl_node_lock);
done:
printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
@ -463,7 +462,7 @@ int core_tpg_del_initiator_node_acl(
/*
* Determine if the session needs to be closed by our context.
*/
if (!(tpg->se_tpg_tfo->shutdown_session(sess)))
if (!tpg->se_tpg_tfo->shutdown_session(sess))
continue;
spin_unlock_bh(&tpg->session_lock);
@ -481,7 +480,7 @@ int core_tpg_del_initiator_node_acl(
core_clear_initiator_node_from_tpg(acl, tpg);
core_free_device_list_for_node(acl, tpg);
printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);
@ -506,8 +505,8 @@ int core_tpg_set_initiator_node_queue_depth(
spin_lock_bh(&tpg->acl_node_lock);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
if (!(acl)) {
printk(KERN_ERR "Access Control List entry for %s Initiator"
if (!acl) {
pr_err("Access Control List entry for %s Initiator"
" Node %s does not exists for TPG %hu, ignoring"
" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
@ -526,7 +525,7 @@ int core_tpg_set_initiator_node_queue_depth(
continue;
if (!force) {
printk(KERN_ERR "Unable to change queue depth for %s"
pr_err("Unable to change queue depth for %s"
" Initiator Node: %s while session is"
" operational. To forcefully change the queue"
" depth and force session reinstatement"
@ -543,7 +542,7 @@ int core_tpg_set_initiator_node_queue_depth(
/*
* Determine if the session needs to be closed by our context.
*/
if (!(tpg->se_tpg_tfo->shutdown_session(sess)))
if (!tpg->se_tpg_tfo->shutdown_session(sess))
continue;
init_sess = sess;
@ -586,7 +585,7 @@ int core_tpg_set_initiator_node_queue_depth(
if (init_sess)
tpg->se_tpg_tfo->close_session(init_sess);
printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator"
pr_debug("Successfuly changed queue depth to: %d for Initiator"
" Node: %s on %s Target Portal Group: %u\n", queue_depth,
initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg));
@ -644,8 +643,8 @@ int core_tpg_register(
se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
if (!(se_tpg->tpg_lun_list)) {
printk(KERN_ERR "Unable to allocate struct se_portal_group->"
if (!se_tpg->tpg_lun_list) {
pr_err("Unable to allocate struct se_portal_group->"
"tpg_lun_list\n");
return -ENOMEM;
}
@ -686,7 +685,7 @@ int core_tpg_register(
list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
spin_unlock_bh(&tpg_lock);
printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
@ -700,7 +699,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
{
struct se_node_acl *nacl, *nacl_tmp;
printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
" for endpoint: %s Portal Tag %u\n",
(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
"Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
@ -749,7 +748,7 @@ struct se_lun *core_tpg_pre_addlun(
struct se_lun *lun;
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
"-1: %u for Target Portal Group: %u\n",
tpg->se_tpg_tfo->get_fabric_name(),
unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
@ -760,7 +759,7 @@ struct se_lun *core_tpg_pre_addlun(
spin_lock(&tpg->tpg_lun_lock);
lun = &tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
pr_err("TPG Logical Unit Number: %u is already active"
" on %s Target Portal Group: %u, ignoring request.\n",
unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg));
@ -808,7 +807,7 @@ struct se_lun *core_tpg_pre_dellun(
struct se_lun *lun;
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
"-1: %u for Target Portal Group: %u\n",
tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
TRANSPORT_MAX_LUNS_PER_TPG-1,
@ -819,7 +818,7 @@ struct se_lun *core_tpg_pre_dellun(
spin_lock(&tpg->tpg_lun_lock);
lun = &tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
pr_err("%s Logical Unit Number: %u is not active on"
" Target Portal Group: %u, ignoring request.\n",
tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
tpg->se_tpg_tfo->tpg_get_tag(tpg));

File diff suppressed because it is too large.


@ -49,15 +49,15 @@ int core_scsi3_ua_check(
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
if (!(sess))
if (!sess)
return 0;
nacl = sess->se_node_acl;
if (!(nacl))
if (!nacl)
return 0;
deve = &nacl->device_list[cmd->orig_fe_lun];
if (!(atomic_read(&deve->ua_count)))
if (!atomic_read(&deve->ua_count))
return 0;
/*
* From sam4r14, section 5.14 Unit attention condition:
@ -97,12 +97,12 @@ int core_scsi3_ua_allocate(
/*
* PASSTHROUGH OPS
*/
if (!(nacl))
if (!nacl)
return -EINVAL;
ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
if (!(ua)) {
printk(KERN_ERR "Unable to allocate struct se_ua\n");
if (!ua) {
pr_err("Unable to allocate struct se_ua\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&ua->ua_dev_list);
@ -177,7 +177,7 @@ int core_scsi3_ua_allocate(
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
" 0x%02x, ASCQ: 0x%02x\n",
nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
asc, ascq);
@ -215,16 +215,16 @@ void core_scsi3_ua_for_check_condition(
struct se_ua *ua = NULL, *ua_p;
int head = 1;
if (!(sess))
if (!sess)
return;
nacl = sess->se_node_acl;
if (!(nacl))
if (!nacl)
return;
spin_lock_irq(&nacl->device_list_lock);
deve = &nacl->device_list[cmd->orig_fe_lun];
if (!(atomic_read(&deve->ua_count))) {
if (!atomic_read(&deve->ua_count)) {
spin_unlock_irq(&nacl->device_list_lock);
return;
}
@ -264,7 +264,7 @@ void core_scsi3_ua_for_check_condition(
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with"
pr_debug("[%s]: %s UNIT ATTENTION condition with"
" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
@ -284,16 +284,16 @@ int core_scsi3_ua_clear_for_request_sense(
struct se_ua *ua = NULL, *ua_p;
int head = 1;
if (!(sess))
if (!sess)
return -EINVAL;
nacl = sess->se_node_acl;
if (!(nacl))
if (!nacl)
return -EINVAL;
spin_lock_irq(&nacl->device_list_lock);
deve = &nacl->device_list[cmd->orig_fe_lun];
if (!(atomic_read(&deve->ua_count))) {
if (!atomic_read(&deve->ua_count)) {
spin_unlock_irq(&nacl->device_list_lock);
return -EPERM;
}
@ -323,7 +323,7 @@ int core_scsi3_ua_clear_for_request_sense(
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped"
pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
" LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
" ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
cmd->orig_fe_lun, *asc, *ascq);


@ -23,30 +23,6 @@
#define FT_TPG_NAMELEN 32 /* max length of TPG name */
#define FT_LUN_NAMELEN 32 /* max length of LUN name */
/*
* Debug options.
*/
#define FT_DEBUG_CONF 0x01 /* configuration messages */
#define FT_DEBUG_SESS 0x02 /* session messages */
#define FT_DEBUG_TM 0x04 /* TM operations */
#define FT_DEBUG_IO 0x08 /* I/O commands */
#define FT_DEBUG_DATA 0x10 /* Data transfer */
extern unsigned int ft_debug_logging; /* debug options */
#define FT_DEBUG(mask, fmt, args...) \
do { \
if (ft_debug_logging & (mask)) \
printk(KERN_INFO "tcm_fc: %s: " fmt, \
__func__, ##args); \
} while (0)
#define FT_CONF_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_CONF, fmt, ##args)
#define FT_SESS_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_SESS, fmt, ##args)
#define FT_TM_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_TM, fmt, ##args)
#define FT_IO_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_IO, fmt, ##args)
#define FT_DATA_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_DATA, fmt, ##args)
struct ft_transport_id {
__u8 format;
__u8 __resvd1[7];


@ -62,22 +62,19 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
struct scatterlist *sg;
int count;
if (!(ft_debug_logging & FT_DEBUG_IO))
return;
se_cmd = &cmd->se_cmd;
printk(KERN_INFO "%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
printk(KERN_INFO "%s: cmd %p cdb %p\n",
pr_debug("%s: cmd %p cdb %p\n",
caller, cmd, cmd->cdb);
printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
printk(KERN_INFO "%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
caller, cmd, se_cmd->t_data_nents,
se_cmd->data_length, se_cmd->se_cmd_flags);
for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count)
printk(KERN_INFO "%s: cmd %p sg %p page %p "
pr_debug("%s: cmd %p sg %p page %p "
"len 0x%x off 0x%x\n",
caller, cmd, sg,
sg_page(sg), sg->length, sg->offset);
@ -85,7 +82,7 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
sp = cmd->seq;
if (sp) {
ep = fc_seq_exch(sp);
printk(KERN_INFO "%s: cmd %p sid %x did %x "
pr_debug("%s: cmd %p sid %x did %x "
"ox_id %x rx_id %x seq_id %x e_stat %x\n",
caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
sp->id, ep->esb_stat);
@ -321,7 +318,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
case FC_RCTL_DD_SOL_CTL: /* transfer ready */
case FC_RCTL_DD_DATA_DESC: /* transfer ready */
default:
printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
pr_debug("%s: unhandled frame r_ctl %x\n",
__func__, fh->fh_r_ctl);
fc_frame_free(fp);
transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
@ -346,7 +343,7 @@ static void ft_send_resp_status(struct fc_lport *lport,
struct fcp_resp_rsp_info *info;
fh = fc_frame_header_get(rx_fp);
FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n",
pr_debug("FCP error response: did %x oxid %x status %x code %x\n",
ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
len = sizeof(*fcp);
if (status == SAM_STAT_GOOD)
@ -416,15 +413,15 @@ static void ft_send_tm(struct ft_cmd *cmd)
* FCP4r01 indicates having a combination of
* tm_flags set is invalid.
*/
FT_TM_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
return;
}
FT_TM_DBG("alloc tm cmd fn %d\n", tm_func);
pr_debug("alloc tm cmd fn %d\n", tm_func);
tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
if (!tmr) {
FT_TM_DBG("alloc failed\n");
pr_debug("alloc failed\n");
ft_send_resp_code(cmd, FCP_TMF_FAILED);
return;
}
@ -439,7 +436,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
* since "unable to handle TMR request because failed
* to get to LUN"
*/
FT_TM_DBG("Failed to get LUN for TMR func %d, "
pr_debug("Failed to get LUN for TMR func %d, "
"se_cmd %p, unpacked_lun %d\n",
tm_func, &cmd->se_cmd, cmd->lun);
ft_dump_cmd(cmd, __func__);
@ -490,7 +487,7 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)
code = FCP_TMF_FAILED;
break;
}
FT_TM_DBG("tmr fn %d resp %d fcp code %d\n",
pr_debug("tmr fn %d resp %d fcp code %d\n",
tmr->function, tmr->response, code);
ft_send_resp_code(cmd, code);
return 0;
@ -518,7 +515,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
return;
busy:
FT_IO_DBG("cmd or seq allocation failure - sending BUSY\n");
pr_debug("cmd or seq allocation failure - sending BUSY\n");
ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
fc_frame_free(fp);
ft_sess_put(sess); /* undo get from lookup */
@ -543,7 +540,7 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
case FC_RCTL_DD_DATA_DESC: /* transfer ready */
case FC_RCTL_ELS4_REQ: /* SRR, perhaps */
default:
printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
pr_debug("%s: unhandled frame r_ctl %x\n",
__func__, fh->fh_r_ctl);
fc_frame_free(fp);
ft_sess_put(sess); /* undo get from lookup */
@ -642,7 +639,7 @@ static void ft_send_cmd(struct ft_cmd *cmd)
ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);
FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
ft_dump_cmd(cmd, __func__);
if (ret == -ENOMEM) {
@ -672,7 +669,7 @@ err:
*/
static void ft_exec_req(struct ft_cmd *cmd)
{
FT_IO_DBG("cmd state %x\n", cmd->state);
pr_debug("cmd state %x\n", cmd->state);
switch (cmd->state) {
case FC_CMD_ST_NEW:
ft_send_cmd(cmd);


@ -106,7 +106,7 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
}
err = 4;
fail:
FT_CONF_DBG("err %u len %zu pos %u byte %u\n",
pr_debug("err %u len %zu pos %u byte %u\n",
err, cp - name, pos, byte);
return -1;
}
@ -216,7 +216,7 @@ static struct se_node_acl *ft_add_acl(
u64 wwpn;
u32 q_depth;
FT_CONF_DBG("add acl %s\n", name);
pr_debug("add acl %s\n", name);
tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
if (ft_parse_wwn(name, &wwpn, 1) < 0)
@ -239,11 +239,11 @@ static void ft_del_acl(struct se_node_acl *se_acl)
struct ft_node_acl *acl = container_of(se_acl,
struct ft_node_acl, se_node_acl);
FT_CONF_DBG("del acl %s\n",
pr_debug("del acl %s\n",
config_item_name(&se_acl->acl_group.cg_item));
tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
FT_CONF_DBG("del acl %p se_acl %p tpg %p se_tpg %p\n",
pr_debug("del acl %p se_acl %p tpg %p se_tpg %p\n",
acl, se_acl, tpg, &tpg->se_tpg);
core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1);
@ -260,11 +260,11 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
spin_lock_bh(&se_tpg->acl_node_lock);
list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
FT_CONF_DBG("acl %p port_name %llx\n",
pr_debug("acl %p port_name %llx\n",
acl, (unsigned long long)acl->node_auth.port_name);
if (acl->node_auth.port_name == rdata->ids.port_name ||
acl->node_auth.node_name == rdata->ids.node_name) {
FT_CONF_DBG("acl %p port_name %llx matched\n", acl,
pr_debug("acl %p port_name %llx matched\n", acl,
(unsigned long long)rdata->ids.port_name);
found = acl;
/* XXX need to hold onto ACL */
@ -281,10 +281,10 @@ struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
acl = kzalloc(sizeof(*acl), GFP_KERNEL);
if (!acl) {
printk(KERN_ERR "Unable to allocate struct ft_node_acl\n");
pr_err("Unable to allocate struct ft_node_acl\n");
return NULL;
}
FT_CONF_DBG("acl %p\n", acl);
pr_debug("acl %p\n", acl);
return &acl->se_node_acl;
}
@ -294,7 +294,7 @@ static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg,
struct ft_node_acl *acl = container_of(se_acl,
struct ft_node_acl, se_node_acl);
FT_CONF_DBG(KERN_INFO "acl %p\n", acl);
pr_debug("acl %p\n", acl);
kfree(acl);
}
@ -311,7 +311,7 @@ static struct se_portal_group *ft_add_tpg(
unsigned long index;
int ret;
FT_CONF_DBG("tcm_fc: add tpg %s\n", name);
pr_debug("tcm_fc: add tpg %s\n", name);
/*
* Name must be "tpgt_" followed by the index.
@ -354,7 +354,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
{
struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
FT_CONF_DBG("del tpg %s\n",
pr_debug("del tpg %s\n",
config_item_name(&tpg->se_tpg.tpg_group.cg_item));
kthread_stop(tpg->thread);
@ -412,7 +412,7 @@ static struct se_wwn *ft_add_lport(
struct ft_lport_acl *old_lacl;
u64 wwpn;
FT_CONF_DBG("add lport %s\n", name);
pr_debug("add lport %s\n", name);
if (ft_parse_wwn(name, &wwpn, 1) < 0)
return NULL;
lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
@ -441,7 +441,7 @@ static void ft_del_lport(struct se_wwn *wwn)
struct ft_lport_acl *lacl = container_of(wwn,
struct ft_lport_acl, fc_lport_wwn);
FT_CONF_DBG("del lport %s\n",
pr_debug("del lport %s\n",
config_item_name(&wwn->wwn_group.cg_item));
mutex_lock(&ft_lport_lock);
list_del(&lacl->list);
@ -581,7 +581,7 @@ int ft_register_configfs(void)
*/
fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
if (IS_ERR(fabric)) {
printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n",
pr_err("%s: target_fabric_configfs_init() failed!\n",
__func__);
return PTR_ERR(fabric);
}
@ -608,11 +608,8 @@ int ft_register_configfs(void)
*/
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
FT_CONF_DBG("target_fabric_configfs_register() for"
pr_debug("target_fabric_configfs_register() for"
" FC Target failed!\n");
printk(KERN_INFO
"%s: target_fabric_configfs_register() failed!\n",
__func__);
target_fabric_configfs_free(fabric);
return -1;
}

@ -39,6 +39,7 @@
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@ -176,8 +177,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
error = lport->tt.seq_send(lport, cmd->seq, fp);
if (error) {
/* XXX For now, initiator will retry */
if (printk_ratelimit())
printk(KERN_ERR "%s: Failed to send frame %p, "
pr_err_ratelimited("%s: Failed to send frame %p, "
"xid <0x%x>, remaining %zu, "
"lso_max <0x%x>\n",
__func__, fp, ep->xid,
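
The hunk above also swaps the open-coded printk_ratelimit() guard for pr_err_ratelimited(), which is why <linux/ratelimit.h> joins the includes. A hedged before/after sketch; the helper name and message are illustrative:

#include <linux/ratelimit.h>	/* pr_err_ratelimited() */

static void report_send_error(const void *fp)
{
	/* Old pattern: one global rate limit shared by every printk site. */
	if (printk_ratelimit())
		printk(KERN_ERR "%s: Failed to send frame %p\n", __func__, fp);

	/* New pattern: the macro keeps private per-call-site ratelimit state. */
	pr_err_ratelimited("%s: Failed to send frame %p\n", __func__, fp);
}
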
@ -222,7 +222,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
*/
buf = fc_frame_payload_get(fp, 1);
if (cmd->was_ddp_setup && buf) {
printk(KERN_INFO "%s: When DDP was setup, not expected to"
pr_debug("%s: When DDP was setup, not expected to"
"receive frame with payload, Payload shall be"
"copied directly to buffer instead of coming "
"via. legacy receive queues\n", __func__);
@ -260,7 +260,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
* this point, but just in case if required in future
* for debugging or any other purpose
*/
printk(KERN_ERR "%s: Received frame with TSI bit not"
pr_err("%s: Received frame with TSI bit not"
" being SET, dropping the frame, "
"cmd->sg <%p>, cmd->sg_cnt <0x%x>\n",
__func__, cmd->sg, cmd->sg_cnt);

@ -198,13 +198,13 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
if (sess->port_id == port_id) {
kref_get(&sess->kref);
rcu_read_unlock();
FT_SESS_DBG("port_id %x found %p\n", port_id, sess);
pr_debug("port_id %x found %p\n", port_id, sess);
return sess;
}
}
out:
rcu_read_unlock();
FT_SESS_DBG("port_id %x not found\n", port_id);
pr_debug("port_id %x not found\n", port_id);
return NULL;
}
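
ft_sess_get() pairs an RCU-protected hash walk with a kref hold taken before the read-side lock is dropped, so the session cannot be freed out from under the caller. A condensed sketch of that pattern, assuming a per-port hash head; names are abridged and the iterator is shown in its 4-argument v3.0-era form:

#include <linux/rculist.h>
#include <linux/kref.h>

static struct ft_sess *lookup_sess(struct hlist_head *head, u32 port_id)
{
	struct ft_sess *sess;
	struct hlist_node *pos;

	rcu_read_lock();
	hlist_for_each_entry_rcu(sess, pos, head, hash) {
		if (sess->port_id == port_id) {
			kref_get(&sess->kref);	/* pin before unlocking */
			rcu_read_unlock();
			return sess;
		}
	}
	rcu_read_unlock();
	return NULL;
}
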
@ -240,7 +240,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
hlist_add_head_rcu(&sess->hash, head);
tport->sess_count++;
FT_SESS_DBG("port_id %x sess %p\n", port_id, sess);
pr_debug("port_id %x sess %p\n", port_id, sess);
transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
sess->se_sess, sess);
@ -314,7 +314,7 @@ int ft_sess_shutdown(struct se_session *se_sess)
{
struct ft_sess *sess = se_sess->fabric_sess_ptr;
FT_SESS_DBG("port_id %x\n", sess->port_id);
pr_debug("port_id %x\n", sess->port_id);
return 1;
}
@ -335,7 +335,7 @@ void ft_sess_close(struct se_session *se_sess)
mutex_unlock(&ft_lport_lock);
return;
}
FT_SESS_DBG("port_id %x\n", port_id);
pr_debug("port_id %x\n", port_id);
ft_sess_unhash(sess);
mutex_unlock(&ft_lport_lock);
transport_deregister_session_configfs(se_sess);
@ -348,7 +348,7 @@ void ft_sess_stop(struct se_session *se_sess, int sess_sleep, int conn_sleep)
{
struct ft_sess *sess = se_sess->fabric_sess_ptr;
FT_SESS_DBG("port_id %x\n", sess->port_id);
pr_debug("port_id %x\n", sess->port_id);
}
int ft_sess_logged_in(struct se_session *se_sess)
@ -458,7 +458,7 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
mutex_lock(&ft_lport_lock);
ret = ft_prli_locked(rdata, spp_len, rspp, spp);
mutex_unlock(&ft_lport_lock);
FT_SESS_DBG("port_id %x flags %x ret %x\n",
pr_debug("port_id %x flags %x ret %x\n",
rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
return ret;
}
@ -518,11 +518,11 @@ static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
struct ft_sess *sess;
u32 sid = fc_frame_sid(fp);
FT_SESS_DBG("sid %x\n", sid);
pr_debug("sid %x\n", sid);
sess = ft_sess_get(lport, sid);
if (!sess) {
FT_SESS_DBG("sid %x sess lookup failed\n", sid);
pr_debug("sid %x sess lookup failed\n", sid);
/* TBD XXX - if FCP_CMND, send PRLO */
fc_frame_free(fp);
return;

@ -403,7 +403,7 @@ struct se_queue_obj {
struct se_task {
unsigned char task_sense;
struct scatterlist *task_sg;
u32 task_sg_num;
u32 task_sg_nents;
struct scatterlist *task_sg_bidi;
u8 task_scsi_status;
u8 task_flags;
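
The task_sg_num to task_sg_nents rename aligns the field with the kernel's scatterlist vocabulary, where "nents" is the entry count handed to for_each_sg(). A small sketch of how a consumer would walk the table; the helper and loop body are illustrative:

#include <linux/scatterlist.h>

static void walk_task_sg(struct se_task *task)
{
	struct scatterlist *sg;
	u32 i;

	/* task_sg_nents counts the entries in task->task_sg */
	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
		pr_debug("sg %u: len %u off %u\n", i, sg->length, sg->offset);
	}
}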

@ -292,7 +292,7 @@ struct se_subsystem_api {
* drivers. Provided out of convenience.
*/
int (*transport_complete)(struct se_task *task);
struct se_task *(*alloc_task)(struct se_cmd *);
struct se_task *(*alloc_task)(unsigned char *cdb);
/*
* do_task():
*/
@ -341,11 +341,6 @@ struct se_subsystem_api {
* Get the sector_t from a subsystem backstore..
*/
sector_t (*get_blocks)(struct se_device *);
/*
* do_se_mem_map():
*/
int (*do_se_mem_map)(struct se_task *, struct list_head *, void *,
struct se_mem *, struct se_mem **, u32 *, u32 *);
/*
* get_sense_buffer():
*/
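
With alloc_task() now taking just the CDB pointer, a backend allocates its task descriptor from the opcode alone instead of reaching into struct se_cmd. A hedged sketch of what an implementation might look like; struct my_task and its fields are hypothetical, not from this commit:

#include <linux/slab.h>

struct my_task {
	struct se_task	mt_se_task;	/* embedded core descriptor */
	unsigned char	mt_opcode;
};

static struct se_task *my_alloc_task(unsigned char *cdb)
{
	struct my_task *mt;

	mt = kzalloc(sizeof(*mt), GFP_KERNEL);
	if (!mt) {
		pr_err("Unable to allocate struct my_task\n");
		return NULL;
	}
	mt->mt_opcode = cdb[0];		/* only the CDB is visible here */
	return &mt->mt_se_task;
}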