2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-16 01:04:08 +08:00

Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-post-merge-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-post-merge-2.6:
  [SCSI] target: Add LIO target core v4.0.0-rc6
  [SCSI] sd,sr: kill compat SDEV_MEDIA_CHANGE event
  [SCSI] sd: implement sd_check_events()
This commit is contained in:
Linus Torvalds 2011-01-14 16:29:49 -08:00
commit 38567333a6
47 changed files with 30571 additions and 58 deletions

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,145 @@
>>>>>>>>>> The TCM v4 fabric module script generator <<<<<<<<<<
Greetings all,
This document is intended to be a mini-HOWTO for using the tcm_mod_builder.py
script to generate a brand new functional TCM v4 fabric .ko module of your very own,
that once built can immediately be loaded to start accessing the new TCM/ConfigFS
fabric skeleton, by simply using:
modprobe $TCM_NEW_MOD
mkdir -p /sys/kernel/config/target/$TCM_NEW_MOD
This script will create a new drivers/target/$TCM_NEW_MOD/, and will do the following
*) Generate new API callers for drivers/target/target_core_fabric_configs.c logic
->make_nodeacl(), ->drop_nodeacl(), ->make_tpg(), ->drop_tpg()
->make_wwn(), ->drop_wwn(). These are created into $TCM_NEW_MOD/$TCM_NEW_MOD_configfs.c
*) Generate basic infrastructure for loading/unloading LKMs and TCM/ConfigFS fabric module
using a skeleton struct target_core_fabric_ops API template.
*) Based on user defined T10 Proto_Ident for the new fabric module being built,
the TransportID / Initiator and Target WWPN related handlers for
SPC-3 persistent reservation are automatically generated in $TCM_NEW_MOD/$TCM_NEW_MOD_fabric.c
using drivers/target/target_core_fabric_lib.c logic.
*) NOP API calls for all other Data I/O path and fabric dependent attribute logic
in $TCM_NEW_MOD/$TCM_NEW_MOD_fabric.c
tcm_mod_builder.py depends upon the mandatory '-p $PROTO_IDENT' and '-m
$FABRIC_MOD_name' parameters, and actually running the script looks like:
target:/mnt/sdb/lio-core-2.6.git/Documentation/target# python tcm_mod_builder.py -p iSCSI -m tcm_nab5000
tcm_dir: /mnt/sdb/lio-core-2.6.git/Documentation/target/../../
Set fabric_mod_name: tcm_nab5000
Set fabric_mod_dir:
/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000
Using proto_ident: iSCSI
Creating fabric_mod_dir:
/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000
Writing file:
/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_base.h
Using tcm_mod_scan_fabric_ops:
/mnt/sdb/lio-core-2.6.git/Documentation/target/../../include/target/target_core_fabric_ops.h
Writing file:
/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_fabric.c
Writing file:
/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_fabric.h
Writing file:
/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_configfs.c
Writing file:
/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/Kbuild
Writing file:
/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/Kconfig
Would you like to add tcm_nab5000to drivers/target/Kbuild..? [yes,no]: yes
Would you like to add tcm_nab5000to drivers/target/Kconfig..? [yes,no]: yes
At the end of tcm_mod_builder.py, the script will ask to add the following
line to drivers/target/Kbuild:
obj-$(CONFIG_TCM_NAB5000) += tcm_nab5000/
and the same for drivers/target/Kconfig:
source "drivers/target/tcm_nab5000/Kconfig"
*) Run 'make menuconfig' and select the new CONFIG_TCM_NAB5000 item:
<M> TCM_NAB5000 fabric module
*) Build using 'make modules', once completed you will have:
target:/mnt/sdb/lio-core-2.6.git# ls -la drivers/target/tcm_nab5000/
total 1348
drwxr-xr-x 2 root root 4096 2010-10-05 03:23 .
drwxr-xr-x 9 root root 4096 2010-10-05 03:22 ..
-rw-r--r-- 1 root root 282 2010-10-05 03:22 Kbuild
-rw-r--r-- 1 root root 171 2010-10-05 03:22 Kconfig
-rw-r--r-- 1 root root 49 2010-10-05 03:23 modules.order
-rw-r--r-- 1 root root 738 2010-10-05 03:22 tcm_nab5000_base.h
-rw-r--r-- 1 root root 9096 2010-10-05 03:22 tcm_nab5000_configfs.c
-rw-r--r-- 1 root root 191200 2010-10-05 03:23 tcm_nab5000_configfs.o
-rw-r--r-- 1 root root 40504 2010-10-05 03:23 .tcm_nab5000_configfs.o.cmd
-rw-r--r-- 1 root root 5414 2010-10-05 03:22 tcm_nab5000_fabric.c
-rw-r--r-- 1 root root 2016 2010-10-05 03:22 tcm_nab5000_fabric.h
-rw-r--r-- 1 root root 190932 2010-10-05 03:23 tcm_nab5000_fabric.o
-rw-r--r-- 1 root root 40713 2010-10-05 03:23 .tcm_nab5000_fabric.o.cmd
-rw-r--r-- 1 root root 401861 2010-10-05 03:23 tcm_nab5000.ko
-rw-r--r-- 1 root root 265 2010-10-05 03:23 .tcm_nab5000.ko.cmd
-rw-r--r-- 1 root root 459 2010-10-05 03:23 tcm_nab5000.mod.c
-rw-r--r-- 1 root root 23896 2010-10-05 03:23 tcm_nab5000.mod.o
-rw-r--r-- 1 root root 22655 2010-10-05 03:23 .tcm_nab5000.mod.o.cmd
-rw-r--r-- 1 root root 379022 2010-10-05 03:23 tcm_nab5000.o
-rw-r--r-- 1 root root 211 2010-10-05 03:23 .tcm_nab5000.o.cmd
*) Load the new module, create a lun_0 configfs group, and add new TCM Core
IBLOCK backstore symlink to port:
target:/mnt/sdb/lio-core-2.6.git# insmod drivers/target/tcm_nab5000.ko
target:/mnt/sdb/lio-core-2.6.git# mkdir -p /sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0
target:/mnt/sdb/lio-core-2.6.git# cd /sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0/
target:/sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0# ln -s /sys/kernel/config/target/core/iblock_0/lvm_test0 nab5000_port
target:/sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0# cd -
target:/mnt/sdb/lio-core-2.6.git# tree /sys/kernel/config/target/nab5000/
/sys/kernel/config/target/nab5000/
|-- discovery_auth
|-- iqn.foo
| `-- tpgt_1
| |-- acls
| |-- attrib
| |-- lun
| | `-- lun_0
| | |-- alua_tg_pt_gp
| | |-- alua_tg_pt_offline
| | |-- alua_tg_pt_status
| | |-- alua_tg_pt_write_md
| | `-- nab5000_port -> ../../../../../../target/core/iblock_0/lvm_test0
| |-- np
| `-- param
`-- version
target:/mnt/sdb/lio-core-2.6.git# lsmod
Module Size Used by
tcm_nab5000 3935 4
iscsi_target_mod 193211 0
target_core_stgt 8090 0
target_core_pscsi 11122 1
target_core_file 9172 2
target_core_iblock 9280 1
target_core_mod 228575 31
tcm_nab5000,iscsi_target_mod,target_core_stgt,target_core_pscsi,target_core_file,target_core_iblock
libfc 73681 0
scsi_debug 56265 0
scsi_tgt 8666 1 target_core_stgt
configfs 20644 2 target_core_mod
----------------------------------------------------------------------
Future TODO items:
*) Add more T10 proto_idents
*) Make tcm_mod_dump_fabric_ops() smarter and generate function pointer
defs directly from include/target/target_core_fabric_ops.h:struct target_core_fabric_ops
structure members.
October 5th, 2010
Nicholas A. Bellinger <nab@linux-iscsi.org>

View File

@ -26,6 +26,8 @@ source "drivers/ata/Kconfig"
source "drivers/md/Kconfig"
source "drivers/target/Kconfig"
source "drivers/message/fusion/Kconfig"
source "drivers/firewire/Kconfig"

View File

@ -46,6 +46,7 @@ obj-y += macintosh/
obj-$(CONFIG_IDE) += ide/
obj-$(CONFIG_SCSI) += scsi/
obj-$(CONFIG_ATA) += ata/
obj-$(CONFIG_TARGET_CORE) += target/
obj-$(CONFIG_MTD) += mtd/
obj-$(CONFIG_SPI) += spi/
obj-y += net/

View File

@ -990,30 +990,51 @@ out:
/*
 * set_media_not_present - record that the medium is gone
 * @sdkp: scsi disk whose state is being updated
 *
 * Flag a media change event if a medium was present before, and for
 * removable devices clear the present/capacity state so a later TUR
 * can rediscover the medium.
 *
 * Fix: the merged text retained the old unconditional pre-image lines
 * (clearing media_present/capacity and setting ->changed before the
 * test), which made the "if (sdkp->media_present)" check dead code and
 * flagged a change for fixed disks too.  Only the new conditional
 * logic is kept.
 */
static void set_media_not_present(struct scsi_disk *sdkp)
{
	if (sdkp->media_present)
		sdkp->device->changed = 1;

	if (sdkp->device->removable) {
		sdkp->media_present = 0;
		sdkp->capacity = 0;
	}
}
/*
 * media_not_present - decode "medium not present" sense data
 * @sdkp:  disk the failed command was issued to
 * @sshdr: decoded sense header from that command
 *
 * Returns 1 (and records the medium as absent via
 * set_media_not_present()) when the sense data carries ASC 0x3A under
 * the NOT_READY or UNIT_ATTENTION sense keys; 0 otherwise.
 */
static int media_not_present(struct scsi_disk *sdkp,
			     struct scsi_sense_hdr *sshdr)
{
	if (!scsi_sense_valid(sshdr))
		return 0;

	/* not invoked for commands that could return deferred errors */
	if (sshdr->sense_key != UNIT_ATTENTION &&
	    sshdr->sense_key != NOT_READY)
		return 0;

	if (sshdr->asc != 0x3A)		/* medium not present */
		return 0;

	set_media_not_present(sdkp);
	return 1;
}
/**
* sd_media_changed - check if our medium changed
* @disk: kernel device descriptor
* sd_check_events - check media events
* @disk: kernel device descriptor
* @clearing: disk events currently being cleared
*
* Returns 0 if not applicable or no change; 1 if change
* Returns mask of DISK_EVENT_*.
*
* Note: this function is invoked from the block subsystem.
**/
static int sd_media_changed(struct gendisk *disk)
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
struct scsi_sense_hdr *sshdr = NULL;
int retval;
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_media_changed\n"));
if (!sdp->removable)
return 0;
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
/*
* If the device is offline, don't send any commands - just pretend as
@ -1043,40 +1064,32 @@ static int sd_media_changed(struct gendisk *disk)
sshdr);
}
if (retval) {
/* failed to execute TUR, assume media not present */
if (host_byte(retval)) {
set_media_not_present(sdkp);
goto out;
}
if (media_not_present(sdkp, sshdr))
goto out;
/*
* For removable scsi disk we have to recognise the presence
* of a disk in the drive. This is kept in the struct scsi_disk
* struct and tested at open ! Daniel Roche (dan@lectra.fr)
* of a disk in the drive.
*/
if (!sdkp->media_present)
sdp->changed = 1;
sdkp->media_present = 1;
out:
/*
* Report a media change under the following conditions:
* sdp->changed is set under the following conditions:
*
* Medium is present now and wasn't present before.
* Medium wasn't present before and is present now.
* Medium was present at all times, but it changed while
* we weren't looking (sdp->changed is set).
*
* If there was no medium before and there is no medium now then
* don't report a change, even if a medium was inserted and removed
* while we weren't looking.
* Medium present state has changed in either direction.
* Device has indicated UNIT_ATTENTION.
*/
retval = (sdkp->media_present != sdkp->previous_state ||
(sdkp->media_present && sdp->changed));
if (retval)
sdev_evt_send_simple(sdp, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
sdkp->previous_state = sdkp->media_present;
/* sdp->changed indicates medium was changed or is not present */
sdp->changed = !sdkp->media_present;
kfree(sshdr);
retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
sdp->changed = 0;
return retval;
}
@ -1169,7 +1182,7 @@ static const struct block_device_operations sd_fops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = sd_compat_ioctl,
#endif
.media_changed = sd_media_changed,
.check_events = sd_check_events,
.revalidate_disk = sd_revalidate_disk,
.unlock_native_capacity = sd_unlock_native_capacity,
};
@ -1312,23 +1325,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
return good_bytes;
}
static int media_not_present(struct scsi_disk *sdkp,
struct scsi_sense_hdr *sshdr)
{
if (!scsi_sense_valid(sshdr))
return 0;
/* not invoked for commands that could return deferred errors */
if (sshdr->sense_key != NOT_READY &&
sshdr->sense_key != UNIT_ATTENTION)
return 0;
if (sshdr->asc != 0x3A) /* medium not present */
return 0;
set_media_not_present(sdkp);
return 1;
}
/*
* spinup disk - called only in sd_revalidate_disk()
*/
@ -1503,7 +1499,7 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
*/
if (sdp->removable &&
sense_valid && sshdr->sense_key == NOT_READY)
sdp->changed = 1;
set_media_not_present(sdkp);
/*
* We used to set media_present to 0 here to indicate no media
@ -2389,8 +2385,10 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
gd->driverfs_dev = &sdp->sdev_gendev;
gd->flags = GENHD_FL_EXT_DEVT;
if (sdp->removable)
if (sdp->removable) {
gd->flags |= GENHD_FL_REMOVABLE;
gd->events |= DISK_EVENT_MEDIA_CHANGE;
}
add_disk(gd);
sd_dif_config_host(sdkp);
@ -2472,7 +2470,6 @@ static int sd_probe(struct device *dev)
sdkp->disk = gd;
sdkp->index = index;
atomic_set(&sdkp->openers, 0);
sdkp->previous_state = 1;
if (!sdp->request_queue->rq_timeout) {
if (sdp->type != TYPE_MOD)

View File

@ -55,7 +55,6 @@ struct scsi_disk {
u8 media_present;
u8 write_prot;
u8 protection_type;/* Data Integrity Field */
unsigned previous_state : 1;
unsigned ATO : 1; /* state of disk ATO bit */
unsigned WCE : 1; /* state of disk WCE bit */
unsigned RCD : 1; /* state of disk RCD bit, unused */

View File

@ -249,10 +249,6 @@ skip_tur:
cd->device->changed = 0;
}
/* for backward compatibility */
if (events & DISK_EVENT_MEDIA_CHANGE)
sdev_evt_send_simple(cd->device, SDEV_EVT_MEDIA_CHANGE,
GFP_KERNEL);
return events;
}

32
drivers/target/Kconfig Normal file
View File

@ -0,0 +1,32 @@
#
# Generic Target Core Mod (TCM): the in-kernel SCSI target engine with
# a ConfigFS control plane.  The TCM_* entries below are optional
# backstore subsystem plugins and are only visible while TARGET_CORE
# itself is enabled.
#
menuconfig TARGET_CORE
	tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
	depends on SCSI && BLOCK
	select CONFIGFS_FS
	default n
	help
	Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
	control path for target_core_mod. This includes built-in TCM RAMDISK
	subsystem logic for virtual LUN 0 access

if TARGET_CORE

# Backstore plugin: direct BIO access to block devices
config TCM_IBLOCK
	tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
	help
	Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
	access to Linux/Block devices using BIO

# Backstore plugin: buffered access through the VFS
config TCM_FILEIO
	tristate "TCM/FILEIO Subsystem Plugin for Linux/VFS"
	help
	Say Y here to enable the TCM/FILEIO subsystem plugin for buffered
	access to Linux/VFS struct file or struct block_device

# Backstore plugin: passthrough to existing Linux/SCSI devices
config TCM_PSCSI
	tristate "TCM/pSCSI Subsystem Plugin for Linux/SCSI"
	help
	Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
	passthrough access to Linux/SCSI device

endif

24
drivers/target/Makefile Normal file
View File

@ -0,0 +1,24 @@
# Makefile for the TCM/Target Core storage engine and its backstore
# subsystem plugins.

EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/drivers/scsi/

# Objects linked into the core target_core_mod.ko module
target_core_mod-y := target_core_configfs.o \
		target_core_device.o \
		target_core_fabric_configfs.o \
		target_core_fabric_lib.o \
		target_core_hba.o \
		target_core_pr.o \
		target_core_alua.o \
		target_core_scdb.o \
		target_core_tmr.o \
		target_core_tpg.o \
		target_core_transport.o \
		target_core_cdb.o \
		target_core_ua.o \
		target_core_rd.o \
		target_core_mib.o

obj-$(CONFIG_TARGET_CORE) += target_core_mod.o

# Subsystem modules
obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o
obj-$(CONFIG_TCM_FILEIO) += target_core_file.o
obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,126 @@
#ifndef TARGET_CORE_ALUA_H
#define TARGET_CORE_ALUA_H

/*
 * Constants and prototypes for the target core's ALUA (Asymmetric
 * Logical Unit Access) emulation, per SPC-4.
 *
 * NOTE(review): several identifiers below spell "IMPLICT"/"EXPLICT"/
 * "OPTMIZED"; the misspellings are kept because the names are part of
 * this header's interface.
 */

/*
 * INQUIRY response data, TPGS Field
 *
 * from spc4r17 section 6.4.2 Table 135
 */
#define TPGS_NO_ALUA 0x00
#define TPGS_IMPLICT_ALUA 0x10
#define TPGS_EXPLICT_ALUA 0x20
/*
 * ASYMMETRIC ACCESS STATE field
 *
 * from spc4r17 section 6.27 Table 245
 */
#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED 0x0
#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1
#define ALUA_ACCESS_STATE_STANDBY 0x2
#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3
#define ALUA_ACCESS_STATE_OFFLINE 0xe
#define ALUA_ACCESS_STATE_TRANSITION 0xf
/*
 * REPORT_TARGET_PORT_GROUP STATUS CODE
 *
 * from spc4r17 section 6.27 Table 246
 */
#define ALUA_STATUS_NONE 0x00
#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG 0x01
#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA 0x02
/*
 * From spc4r17, Table D.1: ASC and ASCQ Assignment
 */
#define ASCQ_04H_ALUA_STATE_TRANSITION 0x0a
#define ASCQ_04H_ALUA_TG_PT_STANDBY 0x0b
#define ASCQ_04H_ALUA_TG_PT_UNAVAILABLE 0x0c
#define ASCQ_04H_ALUA_OFFLINE 0x12
/*
 * Used as the default for Active/NonOptimized delay (in milliseconds)
 * This can also be changed via configfs on a per target port group basis..
 */
#define ALUA_DEFAULT_NONOP_DELAY_MSECS 100
#define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */
/*
 * Used for implicit and explicit ALUA transitional delay, that is disabled
 * by default, and is intended to be used for debugging client side ALUA code.
 */
#define ALUA_DEFAULT_TRANS_DELAY_MSECS 0
#define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */
/*
 * Used by core_alua_update_tpg_primary_metadata() and
 * core_alua_update_tpg_secondary_metadata()
 */
#define ALUA_METADATA_PATH_LEN 512
/*
 * Used by core_alua_update_tpg_secondary_metadata()
 */
#define ALUA_SECONDARY_METADATA_WWN_LEN 256

/* slab caches for LU group / target port group bookkeeping objects */
extern struct kmem_cache *t10_alua_lu_gp_cache;
extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

/* SPC-4 REPORT/SET TARGET PORT GROUPS CDB emulation entry points */
extern int core_emulate_report_target_port_groups(struct se_cmd *);
extern int core_emulate_set_target_port_groups(struct se_cmd *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
				struct se_device *, struct se_port *,
				struct se_node_acl *, int, int);
extern char *core_alua_dump_status(int);

/* Logical Unit group lifecycle and membership */
extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
extern void core_alua_free_lu_gp_mem(struct se_device *);
extern struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *);
extern void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *);
extern void __core_alua_attach_lu_gp_mem(struct t10_alua_lu_gp_member *,
					struct t10_alua_lu_gp *);
extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
					struct t10_alua_lu_gp *);
extern void core_alua_drop_lu_gp_dev(struct se_device *);

/* Target Port group lifecycle and membership */
extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
			struct se_subsystem_dev *, const char *, int);
extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
					struct se_port *);
extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *);
extern void core_alua_free_tg_pt_gp_mem(struct se_port *);
extern void __core_alua_attach_tg_pt_gp_mem(struct t10_alua_tg_pt_gp_member *,
					struct t10_alua_tg_pt_gp *);

/* configfs show/store helpers for the ALUA attributes */
extern ssize_t core_alua_show_tg_pt_gp_info(struct se_port *, char *);
extern ssize_t core_alua_store_tg_pt_gp_info(struct se_port *, const char *,
						size_t);
extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *);
extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *,
					const char *, size_t);
extern ssize_t core_alua_show_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
						char *);
extern ssize_t core_alua_store_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
					const char *, size_t);
extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
					char *);
extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
					const char *, size_t);
extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
					char *);
extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *,
					const char *, size_t);
extern ssize_t core_alua_show_offline_bit(struct se_lun *, char *);
extern ssize_t core_alua_store_offline_bit(struct se_lun *, const char *,
					size_t);
extern ssize_t core_alua_show_secondary_status(struct se_lun *, char *);
extern ssize_t core_alua_store_secondary_status(struct se_lun *,
					const char *, size_t);
extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
					char *);
extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
					const char *, size_t);

extern int core_setup_alua(struct se_device *, int);

#endif /* TARGET_CORE_ALUA_H */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,996 @@
/*******************************************************************************
* Filename: target_core_fabric_configfs.c
*
* This file contains generic fabric module configfs infrastructure for
* TCM v4.x code
*
* Copyright (c) 2010 Rising Tide Systems
* Copyright (c) 2010 Linux-iSCSI.org
*
* Copyright (c) 2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
/*
 * TF_CIT_SETUP - generate a setup function for one fabric config_item_type
 *
 * Expands to target_fabric_setup_<name>_cit(), which fills in the
 * matching config_item_type inside the fabric's template with the
 * supplied item ops, group ops and attribute array, and ties its
 * lifetime to the owning fabric module.
 *
 * Fix: the printk() had no log level, so the message was emitted at the
 * default console level; tag it KERN_INFO explicitly.
 */
#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
{									\
	struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl;	\
	struct config_item_type *cit = &tfc->tfc_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = _attrs;						\
	cit->ct_owner = tf->tf_module;					\
	printk(KERN_INFO "Setup generic %s\n", __stringify(_name));	\
}
/* Start of tfc_tpg_mappedlun_cit */
/*
 * target_fabric_mappedlun_link - configfs ->allow_link for a MappedLUN
 * @lun_acl_ci: config_item of acls/$INITIATOR/lun_$ID (symlink source)
 * @lun_ci:     config_item of tpgt_$TPGT/lun/lun_$ID (symlink target)
 *
 * Invoked when userspace symlinks an initiator MappedLUN to a TPG LUN.
 * Validates that both endpoints live under the same $FABRIC/$WWN/tpgt
 * hierarchy, then registers the initiator node LUN ACL.
 *
 * Returns 0 on success or -EINVAL on any validation/registration failure.
 */
static int target_fabric_mappedlun_link(
	struct config_item *lun_acl_ci,
	struct config_item *lun_ci)
{
	struct se_dev_entry *deve;
	struct se_lun *lun = container_of(to_config_group(lun_ci),
			struct se_lun, lun_group);
	struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
			struct se_lun_acl, se_lun_group);
	struct se_portal_group *se_tpg;
	struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
	int ret = 0, lun_access;
	/*
	 * Ensure that the source port exists
	 */
	if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) {
		printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep"
			"_tpg does not exist\n");
		return -EINVAL;
	}
	se_tpg = lun->lun_sep->sep_tpg;
	/*
	 * Walk up the configfs tree on both the ACL side and the LUN side
	 * to reach the respective tpgt_$TPGT and $WWN items.
	 */
	nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
	tpg_ci = &nacl_ci->ci_group->cg_item;
	wwn_ci = &tpg_ci->ci_group->cg_item;
	tpg_ci_s = &lun_ci->ci_parent->ci_group->cg_item;
	wwn_ci_s = &tpg_ci_s->ci_group->cg_item;
	/*
	 * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
	 */
	if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
		printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n",
			config_item_name(wwn_ci));
		return -EINVAL;
	}
	if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
		printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s"
			" TPGT: %s\n", config_item_name(wwn_ci),
			config_item_name(tpg_ci));
		return -EINVAL;
	}
	/*
	 * If this struct se_node_acl was dynamically generated with
	 * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
	 * which will be write protected (READ-ONLY) when
	 * tpg_1/attrib/demo_mode_write_protect=1
	 */
	spin_lock_irq(&lacl->se_lun_nacl->device_list_lock);
	deve = &lacl->se_lun_nacl->device_list[lacl->mapped_lun];
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)
		lun_access = deve->lun_flags;
	else
		lun_access =
			(TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect(
				se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
					   TRANSPORT_LUNFLAGS_READ_WRITE;
	spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
	/*
	 * Determine the actual mapped LUN value user wants..
	 *
	 * This value is what the SCSI Initiator actually sees the
	 * iscsi/$IQN/$TPGT/lun/lun_* as on their SCSI Initiator Ports.
	 */
	ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl,
			lun->unpacked_lun, lun_access);
	return (ret < 0) ? -EINVAL : 0;
}
/*
 * target_fabric_mappedlun_unlink - configfs ->drop_link for a MappedLUN
 * @lun_acl_ci: config_item of acls/$INITIATOR/lun_$ID (symlink source)
 * @lun_ci:     config_item of tpgt_$TPGT/lun/lun_$ID (symlink target)
 *
 * Tears down the initiator node LUN ACL created by the matching
 * ->allow_link() call.  Always returns 0.
 */
static int target_fabric_mappedlun_unlink(
	struct config_item *lun_acl_ci,
	struct config_item *lun_ci)
{
	struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
			struct se_lun_acl, se_lun_group);
	struct se_node_acl *se_nacl = lacl->se_lun_nacl;
	struct se_dev_entry *deve = &se_nacl->device_list[lacl->mapped_lun];
	struct se_portal_group *se_tpg;
	struct se_lun *lun;

	/* Nothing to do if the underlying MappedLUN was already released */
	if (!(deve->se_lun))
		return 0;

	lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
	se_tpg = lun->lun_sep->sep_tpg;

	core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl);
	return 0;
}
CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl);
#define TCM_MAPPEDLUN_ATTR(_name, _mode) \
static struct target_fabric_mappedlun_attribute target_fabric_mappedlun_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
target_fabric_mappedlun_show_##_name, \
target_fabric_mappedlun_store_##_name);
/*
 * Show the current write-protect state ("1\n" or "0\n") of this
 * MappedLUN, sampled under the node ACL's device_list_lock.
 */
static ssize_t target_fabric_mappedlun_show_write_protect(
	struct se_lun_acl *lacl,
	char *page)
{
	struct se_node_acl *se_nacl = lacl->se_lun_nacl;
	struct se_dev_entry *deve;
	int wp;

	spin_lock_irq(&se_nacl->device_list_lock);
	deve = &se_nacl->device_list[lacl->mapped_lun];
	wp = (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? 1 : 0;
	spin_unlock_irq(&se_nacl->device_list_lock);

	return sprintf(page, "%d\n", wp);
}
/*
 * Store handler for the MappedLUN write_protect attribute.  Accepts
 * exactly "0" or "1"; flips the LUN flags between READ_WRITE and
 * READ_ONLY and logs the transition.
 */
static ssize_t target_fabric_mappedlun_store_write_protect(
	struct se_lun_acl *lacl,
	const char *page,
	size_t count)
{
	struct se_node_acl *se_nacl = lacl->se_lun_nacl;
	struct se_portal_group *se_tpg = se_nacl->se_tpg;
	unsigned long wp;

	if (strict_strtoul(page, 0, &wp))
		return -EINVAL;
	if (wp != 0 && wp != 1)
		return -EINVAL;

	core_update_device_list_access(lacl->mapped_lun,
			wp ? TRANSPORT_LUNFLAGS_READ_ONLY :
			     TRANSPORT_LUNFLAGS_READ_WRITE,
			lacl->se_lun_nacl);

	printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s"
		" Mapped LUN: %u Write Protect bit to %s\n",
		TPG_TFO(se_tpg)->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun, wp ? "ON" : "OFF");

	return count;
}
TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR);
CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);
static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
&target_fabric_mappedlun_write_protect.attr,
NULL,
};
static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
.show_attribute = target_fabric_mappedlun_attr_show,
.store_attribute = target_fabric_mappedlun_attr_store,
.allow_link = target_fabric_mappedlun_link,
.drop_link = target_fabric_mappedlun_unlink,
};
TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL,
target_fabric_mappedlun_attrs);
/* End of tfc_tpg_mappedlun_cit */
/* Start of tfc_tpg_nacl_attrib_cit */
CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group);
static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = {
.show_attribute = target_fabric_nacl_attrib_attr_show,
.store_attribute = target_fabric_nacl_attrib_attr_store,
};
TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL);
/* End of tfc_tpg_nacl_attrib_cit */
/* Start of tfc_tpg_nacl_auth_cit */
CONFIGFS_EATTR_OPS(target_fabric_nacl_auth, se_node_acl, acl_auth_group);
static struct configfs_item_operations target_fabric_nacl_auth_item_ops = {
.show_attribute = target_fabric_nacl_auth_attr_show,
.store_attribute = target_fabric_nacl_auth_attr_store,
};
TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL);
/* End of tfc_tpg_nacl_auth_cit */
/* Start of tfc_tpg_nacl_param_cit */
CONFIGFS_EATTR_OPS(target_fabric_nacl_param, se_node_acl, acl_param_group);
static struct configfs_item_operations target_fabric_nacl_param_item_ops = {
.show_attribute = target_fabric_nacl_param_attr_show,
.store_attribute = target_fabric_nacl_param_attr_store,
};
TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL);
/* End of tfc_tpg_nacl_param_cit */
/* Start of tfc_tpg_nacl_base_cit */
CONFIGFS_EATTR_OPS(target_fabric_nacl_base, se_node_acl, acl_group);
/*
 * target_fabric_make_mappedlun - mkdir(2) of acls/$INITIATOR/lun_$ID
 * @group: configfs group of the initiator node ACL directory
 * @name:  directory name supplied by userspace; must be "lun_<number>"
 *
 * Parses the mapped LUN index out of @name and allocates the
 * struct se_lun_acl for this initiator node.
 *
 * Returns the new MappedLUN config_group, or an ERR_PTR on failure.
 */
static struct config_group *target_fabric_make_mappedlun(
	struct config_group *group,
	const char *name)
{
	struct se_node_acl *se_nacl = container_of(group,
			struct se_node_acl, acl_group);
	struct se_portal_group *se_tpg = se_nacl->se_tpg;
	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
	struct se_lun_acl *lacl;
	struct config_item *acl_ci;
	char *buf;
	unsigned long mapped_lun;
	int ret = 0;

	acl_ci = &group->cg_item;
	if (!(acl_ci)) {
		printk(KERN_ERR "Unable to locatel acl_ci\n");
		return NULL;
	}
	/* Local writable copy of @name for the strstr()/strtoul parsing */
	buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
	if (!(buf)) {
		printk(KERN_ERR "Unable to allocate memory for name buf\n");
		return ERR_PTR(-ENOMEM);
	}
	snprintf(buf, strlen(name) + 1, "%s", name);
	/*
	 * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID.
	 */
	if (strstr(buf, "lun_") != buf) {
		printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s"
			" name: %s\n", buf, name);
		ret = -EINVAL;
		goto out;
	}
	/*
	 * Determine the Mapped LUN value. This is what the SCSI Initiator
	 * Port will actually see.
	 */
	if (strict_strtoul(buf + 4, 0, &mapped_lun) || mapped_lun > UINT_MAX) {
		ret = -EINVAL;
		goto out;
	}

	/* On failure, lacl is NULL and ret holds the negative errno */
	lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
			config_item_name(acl_ci), &ret);
	if (!(lacl))
		goto out;

	config_group_init_type_name(&lacl->se_lun_group, name,
			&TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit);

	kfree(buf);
	return &lacl->se_lun_group;
out:
	kfree(buf);
	return ERR_PTR(ret);
}
/*
 * target_fabric_drop_mappedlun - rmdir(2) of acls/$INITIATOR/lun_$ID
 * @group: parent node ACL configfs group
 * @item:  the MappedLUN item being removed
 *
 * Drops the configfs reference first, then releases the LUN ACL.
 */
static void target_fabric_drop_mappedlun(
	struct config_group *group,
	struct config_item *item)
{
	struct se_lun_acl *lun_acl = container_of(to_config_group(item),
			struct se_lun_acl, se_lun_group);
	struct se_portal_group *tpg = lun_acl->se_lun_nacl->se_tpg;

	config_item_put(item);
	core_dev_free_initiator_node_lun_acl(tpg, lun_acl);
}
static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
.show_attribute = target_fabric_nacl_base_attr_show,
.store_attribute = target_fabric_nacl_base_attr_store,
};
static struct configfs_group_operations target_fabric_nacl_base_group_ops = {
.make_group = target_fabric_make_mappedlun,
.drop_item = target_fabric_drop_mappedlun,
};
TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
&target_fabric_nacl_base_group_ops, NULL);
/* End of tfc_tpg_nacl_base_cit */
/* Start of tfc_tpg_nacl_cit */
/*
 * target_fabric_make_nodeacl - mkdir(2) of tpgt_$TPGT/acls/$INITIATOR
 * @group: configfs group of the TPG's acls/ directory
 * @name:  initiator WWN string supplied by userspace
 *
 * Dispatches to the fabric's ->fabric_make_nodeacl() and wires up the
 * default attrib/auth/param child groups on the new node ACL.
 *
 * Returns the new ACL config_group, or an ERR_PTR on failure.
 *
 * Fix: use ERR_CAST() instead of the redundant ERR_PTR(PTR_ERR(...))
 * round-trip when propagating the fabric's error pointer.
 */
static struct config_group *target_fabric_make_nodeacl(
	struct config_group *group,
	const char *name)
{
	struct se_portal_group *se_tpg = container_of(group,
			struct se_portal_group, tpg_acl_group);
	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
	struct se_node_acl *se_nacl;
	struct config_group *nacl_cg;

	if (!(tf->tf_ops.fabric_make_nodeacl)) {
		printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n");
		return ERR_PTR(-ENOSYS);
	}

	se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
	if (IS_ERR(se_nacl))
		return ERR_CAST(se_nacl);

	/* Populate the default attrib/auth/param groups under the ACL */
	nacl_cg = &se_nacl->acl_group;
	nacl_cg->default_groups = se_nacl->acl_default_groups;
	nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group;
	nacl_cg->default_groups[1] = &se_nacl->acl_auth_group;
	nacl_cg->default_groups[2] = &se_nacl->acl_param_group;
	nacl_cg->default_groups[3] = NULL;

	config_group_init_type_name(&se_nacl->acl_group, name,
			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit);
	config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit);
	config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit);
	config_group_init_type_name(&se_nacl->acl_param_group, "param",
			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit);

	return &se_nacl->acl_group;
}
/*
 * target_fabric_drop_nodeacl - rmdir(2) of tpgt_$TPGT/acls/$INITIATOR
 * @group: configfs group of the TPG's acls/ directory
 * @item:  config_item of the node ACL being removed
 *
 * Releases the default attrib/auth/param child groups first, then the
 * ACL item itself, before handing the se_node_acl back to the fabric
 * module for teardown.
 */
static void target_fabric_drop_nodeacl(
	struct config_group *group,
	struct config_item *item)
{
	struct se_portal_group *se_tpg = container_of(group,
			struct se_portal_group, tpg_acl_group);
	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
	struct se_node_acl *se_nacl = container_of(to_config_group(item),
			struct se_node_acl, acl_group);
	struct config_item *df_item;
	struct config_group *nacl_cg;
	int i;

	nacl_cg = &se_nacl->acl_group;
	/* Put references on the default child groups wired up at create time */
	for (i = 0; nacl_cg->default_groups[i]; i++) {
		df_item = &nacl_cg->default_groups[i]->cg_item;
		nacl_cg->default_groups[i] = NULL;
		config_item_put(df_item);
	}
	/* Drop the ACL's own configfs reference, then let the fabric free it */
	config_item_put(item);
	tf->tf_ops.fabric_drop_nodeacl(se_nacl);
}
static struct configfs_group_operations target_fabric_nacl_group_ops = {
	.make_group = target_fabric_make_nodeacl,
	.drop_item = target_fabric_drop_nodeacl,
};

/* tpg_$N/acls/ has no attributes of its own, only child acl groups */
TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);

/* End of tfc_tpg_nacl_cit */
/* Start of tfc_tpg_np_base_cit */

/* Generates show/store dispatchers for se_tpg_np extended attributes */
CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);

static struct configfs_item_operations target_fabric_np_base_item_ops = {
	.show_attribute = target_fabric_np_base_attr_show,
	.store_attribute = target_fabric_np_base_attr_store,
};

TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL);

/* End of tfc_tpg_np_base_cit */
/* Start of tfc_tpg_np_cit */

/*
 * configfs ->make_group() for tpg_$N/np/: ask the fabric module to create
 * a new network portal (struct se_tpg_np) below this TPG.
 *
 * Returns the portal config_group, or an ERR_PTR on failure.
 */
static struct config_group *target_fabric_make_np(
	struct config_group *group,
	const char *name)
{
	struct se_portal_group *se_tpg = container_of(group,
			struct se_portal_group, tpg_np_group);
	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
	struct se_tpg_np *se_tpg_np;

	if (!(tf->tf_ops.fabric_make_np)) {
		printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n");
		return ERR_PTR(-ENOSYS);
	}

	se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
	/* Propagate the fabric module's error code instead of folding to -EINVAL */
	if (IS_ERR(se_tpg_np))
		return ERR_CAST(se_tpg_np);
	if (!se_tpg_np)
		return ERR_PTR(-EINVAL);

	config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
			&TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);

	return &se_tpg_np->tpg_np_group;
}
/*
 * configfs ->drop_item() for tpg_$N/np/$NP: release the configfs
 * reference, then let the fabric module tear the portal down.
 */
static void target_fabric_drop_np(
	struct config_group *group,
	struct config_item *item)
{
	struct se_portal_group *tpg = container_of(group,
			struct se_portal_group, tpg_np_group);
	struct target_fabric_configfs *fabric = tpg->se_tpg_wwn->wwn_tf;
	struct se_tpg_np *tpg_np = container_of(to_config_group(item),
			struct se_tpg_np, tpg_np_group);

	/* Drop the configfs reference before calling into the fabric */
	config_item_put(item);
	fabric->tf_ops.fabric_drop_np(tpg_np);
}
/*
 * Group ops for tpg_$N/np/.  The redundant '&' on the function designators
 * is dropped for consistency with the other group_ops in this file.
 */
static struct configfs_group_operations target_fabric_np_group_ops = {
	.make_group = target_fabric_make_np,
	.drop_item = target_fabric_drop_np,
};

TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL);

/* End of tfc_tpg_np_cit */
/* Start of tfc_tpg_port_cit */

CONFIGFS_EATTR_STRUCT(target_fabric_port, se_lun);
/* Declare a read-write port attribute backed by the show/store pair below */
#define TCM_PORT_ATTR(_name, _mode)					\
static struct target_fabric_port_attribute target_fabric_port_##_name =	\
	__CONFIGFS_EATTR(_name, _mode,					\
	target_fabric_port_show_attr_##_name,				\
	target_fabric_port_store_attr_##_name);

/* NOTE(review): "ATTOR" looks like a typo for "ATTR"; unused in this chunk,
 * so renaming is deferred in case later code spells it the same way. */
#define TCM_PORT_ATTOR_RO(_name)					\
	__CONFIGFS_EATTR_RO(_name,					\
	target_fabric_port_show_attr_##_name);
/*
 * alua_tg_pt_gp: ALUA target port group membership for this exported LUN.
 */
static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
	struct se_lun *lun,
	char *page)
{
	/* Require an active port symlink before touching ALUA state */
	if (!lun || !lun->lun_sep)
		return -ENODEV;

	return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
}

static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	if (!lun || !lun->lun_sep)
		return -ENODEV;

	return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
}

TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR);
/*
 * alua_tg_pt_offline: secondary ALUA access state (offline bit).
 */
static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
	struct se_lun *lun,
	char *page)
{
	/* Require an active port symlink before touching ALUA state */
	if (!lun || !lun->lun_sep)
		return -ENODEV;

	return core_alua_show_offline_bit(lun, page);
}

static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	if (!lun || !lun->lun_sep)
		return -ENODEV;

	return core_alua_store_offline_bit(lun, page, count);
}

TCM_PORT_ATTR(alua_tg_pt_offline, S_IRUGO | S_IWUSR);
/*
 * alua_tg_pt_status: secondary ALUA access status for this port.
 */
static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
	struct se_lun *lun,
	char *page)
{
	/* Require an active port symlink before touching ALUA state */
	if (!lun || !lun->lun_sep)
		return -ENODEV;

	return core_alua_show_secondary_status(lun, page);
}

static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	if (!lun || !lun->lun_sep)
		return -ENODEV;

	return core_alua_store_secondary_status(lun, page, count);
}

TCM_PORT_ATTR(alua_tg_pt_status, S_IRUGO | S_IWUSR);
/*
 * alua_tg_pt_write_md: controls writing secondary ALUA state to metadata.
 */
static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
	struct se_lun *lun,
	char *page)
{
	/* Require an active port symlink before touching ALUA state */
	if (!lun || !lun->lun_sep)
		return -ENODEV;

	return core_alua_show_secondary_write_metadata(lun, page);
}

static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	if (!lun || !lun->lun_sep)
		return -ENODEV;

	return core_alua_store_secondary_write_metadata(lun, page, count);
}

TCM_PORT_ATTR(alua_tg_pt_write_md, S_IRUGO | S_IWUSR);
/* ALUA attributes exported below each tpg_$N/lun/lun_$ID port group */
static struct configfs_attribute *target_fabric_port_attrs[] = {
	&target_fabric_port_alua_tg_pt_gp.attr,
	&target_fabric_port_alua_tg_pt_offline.attr,
	&target_fabric_port_alua_tg_pt_status.attr,
	&target_fabric_port_alua_tg_pt_write_md.attr,
	NULL,
};

CONFIGFS_EATTR_OPS(target_fabric_port, se_lun, lun_group);
/*
 * ->allow_link() for tpg_$N/lun/lun_$ID -> $HBA/$DEV symlinks: export a
 * backend se_device as a LUN on this fabric TPG via core_dev_add_lun().
 *
 * Returns 0 on success or a negative errno.  The original routed every
 * failure through a do-nothing "goto out" label; plain returns are used
 * instead, with identical error codes.
 */
static int target_fabric_port_link(
	struct config_item *lun_ci,
	struct config_item *se_dev_ci)
{
	struct config_item *tpg_ci;
	struct se_device *dev;
	struct se_lun *lun = container_of(to_config_group(lun_ci),
			struct se_lun, lun_group);
	struct se_lun *lun_p;
	struct se_portal_group *se_tpg;
	struct se_subsystem_dev *se_dev = container_of(
			to_config_group(se_dev_ci), struct se_subsystem_dev,
			se_dev_group);
	struct target_fabric_configfs *tf;

	/* Walk up lun_$ID -> lun/ -> tpg_$N to locate the owning TPG */
	tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
	se_tpg = container_of(to_config_group(tpg_ci),
			struct se_portal_group, tpg_group);
	tf = se_tpg->se_tpg_wwn->wwn_tf;

	if (lun->lun_se_dev != NULL) {
		printk(KERN_ERR "Port Symlink already exists\n");
		return -EEXIST;
	}

	dev = se_dev->se_dev_ptr;
	if (!(dev)) {
		printk(KERN_ERR "Unable to locate struct se_device pointer from"
			" %s\n", config_item_name(se_dev_ci));
		return -ENODEV;
	}

	lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
			lun->unpacked_lun);
	if ((IS_ERR(lun_p)) || !(lun_p)) {
		printk(KERN_ERR "core_dev_add_lun() failed\n");
		return -EINVAL;
	}

	if (tf->tf_ops.fabric_post_link) {
		/*
		 * Call the optional fabric_post_link() to allow a
		 * fabric module to setup any additional state once
		 * core_dev_add_lun() has been called..
		 */
		tf->tf_ops.fabric_post_link(se_tpg, lun);
	}

	return 0;
}
/*
 * ->drop_link() for tpg_$N/lun/lun_$ID symlinks: un-export the LUN from
 * this TPG via core_dev_del_lun().  Always returns 0.
 */
static int target_fabric_port_unlink(
	struct config_item *lun_ci,
	struct config_item *se_dev_ci)
{
	struct se_lun *lun = container_of(to_config_group(lun_ci),
			struct se_lun, lun_group);
	struct se_portal_group *tpg = lun->lun_sep->sep_tpg;
	struct target_fabric_configfs *fabric = tpg->se_tpg_wwn->wwn_tf;

	/*
	 * Optional fabric callback, invoked so the fabric module can
	 * release any additional state before core_dev_del_lun() runs.
	 */
	if (fabric->tf_ops.fabric_pre_unlink)
		fabric->tf_ops.fabric_pre_unlink(tpg, lun);

	core_dev_del_lun(tpg, lun->unpacked_lun);
	return 0;
}
static struct configfs_item_operations target_fabric_port_item_ops = {
	.show_attribute = target_fabric_port_attr_show,
	.store_attribute = target_fabric_port_attr_store,
	/* Creating/removing a symlink to a backend device exports/removes the LUN */
	.allow_link = target_fabric_port_link,
	.drop_link = target_fabric_port_unlink,
};

TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_attrs);

/* End of tfc_tpg_port_cit */
/* Start of tfc_tpg_lun_cit */

/*
 * configfs ->make_group() for tpg_$N/lun/lun_$LUN_NUMBER: parse the LUN
 * number out of the group name and return the pre-allocated se_lun's
 * config_group, or an ERR_PTR on failure.
 */
static struct config_group *target_fabric_make_lun(
	struct config_group *group,
	const char *name)
{
	struct se_lun *lun;
	struct se_portal_group *se_tpg = container_of(group,
			struct se_portal_group, tpg_lun_group);
	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
	unsigned long unpacked_lun;

	/* The group name must begin with the "lun_" prefix */
	if (strstr(name, "lun_") != name) {
		printk(KERN_ERR "Unable to locate \"lun_\" in"
			" \"lun_$LUN_NUMBER\"\n");
		return ERR_PTR(-EINVAL);
	}
	if (strict_strtoul(name + 4, 0, &unpacked_lun) || unpacked_lun > UINT_MAX)
		return ERR_PTR(-EINVAL);

	lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
	if (!(lun))
		return ERR_PTR(-EINVAL);

	config_group_init_type_name(&lun->lun_group, name,
			&TF_CIT_TMPL(tf)->tfc_tpg_port_cit);

	return &lun->lun_group;
}
/*
 * configfs ->drop_item() for tpg_$N/lun/lun_$ID: only the configfs
 * reference is dropped; the se_lun itself belongs to the TPG.
 */
static void target_fabric_drop_lun(
	struct config_group *group,
	struct config_item *item)
{
	config_item_put(item);
}

static struct configfs_group_operations target_fabric_lun_group_ops = {
	.make_group = &target_fabric_make_lun,
	.drop_item = &target_fabric_drop_lun,
};

TF_CIT_SETUP(tpg_lun, NULL, &target_fabric_lun_group_ops, NULL);

/* End of tfc_tpg_lun_cit */
/* Start of tfc_tpg_attrib_cit */

/* Show/store dispatch for fabric-defined per-TPG attrib attributes */
CONFIGFS_EATTR_OPS(target_fabric_tpg_attrib, se_portal_group, tpg_attrib_group);

static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = {
	.show_attribute = target_fabric_tpg_attrib_attr_show,
	.store_attribute = target_fabric_tpg_attrib_attr_store,
};

TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL);

/* End of tfc_tpg_attrib_cit */

/* Start of tfc_tpg_param_cit */

/* Show/store dispatch for fabric-defined per-TPG param attributes */
CONFIGFS_EATTR_OPS(target_fabric_tpg_param, se_portal_group, tpg_param_group);

static struct configfs_item_operations target_fabric_tpg_param_item_ops = {
	.show_attribute = target_fabric_tpg_param_attr_show,
	.store_attribute = target_fabric_tpg_param_attr_store,
};

TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);

/* End of tfc_tpg_param_cit */
/* Start of tfc_tpg_base_cit */
/*
 * For use with TF_TPG_ATTR() and TF_TPG_ATTR_RO()
 */
CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);

static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
	.show_attribute = target_fabric_tpg_attr_show,
	.store_attribute = target_fabric_tpg_attr_store,
};

TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL);

/* End of tfc_tpg_base_cit */
/* Start of tfc_tpg_cit */

/*
 * configfs ->make_group() for $WWN/tpg_$N: ask the fabric module to
 * allocate a struct se_portal_group, then register its pre-allocated
 * default child groups (lun/, np/, acls/, attrib/, param/).
 *
 * NOTE(review): an error from fabric_make_tpg() is folded into -EINVAL
 * rather than propagated -- confirm whether callers depend on that.
 */
static struct config_group *target_fabric_make_tpg(
	struct config_group *group,
	const char *name)
{
	struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
	struct target_fabric_configfs *tf = wwn->wwn_tf;
	struct se_portal_group *se_tpg;

	if (!(tf->tf_ops.fabric_make_tpg)) {
		printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n");
		return ERR_PTR(-ENOSYS);
	}

	se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
	if (!(se_tpg) || IS_ERR(se_tpg))
		return ERR_PTR(-EINVAL);
	/*
	 * Setup default groups from pre-allocated se_tpg->tpg_default_groups
	 */
	se_tpg->tpg_group.default_groups = se_tpg->tpg_default_groups;
	se_tpg->tpg_group.default_groups[0] = &se_tpg->tpg_lun_group;
	se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group;
	se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group;
	se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group;
	se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_param_group;
	se_tpg->tpg_group.default_groups[5] = NULL;

	config_group_init_type_name(&se_tpg->tpg_group, name,
			&TF_CIT_TMPL(tf)->tfc_tpg_base_cit);
	config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
			&TF_CIT_TMPL(tf)->tfc_tpg_lun_cit);
	config_group_init_type_name(&se_tpg->tpg_np_group, "np",
			&TF_CIT_TMPL(tf)->tfc_tpg_np_cit);
	config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
			&TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit);
	config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
			&TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit);
	config_group_init_type_name(&se_tpg->tpg_param_group, "param",
			&TF_CIT_TMPL(tf)->tfc_tpg_param_cit);

	return &se_tpg->tpg_group;
}
/*
 * configfs ->drop_item() for $WWN/tpg_$N: release the default child group
 * references and the tpg group itself, then let the fabric module free
 * the struct se_portal_group.
 */
static void target_fabric_drop_tpg(
	struct config_group *group,
	struct config_item *item)
{
	struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
	struct target_fabric_configfs *tf = wwn->wwn_tf;
	struct se_portal_group *se_tpg = container_of(to_config_group(item),
				struct se_portal_group, tpg_group);
	struct config_group *tpg_cg = &se_tpg->tpg_group;
	struct config_item *df_item;
	int i;
	/*
	 * Release default groups, but do not release tpg_cg->default_groups
	 * memory as it is statically allocated at se_tpg->tpg_default_groups.
	 */
	for (i = 0; tpg_cg->default_groups[i]; i++) {
		df_item = &tpg_cg->default_groups[i]->cg_item;
		tpg_cg->default_groups[i] = NULL;
		config_item_put(df_item);
	}

	/* Drop the tpg group reference before calling into the fabric */
	config_item_put(item);
	tf->tf_ops.fabric_drop_tpg(se_tpg);
}
static struct configfs_group_operations target_fabric_tpg_group_ops = {
	.make_group = target_fabric_make_tpg,
	.drop_item = target_fabric_drop_tpg,
};

/* $WWN/ itself only contains tpg_$N child groups, no attributes */
TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL);

/* End of tfc_tpg_cit */
/* Start of tfc_wwn_cit */

/*
 * configfs ->make_group() for the fabric's top-level $WWN directory: ask
 * the fabric module to allocate a struct se_wwn for the new endpoint.
 *
 * NOTE(review): as with make_tpg, a fabric error is folded into -EINVAL.
 */
static struct config_group *target_fabric_make_wwn(
	struct config_group *group,
	const char *name)
{
	struct target_fabric_configfs *tf = container_of(group,
				struct target_fabric_configfs, tf_group);
	struct se_wwn *wwn;

	if (!(tf->tf_ops.fabric_make_wwn)) {
		printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n");
		return ERR_PTR(-ENOSYS);
	}

	wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
	if (!(wwn) || IS_ERR(wwn))
		return ERR_PTR(-EINVAL);

	/* Back-pointer used by the tpg level to reach the fabric template */
	wwn->wwn_tf = tf;

	config_group_init_type_name(&wwn->wwn_group, name,
			&TF_CIT_TMPL(tf)->tfc_tpg_cit);

	return &wwn->wwn_group;
}
/*
 * configfs ->drop_item() for a fabric $WWN directory: release the configfs
 * reference, then let the fabric module free the struct se_wwn.
 */
static void target_fabric_drop_wwn(
	struct config_group *group,
	struct config_item *item)
{
	struct target_fabric_configfs *fabric = container_of(group,
			struct target_fabric_configfs, tf_group);
	struct se_wwn *endpoint = container_of(to_config_group(item),
			struct se_wwn, wwn_group);

	/* Drop the configfs reference before calling into the fabric */
	config_item_put(item);
	fabric->tf_ops.fabric_drop_wwn(endpoint);
}
static struct configfs_group_operations target_fabric_wwn_group_ops = {
	.make_group = target_fabric_make_wwn,
	.drop_item = target_fabric_drop_wwn,
};
/*
 * For use with TF_WWN_ATTR() and TF_WWN_ATTR_RO()
 */
CONFIGFS_EATTR_OPS(target_fabric_wwn, target_fabric_configfs, tf_group);

static struct configfs_item_operations target_fabric_wwn_item_ops = {
	.show_attribute = target_fabric_wwn_attr_show,
	.store_attribute = target_fabric_wwn_attr_store,
};

TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL);

/* End of tfc_wwn_cit */
/* Start of tfc_discovery_cit */

/* Show/store dispatch for fabric-defined discovery_auth attributes */
CONFIGFS_EATTR_OPS(target_fabric_discovery, target_fabric_configfs,
		tf_disc_group);

static struct configfs_item_operations target_fabric_discovery_item_ops = {
	.show_attribute = target_fabric_discovery_attr_show,
	.store_attribute = target_fabric_discovery_attr_store,
};

TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL);

/* End of tfc_discovery_cit */
/*
 * Initialize every config_item_type inside the fabric's TF_CIT_TMPL(),
 * wiring in the item/group ops and attribute tables defined above.
 * Called when a fabric module registers with the target core.
 * Always returns 0.
 */
int target_fabric_setup_cits(struct target_fabric_configfs *tf)
{
	target_fabric_setup_discovery_cit(tf);
	target_fabric_setup_wwn_cit(tf);
	target_fabric_setup_tpg_cit(tf);
	target_fabric_setup_tpg_base_cit(tf);
	target_fabric_setup_tpg_port_cit(tf);
	target_fabric_setup_tpg_lun_cit(tf);
	target_fabric_setup_tpg_np_cit(tf);
	target_fabric_setup_tpg_np_base_cit(tf);
	target_fabric_setup_tpg_attrib_cit(tf);
	target_fabric_setup_tpg_param_cit(tf);
	target_fabric_setup_tpg_nacl_cit(tf);
	target_fabric_setup_tpg_nacl_base_cit(tf);
	target_fabric_setup_tpg_nacl_attrib_cit(tf);
	target_fabric_setup_tpg_nacl_auth_cit(tf);
	target_fabric_setup_tpg_nacl_param_cit(tf);
	target_fabric_setup_tpg_mappedlun_cit(tf);

	return 0;
}

View File

@ -0,0 +1,451 @@
/*******************************************************************************
* Filename: target_core_fabric_lib.c
*
* This file contains generic high level protocol identifier and PR
* handlers for TCM fabric modules
*
* Copyright (c) 2010 Rising Tide Systems, Inc.
* Copyright (c) 2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>
#include "target_core_hba.h"
#include "target_core_pr.h"
/*
 * Handlers for Serial Attached SCSI (SAS)
 */

/* Returns the SPC-4 PROTOCOL IDENTIFIER for SAS; se_tpg is unused. */
u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	/*
	 * Return a SAS Serial SCSI Protocol identifier for loopback operations
	 * This is defined in section 7.5.1 Table 362 in spc4r17
	 */
	return 0x6;
}
EXPORT_SYMBOL(sas_get_fabric_proto_ident);
/*
 * Build a SAS TransportID into @buf for the given initiator ACL.
 * Returns the fixed TransportID length (24 bytes).
 *
 * NOTE(review): assumes se_nacl->initiatorname is of the form
 * "naa." followed by 16 hex digits -- verify at ACL creation time.
 */
u32 sas_get_pr_transport_id(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code,
	unsigned char *buf)
{
	unsigned char binary, *ptr;
	int i;
	u32 off = 4;
	/*
	 * Set PROTOCOL IDENTIFIER to 6h for SAS
	 */
	buf[0] = 0x06;
	/*
	 * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
	 * over SAS Serial SCSI Protocol
	 */
	ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */

	/* Pack each pair of ASCII hex digits into one binary byte */
	for (i = 0; i < 16; i += 2) {
		binary = transport_asciihex_to_binaryhex(&ptr[i]);
		buf[off++] = binary;
	}
	/*
	 * The SAS Transport ID is a hardcoded 24-byte length
	 */
	return 24;
}
EXPORT_SYMBOL(sas_get_pr_transport_id);
/*
 * Report the length of a SAS TransportID and set @format_code to 0,
 * the only format SAS defines.
 */
u32 sas_get_pr_transport_id_len(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code)
{
	*format_code = 0;
	/*
	 * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
	 * over SAS Serial SCSI Protocol
	 *
	 * The SAS Transport ID is a hardcoded 24-byte length
	 */
	return 24;
}
EXPORT_SYMBOL(sas_get_pr_transport_id_len);
/*
 * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
 * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
 *
 * Returns a pointer to the SAS address inside @buf (no copy is made) and
 * stores the fixed TransportID length in @out_tid_len.
 */
char *sas_parse_pr_out_transport_id(
	struct se_portal_group *se_tpg,
	const char *buf,
	u32 *out_tid_len,
	char **port_nexus_ptr)
{
	/*
	 * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
	 * for initiator ports using SCSI over SAS Serial SCSI Protocol
	 *
	 * The TransportID for a SAS Initiator Port is of fixed size of
	 * 24 bytes, and SAS does not contain a I_T nexus identifier,
	 * so we return the **port_nexus_ptr set to NULL.
	 */
	*port_nexus_ptr = NULL;
	*out_tid_len = 24;

	return (char *)&buf[4];
}
EXPORT_SYMBOL(sas_parse_pr_out_transport_id);
/*
 * Handlers for Fibre Channel Protocol (FCP)
 */

/* Returns the SPC-4 PROTOCOL IDENTIFIER for FCP; se_tpg is unused. */
u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	return 0x0;	/* 0 = fcp-2 per SPC4 section 7.5.1 */
}
EXPORT_SYMBOL(fc_get_fabric_proto_ident);
/*
 * Report the length of an FC TransportID and set @format_code to 0,
 * the only format FCP defines.
 */
u32 fc_get_pr_transport_id_len(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code)
{
	*format_code = 0;
	/*
	 * The FC Transport ID is a hardcoded 24-byte length
	 */
	return 24;
}
EXPORT_SYMBOL(fc_get_pr_transport_id_len);
/*
 * Build an FC TransportID into @buf for the given initiator ACL.
 * Returns the fixed TransportID length (24 bytes).
 *
 * NOTE(review): assumes initiatorname is a colon-separated ASCII WWPN
 * ("xx:xx:...:xx", 23 chars); with that format the loop's final i == 23
 * iteration converts the NUL terminator and the byte after it -- confirm
 * initiatorname is a zero-padded fixed-size array so the read stays
 * in bounds.
 */
u32 fc_get_pr_transport_id(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code,
	unsigned char *buf)
{
	unsigned char binary, *ptr;
	int i;
	u32 off = 8;
	/*
	 * PROTOCOL IDENTIFIER is 0h for FCP-2
	 *
	 * From spc4r17, 7.5.4.2 TransportID for initiator ports using
	 * SCSI over Fibre Channel
	 *
	 * We convert the ASCII formatted N Port name into a binary
	 * encoded TransportID.
	 */
	ptr = &se_nacl->initiatorname[0];

	for (i = 0; i < 24; ) {
		/* Skip over the ':' separators between hex digit pairs */
		if (!(strncmp(&ptr[i], ":", 1))) {
			i++;
			continue;
		}
		binary = transport_asciihex_to_binaryhex(&ptr[i]);
		buf[off++] = binary;
		i += 2;
	}
	/*
	 * The FC Transport ID is a hardcoded 24-byte length
	 */
	return 24;
}
EXPORT_SYMBOL(fc_get_pr_transport_id);
/*
 * Parse an FC TransportID: return a pointer to the binary N Port name
 * inside @buf and store the fixed TransportID length in @out_tid_len.
 */
char *fc_parse_pr_out_transport_id(
	struct se_portal_group *se_tpg,
	const char *buf,
	u32 *out_tid_len,
	char **port_nexus_ptr)
{
	/*
	 * The TransportID for a FC N Port is of fixed size of
	 * 24 bytes, and FC does not contain a I_T nexus identifier,
	 * so we return the **port_nexus_ptr set to NULL.
	 */
	*port_nexus_ptr = NULL;
	*out_tid_len = 24;

	 return (char *)&buf[8];
}
EXPORT_SYMBOL(fc_parse_pr_out_transport_id);
/*
 * Handlers for Internet Small Computer Systems Interface (iSCSI)
 */

/* Returns the SPC-4 PROTOCOL IDENTIFIER for iSCSI; se_tpg is unused. */
u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	/*
	 * This value is defined for "Internet SCSI (iSCSI)"
	 * in spc4r17 section 7.5.1 Table 362
	 */
	return 0x5;
}
EXPORT_SYMBOL(iscsi_get_fabric_proto_ident);
/*
 * Build an iSCSI TransportID into @buf for the given initiator ACL,
 * using the initiator *device* format (00b), or the initiator *port*
 * format (01b) when an ISID was present at registration time and the
 * caller requested it via *format_code == 1.
 *
 * Returns the total TransportID length including the 4-byte header.
 * nacl_sess_lock is held while initiatorname and the ISID are read.
 */
u32 iscsi_get_pr_transport_id(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code,
	unsigned char *buf)
{
	u32 off = 4, padding = 0;
	u16 len = 0;

	spin_lock_irq(&se_nacl->nacl_sess_lock);
	/*
	 * Set PROTOCOL IDENTIFIER to 5h for iSCSI
	 */
	buf[0] = 0x05;
	/*
	 * From spc4r17 Section 7.5.4.6: TransportID for initiator
	 * ports using SCSI over iSCSI.
	 *
	 * The null-terminated, null-padded (see 4.4.2) ISCSI NAME field
	 * shall contain the iSCSI name of an iSCSI initiator node (see
	 * RFC 3720). The first ISCSI NAME field byte containing an ASCII
	 * null character terminates the ISCSI NAME field without regard for
	 * the specified length of the iSCSI TransportID or the contents of
	 * the ADDITIONAL LENGTH field.
	 */
	len = sprintf(&buf[off], "%s", se_nacl->initiatorname);
	/*
	 * Add Extra byte for NULL terminator
	 */
	len++;
	/*
	 * If there is ISID present with the registration and *format code ==
	 * 1, use iSCSI Initiator port TransportID format.
	 *
	 * Otherwise use iSCSI Initiator device TransportID format that
	 * does not contain the ASCII encoded iSCSI Initiator iSID value
	 * provided by the iSCSI Initiator during the iSCSI login process.
	 */
	if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) {
		/*
		 * Set FORMAT CODE 01b for iSCSI Initiator port TransportID
		 * format.
		 */
		buf[0] |= 0x40;
		/*
		 * From spc4r17 Section 7.5.4.6: TransportID for initiator
		 * ports using SCSI over iSCSI.  Table 390
		 *
		 * The SEPARATOR field shall contain the five ASCII
		 * characters ",i,0x".
		 *
		 * The null-terminated, null-padded ISCSI INITIATOR SESSION ID
		 * field shall contain the iSCSI initiator session identifier
		 * (see RFC 3720) in the form of ASCII characters that are the
		 * hexadecimal digits converted from the binary iSCSI initiator
		 * session identifier value. The first ISCSI INITIATOR SESSION
		 * ID field byte containing an ASCII null character
		 */
		buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
		buf[off+len] = 0x69; off++; /* ASCII Character: "i" */
		buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
		buf[off+len] = 0x30; off++; /* ASCII Character: "0" */
		buf[off+len] = 0x78; off++; /* ASCII Character: "x" */
		len += 5;
		/*
		 * NOTE(review): 'off' has already advanced 5 for the
		 * separator AND 'len' was bumped by 5 above, so these
		 * buf[off+len] stores land 5 bytes past the separator,
		 * apparently leaving a 5-byte gap before the ISID.
		 * Verify against a live initiator before changing.
		 */
		buf[off+len] = pr_reg->pr_reg_isid[0]; off++;
		buf[off+len] = pr_reg->pr_reg_isid[1]; off++;
		buf[off+len] = pr_reg->pr_reg_isid[2]; off++;
		buf[off+len] = pr_reg->pr_reg_isid[3]; off++;
		buf[off+len] = pr_reg->pr_reg_isid[4]; off++;
		buf[off+len] = pr_reg->pr_reg_isid[5]; off++;
		buf[off+len] = '\0'; off++;
		len += 7;
	}
	spin_unlock_irq(&se_nacl->nacl_sess_lock);
	/*
	 * The ADDITIONAL LENGTH field specifies the number of bytes that follow
	 * in the TransportID. The additional length shall be at least 20 and
	 * shall be a multiple of four.
	 */
	padding = ((-len) & 3);
	if (padding != 0)
		len += padding;

	buf[2] = ((len >> 8) & 0xff);
	buf[3] = (len & 0xff);
	/*
	 * Increment value for total payload + header length for
	 * full status descriptor
	 */
	len += 4;

	return len;
}
EXPORT_SYMBOL(iscsi_get_pr_transport_id);
/*
 * Compute the length iscsi_get_pr_transport_id() would produce for this
 * ACL/registration, and select the format code (00b device format, or
 * 01b port format when an ISID was present at registration).
 * nacl_sess_lock is held while initiatorname is measured.
 */
u32 iscsi_get_pr_transport_id_len(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code)
{
	u32 len = 0, padding = 0;

	spin_lock_irq(&se_nacl->nacl_sess_lock);
	len = strlen(se_nacl->initiatorname);
	/*
	 * Add extra byte for NULL terminator
	 */
	len++;
	/*
	 * If there is ISID present with the registration, use format code:
	 * 01b: iSCSI Initiator port TransportID format
	 *
	 * If there is not an active iSCSI session, use format code:
	 * 00b: iSCSI Initiator device TransportID format
	 */
	if (pr_reg->isid_present_at_reg) {
		len += 5; /* For ",i,0x" ASCII separator */
		len += 7; /* For iSCSI Initiator Session ID + Null terminator */
		*format_code = 1;
	} else
		*format_code = 0;
	spin_unlock_irq(&se_nacl->nacl_sess_lock);
	/*
	 * The ADDITIONAL LENGTH field specifies the number of bytes that follow
	 * in the TransportID. The additional length shall be at least 20 and
	 * shall be a multiple of four.
	 */
	padding = ((-len) & 3);
	if (padding != 0)
		len += padding;
	/*
	 * Increment value for total payload + header length for
	 * full status descriptor
	 */
	len += 4;

	return len;
}
EXPORT_SYMBOL(iscsi_get_pr_transport_id_len);
/*
 * Parse an iSCSI TransportID received in a PROUT payload: validate the
 * FORMAT CODE, optionally report the total TransportID length via
 * @out_tid_len, and for the port format (01b) split off the ",i,0x"
 * ISID suffix into @port_nexus_ptr (lower-casing its 12 hex digits).
 *
 * Returns a pointer to the NUL-terminated iSCSI name inside @buf, or
 * NULL on a malformed TransportID.  Note: the buffer is modified in
 * place ('\0' written over the separator).
 */
char *iscsi_parse_pr_out_transport_id(
	struct se_portal_group *se_tpg,
	const char *buf,
	u32 *out_tid_len,
	char **port_nexus_ptr)
{
	char *p;
	u32 tid_len, padding;
	int i;
	u16 add_len;
	u8 format_code = (buf[0] & 0xc0);
	/*
	 * Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6:
	 *
	 * TransportID for initiator ports using SCSI over iSCSI,
	 * from Table 388 -- iSCSI TransportID formats.
	 *
	 * 00b     Initiator port is identified using the world wide unique
	 *         SCSI device name of the iSCSI initiator
	 *         device containing the initiator port (see table 389).
	 * 01b     Initiator port is identified using the world wide unique
	 *         initiator port identifier (see table 390).10b to 11b
	 *         Reserved
	 */
	if ((format_code != 0x00) && (format_code != 0x40)) {
		printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI"
			" Initiator Transport ID\n", format_code);
		return NULL;
	}
	/*
	 * If the caller wants the TransportID Length, we set that value for the
	 * entire iSCSI Transport ID now.
	 */
	 if (out_tid_len != NULL) {
		/*
		 * ADDITIONAL LENGTH is a big-endian 16-bit field at bytes
		 * 2-3.  The original read "(buf[2] >> 8)", which is always
		 * zero for a byte and silently dropped the high byte.
		 */
		add_len = (((unsigned char)buf[2] << 8) |
				(unsigned char)buf[3]);

		tid_len = strlen((char *)&buf[4]);
		tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
		tid_len += 1; /* Add one byte for NULL terminator */
		padding = ((-tid_len) & 3);
		if (padding != 0)
			tid_len += padding;

		if ((add_len + 4) != tid_len) {
			printk(KERN_INFO "LIO-Target Extracted add_len: %hu "
				"does not match calculated tid_len: %u,"
				" using tid_len instead\n", add_len+4, tid_len);
			*out_tid_len = tid_len;
		} else
			*out_tid_len = (add_len + 4);
	}
	/*
	 * Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator
	 * Session ID as defined in Table 390 - iSCSI initiator port TransportID
	 * format.
	 */
	if (format_code == 0x40) {
		p = strstr((char *)&buf[4], ",i,0x");
		if (!(p)) {
			printk(KERN_ERR "Unable to locate \",i,0x\" seperator"
				" for Initiator port identifier: %s\n",
				(char *)&buf[4]);
			return NULL;
		}
		*p = '\0'; /* Terminate iSCSI Name */
		p += 5; /* Skip over ",i,0x" seperator */

		*port_nexus_ptr = p;
		/*
		 * Go ahead and do the lower case conversion of the received
		 * 12 ASCII characters representing the ISID in the TransportID
		 * for comparision against the running iSCSI session's ISID from
		 * iscsi_target.c:lio_sess_get_initiator_sid()
		 */
		for (i = 0; i < 12; i++) {
			if (isdigit(*p)) {
				p++;
				continue;
			}
			*p = tolower(*p);
			p++;
		}
	}

	return (char *)&buf[4];
}
EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id);

View File

@ -0,0 +1,688 @@
/*******************************************************************************
* Filename: target_core_file.c
*
* This file contains the Storage Engine <-> FILEIO transport specific functions
*
* Copyright (c) 2005 PyX Technologies, Inc.
* Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include "target_core_file.h"
/*
 * Compile-time debug switches: flip the "#if 1" to "#if 0" to silence
 * the FILEIO cache / FUA debug printk output.
 */
#if 1
#define DEBUG_FD_CACHE(x...) printk(x)
#else
#define DEBUG_FD_CACHE(x...)
#endif

#if 1
#define DEBUG_FD_FUA(x...) printk(x)
#else
#define DEBUG_FD_FUA(x...)
#endif

/* Forward declaration so fd_create_virtdevice() can pass &fileio_template */
static struct se_subsystem_api fileio_template;
/* fd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 * Allocate the per-HBA struct fd_host and initialize the HBA queue
 * depths.  Returns 0 on success or -ENOMEM on allocation failure
 * (negative errno per kernel convention, rather than a bare -1).
 */
static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct fd_host *fd_host;

	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
	if (!(fd_host)) {
		printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
		return -ENOMEM;
	}

	fd_host->fd_host_id = host_id;

	atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH);
	atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH);
	hba->hba_ptr = (void *) fd_host;

	printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
		TARGET_CORE_MOD_VERSION);
	printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
		" Target Core with TCQ Depth: %d MaxSectors: %u\n",
		hba->hba_id, fd_host->fd_host_id,
		atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS);

	return 0;
}
/*
 * Release the per-HBA struct fd_host allocated by fd_attach_hba().
 */
static void fd_detach_hba(struct se_hba *hba)
{
	struct fd_host *host = hba->hba_ptr;

	printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
		" Target Core\n", hba->hba_id, host->fd_host_id);

	kfree(host);
	hba->hba_ptr = NULL;
}
/*
 * Allocate the FILEIO-private struct fd_dev for a new virtual device.
 * Returns the fd_dev, or NULL on allocation failure.
 */
static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct fd_dev *fd_dev;
	struct fd_host *fd_host = hba->hba_ptr;

	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
	if (!(fd_dev)) {
		printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n");
		return NULL;
	}

	fd_dev->fd_host = fd_host;

	/* %s: 'name' is the configfs device name, not a pointer value */
	printk(KERN_INFO "FILEIO: Allocated fd_dev for %s\n", name);

	return fd_dev;
}
/* fd_create_virtdevice(): (Part of se_subsystem_api_t template)
 *
 * Open the backing file (or block device) named by fd_dev_name, derive
 * the device size/block limits, and register the device with the target
 * core via transport_add_device_to_core_hba().
 *
 * Returns the new struct se_device, or NULL on failure.
 *
 * Fixes vs. the original: the getname() failure printk logged the
 * boolean IS_ERR() result instead of PTR_ERR(), and the fail path
 * called putname() on the ERR_PTR returned by a failed getname().
 */
static struct se_device *fd_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	char *dev_p = NULL;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct queue_limits *limits;
	struct fd_dev *fd_dev = (struct fd_dev *) p;
	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
	mm_segment_t old_fs;
	struct file *file;
	struct inode *inode = NULL;
	int dev_flags = 0, flags;

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	/* fd_dev_name lives in kernel space; widen the addr limit for getname() */
	old_fs = get_fs();
	set_fs(get_ds());
	dev_p = getname(fd_dev->fd_dev_name);
	set_fs(old_fs);

	if (IS_ERR(dev_p)) {
		printk(KERN_ERR "getname(%s) failed: %ld\n",
			fd_dev->fd_dev_name, PTR_ERR(dev_p));
		/* dev_p is an ERR_PTR, not a name buffer; don't putname() it */
		dev_p = NULL;
		goto fail;
	}
#if 0
	if (di->no_create_file)
		flags = O_RDWR | O_LARGEFILE;
	else
		flags = O_RDWR | O_CREAT | O_LARGEFILE;
#else
	flags = O_RDWR | O_CREAT | O_LARGEFILE;
#endif
/*	flags |= O_DIRECT; */
	/*
	 * If fd_buffered_io=1 has not been set explicitly (the default),
	 * use O_SYNC to force FILEIO writes to disk.
	 */
	if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
		flags |= O_SYNC;

	file = filp_open(dev_p, flags, 0600);
	if (IS_ERR(file) || !file || !file->f_dentry) {
		printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
		goto fail;
	}
	fd_dev->fd_file = file;
	/*
	 * If using a block backend with this struct file, we extract
	 * fd_dev->fd_[block,dev]_size from struct block_device.
	 *
	 * Otherwise, we use the passed fd_size= from configfs
	 */
	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		struct request_queue *q;
		/*
		 * Setup the local scope queue_limits from struct request_queue->limits
		 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
		 */
		q = bdev_get_queue(inode->i_bdev);
		limits = &dev_limits.limits;
		limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
		limits->max_hw_sectors = queue_max_hw_sectors(q);
		limits->max_sectors = queue_max_sectors(q);
		/*
		 * Determine the number of bytes from i_size_read() minus
		 * one (1) logical sector from underlying struct block_device
		 */
		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
		fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
				       fd_dev->fd_block_size);

		printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct"
			" block_device blocks: %llu logical_block_size: %d\n",
			fd_dev->fd_dev_size,
			div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
			fd_dev->fd_block_size);
	} else {
		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
			printk(KERN_ERR "FILEIO: Missing fd_dev_size="
				" parameter, and no backing struct"
				" block_device\n");
			goto fail;
		}

		limits = &dev_limits.limits;
		limits->logical_block_size = FD_BLOCKSIZE;
		limits->max_hw_sectors = FD_MAX_SECTORS;
		limits->max_sectors = FD_MAX_SECTORS;
		fd_dev->fd_block_size = FD_BLOCKSIZE;
	}

	dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
	dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;

	dev = transport_add_device_to_core_hba(hba, &fileio_template,
				se_dev, dev_flags, (void *)fd_dev,
				&dev_limits, "FILEIO", FD_VERSION);
	if (!(dev))
		goto fail;

	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
	fd_dev->fd_queue_depth = dev->queue_depth;

	printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
			fd_dev->fd_dev_name, fd_dev->fd_dev_size);

	putname(dev_p);
	return dev;
fail:
	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	if (dev_p)
		putname(dev_p);
	return NULL;
}
/* fd_free_device(): (Part of se_subsystem_api_t template)
 *
 * Close the backing struct file (if one was opened) and release the
 * fd_dev allocated by fd_allocate_virtdevice().
 */
static void fd_free_device(void *p)
{
	struct fd_dev *fd_dev = p;

	if (fd_dev->fd_file != NULL) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	kfree(fd_dev);
}
/* Convert an embedded se_task back into its containing fd_request. */
static inline struct fd_request *FILE_REQ(struct se_task *task)
{
	return container_of(task, struct fd_request, fd_task);
}
/*
 * Allocate a zeroed fd_request for this command and hand back the
 * embedded se_task that TCM core tracks.
 */
static struct se_task *
fd_alloc_task(struct se_cmd *cmd)
{
	struct fd_request *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (req == NULL) {
		printk(KERN_ERR "Unable to allocate struct fd_request\n");
		return NULL;
	}
	req->fd_dev = SE_DEV(cmd)->dev_ptr;
	return &req->fd_task;
}
/*
 * Map the task's scatterlist into an iovec array and vfs_readv() it from
 * the backing file at the task's LBA byte offset.  Returns 1 on success,
 * -1 on failure; short reads on plain files (no backing block device)
 * are tolerated so reads past EOF report zeros with GOOD status.
 */
static int fd_do_readv(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);
	struct file *fd = req->fd_dev->fd_file;
	struct scatterlist *sg = task->task_sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
	int ret = 0, i;
	/* kcalloc() checks the count * size multiplication for overflow */
	iov = kcalloc(task->task_sg_num, sizeof(struct iovec), GFP_KERNEL);
	if (!(iov)) {
		printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
		return -1;
	}
	for (i = 0; i < task->task_sg_num; i++) {
		iov[i].iov_len = sg[i].length;
		iov[i].iov_base = sg_virt(&sg[i]);
	}
	/* vfs_readv() expects __user iovecs; lift the address limit */
	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos);
	set_fs(old_fs);
	kfree(iov);
	/*
	 * Return zeros and GOOD status even if the READ did not return
	 * the expected virt_size for struct file w/o a backing struct
	 * block_device.
	 */
	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
		if (ret < 0 || ret != task->task_size) {
			printk(KERN_ERR "vfs_readv() returned %d,"
				" expecting %d for S_ISBLK\n", ret,
				(int)task->task_size);
			return -1;
		}
	} else {
		if (ret < 0) {
			printk(KERN_ERR "vfs_readv() returned %d for non"
				" S_ISBLK\n", ret);
			return -1;
		}
	}
	return 1;
}
/*
 * Map the task's scatterlist into an iovec array and vfs_writev() it to
 * the backing file at the task's LBA byte offset.  Returns 1 on success,
 * -1 on any error or short write.
 */
static int fd_do_writev(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);
	struct file *fd = req->fd_dev->fd_file;
	struct scatterlist *sg = task->task_sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
	int ret, i = 0;
	/* kcalloc() checks the count * size multiplication for overflow */
	iov = kcalloc(task->task_sg_num, sizeof(struct iovec), GFP_KERNEL);
	if (!(iov)) {
		printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
		return -1;
	}
	for (i = 0; i < task->task_sg_num; i++) {
		iov[i].iov_len = sg[i].length;
		iov[i].iov_base = sg_virt(&sg[i]);
	}
	/* vfs_writev() expects __user iovecs; lift the address limit */
	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos);
	set_fs(old_fs);
	kfree(iov);
	/* A short write is an error for WRITEs, unlike reads above */
	if (ret < 0 || ret != task->task_size) {
		printk(KERN_ERR "vfs_writev() returned %d\n", ret);
		return -1;
	}
	return 1;
}
/*
 * Emulate SYNCHRONIZE_CACHE by vfs_fsync_range()ing either the whole
 * backing file or the byte range derived from the command's LBA/length.
 */
static void fd_emulate_sync_cache(struct se_task *task)
{
	struct se_cmd *cmd = TASK_CMD(task);
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = dev->dev_ptr;
	/* IMMED bit: CDB byte 1, bit 1 */
	int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
	loff_t start, end;
	int ret;
	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op
	 */
	if (immed)
		transport_complete_sync_cache(cmd, 1);
	/*
	 * Determine if we will be flushing the entire device.
	 */
	if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) {
		start = 0;
		end = LLONG_MAX;
	} else {
		start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size;
		if (cmd->data_length)
			end = start + cmd->data_length;
		else
			end = LLONG_MAX;
	}
	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
	/* Non-immediate: completion status reflects the flush result */
	if (!immed)
		transport_complete_sync_cache(cmd, ret == 0);
}
/*
 * Tell TCM Core that we are capable of WriteCache emulation for
 * an underlying struct se_device.
 */
static int fd_emulated_write_cache(struct se_device *dev)
{
	return 1;
}
/* No Disable Page Out (DPO) emulation for FILEIO. */
static int fd_emulated_dpo(struct se_device *dev)
{
	return 0;
}
/*
 * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
 * for TYPE_DISK.
 */
static int fd_emulated_fua_write(struct se_device *dev)
{
	return 1;
}
/* No FUA emulation for READs. */
static int fd_emulated_fua_read(struct se_device *dev)
{
	return 0;
}
/*
 * WRITE Force Unit Access (FUA) emulation on a per struct se_task
 * LBA range basis: sync only the byte range this task just wrote.
 */
static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = dev->dev_ptr;
	loff_t first = task->task_lba * DEV_ATTRIB(dev)->block_size;
	loff_t past_end = first + task->task_size;
	int rc;

	DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
			task->task_lba, task->task_size);

	rc = vfs_fsync_range(fd_dev->fd_file, first, past_end, 1);
	if (rc != 0)
		printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", rc);
}
/*
 * Dispatch a task to the vectorized read or write path, then apply FUA
 * WRITE cache-sync emulation when the command requested it.
 */
static int fd_do_task(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	int ret = 0;
	/*
	 * Call vectorized fileio functions to map struct scatterlist
	 * physical memory addresses to struct iovec virtual memory.
	 */
	if (task->task_data_direction == DMA_FROM_DEVICE) {
		ret = fd_do_readv(task);
	} else {
		ret = fd_do_writev(task);
		/* Only sync when write-cache + FUA-write emulation are on
		 * and the initiator set the FUA bit for this command. */
		if (ret > 0 &&
		    DEV_ATTRIB(dev)->emulate_write_cache > 0 &&
		    DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
		    T_TASK(cmd)->t_tasks_fua) {
			/*
			 * We might need to be a bit smarter here
			 * and return some sense data to let the initiator
			 * know the FUA WRITE cache sync failed..?
			 */
			fd_emulate_write_fua(cmd, task);
		}

	}

	if (ret < 0)
		return ret;
	/* ret == 1: the I/O finished synchronously; complete the task now */
	if (ret) {
		task->task_scsi_status = GOOD;
		transport_complete_task(task, 1);
	}
	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
/* fd_free_task(): (Part of se_subsystem_api_t template)
 *
 * Release the fd_request that embeds this se_task.
 */
static void fd_free_task(struct se_task *task)
{
	kfree(FILE_REQ(task));
}
/* configfs option tokens for FILEIO device parameters */
enum {
	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};
static match_table_t tokens = {
	{Opt_fd_dev_name, "fd_dev_name=%s"},
	{Opt_fd_dev_size, "fd_dev_size=%s"},
	/*
	 * Fixed typo: the pattern previously read "fd_buffered_id=%d",
	 * which could never match the documented fd_buffered_io= option.
	 */
	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
	{Opt_err, NULL}
};
/*
 * Parse the comma-separated configfs attribute string for a FILEIO
 * device: fd_dev_name=, fd_dev_size= and fd_buffered_io=1.
 * Returns @count on success or a negative errno.
 */
static ssize_t fd_set_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	const char *page, ssize_t count)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;
	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;
	orig = opts;
	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;
		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_fd_dev_name:
			/* match_strdup() allocates: NULL-check and free it */
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				goto out;
			}
			snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
					"%s", arg_p);
			kfree(arg_p);
			printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
					fd_dev->fd_dev_name);
			fd_dev->fbd_flags |= FBDF_HAS_PATH;
			break;
		case Opt_fd_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
			kfree(arg_p);
			if (ret < 0) {
				printk(KERN_ERR "strict_strtoull() failed for"
						" fd_dev_size=\n");
				goto out;
			}
			printk(KERN_INFO "FILEIO: Referencing Size: %llu"
					" bytes\n", fd_dev->fd_dev_size);
			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
			break;
		case Opt_fd_buffered_io:
			match_int(args, &arg);
			if (arg != 1) {
				printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg);
				ret = -EINVAL;
				goto out;
			}
			printk(KERN_INFO "FILEIO: Using buffered I/O"
				" operations for struct fd_dev\n");
			fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
			break;
		default:
			break;
		}
	}
out:
	kfree(orig);
	return (!ret) ? count : ret;
}
/*
 * Verify that the mandatory fd_dev_name= parameter was supplied before
 * the device may be enabled.
 */
static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;

	if (fd_dev->fbd_flags & FBDF_HAS_PATH)
		return 0;

	printk(KERN_ERR "Missing fd_dev_name=\n");
	return -1;
}
/*
 * Emit the FILEIO device id, backing path, size and I/O mode into the
 * configfs attribute buffer; returns the number of bytes written.
 */
static ssize_t fd_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
	const char *mode = (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
				"Buffered" : "Synchronous";
	ssize_t len;

	len = sprintf(b, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
	len += sprintf(b + len, " File: %s Size: %llu Mode: %s\n",
			fd_dev->fd_dev_name, fd_dev->fd_dev_size, mode);
	return len;
}
/* fd_get_cdb(): (Part of se_subsystem_api_t template)
*
*
*/
static unsigned char *fd_get_cdb(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
return req->fd_scsi_cdb;
}
/* fd_get_device_rev(): (Part of se_subsystem_api_t template)
 *
 * Report the emulated SCSI specification revision for this backend.
 */
static u32 fd_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}
/* fd_get_device_type(): (Part of se_subsystem_api_t template)
 *
 * FILEIO devices are always presented as direct-access disks.
 */
static u32 fd_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
/*
 * Report capacity in logical blocks: backing size divided by the
 * configured block size.
 */
static sector_t fd_get_blocks(struct se_device *dev)
{
	struct fd_dev *fd_dev = dev->dev_ptr;

	return div_u64(fd_dev->fd_dev_size, DEV_ATTRIB(dev)->block_size);
}
/* se_subsystem_api callback table registering FILEIO with TCM core */
static struct se_subsystem_api fileio_template = {
	.name			= "fileio",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.attach_hba		= fd_attach_hba,
	.detach_hba		= fd_detach_hba,
	.allocate_virtdevice	= fd_allocate_virtdevice,
	.create_virtdevice	= fd_create_virtdevice,
	.free_device		= fd_free_device,
	.dpo_emulated		= fd_emulated_dpo,
	.fua_write_emulated	= fd_emulated_fua_write,
	.fua_read_emulated	= fd_emulated_fua_read,
	.write_cache_emulated	= fd_emulated_write_cache,
	.alloc_task		= fd_alloc_task,
	.do_task		= fd_do_task,
	.do_sync_cache		= fd_emulate_sync_cache,
	.free_task		= fd_free_task,
	.check_configfs_dev_params = fd_check_configfs_dev_params,
	.set_configfs_dev_params = fd_set_configfs_dev_params,
	.show_configfs_dev_params = fd_show_configfs_dev_params,
	.get_cdb		= fd_get_cdb,
	.get_device_rev		= fd_get_device_rev,
	.get_device_type	= fd_get_device_type,
	.get_blocks		= fd_get_blocks,
};
/* Register the FILEIO backend with TCM core at module load. */
static int __init fileio_module_init(void)
{
	return transport_subsystem_register(&fileio_template);
}
/* Unregister at unload.  NOTE(review): could be tagged __exit. */
static void fileio_module_exit(void)
{
	transport_subsystem_release(&fileio_template);
}
MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");
module_init(fileio_module_init);
module_exit(fileio_module_exit);

View File

@ -0,0 +1,50 @@
#ifndef TARGET_CORE_FILE_H
#define TARGET_CORE_FILE_H
#define FD_VERSION		"4.0"
#define FD_MAX_DEV_NAME		256
/* Maximum queuedepth for the FILEIO HBA */
#define FD_HBA_QUEUE_DEPTH	256
#define FD_DEVICE_QUEUE_DEPTH	32
#define FD_MAX_DEVICE_QUEUE_DEPTH 128
/* Defaults used when no backing block device supplies queue limits */
#define FD_BLOCKSIZE		512
#define FD_MAX_SECTORS		1024
#define RRF_EMULATE_CDB		0x01
#define RRF_GOT_LBA		0x02
/* Per-task request state embedding the generic se_task */
struct fd_request {
	struct se_task	fd_task;
	/* SCSI CDB from iSCSI Command PDU */
	unsigned char	fd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
	/* FILEIO device */
	struct fd_dev	*fd_dev;
} ____cacheline_aligned;
/*
 * fbd_flags bits.  NOTE(review): the third flag uses prefix FDBD_
 * while the others use FBDF_ — inconsistent, but part of the ABI of
 * this file now; renaming would touch every user.
 */
#define FBDF_HAS_PATH		0x01
#define FBDF_HAS_SIZE		0x02
#define FDBD_USE_BUFFERED_IO	0x04
struct fd_dev {
	u32		fbd_flags;
	unsigned char	fd_dev_name[FD_MAX_DEV_NAME];
	/* Unique FILEIO Device ID in FILEIO HBA */
	u32		fd_dev_id;
	/* Number of SG tables in sg_table_array */
	u32		fd_table_count;
	u32		fd_queue_depth;
	u32		fd_block_size;
	unsigned long long fd_dev_size;
	struct file	*fd_file;
	/* FILEIO HBA device is connected to */
	struct fd_host *fd_host;
} ____cacheline_aligned;
/* Per-HBA state: just a monotonically increasing device id counter */
struct fd_host {
	u32		fd_host_dev_id_count;
	/* Unique FILEIO Host ID */
	u32		fd_host_id;
} ____cacheline_aligned;
#endif /* TARGET_CORE_FILE_H */

View File

@ -0,0 +1,185 @@
/*******************************************************************************
* Filename: target_core_hba.c
*
 * This file contains the iSCSI HBA Transport related functions.
*
* Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include "target_core_hba.h"
static LIST_HEAD(subsystem_list);
static DEFINE_MUTEX(subsystem_mutex);
/*
 * Register a storage backend plugin with TCM core.  Plugin names must
 * be unique; returns -EEXIST when a same-named plugin already exists.
 */
int transport_subsystem_register(struct se_subsystem_api *sub_api)
{
	struct se_subsystem_api *cur;

	INIT_LIST_HEAD(&sub_api->sub_api_list);

	mutex_lock(&subsystem_mutex);
	list_for_each_entry(cur, &subsystem_list, sub_api_list) {
		if (strcmp(cur->name, sub_api->name) != 0)
			continue;
		printk(KERN_ERR "%p is already registered with"
			" duplicate name %s, unable to process"
			" request\n", cur, cur->name);
		mutex_unlock(&subsystem_mutex);
		return -EEXIST;
	}
	list_add_tail(&sub_api->sub_api_list, &subsystem_list);
	mutex_unlock(&subsystem_mutex);

	printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:"
		" %p\n", sub_api->name, sub_api->owner);
	return 0;
}
EXPORT_SYMBOL(transport_subsystem_register);
/* Unlink a previously registered backend plugin from the global list. */
void transport_subsystem_release(struct se_subsystem_api *sub_api)
{
	mutex_lock(&subsystem_mutex);
	list_del(&sub_api->sub_api_list);
	mutex_unlock(&subsystem_mutex);
}
EXPORT_SYMBOL(transport_subsystem_release);
/*
 * Look up a registered backend plugin by name and take a module
 * reference on it.  Returns NULL if not found or if the module
 * reference cannot be taken.
 */
static struct se_subsystem_api *core_get_backend(const char *sub_name)
{
	struct se_subsystem_api *found = NULL, *cur;

	mutex_lock(&subsystem_mutex);
	list_for_each_entry(cur, &subsystem_list, sub_api_list) {
		if (!strcmp(cur->name, sub_name)) {
			found = cur;
			break;
		}
	}
	if (found && found->owner && !try_module_get(found->owner))
		found = NULL;
	mutex_unlock(&subsystem_mutex);
	return found;
}
/*
 * Allocate an se_hba bound to the named backend plugin, attach the
 * backend, and link the HBA into the global list.  Returns an ERR_PTR
 * on failure (module reference and allocation are unwound).
 */
struct se_hba *
core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
{
	struct se_hba *hba;
	int ret = 0;
	hba = kzalloc(sizeof(*hba), GFP_KERNEL);
	if (!hba) {
		printk(KERN_ERR "Unable to allocate struct se_hba\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&hba->hba_dev_list);
	spin_lock_init(&hba->device_lock);
	spin_lock_init(&hba->hba_queue_lock);
	mutex_init(&hba->hba_access_mutex);
	hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
	hba->hba_flags |= hba_flags;
	atomic_set(&hba->max_queue_depth, 0);
	atomic_set(&hba->left_queue_depth, 0);
	/* Takes a module reference on the backend; dropped on error below */
	hba->transport = core_get_backend(plugin_name);
	if (!hba->transport) {
		ret = -EINVAL;
		goto out_free_hba;
	}
	ret = hba->transport->attach_hba(hba, plugin_dep_id);
	if (ret < 0)
		goto out_module_put;
	/* Publish the HBA only after the backend attach succeeded */
	spin_lock(&se_global->hba_lock);
	hba->hba_id = se_global->g_hba_id_counter++;
	list_add_tail(&hba->hba_list, &se_global->g_hba_list);
	spin_unlock(&se_global->hba_lock);
	printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target"
			" Core\n", hba->hba_id);
	return hba;
out_module_put:
	if (hba->transport->owner)
		module_put(hba->transport->owner);
	hba->transport = NULL;
out_free_hba:
	kfree(hba);
	return ERR_PTR(ret);
}
/*
 * Tear down an se_hba: release every attached device, detach the
 * backend, unlink from the global list and drop the module reference.
 */
int
core_delete_hba(struct se_hba *hba)
{
	struct se_device *dev, *dev_tmp;
	spin_lock(&hba->device_lock);
	list_for_each_entry_safe(dev, dev_tmp, &hba->hba_dev_list, dev_list) {
		se_clear_dev_ports(dev);
		/*
		 * Drop the lock around the (possibly sleeping) release;
		 * _safe iteration tolerates the entry being removed.
		 */
		spin_unlock(&hba->device_lock);
		se_release_device_for_hba(dev);
		spin_lock(&hba->device_lock);
	}
	spin_unlock(&hba->device_lock);
	hba->transport->detach_hba(hba);
	spin_lock(&se_global->hba_lock);
	list_del(&hba->hba_list);
	spin_unlock(&se_global->hba_lock);
	printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target"
			" Core\n", hba->hba_id);
	if (hba->transport->owner)
		module_put(hba->transport->owner);
	hba->transport = NULL;
	kfree(hba);
	return 0;
}

View File

@ -0,0 +1,7 @@
#ifndef TARGET_CORE_HBA_H
#define TARGET_CORE_HBA_H
/* Allocate an se_hba bound to the named backend plugin (ERR_PTR on failure). */
extern struct se_hba *core_alloc_hba(const char *, u32, u32);
/* Tear down an se_hba and release all attached devices. */
extern int core_delete_hba(struct se_hba *);
#endif /* TARGET_CORE_HBA_H */

View File

@ -0,0 +1,808 @@
/*******************************************************************************
* Filename: target_core_iblock.c
*
* This file contains the Storage Engine <-> Linux BlockIO transport
* specific functions.
*
* Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include "target_core_iblock.h"
#if 0
#define DEBUG_IBLOCK(x...) printk(x)
#else
#define DEBUG_IBLOCK(x...)
#endif
static struct se_subsystem_api iblock_template;
static void iblock_bio_done(struct bio *, int);
/* iblock_attach_hba(): (Part of se_subsystem_api_t template)
 *
 * Allocate the per-HBA iblock_hba state and publish the queue depths
 * on the generic se_hba.
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct iblock_hba *ib_host = kzalloc(sizeof(*ib_host), GFP_KERNEL);

	if (ib_host == NULL) {
		printk(KERN_ERR "Unable to allocate memory for"
				" struct iblock_hba\n");
		return -ENOMEM;
	}
	ib_host->iblock_host_id = host_id;

	atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
	atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
	hba->hba_ptr = ib_host;

	printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
	printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic"
		" Target Core TCQ Depth: %d\n", hba->hba_id,
		ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth));
	return 0;
}
/* Free the per-HBA iblock state allocated by iblock_attach_hba(). */
static void iblock_detach_hba(struct se_hba *hba)
{
	struct iblock_hba *ib_host = hba->hba_ptr;

	printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
		" Target Core\n", hba->hba_id, ib_host->iblock_host_id);

	hba->hba_ptr = NULL;
	kfree(ib_host);
}
/*
 * Allocate the per-device iblock_dev state and link it to its HBA.
 * Returns NULL on OOM.
 */
static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct iblock_hba *ib_host = hba->hba_ptr;
	struct iblock_dev *ib_dev = kzalloc(sizeof(*ib_dev), GFP_KERNEL);

	if (ib_dev == NULL) {
		printk(KERN_ERR "Unable to allocate struct iblock_dev\n");
		return NULL;
	}
	ib_dev->ibd_host = ib_host;

	printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name);
	return ib_dev;
}
/*
 * Claim the block device named by udev_path=, derive queue limits from
 * its request_queue, and register the resulting se_device with TCM core.
 * Returns NULL on failure with all partial state unwound.
 */
static struct se_device *iblock_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	struct iblock_dev *ib_dev = p;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct block_device *bd = NULL;
	struct request_queue *q;
	struct queue_limits *limits;
	u32 dev_flags = 0;
	if (!(ib_dev)) {
		printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n");
		return NULL;
	}
	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
	/*
	 * These settings need to be made tunable..
	 */
	ib_dev->ibd_bio_set = bioset_create(32, 64);
	if (!(ib_dev->ibd_bio_set)) {
		printk(KERN_ERR "IBLOCK: Unable to create bioset()\n");
		return NULL;
	}
	printk(KERN_INFO "IBLOCK: Created bio_set()\n");
	/*
	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
	 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
	 */
	printk(KERN_INFO  "IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);
	bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
				FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
	/*
	 * blkdev_get_by_path() returns an ERR_PTR() on failure, never
	 * NULL, so the original !bd check could never fire.
	 */
	if (IS_ERR(bd))
		goto failed;
	/*
	 * Setup the local scope queue_limits from struct request_queue->limits
	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
	 */
	q = bdev_get_queue(bd);
	limits = &dev_limits.limits;
	limits->logical_block_size = bdev_logical_block_size(bd);
	limits->max_hw_sectors = queue_max_hw_sectors(q);
	limits->max_sectors = queue_max_sectors(q);
	dev_limits.hw_queue_depth = IBLOCK_MAX_DEVICE_QUEUE_DEPTH;
	dev_limits.queue_depth = IBLOCK_DEVICE_QUEUE_DEPTH;
	ib_dev->ibd_major = MAJOR(bd->bd_dev);
	ib_dev->ibd_minor = MINOR(bd->bd_dev);
	ib_dev->ibd_bd = bd;
	dev = transport_add_device_to_core_hba(hba,
			&iblock_template, se_dev, dev_flags, (void *)ib_dev,
			&dev_limits, "IBLOCK", IBLOCK_VERSION);
	if (!(dev))
		goto failed;
	ib_dev->ibd_depth = dev->queue_depth;
	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
	 * in ATA and we need to set TPE=1
	 */
	if (blk_queue_discard(bdev_get_queue(bd))) {
		struct request_queue *q = bdev_get_queue(bd);
		DEV_ATTRIB(dev)->max_unmap_lba_count =
				q->limits.max_discard_sectors;
		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1;
		DEV_ATTRIB(dev)->unmap_granularity =
				q->limits.discard_granularity;
		DEV_ATTRIB(dev)->unmap_granularity_alignment =
				q->limits.discard_alignment;
		printk(KERN_INFO "IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}
	return dev;
failed:
	if (ib_dev->ibd_bio_set) {
		bioset_free(ib_dev->ibd_bio_set);
		ib_dev->ibd_bio_set = NULL;
	}
	/*
	 * ibd_bd is only non-NULL once the device was successfully
	 * claimed; drop that exclusive claim so it is not leaked when
	 * registration with the core fails.
	 */
	if (ib_dev->ibd_bd) {
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
		ib_dev->ibd_bd = NULL;
	}
	ib_dev->ibd_major = 0;
	ib_dev->ibd_minor = 0;
	return NULL;
}
/* Release the claimed block device, its bioset and the iblock_dev. */
static void iblock_free_device(void *p)
{
	struct iblock_dev *ib_dev = p;
	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	bioset_free(ib_dev->ibd_bio_set);
	kfree(ib_dev);
}
/* Convert an embedded se_task back into its containing iblock_req. */
static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
{
	return container_of(task, struct iblock_req, ib_task);
}
/*
 * Allocate a zeroed iblock_req for this command and hand back the
 * embedded se_task that TCM core tracks.
 */
static struct se_task *
iblock_alloc_task(struct se_cmd *cmd)
{
	struct iblock_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (req == NULL) {
		printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n");
		return NULL;
	}
	req->ib_dev = SE_DEV(cmd)->dev_ptr;
	atomic_set(&req->ib_bio_cnt, 0);
	return &req->ib_task;
}
/*
 * Rescale the backing block device's capacity (last addressable LBA)
 * from its native logical block size into the se_device's configured
 * block size.  Both sizes are powers of two in 512..4096, so the
 * conversion is a shift by the size ratio.
 */
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);
	if (block_size == DEV_ATTRIB(dev)->block_size)
		return blocks_long;
	switch (block_size) {
	case 4096:
		switch (DEV_ATTRIB(dev)->block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
			/* break was missing here; it fell through to default */
			break;
		default:
			break;
		}
		break;
	case 2048:
		switch (DEV_ATTRIB(dev)->block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (DEV_ATTRIB(dev)->block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (DEV_ATTRIB(dev)->block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return blocks_long;
}
/*
 * Emulate SYCHRONIZE_CACHE_*
 */
static void iblock_emulate_sync_cache(struct se_task *task)
{
	struct se_cmd *cmd = TASK_CMD(task);
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
	/* IMMED bit: CDB byte 1, bit 1 */
	int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2);
	sector_t error_sector;
	int ret;
	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op
	 */
	if (immed)
		transport_complete_sync_cache(cmd, 1);
	/*
	 * blkdev_issue_flush() does not support a specifying a range, so
	 * we have to flush the entire cache.
	 */
	ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
	if (ret != 0) {
		printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d "
			" error_sector: %llu\n", ret,
			(unsigned long long)error_sector);
	}
	/* Non-immediate: completion status reflects the flush result */
	if (!immed)
		transport_complete_sync_cache(cmd, ret == 0);
}
/*
 * Tell TCM Core that we are capable of WriteCache emulation for
 * an underlying struct se_device.
 */
static int iblock_emulated_write_cache(struct se_device *dev)
{
	return 1;
}
/* No Disable Page Out (DPO) emulation for IBLOCK. */
static int iblock_emulated_dpo(struct se_device *dev)
{
	return 0;
}
/*
 * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
 * for TYPE_DISK.
 */
static int iblock_emulated_fua_write(struct se_device *dev)
{
	return 1;
}
/* No FUA emulation for READs. */
static int iblock_emulated_fua_read(struct se_device *dev)
{
	return 0;
}
/*
 * Submit the task's pre-built bio chain (from iblock_map_task_SG) to
 * the block layer, choosing WRITE_FUA when cache/FUA emulation demands
 * a durable write.
 */
static int iblock_do_task(struct se_task *task)
{
	struct se_device *dev = task->task_se_cmd->se_dev;
	struct iblock_req *req = IBLOCK_REQ(task);
	struct iblock_dev *ibd = (struct iblock_dev *)req->ib_dev;
	struct request_queue *q = bdev_get_queue(ibd->ibd_bd);
	struct bio *bio = req->ib_bio, *nbio = NULL;
	int rw;
	if (task->task_data_direction == DMA_TO_DEVICE) {
		/*
		 * Force data to disk if we pretend to not have a volatile
		 * write cache, or the initiator set the Force Unit Access bit.
		 */
		if (DEV_ATTRIB(dev)->emulate_write_cache == 0 ||
		    (DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
		     T_TASK(task->task_se_cmd)->t_tasks_fua))
			rw = WRITE_FUA;
		else
			rw = WRITE;
	} else {
		rw = READ;
	}
	/* Detach each bio from the chain before handing it to the block
	 * layer; completion is per-bio via iblock_bio_done(). */
	while (bio) {
		nbio = bio->bi_next;
		bio->bi_next = NULL;
		DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p"
			" bio->bi_sector: %llu\n", task, bio, bio->bi_sector);
		submit_bio(rw, bio);
		bio = nbio;
	}
	/* Kick the queue so the just-submitted bios are dispatched */
	if (q->unplug_fn)
		q->unplug_fn(q);
	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
/*
 * Issue a block-layer discard covering @range sectors starting at @lba.
 */
static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
{
	struct iblock_dev *ibd = dev->dev_ptr;

	/* final argument 0: no barrier semantics requested */
	return blkdev_issue_discard(ibd->ibd_bd, lba, range, GFP_KERNEL, 0);
}
/*
 * Free the iblock_req and any bios still chained to it.
 *
 * Only bios that iblock_bio_done() has not already released via
 * bio_put() -> iblock_bio_destructor() remain on the chain here.
 */
static void iblock_free_task(struct se_task *task)
{
	struct iblock_req *req = IBLOCK_REQ(task);
	struct bio *next = req->ib_bio;

	while (next != NULL) {
		struct bio *cur = next;

		next = cur->bi_next;
		cur->bi_next = NULL;
		bio_put(cur);
	}
	kfree(req);
}
/* configfs option tokens for IBLOCK device parameters */
enum {
	Opt_udev_path, Opt_force, Opt_err
};
static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};
/*
 * Parse the comma-separated configfs attribute string for an IBLOCK
 * device: udev_path= and force=.  Returns @count on success or a
 * negative errno.
 */
static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
					       struct se_subsystem_dev *se_dev,
					       const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;
	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;
	orig = opts;
	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;
		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				printk(KERN_ERR "Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			/* match_strdup() allocates: NULL-check and free it.
			 * Do not stash the snprintf() length into ret: that
			 * corrupted the function's return value. */
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				goto out;
			}
			snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
					"%s", arg_p);
			kfree(arg_p);
			printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_force:
			match_int(args, &arg);
			ib_dev->ibd_force = arg;
			printk(KERN_INFO "IBLOCK: Set force=%d\n",
				ib_dev->ibd_force);
			break;
		default:
			break;
		}
	}
out:
	kfree(orig);
	return (!ret) ? count : ret;
}
/*
 * Verify that the mandatory udev_path= parameter was supplied before
 * the device may be enabled.
 */
static ssize_t iblock_check_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;

	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH)
		return 0;

	printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n");
	return -1;
}
/*
 * Emit the claimed block device name, UDEV path, major:minor and claim
 * state into the configfs attribute buffer; returns bytes written.
 */
static ssize_t iblock_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
	struct block_device *bd = ibd->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;
	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
		bl += sprintf(b + bl, "  UDEV PATH: %s\n",
				ibd->ibd_udev_path);
	} else
		bl += sprintf(b + bl, "\n");
	bl += sprintf(b + bl, "        ");
	if (bd) {
		/* bd_holder == ibd means our blkdev_get_by_path() claim */
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			ibd->ibd_major, ibd->ibd_minor, (!bd->bd_contains) ?
			"" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: %d Minor: %d\n",
			ibd->ibd_major, ibd->ibd_minor);
	}
	return bl;
}
/* bio destructor: return the bio to this device's private bioset. */
static void iblock_bio_destructor(struct bio *bio)
{
	struct se_task *task = bio->bi_private;
	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
	bio_free(bio, ib_dev->ibd_bio_set);
}
/*
 * Allocate a bio from the device's private bioset, wire up completion
 * handling, and count it against the request's outstanding bios.  On
 * failure *ret is set to an OOM transport status and NULL is returned.
 */
static struct bio *iblock_get_bio(
	struct se_task *task,
	struct iblock_req *ib_req,
	struct iblock_dev *ib_dev,
	int *ret,
	sector_t lba,
	u32 sg_num)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);

	if (bio == NULL) {
		printk(KERN_ERR "Unable to allocate memory for bio\n");
		*ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
		return NULL;
	}
	DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:"
		" %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set);
	DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size);

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = task;
	bio->bi_destructor = iblock_bio_destructor;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_sector = lba;

	atomic_inc(&ib_req->ib_bio_cnt);

	DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector);
	DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n",
			atomic_read(&ib_req->ib_bio_cnt));
	return bio;
}
/*
 * Map a task's scatterlist into one or more chained struct bio.
 *
 * Converts the task's SCSI-blocksize LBA into 512-byte block-layer
 * sectors, then adds each scatterlist segment to the current bio; when a
 * bio fills up (bio_add_page() returns a short count), a new bio is
 * allocated, linked via bi_next, and the add is retried.  On allocation
 * failure every bio built so far is released.  Returns 0 on success or a
 * PYX_TRANSPORT_* error code.
 */
static int iblock_map_task_SG(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = SE_DEV(cmd);
	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
	struct iblock_req *ib_req = IBLOCK_REQ(task);
	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
	struct scatterlist *sg;
	int ret = 0;
	u32 i, sg_num = task->task_sg_num;
	sector_t block_lba;
	/*
	 * Do starting conversion up from non 512-byte blocksize with
	 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
	 */
	if (DEV_ATTRIB(dev)->block_size == 4096)
		block_lba = (task->task_lba << 3);
	else if (DEV_ATTRIB(dev)->block_size == 2048)
		block_lba = (task->task_lba << 2);
	else if (DEV_ATTRIB(dev)->block_size == 1024)
		block_lba = (task->task_lba << 1);
	else if (DEV_ATTRIB(dev)->block_size == 512)
		block_lba = task->task_lba;
	else {
		printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", DEV_ATTRIB(dev)->block_size);
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}

	bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
	if (!(bio))
		return ret;

	/* hbio = head of the chain (for cleanup), tbio = current tail */
	ib_req->ib_bio = bio;
	hbio = tbio = bio;
	/*
	 * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist
	 * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory.
	 */
	for_each_sg(task->task_sg, sg, task->task_sg_num, i) {
		DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:"
			" %p len: %u offset: %u\n", task, bio, sg_page(sg),
			sg->length, sg->offset);
again:
		ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
		if (ret != sg->length) {
			/* Current bio is full: chain a fresh one and retry */
			DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n",
					bio->bi_sector);
			DEBUG_IBLOCK("** task->task_size: %u\n",
					task->task_size);
			DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n",
					bio->bi_max_vecs);
			DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n",
					bio->bi_vcnt);
			bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
						block_lba, sg_num);
			if (!(bio))
				goto fail;
			tbio = tbio->bi_next = bio;
			DEBUG_IBLOCK("-----------------> Added +1 bio: %p to"
				" list, Going to again\n", bio);
			goto again;
		}
		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
		DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented"
			" sg_num to %u\n", task, sg_num);
		DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba"
				" to %llu\n", task, block_lba);
		DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:"
				" %u\n", task, bio->bi_vcnt);
	}
	return 0;
fail:
	/* Walk the chain from the head, releasing every bio built so far */
	while (hbio) {
		bio = hbio;
		hbio = hbio->bi_next;
		bio->bi_next = NULL;
		bio_put(bio);
	}
	return ret;
}
static unsigned char *iblock_get_cdb(struct se_task *task)
{
return IBLOCK_REQ(task)->ib_scsi_cdb;
}
/*
 * Report the SCSI protocol revision for this backend.
 * NOTE(review): the constant is SCSI_SPC_2 while the trailing comment
 * claims SPC-3 is presented to initiators -- presumably the target core
 * adjusts the value when filling the INQUIRY VERSION field; confirm
 * against the core INQUIRY emulation before relying on either reading.
 */
static u32 iblock_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}
/* IBLOCK always exports itself as a direct-access disk (TYPE_DISK). */
static u32 iblock_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
/*
 * READ CAPACITY helper: derive the exported block count from the backing
 * block device and its request queue.
 */
static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *idev = dev->dev_ptr;
	struct block_device *backing = idev->ibd_bd;

	return iblock_emulate_read_cap_with_block_size(dev, backing,
			bdev_get_queue(backing));
}
/*
 * bio completion callback.  Folds per-bio status into the owning
 * iblock_req; the task is completed only when the last in-flight bio
 * (tracked by ib_bio_cnt) has finished, with GOOD status only if no bio
 * recorded an error.
 */
static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_task *task = bio->bi_private;
	struct iblock_req *ibr = IBLOCK_REQ(task);
	/*
	 * Set -EIO when BIO_UPTODATE is clear but the caller passed err == 0.
	 */
	if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err))
		err = -EIO;
	if (err != 0) {
		printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic_inc();
		bio_put(bio);
		/*
		 * Wait to complete the task until the last bio has completed.
		 */
		if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
			return;
		ibr->ib_bio = NULL;
		/* ib_bio_err_cnt is non-zero here, so signal failure (0) */
		transport_complete_task(task, 0);
		return;
	}
	DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
		task, bio, task->task_lba, bio->bi_sector, err);
	/*
	 * bio_put() will call iblock_bio_destructor() to release the bio back
	 * to ibr->ib_bio_set.
	 */
	bio_put(bio);
	/*
	 * Wait to complete the task until the last bio has completed.
	 */
	if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
		return;
	/*
	 * Return GOOD status for task if zero ib_bio_err_cnt exists.
	 */
	ibr->ib_bio = NULL;
	transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt)));
}
/*
 * Backend subsystem API vector registered with the target core by
 * iblock_module_init().  Every callback here is implemented in this file.
 */
static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	/* I/O submission path */
	.map_task_SG		= iblock_map_task_SG,
	/* HBA / device lifecycle */
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.allocate_virtdevice	= iblock_allocate_virtdevice,
	.create_virtdevice	= iblock_create_virtdevice,
	.free_device		= iblock_free_device,
	/* Caching/FUA emulation capability queries */
	.dpo_emulated		= iblock_emulated_dpo,
	.fua_write_emulated	= iblock_emulated_fua_write,
	.fua_read_emulated	= iblock_emulated_fua_read,
	.write_cache_emulated	= iblock_emulated_write_cache,
	/* Task lifecycle and execution */
	.alloc_task		= iblock_alloc_task,
	.do_task		= iblock_do_task,
	.do_discard		= iblock_do_discard,
	.do_sync_cache		= iblock_emulate_sync_cache,
	.free_task		= iblock_free_task,
	/* configfs attribute plumbing */
	.check_configfs_dev_params = iblock_check_configfs_dev_params,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	/* Misc queries */
	.get_cdb		= iblock_get_cdb,
	.get_device_rev		= iblock_get_device_rev,
	.get_device_type	= iblock_get_device_type,
	.get_blocks		= iblock_get_blocks,
};
/* Register the IBLOCK backend with the target core at module load. */
static int __init iblock_module_init(void)
{
	return transport_subsystem_register(&iblock_template);
}
/* Unregister the IBLOCK backend on module unload. */
static void iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}
MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");
module_init(iblock_module_init);
module_exit(iblock_module_exit);

View File

@ -0,0 +1,40 @@
#ifndef TARGET_CORE_IBLOCK_H
#define TARGET_CORE_IBLOCK_H

#define IBLOCK_VERSION		"4.0"

#define IBLOCK_HBA_QUEUE_DEPTH		512
#define IBLOCK_DEVICE_QUEUE_DEPTH	32
#define IBLOCK_MAX_DEVICE_QUEUE_DEPTH	128
#define IBLOCK_MAX_CDBS			16
/* Linux block layer sectors are 512 bytes; shift converts bytes -> sectors */
#define IBLOCK_LBA_SHIFT		9

/* Per-task state for one IBLOCK request; may fan out into several bios */
struct iblock_req {
	struct se_task ib_task;
	unsigned char ib_scsi_cdb[TCM_MAX_COMMAND_SIZE];
	/* Number of in-flight bios; the last completion finishes the task */
	atomic_t ib_bio_cnt;
	/* Count of bios that completed with an error */
	atomic_t ib_bio_err_cnt;
	struct bio *ib_bio;
	struct iblock_dev *ib_dev;
} ____cacheline_aligned;

/* iblock_dev->ibd_flags bits */
#define IBDF_HAS_UDEV_PATH		0x01
#define IBDF_HAS_FORCE			0x02

/* Per-device state for one IBLOCK backstore */
struct iblock_dev {
	unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
	int	ibd_force;
	int	ibd_major;
	int	ibd_minor;
	u32	ibd_depth;
	u32	ibd_flags;
	/* Private bio_set used for all bio allocations of this device */
	struct bio_set	*ibd_bio_set;
	/* Claimed backing block device */
	struct block_device *ibd_bd;
	struct iblock_hba *ibd_host;
} ____cacheline_aligned;

struct iblock_hba {
	int		iblock_host_id;
} ____cacheline_aligned;

#endif /* TARGET_CORE_IBLOCK_H */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,28 @@
#ifndef TARGET_CORE_MIB_H
#define TARGET_CORE_MIB_H

/* Classes of unique indexes handed out by scsi_get_new_index() */
typedef enum {
	SCSI_INST_INDEX,
	SCSI_DEVICE_INDEX,
	SCSI_AUTH_INTR_INDEX,
	SCSI_INDEX_TYPE_MAX
} scsi_index_t;

/* One counter per scsi_index_t class, guarded by @lock */
struct scsi_index_table {
	spinlock_t	lock;
	u32		scsi_mib_index[SCSI_INDEX_TYPE_MAX];
} ____cacheline_aligned;

/* SCSI Port stats */
struct scsi_port_stats {
	u64	cmd_pdus;		/* command PDU count */
	u64	tx_data_octets;		/* bytes transmitted on this port */
	u64	rx_data_octets;		/* bytes received on this port */
} ____cacheline_aligned;

extern int init_scsi_target_mib(void);
extern void remove_scsi_target_mib(void);
extern void init_scsi_index_table(void);
extern u32 scsi_get_new_index(scsi_index_t);

#endif /*** TARGET_CORE_MIB_H ***/

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,67 @@
#ifndef TARGET_CORE_PR_H
#define TARGET_CORE_PR_H
/*
 * SPC-3/SPC-4 Persistent Reservations: service action opcodes, scope and
 * type values, plus the emulation entry points implemented in
 * target_core_pr.c.
 */
/*
 * PERSISTENT_RESERVE_OUT service action codes
 *
 * spc4r17 section 6.14.2 Table 171
 */
#define PRO_REGISTER				0x00
#define PRO_RESERVE				0x01
#define PRO_RELEASE				0x02
#define PRO_CLEAR				0x03
#define PRO_PREEMPT				0x04
#define PRO_PREEMPT_AND_ABORT			0x05
#define PRO_REGISTER_AND_IGNORE_EXISTING_KEY	0x06
#define PRO_REGISTER_AND_MOVE			0x07
/*
 * PERSISTENT_RESERVE_IN service action codes
 *
 * spc4r17 section 6.13.1 Table 159
 */
#define PRI_READ_KEYS				0x00
#define PRI_READ_RESERVATION			0x01
#define PRI_REPORT_CAPABILITIES			0x02
#define PRI_READ_FULL_STATUS			0x03
/*
 * PERSISTENT_RESERVE_ SCOPE field
 *
 * spc4r17 section 6.13.3.3 Table 163
 */
#define PR_SCOPE_LU_SCOPE			0x00
/*
 * PERSISTENT_RESERVE_* TYPE field
 *
 * spc4r17 section 6.13.3.4 Table 164
 */
#define PR_TYPE_WRITE_EXCLUSIVE			0x01
#define PR_TYPE_EXCLUSIVE_ACCESS		0x03
#define PR_TYPE_WRITE_EXCLUSIVE_REGONLY		0x05
#define PR_TYPE_EXCLUSIVE_ACCESS_REGONLY	0x06
#define PR_TYPE_WRITE_EXCLUSIVE_ALLREG		0x07
#define PR_TYPE_EXCLUSIVE_ACCESS_ALLREG		0x08

/* Maximum initiator/target port identifier lengths for APTPL metadata */
#define PR_APTPL_MAX_IPORT_LEN			256
#define PR_APTPL_MAX_TPORT_LEN			256

extern struct kmem_cache *t10_pr_reg_cache;

extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
			char *, u32);
/* Legacy SPC-2 RESERVE/RELEASE compatibility handling */
extern int core_scsi2_emulate_crh(struct se_cmd *);
/* APTPL: registrations that persist across target power loss */
extern int core_scsi3_alloc_aptpl_registration(
			struct t10_reservation_template *, u64,
			unsigned char *, unsigned char *, u32,
			unsigned char *, u16, u32, int, int, u8);
extern int core_scsi3_check_aptpl_registration(struct se_device *,
			struct se_portal_group *, struct se_lun *,
			struct se_lun_acl *);
extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
					     struct se_node_acl *);
extern void core_scsi3_free_all_registrations(struct se_device *);
extern unsigned char *core_scsi3_pr_dump_type(int);
extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *,
						  struct se_cmd *);
/* Main SPC-3 PERSISTENT RESERVE IN/OUT emulation entry point */
extern int core_scsi3_emulate_pr(struct se_cmd *);
extern int core_setup_reservations(struct se_device *, int);

#endif /* TARGET_CORE_PR_H */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,65 @@
#ifndef TARGET_CORE_PSCSI_H
#define TARGET_CORE_PSCSI_H

#define PSCSI_VERSION		"v4.0"
#define PSCSI_VIRTUAL_HBA_DEPTH	2048

/* used in pscsi_find_alloc_len() */
#ifndef INQUIRY_DATA_SIZE
#define INQUIRY_DATA_SIZE	0x24
#endif

/* used in pscsi_add_device_to_list() */
#define PSCSI_DEFAULT_QUEUEDEPTH	1

#define PS_RETRY		5
#define PS_TIMEOUT_DISK		(15*HZ)
#define PS_TIMEOUT_OTHER	(500*HZ)

#include <linux/device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_device.h>
#include <linux/kref.h>
#include <linux/kobject.h>

/* Per-task state for one passthrough SCSI request */
struct pscsi_plugin_task {
	struct se_task pscsi_task;
	/* Points at either __pscsi_cdb or an externally supplied buffer */
	unsigned char *pscsi_cdb;
	unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE];
	unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
	int	pscsi_direction;
	int	pscsi_result;
	u32	pscsi_resid;
	struct request *pscsi_req;
} ____cacheline_aligned;

/* pscsi_dev_virt->pdv_flags bits */
#define PDF_HAS_CHANNEL_ID	0x01
#define PDF_HAS_TARGET_ID	0x02
#define PDF_HAS_LUN_ID		0x04
#define PDF_HAS_VPD_UNIT_SERIAL 0x08
#define PDF_HAS_VPD_DEV_IDENT	0x10
#define PDF_HAS_VIRT_HOST_ID	0x20

/* Per-device state: the underlying SCSI device being passed through */
struct pscsi_dev_virt {
	int	pdv_flags;
	int	pdv_host_id;
	int	pdv_channel_id;
	int	pdv_target_id;
	int	pdv_lun_id;
	struct block_device *pdv_bd;
	struct scsi_device *pdv_sd;
	struct se_hba *pdv_se_hba;
} ____cacheline_aligned;

/* NOTE(review): "VIRUTAL" typo is preserved -- renaming would break users */
typedef enum phv_modes {
	PHV_VIRUTAL_HOST_ID,
	PHV_LLD_SCSI_HOST_NO
} phv_modes_t;

/* Per-HBA state: either a virtual host id or a bound LLD Scsi_Host */
struct pscsi_hba_virt {
	int			phv_host_id;
	phv_modes_t		phv_mode;
	struct Scsi_Host	*phv_lld_host;
} ____cacheline_aligned;

#endif /*** TARGET_CORE_PSCSI_H ***/

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,73 @@
#ifndef TARGET_CORE_RD_H
#define TARGET_CORE_RD_H

#define RD_HBA_VERSION		"v4.0"
#define RD_DR_VERSION		"4.0"
#define RD_MCP_VERSION		"4.0"

/* Largest piece of memory kmalloc can allocate */
#define RD_MAX_ALLOCATION_SIZE	65536
/* Maximum queuedepth for the Ramdisk HBA */
#define RD_HBA_QUEUE_DEPTH	256
#define RD_DEVICE_QUEUE_DEPTH	32
#define RD_MAX_DEVICE_QUEUE_DEPTH 128
#define RD_BLOCKSIZE		512
#define RD_MAX_SECTORS		1024

extern struct kmem_cache *se_mem_cache;

/* Used in target_core_init_configfs() for virtual LUN 0 access */
int __init rd_module_init(void);
void rd_module_exit(void);

/* rd_request flag bits */
#define RRF_EMULATE_CDB		0x01
#define RRF_GOT_LBA		0x02

/* Per-task state for one ramdisk request */
struct rd_request {
	struct se_task	rd_task;
	/* SCSI CDB from iSCSI Command PDU */
	unsigned char	rd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
	/* Offset from start of page */
	u32		rd_offset;
	/* Starting page in Ramdisk for request */
	u32		rd_page;
	/* Total number of pages needed for request */
	u32		rd_page_count;
	/* Scatterlist count */
	u32		rd_size;
	/* Ramdisk device */
	struct rd_dev	*rd_dev;
} ____cacheline_aligned;

/* One chunk of the ramdisk's backing pages, covering a page range */
struct rd_dev_sg_table {
	u32		page_start_offset;
	u32		page_end_offset;
	u32		rd_sg_count;
	struct scatterlist *sg_table;
} ____cacheline_aligned;

#define RDF_HAS_PAGE_COUNT	0x01

/* Per-device state for one ramdisk backstore */
struct rd_dev {
	int		rd_direct;
	u32		rd_flags;
	/* Unique Ramdisk Device ID in Ramdisk HBA */
	u32		rd_dev_id;
	/* Total page count for ramdisk device */
	u32		rd_page_count;
	/* Number of SG tables in sg_table_array */
	u32		sg_table_count;
	u32		rd_queue_depth;
	/* Array of rd_dev_sg_table_t containing scatterlists */
	struct rd_dev_sg_table *sg_table_array;
	/* Ramdisk HBA device is connected to */
	struct rd_host *rd_host;
} ____cacheline_aligned;

struct rd_host {
	u32		rd_host_dev_id_count;
	u32		rd_host_id;		/* Unique Ramdisk Host ID */
} ____cacheline_aligned;

#endif /* TARGET_CORE_RD_H */

View File

@ -0,0 +1,105 @@
/*******************************************************************************
* Filename: target_core_scdb.c
*
* This file contains the generic target engine Split CDB related functions.
*
* Copyright (c) 2004-2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <scsi/scsi.h>
#include <asm/unaligned.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include "target_core_scdb.h"
/*
 * split_cdb_XX_6(): write a 21-bit LBA and 8-bit transfer length into a
 * 6-byte CDB (bytes 1-3 carry the LBA, byte 4 the sector count).
 */
void split_cdb_XX_6(
	unsigned long long lba,
	u32 *sectors,
	unsigned char *cdb)
{
	/* Byte 1 carries only LBA bits 20..16 */
	cdb[1] = (unsigned char)((lba >> 16) & 0x1f);
	cdb[2] = (unsigned char)((lba >> 8) & 0xff);
	cdb[3] = (unsigned char)(lba & 0xff);
	cdb[4] = (unsigned char)(*sectors & 0xff);
}
/*
 * split_cdb_XX_10(): write a 32-bit LBA (bytes 2-5) and 16-bit transfer
 * length (bytes 7-8) into a 10-byte CDB, both big-endian.
 */
void split_cdb_XX_10(
	unsigned long long lba,
	u32 *sectors,
	unsigned char *cdb)
{
	put_unaligned_be16(*sectors, &cdb[7]);
	put_unaligned_be32(lba, &cdb[2]);
}
/*
 * split_cdb_XX_12(): write a 32-bit LBA (bytes 2-5) and 32-bit transfer
 * length (bytes 6-9) into a 12-byte CDB, both big-endian.
 */
void split_cdb_XX_12(
	unsigned long long lba,
	u32 *sectors,
	unsigned char *cdb)
{
	put_unaligned_be32(*sectors, &cdb[6]);
	put_unaligned_be32(lba, &cdb[2]);
}
/*
 * split_cdb_XX_16(): write a 64-bit LBA (bytes 2-9) and 32-bit transfer
 * length (bytes 10-13) into a 16-byte CDB, both big-endian.
 */
void split_cdb_XX_16(
	unsigned long long lba,
	u32 *sectors,
	unsigned char *cdb)
{
	put_unaligned_be32(*sectors, &cdb[10]);
	put_unaligned_be64(lba, &cdb[2]);
}
/*
 * split_cdb_XX_32(): write a 64-bit LBA (bytes 12-19) and 32-bit transfer
 * length (bytes 28-31) into a 32-byte variable-length CDB such as
 * READ_32, WRITE_32 and emulated XDWRITEREAD_32, both big-endian.
 */
void split_cdb_XX_32(
	unsigned long long lba,
	u32 *sectors,
	unsigned char *cdb)
{
	put_unaligned_be32(*sectors, &cdb[28]);
	put_unaligned_be64(lba, &cdb[12]);
}

View File

@ -0,0 +1,10 @@
#ifndef TARGET_CORE_SCDB_H
#define TARGET_CORE_SCDB_H

/*
 * Helpers that write an (LBA, sector count) pair back into the correct
 * byte positions of a 6/10/12/16/32-byte SCSI CDB; per-format layouts
 * are documented in target_core_scdb.c.
 */
extern void split_cdb_XX_6(unsigned long long, u32 *, unsigned char *);
extern void split_cdb_XX_10(unsigned long long, u32 *, unsigned char *);
extern void split_cdb_XX_12(unsigned long long, u32 *, unsigned char *);
extern void split_cdb_XX_16(unsigned long long, u32 *, unsigned char *);
extern void split_cdb_XX_32(unsigned long long, u32 *, unsigned char *);

#endif /* TARGET_CORE_SCDB_H */

View File

@ -0,0 +1,404 @@
/*******************************************************************************
* Filename: target_core_tmr.c
*
* This file contains SPC-3 task management infrastructure
*
* Copyright (c) 2009,2010 Rising Tide Systems
* Copyright (c) 2009,2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tmr.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>
#include "target_core_alua.h"
#include "target_core_pr.h"
#define DEBUG_LUN_RESET
#ifdef DEBUG_LUN_RESET
#define DEBUG_LR(x...) printk(KERN_INFO x)
#else
#define DEBUG_LR(x...)
#endif
/*
 * Allocate and initialize a zeroed struct se_tmr_req bound to @se_cmd.
 * Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
struct se_tmr_req *core_tmr_alloc_req(
	struct se_cmd *se_cmd,
	void *fabric_tmr_ptr,
	u8 function)
{
	struct se_tmr_req *req;

	req = kmem_cache_zalloc(se_tmr_req_cache, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&req->tmr_list);
	req->task_cmd = se_cmd;
	req->fabric_tmr_ptr = fabric_tmr_ptr;
	req->function = function;

	return req;
}
EXPORT_SYMBOL(core_tmr_alloc_req);
/*
 * Release a TMR request, unlinking it from its device's TMR list first.
 *
 * Fixes vs. original: tmr->tmr_dev may still be NULL here (the request is
 * allocated by core_tmr_alloc_req() without a device and tmr_dev is only
 * assigned later -- NOTE(review): confirm the assignment site in
 * transport code), so taking dev->se_tmr_lock unconditionally could oops.
 * Also free the request only after dropping the spinlock rather than
 * while holding it.
 */
void core_tmr_release_req(
	struct se_tmr_req *tmr)
{
	struct se_device *dev = tmr->tmr_dev;

	if (!dev) {
		/* Never attached to a device: nothing to unlink */
		kmem_cache_free(se_tmr_req_cache, tmr);
		return;
	}

	spin_lock(&dev->se_tmr_lock);
	list_del(&tmr->tmr_list);
	spin_unlock(&dev->se_tmr_lock);

	kmem_cache_free(se_tmr_req_cache, tmr);
}
/*
 * Complete one aborted command, optionally emitting TASK ABORTED status.
 *
 * With no remaining frontend references (@fe_count == 0) the command is
 * finished immediately.  Otherwise TASK ABORTED status is sent when the
 * TAS attribute is enabled, or when the TMR initiator's node ACL matches
 * the command's own session ACL.
 */
static void core_tmr_handle_tas_abort(
	struct se_node_acl *tmr_nacl,
	struct se_cmd *cmd,
	int tas,
	int fe_count)
{
	int send_tas;

	if (fe_count == 0) {
		transport_cmd_finish_abort(cmd, 1);
		return;
	}
	/*
	 * TASK ABORTED status (TAS) bit support
	 */
	send_tas = tas ||
		(tmr_nacl && tmr_nacl == cmd->se_sess->se_node_acl);
	if (send_tas)
		transport_send_task_abort(cmd);

	transport_cmd_finish_abort(cmd, 0);
}
/*
 * Perform a LUN_RESET Task Management function for @dev, or -- when
 * @preempt_and_abort_list is non-NULL -- the abort phase of a PROUT
 * PREEMPT_AND_ABORT service action restricted to matching registrations.
 *
 * Walks three collections in order: the device TMR list, the device
 * state-task list, and the device command queue, aborting everything
 * except the initiating TMR (@tmr) and the PROUT command itself
 * (@prout_cmd).  Always returns 0.
 */
int core_tmr_lun_reset(
	struct se_device *dev,
	struct se_tmr_req *tmr,
	struct list_head *preempt_and_abort_list,
	struct se_cmd *prout_cmd)
{
	struct se_cmd *cmd;
	struct se_queue_req *qr, *qr_tmp;
	struct se_node_acl *tmr_nacl = NULL;
	struct se_portal_group *tmr_tpg = NULL;
	struct se_queue_obj *qobj = dev->dev_queue_obj;
	struct se_tmr_req *tmr_p, *tmr_pp;
	struct se_task *task, *task_tmp;
	unsigned long flags;
	int fe_count, state, tas;
	/*
	 * TASK_ABORTED status bit, this is configurable via ConfigFS
	 * struct se_device attributes.  spc4r17 section 7.4.6 Control mode page
	 *
	 * A task aborted status (TAS) bit set to zero specifies that aborted
	 * tasks shall be terminated by the device server without any response
	 * to the application client. A TAS bit set to one specifies that tasks
	 * aborted by the actions of an I_T nexus other than the I_T nexus on
	 * which the command was received shall be completed with TASK ABORTED
	 * status (see SAM-4).
	 */
	tas = DEV_ATTRIB(dev)->emulate_tas;
	/*
	 * Determine if this se_tmr is coming from a $FABRIC_MOD
	 * or struct se_device passthrough..
	 */
	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
		if (tmr_nacl && tmr_tpg) {
			DEBUG_LR("LUN_RESET: TMR caller fabric: %s"
				" initiator port %s\n",
				TPG_TFO(tmr_tpg)->get_fabric_name(),
				tmr_nacl->initiatorname);
		}
	}
	DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n",
		(preempt_and_abort_list) ? "Preempt" : "TMR",
		TRANSPORT(dev)->name, tas);
	/*
	 * Phase 1: Release all pending and outgoing TMRs aside from the
	 * received LUN_RESET tmr..
	 */
	spin_lock(&dev->se_tmr_lock);
	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
		/*
		 * Allow the received TMR to return with FUNCTION_COMPLETE.
		 */
		if (tmr && (tmr_p == tmr))
			continue;

		cmd = tmr_p->task_cmd;
		if (!(cmd)) {
			printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n");
			continue;
		}
		/*
		 * If this function was called with a valid pr_res_key
		 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action),
		 * skip non registration key matching TMRs.
		 */
		if ((preempt_and_abort_list != NULL) &&
		    (core_scsi3_check_cdb_abort_and_preempt(
					preempt_and_abort_list, cmd) != 0))
			continue;
		/* se_tmr_lock must be dropped before taking t_state_lock */
		spin_unlock(&dev->se_tmr_lock);

		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
		if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) {
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
			spin_lock(&dev->se_tmr_lock);
			continue;
		}
		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
			spin_lock(&dev->se_tmr_lock);
			continue;
		}
		DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
			" Response: 0x%02x, t_state: %d\n",
			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
			tmr_p->function, tmr_p->response, cmd->t_state);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		transport_cmd_finish_abort_tmr(cmd);
		spin_lock(&dev->se_tmr_lock);
	}
	spin_unlock(&dev->se_tmr_lock);
	/*
	 * Phase 2: Complete outstanding struct se_task CDBs with TASK_ABORTED
	 * SAM status.
	 * This is following sam4r17, section 5.6 Aborting commands, Table 38
	 * for TMR LUN_RESET:
	 *
	 * a) "Yes" indicates that each command that is aborted on an I_T nexus
	 * other than the one that caused the SCSI device condition is
	 * completed with TASK ABORTED status, if the TAS bit is set to one in
	 * the Control mode page (see SPC-4). "No" indicates that no status is
	 * returned for aborted commands.
	 *
	 * d) If the logical unit reset is caused by a particular I_T nexus
	 * (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
	 * (TASK_ABORTED status) applies.
	 *
	 * Otherwise (e.g., if triggered by a hard reset), "no"
	 * (no TASK_ABORTED SAM status) applies.
	 *
	 * Note that this seems to be independent of TAS (Task Aborted Status)
	 * in the Control Mode Page.
	 */
	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
				t_state_list) {
		if (!(TASK_CMD(task))) {
			printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
			continue;
		}
		cmd = TASK_CMD(task);

		if (!T_TASK(cmd)) {
			printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
				" %p ITT: 0x%08x\n", task, cmd,
				CMD_TFO(cmd)->get_task_tag(cmd));
			continue;
		}
		/*
		 * For PREEMPT_AND_ABORT usage, only process commands
		 * with a matching reservation key.
		 */
		if ((preempt_and_abort_list != NULL) &&
		    (core_scsi3_check_cdb_abort_and_preempt(
					preempt_and_abort_list, cmd) != 0))
			continue;
		/*
		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
		 */
		if (prout_cmd == cmd)
			continue;

		list_del(&task->t_state_list);
		atomic_set(&task->task_state_active, 0);
		/* execute_task_lock dropped while (possibly) sleeping below */
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);

		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
		DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
			"def_t_state: %d/%d cdb: 0x%02x\n",
			(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
			CMD_TFO(cmd)->get_task_tag(cmd), 0,
			CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
			cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]);
		DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
			" t_task_cdbs: %d t_task_cdbs_left: %d"
			" t_task_cdbs_sent: %d -- t_transport_active: %d"
			" t_transport_stop: %d t_transport_sent: %d\n",
			CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
			T_TASK(cmd)->t_task_cdbs,
			atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
			atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
			atomic_read(&T_TASK(cmd)->t_transport_active),
			atomic_read(&T_TASK(cmd)->t_transport_stop),
			atomic_read(&T_TASK(cmd)->t_transport_sent));

		/* If the task is still executing, ask it to stop and wait */
		if (atomic_read(&task->task_active)) {
			atomic_set(&task->task_stop, 1);
			spin_unlock_irqrestore(
				&T_TASK(cmd)->t_state_lock, flags);

			DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
				" for dev: %p\n", task, dev);
			wait_for_completion(&task->task_stop_comp);
			DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
				" dev: %p\n", task, dev);
			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);

			atomic_set(&task->task_active, 0);
			atomic_set(&task->task_stop, 0);
		}
		__transport_stop_task_timer(task, &flags);

		/* Only the last task of the command proceeds to abort it */
		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
			spin_unlock_irqrestore(
					&T_TASK(cmd)->t_state_lock, flags);
			DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
				" t_task_cdbs_ex_left: %d\n", task, dev,
				atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));

			spin_lock_irqsave(&dev->execute_task_lock, flags);
			continue;
		}
		fe_count = atomic_read(&T_TASK(cmd)->t_fe_count);

		if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
			DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
				" task: %p, t_fe_count: %d dev: %p\n", task,
				fe_count, dev);
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
						flags);
			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);

			spin_lock_irqsave(&dev->execute_task_lock, flags);
			continue;
		}
		DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
			" t_fe_count: %d dev: %p\n", task, fe_count, dev);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);

		spin_lock_irqsave(&dev->execute_task_lock, flags);
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
	/*
	 * Phase 3: Release all commands remaining in the struct se_device
	 * cmd queue.
	 *
	 * This follows the same logic as above for the struct se_device
	 * struct se_task state list, where commands are returned with
	 * TASK_ABORTED status, if there is an outstanding $FABRIC_MOD
	 * reference, otherwise the struct se_cmd is released.
	 */
	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) {
		cmd = (struct se_cmd *)qr->cmd;
		if (!(cmd)) {
			/*
			 * Skip these for non PREEMPT_AND_ABORT usage..
			 */
			if (preempt_and_abort_list != NULL)
				continue;

			atomic_dec(&qobj->queue_cnt);
			list_del(&qr->qr_list);
			kfree(qr);
			continue;
		}
		/*
		 * For PREEMPT_AND_ABORT usage, only process commands
		 * with a matching reservation key.
		 */
		if ((preempt_and_abort_list != NULL) &&
		    (core_scsi3_check_cdb_abort_and_preempt(
					preempt_and_abort_list, cmd) != 0))
			continue;
		/*
		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
		 */
		if (prout_cmd == cmd)
			continue;

		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
		atomic_dec(&qobj->queue_cnt);
		list_del(&qr->qr_list);
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

		state = qr->state;
		kfree(qr);

		DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
			" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
			"Preempt" : "", cmd, state,
			atomic_read(&T_TASK(cmd)->t_fe_count));
		/*
		 * Signal that the command has failed via cmd->se_cmd_flags,
		 * and call TFO->new_cmd_failure() to wakeup any fabric
		 * dependent code used to wait for unsolicited data out
		 * allocation to complete.  The fabric module is expected
		 * to dump any remaining unsolicited data out for the aborted
		 * command at this point.
		 */
		transport_new_cmd_failure(cmd);

		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
				atomic_read(&T_TASK(cmd)->t_fe_count));
		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	}
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
	/*
	 * Clear any legacy SPC-2 reservation when called during
	 * LOGICAL UNIT RESET
	 */
	if (!(preempt_and_abort_list) &&
	     (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
		spin_lock(&dev->dev_reservation_lock);
		dev->dev_reserved_node_acl = NULL;
		dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
		spin_unlock(&dev->dev_reservation_lock);
		printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
	}

	spin_lock(&dev->stats_lock);
	dev->num_resets++;
	spin_unlock(&dev->stats_lock);

	DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
		(preempt_and_abort_list) ? "Preempt" : "TMR",
		TRANSPORT(dev)->name);
	return 0;
}

View File

@ -0,0 +1,826 @@
/*******************************************************************************
* Filename: target_core_tpg.c
*
* This file contains generic Target Portal Group related functions.
*
* Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include "target_core_hba.h"
/* core_clear_initiator_node_from_tpg():
 *
 * Strip every mapped LUN from @nacl's device list and free the matching
 * struct se_lun_acl entry hanging off each LUN's ACL list.
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_lun_acl *acl, *acl_tmp, *match;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			printk(KERN_ERR "%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				TPG_TFO(tpg)->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

		/*
		 * Fix vs. original: after a completed
		 * list_for_each_entry_safe() walk the cursor is never NULL
		 * (it is a container_of() of the list head), so the old
		 * "if (!acl)" not-found check could never fire and a missing
		 * entry would have passed a bogus pointer to list_del()/
		 * kfree().  Track the match explicitly instead.
		 */
		match = NULL;
		spin_lock(&lun->lun_acl_lock);
		list_for_each_entry_safe(acl, acl_tmp,
					&lun->lun_acl_list, lacl_list) {
			if (!(strcmp(acl->initiatorname,
					nacl->initiatorname)) &&
			     (acl->mapped_lun == deve->mapped_lun)) {
				match = acl;
				break;
			}
		}

		if (!match) {
			printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
				" mapped_lun: %u\n", nacl->initiatorname,
				deve->mapped_lun);
			spin_unlock(&lun->lun_acl_lock);
			spin_lock_irq(&nacl->device_list_lock);
			continue;
		}

		list_del(&match->lacl_list);
		spin_unlock(&lun->lun_acl_lock);

		spin_lock_irq(&nacl->device_list_lock);
		kfree(match);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}
/* __core_tpg_get_initiator_node_acl():
 *
 * Unlocked lookup of a node ACL by initiator name; returns NULL when no
 * entry matches.  spin_lock_bh(&tpg->acl_node_lock); must be held when
 * calling.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *nacl;

	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		if (strcmp(nacl->initiatorname, initiatorname) == 0)
			return nacl;
	}

	return NULL;
}
/* core_tpg_get_initiator_node_acl():
 *
 * Locked lookup of a non-dynamic (explicitly configured) node ACL by
 * initiator name.  Returns NULL when no match exists or only a dynamic
 * ACL is present.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *nacl, *found = NULL;

	spin_lock_bh(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		if ((strcmp(nacl->initiatorname, initiatorname) == 0) &&
		    !nacl->dynamic_node_acl) {
			found = nacl;
			break;
		}
	}
	spin_unlock_bh(&tpg->acl_node_lock);

	return found;
}
/* core_tpg_add_node_to_devs():
 *
 *	Walk every active LUN in @tpg and create demo-mode mapped LUN
 *	access for @acl on each one, choosing READ-WRITE or READ-ONLY
 *	access per the fabric's demo_mode_write_protect setting.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		/*
		 * tpg_lun_lock is dropped around the per-LUN mapped LUN
		 * setup below and reacquired at the bottom of the loop —
		 * presumably because core_update_device_list_for_node()
		 * can sleep; TODO confirm.
		 */
		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) {
			if (dev->dev_flags & DF_READ_ONLY)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			TPG_TFO(tpg)->get_fabric_name(),
			TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg, 1);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
/* core_set_queue_depth_for_node():
 *
 *	Validate @acl's queue depth, forcing a minimum of 1 so that a
 *	misconfigured depth of 0 cannot stall command submission.
 *	Always returns 0.
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		/* Message previously read "is 0,defaulting" — space added. */
		printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}
/* core_create_device_list_for_node():
*
*
*/
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
struct se_dev_entry *deve;
int i;
nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
if (!(nacl->device_list)) {
printk(KERN_ERR "Unable to allocate memory for"
" struct se_node_acl->device_list\n");
return -1;
}
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
deve = &nacl->device_list[i];
atomic_set(&deve->ua_count, 0);
atomic_set(&deve->pr_ref_count, 0);
spin_lock_init(&deve->ua_lock);
INIT_LIST_HEAD(&deve->alua_port_list);
INIT_LIST_HEAD(&deve->ua_list);
}
return 0;
}
/* core_tpg_check_initiator_node_acl()
 *
 *	Look up an explicit node ACL for @initiatorname, or — when the
 *	fabric allows demo mode — dynamically allocate and register one.
 *	Returns NULL when no explicit ACL exists and demo mode is off,
 *	or on allocation/setup failure.
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if ((acl))
		return acl;

	if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg)))
		return NULL;

	acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg);
	if (!(acl))
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	atomic_set(&acl->mib_ref_count, 0);
	acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	/*
	 * Mark as demo-mode created so a later explicit ConfigFS ACL can
	 * replace it in core_tpg_add_initiator_node_acl().
	 */
	acl->dynamic_node_acl = 1;

	TPG_TFO(tpg)->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_bh(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_bh(&tpg->acl_node_lock);

	/* Was a bare printk() without a log level. */
	printk(KERN_INFO "%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
		TPG_TFO(tpg)->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
/* Busy-wait until all SPC-3 PR references on @nacl have been dropped. */
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	for (;;) {
		if (atomic_read(&nacl->acl_pr_ref_count) == 0)
			break;
		cpu_relax();
	}
}
/* Busy-wait until all MIB references on @nacl have been dropped. */
void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl)
{
	for (;;) {
		if (atomic_read(&nacl->mib_ref_count) == 0)
			break;
		cpu_relax();
	}
}
/*
 * core_tpg_clear_object_luns - delete every active, device-backed LUN
 * from @tpg.  tpg_lun_lock is dropped around core_dev_del_lun() and
 * reacquired for the next iteration.
 */
void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		/*
		 * The return value was previously stored in an unused local
		 * ('ret'); deletion failures are intentionally ignored here.
		 */
		core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);
/* core_tpg_add_initiator_node_acl():
 *
 *	Register an explicit (ConfigFS-created) initiator node ACL on @tpg.
 *	If a dynamic (demo-mode) ACL for @initiatorname already exists it is
 *	converted in place to an explicit ACL and the caller-allocated
 *	@se_nacl is released back to the fabric.  Returns the ACL in use,
 *	or ERR_PTR(-EEXIST/-EINVAL/-ENOMEM) on failure.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_bh(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if ((acl)) {
		if (acl->dynamic_node_acl) {
			/* Promote the existing demo-mode ACL to explicit. */
			acl->dynamic_node_acl = 0;
			printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", TPG_TFO(tpg)->get_fabric_name(),
				TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname);
			spin_unlock_bh(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() returns
			 * a pointer to an existing demo mode node ACL.
			 */
			if (se_nacl)
				TPG_TFO(tpg)->tpg_release_fabric_acl(tpg,
					se_nacl);
			goto done;
		}

		printk(KERN_ERR "ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", TPG_TFO(tpg)->get_fabric_name(),
			initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock_bh(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_bh(&tpg->acl_node_lock);

	if (!(se_nacl)) {
		printk("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	TPG_TFO(tpg)->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_bh(&tpg->acl_node_lock);

done:
	printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
		TPG_TFO(tpg)->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
/* core_tpg_del_initiator_node_acl():
 *
 *	Unregister @acl from @tpg: shut down any active sessions that
 *	reference it, wait for outstanding PR/MIB references to drain,
 *	then release its mapped LUN ACLs and device list.  Returns 0.
 *
 *	NOTE(review): 'dynamic_acl' is set below but never read, and the
 *	'force' parameter is unused in this function — confirm intended.
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	struct se_session *sess, *sess_tmp;
	int dynamic_acl = 0;

	spin_lock_bh(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_bh(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry_safe(sess, sess_tmp,
				&tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!(TPG_TFO(tpg)->shutdown_session(sess)))
			continue;

		/*
		 * NOTE(review): session_lock is dropped mid-iteration of a
		 * list_for_each_entry_safe() walk; sess_tmp could go stale
		 * while the lock is released — confirm this is safe here.
		 */
		spin_unlock_bh(&tpg->session_lock);
		/*
		 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
		 * forcefully shutdown the $FABRIC_MOD session/nexus.
		 */
		TPG_TFO(tpg)->close_session(sess);

		spin_lock_bh(&tpg->session_lock);
	}
	spin_unlock_bh(&tpg->session_lock);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_tpg_wait_for_mib_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
		TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
/* core_tpg_set_initiator_node_queue_depth():
 *
 *	Change the TCQ depth for @initiatorname's node ACL.  If an active
 *	session exists and @force is zero the change is rejected with
 *	-EEXIST; with force=1 the session is shut down so the fabric can
 *	reinstate it with the new depth.  Returns 0 on success.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	int dynamic_acl = 0;

	spin_lock_bh(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!(acl)) {
		printk(KERN_ERR "Access Control List entry for %s Initiator"
			" Node %s does not exists for TPG %hu, ignoring"
			" request.\n", TPG_TFO(tpg)->get_fabric_name(),
			initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock_bh(&tpg->acl_node_lock);
		return -ENODEV;
	}
	/*
	 * A dynamic ACL is temporarily marked explicit and restored on
	 * every exit path below.
	 */
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_bh(&tpg->acl_node_lock);

	spin_lock_bh(&tpg->session_lock);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			printk(KERN_ERR "Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational. To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				TPG_TFO(tpg)->get_fabric_name(), initiatorname);
			spin_unlock_bh(&tpg->session_lock);

			spin_lock_bh(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_bh(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!(TPG_TFO(tpg)->shutdown_session(sess)))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for a Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to add the requested queue depth.
	 *
	 * Finally call TPG_TFO(tpg)->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_bh(&tpg->session_lock);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already the set session reinstatement
		 * bit from TPG_TFO(tpg)->shutdown_session() called above.
		 */
		if (init_sess)
			TPG_TFO(tpg)->close_session(init_sess);

		spin_lock_bh(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_bh(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&tpg->session_lock);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		TPG_TFO(tpg)->close_session(init_sess);

	/* Fixed typo in log message: "Successfuly" -> "Successfully". */
	printk(KERN_INFO "Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg));

	spin_lock_bh(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_bh(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
/*
 * Initialize and activate the TPG's built-in virtual LUN 0, backed by
 * the global virtual device and exported READ-ONLY.  Returns 0 on
 * success, -1 on failure.
 */
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = se_global->g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	INIT_LIST_HEAD(&lun->lun_cmd_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_cmd_lock);
	spin_lock_init(&lun->lun_sep_lock);

	if (core_tpg_post_addlun(se_tpg, lun, lun_access, dev) < 0)
		return -1;

	return 0;
}
/* Tear down the TPG's built-in virtual LUN 0. */
static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	core_tpg_post_dellun(se_tpg, &se_tpg->tpg_virt_lun0);
}
/*
 * core_tpg_register - initialize a fabric se_portal_group and add it to
 * the global TPG list.
 *
 * @se_tpg is owned by the caller (typically embedded inside a fabric
 * specific TPG structure), so on failure only resources allocated here
 * may be released.  Returns 0 on success or a negative errno.
 */
int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
				TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
	if (!(se_tpg->tpg_lun_list)) {
		printk(KERN_ERR "Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = &se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		INIT_LIST_HEAD(&lun->lun_cmd_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_cmd_lock);
		spin_lock_init(&lun->lun_sep_lock);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_list);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			/*
			 * Previously this did kfree(se_tpg), but se_tpg is
			 * caller-owned (usually embedded in a fabric TPG
			 * structure) and was never allocated here — freeing
			 * it corrupts memory and leaks tpg_lun_list.  Only
			 * release the LUN array allocated above.
			 */
			kfree(se_tpg->tpg_lun_list);
			se_tpg->tpg_lun_list = NULL;
			return -ENOMEM;
		}
	}

	spin_lock_bh(&se_global->se_tpg_lock);
	list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list);
	spin_unlock_bh(&se_global->se_tpg_lock);

	printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
/*
 * core_tpg_deregister - remove @se_tpg from the global list, wait for
 * outstanding PR references, release virtual LUN 0 (Normal TPGs) and
 * free the LUN array.  Returns 0.
 */
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	/*
	 * Argument order fixed to match the format string: previously the
	 * "Normal"/"Discovery" string filled TARGET_CORE[%s] and the fabric
	 * name filled the TPG-type slot.  Also guard a NULL endpoint WWN
	 * the same way core_tpg_register() does.
	 */
	printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		TPG_TFO(se_tpg)->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery",
		(TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg),
		TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));

	spin_lock_bh(&se_global->se_tpg_lock);
	list_del(&se_tpg->se_tpg_list);
	spin_unlock_bh(&se_global->se_tpg_lock);

	/* Wait for any in-flight SPC-3 PR references to drain. */
	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	kfree(se_tpg->tpg_lun_list);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
/*
 * core_tpg_pre_addlun - validate @unpacked_lun and return the matching
 * inactive se_lun slot from @tpg, or an ERR_PTR() on failure.
 */
struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	/* Reject indexes outside the statically sized tpg_lun_list[]. */
	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			TPG_TFO(tpg)->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, TPG_TFO(tpg)->get_fabric_name(),
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
/*
 * core_tpg_post_addlun - export the backing device for @lun and mark the
 * LUN active with the requested access flags.  Returns 0 on success,
 * -1 when the device export fails.
 */
int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	if (core_dev_export(lun_ptr, tpg, lun) < 0)
		return -1;

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}
/*
 * core_tpg_shutdown_lun - quiesce @lun prior to unexport: clear it from
 * the TPG's mapped LUN ACLs, then clear it from any active sessions.
 */
static void core_tpg_shutdown_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_from_sessions(lun);
}
/*
 * core_tpg_pre_dellun - validate @unpacked_lun and return the matching
 * active se_lun from @tpg, or an ERR_PTR() on failure.
 *
 * NOTE(review): the @ret out-parameter is never written here; it is kept
 * only for interface compatibility.
 */
struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun,
	int *ret)
{
	struct se_lun *lun;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = &tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
/*
 * core_tpg_post_dellun - quiesce @lun, unexport its backing device and
 * mark the LUN slot free again.  Always returns 0.
 */
int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_tpg_shutdown_lun(tpg, lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,332 @@
/*******************************************************************************
* Filename: target_core_ua.c
*
* This file contains logic for SPC-3 Unit Attention emulation
*
* Copyright (c) 2009,2010 Rising Tide Systems
* Copyright (c) 2009,2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>
#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
/*
 * core_scsi3_ua_check - decide whether a pending Unit Attention must be
 * reported instead of processing @cmd.
 *
 * Returns 0 when the command may proceed: no session or node ACL, no
 * pending UA on the mapped LUN, or the CDB is INQUIRY / REPORT LUNS /
 * REQUEST SENSE, which sam4r14 section 5.14 requires to be processed
 * without reporting a UA.  Returns -1 when the UA must be reported.
 */
int core_scsi3_ua_check(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	struct se_dev_entry *deve;
	struct se_session *sess = cmd->se_sess;
	struct se_node_acl *nacl;

	if (!(sess))
		return 0;

	nacl = sess->se_node_acl;
	if (!(nacl))
		return 0;

	deve = &nacl->device_list[cmd->orig_fe_lun];
	if (!(atomic_read(&deve->ua_count)))
		return 0;
	/*
	 * From sam4r14, section 5.14 Unit attention condition:
	 *
	 * a) if an INQUIRY command enters the enabled command state, the
	 *    device server shall process the INQUIRY command and shall neither
	 *    report nor clear any unit attention condition;
	 * b) if a REPORT LUNS command enters the enabled command state, the
	 *    device server shall process the REPORT LUNS command and shall not
	 *    report any unit attention condition;
	 * e) if a REQUEST SENSE command enters the enabled command state while
	 *    a unit attention condition exists for the SCSI initiator port
	 *    associated with the I_T nexus on which the REQUEST SENSE command
	 *    was received, then the device server shall process the command
	 *    and either:
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case REPORT_LUNS:
	case REQUEST_SENSE:
		return 0;
	default:
		return -1;
	}
	/* Unreachable trailing 'return -1' removed: all switch paths return. */
}
/*
 * core_scsi3_ua_allocate - queue a Unit Attention (asc/ascq) for @nacl's
 * mapped LUN @unpacked_lun.  Duplicate (asc, ascq) pairs are dropped;
 * new 29h-family conditions are inserted ahead of lower-priority entries
 * per sam4r14 section 5.14.  Returns 0 on success, -1 on NULL @nacl or
 * allocation failure.
 */
int core_scsi3_ua_allocate(
	struct se_node_acl *nacl,
	u32 unpacked_lun,
	u8 asc,
	u8 ascq)
{
	struct se_dev_entry *deve;
	struct se_ua *ua, *ua_p, *ua_tmp;
	/*
	 * PASSTHROUGH OPS
	 */
	if (!(nacl))
		return -1;

	ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
	if (!(ua)) {
		printk(KERN_ERR "Unable to allocate struct se_ua\n");
		return -1;
	}
	INIT_LIST_HEAD(&ua->ua_dev_list);
	INIT_LIST_HEAD(&ua->ua_nacl_list);

	ua->ua_nacl = nacl;
	ua->ua_asc = asc;
	ua->ua_ascq = ascq;

	spin_lock_irq(&nacl->device_list_lock);
	deve = &nacl->device_list[unpacked_lun];

	spin_lock(&deve->ua_lock);
	/*
	 * NOTE(review): every path inside this loop body returns, so only
	 * the FIRST list entry is ever examined for both the duplicate
	 * check and the priority insertion — confirm this is intentional.
	 */
	list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
		/*
		 * Do not report the same UNIT ATTENTION twice..
		 */
		if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
			spin_unlock(&deve->ua_lock);
			spin_unlock_irq(&nacl->device_list_lock);
			kmem_cache_free(se_ua_cache, ua);
			return 0;
		}
		/*
		 * Attach the highest priority Unit Attention to
		 * the head of the list following sam4r14,
		 * Section 5.14 Unit Attention Condition:
		 *
		 * POWER ON, RESET, OR BUS DEVICE RESET OCCURRED highest
		 * POWER ON OCCURRED or
		 * DEVICE INTERNAL RESET
		 * SCSI BUS RESET OCCURRED or
		 * MICROCODE HAS BEEN CHANGED or
		 * protocol specific
		 * BUS DEVICE RESET FUNCTION OCCURRED
		 * I_T NEXUS LOSS OCCURRED
		 * COMMANDS CLEARED BY POWER LOSS NOTIFICATION
		 * all others Lowest
		 *
		 * Each of the ASCQ codes listed above are defined in
		 * the 29h ASC family, see spc4r17 Table D.1
		 */
		if (ua_p->ua_asc == 0x29) {
			if ((asc == 0x29) && (ascq > ua_p->ua_ascq))
				list_add(&ua->ua_nacl_list,
						&deve->ua_list);
			else
				list_add_tail(&ua->ua_nacl_list,
						&deve->ua_list);
		} else if (ua_p->ua_asc == 0x2a) {
			/*
			 * Incoming Family 29h ASCQ codes will override
			 * Family 2Ah ASCQ codes for Unit Attention condition.
			 *
			 * NOTE(review): 'ascq > ua_p->ua_asc' compares the
			 * new ASCQ against the existing entry's ASC — this
			 * looks like a typo for ua_p->ua_ascq; confirm the
			 * intended priority rule before changing.
			 */
			if ((asc == 0x29) || (ascq > ua_p->ua_asc))
				list_add(&ua->ua_nacl_list,
						&deve->ua_list);
			else
				list_add_tail(&ua->ua_nacl_list,
						&deve->ua_list);
		} else
			list_add_tail(&ua->ua_nacl_list,
					&deve->ua_list);
		spin_unlock(&deve->ua_lock);
		spin_unlock_irq(&nacl->device_list_lock);

		atomic_inc(&deve->ua_count);
		smp_mb__after_atomic_inc();
		return 0;
	}
	/* Empty list: simply append the new Unit Attention. */
	list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
	spin_unlock(&deve->ua_lock);
	spin_unlock_irq(&nacl->device_list_lock);

	printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
		" 0x%02x, ASCQ: 0x%02x\n",
		TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun,
		asc, ascq);

	atomic_inc(&deve->ua_count);
	smp_mb__after_atomic_inc();
	return 0;
}
/*
 * core_scsi3_ua_release_all - drop every queued Unit Attention on @deve,
 * freeing each entry and keeping deve->ua_count in sync.
 */
void core_scsi3_ua_release_all(
	struct se_dev_entry *deve)
{
	struct se_ua *ua, *tmp;

	spin_lock(&deve->ua_lock);
	list_for_each_entry_safe(ua, tmp, &deve->ua_list, ua_nacl_list) {
		list_del(&ua->ua_nacl_list);
		kmem_cache_free(se_ua_cache, ua);

		atomic_dec(&deve->ua_count);
		smp_mb__after_atomic_dec();
	}
	spin_unlock(&deve->ua_lock);
}
/*
 * core_scsi3_ua_for_check_condition - report (and, for the default
 * ua_intlck_ctrl of 00b, release) the highest-priority pending Unit
 * Attention for @cmd's I_T nexus, returning its ASC/ASCQ via @asc/@ascq
 * for the outgoing CHECK_CONDITION sense data.
 */
void core_scsi3_ua_for_check_condition(
	struct se_cmd *cmd,
	u8 *asc,
	u8 *ascq)
{
	struct se_device *dev = SE_DEV(cmd);
	struct se_dev_entry *deve;
	struct se_session *sess = cmd->se_sess;
	struct se_node_acl *nacl;
	struct se_ua *ua = NULL, *ua_p;
	int head = 1;

	if (!(sess))
		return;

	nacl = sess->se_node_acl;
	if (!(nacl))
		return;

	spin_lock_irq(&nacl->device_list_lock);
	deve = &nacl->device_list[cmd->orig_fe_lun];
	if (!(atomic_read(&deve->ua_count))) {
		spin_unlock_irq(&nacl->device_list_lock);
		return;
	}
	/*
	 * The highest priority Unit Attentions are placed at the head of the
	 * struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION +
	 * sense data for the received CDB.
	 */
	spin_lock(&deve->ua_lock);
	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
		/*
		 * For ua_intlck_ctrl code not equal to 00b, only report the
		 * highest priority UNIT_ATTENTION and ASC/ASCQ without
		 * clearing it.
		 */
		if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) {
			*asc = ua->ua_asc;
			*ascq = ua->ua_ascq;
			break;
		}
		/*
		 * Otherwise for the default 00b, release the UNIT ATTENTION
		 * condition.  Return the ASC/ASCQ of the highest priority UA
		 * (head of the list) in the outgoing CHECK_CONDITION + sense.
		 */
		if (head) {
			*asc = ua->ua_asc;
			*ascq = ua->ua_ascq;
			head = 0;
		}
		list_del(&ua->ua_nacl_list);
		kmem_cache_free(se_ua_cache, ua);

		atomic_dec(&deve->ua_count);
		smp_mb__after_atomic_dec();
	}
	spin_unlock(&deve->ua_lock);
	spin_unlock_irq(&nacl->device_list_lock);

	printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with"
		" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
		" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
		TPG_TFO(nacl->se_tpg)->get_fabric_name(),
		(DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? "Reporting" :
		"Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl,
		cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq);
}
/*
 * core_scsi3_ua_clear_for_request_sense - consume all pending Unit
 * Attentions for @cmd's I_T nexus in response to a REQUEST_SENSE CDB.
 * The highest-priority ASC/ASCQ (list head) is returned via @asc/@ascq.
 * Returns 0 on success, or -1 when there is no session, no node ACL,
 * or no pending Unit Attention.
 */
int core_scsi3_ua_clear_for_request_sense(
	struct se_cmd *cmd,
	u8 *asc,
	u8 *ascq)
{
	struct se_dev_entry *deve;
	struct se_session *sess = cmd->se_sess;
	struct se_node_acl *nacl;
	struct se_ua *ua = NULL, *ua_p;
	int head = 1;

	if (!(sess))
		return -1;

	nacl = sess->se_node_acl;
	if (!(nacl))
		return -1;

	spin_lock_irq(&nacl->device_list_lock);
	deve = &nacl->device_list[cmd->orig_fe_lun];
	if (!(atomic_read(&deve->ua_count))) {
		spin_unlock_irq(&nacl->device_list_lock);
		return -1;
	}
	/*
	 * The highest priority Unit Attentions are placed at the head of the
	 * struct se_dev_entry->ua_list.  The First (and hence highest priority)
	 * ASC/ASCQ will be returned in REQUEST_SENSE payload data for the
	 * matching struct se_lun.
	 *
	 * Once the returning ASC/ASCQ values are set, we go ahead and
	 * release all of the Unit Attention conditions for the associated
	 * struct se_lun.
	 */
	spin_lock(&deve->ua_lock);
	list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
		if (head) {
			*asc = ua->ua_asc;
			*ascq = ua->ua_ascq;
			head = 0;
		}
		list_del(&ua->ua_nacl_list);
		kmem_cache_free(se_ua_cache, ua);

		atomic_dec(&deve->ua_count);
		smp_mb__after_atomic_dec();
	}
	spin_unlock(&deve->ua_lock);
	spin_unlock_irq(&nacl->device_list_lock);

	printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped"
		" LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
		" ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(),
		cmd->orig_fe_lun, *asc, *ascq);

	/* head still set means the list was empty despite ua_count != 0. */
	return (head) ? -1 : 0;
}

View File

@ -0,0 +1,36 @@
#ifndef TARGET_CORE_UA_H
/* The guard previously lacked its matching #define, making it a no-op. */
#define TARGET_CORE_UA_H

/*
 * From spc4r17, Table D.1: ASC and ASCQ Assignment
 *
 * NOTE: the misspelled macro names (OCCURED, IMPLICT) are kept as-is
 * because external users reference them by name.
 */
#define ASCQ_29H_POWER_ON_RESET_OR_BUS_DEVICE_RESET_OCCURED	0x00
#define ASCQ_29H_POWER_ON_OCCURRED				0x01
#define ASCQ_29H_SCSI_BUS_RESET_OCCURED				0x02
#define ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED		0x03
#define ASCQ_29H_DEVICE_INTERNAL_RESET				0x04
#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_SINGLE_ENDED	0x05
#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_LVD		0x06
#define ASCQ_29H_NEXUS_LOSS_OCCURRED				0x07

#define ASCQ_2AH_PARAMETERS_CHANGED				0x00
#define ASCQ_2AH_MODE_PARAMETERS_CHANGED			0x01
#define ASCQ_2AH_LOG_PARAMETERS_CHANGED				0x02
#define ASCQ_2AH_RESERVATIONS_PREEMPTED				0x03
#define ASCQ_2AH_RESERVATIONS_RELEASED				0x04
#define ASCQ_2AH_REGISTRATIONS_PREEMPTED			0x05
#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED		0x06
#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
#define ASCQ_2AH_PRIORITY_CHANGED				0x08

#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS		0x09

extern struct kmem_cache *se_ua_cache;

extern int core_scsi3_ua_check(struct se_cmd *, unsigned char *);
extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
extern void core_scsi3_ua_release_all(struct se_dev_entry *);
extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,
						u8 *, u8 *);

#endif /* TARGET_CORE_UA_H */

View File

@ -0,0 +1,147 @@
/* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* configfs_macros.h - extends macros for configfs
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*
* Based on sysfs:
* sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
*
* Based on kobject.h:
* Copyright (c) 2002-2003 Patrick Mochel
* Copyright (c) 2002-2003 Open Source Development Labs
*
* configfs Copyright (C) 2005 Oracle. All rights reserved.
*
* Added CONFIGFS_EATTR() macros from original configfs.h macros
 * Copyright (C) 2008-2009 Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* Please read Documentation/filesystems/configfs.txt before using the
* configfs interface, ESPECIALLY the parts about reference counts and
* item destructors.
*/
#ifndef _CONFIGFS_MACROS_H_
#define _CONFIGFS_MACROS_H_
#include <linux/configfs.h>
/*
* Users often need to create attribute structures for their configurable
* attributes, containing a configfs_attribute member and function pointers
* for the show() and store() operations on that attribute. If they don't
* need anything else on the extended attribute structure, they can use
 * this macro to define it. The argument _name ends up as
 * 'struct _name_attribute', and is also the name passed to CONFIGFS_EATTR_OPS() below.
* The argument _item is the name of the structure containing the
* struct config_item or struct config_group structure members
*/
/*
 * Defines 'struct <_name>_attribute' wrapping a struct configfs_attribute
 * plus type-safe show()/store() callbacks that take a 'struct <_item> *'.
 */
#define CONFIGFS_EATTR_STRUCT(_name, _item)				\
struct _name##_attribute {						\
	struct configfs_attribute attr;					\
	ssize_t (*show)(struct _item *, char *);			\
	ssize_t (*store)(struct _item *, const char *, size_t);		\
}

/*
 * With the extended attribute structure, users can use this macro
 * (similar to sysfs' __ATTR) to make defining attributes easier.
 * An example:
 * #define MYITEM_EATTR(_name, _mode, _show, _store)	\
 * struct myitem_attribute childless_attr_##_name =	\
 *	__CONFIGFS_EATTR(_name, _mode, _show, _store)
 */
#define __CONFIGFS_EATTR(_name, _mode, _show, _store)			\
{									\
	.attr	= {							\
		.ca_name = __stringify(_name),				\
		.ca_mode = _mode,					\
		.ca_owner = THIS_MODULE,				\
	},								\
	.show	= _show,						\
	.store	= _store,						\
}
/* Here is a readonly version, only requiring a show() operation */
#define __CONFIGFS_EATTR_RO(_name, _show)				\
{									\
	.attr	= {							\
		.ca_name = __stringify(_name),				\
		.ca_mode = 0444,					\
		.ca_owner = THIS_MODULE,				\
	},								\
	.show	= _show,						\
}

/*
 * With these extended attributes, the simple show_attribute() and
 * store_attribute() operations need to call the show() and store() of the
 * attributes.  This is a common pattern, so we provide a macro to define
 * them.  The argument _name is the name of the attribute defined by
 * CONFIGFS_ATTR_STRUCT().  The argument _item is the name of the structure
 * containing the struct config_item or struct config_group structure member.
 * The argument _item_member is the actual name of the struct config_* struct
 * in your _item structure.  Meaning  my_structure->some_config_group.
 *		    ^^_item^^^^^	     ^^_item_member^^^
 * This macro expects the attributes to be named "struct <name>_attribute".
 */
/* Maps a generic config_item back to the containing <_item> structure. */
#define CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member)		\
static struct _item *to_##_name(struct config_item *ci)			\
{									\
	return (ci) ? container_of(to_config_group(ci), struct _item,	\
			_item_member) : NULL;				\
}

/* Generic configfs ->show() thunk that dispatches to the typed show(). */
#define CONFIGFS_EATTR_OPS_SHOW(_name, _item)				\
static ssize_t _name##_attr_show(struct config_item *item,		\
				 struct configfs_attribute *attr,	\
				 char *page)				\
{									\
	struct _item *_item = to_##_name(item);				\
	struct _name##_attribute * _name##_attr =			\
		container_of(attr, struct _name##_attribute, attr);	\
	ssize_t ret = 0;						\
									\
	if (_name##_attr->show)						\
		ret = _name##_attr->show(_item, page);			\
	return ret;							\
}

/* Generic configfs ->store() thunk that dispatches to the typed store(). */
#define CONFIGFS_EATTR_OPS_STORE(_name, _item)				\
static ssize_t _name##_attr_store(struct config_item *item,		\
				  struct configfs_attribute *attr,	\
				  const char *page, size_t count)	\
{									\
	struct _item *_item = to_##_name(item);				\
	struct _name##_attribute * _name##_attr =			\
		container_of(attr, struct _name##_attribute, attr);	\
	ssize_t ret = -EINVAL;						\
									\
	if (_name##_attr->store)					\
		ret = _name##_attr->store(_item, page, count);		\
	return ret;							\
}

/* Emits the converter plus both show and store thunks. */
#define CONFIGFS_EATTR_OPS(_name, _item, _item_member)			\
	CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member);		\
	CONFIGFS_EATTR_OPS_SHOW(_name, _item);				\
	CONFIGFS_EATTR_OPS_STORE(_name, _item);

/* Read-only variant: converter plus the show thunk only. */
#define CONFIGFS_EATTR_OPS_RO(_name, _item, _item_member)		\
	CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member);		\
	CONFIGFS_EATTR_OPS_SHOW(_name, _item);

#endif /* _CONFIGFS_MACROS_H_ */

View File

@ -0,0 +1,937 @@
#ifndef TARGET_CORE_BASE_H
#define TARGET_CORE_BASE_H
#include <linux/in.h>
#include <linux/configfs.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <net/sock.h>
#include <net/tcp.h>
#include "target_core_mib.h"
#define TARGET_CORE_MOD_VERSION		"v4.0.0-rc6"
/* Signals that may interrupt/stop TCM kernel threads */
#define SHUTDOWN_SIGS	(sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT))
/* Used by transport_generic_allocate_iovecs() */
#define TRANSPORT_IOV_DATA_BUFFER		5
/* Maximum Number of LUNs per Target Portal Group */
#define TRANSPORT_MAX_LUNS_PER_TPG		256
/*
 * By default we use 32-byte CDBs in TCM Core and subsystem plugin code.
 *
 * Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and
 * include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use
 * 16-byte CDBs by default and require an extra allocation for
 * 32-byte CDBs because of legacy issues.
 *
 * Within TCM Core there are no such legacy limitations, so we go ahead
 * and use 32-byte CDBs by default and use include/scsi/scsi.h:scsi_command_size()
 * within all TCM Core and subsystem plugin code.
 */
#define TCM_MAX_COMMAND_SIZE			32
/*
 * From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently
 * defined 96, but the real limit is 252 (or 260 including the header)
 */
#define TRANSPORT_SENSE_BUFFER			SCSI_SENSE_BUFFERSIZE
/* Used by transport_send_check_condition_and_sense() */
#define SPC_SENSE_KEY_OFFSET			2
#define SPC_ASC_KEY_OFFSET			12
#define SPC_ASCQ_KEY_OFFSET			13
/* Maximum length of an initiator/target WWN (iSCSI IQN sized) */
#define TRANSPORT_IQN_LEN			224
/* Used by target_core_store_alua_lu_gp() and target_core_alua_lu_gp_show_attr_members() */
#define LU_GROUP_NAME_BUF			256
/* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */
#define TG_PT_GROUP_NAME_BUF			256
/* Used to parse VPD into struct t10_vpd */
#define VPD_TMP_BUF_SIZE			128
/* Used by transport_generic_cmd_sequencer() */
#define READ_BLOCK_LEN				6
#define READ_CAP_LEN				8
#define READ_POSITION_LEN			20
#define INQUIRY_LEN				36
/* Used by transport_get_inquiry_vpd_serial() */
#define INQUIRY_VPD_SERIAL_LEN			254
/* Used by transport_get_inquiry_vpd_device_ident() */
#define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN	254
/* struct se_hba->hba_flags (bitmask) */
enum hba_flags_table {
	HBA_FLAGS_INTERNAL_USE	= 0x01,
	HBA_FLAGS_PSCSI_MODE	= 0x02,
};
/* struct se_lun->lun_status */
enum transport_lun_status_table {
	TRANSPORT_LUN_STATUS_FREE	= 0,
	TRANSPORT_LUN_STATUS_ACTIVE	= 1,
};
/* struct se_portal_group->se_tpg_type */
enum transport_tpg_type_table {
	TRANSPORT_TPG_TYPE_NORMAL	= 0,
	TRANSPORT_TPG_TYPE_DISCOVERY	= 1,
};
/* Used for generate timer flags (bitmask) */
enum timer_flags_table {
	TF_RUNNING	= 0x01,
	TF_STOP		= 0x02,
};
/* Special transport agnostic struct se_cmd->t_states */
enum transport_state_table {
	TRANSPORT_NO_STATE	= 0,
	TRANSPORT_NEW_CMD	= 1,
	TRANSPORT_DEFERRED_CMD	= 2,
	TRANSPORT_WRITE_PENDING	= 3,
	TRANSPORT_PROCESS_WRITE	= 4,
	TRANSPORT_PROCESSING	= 5,
	TRANSPORT_COMPLETE_OK	= 6,
	TRANSPORT_COMPLETE_FAILURE = 7,
	TRANSPORT_COMPLETE_TIMEOUT = 8,
	TRANSPORT_PROCESS_TMR	= 9,
	TRANSPORT_TMR_COMPLETE	= 10,
	TRANSPORT_ISTATE_PROCESSING = 11,
	TRANSPORT_ISTATE_PROCESSED = 12,
	TRANSPORT_KILL		= 13,
	TRANSPORT_REMOVE	= 14,
	TRANSPORT_FREE		= 15,
	TRANSPORT_NEW_CMD_MAP	= 16,
};
/* Used for struct se_cmd->se_cmd_flags (bitmask) */
enum se_cmd_flags_table {
	SCF_SUPPORTED_SAM_OPCODE	= 0x00000001,
	SCF_TRANSPORT_TASK_SENSE	= 0x00000002,
	SCF_EMULATED_TASK_SENSE		= 0x00000004,
	SCF_SCSI_DATA_SG_IO_CDB		= 0x00000008,
	SCF_SCSI_CONTROL_SG_IO_CDB	= 0x00000010,
	SCF_SCSI_CONTROL_NONSG_IO_CDB	= 0x00000020,
	SCF_SCSI_NON_DATA_CDB		= 0x00000040,
	SCF_SCSI_CDB_EXCEPTION		= 0x00000080,
	SCF_SCSI_RESERVATION_CONFLICT	= 0x00000100,
	SCF_CMD_PASSTHROUGH_NOALLOC	= 0x00000200,
	SCF_SE_CMD_FAILED		= 0x00000400,
	SCF_SE_LUN_CMD			= 0x00000800,
	SCF_SE_ALLOW_EOO		= 0x00001000,
	SCF_SE_DISABLE_ONLINE_CHECK	= 0x00002000,
	SCF_SENT_CHECK_CONDITION	= 0x00004000,
	SCF_OVERFLOW_BIT		= 0x00008000,
	SCF_UNDERFLOW_BIT		= 0x00010000,
	SCF_SENT_DELAYED_TAS		= 0x00020000,
	SCF_ALUA_NON_OPTIMIZED		= 0x00040000,
	SCF_DELAYED_CMD_FROM_SAM_ATTR	= 0x00080000,
	SCF_PASSTHROUGH_SG_TO_MEM	= 0x00100000,
	SCF_PASSTHROUGH_CONTIG_TO_SG	= 0x00200000,
	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
	SCF_EMULATE_SYNC_CACHE		= 0x00800000,
	SCF_EMULATE_CDB_ASYNC		= 0x01000000,
	SCF_EMULATE_SYNC_UNMAP		= 0x02000000
};
/* struct se_dev_entry->lun_flags and struct se_lun->lun_access (bitmask) */
enum transport_lunflags_table {
	TRANSPORT_LUNFLAGS_NO_ACCESS		= 0x00,
	TRANSPORT_LUNFLAGS_INITIATOR_ACCESS	= 0x01,
	TRANSPORT_LUNFLAGS_READ_ONLY		= 0x02,
	TRANSPORT_LUNFLAGS_READ_WRITE		= 0x04,
};
/* struct se_device->dev_status (bitmask) */
enum transport_device_status_table {
	TRANSPORT_DEVICE_ACTIVATED		= 0x01,
	TRANSPORT_DEVICE_DEACTIVATED		= 0x02,
	TRANSPORT_DEVICE_QUEUE_FULL		= 0x04,
	TRANSPORT_DEVICE_SHUTDOWN		= 0x08,
	TRANSPORT_DEVICE_OFFLINE_ACTIVATED	= 0x10,
	TRANSPORT_DEVICE_OFFLINE_DEACTIVATED	= 0x20,
};
/*
 * Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason
 * to signal which ASC/ASCQ sense payload should be built.
 */
enum tcm_sense_reason_table {
	TCM_NON_EXISTENT_LUN			= 0x01,
	TCM_UNSUPPORTED_SCSI_OPCODE		= 0x02,
	TCM_INCORRECT_AMOUNT_OF_DATA		= 0x03,
	TCM_UNEXPECTED_UNSOLICITED_DATA		= 0x04,
	TCM_SERVICE_CRC_ERROR			= 0x05,
	TCM_SNACK_REJECTED			= 0x06,
	TCM_SECTOR_COUNT_TOO_MANY		= 0x07,
	TCM_INVALID_CDB_FIELD			= 0x08,
	TCM_INVALID_PARAMETER_LIST		= 0x09,
	TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE	= 0x0a,
	TCM_UNKNOWN_MODE_PAGE			= 0x0b,
	TCM_WRITE_PROTECTED			= 0x0c,
	TCM_CHECK_CONDITION_ABORT_CMD		= 0x0d,
	TCM_CHECK_CONDITION_UNIT_ATTENTION	= 0x0e,
	TCM_CHECK_CONDITION_NOT_READY		= 0x0f,
};
/* Generic reference/access counter embedded in se_device */
struct se_obj {
	atomic_t obj_access_count;
} ____cacheline_aligned;
/*
 * Used by TCM Core internally to signal if ALUA emulation is enabled or
 * disabled, or running in with TCM/pSCSI passthrough mode
 */
typedef enum {
	SPC_ALUA_PASSTHROUGH,
	SPC2_ALUA_DISABLED,
	SPC3_ALUA_EMULATED
} t10_alua_index_t;
/*
 * Used by TCM Core internally to signal if SAM Task Attribute emulation
 * is enabled or disabled, or running in with TCM/pSCSI passthrough mode
 */
typedef enum {
	SAM_TASK_ATTR_PASSTHROUGH,
	SAM_TASK_ATTR_UNTAGGED,
	SAM_TASK_ATTR_EMULATED
} t10_task_attr_index_t;
struct se_cmd;
/* Per-subsystem-device ALUA state: target port group bookkeeping */
struct t10_alua {
	t10_alua_index_t alua_type;
	/* ALUA Target Port Group ID */
	u16	alua_tg_pt_gps_counter;
	u32	alua_tg_pt_gps_count;
	spinlock_t tg_pt_gps_lock;
	struct se_subsystem_dev *t10_sub_dev;
	/* Used for default ALUA Target Port Group */
	struct t10_alua_tg_pt_gp *default_tg_pt_gp;
	/* Used for default ALUA Target Port Group ConfigFS group */
	struct config_group alua_tg_pt_gps_group;
	int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *);
	struct list_head tg_pt_gps_list;
} ____cacheline_aligned;
/* ALUA Logical Unit Group: a named set of LUs sharing ALUA state */
struct t10_alua_lu_gp {
	u16	lu_gp_id;
	int	lu_gp_valid_id;
	u32	lu_gp_members;
	atomic_t lu_gp_shutdown;
	atomic_t lu_gp_ref_cnt;
	spinlock_t lu_gp_lock;
	struct config_group lu_gp_group;
	struct list_head lu_gp_list;
	struct list_head lu_gp_mem_list;
} ____cacheline_aligned;
/* Membership link of one se_device in an ALUA Logical Unit Group */
struct t10_alua_lu_gp_member {
	/*
	 * Association flag. Declared unsigned: a signed one-bit bitfield can
	 * only hold 0 and -1 ("dubious one-bit signed bitfield" per sparse),
	 * breaking comparisons against 1.
	 */
	unsigned int lu_gp_assoc:1;
	atomic_t lu_gp_mem_ref_cnt;
	spinlock_t lu_gp_mem_lock;
	struct t10_alua_lu_gp *lu_gp;
	struct se_device *lu_gp_mem_dev;
	struct list_head lu_gp_mem_list;
} ____cacheline_aligned;
/* ALUA Target Port Group: access state shared by a set of target ports */
struct t10_alua_tg_pt_gp {
	u16	tg_pt_gp_id;
	int	tg_pt_gp_valid_id;
	int	tg_pt_gp_alua_access_status;
	int	tg_pt_gp_alua_access_type;
	int	tg_pt_gp_nonop_delay_msecs;
	int	tg_pt_gp_trans_delay_msecs;
	int	tg_pt_gp_pref;
	int	tg_pt_gp_write_metadata;
	/* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
#define ALUA_MD_BUF_LEN				1024
	u32	tg_pt_gp_md_buf_len;
	u32	tg_pt_gp_members;
	atomic_t tg_pt_gp_alua_access_state;
	atomic_t tg_pt_gp_ref_cnt;
	spinlock_t tg_pt_gp_lock;
	struct mutex tg_pt_gp_md_mutex;
	struct se_subsystem_dev *tg_pt_gp_su_dev;
	struct config_group tg_pt_gp_group;
	struct list_head tg_pt_gp_list;
	struct list_head tg_pt_gp_mem_list;
} ____cacheline_aligned;
/* Membership link of one se_port in an ALUA Target Port Group */
struct t10_alua_tg_pt_gp_member {
	/*
	 * Association flag. Unsigned one-bit bitfield: a signed :1 field can
	 * only represent 0 and -1 (sparse: "dubious one-bit signed bitfield").
	 */
	unsigned int tg_pt_gp_assoc:1;
	atomic_t tg_pt_gp_mem_ref_cnt;
	spinlock_t tg_pt_gp_mem_lock;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct se_port *tg_pt;
	struct list_head tg_pt_gp_mem_list;
} ____cacheline_aligned;
/* One parsed INQUIRY EVPD 0x83 (Device Identification) descriptor */
struct t10_vpd {
	unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
	int protocol_identifier_set;
	u32 protocol_identifier;
	u32 device_identifier_code_set;
	u32 association;
	u32 device_identifier_type;
	struct list_head vpd_list;
} ____cacheline_aligned;
/* T10 standard INQUIRY/VPD WWN data for a subsystem device */
struct t10_wwn {
	unsigned char vendor[8];
	unsigned char model[16];
	unsigned char revision[4];
	unsigned char unit_serial[INQUIRY_VPD_SERIAL_LEN];
	spinlock_t t10_vpd_lock;
	struct se_subsystem_dev *t10_sub_dev;
	struct config_group t10_wwn_group;
	struct list_head t10_vpd_list;
} ____cacheline_aligned;
/*
 * Used by TCM Core internally to signal if >= SPC-3 persistent reservations
 * emulation is enabled or disabled, or running in with TCM/pSCSI passthrough
 * mode
 */
typedef enum {
	SPC_PASSTHROUGH,
	SPC2_RESERVATIONS,
	SPC3_PERSISTENT_RESERVATIONS
} t10_reservations_index_t;
/*
 * One SPC-3 Persistent Reservation registration, including the APTPL
 * (Activate Persistence across Target Power Loss) metadata buffers.
 */
struct t10_pr_registration {
	/* Used for fabrics that contain WWN+ISID */
#define PR_REG_ISID_LEN				16
	/* PR_REG_ISID_LEN + ',i,0x' */
#define PR_REG_ISID_ID_LEN			(PR_REG_ISID_LEN + 5)
	char pr_reg_isid[PR_REG_ISID_LEN];
	/* Used during APTPL metadata reading */
#define PR_APTPL_MAX_IPORT_LEN			256
	unsigned char pr_iport[PR_APTPL_MAX_IPORT_LEN];
	/* Used during APTPL metadata reading */
#define PR_APTPL_MAX_TPORT_LEN			256
	unsigned char pr_tport[PR_APTPL_MAX_TPORT_LEN];
	/* For writing out live meta data */
	unsigned char *pr_aptpl_buf;
	u16 pr_aptpl_rpti;
	u16 pr_reg_tpgt;
	/* Reservation affects all target ports */
	int pr_reg_all_tg_pt;
	/* Activate Persistence across Target Power Loss */
	int pr_reg_aptpl;
	int pr_res_holder;
	int pr_res_type;
	int pr_res_scope;
	/*
	 * Used for fabric initiator WWPNs using a ISID. Unsigned one-bit
	 * bitfield: a signed :1 field holds only 0 and -1 (sparse warns
	 * "dubious one-bit signed bitfield").
	 */
	unsigned int isid_present_at_reg:1;
	u32 pr_res_mapped_lun;
	u32 pr_aptpl_target_lun;
	u32 pr_res_generation;
	u64 pr_reg_bin_isid;
	u64 pr_res_key;
	atomic_t pr_res_holders;
	struct se_node_acl *pr_reg_nacl;
	struct se_dev_entry *pr_reg_deve;
	struct se_lun *pr_reg_tg_pt_lun;
	struct list_head pr_reg_list;
	struct list_head pr_reg_abort_list;
	struct list_head pr_reg_aptpl_list;
	struct list_head pr_reg_atp_list;
	struct list_head pr_reg_atp_mem_list;
} ____cacheline_aligned;
/*
 * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS,
 * SPC2_RESERVATIONS or SPC_PASSTHROUGH in drivers/target/target_core_pr.c:
 * core_setup_reservations()
 */
struct t10_reservation_ops {
	int (*t10_reservation_check)(struct se_cmd *, u32 *);
	int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
	int (*t10_pr_register)(struct se_cmd *);
	int (*t10_pr_clear)(struct se_cmd *);
};
/* Per-subsystem-device reservation state (SPC-2 and SPC-3 PR) */
struct t10_reservation_template {
	/* Reservation affects all target ports */
	int pr_all_tg_pt;
	/* Activate Persistence across Target Power Loss enabled
	 * for SCSI device */
	int pr_aptpl_active;
	/* Used by struct t10_reservation_template->pr_aptpl_buf_len */
#define PR_APTPL_BUF_LEN			8192
	u32 pr_aptpl_buf_len;
	u32 pr_generation;
	t10_reservations_index_t res_type;
	spinlock_t registration_lock;
	spinlock_t aptpl_reg_lock;
	/*
	 * This will always be set by one individual I_T Nexus.
	 * However with all_tg_pt=1, other I_T Nexus from the
	 * same initiator can access PR reg/res info on a different
	 * target port.
	 *
	 * There is also the 'All Registrants' case, where there is
	 * a single *pr_res_holder of the reservation, but all
	 * registrations are considered reservation holders.
	 */
	struct se_node_acl *pr_res_holder;
	struct list_head registration_list;
	struct list_head aptpl_reg_list;
	struct t10_reservation_ops pr_ops;
} ____cacheline_aligned;
/* One queued work item for a processing-thread queue (se_queue_obj) */
struct se_queue_req {
	int			state;
	void			*cmd;
	struct list_head	qr_list;
} ____cacheline_aligned;
/* Simple locked FIFO with a waitqueue, drained by a kernel thread */
struct se_queue_obj {
	atomic_t		queue_cnt;
	spinlock_t		cmd_queue_lock;
	struct list_head	qobj_list;
	wait_queue_head_t	thread_wq;
} ____cacheline_aligned;
/*
 * Used one per struct se_cmd to hold all extra struct se_task
 * metadata. This structure is setup and allocated in
 * drivers/target/target_core_transport.c:__transport_alloc_se_cmd()
 */
struct se_transport_task {
	unsigned char		*t_task_cdb;
	unsigned char		__t_task_cdb[TCM_MAX_COMMAND_SIZE];
	unsigned long long	t_task_lba;
	int			t_tasks_failed;
	int			t_tasks_fua;
	/*
	 * BIDI command flag. Unsigned one-bit bitfield: a signed :1 field
	 * holds only 0 and -1 (sparse: "dubious one-bit signed bitfield").
	 */
	unsigned int		t_tasks_bidi:1;
	u32			t_task_cdbs;
	u32			t_tasks_check;
	u32			t_tasks_no;
	u32			t_tasks_sectors;
	u32			t_tasks_se_num;
	u32			t_tasks_se_bidi_num;
	u32			t_tasks_sg_chained_no;
	atomic_t		t_fe_count;
	atomic_t		t_se_count;
	atomic_t		t_task_cdbs_left;
	atomic_t		t_task_cdbs_ex_left;
	atomic_t		t_task_cdbs_timeout_left;
	atomic_t		t_task_cdbs_sent;
	atomic_t		t_transport_aborted;
	atomic_t		t_transport_active;
	atomic_t		t_transport_complete;
	atomic_t		t_transport_queue_active;
	atomic_t		t_transport_sent;
	atomic_t		t_transport_stop;
	atomic_t		t_transport_timeout;
	atomic_t		transport_dev_active;
	atomic_t		transport_lun_active;
	atomic_t		transport_lun_fe_stop;
	atomic_t		transport_lun_stop;
	spinlock_t		t_state_lock;
	struct completion	t_transport_stop_comp;
	struct completion	transport_lun_fe_stop_comp;
	struct completion	transport_lun_stop_comp;
	struct scatterlist	*t_tasks_sg_chained;
	struct scatterlist	t_tasks_sg_bounce;
	void			*t_task_buf;
	/*
	 * Used for pre-registered fabric SGL passthrough WRITE and READ
	 * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
	 * and other HW target mode fabric modules.
	 */
	struct scatterlist	*t_task_pt_sgl;
	struct list_head	*t_mem_list;
	/* Used for BIDI READ */
	struct list_head	*t_mem_bidi_list;
	struct list_head	t_task_list;
} ____cacheline_aligned;
/* One backend I/O unit; a single se_cmd may fan out into several se_tasks */
struct se_task {
	unsigned char	task_sense;
	struct scatterlist *task_sg;
	struct scatterlist *task_sg_bidi;
	u8		task_scsi_status;
	u8		task_flags;
	int		task_error_status;
	int		task_state_flags;
	/*
	 * SG padding flag. Unsigned one-bit bitfield: a signed :1 field
	 * holds only 0 and -1 (sparse: "dubious one-bit signed bitfield").
	 */
	unsigned int	task_padded_sg:1;
	unsigned long long	task_lba;
	u32		task_no;
	u32		task_sectors;
	u32		task_size;
	u32		task_sg_num;
	u32		task_sg_offset;
	enum dma_data_direction	task_data_direction;
	struct se_cmd *task_se_cmd;
	struct se_device	*se_dev;
	struct completion	task_stop_comp;
	atomic_t	task_active;
	atomic_t	task_execute_queue;
	atomic_t	task_timeout;
	atomic_t	task_sent;
	atomic_t	task_stop;
	atomic_t	task_state_active;
	struct timer_list	task_timer;
	struct se_device *se_obj_ptr;
	struct list_head t_list;
	struct list_head t_execute_list;
	struct list_head t_state_list;
} ____cacheline_aligned;
/* Accessors for a task's owning command and backing device */
#define TASK_CMD(task)	((struct se_cmd *)task->task_se_cmd)
#define TASK_DEV(task)	((struct se_device *)task->se_dev)
/* Core descriptor for one SCSI command flowing through TCM */
struct se_cmd {
	/* SAM response code being sent to initiator */
	u8			scsi_status;
	u8			scsi_asc;
	u8			scsi_ascq;
	u8			scsi_sense_reason;
	u16			scsi_sense_length;
	/* Delay for ALUA Active/NonOptimized state access in milliseconds */
	int			alua_nonop_delay;
	/* See include/linux/dma-mapping.h */
	enum dma_data_direction	data_direction;
	/* For SAM Task Attribute */
	int			sam_task_attr;
	/* Transport protocol dependent state, see transport_state_table */
	enum transport_state_table t_state;
	/* Transport protocol dependent state for out of order CmdSNs */
	int			deferred_t_state;
	/* Transport specific error status */
	int			transport_error_status;
	/* See se_cmd_flags_table */
	u32			se_cmd_flags;
	u32			se_ordered_id;
	/* Total size in bytes associated with command */
	u32			data_length;
	/* SCSI Presented Data Transfer Length */
	u32			cmd_spdtl;
	u32			residual_count;
	u32			orig_fe_lun;
	/* Persistent Reservation key */
	u64			pr_res_key;
	atomic_t                transport_sent;
	/* Used for sense data */
	void			*sense_buffer;
	struct list_head	se_delayed_list;
	struct list_head	se_ordered_list;
	struct list_head	se_lun_list;
	struct se_device      *se_dev;
	struct se_dev_entry   *se_deve;
	struct se_device	*se_obj_ptr;
	struct se_device	*se_orig_obj_ptr;
	struct se_lun		*se_lun;
	/* Only used for internal passthrough and legacy TCM fabric modules */
	struct se_session	*se_sess;
	struct se_tmr_req	*se_tmr_req;
	/* t_task is setup to t_task_backstore in transport_init_se_cmd() */
	struct se_transport_task *t_task;
	struct se_transport_task t_task_backstore;
	struct target_core_fabric_ops *se_tfo;
	int (*transport_emulate_cdb)(struct se_cmd *);
	void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *);
	void (*transport_wait_for_tasks)(struct se_cmd *, int, int);
	void (*transport_complete_callback)(struct se_cmd *);
} ____cacheline_aligned;
/* Accessors for a command's task metadata and fabric ops template */
#define T_TASK(cmd)     ((struct se_transport_task *)(cmd->t_task))
#define CMD_TFO(cmd)    ((struct target_core_fabric_ops *)cmd->se_tfo)
/* SAM Task Management Request state (ABORT TASK, LUN RESET, ...) */
struct se_tmr_req {
	/* Task Management function to be performed */
	u8			function;
	/* Task Management response to send */
	u8			response;
	int			call_transport;
	/* Reference to ITT that Task Mgmt should be performed */
	u32			ref_task_tag;
	/* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */
	u64			ref_task_lun;
	void			*fabric_tmr_ptr;
	struct se_cmd		*task_cmd;
	struct se_cmd		*ref_cmd;
	struct se_device	*tmr_dev;
	struct se_lun		*tmr_lun;
	struct list_head	tmr_list;
} ____cacheline_aligned;
/* One pending Unit Attention condition (ASC/ASCQ pair) for a node ACL */
struct se_ua {
	u8			ua_asc;
	u8			ua_ascq;
	struct se_node_acl	*ua_nacl;
	struct list_head	ua_dev_list;
	struct list_head	ua_nacl_list;
} ____cacheline_aligned;
/* Per-initiator ACL within a target portal group */
struct se_node_acl {
	char			initiatorname[TRANSPORT_IQN_LEN];
	/*
	 * Used to signal demo mode created ACL, disabled by default.
	 * Unsigned one-bit bitfield: a signed :1 field holds only 0 and -1
	 * (sparse: "dubious one-bit signed bitfield").
	 */
	unsigned int		dynamic_node_acl:1;
	u32			queue_depth;
	u32			acl_index;
	u64			num_cmds;
	u64			read_bytes;
	u64			write_bytes;
	spinlock_t		stats_lock;
	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
	atomic_t		acl_pr_ref_count;
	/* Used for MIB access */
	atomic_t		mib_ref_count;
	struct se_dev_entry	*device_list;
	struct se_session	*nacl_sess;
	struct se_portal_group *se_tpg;
	spinlock_t		device_list_lock;
	spinlock_t		nacl_sess_lock;
	struct config_group	acl_group;
	struct config_group	acl_attrib_group;
	struct config_group	acl_auth_group;
	struct config_group	acl_param_group;
	struct config_group	*acl_default_groups[4];
	struct list_head	acl_list;
	struct list_head	acl_sess_list;
} ____cacheline_aligned;
/* One fabric login (I_T Nexus) bound to a node ACL and TPG */
struct se_session {
	/* Used for MIB access */
	atomic_t		mib_ref_count;
	u64			sess_bin_isid;
	struct se_node_acl	*se_node_acl;
	struct se_portal_group *se_tpg;
	void			*fabric_sess_ptr;
	struct list_head	sess_list;
	struct list_head	sess_acl_list;
} ____cacheline_aligned;
/* Accessors for a command's session and the session's node ACL */
#define SE_SESS(cmd)		((struct se_session *)(cmd)->se_sess)
#define SE_NODE_ACL(sess)	((struct se_node_acl *)(sess)->se_node_acl)
struct se_device;
struct se_transform_info;
struct scatterlist;
/* Per-initiator mapping of one LUN (MappedLUN in configfs) */
struct se_lun_acl {
	char			initiatorname[TRANSPORT_IQN_LEN];
	u32			mapped_lun;
	struct se_node_acl	*se_lun_nacl;
	struct se_lun		*se_lun;
	struct list_head	lacl_list;
	struct config_group	se_lun_group;
} ____cacheline_aligned;
/* Per-node-ACL view of a mapped LUN, including PR state and I/O stats */
struct se_dev_entry {
	/*
	 * Set when a PR registration exists for this entry. Unsigned one-bit
	 * bitfield: a signed :1 field holds only 0 and -1 (sparse: "dubious
	 * one-bit signed bitfield").
	 */
	unsigned int		def_pr_registered:1;
	/* See transport_lunflags_table */
	u32			lun_flags;
	u32			deve_cmds;
	u32			mapped_lun;
	u32			average_bytes;
	u32			last_byte_count;
	u32			total_cmds;
	u32			total_bytes;
	u64			pr_res_key;
	u64			creation_time;
	u32			attach_count;
	u64			read_bytes;
	u64			write_bytes;
	atomic_t		ua_count;
	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
	atomic_t		pr_ref_count;
	struct se_lun_acl	*se_lun_acl;
	spinlock_t		ua_lock;
	struct se_lun		*se_lun;
	struct list_head	alua_port_list;
	struct list_head	ua_list;
} ____cacheline_aligned;
/* Queue/transfer limits a backend reports when a device is created */
struct se_dev_limits {
	/* Max supported HW queue depth */
	u32		hw_queue_depth;
	/* Max supported virtual queue depth */
	u32		queue_depth;
	/* From include/linux/blkdev.h for the other HW/SW limits. */
	struct queue_limits limits;
} ____cacheline_aligned;
/* Tunable per-device attributes exposed via configfs (attrib/ group) */
struct se_dev_attrib {
	int		emulate_dpo;
	int		emulate_fua_write;
	int		emulate_fua_read;
	int		emulate_write_cache;
	int		emulate_ua_intlck_ctrl;
	int		emulate_tas;
	int		emulate_tpu;
	int		emulate_tpws;
	int		emulate_reservations;
	int		emulate_alua;
	int		enforce_pr_isids;
	u32		hw_block_size;
	u32		block_size;
	u32		hw_max_sectors;
	u32		max_sectors;
	u32		optimal_sectors;
	u32		hw_queue_depth;
	u32		queue_depth;
	u32		task_timeout;
	u32		max_unmap_lba_count;
	u32		max_unmap_block_desc_count;
	u32		unmap_granularity;
	u32		unmap_granularity_alignment;
	struct se_subsystem_dev *da_sub_dev;
	struct config_group da_group;
} ____cacheline_aligned;
/* ConfigFS-visible device container: attributes, WWN, ALUA and PR state */
struct se_subsystem_dev {
/* Used for struct se_subsystem_dev->se_dev_alias, must be less than PAGE_SIZE */
#define SE_DEV_ALIAS_LEN		512
	unsigned char	se_dev_alias[SE_DEV_ALIAS_LEN];
/* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */
#define SE_UDEV_PATH_LEN		512
	unsigned char	se_dev_udev_path[SE_UDEV_PATH_LEN];
	u32		su_dev_flags;
	struct se_hba *se_dev_hba;
	struct se_device *se_dev_ptr;
	struct se_dev_attrib se_dev_attrib;
	/* T10 Asymmetric Logical Unit Assignment for Target Ports */
	struct t10_alua	t10_alua;
	/* T10 Inquiry and VPD WWN Information */
	struct t10_wwn	t10_wwn;
	/* T10 SPC-2 + SPC-3 Reservations */
	struct t10_reservation_template t10_reservation;
	spinlock_t      se_dev_lock;
	void            *se_dev_su_ptr;
	struct list_head g_se_dev_list;
	struct config_group se_dev_group;
	/* For T10 Reservations */
	struct config_group se_dev_pr_group;
} ____cacheline_aligned;
/* Accessors for a subsystem device's ALUA and reservation state */
#define T10_ALUA(su_dev)	(&(su_dev)->t10_alua)
#define T10_RES(su_dev)		(&(su_dev)->t10_reservation)
#define T10_PR_OPS(su_dev)	(&(su_dev)->t10_reservation.pr_ops)
/* Live backend device state: queues, locks, threads, PR/ALUA runtime data */
struct se_device {
	/* Set to 1 if thread is NOT sleeping on thread_sem */
	u8			thread_active;
	u8			dev_status_timer_flags;
	/* RELATIVE TARGET PORT IDENTIFIER Counter */
	u16			dev_rpti_counter;
	/* Used for SAM Task Attribute ordering */
	u32			dev_cur_ordered_id;
	u32			dev_flags;
	u32			dev_port_count;
	/* See transport_device_status_table */
	u32			dev_status;
	u32			dev_tcq_window_closed;
	/* Physical device queue depth */
	u32			queue_depth;
	/* Used for SPC-2 reservations enforce of ISIDs */
	u64			dev_res_bin_isid;
	t10_task_attr_index_t	dev_task_attr_type;
	/* Pointer to transport specific device structure */
	void 			*dev_ptr;
	u32			dev_index;
	u64			creation_time;
	u32			num_resets;
	u64			num_cmds;
	u64			read_bytes;
	u64			write_bytes;
	spinlock_t		stats_lock;
	/* Active commands on this virtual SE device */
	atomic_t		active_cmds;
	atomic_t		simple_cmds;
	atomic_t		depth_left;
	atomic_t		dev_ordered_id;
	atomic_t		dev_tur_active;
	atomic_t		execute_tasks;
	atomic_t		dev_status_thr_count;
	atomic_t		dev_hoq_count;
	atomic_t		dev_ordered_sync;
	struct se_obj		dev_obj;
	struct se_obj		dev_access_obj;
	struct se_obj		dev_export_obj;
	struct se_queue_obj	*dev_queue_obj;
	struct se_queue_obj	*dev_status_queue_obj;
	spinlock_t		delayed_cmd_lock;
	spinlock_t		ordered_cmd_lock;
	spinlock_t		execute_task_lock;
	spinlock_t		state_task_lock;
	spinlock_t		dev_alua_lock;
	spinlock_t		dev_reservation_lock;
	spinlock_t		dev_state_lock;
	spinlock_t		dev_status_lock;
	spinlock_t		dev_status_thr_lock;
	spinlock_t		se_port_lock;
	spinlock_t		se_tmr_lock;
	/* Used for legacy SPC-2 reservations */
	struct se_node_acl	*dev_reserved_node_acl;
	/* Used for ALUA Logical Unit Group membership */
	struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem;
	/* Used for SPC-3 Persistent Reservations */
	struct t10_pr_registration *dev_pr_res_holder;
	struct list_head	dev_sep_list;
	struct list_head	dev_tmr_list;
	struct timer_list	dev_status_timer;
	/* Pointer to descriptor for processing thread */
	struct task_struct	*process_thread;
	pid_t			process_thread_pid;
	struct task_struct		*dev_mgmt_thread;
	struct list_head	delayed_cmd_list;
	struct list_head	ordered_cmd_list;
	struct list_head	execute_task_list;
	struct list_head	state_task_list;
	/* Pointer to associated SE HBA */
	struct se_hba		*se_hba;
	struct se_subsystem_dev *se_sub_dev;
	/* Pointer to template of function pointers for transport */
	struct se_subsystem_api *transport;
	/* Linked list for struct se_hba struct se_device list */
	struct list_head	dev_list;
	/* Linked list for struct se_global->g_se_dev_list */
	struct list_head	g_se_dev_list;
} ____cacheline_aligned;
/* Accessors for a command's device and the device's subsystem containers */
#define SE_DEV(cmd)		((struct se_device *)(cmd)->se_lun->lun_se_dev)
#define SU_DEV(dev)		((struct se_subsystem_dev *)(dev)->se_sub_dev)
#define DEV_ATTRIB(dev)		(&(dev)->se_sub_dev->se_dev_attrib)
#define DEV_T10_WWN(dev)	(&(dev)->se_sub_dev->t10_wwn)
/* One registered backend HBA (virtual or physical) hosting se_devices */
struct se_hba {
	u16			hba_tpgt;
	u32			hba_id;
	/* See hba_flags_table */
	u32			hba_flags;
	/* Virtual iSCSI devices attached. */
	u32			dev_count;
	u32			hba_index;
	atomic_t		dev_mib_access_count;
	atomic_t		load_balance_queue;
	atomic_t		left_queue_depth;
	/* Maximum queue depth the HBA can handle. */
	atomic_t		max_queue_depth;
	/* Pointer to transport specific host structure. */
	void			*hba_ptr;
	/* Linked list for struct se_device */
	struct list_head	hba_dev_list;
	struct list_head	hba_list;
	spinlock_t		device_lock;
	spinlock_t		hba_queue_lock;
	struct config_group	hba_group;
	struct mutex		hba_access_mutex;
	struct se_subsystem_api *transport;
} ____cacheline_aligned;
/* Accessor for a device's owning HBA */
#define SE_HBA(d)		((struct se_hba *)(d)->se_hba)
/* One exported LUN within a target portal group */
struct se_lun {
	/* See transport_lun_status_table */
	enum transport_lun_status_table lun_status;
	u32			lun_access;
	u32			lun_flags;
	u32			unpacked_lun;
	atomic_t		lun_acl_count;
	spinlock_t		lun_acl_lock;
	spinlock_t		lun_cmd_lock;
	spinlock_t		lun_sep_lock;
	struct completion	lun_shutdown_comp;
	struct list_head	lun_cmd_list;
	struct list_head	lun_acl_list;
	struct se_device	*lun_se_dev;
	struct config_group	lun_group;
	struct se_port	*lun_sep;
} ____cacheline_aligned;
/* Accessor for a command's LUN */
#define SE_LUN(c)		((struct se_lun *)(c)->se_lun)
/* One target port: the binding of a LUN to a portal group (SEP) */
struct se_port {
	/* RELATIVE TARGET PORT IDENTIFIER */
	u16		sep_rtpi;
	int		sep_tg_pt_secondary_stat;
	int		sep_tg_pt_secondary_write_md;
	u32		sep_index;
	struct scsi_port_stats sep_stats;
	/* Used for ALUA Target Port Groups membership */
	atomic_t	sep_tg_pt_gp_active;
	atomic_t	sep_tg_pt_secondary_offline;
	/* Used for PR ALL_TG_PT=1 */
	atomic_t	sep_tg_pt_ref_cnt;
	spinlock_t	sep_alua_lock;
	struct mutex	sep_tg_pt_md_mutex;
	struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem;
	struct se_lun *sep_lun;
	struct se_portal_group *sep_tpg;
	struct list_head sep_alua_list;
	struct list_head sep_list;
} ____cacheline_aligned;
/* ConfigFS group wrapper for a fabric network portal */
struct se_tpg_np {
	struct config_group	tpg_np_group;
} ____cacheline_aligned;
/* One Target Portal Group: ACLs, LUNs, sessions for a fabric endpoint */
struct se_portal_group {
	/* Type of target portal group, see transport_tpg_type_table */
	enum transport_tpg_type_table se_tpg_type;
	/* Number of ACLed Initiator Nodes for this TPG */
	u32			num_node_acls;
	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
	atomic_t		tpg_pr_ref_count;
	/* Spinlock for adding/removing ACLed Nodes */
	spinlock_t		acl_node_lock;
	/* Spinlock for adding/removing sessions */
	spinlock_t		session_lock;
	spinlock_t		tpg_lun_lock;
	/* Pointer to $FABRIC_MOD portal group */
	void			*se_tpg_fabric_ptr;
	struct list_head	se_tpg_list;
	/* linked list for initiator ACL list */
	struct list_head	acl_node_list;
	struct se_lun		*tpg_lun_list;
	struct se_lun		tpg_virt_lun0;
	/* List of TCM sessions associated with this TPG */
	struct list_head	tpg_sess_list;
	/* Pointer to $FABRIC_MOD dependent code */
	struct target_core_fabric_ops *se_tpg_tfo;
	struct se_wwn		*se_tpg_wwn;
	struct config_group	tpg_group;
	struct config_group	*tpg_default_groups[6];
	struct config_group	tpg_lun_group;
	struct config_group	tpg_np_group;
	struct config_group	tpg_acl_group;
	struct config_group	tpg_attrib_group;
	struct config_group	tpg_param_group;
} ____cacheline_aligned;
/* Accessor for a TPG's fabric ops template */
#define TPG_TFO(se_tpg)	((struct target_core_fabric_ops *)(se_tpg)->se_tpg_tfo)
/* Fabric WWN endpoint: the configfs parent of its portal groups */
struct se_wwn {
	struct target_fabric_configfs *wwn_tf;
	struct config_group	wwn_group;
} ____cacheline_aligned;
/* Singleton global state for TCM Core: counters, lists, default groups */
struct se_global {
	u16			alua_lu_gps_counter;
	int			g_sub_api_initialized;
	u32			in_shutdown;
	u32			alua_lu_gps_count;
	u32			g_hba_id_counter;
	struct config_group	target_core_hbagroup;
	struct config_group	alua_group;
	struct config_group	alua_lu_gps_group;
	struct list_head	g_lu_gps_list;
	struct list_head	g_se_tpg_list;
	struct list_head	g_hba_list;
	struct list_head	g_se_dev_list;
	struct se_hba		*g_lun0_hba;
	struct se_subsystem_dev *g_lun0_su_dev;
	struct se_device	*g_lun0_dev;
	struct t10_alua_lu_gp	*default_lu_gp;
	spinlock_t		g_device_lock;
	spinlock_t		hba_lock;
	spinlock_t		se_tpg_lock;
	spinlock_t		lu_gps_lock;
	spinlock_t		plugin_class_lock;
} ____cacheline_aligned;
#endif /* TARGET_CORE_BASE_H */

View File

@ -0,0 +1,52 @@
#define TARGET_CORE_CONFIGFS_VERSION	TARGET_CORE_MOD_VERSION
#define TARGET_CORE_CONFIG_ROOT	"/sys/kernel/config"
#define TARGET_CORE_NAME_MAX_LEN	64
#define TARGET_FABRIC_NAME_SIZE	32
/* Fabric module registration API for the target core configfs layer */
extern struct target_fabric_configfs *target_fabric_configfs_init(
				struct module *, const char *);
extern void target_fabric_configfs_free(struct target_fabric_configfs *);
extern int target_fabric_configfs_register(struct target_fabric_configfs *);
extern void target_fabric_configfs_deregister(struct target_fabric_configfs *);
/* config_item_type templates filled in per fabric for each configfs level */
struct target_fabric_configfs_template {
	struct config_item_type tfc_discovery_cit;
	struct config_item_type	tfc_wwn_cit;
	struct config_item_type tfc_tpg_cit;
	struct config_item_type tfc_tpg_base_cit;
	struct config_item_type	tfc_tpg_lun_cit;
	struct config_item_type tfc_tpg_port_cit;
	struct config_item_type tfc_tpg_np_cit;
	struct config_item_type tfc_tpg_np_base_cit;
	struct config_item_type tfc_tpg_attrib_cit;
	struct config_item_type tfc_tpg_param_cit;
	struct config_item_type tfc_tpg_nacl_cit;
	struct config_item_type tfc_tpg_nacl_base_cit;
	struct config_item_type tfc_tpg_nacl_attrib_cit;
	struct config_item_type tfc_tpg_nacl_auth_cit;
	struct config_item_type tfc_tpg_nacl_param_cit;
	struct config_item_type tfc_tpg_mappedlun_cit;
};
/* One registered fabric module's configfs state and ops */
struct target_fabric_configfs {
	char			tf_name[TARGET_FABRIC_NAME_SIZE];
	atomic_t		tf_access_cnt;
	struct list_head	tf_list;
	struct config_group	tf_group;
	struct config_group	tf_disc_group;
	struct config_group	*tf_default_groups[2];
	/* Pointer to fabric's config_item */
	struct config_item	*tf_fabric;
	/* Passed from fabric modules */
	struct config_item_type	*tf_fabric_cit;
	/* Pointer to target core subsystem */
	struct configfs_subsystem *tf_subsys;
	/* Pointer to fabric's struct module */
	struct module		*tf_module;
	struct target_core_fabric_ops tf_ops;
	struct target_fabric_configfs_template tf_cit_tmpl;
};
/* Accessor for a fabric's config_item_type template set */
#define TF_CIT_TMPL(tf) (&(tf)->tf_cit_tmpl)

View File

@ -0,0 +1,61 @@
#ifndef TARGET_CORE_DEVICE_H
#define TARGET_CORE_DEVICE_H
/* LUN lookup for incoming commands and task management requests */
extern int transport_get_lun_for_cmd(struct se_cmd *, unsigned char *, u32);
extern int transport_get_lun_for_tmr(struct se_cmd *, u32);
extern struct se_dev_entry *core_get_se_deve_from_rtpi(
					struct se_node_acl *, u16);
/* Per-node device list (mapped LUN) management */
extern int core_free_device_list_for_node(struct se_node_acl *,
					struct se_portal_group *);
extern void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
extern void core_update_device_list_access(u32, u32, struct se_node_acl *);
extern int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *, u32,
					u32, struct se_node_acl *,
					struct se_portal_group *, int);
extern void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
/* Export/unexport of a device through a TPG LUN */
extern int core_dev_export(struct se_device *, struct se_portal_group *,
					struct se_lun *);
extern void core_dev_unexport(struct se_device *, struct se_portal_group *,
					struct se_lun *);
extern int transport_core_report_lun_response(struct se_cmd *);
/* Device/HBA teardown helpers */
extern void se_release_device_for_hba(struct se_device *);
extern void se_release_vpd_for_dev(struct se_device *);
extern void se_clear_dev_ports(struct se_device *);
extern int se_free_virtual_device(struct se_device *, struct se_hba *);
extern int se_dev_check_online(struct se_device *);
extern int se_dev_check_shutdown(struct se_device *);
/* Device attribute setters backing the configfs attrib/ files */
extern void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
extern int se_dev_set_task_timeout(struct se_device *, u32);
extern int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
extern int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
extern int se_dev_set_unmap_granularity(struct se_device *, u32);
extern int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
extern int se_dev_set_emulate_dpo(struct se_device *, int);
extern int se_dev_set_emulate_fua_write(struct se_device *, int);
extern int se_dev_set_emulate_fua_read(struct se_device *, int);
extern int se_dev_set_emulate_write_cache(struct se_device *, int);
extern int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
extern int se_dev_set_emulate_tas(struct se_device *, int);
extern int se_dev_set_emulate_tpu(struct se_device *, int);
extern int se_dev_set_emulate_tpws(struct se_device *, int);
extern int se_dev_set_enforce_pr_isids(struct se_device *, int);
extern int se_dev_set_queue_depth(struct se_device *, u32);
extern int se_dev_set_max_sectors(struct se_device *, u32);
extern int se_dev_set_optimal_sectors(struct se_device *, u32);
extern int se_dev_set_block_size(struct se_device *, u32);
/* TPG LUN creation/removal */
extern struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
					struct se_device *, u32);
extern int core_dev_del_lun(struct se_portal_group *, u32);
extern struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
extern struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
u32, char *, int *);
extern int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun_acl *, u32, u32);
extern int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun *, struct se_lun_acl *);
extern void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun_acl *lacl);
extern int core_dev_setup_virtual_lun0(void);
extern void core_dev_release_virtual_lun0(void);
#endif /* TARGET_CORE_DEVICE_H */

View File

@@ -0,0 +1,106 @@
/*
 * Helper macros for declaring fabric-module configfs extended attributes.
 * CONFIGFS_EATTR_STRUCT() emits the attribute container type for one
 * config group, and each TF_*_ATTR()/TF_*_ATTR_RO() macro declares a
 * single static attribute wired by token pasting to the fabric's
 * <fabric>_<group>_show_<name> / <fabric>_<group>_store_<name> handlers.
 *
 * NOTE(review): the original top comment read "Used for tfc_wwn_cit
 * attributes" although the first family below is nacl_attrib; the
 * per-family comments here name the config_item_type each one serves
 * (matching the tfc_*_cit members of target_fabric_configfs_template).
 */
#include <target/configfs_macros.h>

/* Used for tfc_tpg_nacl_attrib_cit attributes */
CONFIGFS_EATTR_STRUCT(target_fabric_nacl_attrib, se_node_acl);
#define TF_NACL_ATTRIB_ATTR(_fabric, _name, _mode)			\
static struct target_fabric_nacl_attrib_attribute _fabric##_nacl_attrib_##_name = \
	__CONFIGFS_EATTR(_name, _mode,					\
	_fabric##_nacl_attrib_show_##_name,				\
	_fabric##_nacl_attrib_store_##_name);

/* Used for tfc_tpg_nacl_auth_cit attributes */
CONFIGFS_EATTR_STRUCT(target_fabric_nacl_auth, se_node_acl);
#define TF_NACL_AUTH_ATTR(_fabric, _name, _mode)			\
static struct target_fabric_nacl_auth_attribute _fabric##_nacl_auth_##_name = \
	__CONFIGFS_EATTR(_name, _mode,					\
	_fabric##_nacl_auth_show_##_name,				\
	_fabric##_nacl_auth_store_##_name);

#define TF_NACL_AUTH_ATTR_RO(_fabric, _name)				\
static struct target_fabric_nacl_auth_attribute _fabric##_nacl_auth_##_name = \
	__CONFIGFS_EATTR_RO(_name,					\
	_fabric##_nacl_auth_show_##_name);

/* Used for tfc_tpg_nacl_param_cit attributes */
CONFIGFS_EATTR_STRUCT(target_fabric_nacl_param, se_node_acl);
#define TF_NACL_PARAM_ATTR(_fabric, _name, _mode)			\
static struct target_fabric_nacl_param_attribute _fabric##_nacl_param_##_name = \
	__CONFIGFS_EATTR(_name, _mode,					\
	_fabric##_nacl_param_show_##_name,				\
	_fabric##_nacl_param_store_##_name);

#define TF_NACL_PARAM_ATTR_RO(_fabric, _name)				\
static struct target_fabric_nacl_param_attribute _fabric##_nacl_param_##_name = \
	__CONFIGFS_EATTR_RO(_name,					\
	_fabric##_nacl_param_show_##_name);

/* Used for tfc_tpg_nacl_base_cit attributes */
CONFIGFS_EATTR_STRUCT(target_fabric_nacl_base, se_node_acl);
#define TF_NACL_BASE_ATTR(_fabric, _name, _mode)			\
static struct target_fabric_nacl_base_attribute _fabric##_nacl_##_name = \
	__CONFIGFS_EATTR(_name, _mode,					\
	_fabric##_nacl_show_##_name,					\
	_fabric##_nacl_store_##_name);

#define TF_NACL_BASE_ATTR_RO(_fabric, _name)				\
static struct target_fabric_nacl_base_attribute _fabric##_nacl_##_name = \
	__CONFIGFS_EATTR_RO(_name,					\
	_fabric##_nacl_show_##_name);

/* Used for tfc_tpg_np_base_cit attributes */
CONFIGFS_EATTR_STRUCT(target_fabric_np_base, se_tpg_np);
#define TF_NP_BASE_ATTR(_fabric, _name, _mode)				\
static struct target_fabric_np_base_attribute _fabric##_np_##_name =	\
	__CONFIGFS_EATTR(_name, _mode,					\
	_fabric##_np_show_##_name,					\
	_fabric##_np_store_##_name);

/* Used for tfc_tpg_attrib_cit attributes */
CONFIGFS_EATTR_STRUCT(target_fabric_tpg_attrib, se_portal_group);
#define TF_TPG_ATTRIB_ATTR(_fabric, _name, _mode)			\
static struct target_fabric_tpg_attrib_attribute _fabric##_tpg_attrib_##_name = \
	__CONFIGFS_EATTR(_name, _mode,					\
	_fabric##_tpg_attrib_show_##_name,				\
	_fabric##_tpg_attrib_store_##_name);

/* Used for tfc_tpg_param_cit attributes */
CONFIGFS_EATTR_STRUCT(target_fabric_tpg_param, se_portal_group);
#define TF_TPG_PARAM_ATTR(_fabric, _name, _mode)			\
static struct target_fabric_tpg_param_attribute _fabric##_tpg_param_##_name = \
	__CONFIGFS_EATTR(_name, _mode,					\
	_fabric##_tpg_param_show_##_name,				\
	_fabric##_tpg_param_store_##_name);

/* Used for the TPG base attributes */
CONFIGFS_EATTR_STRUCT(target_fabric_tpg, se_portal_group);
#define TF_TPG_BASE_ATTR(_fabric, _name, _mode)				\
static struct target_fabric_tpg_attribute _fabric##_tpg_##_name =	\
	__CONFIGFS_EATTR(_name, _mode,					\
	_fabric##_tpg_show_##_name,					\
	_fabric##_tpg_store_##_name);

/* Used for the fabric WWN attributes; note the show_attr_/store_attr_ naming */
CONFIGFS_EATTR_STRUCT(target_fabric_wwn, target_fabric_configfs);
#define TF_WWN_ATTR(_fabric, _name, _mode)				\
static struct target_fabric_wwn_attribute _fabric##_wwn_##_name =	\
	__CONFIGFS_EATTR(_name, _mode,					\
	_fabric##_wwn_show_attr_##_name,				\
	_fabric##_wwn_store_attr_##_name);

#define TF_WWN_ATTR_RO(_fabric, _name)					\
static struct target_fabric_wwn_attribute _fabric##_wwn_##_name =	\
	__CONFIGFS_EATTR_RO(_name,					\
	_fabric##_wwn_show_attr_##_name);

/* Used for the fabric discovery group attributes */
CONFIGFS_EATTR_STRUCT(target_fabric_discovery, target_fabric_configfs);
#define TF_DISC_ATTR(_fabric, _name, _mode)				\
static struct target_fabric_discovery_attribute _fabric##_disc_##_name = \
	__CONFIGFS_EATTR(_name, _mode,					\
	_fabric##_disc_show_##_name,					\
	_fabric##_disc_store_##_name);

#define TF_DISC_ATTR_RO(_fabric, _name)					\
static struct target_fabric_discovery_attribute _fabric##_disc_##_name = \
	__CONFIGFS_EATTR_RO(_name,					\
	_fabric##_disc_show_##_name);

/* Build all config_item_types for a registering fabric (fills tf_cit_tmpl) */
extern int target_fabric_setup_cits(struct target_fabric_configfs *);

View File

@@ -0,0 +1,28 @@
#ifndef TARGET_CORE_FABRIC_LIB_H
#define TARGET_CORE_FABRIC_LIB_H

/*
 * Shared SPC-3 persistent reservation TransportID encode/decode helpers,
 * provided per transport protocol so fabric modules can plug them into
 * their target_core_fabric_ops (tpg_get_pr_transport_id* /
 * tpg_parse_pr_out_transport_id) instead of reimplementing them.
 */

/* SAS: protocol identifier, TransportID build/length/parse */
extern u8 sas_get_fabric_proto_ident(struct se_portal_group *);
extern u32 sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
			struct t10_pr_registration *, int *, unsigned char *);
extern u32 sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
			struct t10_pr_registration *, int *);
extern char *sas_parse_pr_out_transport_id(struct se_portal_group *,
			const char *, u32 *, char **);
/* Fibre Channel: protocol identifier, TransportID build/length/parse */
extern u8 fc_get_fabric_proto_ident(struct se_portal_group *);
extern u32 fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
			struct t10_pr_registration *, int *, unsigned char *);
extern u32 fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
			struct t10_pr_registration *, int *);
extern char *fc_parse_pr_out_transport_id(struct se_portal_group *,
			const char *, u32 *, char **);
/* iSCSI: protocol identifier, TransportID build/length/parse */
extern u8 iscsi_get_fabric_proto_ident(struct se_portal_group *);
extern u32 iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
			struct t10_pr_registration *, int *, unsigned char *);
extern u32 iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
			struct t10_pr_registration *, int *);
extern char *iscsi_parse_pr_out_transport_id(struct se_portal_group *,
			const char *, u32 *, char **);

#endif /* TARGET_CORE_FABRIC_LIB_H */

View File

@@ -0,0 +1,100 @@
/* Defined in target_core_configfs.h */
struct target_fabric_configfs;

/*
 * Fabric module callback operations, supplied by each TCM fabric module
 * when it registers with the target core (copied into
 * target_fabric_configfs.tf_ops).
 */
struct target_core_fabric_ops {
	struct configfs_subsystem *tf_subsys;
	/*
	 * Optional to signal struct se_task->task_sg[] padding entries
	 * for scatterlist chaining using transport_do_task_sg_link(),
	 * disabled by default.
	 *
	 * Declared unsigned: a plain "int" 1-bit bit-field has
	 * implementation-defined signedness, and when signed can only
	 * represent 0 and -1, so assigning 1 may read back as -1.
	 */
	unsigned int task_sg_chaining:1;
	char *(*get_fabric_name)(void);
	u8 (*get_fabric_proto_ident)(struct se_portal_group *);
	char *(*tpg_get_wwn)(struct se_portal_group *);
	u16 (*tpg_get_tag)(struct se_portal_group *);
	u32 (*tpg_get_default_depth)(struct se_portal_group *);
	/*
	 * SPC-3 persistent reservation TransportID build/length/parse hooks;
	 * fabrics may point these at the shared helpers in
	 * target_core_fabric_lib.c (sas_/fc_/iscsi_ variants).
	 */
	u32 (*tpg_get_pr_transport_id)(struct se_portal_group *,
				struct se_node_acl *,
				struct t10_pr_registration *, int *,
				unsigned char *);
	u32 (*tpg_get_pr_transport_id_len)(struct se_portal_group *,
				struct se_node_acl *,
				struct t10_pr_registration *, int *);
	char *(*tpg_parse_pr_out_transport_id)(struct se_portal_group *,
				const char *, u32 *, char **);
	/* Demo-mode / production-mode policy checks for this TPG */
	int (*tpg_check_demo_mode)(struct se_portal_group *);
	int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
	int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
	int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *);
	struct se_node_acl *(*tpg_alloc_fabric_acl)(
					struct se_portal_group *);
	void (*tpg_release_fabric_acl)(struct se_portal_group *,
					struct se_node_acl *);
	u32 (*tpg_get_inst_index)(struct se_portal_group *);
	/*
	 * Optional function pointer for TCM to perform command map
	 * from TCM processing thread context, for those struct se_cmd
	 * initially allocated in interrupt context.
	 */
	int (*new_cmd_map)(struct se_cmd *);
	/*
	 * Optional function pointer for TCM fabric modules that use
	 * Linux/NET sockets to allocate struct iovec array to struct se_cmd
	 */
	int (*alloc_cmd_iovecs)(struct se_cmd *);
	/*
	 * Optional to release struct se_cmd and fabric dependent allocated
	 * I/O descriptor in transport_cmd_check_stop()
	 */
	void (*check_stop_free)(struct se_cmd *);
	void (*release_cmd_to_pool)(struct se_cmd *);
	void (*release_cmd_direct)(struct se_cmd *);
	/*
	 * Called with spin_lock_bh(&se_portal_group->session_lock) held.
	 */
	int (*shutdown_session)(struct se_session *);
	void (*close_session)(struct se_session *);
	void (*stop_session)(struct se_session *, int, int);
	void (*fall_back_to_erl0)(struct se_session *);
	int (*sess_logged_in)(struct se_session *);
	u32 (*sess_get_index)(struct se_session *);
	/*
	 * Used only for SCSI fabrics that contain multi-value TransportIDs
	 * (like iSCSI). All other SCSI fabrics should set this to NULL.
	 */
	u32 (*sess_get_initiator_sid)(struct se_session *,
				unsigned char *, u32);
	/* Data I/O path and command state callbacks */
	int (*write_pending)(struct se_cmd *);
	int (*write_pending_status)(struct se_cmd *);
	void (*set_default_node_attributes)(struct se_node_acl *);
	u32 (*get_task_tag)(struct se_cmd *);
	int (*get_cmd_state)(struct se_cmd *);
	void (*new_cmd_failure)(struct se_cmd *);
	int (*queue_data_in)(struct se_cmd *);
	int (*queue_status)(struct se_cmd *);
	int (*queue_tm_rsp)(struct se_cmd *);
	u16 (*set_fabric_sense_len)(struct se_cmd *, u32);
	u16 (*get_fabric_sense_len)(void);
	int (*is_state_remove)(struct se_cmd *);
	u64 (*pack_lun)(unsigned int);
	/*
	 * fabric module calls for target_core_fabric_configfs.c
	 */
	struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
				struct config_group *, const char *);
	void (*fabric_drop_wwn)(struct se_wwn *);
	struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
				struct config_group *, const char *);
	void (*fabric_drop_tpg)(struct se_portal_group *);
	int (*fabric_post_link)(struct se_portal_group *,
				struct se_lun *);
	void (*fabric_pre_unlink)(struct se_portal_group *,
				struct se_lun *);
	struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *,
				struct config_group *, const char *);
	void (*fabric_drop_np)(struct se_tpg_np *);
	struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *,
				struct config_group *, const char *);
	void (*fabric_drop_nodeacl)(struct se_node_acl *);
};

View File

@@ -0,0 +1,43 @@
#ifndef TARGET_CORE_TMR_H
#define TARGET_CORE_TMR_H

/* task management function values */
/*
 * Undefine any pre-existing definitions of these names first (they may
 * collide with other SCSI headers — verify against includers) so that
 * TCM's values take effect.
 */
#ifdef ABORT_TASK
#undef ABORT_TASK
#endif /* ABORT_TASK */
#define ABORT_TASK				1
#ifdef ABORT_TASK_SET
#undef ABORT_TASK_SET
#endif /* ABORT_TASK_SET */
#define ABORT_TASK_SET				2
#ifdef CLEAR_ACA
#undef CLEAR_ACA
#endif /* CLEAR_ACA */
#define CLEAR_ACA				3
#ifdef CLEAR_TASK_SET
#undef CLEAR_TASK_SET
#endif /* CLEAR_TASK_SET */
#define CLEAR_TASK_SET				4
#define LUN_RESET				5
#define TARGET_WARM_RESET			6
#define TARGET_COLD_RESET			7
#define TASK_REASSIGN				8

/* task management response values */
#define TMR_FUNCTION_COMPLETE			0
#define TMR_TASK_DOES_NOT_EXIST			1
#define TMR_LUN_DOES_NOT_EXIST			2
#define TMR_TASK_STILL_ALLEGIANT		3
#define TMR_TASK_FAILOVER_NOT_SUPPORTED		4
#define TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED	5
#define TMR_FUNCTION_AUTHORIZATION_FAILED	6
#define TMR_FUNCTION_REJECTED			255

/* kmem_cache backing se_tmr_req allocations */
extern struct kmem_cache *se_tmr_req_cache;

/* Allocate/release a TMR request descriptor for a command */
extern struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8);
extern void core_tmr_release_req(struct se_tmr_req *);
/* LUN_RESET task management handling */
extern int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
				struct list_head *, struct se_cmd *);

#endif /* TARGET_CORE_TMR_H */

View File

@@ -0,0 +1,35 @@
#ifndef TARGET_CORE_TPG_H
#define TARGET_CORE_TPG_H

/*
 * Prototypes for target portal group (TPG) and initiator node ACL
 * management (drivers/target/target_core_tpg.c — presumed from naming).
 */

/* Node ACL lookup; the __ variant presumably expects the caller to hold
 * the relevant TPG lock — verify against the implementation. */
extern struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
						const char *);
extern struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
						unsigned char *);
extern void core_tpg_add_node_to_devs(struct se_node_acl *,
						struct se_portal_group *);
extern struct se_node_acl *core_tpg_check_initiator_node_acl(
						struct se_portal_group *,
						unsigned char *);
/* Wait for outstanding references on a node ACL to drain */
extern void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
extern void core_tpg_wait_for_mib_ref(struct se_node_acl *);
extern void core_tpg_clear_object_luns(struct se_portal_group *);
/* Explicit node ACL add/delete and queue depth control */
extern struct se_node_acl *core_tpg_add_initiator_node_acl(
						struct se_portal_group *,
						struct se_node_acl *,
						const char *, u32);
extern int core_tpg_del_initiator_node_acl(struct se_portal_group *,
						struct se_node_acl *, int);
extern int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
						unsigned char *, u32, int);
/* TPG registration with the target core, called by fabric modules */
extern int core_tpg_register(struct target_core_fabric_ops *,
						struct se_wwn *,
						struct se_portal_group *, void *,
						int);
extern int core_tpg_deregister(struct se_portal_group *);
/* Two-phase LUN add/delete on a TPG */
extern struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
extern int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *, u32,
						void *);
extern struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32, int *);
extern int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);

#endif /* TARGET_CORE_TPG_H */

View File

@@ -0,0 +1,351 @@
#ifndef TARGET_CORE_TRANSPORT_H
#define TARGET_CORE_TRANSPORT_H

#define TARGET_CORE_VERSION			TARGET_CORE_MOD_VERSION

/* Attempts before moving from SHORT to LONG */
#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD	3
#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT	3  /* In milliseconds */
#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG	10 /* In milliseconds */

#define PYX_TRANSPORT_STATUS_INTERVAL		5 /* In seconds */

/* Non-negative transport submission results */
#define PYX_TRANSPORT_SENT_TO_TRANSPORT		0
#define PYX_TRANSPORT_WRITE_PENDING		1

/* Negative transport error codes returned up the stack */
#define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE	-1
#define PYX_TRANSPORT_HBA_QUEUE_FULL		-2
#define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS	-3
#define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES	-4
#define PYX_TRANSPORT_INVALID_CDB_FIELD		-5
#define PYX_TRANSPORT_INVALID_PARAMETER_LIST	-6
#define PYX_TRANSPORT_LU_COMM_FAILURE		-7
#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE		-8
#define PYX_TRANSPORT_WRITE_PROTECTED		-9
#define PYX_TRANSPORT_TASK_TIMEOUT		-10
#define PYX_TRANSPORT_RESERVATION_CONFLICT	-11
#define PYX_TRANSPORT_ILLEGAL_REQUEST		-12
#define PYX_TRANSPORT_USE_SENSE_REASON		-13

/* Fallback in case the SCSI headers do not provide this SAM status */
#ifndef SAM_STAT_RESERVATION_CONFLICT
#define SAM_STAT_RESERVATION_CONFLICT		0x18
#endif

/* Subsystem plugin registration states */
#define TRANSPORT_PLUGIN_FREE			0
#define TRANSPORT_PLUGIN_REGISTERED		1

/* Subsystem plugin HBA/device classes */
#define TRANSPORT_PLUGIN_PHBA_PDEV		1
#define TRANSPORT_PLUGIN_VHBA_PDEV		2
#define TRANSPORT_PLUGIN_VHBA_VDEV		3

/* For SE OBJ Plugins, in seconds */
#define TRANSPORT_TIMEOUT_TUR			10
#define TRANSPORT_TIMEOUT_TYPE_DISK		60
#define TRANSPORT_TIMEOUT_TYPE_ROM		120
#define TRANSPORT_TIMEOUT_TYPE_TAPE		600
#define TRANSPORT_TIMEOUT_TYPE_OTHER		300

/* For se_task->task_state_flags */
#define TSF_EXCEPTION_CLEARED			0x01

/*
 * struct se_subsystem_dev->su_dev_flags
 */
#define SDF_FIRMWARE_VPD_UNIT_SERIAL		0x00000001
#define SDF_EMULATED_VPD_UNIT_SERIAL		0x00000002
#define SDF_USING_UDEV_PATH			0x00000004
#define SDF_USING_ALIAS				0x00000008

/*
 * struct se_device->dev_flags
 */
#define DF_READ_ONLY				0x00000001
#define DF_SPC2_RESERVATIONS			0x00000002
#define DF_SPC2_RESERVATIONS_WITH_ISID		0x00000004

/* struct se_dev_attrib sanity values */
/* 10 Minutes */
#define DA_TASK_TIMEOUT_MAX			600
/* Default max_unmap_lba_count */
#define DA_MAX_UNMAP_LBA_COUNT			0
/* Default max_unmap_block_desc_count */
#define DA_MAX_UNMAP_BLOCK_DESC_COUNT		0
/* Default unmap_granularity */
#define DA_UNMAP_GRANULARITY_DEFAULT		0
/* Default unmap_granularity_alignment */
#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT	0
/* Emulation for Direct Page Out */
#define DA_EMULATE_DPO				0
/* Emulation for Forced Unit Access WRITEs */
#define DA_EMULATE_FUA_WRITE			1
/* Emulation for Forced Unit Access READs */
#define DA_EMULATE_FUA_READ			0
/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
#define DA_EMULATE_WRITE_CACHE			0
/* Emulation for UNIT ATTENTION Interlock Control */
#define DA_EMULATE_UA_INTLLCK_CTRL		0
/* Emulation for TASK_ABORTED status (TAS) by default */
#define DA_EMULATE_TAS				1
/* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
#define DA_EMULATE_TPU				0
/*
 * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using
 * block/blk-lib.c:blkdev_issue_discard()
 */
#define DA_EMULATE_TPWS				0
/* No Emulation for PSCSI by default */
#define DA_EMULATE_RESERVATIONS			0
/* No Emulation for PSCSI by default */
#define DA_EMULATE_ALUA				0
/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
#define DA_ENFORCE_PR_ISIDS			1
#define DA_STATUS_MAX_SECTORS_MIN		16
#define DA_STATUS_MAX_SECTORS_MAX		8192

#define SE_MODE_PAGE_BUF			512

/*
 * Fixed: parenthesize the macro arguments so that expressions passed as
 * 'ms' or 'bs' group correctly (was "ms % (PAGE_SIZE / bs)", which
 * mis-evaluates e.g. MOD_MAX_SECTORS(x, a + b)).
 */
#define MOD_MAX_SECTORS(ms, bs)		((ms) % (PAGE_SIZE / (bs)))
struct se_mem;
struct se_subsystem_api;

/* Global target core init/teardown and subsystem plugin registration */
extern int init_se_global(void);
extern void release_se_global(void);
extern void transport_init_queue_obj(struct se_queue_obj *);
extern int transport_subsystem_check_init(void);
extern int transport_subsystem_register(struct se_subsystem_api *);
extern void transport_subsystem_release(struct se_subsystem_api *);
extern void transport_load_plugins(void);
/* Session lifecycle, called by fabric modules */
extern struct se_session *transport_init_session(void);
extern void __transport_register_session(struct se_portal_group *,
					struct se_node_acl *,
					struct se_session *, void *);
extern void transport_register_session(struct se_portal_group *,
					struct se_node_acl *,
					struct se_session *, void *);
extern void transport_free_session(struct se_session *);
extern void transport_deregister_session_configfs(struct se_session *);
extern void transport_deregister_session(struct se_session *);
/* Command/task completion and abort handling */
extern void transport_cmd_finish_abort(struct se_cmd *, int);
extern void transport_cmd_finish_abort_tmr(struct se_cmd *);
extern void transport_complete_sync_cache(struct se_cmd *, int);
extern void transport_complete_task(struct se_task *, int);
extern void transport_add_task_to_execute_queue(struct se_task *,
						struct se_task *,
						struct se_device *);
/* 'extern' added for consistency with every other prototype in this header */
extern unsigned char *transport_dump_cmd_direction(struct se_cmd *);
/* Debug dump and T10 VPD parse/format helpers */
extern void transport_dump_dev_state(struct se_device *, char *, int *);
extern void transport_dump_dev_info(struct se_device *, struct se_lun *,
					unsigned long long, char *, int *);
extern void transport_dump_vpd_proto_id(struct t10_vpd *,
					unsigned char *, int);
extern void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
extern int transport_dump_vpd_assoc(struct t10_vpd *,
					unsigned char *, int);
extern int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
extern int transport_dump_vpd_ident_type(struct t10_vpd *,
					unsigned char *, int);
extern int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
extern int transport_dump_vpd_ident(struct t10_vpd *,
					unsigned char *, int);
extern int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
/* Register a backend device with the core HBA */
extern struct se_device *transport_add_device_to_core_hba(struct se_hba *,
					struct se_subsystem_api *,
					struct se_subsystem_dev *, u32,
					void *, struct se_dev_limits *,
					const char *, const char *);
/* se_cmd setup, CDB decode and generic I/O submission paths */
extern void transport_device_setup_cmd(struct se_cmd *);
extern void transport_init_se_cmd(struct se_cmd *,
					struct target_core_fabric_ops *,
					struct se_session *, u32, int, int,
					unsigned char *);
extern void transport_free_se_cmd(struct se_cmd *);
extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
extern int transport_generic_handle_cdb(struct se_cmd *);
extern int transport_generic_handle_cdb_map(struct se_cmd *);
extern int transport_generic_handle_data(struct se_cmd *);
extern void transport_new_cmd_failure(struct se_cmd *);
extern int transport_generic_handle_tmr(struct se_cmd *);
extern void __transport_stop_task_timer(struct se_task *, unsigned long *);
extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]);
extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
				struct scatterlist *, u32);
/* Abort/teardown and command free paths */
extern int transport_clear_lun_from_sessions(struct se_lun *);
extern int transport_check_aborted_status(struct se_cmd *, int);
extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
extern void transport_send_task_abort(struct se_cmd *);
extern void transport_release_cmd_to_pool(struct se_cmd *);
extern void transport_generic_free_cmd(struct se_cmd *, int, int, int);
extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
/* struct se_mem <-> scatterlist mapping helpers */
extern u32 transport_calc_sg_num(struct se_task *, struct se_mem *, u32);
extern int transport_map_mem_to_sg(struct se_task *, struct list_head *,
					void *, struct se_mem *,
					struct se_mem **, u32 *, u32 *);
extern void transport_do_task_sg_chain(struct se_cmd *);
extern void transport_generic_process_write(struct se_cmd *);
extern int transport_generic_do_tmr(struct se_cmd *);
/* From target_core_alua.c */
extern int core_alua_check_nonop_delay(struct se_cmd *);
/*
 * Each se_transport_task_t can have N number of possible struct se_task's
 * for the storage transport(s) to possibly execute.
 * Used primarily for splitting up CDBs that exceed the physical storage
 * HBA's maximum sector count per task.
 */
/* One contiguous chunk of command data: a page plus offset/length within it */
struct se_mem {
	struct page *se_page;		/* backing page */
	u32 se_len;			/* data length in bytes */
	u32 se_off;			/* starting offset within se_page */
	struct list_head se_list;	/* linkage on the owning list */
} ____cacheline_aligned;
/*
 * Each type of disk transport supported MUST have a template defined
 * within its .h file.
 *
 * This is the storage backend ("subsystem") plugin API, registered with
 * the core via transport_subsystem_register().
 */
struct se_subsystem_api {
	/*
	 * The name of the backend subsystem plugin (NUL-terminated,
	 * at most 15 characters plus terminator).
	 */
	char name[16];
	/*
	 * Transport Type (one of the TRANSPORT_PLUGIN_* class values —
	 * verify against the plugin implementations).
	 */
	u8 transport_type;
	/*
	 * struct module for struct se_hba references
	 */
	struct module *owner;
	/*
	 * Used for global se_subsystem_api list_head
	 */
	struct list_head sub_api_list;
	/*
	 * For SCF_SCSI_NON_DATA_CDB
	 */
	int (*cdb_none)(struct se_task *);
	/*
	 * For SCF_SCSI_CONTROL_NONSG_IO_CDB
	 */
	int (*map_task_non_SG)(struct se_task *);
	/*
	 * For SCF_SCSI_DATA_SG_IO_CDB and SCF_SCSI_CONTROL_SG_IO_CDB
	 */
	int (*map_task_SG)(struct se_task *);
	/*
	 * attach_hba(): bind this plugin to an HBA
	 */
	int (*attach_hba)(struct se_hba *, u32);
	/*
	 * detach_hba(): release the plugin's HBA binding
	 */
	void (*detach_hba)(struct se_hba *);
	/*
	 * pmode_hba(): Used for TCM/pSCSI subsystem plugin HBA ->
	 * Linux/SCSI struct Scsi_Host passthrough
	 */
	int (*pmode_enable_hba)(struct se_hba *, unsigned long);
	/*
	 * allocate_virtdevice(): allocate plugin-private device state
	 */
	void *(*allocate_virtdevice)(struct se_hba *, const char *);
	/*
	 * create_virtdevice(): Only for Virtual HBAs
	 */
	struct se_device *(*create_virtdevice)(struct se_hba *,
				struct se_subsystem_dev *, void *);
	/*
	 * free_device(): release state allocated by allocate_virtdevice()
	 */
	void (*free_device)(void *);
	/*
	 * dpo_emulated(): non-zero if the backend emulates DPO
	 */
	int (*dpo_emulated)(struct se_device *);
	/*
	 * fua_write_emulated(): non-zero if the backend emulates FUA writes
	 */
	int (*fua_write_emulated)(struct se_device *);
	/*
	 * fua_read_emulated(): non-zero if the backend emulates FUA reads
	 */
	int (*fua_read_emulated)(struct se_device *);
	/*
	 * write_cache_emulated(): non-zero if the backend emulates a write cache
	 */
	int (*write_cache_emulated)(struct se_device *);
	/*
	 * transport_complete():
	 *
	 * Use transport_generic_complete() for majority of DAS transport
	 * drivers. Provided out of convenience.
	 */
	int (*transport_complete)(struct se_task *task);
	struct se_task *(*alloc_task)(struct se_cmd *);
	/*
	 * do_task(): execute one task against the backend
	 */
	int (*do_task)(struct se_task *);
	/*
	 * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate
	 * UNMAP and WRITE_SAME_* w/ UNMAP=1 <-> Linux/Block Discard
	 */
	int (*do_discard)(struct se_device *, sector_t, u32);
	/*
	 * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate
	 * SYNCHRONIZE_CACHE_* <-> Linux/Block blkdev_issue_flush()
	 */
	void (*do_sync_cache)(struct se_task *);
	/*
	 * free_task(): release a task allocated by alloc_task()
	 */
	void (*free_task)(struct se_task *);
	/*
	 * check_configfs_dev_params(): validate configfs-supplied parameters
	 */
	ssize_t (*check_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *);
	/*
	 * set_configfs_dev_params(): parse/apply a configfs parameter string
	 */
	ssize_t (*set_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
						const char *, ssize_t);
	/*
	 * show_configfs_dev_params(): format current parameters into a page
	 */
	ssize_t (*show_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
						char *);
	/*
	 * get_cdb(): return the CDB buffer for a task
	 */
	unsigned char *(*get_cdb)(struct se_task *);
	/*
	 * get_device_rev():
	 */
	u32 (*get_device_rev)(struct se_device *);
	/*
	 * get_device_type():
	 */
	u32 (*get_device_type)(struct se_device *);
	/*
	 * Get the sector_t from a subsystem backstore..
	 */
	sector_t (*get_blocks)(struct se_device *);
	/*
	 * do_se_mem_map(): optional backend-specific se_mem mapping hook
	 */
	int (*do_se_mem_map)(struct se_task *, struct list_head *, void *,
				struct se_mem *, struct se_mem **, u32 *, u32 *);
	/*
	 * get_sense_buffer(): return the sense buffer for a task
	 */
	unsigned char *(*get_sense_buffer)(struct se_task *);
} ____cacheline_aligned;
/* Convenience accessors for a device's / HBA's subsystem API pointer */
#define TRANSPORT(dev)		((dev)->transport)
#define HBA_TRANSPORT(hba)	((hba)->transport)

/* Global target core state (defined in the core — presumed target_core_transport.c) */
extern struct se_global *se_global;

#endif /* TARGET_CORE_TRANSPORT_H */