2019-05-27 14:55:06 +08:00
// SPDX-License-Identifier: GPL-2.0-or-later
2005-08-05 10:30:08 +08:00
/*
 * iSCSI Initiator over TCP/IP Data-Path
 *
 * Copyright (C) 2004 Dmitry Yusupov
 * Copyright (C) 2004 Alex Aizman
 * Copyright (C) 2005 - 2006 Mike Christie
 * Copyright (C) 2006 Red Hat, Inc.  All rights reserved.
 * maintained by open-iscsi@googlegroups.com
 *
 * See the file COPYING included with this distribution for more details.
 *
 * Credits:
 *	Christoph Hellwig
 *	FUJITA Tomonori
 *	Arne Redlich
 *	Zhenyu Wang
 */
2016-01-24 21:19:41 +08:00
# include <crypto/hash.h>
2005-08-05 10:30:08 +08:00
# include <linux/types.h>
# include <linux/inet.h>
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 16:04:11 +08:00
# include <linux/slab.h>
2017-05-09 06:59:53 +08:00
# include <linux/sched/mm.h>
2007-05-31 01:57:23 +08:00
# include <linux/file.h>
2005-08-05 10:30:08 +08:00
# include <linux/blkdev.h>
# include <linux/delay.h>
# include <linux/kfifo.h>
# include <linux/scatterlist.h>
2011-05-27 21:47:43 +08:00
# include <linux/module.h>
2018-03-07 20:29:03 +08:00
# include <linux/backing-dev.h>
2005-08-05 10:30:08 +08:00
# include <net/tcp.h>
# include <scsi/scsi_cmnd.h>
2007-05-31 01:57:21 +08:00
# include <scsi/scsi_device.h>
2005-08-05 10:30:08 +08:00
# include <scsi/scsi_host.h>
# include <scsi/scsi.h>
# include <scsi/scsi_transport_iscsi.h>
2018-11-22 01:04:43 +08:00
# include <trace/events/iscsi.h>
2023-01-20 08:45:16 +08:00
# include <trace/events/sock.h>
2005-08-05 10:30:08 +08:00
# include "iscsi_tcp.h"
2008-12-02 14:32:12 +08:00
MODULE_AUTHOR ( " Mike Christie <michaelc@cs.wisc.edu>, "
" Dmitry Yusupov <dmitry_yus@yahoo.com>, "
2005-08-05 10:30:08 +08:00
" Alex Aizman <itn780@yahoo.com> " ) ;
MODULE_DESCRIPTION ( " iSCSI/TCP data-path " ) ;
MODULE_LICENSE ( " GPL " ) ;
2008-12-02 14:32:12 +08:00
static struct scsi_transport_template * iscsi_sw_tcp_scsi_transport ;
2023-03-23 03:54:44 +08:00
static const struct scsi_host_template iscsi_sw_tcp_sht ;
2008-12-02 14:32:12 +08:00
static struct iscsi_transport iscsi_sw_tcp_transport ;
2008-05-22 04:53:59 +08:00
2012-08-16 06:39:34 +08:00
static unsigned int iscsi_max_lun = ~ 0 ;
2005-08-05 10:30:08 +08:00
module_param_named ( max_lun , iscsi_max_lun , uint , S_IRUGO ) ;
2022-06-17 06:45:51 +08:00
static bool iscsi_recv_from_iscsi_q ;
module_param_named ( recv_from_iscsi_q , iscsi_recv_from_iscsi_q , bool , 0644 ) ;
MODULE_PARM_DESC ( recv_from_iscsi_q , " Set to true to read iSCSI data/headers from the iscsi_q workqueue. The default is false which will perform reads from the network softirq context. " ) ;
2009-03-06 04:46:00 +08:00
static int iscsi_sw_tcp_dbg ;
module_param_named ( debug_iscsi_tcp , iscsi_sw_tcp_dbg , int ,
S_IRUGO | S_IWUSR ) ;
MODULE_PARM_DESC ( debug_iscsi_tcp , " Turn on debugging for iscsi_tcp module "
" Set to 1 to turn on, and zero to turn off. Default is off. " ) ;
# define ISCSI_SW_TCP_DBG(_conn, dbg_fmt, arg...) \
do { \
if ( iscsi_sw_tcp_dbg ) \
iscsi_conn_printk ( KERN_INFO , _conn , \
" %s " dbg_fmt , \
__func__ , # # arg ) ; \
2018-11-22 01:04:43 +08:00
iscsi_dbg_trace ( trace_iscsi_dbg_sw_tcp , \
& ( _conn ) - > cls_conn - > dev , \
" %s " dbg_fmt , __func__ , # # arg ) ; \
2009-03-06 04:46:00 +08:00
} while ( 0 ) ;
2007-12-14 02:43:21 +08:00
/**
2008-12-02 14:32:12 +08:00
* iscsi_sw_tcp_recv - TCP receive in sendfile fashion
2008-12-02 14:32:04 +08:00
* @ rd_desc : read descriptor
* @ skb : socket buffer
* @ offset : offset in skb
* @ len : skb - > len - offset
2008-12-02 14:32:12 +08:00
*/
static int iscsi_sw_tcp_recv ( read_descriptor_t * rd_desc , struct sk_buff * skb ,
unsigned int offset , size_t len )
2008-12-02 14:32:04 +08:00
{
struct iscsi_conn * conn = rd_desc - > arg . data ;
unsigned int consumed , total_consumed = 0 ;
int status ;
2009-03-06 04:46:00 +08:00
ISCSI_SW_TCP_DBG ( conn , " in %d bytes \n " , skb - > len - offset ) ;
2008-12-02 14:32:04 +08:00
do {
status = 0 ;
consumed = iscsi_tcp_recv_skb ( conn , skb , offset , 0 , & status ) ;
offset + = consumed ;
total_consumed + = consumed ;
} while ( consumed ! = 0 & & status ! = ISCSI_TCP_SKB_DONE ) ;
2009-03-06 04:46:00 +08:00
ISCSI_SW_TCP_DBG ( conn , " read %d bytes status %d \n " ,
skb - > len - offset , status ) ;
2008-12-02 14:32:04 +08:00
return total_consumed ;
2005-08-05 10:30:08 +08:00
}
2009-08-21 04:10:57 +08:00
/**
* iscsi_sw_sk_state_check - check socket state
* @ sk : socket
*
* If the socket is in CLOSE or CLOSE_WAIT we should
* not close the connection if there is still some
* data pending .
2011-06-25 04:11:54 +08:00
*
* Must be called with sk_callback_lock .
2009-08-21 04:10:57 +08:00
*/
static inline int iscsi_sw_sk_state_check ( struct sock * sk )
{
2011-06-25 04:11:54 +08:00
struct iscsi_conn * conn = sk - > sk_user_data ;
2009-08-21 04:10:57 +08:00
2009-08-21 04:11:02 +08:00
if ( ( sk - > sk_state = = TCP_CLOSE_WAIT | | sk - > sk_state = = TCP_CLOSE ) & &
2013-09-27 00:09:44 +08:00
( conn - > session - > state ! = ISCSI_STATE_LOGGING_OUT ) & &
2009-08-21 04:11:02 +08:00
! atomic_read ( & sk - > sk_rmem_alloc ) ) {
ISCSI_SW_TCP_DBG ( conn , " TCP_CLOSE|TCP_CLOSE_WAIT \n " ) ;
iscsi_conn_failure ( conn , ISCSI_ERR_TCP_CONN_CLOSE ) ;
return - ECONNRESET ;
}
2009-08-21 04:10:57 +08:00
return 0 ;
}
2022-06-17 06:45:51 +08:00
static void iscsi_sw_tcp_recv_data ( struct iscsi_conn * conn )
2005-08-05 10:30:08 +08:00
{
2022-06-17 06:45:51 +08:00
struct iscsi_tcp_conn * tcp_conn = conn - > dd_data ;
struct iscsi_sw_tcp_conn * tcp_sw_conn = tcp_conn - > dd_data ;
struct sock * sk = tcp_sw_conn - > sock - > sk ;
2005-08-05 10:30:08 +08:00
read_descriptor_t rd_desc ;
2006-05-03 08:46:49 +08:00
/*
2007-12-14 02:43:21 +08:00
* Use rd_desc to pass ' conn ' to iscsi_tcp_recv .
2006-05-03 08:46:49 +08:00
* We set count to 1 because we want the network layer to
2007-12-14 02:43:21 +08:00
* hand us all the skbs that are available . iscsi_tcp_recv
2006-05-03 08:46:49 +08:00
* handled pdus that cross buffers or pdus that still need data .
*/
2005-08-05 10:30:08 +08:00
rd_desc . arg . data = conn ;
2006-05-03 08:46:49 +08:00
rd_desc . count = 1 ;
2005-08-05 10:30:08 +08:00
2022-06-17 06:45:51 +08:00
tcp_read_sock ( sk , & rd_desc , iscsi_sw_tcp_recv ) ;
2009-08-21 04:10:57 +08:00
2007-12-14 02:43:21 +08:00
/* If we had to (atomically) map a highmem page,
* unmap it now . */
2007-12-14 02:43:35 +08:00
iscsi_tcp_segment_unmap ( & tcp_conn - > in . segment ) ;
2022-06-17 06:45:51 +08:00
iscsi_sw_sk_state_check ( sk ) ;
}
static void iscsi_sw_tcp_recv_data_work ( struct work_struct * work )
{
struct iscsi_conn * conn = container_of ( work , struct iscsi_conn ,
recvwork ) ;
struct iscsi_tcp_conn * tcp_conn = conn - > dd_data ;
struct iscsi_sw_tcp_conn * tcp_sw_conn = tcp_conn - > dd_data ;
struct sock * sk = tcp_sw_conn - > sock - > sk ;
lock_sock ( sk ) ;
iscsi_sw_tcp_recv_data ( conn ) ;
release_sock ( sk ) ;
}
static void iscsi_sw_tcp_data_ready ( struct sock * sk )
{
struct iscsi_sw_tcp_conn * tcp_sw_conn ;
struct iscsi_tcp_conn * tcp_conn ;
struct iscsi_conn * conn ;
2023-01-20 08:45:16 +08:00
trace_sk_data_ready ( sk ) ;
2022-06-17 06:45:51 +08:00
read_lock_bh ( & sk - > sk_callback_lock ) ;
conn = sk - > sk_user_data ;
if ( ! conn ) {
read_unlock_bh ( & sk - > sk_callback_lock ) ;
return ;
}
tcp_conn = conn - > dd_data ;
tcp_sw_conn = tcp_conn - > dd_data ;
if ( tcp_sw_conn - > queue_recv )
iscsi_conn_queue_recv ( conn ) ;
else
iscsi_sw_tcp_recv_data ( conn ) ;
2016-05-18 08:44:06 +08:00
read_unlock_bh ( & sk - > sk_callback_lock ) ;
2005-08-05 10:30:08 +08:00
}
2008-12-02 14:32:12 +08:00
static void iscsi_sw_tcp_state_change ( struct sock * sk )
2005-08-05 10:30:08 +08:00
{
2006-04-07 10:26:46 +08:00
struct iscsi_tcp_conn * tcp_conn ;
2008-12-02 14:32:12 +08:00
struct iscsi_sw_tcp_conn * tcp_sw_conn ;
2005-08-05 10:30:08 +08:00
struct iscsi_conn * conn ;
void ( * old_state_change ) ( struct sock * ) ;
2016-05-18 08:44:06 +08:00
read_lock_bh ( & sk - > sk_callback_lock ) ;
2011-06-25 04:11:54 +08:00
conn = sk - > sk_user_data ;
if ( ! conn ) {
2016-05-18 08:44:06 +08:00
read_unlock_bh ( & sk - > sk_callback_lock ) ;
2011-06-25 04:11:54 +08:00
return ;
}
2005-08-05 10:30:08 +08:00
2009-08-21 04:11:02 +08:00
iscsi_sw_sk_state_check ( sk ) ;
2005-08-05 10:30:08 +08:00
2006-04-07 10:26:46 +08:00
tcp_conn = conn - > dd_data ;
2008-12-02 14:32:12 +08:00
tcp_sw_conn = tcp_conn - > dd_data ;
old_state_change = tcp_sw_conn - > old_state_change ;
2005-08-05 10:30:08 +08:00
2016-05-18 08:44:06 +08:00
read_unlock_bh ( & sk - > sk_callback_lock ) ;
2005-08-05 10:30:08 +08:00
old_state_change ( sk ) ;
}
/**
2020-10-24 00:33:14 +08:00
* iscsi_sw_tcp_write_space - Called when more output buffer space is available
2005-08-05 10:30:08 +08:00
* @ sk : socket space is available for
* */
2008-12-02 14:32:12 +08:00
static void iscsi_sw_tcp_write_space ( struct sock * sk )
2005-08-05 10:30:08 +08:00
{
2011-06-25 04:11:54 +08:00
struct iscsi_conn * conn ;
struct iscsi_tcp_conn * tcp_conn ;
struct iscsi_sw_tcp_conn * tcp_sw_conn ;
void ( * old_write_space ) ( struct sock * ) ;
read_lock_bh ( & sk - > sk_callback_lock ) ;
conn = sk - > sk_user_data ;
if ( ! conn ) {
read_unlock_bh ( & sk - > sk_callback_lock ) ;
return ;
}
tcp_conn = conn - > dd_data ;
tcp_sw_conn = tcp_conn - > dd_data ;
old_write_space = tcp_sw_conn - > old_write_space ;
read_unlock_bh ( & sk - > sk_callback_lock ) ;
old_write_space ( sk ) ;
2006-04-07 10:26:46 +08:00
2009-03-06 04:46:00 +08:00
ISCSI_SW_TCP_DBG ( conn , " iscsi_write_space \n " ) ;
2022-06-17 06:45:49 +08:00
iscsi_conn_queue_xmit ( conn ) ;
2005-08-05 10:30:08 +08:00
}
2008-12-02 14:32:12 +08:00
static void iscsi_sw_tcp_conn_set_callbacks ( struct iscsi_conn * conn )
2005-08-05 10:30:08 +08:00
{
2006-04-07 10:26:46 +08:00
struct iscsi_tcp_conn * tcp_conn = conn - > dd_data ;
2008-12-02 14:32:12 +08:00
struct iscsi_sw_tcp_conn * tcp_sw_conn = tcp_conn - > dd_data ;
struct sock * sk = tcp_sw_conn - > sock - > sk ;
2005-08-05 10:30:08 +08:00
/* assign new callbacks */
write_lock_bh ( & sk - > sk_callback_lock ) ;
sk - > sk_user_data = conn ;
2008-12-02 14:32:12 +08:00
tcp_sw_conn - > old_data_ready = sk - > sk_data_ready ;
tcp_sw_conn - > old_state_change = sk - > sk_state_change ;
tcp_sw_conn - > old_write_space = sk - > sk_write_space ;
sk - > sk_data_ready = iscsi_sw_tcp_data_ready ;
sk - > sk_state_change = iscsi_sw_tcp_state_change ;
sk - > sk_write_space = iscsi_sw_tcp_write_space ;
2005-08-05 10:30:08 +08:00
write_unlock_bh ( & sk - > sk_callback_lock ) ;
}
2008-12-02 14:32:12 +08:00
static void
2010-04-10 11:07:38 +08:00
iscsi_sw_tcp_conn_restore_callbacks ( struct iscsi_conn * conn )
2005-08-05 10:30:08 +08:00
{
2010-04-10 11:07:38 +08:00
struct iscsi_tcp_conn * tcp_conn = conn - > dd_data ;
struct iscsi_sw_tcp_conn * tcp_sw_conn = tcp_conn - > dd_data ;
2008-12-02 14:32:12 +08:00
struct sock * sk = tcp_sw_conn - > sock - > sk ;
2005-08-05 10:30:08 +08:00
/* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
write_lock_bh ( & sk - > sk_callback_lock ) ;
sk - > sk_user_data = NULL ;
2008-12-02 14:32:12 +08:00
sk - > sk_data_ready = tcp_sw_conn - > old_data_ready ;
sk - > sk_state_change = tcp_sw_conn - > old_state_change ;
sk - > sk_write_space = tcp_sw_conn - > old_write_space ;
2014-05-23 23:47:19 +08:00
sk - > sk_no_check_tx = 0 ;
2005-08-05 10:30:08 +08:00
write_unlock_bh ( & sk - > sk_callback_lock ) ;
}
/**
2008-12-02 14:32:12 +08:00
* iscsi_sw_tcp_xmit_segment - transmit segment
2008-12-02 14:32:16 +08:00
* @ tcp_conn : the iSCSI TCP connection
2008-12-02 14:32:12 +08:00
* @ segment : the buffer to transmnit
*
* This function transmits as much of the buffer as
* the network layer will accept , and returns the number of
* bytes transmitted .
*
* If CRC hashing is enabled , the function will compute the
* hash as it goes . When the entire segment has been transmitted ,
* it will retrieve the hash value and send it as well .
*/
2008-12-02 14:32:16 +08:00
static int iscsi_sw_tcp_xmit_segment ( struct iscsi_tcp_conn * tcp_conn ,
2008-12-02 14:32:12 +08:00
struct iscsi_segment * segment )
{
2008-12-02 14:32:16 +08:00
struct iscsi_sw_tcp_conn * tcp_sw_conn = tcp_conn - > dd_data ;
2008-12-02 14:32:12 +08:00
struct socket * sk = tcp_sw_conn - > sock ;
unsigned int copied = 0 ;
int r = 0 ;
2008-12-02 14:32:16 +08:00
while ( ! iscsi_tcp_segment_done ( tcp_conn , segment , 0 , r ) ) {
2008-12-02 14:32:12 +08:00
struct scatterlist * sg ;
2023-06-24 06:55:08 +08:00
struct msghdr msg = { } ;
struct bio_vec bv ;
2008-12-02 14:32:12 +08:00
unsigned int offset , copy ;
r = 0 ;
offset = segment - > copied ;
copy = segment - > size - offset ;
if ( segment - > total_copied + segment - > size < segment - > total_size )
2023-06-24 06:55:08 +08:00
msg . msg_flags | = MSG_MORE ;
2008-12-02 14:32:12 +08:00
2022-06-17 06:45:51 +08:00
if ( tcp_sw_conn - > queue_recv )
2023-06-24 06:55:08 +08:00
msg . msg_flags | = MSG_DONTWAIT ;
2022-06-17 06:45:51 +08:00
2008-12-02 14:32:12 +08:00
if ( ! segment - > data ) {
2023-06-24 06:55:08 +08:00
if ( ! tcp_conn - > iscsi_conn - > datadgst_en )
msg . msg_flags | = MSG_SPLICE_PAGES ;
2008-12-02 14:32:12 +08:00
sg = segment - > sg ;
offset + = segment - > sg_offset + sg - > offset ;
2023-06-24 06:55:08 +08:00
bvec_set_page ( & bv , sg_page ( sg ) , copy , offset ) ;
2008-12-02 14:32:12 +08:00
} else {
2023-06-24 06:55:08 +08:00
bvec_set_virt ( & bv , segment - > data + offset , copy ) ;
2008-12-02 14:32:12 +08:00
}
2023-06-24 06:55:08 +08:00
iov_iter_bvec ( & msg . msg_iter , ITER_SOURCE , & bv , 1 , copy ) ;
2008-12-02 14:32:12 +08:00
2023-06-24 06:55:08 +08:00
r = sock_sendmsg ( sk , & msg ) ;
2008-12-02 14:32:12 +08:00
if ( r < 0 ) {
iscsi_tcp_segment_unmap ( segment ) ;
return r ;
}
copied + = r ;
}
return copied ;
}
/**
* iscsi_sw_tcp_xmit - TCP transmit
2017-12-23 06:08:27 +08:00
* @ conn : iscsi connection
2007-12-14 02:43:35 +08:00
* */
2008-12-02 14:32:12 +08:00
static int iscsi_sw_tcp_xmit ( struct iscsi_conn * conn )
2005-08-05 10:30:08 +08:00
{
2006-04-07 10:26:46 +08:00
struct iscsi_tcp_conn * tcp_conn = conn - > dd_data ;
2008-12-02 14:32:12 +08:00
struct iscsi_sw_tcp_conn * tcp_sw_conn = tcp_conn - > dd_data ;
struct iscsi_segment * segment = & tcp_sw_conn - > out . segment ;
2007-12-14 02:43:35 +08:00
unsigned int consumed = 0 ;
int rc = 0 ;
2005-08-05 10:30:08 +08:00
2007-12-14 02:43:35 +08:00
while ( 1 ) {
2008-12-02 14:32:16 +08:00
rc = iscsi_sw_tcp_xmit_segment ( tcp_conn , segment ) ;
2009-06-16 11:11:09 +08:00
/*
* We may not have been able to send data because the conn
2011-03-31 09:57:33 +08:00
* is getting stopped . libiscsi will know so propagate err
2009-06-16 11:11:09 +08:00
* for it to do the right thing .
*/
if ( rc = = - EAGAIN )
return rc ;
else if ( rc < 0 ) {
2008-09-25 00:46:13 +08:00
rc = ISCSI_ERR_XMIT_FAILED ;
2007-12-14 02:43:35 +08:00
goto error ;
2009-06-16 11:11:09 +08:00
} else if ( rc = = 0 )
2007-12-14 02:43:35 +08:00
break ;
consumed + = rc ;
if ( segment - > total_copied > = segment - > total_size ) {
if ( segment - > done ! = NULL ) {
rc = segment - > done ( tcp_conn , segment ) ;
2008-09-25 00:46:13 +08:00
if ( rc ! = 0 )
2007-12-14 02:43:35 +08:00
goto error ;
}
}
2006-05-30 13:37:28 +08:00
}
2009-03-06 04:46:00 +08:00
ISCSI_SW_TCP_DBG ( conn , " xmit %d bytes \n " , consumed ) ;
2007-12-14 02:43:35 +08:00
conn - > txdata_octets + = consumed ;
return consumed ;
error :
/* Transmit error. We could initiate error recovery
* here . */
2009-03-06 04:46:00 +08:00
ISCSI_SW_TCP_DBG ( conn , " Error sending PDU, errno=%d \n " , rc ) ;
2008-09-25 00:46:13 +08:00
iscsi_conn_failure ( conn , rc ) ;
return - EIO ;
2005-08-05 10:30:08 +08:00
}
/**
2020-10-24 00:33:14 +08:00
* iscsi_sw_tcp_xmit_qlen - return the number of bytes queued for xmit
2017-12-23 06:08:27 +08:00
* @ conn : iscsi connection
2007-12-14 02:43:35 +08:00
*/
2008-12-02 14:32:12 +08:00
static inline int iscsi_sw_tcp_xmit_qlen ( struct iscsi_conn * conn )
2005-08-05 10:30:08 +08:00
{
2007-12-14 02:43:35 +08:00
struct iscsi_tcp_conn * tcp_conn = conn - > dd_data ;
2008-12-02 14:32:12 +08:00
struct iscsi_sw_tcp_conn * tcp_sw_conn = tcp_conn - > dd_data ;
struct iscsi_segment * segment = & tcp_sw_conn - > out . segment ;
2005-08-05 10:30:08 +08:00
2007-12-14 02:43:35 +08:00
return segment - > total_copied - segment - > total_size ;
2005-08-05 10:30:08 +08:00
}
2008-12-02 14:32:12 +08:00
static int iscsi_sw_tcp_pdu_xmit ( struct iscsi_task * task )
2005-08-05 10:30:08 +08:00
{
2008-12-02 14:32:07 +08:00
struct iscsi_conn * conn = task - > conn ;
2017-05-09 06:59:53 +08:00
unsigned int noreclaim_flag ;
2019-11-16 08:47:35 +08:00
struct iscsi_tcp_conn * tcp_conn = conn - > dd_data ;
struct iscsi_sw_tcp_conn * tcp_sw_conn = tcp_conn - > dd_data ;
2013-04-11 00:24:39 +08:00
int rc = 0 ;
2019-11-16 08:47:35 +08:00
if ( ! tcp_sw_conn - > sock ) {
iscsi_conn_printk ( KERN_ERR , conn ,
" Transport not bound to socket! \n " ) ;
return - EINVAL ;
}
2017-05-09 06:59:53 +08:00
noreclaim_flag = memalloc_noreclaim_save ( ) ;
2007-12-14 02:43:35 +08:00
2008-12-02 14:32:12 +08:00
while ( iscsi_sw_tcp_xmit_qlen ( conn ) ) {
rc = iscsi_sw_tcp_xmit ( conn ) ;
2013-04-11 00:24:39 +08:00
if ( rc = = 0 ) {
rc = - EAGAIN ;
break ;
}
2007-12-14 02:43:35 +08:00
if ( rc < 0 )
2013-04-11 00:24:39 +08:00
break ;
rc = 0 ;
2006-05-30 13:37:28 +08:00
}
2005-08-05 10:30:08 +08:00
2017-05-09 06:59:53 +08:00
memalloc_noreclaim_restore ( noreclaim_flag ) ;
2013-04-11 00:24:39 +08:00
return rc ;
2005-08-05 10:30:08 +08:00
}
2007-12-14 02:43:35 +08:00
/*
* This is called when we ' re done sending the header .
* Simply copy the data_segment to the send segment , and return .
*/
2008-12-02 14:32:12 +08:00
static int iscsi_sw_tcp_send_hdr_done ( struct iscsi_tcp_conn * tcp_conn ,
struct iscsi_segment * segment )
2005-08-05 10:30:08 +08:00
{
2008-12-02 14:32:12 +08:00
struct iscsi_sw_tcp_conn * tcp_sw_conn = tcp_conn - > dd_data ;
tcp_sw_conn - > out . segment = tcp_sw_conn - > out . data_segment ;
2009-03-06 04:46:00 +08:00
ISCSI_SW_TCP_DBG ( tcp_conn - > iscsi_conn ,
" Header done. Next segment size %u total_size %u \n " ,
tcp_sw_conn - > out . segment . size ,
tcp_sw_conn - > out . segment . total_size ) ;
2007-12-14 02:43:35 +08:00
return 0 ;
}
2008-12-02 14:32:12 +08:00
static void iscsi_sw_tcp_send_hdr_prep ( struct iscsi_conn * conn , void * hdr ,
size_t hdrlen )
2007-12-14 02:43:35 +08:00
{
struct iscsi_tcp_conn * tcp_conn = conn - > dd_data ;
2008-12-02 14:32:12 +08:00
struct iscsi_sw_tcp_conn * tcp_sw_conn = tcp_conn - > dd_data ;
2007-12-14 02:43:35 +08:00
2009-03-06 04:46:00 +08:00
ISCSI_SW_TCP_DBG ( conn , " %s \n " , conn - > hdrdgst_en ?
" digest enabled " : " digest disabled " ) ;
2007-12-14 02:43:35 +08:00
/* Clear the data segment - needs to be filled in by the
* caller using iscsi_tcp_send_data_prep ( ) */
2008-12-02 14:32:12 +08:00
memset ( & tcp_sw_conn - > out . data_segment , 0 ,
sizeof ( struct iscsi_segment ) ) ;
2007-12-14 02:43:35 +08:00
/* If header digest is enabled, compute the CRC and
* place the digest into the same buffer . We make
2008-05-22 04:54:10 +08:00
* sure that both iscsi_tcp_task and mtask have
2007-12-14 02:43:35 +08:00
* sufficient room .
*/
if ( conn - > hdrdgst_en ) {
2016-01-24 21:19:41 +08:00
iscsi_tcp_dgst_header ( tcp_sw_conn - > tx_hash , hdr , hdrlen ,
2007-12-14 02:43:35 +08:00
hdr + hdrlen ) ;
hdrlen + = ISCSI_DIGEST_SIZE ;
}
/* Remember header pointer for later, when we need
* to decide whether there ' s a payload to go along
* with the header . */
2008-12-02 14:32:12 +08:00
tcp_sw_conn - > out . hdr = hdr ;
2007-12-14 02:43:35 +08:00
2008-12-02 14:32:12 +08:00
iscsi_segment_init_linear ( & tcp_sw_conn - > out . segment , hdr , hdrlen ,
iscsi_sw_tcp_send_hdr_done , NULL ) ;
2007-12-14 02:43:35 +08:00
}
/*
* Prepare the send buffer for the payload data .
* Padding and checksumming will all be taken care
* of by the iscsi_segment routines .
*/
static int
2008-12-02 14:32:12 +08:00
iscsi_sw_tcp_send_data_prep ( struct iscsi_conn * conn , struct scatterlist * sg ,
unsigned int count , unsigned int offset ,
unsigned int len )
2007-12-14 02:43:35 +08:00
{
struct iscsi_tcp_conn * tcp_conn = conn - > dd_data ;
2008-12-02 14:32:12 +08:00
struct iscsi_sw_tcp_conn * tcp_sw_conn = tcp_conn - > dd_data ;
2016-01-24 21:19:41 +08:00
struct ahash_request * tx_hash = NULL ;
2007-12-14 02:43:35 +08:00
unsigned int hdr_spec_len ;
2009-03-06 04:46:00 +08:00
ISCSI_SW_TCP_DBG ( conn , " offset=%d, datalen=%d %s \n " , offset , len ,
conn - > datadgst_en ?
" digest enabled " : " digest disabled " ) ;
2007-12-14 02:43:35 +08:00
/* Make sure the datalen matches what the caller
said he would send . */
2008-12-02 14:32:12 +08:00
hdr_spec_len = ntoh24 ( tcp_sw_conn - > out . hdr - > dlength ) ;
2007-12-14 02:43:35 +08:00
WARN_ON ( iscsi_padded ( len ) ! = iscsi_padded ( hdr_spec_len ) ) ;
if ( conn - > datadgst_en )
2016-01-24 21:19:41 +08:00
tx_hash = tcp_sw_conn - > tx_hash ;
2007-12-14 02:43:35 +08:00
2008-12-02 14:32:12 +08:00
return iscsi_segment_seek_sg ( & tcp_sw_conn - > out . data_segment ,
sg , count , offset , len ,
NULL , tx_hash ) ;
2007-12-14 02:43:35 +08:00
}
static void
2008-12-02 14:32:12 +08:00
iscsi_sw_tcp_send_linear_data_prep ( struct iscsi_conn * conn , void * data ,
2007-12-14 02:43:35 +08:00
size_t len )
{
struct iscsi_tcp_conn * tcp_conn = conn - > dd_data ;
2008-12-02 14:32:12 +08:00
struct iscsi_sw_tcp_conn * tcp_sw_conn = tcp_conn - > dd_data ;
2016-01-24 21:19:41 +08:00
struct ahash_request * tx_hash = NULL ;
2007-12-14 02:43:35 +08:00
unsigned int hdr_spec_len ;
2009-03-06 04:46:00 +08:00
ISCSI_SW_TCP_DBG ( conn , " datalen=%zd %s \n " , len , conn - > datadgst_en ?
" digest enabled " : " digest disabled " ) ;
2007-12-14 02:43:35 +08:00
/* Make sure the datalen matches what the caller
said he would send . */
2008-12-02 14:32:12 +08:00
hdr_spec_len = ntoh24 ( tcp_sw_conn - > out . hdr - > dlength ) ;
2007-12-14 02:43:35 +08:00
WARN_ON ( iscsi_padded ( len ) ! = iscsi_padded ( hdr_spec_len ) ) ;
if ( conn - > datadgst_en )
2016-01-24 21:19:41 +08:00
tx_hash = tcp_sw_conn - > tx_hash ;
2007-12-14 02:43:35 +08:00
2008-12-02 14:32:12 +08:00
iscsi_segment_init_linear ( & tcp_sw_conn - > out . data_segment ,
2007-12-14 02:43:35 +08:00
data , len , NULL , tx_hash ) ;
2005-08-05 10:30:08 +08:00
}
2008-12-02 14:32:12 +08:00
static int iscsi_sw_tcp_pdu_init ( struct iscsi_task * task ,
unsigned int offset , unsigned int count )
2008-12-02 14:32:07 +08:00
{
struct iscsi_conn * conn = task - > conn ;
int err = 0 ;
2008-12-02 14:32:12 +08:00
iscsi_sw_tcp_send_hdr_prep ( conn , task - > hdr , task - > hdr_len ) ;
2008-12-02 14:32:07 +08:00
if ( ! count )
return 0 ;
if ( ! task - > sc )
2008-12-02 14:32:12 +08:00
iscsi_sw_tcp_send_linear_data_prep ( conn , task - > data , count ) ;
2008-12-02 14:32:07 +08:00
else {
2019-01-29 16:33:07 +08:00
struct scsi_data_buffer * sdb = & task - > sc - > sdb ;
2008-12-02 14:32:07 +08:00
2008-12-02 14:32:12 +08:00
err = iscsi_sw_tcp_send_data_prep ( conn , sdb - > table . sgl ,
sdb - > table . nents , offset ,
count ) ;
2008-12-02 14:32:07 +08:00
}
if ( err ) {
2009-04-22 04:32:31 +08:00
/* got invalid offset/len */
2008-12-02 14:32:07 +08:00
return - EIO ;
}
return 0 ;
}
2008-12-02 14:32:14 +08:00
static int iscsi_sw_tcp_pdu_alloc ( struct iscsi_task * task , uint8_t opcode )
2005-08-05 10:30:08 +08:00
{
2008-05-22 04:54:10 +08:00
struct iscsi_tcp_task * tcp_task = task - > dd_data ;
2005-08-05 10:30:08 +08:00
2008-12-02 14:32:12 +08:00
task - > hdr = task - > dd_data + sizeof ( * tcp_task ) ;
task - > hdr_max = sizeof ( struct iscsi_sw_tcp_hdrbuf ) - ISCSI_DIGEST_SIZE ;
2007-12-14 02:43:35 +08:00
return 0 ;
2005-08-05 10:30:08 +08:00
}
2006-04-07 10:26:46 +08:00
static struct iscsi_cls_conn *
2008-12-02 14:32:12 +08:00
iscsi_sw_tcp_conn_create ( struct iscsi_cls_session * cls_session ,
uint32_t conn_idx )
2005-08-05 10:30:08 +08:00
{
2006-04-07 10:26:46 +08:00
struct iscsi_conn * conn ;
struct iscsi_cls_conn * cls_conn ;
struct iscsi_tcp_conn * tcp_conn ;
2008-12-02 14:32:12 +08:00
struct iscsi_sw_tcp_conn * tcp_sw_conn ;
2016-01-24 21:19:41 +08:00
struct crypto_ahash * tfm ;
2005-08-05 10:30:08 +08:00
2008-12-02 14:32:12 +08:00
cls_conn = iscsi_tcp_conn_setup ( cls_session , sizeof ( * tcp_sw_conn ) ,
conn_idx ) ;
2006-04-07 10:26:46 +08:00
if ( ! cls_conn )
return NULL ;
conn = cls_conn - > dd_data ;
2008-05-22 04:54:01 +08:00
tcp_conn = conn - > dd_data ;
2008-12-02 14:32:12 +08:00
tcp_sw_conn = tcp_conn - > dd_data ;
2022-06-17 06:45:51 +08:00
INIT_WORK ( & conn - > recvwork , iscsi_sw_tcp_recv_data_work ) ;
tcp_sw_conn - > queue_recv = iscsi_recv_from_iscsi_q ;
2005-08-05 10:30:08 +08:00
2022-09-08 06:17:00 +08:00
mutex_init ( & tcp_sw_conn - > sock_lock ) ;
2016-01-24 21:19:41 +08:00
tfm = crypto_alloc_ahash ( " crc32c " , 0 , CRYPTO_ALG_ASYNC ) ;
if ( IS_ERR ( tfm ) )
2008-05-22 04:54:01 +08:00
goto free_conn ;
2006-09-01 06:09:28 +08:00
2016-01-24 21:19:41 +08:00
tcp_sw_conn - > tx_hash = ahash_request_alloc ( tfm , GFP_KERNEL ) ;
if ( ! tcp_sw_conn - > tx_hash )
goto free_tfm ;
ahash_request_set_callback ( tcp_sw_conn - > tx_hash , 0 , NULL , NULL ) ;
tcp_sw_conn - > rx_hash = ahash_request_alloc ( tfm , GFP_KERNEL ) ;
if ( ! tcp_sw_conn - > rx_hash )
goto free_tx_hash ;
ahash_request_set_callback ( tcp_sw_conn - > rx_hash , 0 , NULL , NULL ) ;
tcp_conn - > rx_hash = tcp_sw_conn - > rx_hash ;
2006-09-01 06:09:28 +08:00
2006-04-07 10:26:46 +08:00
return cls_conn ;
2005-08-05 10:30:08 +08:00
2016-01-24 21:19:41 +08:00
free_tx_hash :
ahash_request_free ( tcp_sw_conn - > tx_hash ) ;
free_tfm :
crypto_free_ahash ( tfm ) ;
2008-05-22 04:54:01 +08:00
free_conn :
2008-02-01 03:36:52 +08:00
iscsi_conn_printk ( KERN_ERR , conn ,
" Could not create connection due to crc32c "
" loading error. Make sure the crc32c "
" module is built as a module or into the "
" kernel \n " ) ;
2008-12-02 14:32:12 +08:00
iscsi_tcp_conn_teardown ( cls_conn ) ;
2006-04-07 10:26:46 +08:00
return NULL ;
2005-08-05 10:30:08 +08:00
}
2008-12-02 14:32:12 +08:00
static void iscsi_sw_tcp_release_conn ( struct iscsi_conn * conn )
2006-07-25 04:47:26 +08:00
{
struct iscsi_tcp_conn * tcp_conn = conn - > dd_data ;
2008-12-02 14:32:12 +08:00
struct iscsi_sw_tcp_conn * tcp_sw_conn = tcp_conn - > dd_data ;
struct socket * sock = tcp_sw_conn - > sock ;
2006-07-25 04:47:26 +08:00
2022-09-08 06:17:00 +08:00
/*
* The iscsi transport class will make sure we are not called in
* parallel with start , stop , bind and destroys . However , this can be
* called twice if userspace does a stop then a destroy .
*/
2007-05-31 01:57:24 +08:00
if ( ! sock )
2006-07-25 04:47:26 +08:00
return ;
2021-05-26 02:18:02 +08:00
/*
* Make sure we start socket shutdown now in case userspace is up
* but delayed in releasing the socket .
*/
kernel_sock_shutdown ( sock , SHUT_RDWR ) ;
2007-05-31 01:57:24 +08:00
sock_hold ( sock - > sk ) ;
2010-04-10 11:07:38 +08:00
iscsi_sw_tcp_conn_restore_callbacks ( conn ) ;
2007-05-31 01:57:24 +08:00
sock_put ( sock - > sk ) ;
2006-07-25 04:47:26 +08:00
2022-06-17 06:45:51 +08:00
iscsi_suspend_rx ( conn ) ;
2022-09-08 06:17:00 +08:00
mutex_lock ( & tcp_sw_conn - > sock_lock ) ;
2008-12-02 14:32:12 +08:00
tcp_sw_conn - > sock = NULL ;
2022-09-08 06:17:00 +08:00
mutex_unlock ( & tcp_sw_conn - > sock_lock ) ;
2007-05-31 01:57:24 +08:00
sockfd_put ( sock ) ;
2006-07-25 04:47:26 +08:00
}
2008-12-02 14:32:12 +08:00
static void iscsi_sw_tcp_conn_destroy ( struct iscsi_cls_conn * cls_conn )
2005-08-05 10:30:08 +08:00
{
2006-04-07 10:26:46 +08:00
struct iscsi_conn * conn = cls_conn - > dd_data ;
struct iscsi_tcp_conn * tcp_conn = conn - > dd_data ;
2008-12-02 14:32:12 +08:00
struct iscsi_sw_tcp_conn * tcp_sw_conn = tcp_conn - > dd_data ;
2005-08-05 10:30:08 +08:00
2008-12-02 14:32:12 +08:00
iscsi_sw_tcp_release_conn ( conn ) ;
2005-08-05 10:30:08 +08:00
2016-01-24 21:19:41 +08:00
ahash_request_free ( tcp_sw_conn - > rx_hash ) ;
if ( tcp_sw_conn - > tx_hash ) {
struct crypto_ahash * tfm ;
tfm = crypto_ahash_reqtfm ( tcp_sw_conn - > tx_hash ) ;
ahash_request_free ( tcp_sw_conn - > tx_hash ) ;
crypto_free_ahash ( tfm ) ;
}
2005-08-05 10:30:08 +08:00
2008-12-02 14:32:12 +08:00
iscsi_tcp_conn_teardown ( cls_conn ) ;
2006-04-07 10:26:46 +08:00
}
2005-08-05 10:30:08 +08:00
2008-12-02 14:32:12 +08:00
static void iscsi_sw_tcp_conn_stop ( struct iscsi_cls_conn * cls_conn , int flag )
2006-07-25 04:47:26 +08:00
{
struct iscsi_conn * conn = cls_conn - > dd_data ;
2008-05-22 04:54:18 +08:00
struct iscsi_tcp_conn * tcp_conn = conn - > dd_data ;
2008-12-02 14:32:12 +08:00
struct iscsi_sw_tcp_conn * tcp_sw_conn = tcp_conn - > dd_data ;
2010-02-11 06:51:47 +08:00
struct socket * sock = tcp_sw_conn - > sock ;
2008-05-22 04:54:18 +08:00
/* userspace may have goofed up and not bound us */
2010-02-11 06:51:47 +08:00
if ( ! sock )
2008-05-22 04:54:18 +08:00
return ;
2006-07-25 04:47:26 +08:00
2010-05-22 07:24:16 +08:00
sock - > sk - > sk_err = EIO ;
wake_up_interruptible ( sk_sleep ( sock - > sk ) ) ;
2010-02-11 06:51:47 +08:00
2011-06-25 04:11:54 +08:00
/* stop xmit side */
iscsi_suspend_tx ( conn ) ;
/* stop recv side and release socket */
2008-12-02 14:32:12 +08:00
iscsi_sw_tcp_release_conn ( conn ) ;
2011-06-25 04:11:54 +08:00
iscsi_conn_stop ( cls_conn , flag ) ;
2006-07-25 04:47:26 +08:00
}
2006-04-07 10:26:46 +08:00
static int
2008-12-02 14:32:12 +08:00
iscsi_sw_tcp_conn_bind ( struct iscsi_cls_session * cls_session ,
struct iscsi_cls_conn * cls_conn , uint64_t transport_eph ,
int is_leading )
2006-04-07 10:26:46 +08:00
{
struct iscsi_conn * conn = cls_conn - > dd_data ;
struct iscsi_tcp_conn * tcp_conn = conn - > dd_data ;
2008-12-02 14:32:12 +08:00
struct iscsi_sw_tcp_conn * tcp_sw_conn = tcp_conn - > dd_data ;
2006-04-07 10:26:46 +08:00
struct sock * sk ;
struct socket * sock ;
int err ;
2005-08-05 10:30:08 +08:00
2006-04-07 10:26:46 +08:00
/* lookup for existing socket */
2006-05-03 08:46:36 +08:00
sock = sockfd_lookup ( ( int ) transport_eph , & err ) ;
2006-04-07 10:26:46 +08:00
if ( ! sock ) {
2008-02-01 03:36:52 +08:00
iscsi_conn_printk ( KERN_ERR , conn ,
" sockfd_lookup failed %d \n " , err ) ;
2006-04-07 10:26:46 +08:00
return - EEXIST ;
2005-08-05 10:30:08 +08:00
}
2023-09-16 01:11:11 +08:00
err = - EINVAL ;
if ( ! sk_is_tcp ( sock - > sk ) )
goto free_socket ;
2006-04-07 10:26:46 +08:00
err = iscsi_conn_bind ( cls_session , cls_conn , is_leading ) ;
if ( err )
2007-05-31 01:57:24 +08:00
goto free_socket ;
2005-08-05 10:30:08 +08:00
2022-09-08 06:17:00 +08:00
mutex_lock ( & tcp_sw_conn - > sock_lock ) ;
2006-05-30 13:37:20 +08:00
/* bind iSCSI connection and socket */
2008-12-02 14:32:12 +08:00
tcp_sw_conn - > sock = sock ;
2022-09-08 06:17:00 +08:00
mutex_unlock ( & tcp_sw_conn - > sock_lock ) ;
2005-08-05 10:30:08 +08:00
2006-05-30 13:37:20 +08:00
/* setup Socket parameters */
sk = sock - > sk ;
2012-04-19 11:39:36 +08:00
sk - > sk_reuse = SK_CAN_REUSE ;
2006-05-30 13:37:20 +08:00
sk - > sk_sndtimeo = 15 * HZ ; /* FIXME: make it configurable */
sk - > sk_allocation = GFP_ATOMIC ;
2022-12-16 20:45:27 +08:00
sk - > sk_use_task_frag = false ;
2013-04-11 00:24:39 +08:00
sk_set_memalloc ( sk ) ;
2021-05-26 02:18:01 +08:00
sock_no_linger ( sk ) ;
2005-08-05 10:30:08 +08:00
2008-12-02 14:32:12 +08:00
iscsi_sw_tcp_conn_set_callbacks ( conn ) ;
2006-05-30 13:37:20 +08:00
/*
* set receive state machine into initial state
*/
2007-12-14 02:43:21 +08:00
iscsi_tcp_hdr_recv_prep ( tcp_conn ) ;
2005-08-05 10:30:08 +08:00
return 0 ;
2007-05-31 01:57:24 +08:00
free_socket :
sockfd_put ( sock ) ;
return err ;
2005-08-05 10:30:08 +08:00
}
2008-12-02 14:32:12 +08:00
static int iscsi_sw_tcp_conn_set_param ( struct iscsi_cls_conn * cls_conn ,
enum iscsi_param param , char * buf ,
int buflen )
2005-08-05 10:30:08 +08:00
{
2006-02-02 11:06:49 +08:00
struct iscsi_conn * conn = cls_conn - > dd_data ;
2006-04-07 10:26:46 +08:00
struct iscsi_tcp_conn * tcp_conn = conn - > dd_data ;
2008-12-02 14:32:12 +08:00
struct iscsi_sw_tcp_conn * tcp_sw_conn = tcp_conn - > dd_data ;
2005-08-05 10:30:08 +08:00
switch ( param ) {
case ISCSI_PARAM_HDRDGST_EN :
2006-06-29 01:00:26 +08:00
iscsi_set_param ( cls_conn , param , buf , buflen ) ;
2005-08-05 10:30:08 +08:00
break ;
case ISCSI_PARAM_DATADGST_EN :
2022-09-08 06:17:00 +08:00
mutex_lock ( & tcp_sw_conn - > sock_lock ) ;
if ( ! tcp_sw_conn - > sock ) {
mutex_unlock ( & tcp_sw_conn - > sock_lock ) ;
return - ENOTCONN ;
}
2023-03-29 15:17:39 +08:00
iscsi_set_param ( cls_conn , param , buf , buflen ) ;
2022-09-08 06:17:00 +08:00
mutex_unlock ( & tcp_sw_conn - > sock_lock ) ;
2005-08-05 10:30:08 +08:00
break ;
case ISCSI_PARAM_MAX_R2T :
2012-01-27 11:13:10 +08:00
return iscsi_tcp_set_max_r2t ( conn , buf ) ;
2005-08-05 10:30:08 +08:00
default :
2006-06-29 01:00:26 +08:00
return iscsi_set_param ( cls_conn , param , buf , buflen ) ;
2005-08-05 10:30:08 +08:00
}
return 0 ;
}
2008-12-02 14:32:12 +08:00
static int iscsi_sw_tcp_conn_get_param ( struct iscsi_cls_conn * cls_conn ,
enum iscsi_param param , char * buf )
2006-01-14 08:05:50 +08:00
{
2006-02-02 11:06:49 +08:00
struct iscsi_conn * conn = cls_conn - > dd_data ;
2022-09-08 06:17:00 +08:00
struct iscsi_sw_tcp_conn * tcp_sw_conn ;
struct iscsi_tcp_conn * tcp_conn ;
2011-02-17 05:04:36 +08:00
struct sockaddr_in6 addr ;
2020-09-28 12:33:29 +08:00
struct socket * sock ;
2018-02-13 03:00:20 +08:00
int rc ;
2006-01-14 08:05:50 +08:00
switch ( param ) {
2006-04-07 10:13:36 +08:00
case ISCSI_PARAM_CONN_PORT :
case ISCSI_PARAM_CONN_ADDRESS :
2014-09-30 02:55:42 +08:00
case ISCSI_PARAM_LOCAL_PORT :
[SCSI] libiscsi: Reduce locking contention in fast path
Replace the session lock with two locks, a forward lock and
a backwards lock named frwd_lock and back_lock respectively.
The forward lock protects resources that change while sending a
request to the target, such as cmdsn, queued_cmdsn, and allocating
task from the commands' pool with kfifo_out.
The backward lock protects resources that change while processing
a response or in error path, such as cmdsn_exp, cmdsn_max, and
returning tasks to the commands' pool with kfifo_in.
Under a steady state fast-path situation, that is when one
or more processes/threads submit IO to an iscsi device and
a single kernel upcall (e.g softirq) is dealing with processing
of responses without errors, this patch eliminates the contention
between the queuecommand()/request response/scsi_done() flows
associated with iscsi sessions.
Between the forward and the backward locks exists a strict locking
hierarchy. The mutual exclusion zone protected by the forward lock can
enclose the mutual exclusion zone protected by the backward lock but not
vice versa.
For example, in iscsi_conn_teardown or in iscsi_xmit_data when there is
a failure and __iscsi_put_task is called, the backward lock is taken while
the forward lock is still taken. On the other hand, if in the RX path a nop
is to be sent, for example in iscsi_handle_reject or __iscsi_complete_pdu
than the forward lock is released and the backward lock is taken for the
duration of iscsi_send_nopout, later the backward lock is released and the
forward lock is retaken.
libiscsi_tcp uses two kernel fifos the r2t pool and the r2t queue.
The insertion and deletion from these queues didn't corespond to the
assumption taken by the new forward/backwards session locking paradigm.
That is, in iscsi_tcp_clenup_task which belongs to the RX (backwards)
path, r2t is taken out from r2t queue and inserted to the r2t pool.
In iscsi_tcp_get_curr_r2t which belong to the TX (forward) path, r2t
is also inserted to the r2t pool and another r2t is pulled from r2t
queue.
Only in iscsi_tcp_r2t_rsp which is called in the RX path but can requeue
to the TX path, r2t is taken from the r2t pool and inserted to the r2t
queue.
In order to cope with this situation, two spin locks were added,
pool2queue and queue2pool. The former protects extracting from the
r2t pool and inserting to the r2t queue, and the later protects the
extracing from the r2t queue and inserting to the r2t pool.
Signed-off-by: Shlomo Pongratz <shlomop@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
[minor fix up to apply cleanly and compile fix]
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
2014-02-07 14:41:38 +08:00
spin_lock_bh ( & conn - > session - > frwd_lock ) ;
2022-09-08 06:17:00 +08:00
if ( ! conn - > session - > leadconn ) {
[SCSI] libiscsi: Reduce locking contention in fast path
Replace the session lock with two locks, a forward lock and
a backwards lock named frwd_lock and back_lock respectively.
The forward lock protects resources that change while sending a
request to the target, such as cmdsn, queued_cmdsn, and allocating
task from the commands' pool with kfifo_out.
The backward lock protects resources that change while processing
a response or in error path, such as cmdsn_exp, cmdsn_max, and
returning tasks to the commands' pool with kfifo_in.
Under a steady state fast-path situation, that is when one
or more processes/threads submit IO to an iscsi device and
a single kernel upcall (e.g softirq) is dealing with processing
of responses without errors, this patch eliminates the contention
between the queuecommand()/request response/scsi_done() flows
associated with iscsi sessions.
Between the forward and the backward locks exists a strict locking
hierarchy. The mutual exclusion zone protected by the forward lock can
enclose the mutual exclusion zone protected by the backward lock but not
vice versa.
For example, in iscsi_conn_teardown or in iscsi_xmit_data when there is
a failure and __iscsi_put_task is called, the backward lock is taken while
the forward lock is still taken. On the other hand, if in the RX path a nop
is to be sent, for example in iscsi_handle_reject or __iscsi_complete_pdu
than the forward lock is released and the backward lock is taken for the
duration of iscsi_send_nopout, later the backward lock is released and the
forward lock is retaken.
libiscsi_tcp uses two kernel fifos the r2t pool and the r2t queue.
The insertion and deletion from these queues didn't corespond to the
assumption taken by the new forward/backwards session locking paradigm.
That is, in iscsi_tcp_clenup_task which belongs to the RX (backwards)
path, r2t is taken out from r2t queue and inserted to the r2t pool.
In iscsi_tcp_get_curr_r2t which belong to the TX (forward) path, r2t
is also inserted to the r2t pool and another r2t is pulled from r2t
queue.
Only in iscsi_tcp_r2t_rsp which is called in the RX path but can requeue
to the TX path, r2t is taken from the r2t pool and inserted to the r2t
queue.
In order to cope with this situation, two spin locks were added,
pool2queue and queue2pool. The former protects extracting from the
r2t pool and inserting to the r2t queue, and the later protects the
extracing from the r2t queue and inserting to the r2t pool.
Signed-off-by: Shlomo Pongratz <shlomop@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
[minor fix up to apply cleanly and compile fix]
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
2014-02-07 14:41:38 +08:00
spin_unlock_bh ( & conn - > session - > frwd_lock ) ;
2011-02-17 05:04:36 +08:00
return - ENOTCONN ;
}
2022-09-08 06:17:00 +08:00
/*
* The conn has been setup and bound , so just grab a ref
* incase a destroy runs while we are in the net layer .
*/
iscsi_get_conn ( conn - > cls_conn ) ;
2020-09-28 12:33:29 +08:00
spin_unlock_bh ( & conn - > session - > frwd_lock ) ;
2022-09-08 06:17:00 +08:00
tcp_conn = conn - > dd_data ;
tcp_sw_conn = tcp_conn - > dd_data ;
mutex_lock ( & tcp_sw_conn - > sock_lock ) ;
sock = tcp_sw_conn - > sock ;
if ( ! sock ) {
rc = - ENOTCONN ;
goto sock_unlock ;
}
2014-09-30 02:55:42 +08:00
if ( param = = ISCSI_PARAM_LOCAL_PORT )
2020-09-28 12:33:29 +08:00
rc = kernel_getsockname ( sock ,
2018-02-13 03:00:20 +08:00
( struct sockaddr * ) & addr ) ;
2014-09-30 02:55:42 +08:00
else
2020-09-28 12:33:29 +08:00
rc = kernel_getpeername ( sock ,
2018-02-13 03:00:20 +08:00
( struct sockaddr * ) & addr ) ;
2022-09-08 06:17:00 +08:00
sock_unlock :
mutex_unlock ( & tcp_sw_conn - > sock_lock ) ;
iscsi_put_conn ( conn - > cls_conn ) ;
2018-02-13 03:00:20 +08:00
if ( rc < 0 )
2011-02-17 05:04:36 +08:00
return rc ;
return iscsi_conn_get_addr_param ( ( struct sockaddr_storage * )
& addr , param , buf ) ;
2006-04-07 10:13:36 +08:00
default :
2006-06-29 01:00:26 +08:00
return iscsi_conn_get_param ( cls_conn , param , buf ) ;
2006-04-07 10:13:36 +08:00
}
2011-02-17 05:04:36 +08:00
return 0 ;
}
static int iscsi_sw_tcp_host_get_param ( struct Scsi_Host * shost ,
enum iscsi_host_param param , char * buf )
{
struct iscsi_sw_tcp_host * tcp_sw_host = iscsi_host_priv ( shost ) ;
2023-01-18 03:39:37 +08:00
struct iscsi_session * session ;
2011-02-17 05:04:36 +08:00
struct iscsi_conn * conn ;
struct iscsi_tcp_conn * tcp_conn ;
struct iscsi_sw_tcp_conn * tcp_sw_conn ;
struct sockaddr_in6 addr ;
2020-09-28 12:33:29 +08:00
struct socket * sock ;
2018-02-13 03:00:20 +08:00
int rc ;
2011-02-17 05:04:36 +08:00
switch ( param ) {
case ISCSI_HOST_PARAM_IPADDRESS :
2023-01-18 03:39:37 +08:00
session = tcp_sw_host - > session ;
2014-02-07 14:41:42 +08:00
if ( ! session )
return - ENOTCONN ;
[SCSI] libiscsi: Reduce locking contention in fast path
Replace the session lock with two locks, a forward lock and
a backwards lock named frwd_lock and back_lock respectively.
The forward lock protects resources that change while sending a
request to the target, such as cmdsn, queued_cmdsn, and allocating
task from the commands' pool with kfifo_out.
The backward lock protects resources that change while processing
a response or in error path, such as cmdsn_exp, cmdsn_max, and
returning tasks to the commands' pool with kfifo_in.
Under a steady state fast-path situation, that is when one
or more processes/threads submit IO to an iscsi device and
a single kernel upcall (e.g softirq) is dealing with processing
of responses without errors, this patch eliminates the contention
between the queuecommand()/request response/scsi_done() flows
associated with iscsi sessions.
Between the forward and the backward locks exists a strict locking
hierarchy. The mutual exclusion zone protected by the forward lock can
enclose the mutual exclusion zone protected by the backward lock but not
vice versa.
For example, in iscsi_conn_teardown or in iscsi_xmit_data when there is
a failure and __iscsi_put_task is called, the backward lock is taken while
the forward lock is still taken. On the other hand, if in the RX path a nop
is to be sent, for example in iscsi_handle_reject or __iscsi_complete_pdu
than the forward lock is released and the backward lock is taken for the
duration of iscsi_send_nopout, later the backward lock is released and the
forward lock is retaken.
libiscsi_tcp uses two kernel fifos the r2t pool and the r2t queue.
The insertion and deletion from these queues didn't corespond to the
assumption taken by the new forward/backwards session locking paradigm.
That is, in iscsi_tcp_clenup_task which belongs to the RX (backwards)
path, r2t is taken out from r2t queue and inserted to the r2t pool.
In iscsi_tcp_get_curr_r2t which belong to the TX (forward) path, r2t
is also inserted to the r2t pool and another r2t is pulled from r2t
queue.
Only in iscsi_tcp_r2t_rsp which is called in the RX path but can requeue
to the TX path, r2t is taken from the r2t pool and inserted to the r2t
queue.
In order to cope with this situation, two spin locks were added,
pool2queue and queue2pool. The former protects extracting from the
r2t pool and inserting to the r2t queue, and the later protects the
extracing from the r2t queue and inserting to the r2t pool.
Signed-off-by: Shlomo Pongratz <shlomop@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
[minor fix up to apply cleanly and compile fix]
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
2014-02-07 14:41:38 +08:00
spin_lock_bh ( & session - > frwd_lock ) ;
2011-02-17 05:04:36 +08:00
conn = session - > leadconn ;
if ( ! conn ) {
[SCSI] libiscsi: Reduce locking contention in fast path
Replace the session lock with two locks, a forward lock and
a backwards lock named frwd_lock and back_lock respectively.
The forward lock protects resources that change while sending a
request to the target, such as cmdsn, queued_cmdsn, and allocating
task from the commands' pool with kfifo_out.
The backward lock protects resources that change while processing
a response or in error path, such as cmdsn_exp, cmdsn_max, and
returning tasks to the commands' pool with kfifo_in.
Under a steady state fast-path situation, that is when one
or more processes/threads submit IO to an iscsi device and
a single kernel upcall (e.g softirq) is dealing with processing
of responses without errors, this patch eliminates the contention
between the queuecommand()/request response/scsi_done() flows
associated with iscsi sessions.
Between the forward and the backward locks exists a strict locking
hierarchy. The mutual exclusion zone protected by the forward lock can
enclose the mutual exclusion zone protected by the backward lock but not
vice versa.
For example, in iscsi_conn_teardown or in iscsi_xmit_data when there is
a failure and __iscsi_put_task is called, the backward lock is taken while
the forward lock is still taken. On the other hand, if in the RX path a nop
is to be sent, for example in iscsi_handle_reject or __iscsi_complete_pdu
than the forward lock is released and the backward lock is taken for the
duration of iscsi_send_nopout, later the backward lock is released and the
forward lock is retaken.
libiscsi_tcp uses two kernel fifos the r2t pool and the r2t queue.
The insertion and deletion from these queues didn't corespond to the
assumption taken by the new forward/backwards session locking paradigm.
That is, in iscsi_tcp_clenup_task which belongs to the RX (backwards)
path, r2t is taken out from r2t queue and inserted to the r2t pool.
In iscsi_tcp_get_curr_r2t which belong to the TX (forward) path, r2t
is also inserted to the r2t pool and another r2t is pulled from r2t
queue.
Only in iscsi_tcp_r2t_rsp which is called in the RX path but can requeue
to the TX path, r2t is taken from the r2t pool and inserted to the r2t
queue.
In order to cope with this situation, two spin locks were added,
pool2queue and queue2pool. The former protects extracting from the
r2t pool and inserting to the r2t queue, and the later protects the
extracing from the r2t queue and inserting to the r2t pool.
Signed-off-by: Shlomo Pongratz <shlomop@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
[minor fix up to apply cleanly and compile fix]
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
2014-02-07 14:41:38 +08:00
spin_unlock_bh ( & session - > frwd_lock ) ;
2011-02-17 05:04:36 +08:00
return - ENOTCONN ;
}
tcp_conn = conn - > dd_data ;
tcp_sw_conn = tcp_conn - > dd_data ;
2022-09-08 06:17:00 +08:00
/*
* The conn has been setup and bound , so just grab a ref
* incase a destroy runs while we are in the net layer .
*/
iscsi_get_conn ( conn - > cls_conn ) ;
2020-09-28 12:33:29 +08:00
spin_unlock_bh ( & session - > frwd_lock ) ;
2011-02-17 05:04:36 +08:00
2022-09-08 06:17:00 +08:00
mutex_lock ( & tcp_sw_conn - > sock_lock ) ;
sock = tcp_sw_conn - > sock ;
if ( ! sock )
rc = - ENOTCONN ;
else
rc = kernel_getsockname ( sock , ( struct sockaddr * ) & addr ) ;
mutex_unlock ( & tcp_sw_conn - > sock_lock ) ;
iscsi_put_conn ( conn - > cls_conn ) ;
2018-02-13 03:00:20 +08:00
if ( rc < 0 )
2011-02-17 05:04:36 +08:00
return rc ;
return iscsi_conn_get_addr_param ( ( struct sockaddr_storage * )
2018-10-04 09:06:15 +08:00
& addr ,
( enum iscsi_param ) param , buf ) ;
2011-02-17 05:04:36 +08:00
default :
return iscsi_host_get_param ( shost , param , buf ) ;
}
return 0 ;
2006-04-07 10:13:36 +08:00
}
2005-08-05 10:30:08 +08:00
static void
2008-12-02 14:32:12 +08:00
iscsi_sw_tcp_conn_get_stats ( struct iscsi_cls_conn * cls_conn ,
struct iscsi_stats * stats )
2005-08-05 10:30:08 +08:00
{
2006-02-02 11:06:49 +08:00
struct iscsi_conn * conn = cls_conn - > dd_data ;
2006-04-07 10:26:46 +08:00
struct iscsi_tcp_conn * tcp_conn = conn - > dd_data ;
2008-12-02 14:32:12 +08:00
struct iscsi_sw_tcp_conn * tcp_sw_conn = tcp_conn - > dd_data ;
2005-08-05 10:30:08 +08:00
stats - > custom_length = 3 ;
strcpy ( stats - > custom [ 0 ] . desc , " tx_sendpage_failures " ) ;
2008-12-02 14:32:12 +08:00
stats - > custom [ 0 ] . value = tcp_sw_conn - > sendpage_failures_cnt ;
2005-08-05 10:30:08 +08:00
strcpy ( stats - > custom [ 1 ] . desc , " rx_discontiguous_hdr " ) ;
2008-12-02 14:32:12 +08:00
stats - > custom [ 1 ] . value = tcp_sw_conn - > discontiguous_hdr_cnt ;
2005-08-05 10:30:08 +08:00
strcpy ( stats - > custom [ 2 ] . desc , " eh_abort_cnt " ) ;
stats - > custom [ 2 ] . value = conn - > eh_abort_cnt ;
2008-12-02 14:32:12 +08:00
iscsi_tcp_conn_get_stats ( cls_conn , stats ) ;
2005-08-05 10:30:08 +08:00
}
2006-04-07 10:26:46 +08:00
static struct iscsi_cls_session *
2008-12-02 14:32:12 +08:00
iscsi_sw_tcp_session_create ( struct iscsi_endpoint * ep , uint16_t cmds_max ,
2009-03-06 04:46:06 +08:00
uint16_t qdepth , uint32_t initial_cmdsn )
2005-08-05 10:30:08 +08:00
{
2006-04-07 10:26:46 +08:00
struct iscsi_cls_session * cls_session ;
struct iscsi_session * session ;
2011-02-17 05:04:36 +08:00
struct iscsi_sw_tcp_host * tcp_sw_host ;
2008-05-22 04:54:15 +08:00
struct Scsi_Host * shost ;
2021-02-07 12:46:05 +08:00
int rc ;
2005-08-05 10:30:08 +08:00
2008-05-22 04:54:15 +08:00
if ( ep ) {
printk ( KERN_ERR " iscsi_tcp: invalid ep %p. \n " , ep ) ;
2008-05-22 04:53:59 +08:00
return NULL ;
}
2011-02-17 05:04:36 +08:00
shost = iscsi_host_alloc ( & iscsi_sw_tcp_sht ,
sizeof ( struct iscsi_sw_tcp_host ) , 1 ) ;
2008-05-22 04:53:59 +08:00
if ( ! shost )
2006-04-07 10:26:46 +08:00
return NULL ;
2008-12-02 14:32:12 +08:00
shost - > transportt = iscsi_sw_tcp_scsi_transport ;
2009-03-06 04:46:04 +08:00
shost - > cmd_per_lun = qdepth ;
2008-05-22 04:53:59 +08:00
shost - > max_lun = iscsi_max_lun ;
shost - > max_id = 0 ;
shost - > max_channel = 0 ;
2008-05-26 16:31:19 +08:00
shost - > max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE ;
2024-04-09 22:37:32 +08:00
shost - > dma_alignment = 0 ;
2008-05-22 04:53:59 +08:00
2021-02-07 12:46:05 +08:00
rc = iscsi_host_get_max_scsi_cmds ( shost , cmds_max ) ;
if ( rc < 0 )
goto free_host ;
shost - > can_queue = rc ;
2008-05-22 04:54:00 +08:00
if ( iscsi_host_add ( shost , NULL ) )
2008-05-22 04:53:59 +08:00
goto free_host ;
2008-12-02 14:32:12 +08:00
cls_session = iscsi_session_setup ( & iscsi_sw_tcp_transport , shost ,
2009-09-22 10:51:22 +08:00
cmds_max , 0 ,
2008-12-02 14:32:12 +08:00
sizeof ( struct iscsi_tcp_task ) +
sizeof ( struct iscsi_sw_tcp_hdrbuf ) ,
2008-05-22 04:54:12 +08:00
initial_cmdsn , 0 ) ;
2008-05-22 04:53:59 +08:00
if ( ! cls_session )
goto remove_host ;
session = cls_session - > dd_data ;
2005-08-05 10:30:08 +08:00
2008-12-02 14:32:12 +08:00
if ( iscsi_tcp_r2tpool_alloc ( session ) )
2008-05-22 04:53:59 +08:00
goto remove_session ;
2023-01-18 03:39:37 +08:00
/* We are now fully setup so expose the session to sysfs. */
tcp_sw_host = iscsi_host_priv ( shost ) ;
tcp_sw_host - > session = session ;
2006-04-07 10:26:46 +08:00
return cls_session ;
2008-05-22 04:53:59 +08:00
remove_session :
2006-04-07 10:26:46 +08:00
iscsi_session_teardown ( cls_session ) ;
2008-05-22 04:53:59 +08:00
remove_host :
2022-06-17 06:27:38 +08:00
iscsi_host_remove ( shost , false ) ;
2008-05-22 04:53:59 +08:00
free_host :
2008-05-22 04:54:00 +08:00
iscsi_host_free ( shost ) ;
2006-04-07 10:26:46 +08:00
return NULL ;
}
2008-12-02 14:32:12 +08:00
static void iscsi_sw_tcp_session_destroy ( struct iscsi_cls_session * cls_session )
2006-04-07 10:26:46 +08:00
{
2008-05-22 04:53:59 +08:00
struct Scsi_Host * shost = iscsi_session_to_shost ( cls_session ) ;
2019-12-27 04:31:48 +08:00
struct iscsi_session * session = cls_session - > dd_data ;
if ( WARN_ON_ONCE ( session - > leadconn ) )
return ;
2008-05-22 04:53:59 +08:00
2023-01-18 03:39:36 +08:00
iscsi_session_remove ( cls_session ) ;
/*
* Our get_host_param needs to access the session , so remove the
* host from sysfs before freeing the session to make sure userspace
* is no longer accessing the callout .
*/
iscsi_host_remove ( shost , false ) ;
2008-12-02 14:32:12 +08:00
iscsi_tcp_r2tpool_free ( cls_session - > dd_data ) ;
2008-05-22 04:53:59 +08:00
2023-01-18 03:39:36 +08:00
iscsi_session_free ( cls_session ) ;
2008-05-22 04:54:00 +08:00
iscsi_host_free ( shost ) ;
2005-08-05 10:30:08 +08:00
}
2011-07-24 11:11:19 +08:00
static umode_t iscsi_sw_tcp_attr_is_visible ( int param_type , int param )
2011-07-26 02:48:42 +08:00
{
switch ( param_type ) {
2011-07-26 02:48:45 +08:00
case ISCSI_HOST_PARAM :
switch ( param ) {
case ISCSI_HOST_PARAM_NETDEV_NAME :
case ISCSI_HOST_PARAM_HWADDRESS :
case ISCSI_HOST_PARAM_IPADDRESS :
case ISCSI_HOST_PARAM_INITIATOR_NAME :
return S_IRUGO ;
default :
return 0 ;
}
2011-07-26 02:48:42 +08:00
case ISCSI_PARAM :
switch ( param ) {
case ISCSI_PARAM_MAX_RECV_DLENGTH :
case ISCSI_PARAM_MAX_XMIT_DLENGTH :
case ISCSI_PARAM_HDRDGST_EN :
case ISCSI_PARAM_DATADGST_EN :
case ISCSI_PARAM_CONN_ADDRESS :
case ISCSI_PARAM_CONN_PORT :
2014-09-30 02:55:42 +08:00
case ISCSI_PARAM_LOCAL_PORT :
2011-07-26 02:48:42 +08:00
case ISCSI_PARAM_EXP_STATSN :
case ISCSI_PARAM_PERSISTENT_ADDRESS :
case ISCSI_PARAM_PERSISTENT_PORT :
case ISCSI_PARAM_PING_TMO :
case ISCSI_PARAM_RECV_TMO :
2011-07-26 02:48:43 +08:00
case ISCSI_PARAM_INITIAL_R2T_EN :
case ISCSI_PARAM_MAX_R2T :
case ISCSI_PARAM_IMM_DATA_EN :
case ISCSI_PARAM_FIRST_BURST :
case ISCSI_PARAM_MAX_BURST :
case ISCSI_PARAM_PDU_INORDER_EN :
case ISCSI_PARAM_DATASEQ_INORDER_EN :
case ISCSI_PARAM_ERL :
case ISCSI_PARAM_TARGET_NAME :
case ISCSI_PARAM_TPGT :
case ISCSI_PARAM_USERNAME :
case ISCSI_PARAM_PASSWORD :
case ISCSI_PARAM_USERNAME_IN :
case ISCSI_PARAM_PASSWORD_IN :
case ISCSI_PARAM_FAST_ABORT :
case ISCSI_PARAM_ABORT_TMO :
case ISCSI_PARAM_LU_RESET_TMO :
case ISCSI_PARAM_TGT_RESET_TMO :
case ISCSI_PARAM_IFACE_NAME :
case ISCSI_PARAM_INITIATOR_NAME :
2011-07-26 02:48:42 +08:00
return S_IRUGO ;
default :
return 0 ;
}
}
return 0 ;
}
2024-06-17 14:04:44 +08:00
static int iscsi_sw_tcp_device_configure ( struct scsi_device * sdev ,
struct queue_limits * lim )
2007-05-31 01:57:21 +08:00
{
2018-03-07 20:29:03 +08:00
struct iscsi_sw_tcp_host * tcp_sw_host = iscsi_host_priv ( sdev - > host ) ;
struct iscsi_session * session = tcp_sw_host - > session ;
struct iscsi_conn * conn = session - > leadconn ;
if ( conn - > datadgst_en )
2024-06-17 14:04:44 +08:00
lim - > features | = BLK_FEAT_STABLE_WRITES ;
2007-05-31 01:57:21 +08:00
return 0 ;
}
2023-03-23 03:54:44 +08:00
static const struct scsi_host_template iscsi_sw_tcp_sht = {
2007-07-27 01:46:46 +08:00
. module = THIS_MODULE ,
2006-07-25 04:47:54 +08:00
. name = " iSCSI Initiator over TCP/IP " ,
2006-04-07 10:26:46 +08:00
. queuecommand = iscsi_queuecommand ,
2014-11-13 22:08:42 +08:00
. change_queue_depth = scsi_change_queue_depth ,
2021-02-07 12:46:05 +08:00
. can_queue = ISCSI_TOTAL_CMDS_MAX ,
2007-12-14 02:43:39 +08:00
. sg_tablesize = 4096 ,
2007-03-01 07:32:20 +08:00
. max_sectors = 0xFFFF ,
2006-04-07 10:26:46 +08:00
. cmd_per_lun = ISCSI_DEF_CMD_PER_LUN ,
2017-01-30 20:18:58 +08:00
. eh_timed_out = iscsi_eh_cmd_timed_out ,
2006-04-07 10:26:46 +08:00
. eh_abort_handler = iscsi_eh_abort ,
2007-12-14 02:43:20 +08:00
. eh_device_reset_handler = iscsi_eh_device_reset ,
2010-02-20 10:32:10 +08:00
. eh_target_reset_handler = iscsi_eh_recover_target ,
2018-12-13 23:17:09 +08:00
. dma_boundary = PAGE_SIZE - 1 ,
2024-06-17 14:04:44 +08:00
. device_configure = iscsi_sw_tcp_device_configure ,
2006-04-07 10:26:46 +08:00
. proc_name = " iscsi_tcp " ,
. this_id = - 1 ,
2014-11-13 21:25:11 +08:00
. track_queue_depth = 1 ,
2022-02-19 03:50:53 +08:00
. cmd_size = sizeof ( struct iscsi_cmd ) ,
2006-04-07 10:26:46 +08:00
} ;
2008-12-02 14:32:12 +08:00
static struct iscsi_transport iscsi_sw_tcp_transport = {
2005-08-05 10:30:08 +08:00
. owner = THIS_MODULE ,
. name = " tcp " ,
. caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
| CAP_DATADGST ,
2006-04-07 10:26:46 +08:00
/* session management */
2008-12-02 14:32:12 +08:00
. create_session = iscsi_sw_tcp_session_create ,
. destroy_session = iscsi_sw_tcp_session_destroy ,
2006-04-07 10:26:46 +08:00
/* connection management */
2008-12-02 14:32:12 +08:00
. create_conn = iscsi_sw_tcp_conn_create ,
. bind_conn = iscsi_sw_tcp_conn_bind ,
. destroy_conn = iscsi_sw_tcp_conn_destroy ,
2011-07-26 02:48:42 +08:00
. attr_is_visible = iscsi_sw_tcp_attr_is_visible ,
2008-12-02 14:32:12 +08:00
. set_param = iscsi_sw_tcp_conn_set_param ,
. get_conn_param = iscsi_sw_tcp_conn_get_param ,
2006-01-14 08:05:50 +08:00
. get_session_param = iscsi_session_get_param ,
2005-08-05 10:30:08 +08:00
. start_conn = iscsi_conn_start ,
2008-12-02 14:32:12 +08:00
. stop_conn = iscsi_sw_tcp_conn_stop ,
2007-05-31 01:57:12 +08:00
/* iscsi host params */
2011-02-17 05:04:36 +08:00
. get_host_param = iscsi_sw_tcp_host_get_param ,
2007-05-31 01:57:12 +08:00
. set_host_param = iscsi_host_set_param ,
2006-04-07 10:26:46 +08:00
/* IO */
2005-08-05 10:30:08 +08:00
. send_pdu = iscsi_conn_send_pdu ,
2008-12-02 14:32:12 +08:00
. get_stats = iscsi_sw_tcp_conn_get_stats ,
2008-12-02 14:32:07 +08:00
/* iscsi task/cmd helpers */
2008-05-22 04:54:07 +08:00
. init_task = iscsi_tcp_task_init ,
. xmit_task = iscsi_tcp_task_xmit ,
. cleanup_task = iscsi_tcp_cleanup_task ,
2008-12-02 14:32:07 +08:00
/* low level pdu helpers */
2008-12-02 14:32:12 +08:00
. xmit_pdu = iscsi_sw_tcp_pdu_xmit ,
. init_pdu = iscsi_sw_tcp_pdu_init ,
. alloc_pdu = iscsi_sw_tcp_pdu_alloc ,
2006-04-07 10:26:46 +08:00
/* recovery */
2006-04-07 10:13:39 +08:00
. session_recovery_timedout = iscsi_session_recovery_timedout ,
2005-08-05 10:30:08 +08:00
} ;
2008-12-02 14:32:12 +08:00
/*
 * iscsi_sw_tcp_init - module init: validate params, register the transport
 *
 * Return: 0 on success, -EINVAL for a bad max_lun module parameter,
 * -ENODEV if the transport cannot be registered.
 */
static int __init iscsi_sw_tcp_init(void)
{
	if (iscsi_max_lun < 1) {
		printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n",
		       iscsi_max_lun);
		return -EINVAL;
	}

	iscsi_sw_tcp_scsi_transport = iscsi_register_transport(
						&iscsi_sw_tcp_transport);
	if (!iscsi_sw_tcp_scsi_transport)
		return -ENODEV;

	return 0;
}
2008-12-02 14:32:12 +08:00
/* iscsi_sw_tcp_exit - module exit: unregister the transport. */
static void __exit iscsi_sw_tcp_exit(void)
{
	iscsi_unregister_transport(&iscsi_sw_tcp_transport);
}

module_init(iscsi_sw_tcp_init);
module_exit(iscsi_sw_tcp_exit);