2005-08-12 04:25:23 +08:00
|
|
|
/*
|
|
|
|
* linux/net/sunrpc/xprtsock.c
|
|
|
|
*
|
|
|
|
* Client-side transport implementation for sockets.
|
|
|
|
*
|
2008-10-14 10:01:08 +08:00
|
|
|
* TCP callback races fixes (C) 1998 Red Hat
|
|
|
|
* TCP send fixes (C) 1998 Red Hat
|
2005-08-12 04:25:23 +08:00
|
|
|
* TCP NFS related read + write fixes
|
|
|
|
* (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
|
|
|
|
*
|
|
|
|
* Rewrite of larges part of the code in order to stabilize TCP stuff.
|
|
|
|
* Fix behaviour when socket buffer is full.
|
|
|
|
* (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
|
2005-08-12 04:25:47 +08:00
|
|
|
*
|
|
|
|
* IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
|
2007-08-06 23:57:53 +08:00
|
|
|
*
|
|
|
|
* IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
|
|
|
|
* <gilles.quillard@bull.net>
|
2005-08-12 04:25:23 +08:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/slab.h>
|
2007-09-11 01:46:39 +08:00
|
|
|
#include <linux/module.h>
|
2005-08-12 04:25:23 +08:00
|
|
|
#include <linux/capability.h>
|
|
|
|
#include <linux/pagemap.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/socket.h>
|
|
|
|
#include <linux/in.h>
|
|
|
|
#include <linux/net.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/udp.h>
|
|
|
|
#include <linux/tcp.h>
|
|
|
|
#include <linux/sunrpc/clnt.h>
|
2006-01-03 16:55:49 +08:00
|
|
|
#include <linux/sunrpc/sched.h>
|
2009-09-10 22:32:28 +08:00
|
|
|
#include <linux/sunrpc/svcsock.h>
|
2007-09-11 01:47:31 +08:00
|
|
|
#include <linux/sunrpc/xprtsock.h>
|
2005-08-12 04:25:23 +08:00
|
|
|
#include <linux/file.h>
|
2009-04-01 21:23:02 +08:00
|
|
|
#ifdef CONFIG_NFS_V4_1
|
|
|
|
#include <linux/sunrpc/bc_xprt.h>
|
|
|
|
#endif
|
2005-08-12 04:25:23 +08:00
|
|
|
|
|
|
|
#include <net/sock.h>
|
|
|
|
#include <net/checksum.h>
|
|
|
|
#include <net/udp.h>
|
|
|
|
#include <net/tcp.h>
|
|
|
|
|
2009-09-10 22:32:28 +08:00
|
|
|
#include "sunrpc.h"
|
2005-11-02 01:24:48 +08:00
|
|
|
/*
|
|
|
|
* xprtsock tunables
|
|
|
|
*/
|
|
|
|
/* Number of outstanding request slots per UDP/TCP transport (sysctl-tunable). */
unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE;

/* Range of privileged source ports used when a reserved port is required. */
unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;

/* How long to linger waiting for the TCP FIN handshake to complete. */
#define XS_TCP_LINGER_TO	(15U * HZ)
/* Sysctl-tunable copy of the linger timeout (see xs_tunables_table). */
static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
|
2009-03-12 02:38:03 +08:00
|
|
|
|
2006-12-06 05:35:54 +08:00
|
|
|
/*
|
|
|
|
* We can register our own files under /proc/sys/sunrpc by
|
|
|
|
* calling register_sysctl_table() again. The files in that
|
|
|
|
* directory become the union of all files registered there.
|
|
|
|
*
|
|
|
|
* We simply need to make sure that we don't collide with
|
|
|
|
* someone else's file names!
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifdef RPC_DEBUG
|
|
|
|
|
|
|
|
static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
|
|
|
|
static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
|
|
|
|
static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
|
|
|
|
static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
|
|
|
|
|
|
|
|
static struct ctl_table_header *sunrpc_table_header;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* FIXME: changing the UDP slot table size should also resize the UDP
|
|
|
|
* socket buffers for existing UDP transports
|
|
|
|
*/
|
|
|
|
static ctl_table xs_tunables_table[] = {
|
|
|
|
{
|
|
|
|
.procname = "udp_slot_table_entries",
|
|
|
|
.data = &xprt_udp_slot_table_entries,
|
|
|
|
.maxlen = sizeof(unsigned int),
|
|
|
|
.mode = 0644,
|
2009-11-16 19:11:48 +08:00
|
|
|
.proc_handler = proc_dointvec_minmax,
|
2006-12-06 05:35:54 +08:00
|
|
|
.extra1 = &min_slot_table_size,
|
|
|
|
.extra2 = &max_slot_table_size
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.procname = "tcp_slot_table_entries",
|
|
|
|
.data = &xprt_tcp_slot_table_entries,
|
|
|
|
.maxlen = sizeof(unsigned int),
|
|
|
|
.mode = 0644,
|
2009-11-16 19:11:48 +08:00
|
|
|
.proc_handler = proc_dointvec_minmax,
|
2006-12-06 05:35:54 +08:00
|
|
|
.extra1 = &min_slot_table_size,
|
|
|
|
.extra2 = &max_slot_table_size
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.procname = "min_resvport",
|
|
|
|
.data = &xprt_min_resvport,
|
|
|
|
.maxlen = sizeof(unsigned int),
|
|
|
|
.mode = 0644,
|
2009-11-16 19:11:48 +08:00
|
|
|
.proc_handler = proc_dointvec_minmax,
|
2006-12-06 05:35:54 +08:00
|
|
|
.extra1 = &xprt_min_resvport_limit,
|
|
|
|
.extra2 = &xprt_max_resvport_limit
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.procname = "max_resvport",
|
|
|
|
.data = &xprt_max_resvport,
|
|
|
|
.maxlen = sizeof(unsigned int),
|
|
|
|
.mode = 0644,
|
2009-11-16 19:11:48 +08:00
|
|
|
.proc_handler = proc_dointvec_minmax,
|
2006-12-06 05:35:54 +08:00
|
|
|
.extra1 = &xprt_min_resvport_limit,
|
|
|
|
.extra2 = &xprt_max_resvport_limit
|
|
|
|
},
|
2009-03-12 02:38:03 +08:00
|
|
|
{
|
|
|
|
.procname = "tcp_fin_timeout",
|
|
|
|
.data = &xs_tcp_fin_timeout,
|
|
|
|
.maxlen = sizeof(xs_tcp_fin_timeout),
|
|
|
|
.mode = 0644,
|
2009-11-16 19:11:48 +08:00
|
|
|
.proc_handler = proc_dointvec_jiffies,
|
2006-12-06 05:35:54 +08:00
|
|
|
},
|
2009-11-06 05:32:03 +08:00
|
|
|
{ },
|
2006-12-06 05:35:54 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
static ctl_table sunrpc_table[] = {
|
|
|
|
{
|
|
|
|
.procname = "sunrpc",
|
|
|
|
.mode = 0555,
|
|
|
|
.child = xs_tunables_table
|
|
|
|
},
|
2009-11-06 05:32:03 +08:00
|
|
|
{ },
|
2006-12-06 05:35:54 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2005-08-26 07:25:55 +08:00
|
|
|
/*
|
|
|
|
* Wait duration for a reply from the RPC portmapper.
|
|
|
|
*/
|
|
|
|
#define XS_BIND_TO (60U * HZ)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Delay if a UDP socket connect error occurs. This is most likely some
|
|
|
|
* kind of resource problem on the local host.
|
|
|
|
*/
|
|
|
|
#define XS_UDP_REEST_TO (2U * HZ)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The reestablish timeout allows clients to delay for a bit before attempting
|
|
|
|
* to reconnect to a server that just dropped our connection.
|
|
|
|
*
|
|
|
|
* We implement an exponential backoff when trying to reestablish a TCP
|
|
|
|
* transport connection with the server. Some servers like to drop a TCP
|
|
|
|
* connection when they are overworked, so we start with a short timeout and
|
|
|
|
* increase over time if the server is down or not responding.
|
|
|
|
*/
|
|
|
|
#define XS_TCP_INIT_REEST_TO (3U * HZ)
|
|
|
|
#define XS_TCP_MAX_REEST_TO (5U * 60 * HZ)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* TCP idle timeout; client drops the transport socket if it is idle
|
|
|
|
* for this long. Note that we also timeout UDP sockets to prevent
|
|
|
|
* holding port numbers when there is no RPC traffic.
|
|
|
|
*/
|
|
|
|
#define XS_IDLE_DISC_TO (5U * 60 * HZ)
|
|
|
|
|
2005-08-12 04:25:23 +08:00
|
|
|
#ifdef RPC_DEBUG
|
|
|
|
# undef RPC_DEBUG_DATA
|
2005-08-12 04:25:26 +08:00
|
|
|
# define RPCDBG_FACILITY RPCDBG_TRANS
|
2005-08-12 04:25:23 +08:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef RPC_DEBUG_DATA
/*
 * xs_pktdump - hex-dump the first part of a packet to the debug log
 * @msg: label printed before the dump
 * @packet: start of the packet data
 * @count: number of bytes available
 *
 * Dumps at most 128 bytes, 4 bytes per group, 32 bytes per line.
 * Note: reads buf[j+1..j+3] without re-checking @count, so the tail of
 * a packet whose length is not a multiple of 4 may be over-read by up
 * to 3 bytes — debug-only code.
 */
static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	u8 *buf = (u8 *) packet;
	int j;

	dprintk("RPC: %s\n", msg);
	for (j = 0; j < count && j < 128; j += 4) {
		if (!(j & 31)) {
			if (j)
				dprintk("\n");
			dprintk("0x%04x ", j);
		}
		dprintk("%02x%02x%02x%02x ",
			buf[j], buf[j+1], buf[j+2], buf[j+3]);
	}
	dprintk("\n");
}
#else
/* Dummy used when packet-data debugging is compiled out. */
static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	/* NOP */
}
#endif
|
|
|
|
|
2006-12-06 05:35:11 +08:00
|
|
|
/*
 * struct sock_xprt - socket-based RPC transport
 *
 * Embeds the generic rpc_xprt as its first member so that
 * container_of() can convert between the two (see the various
 * container_of(xprt, struct sock_xprt, xprt) calls below).
 */
struct sock_xprt {
	struct rpc_xprt		xprt;

	/*
	 * Network layer
	 */
	struct socket *		sock;	/* kernel socket, NULL when closed */
	struct sock *		inet;	/* sock->sk shortcut, NULL when closed */

	/*
	 * State of TCP reply receive
	 */
	__be32			tcp_fraghdr,	/* partial record marker */
				tcp_xid,	/* XID of reply being read */
				tcp_calldir;	/* call/reply direction word */

	u32			tcp_offset,	/* bytes consumed in fragment */
				tcp_reclen;	/* length of current fragment */

	unsigned long		tcp_copied,	/* bytes copied to the request */
				tcp_flags;	/* TCP_RCV_* / TCP_RPC_* bits */

	/*
	 * Connection of transports
	 */
	struct delayed_work	connect_worker;
	struct sockaddr_storage	srcaddr;	/* local bind address */
	unsigned short		srcport;	/* local bind port */

	/*
	 * UDP socket buffer size parameters
	 */
	size_t			rcvsize,
				sndsize;

	/*
	 * Saved socket callback addresses, restored by
	 * xs_restore_old_callbacks() when the socket is torn down.
	 */
	void			(*old_data_ready)(struct sock *, int);
	void			(*old_state_change)(struct sock *);
	void			(*old_write_space)(struct sock *);
	void			(*old_error_report)(struct sock *);
};
|
|
|
|
|
2006-12-06 05:35:23 +08:00
|
|
|
/*
|
|
|
|
* TCP receive state flags
|
|
|
|
*/
|
|
|
|
#define TCP_RCV_LAST_FRAG (1UL << 0)
|
|
|
|
#define TCP_RCV_COPY_FRAGHDR (1UL << 1)
|
|
|
|
#define TCP_RCV_COPY_XID (1UL << 2)
|
|
|
|
#define TCP_RCV_COPY_DATA (1UL << 3)
|
2009-04-01 21:22:54 +08:00
|
|
|
#define TCP_RCV_READ_CALLDIR (1UL << 4)
|
|
|
|
#define TCP_RCV_COPY_CALLDIR (1UL << 5)
|
2009-04-01 21:22:53 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* TCP RPC flags
|
|
|
|
*/
|
2009-04-01 21:22:54 +08:00
|
|
|
#define TCP_RPC_REPLY (1UL << 6)
|
2006-12-06 05:35:23 +08:00
|
|
|
|
2007-08-06 23:57:58 +08:00
|
|
|
/* Generic view of the transport's destination address. */
static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
{
	return (struct sockaddr *) &xprt->addr;
}

/* IPv4 view of the destination address; valid only when sa_family == AF_INET. */
static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
{
	return (struct sockaddr_in *) &xprt->addr;
}

/* IPv6 view of the destination address; valid only when sa_family == AF_INET6. */
static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
{
	return (struct sockaddr_in6 *) &xprt->addr;
}
|
|
|
|
|
2009-08-10 03:09:36 +08:00
|
|
|
/*
 * xs_format_common_peer_addresses - fill in printable peer address strings
 * @xprt: transport whose address_strings[] are being populated
 *
 * Sets RPC_DISPLAY_ADDR to the presentation form of the destination
 * address and RPC_DISPLAY_HEX_ADDR to a raw hex form.  Both strings are
 * kstrdup()'d and later released by xs_free_peer_addresses().
 * Only AF_INET and AF_INET6 are supported; any other family is a bug.
 */
static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = xs_addr(xprt);
	struct sockaddr_in6 *sin6;
	struct sockaddr_in *sin;
	char buf[128];

	(void)rpc_ntop(sap, buf, sizeof(buf));
	xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

	switch (sap->sa_family) {
	case AF_INET:
		sin = xs_addr_in(xprt);
		/* 8 hex digits of the network-order IPv4 address */
		snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
		break;
	case AF_INET6:
		sin6 = xs_addr_in6(xprt);
		snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
		break;
	default:
		BUG();
	}
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
}
|
|
|
|
|
2009-08-10 03:09:46 +08:00
|
|
|
/*
 * xs_format_common_peer_ports - fill in printable peer port strings
 * @xprt: transport whose address_strings[] are being populated
 *
 * Sets RPC_DISPLAY_PORT (decimal) and RPC_DISPLAY_HEX_PORT (hex) from
 * the destination port.  Both strings are kstrdup()'d and freed by
 * xs_free_peer_addresses(); the old values must already have been
 * released (see xs_update_peer_port()).
 */
static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = xs_addr(xprt);
	char buf[128];

	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
}
|
2007-08-06 23:57:12 +08:00
|
|
|
|
2009-08-10 03:09:46 +08:00
|
|
|
/*
 * xs_format_peer_addresses - populate all human-readable peer strings
 * @xprt: transport being set up
 * @protocol: static protocol name ("udp"/"tcp"); stored without copying
 * @netid: static netid string; stored without copying
 *
 * @protocol and @netid are stored by reference, which is why
 * xs_free_peer_addresses() skips those two slots when freeing.
 */
static void xs_format_peer_addresses(struct rpc_xprt *xprt,
				     const char *protocol,
				     const char *netid)
{
	xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
	xprt->address_strings[RPC_DISPLAY_NETID] = netid;
	xs_format_common_peer_addresses(xprt);
	xs_format_common_peer_ports(xprt);
}
|
2007-08-06 23:57:12 +08:00
|
|
|
|
2009-08-10 03:09:46 +08:00
|
|
|
/*
 * xs_update_peer_port - refresh the printable port strings
 * @xprt: transport whose destination port has changed
 *
 * Frees the stale port strings before regenerating them, since
 * xs_format_common_peer_ports() kstrdup()'s fresh copies.
 */
static void xs_update_peer_port(struct rpc_xprt *xprt)
{
	kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
	kfree(xprt->address_strings[RPC_DISPLAY_PORT]);

	xs_format_common_peer_ports(xprt);
}
|
|
|
|
|
|
|
|
static void xs_free_peer_addresses(struct rpc_xprt *xprt)
|
|
|
|
{
|
2008-01-15 01:32:20 +08:00
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < RPC_DISPLAY_MAX; i++)
|
|
|
|
switch (i) {
|
|
|
|
case RPC_DISPLAY_PROTO:
|
|
|
|
case RPC_DISPLAY_NETID:
|
|
|
|
continue;
|
|
|
|
default:
|
|
|
|
kfree(xprt->address_strings[i]);
|
|
|
|
}
|
2006-08-23 08:06:18 +08:00
|
|
|
}
|
|
|
|
|
2005-08-12 04:25:29 +08:00
|
|
|
#define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
|
|
|
|
|
2006-10-18 03:06:22 +08:00
|
|
|
/*
 * xs_send_kvec - send (part of) one kvec on a socket
 * @sock: socket to send on
 * @addr: destination (UDP) or NULL (connected/TCP)
 * @addrlen: length of @addr
 * @vec: kvec holding the data
 * @base: offset of the first byte to send within @vec
 * @more: non-zero if more data follows (sets MSG_MORE)
 *
 * Returns the kernel_sendmsg() result: bytes sent or a negative errno.
 * A zero-length iovec still results in a sendmsg call so that @addr
 * and the message flags are honoured.
 */
static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
{
	struct msghdr msg = {
		.msg_name	= addr,
		.msg_namelen	= addrlen,
		.msg_flags	= XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
	};
	struct kvec iov = {
		.iov_base	= vec->iov_base + base,
		.iov_len	= vec->iov_len - base,
	};

	if (iov.iov_len != 0)
		return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
	return kernel_sendmsg(sock, &msg, NULL, 0, 0);
}
|
|
|
|
|
2006-10-18 03:06:22 +08:00
|
|
|
/*
 * xs_send_pagedata - send the page-array portion of an xdr_buf
 * @sock: socket to send on
 * @xdr: buffer whose pages[] are to be sent
 * @base: offset of the first byte to send within the page data
 * @more: non-zero if more data (the tail) follows
 *
 * Walks the page array with ->sendpage(), setting MSG_MORE on every
 * page except the last (unless @more).  Returns the total bytes sent,
 * or, if nothing was sent, the (possibly negative) result of the first
 * sendpage call.  A short send terminates the loop so the caller can
 * retry from the right offset.
 */
static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more)
{
	struct page **ppage;
	unsigned int remainder;
	int err, sent = 0;

	remainder = xdr->page_len - base;
	/* Translate the logical offset into a page index + in-page offset. */
	base += xdr->page_base;
	ppage = xdr->pages + (base >> PAGE_SHIFT);
	base &= ~PAGE_MASK;
	for(;;) {
		unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
		int flags = XS_SENDMSG_FLAGS;

		remainder -= len;
		if (remainder != 0 || more)
			flags |= MSG_MORE;
		err = sock->ops->sendpage(sock, *ppage, base, len, flags);
		/* Stop on the last page, on error, or on a short send. */
		if (remainder == 0 || err != len)
			break;
		sent += err;
		ppage++;
		base = 0;
	}
	if (sent == 0)
		return err;
	if (err > 0)
		sent += err;
	return sent;
}
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
 * xs_sendpages - write pages directly to a socket
 * @sock: socket to send on
 * @addr: UDP only -- address of destination
 * @addrlen: UDP only -- length of destination address
 * @xdr: buffer containing this request
 * @base: starting position in the buffer
 *
 * Sends the head kvec, the page array, and the tail kvec in order,
 * resuming at @base on retries.  Returns the total bytes sent, or a
 * negative errno if nothing was sent.  A partial send returns early so
 * the caller can retry with an updated @base.
 */
static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
{
	unsigned int remainder = xdr->len - base;
	int err, sent = 0;

	if (unlikely(!sock))
		return -ENOTSOCK;

	clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
	/* On a retry (base != 0) the datagram header has already gone out,
	 * so suppress the destination address. */
	if (base != 0) {
		addr = NULL;
		addrlen = 0;
	}

	if (base < xdr->head[0].iov_len || addr != NULL) {
		unsigned int len = xdr->head[0].iov_len - base;
		remainder -= len;
		err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
		if (remainder == 0 || err != len)
			goto out;
		sent += err;
		base = 0;
	} else
		base -= xdr->head[0].iov_len;

	if (base < xdr->page_len) {
		unsigned int len = xdr->page_len - base;
		remainder -= len;
		err = xs_send_pagedata(sock, xdr, base, remainder != 0);
		if (remainder == 0 || err != len)
			goto out;
		sent += err;
		base = 0;
	} else
		base -= xdr->page_len;

	if (base >= xdr->tail[0].iov_len)
		return sent;
	err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
out:
	if (sent == 0)
		return err;
	if (err > 0)
		sent += err;
	return sent;
}
|
|
|
|
|
2008-04-18 06:52:19 +08:00
|
|
|
/*
 * xs_nospace_callback - undo the "waiting for buffer space" state
 * @task: task that was queued by xs_nospace()
 *
 * Invoked when the task stops waiting for socket buffer space; reverses
 * the sk_write_pending increment and NOSPACE bit set in xs_nospace().
 */
static void xs_nospace_callback(struct rpc_task *task)
{
	struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);

	transport->inet->sk_write_pending--;
	clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
}
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
 * xs_nospace - place task on wait queue if transmit was incomplete
 * @task: task to put to sleep
 *
 * Returns 0 if the NOSPACE condition has already cleared (caller may
 * retry immediately), -EAGAIN if the task has been queued to wait for
 * buffer space, or -ENOTCONN if the transport disconnected meanwhile.
 */
static int xs_nospace(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	int ret = 0;

	dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
			task->tk_pid, req->rq_slen - req->rq_bytes_sent,
			req->rq_slen);

	/* Protect against races with write_space */
	spin_lock_bh(&xprt->transport_lock);

	/* Don't race with disconnect */
	if (xprt_connected(xprt)) {
		if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
			ret = -EAGAIN;
			/*
			 * Notify TCP that we're limited by the application
			 * window size
			 */
			set_bit(SOCK_NOSPACE, &transport->sock->flags);
			/* Balanced by xs_nospace_callback() when we wake. */
			transport->inet->sk_write_pending++;
			/* ...and wait for more buffer space */
			xprt_wait_for_buffer_space(task, xs_nospace_callback);
		}
	} else {
		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
		ret = -ENOTCONN;
	}

	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}
|
|
|
|
|
|
|
|
/**
 * xs_udp_send_request - write an RPC request to a UDP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 */
static int xs_udp_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int status;

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	/* Can't send before the destination port has been resolved. */
	if (!xprt_bound(xprt))
		return -ENOTCONN;
	status = xs_sendpages(transport->sock,
			      xs_addr(xprt),
			      xprt->addrlen, xdr,
			      req->rq_bytes_sent);

	dprintk("RPC: xs_udp_send_request(%u) = %d\n",
			xdr->len - req->rq_bytes_sent, status);

	if (status >= 0) {
		req->rq_xmit_bytes_sent += status;
		if (status >= req->rq_slen)
			return 0;
		/* Still some bytes left; set up for a retry later. */
		status = -EAGAIN;
	}

	switch (status) {
	case -ENOTSOCK:
		status = -ENOTCONN;
		/* Should we call xs_close() here? */
		break;
	case -EAGAIN:
		status = xs_nospace(task);
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
		/* fall through: treat unknown errors like the hard ones */
	case -ENETUNREACH:
	case -EPIPE:
	case -ECONNREFUSED:
		/* When the server has died, an ICMP port unreachable message
		 * prompts ECONNREFUSED. */
		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
	}

	return status;
}
|
|
|
|
|
2007-11-06 04:44:12 +08:00
|
|
|
/**
|
|
|
|
* xs_tcp_shutdown - gracefully shut down a TCP socket
|
|
|
|
* @xprt: transport
|
|
|
|
*
|
|
|
|
* Initiates a graceful shutdown of the TCP socket by calling the
|
|
|
|
* equivalent of shutdown(SHUT_WR);
|
|
|
|
*/
|
|
|
|
static void xs_tcp_shutdown(struct rpc_xprt *xprt)
|
|
|
|
{
|
|
|
|
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
|
|
|
|
struct socket *sock = transport->sock;
|
|
|
|
|
|
|
|
if (sock != NULL)
|
|
|
|
kernel_sock_shutdown(sock, SHUT_WR);
|
|
|
|
}
|
|
|
|
|
2005-08-26 07:25:49 +08:00
|
|
|
static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
|
|
|
|
{
|
|
|
|
u32 reclen = buf->len - sizeof(rpc_fraghdr);
|
|
|
|
rpc_fraghdr *base = buf->head[0].iov_base;
|
|
|
|
*base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
|
|
|
|
}
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
 * xs_tcp_send_request - write an RPC request to a TCP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 *
 * XXX: In the case of soft timeouts, should we eventually give up
 *	if sendmsg is not able to make progress?
 */
static int xs_tcp_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int status;

	xs_encode_tcp_record_marker(&req->rq_snd_buf);

	xs_pktdump("packet data:",
		   req->rq_svec->iov_base,
		   req->rq_svec->iov_len);

	/* Continue transmitting the packet/record. We must be careful
	 * to cope with writespace callbacks arriving _after_ we have
	 * called sendmsg(). */
	while (1) {
		status = xs_sendpages(transport->sock,
				      NULL, 0, xdr, req->rq_bytes_sent);

		dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
			xdr->len - req->rq_bytes_sent, status);

		if (unlikely(status < 0))
			break;

		/* If we've sent the entire packet, immediately
		 * reset the count of bytes sent. */
		req->rq_bytes_sent += status;
		req->rq_xmit_bytes_sent += status;
		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
			req->rq_bytes_sent = 0;
			return 0;
		}

		/* Partial progress: loop and send the remainder.
		 * Zero progress means the socket is full: back off. */
		if (status != 0)
			continue;
		status = -EAGAIN;
		break;
	}

	switch (status) {
	case -ENOTSOCK:
		status = -ENOTCONN;
		/* Should we call xs_close() here? */
		break;
	case -EAGAIN:
		status = xs_nospace(task);
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
		/* fall through: treat unknown errors as connection errors */
	case -ECONNRESET:
	case -EPIPE:
		xs_tcp_shutdown(xprt);
		/* fall through */
	case -ECONNREFUSED:
	case -ENOTCONN:
		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
	}

	return status;
}
|
|
|
|
|
2006-07-28 05:22:50 +08:00
|
|
|
/**
 * xs_tcp_release_xprt - clean up after a tcp transmission
 * @xprt: transport
 * @task: rpc task
 *
 * This cleans up if an error causes us to abort the transmission of a request.
 * In this case, the socket may need to be reset in order to avoid confusing
 * the server.
 */
static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	if (task != xprt->snd_task)
		return;
	if (task == NULL)
		goto out_release;
	req = task->tk_rqstp;
	/* Nothing sent yet, or fully sent: the stream is still in sync. */
	if (req->rq_bytes_sent == 0)
		goto out_release;
	if (req->rq_bytes_sent == req->rq_snd_buf.len)
		goto out_release;
	/* Mid-record abort: the byte stream is corrupt from the server's
	 * point of view, so request that the connection be closed. */
	set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state);
out_release:
	xprt_release_xprt(xprt, task);
}
|
|
|
|
|
2008-10-29 03:21:39 +08:00
|
|
|
static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
|
|
|
|
{
|
|
|
|
transport->old_data_ready = sk->sk_data_ready;
|
|
|
|
transport->old_state_change = sk->sk_state_change;
|
|
|
|
transport->old_write_space = sk->sk_write_space;
|
|
|
|
transport->old_error_report = sk->sk_error_report;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
|
|
|
|
{
|
|
|
|
sk->sk_data_ready = transport->old_data_ready;
|
|
|
|
sk->sk_state_change = transport->old_state_change;
|
|
|
|
sk->sk_write_space = transport->old_write_space;
|
|
|
|
sk->sk_error_report = transport->old_error_report;
|
|
|
|
}
|
|
|
|
|
2009-03-12 02:10:21 +08:00
|
|
|
/*
 * xs_reset_transport - detach and release the transport's socket
 * @transport: transport to strip of its socket
 *
 * Clears transport->sock/inet, restores the original socket callbacks,
 * and releases the socket.  The detach is done under sk_callback_lock
 * so that callbacks running concurrently see a consistent state.
 * No-op if no socket is attached.
 */
static void xs_reset_transport(struct sock_xprt *transport)
{
	struct socket *sock = transport->sock;
	struct sock *sk = transport->inet;

	if (sk == NULL)
		return;

	/* Forget the cached source port so a reconnect picks a fresh one. */
	transport->srcport = 0;

	write_lock_bh(&sk->sk_callback_lock);
	transport->inet = NULL;
	transport->sock = NULL;

	sk->sk_user_data = NULL;

	xs_restore_old_callbacks(transport, sk);
	write_unlock_bh(&sk->sk_callback_lock);

	sk->sk_no_check = 0;

	sock_release(sock);
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* xs_close - close a socket
|
|
|
|
* @xprt: transport
|
|
|
|
*
|
|
|
|
* This is used when all requests are complete; ie, no DRC state remains
|
|
|
|
* on the server we want to save.
|
2009-04-22 05:18:20 +08:00
|
|
|
*
|
|
|
|
* The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
|
|
|
|
* xs_reset_transport() zeroing the socket from underneath a writer.
|
2009-03-12 02:10:21 +08:00
|
|
|
*/
|
|
|
|
static void xs_close(struct rpc_xprt *xprt)
|
|
|
|
{
|
|
|
|
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
|
|
|
|
|
|
|
|
dprintk("RPC: xs_close xprt %p\n", xprt);
|
|
|
|
|
|
|
|
xs_reset_transport(transport);
|
NFS/RPC: fix problems with reestablish_timeout and related code.
[[resending with correct cc: - "vfs.kernel.org" just isn't right!]]
xprt->reestablish_timeout is used to cause TCP connection attempts to
back off if the connection fails so as not to hammer the network,
but to still allow immediate connections when there is no reason to
believe there is a problem.
It is not used for the first connection (when transport->sock is NULL)
but only on reconnects.
It is currently set:
a/ to 0 when xs_tcp_state_change finds a state of TCP_FIN_WAIT1
on the assumption that the client has closed the connection
so the reconnect should be immediate when needed.
b/ to at least XS_TCP_INIT_REEST_TO when xs_tcp_state_change
detects TCP_CLOSING or TCP_CLOSE_WAIT on the assumption that the
server closed the connection so a small delay at least is
required.
c/ as above when xs_tcp_state_change detects TCP_SYN_SENT, so that
it is never 0 while a connection has been attempted, else
the doubling will produce 0 and there will be no backoff.
d/ to double is value (up to a limit) when delaying a connection,
thus providing exponential backoff and
e/ to XS_TCP_INIT_REEST_TO in xs_setup_tcp as simple initialisation.
So you can see it is highly dependant on xs_tcp_state_change being
called as expected. However experimental evidence shows that
xs_tcp_state_change does not see all state changes.
("rpcdebug -m rpc trans" can help show what actually happens).
Results show:
TCP_ESTABLISHED is reported when a connection is made. TCP_SYN_SENT
is never reported, so rule 'c' above is never effective.
When the server closes the connection, TCP_CLOSE_WAIT and
TCP_LAST_ACK *might* be reported, and TCP_CLOSE is always
reported. This rule 'b' above will sometimes be effective, but
not reliably.
When the client closes the connection, it used to result in
TCP_FIN_WAIT1, TCP_FIN_WAIT2, TCP_CLOSE. However since commit
f75e674 (SUNRPC: Fix the problem of EADDRNOTAVAIL syslog floods on
reconnect) we don't see *any* events on client-close. I think this
is because xs_restore_old_callbacks is called to disconnect
xs_tcp_state_change before the socket is closed.
In any case, rule 'a' no longer applies.
So all that is left are rule d, which successfully doubles the
timeout which is never rest, and rule e which initialises the timeout.
Even if the rules worked as expected, there would be a problem because
a successful connection does not reset the timeout, so a sequence
of events where the server closes the connection (e.g. during failover
testing) will cause longer and longer timeouts with no good reason.
This patch:
- sets reestablish_timeout to 0 in xs_close thus effecting rule 'a'
- sets it to 0 in xs_tcp_data_ready to ensure that a successful
connection resets the timeout
- sets it to at least XS_TCP_INIT_REEST_TO after it is doubled,
thus effecting rule c
I have not reimplemented rule b and the new version of rule c
seems sufficient.
I suspect other code in xs_tcp_data_ready needs to be revised as well.
For example I don't think connect_cookie is being incremented as often
as it should be.
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2009-09-24 02:36:37 +08:00
|
|
|
xprt->reestablish_timeout = 0;
|
2009-03-12 02:10:21 +08:00
|
|
|
|
2006-01-03 16:55:55 +08:00
|
|
|
smp_mb__before_clear_bit();
|
2009-03-12 02:38:03 +08:00
|
|
|
clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
|
2006-01-03 16:55:55 +08:00
|
|
|
clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
|
2007-11-06 06:42:39 +08:00
|
|
|
clear_bit(XPRT_CLOSING, &xprt->state);
|
2006-01-03 16:55:55 +08:00
|
|
|
smp_mb__after_clear_bit();
|
2007-11-07 07:44:20 +08:00
|
|
|
xprt_disconnect_done(xprt);
|
2005-08-12 04:25:23 +08:00
|
|
|
}
|
|
|
|
|
2009-04-22 05:18:20 +08:00
|
|
|
static void xs_tcp_close(struct rpc_xprt *xprt)
|
|
|
|
{
|
|
|
|
if (test_and_clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state))
|
|
|
|
xs_close(xprt);
|
|
|
|
else
|
|
|
|
xs_tcp_shutdown(xprt);
|
|
|
|
}
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
|
|
|
|
* xs_destroy - prepare to shutdown a transport
|
|
|
|
* @xprt: doomed transport
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static void xs_destroy(struct rpc_xprt *xprt)
|
2005-08-12 04:25:23 +08:00
|
|
|
{
|
2006-12-06 05:35:26 +08:00
|
|
|
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
|
|
|
|
|
2007-02-01 01:14:08 +08:00
|
|
|
dprintk("RPC: xs_destroy xprt %p\n", xprt);
|
2005-08-12 04:25:26 +08:00
|
|
|
|
2010-12-14 23:21:17 +08:00
|
|
|
cancel_delayed_work_sync(&transport->connect_worker);
|
2005-08-12 04:25:23 +08:00
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
xs_close(xprt);
|
2006-08-23 08:06:18 +08:00
|
|
|
xs_free_peer_addresses(xprt);
|
2010-09-29 20:03:13 +08:00
|
|
|
xprt_free(xprt);
|
2007-09-11 01:46:39 +08:00
|
|
|
module_put(THIS_MODULE);
|
2005-08-12 04:25:23 +08:00
|
|
|
}
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
|
|
|
|
{
|
|
|
|
return (struct rpc_xprt *) sk->sk_user_data;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* xs_udp_data_ready - "data ready" callback for UDP sockets
|
|
|
|
* @sk: socket with data to read
|
|
|
|
* @len: how much data to read
|
|
|
|
*
|
2005-08-12 04:25:23 +08:00
|
|
|
*/
|
2005-08-12 04:25:26 +08:00
|
|
|
static void xs_udp_data_ready(struct sock *sk, int len)
|
2005-08-12 04:25:23 +08:00
|
|
|
{
|
2005-08-12 04:25:26 +08:00
|
|
|
struct rpc_task *task;
|
|
|
|
struct rpc_xprt *xprt;
|
2005-08-12 04:25:23 +08:00
|
|
|
struct rpc_rqst *rovr;
|
2005-08-12 04:25:26 +08:00
|
|
|
struct sk_buff *skb;
|
2005-08-12 04:25:23 +08:00
|
|
|
int err, repsize, copied;
|
2006-09-27 13:29:38 +08:00
|
|
|
u32 _xid;
|
|
|
|
__be32 *xp;
|
2005-08-12 04:25:23 +08:00
|
|
|
|
2010-09-22 20:43:39 +08:00
|
|
|
read_lock_bh(&sk->sk_callback_lock);
|
2007-02-01 01:14:08 +08:00
|
|
|
dprintk("RPC: xs_udp_data_ready...\n");
|
2005-08-12 04:25:26 +08:00
|
|
|
if (!(xprt = xprt_from_sock(sk)))
|
2005-08-12 04:25:23 +08:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (xprt->shutdown)
|
|
|
|
goto dropit;
|
|
|
|
|
|
|
|
repsize = skb->len - sizeof(struct udphdr);
|
|
|
|
if (repsize < 4) {
|
2007-02-01 01:14:08 +08:00
|
|
|
dprintk("RPC: impossible RPC reply size %d!\n", repsize);
|
2005-08-12 04:25:23 +08:00
|
|
|
goto dropit;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Copy the XID from the skb... */
|
|
|
|
xp = skb_header_pointer(skb, sizeof(struct udphdr),
|
|
|
|
sizeof(_xid), &_xid);
|
|
|
|
if (xp == NULL)
|
|
|
|
goto dropit;
|
|
|
|
|
|
|
|
/* Look up and lock the request corresponding to the given XID */
|
2005-08-12 04:25:32 +08:00
|
|
|
spin_lock(&xprt->transport_lock);
|
2005-08-12 04:25:23 +08:00
|
|
|
rovr = xprt_lookup_rqst(xprt, *xp);
|
|
|
|
if (!rovr)
|
|
|
|
goto out_unlock;
|
|
|
|
task = rovr->rq_task;
|
|
|
|
|
|
|
|
if ((copied = rovr->rq_private_buf.buflen) > repsize)
|
|
|
|
copied = repsize;
|
|
|
|
|
|
|
|
/* Suck it into the iovec, verify checksum if not done by hw. */
|
2007-12-12 03:30:32 +08:00
|
|
|
if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
|
|
|
|
UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS);
|
2005-08-12 04:25:23 +08:00
|
|
|
goto out_unlock;
|
2007-12-12 03:30:32 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
|
2005-08-12 04:25:23 +08:00
|
|
|
|
|
|
|
/* Something worked... */
|
2009-06-02 13:19:30 +08:00
|
|
|
dst_confirm(skb_dst(skb));
|
2005-08-12 04:25:23 +08:00
|
|
|
|
2005-08-26 07:25:52 +08:00
|
|
|
xprt_adjust_cwnd(task, copied);
|
|
|
|
xprt_complete_rqst(task, copied);
|
2005-08-12 04:25:23 +08:00
|
|
|
|
|
|
|
out_unlock:
|
2005-08-12 04:25:32 +08:00
|
|
|
spin_unlock(&xprt->transport_lock);
|
2005-08-12 04:25:23 +08:00
|
|
|
dropit:
|
|
|
|
skb_free_datagram(sk, skb);
|
|
|
|
out:
|
2010-09-22 20:43:39 +08:00
|
|
|
read_unlock_bh(&sk->sk_callback_lock);
|
2005-08-12 04:25:23 +08:00
|
|
|
}
|
|
|
|
|
2006-12-06 05:35:44 +08:00
|
|
|
static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
|
2005-08-12 04:25:23 +08:00
|
|
|
{
|
2006-12-06 05:35:19 +08:00
|
|
|
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
|
2005-08-12 04:25:23 +08:00
|
|
|
size_t len, used;
|
|
|
|
char *p;
|
|
|
|
|
2006-12-06 05:35:19 +08:00
|
|
|
p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
|
|
|
|
len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
|
2006-12-06 05:35:41 +08:00
|
|
|
used = xdr_skb_read_bits(desc, p, len);
|
2006-12-06 05:35:19 +08:00
|
|
|
transport->tcp_offset += used;
|
2005-08-12 04:25:23 +08:00
|
|
|
if (used != len)
|
|
|
|
return;
|
2005-08-26 07:25:49 +08:00
|
|
|
|
2006-12-06 05:35:19 +08:00
|
|
|
transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
|
|
|
|
if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
|
2006-12-06 05:35:23 +08:00
|
|
|
transport->tcp_flags |= TCP_RCV_LAST_FRAG;
|
2005-08-12 04:25:23 +08:00
|
|
|
else
|
2006-12-06 05:35:23 +08:00
|
|
|
transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
|
2006-12-06 05:35:19 +08:00
|
|
|
transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
|
2005-08-26 07:25:49 +08:00
|
|
|
|
2006-12-06 05:35:23 +08:00
|
|
|
transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
|
2006-12-06 05:35:19 +08:00
|
|
|
transport->tcp_offset = 0;
|
2005-08-26 07:25:49 +08:00
|
|
|
|
2005-08-12 04:25:23 +08:00
|
|
|
/* Sanity check of the record length */
|
2009-04-01 21:22:53 +08:00
|
|
|
if (unlikely(transport->tcp_reclen < 8)) {
|
2007-02-01 01:14:08 +08:00
|
|
|
dprintk("RPC: invalid TCP record fragment length\n");
|
2007-11-07 07:40:12 +08:00
|
|
|
xprt_force_disconnect(xprt);
|
2005-08-12 04:25:26 +08:00
|
|
|
return;
|
2005-08-12 04:25:23 +08:00
|
|
|
}
|
2007-02-01 01:14:08 +08:00
|
|
|
dprintk("RPC: reading TCP record fragment of length %d\n",
|
2006-12-06 05:35:19 +08:00
|
|
|
transport->tcp_reclen);
|
2005-08-12 04:25:23 +08:00
|
|
|
}
|
|
|
|
|
2006-12-06 05:35:19 +08:00
|
|
|
static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
|
2005-08-12 04:25:23 +08:00
|
|
|
{
|
2006-12-06 05:35:19 +08:00
|
|
|
if (transport->tcp_offset == transport->tcp_reclen) {
|
2006-12-06 05:35:23 +08:00
|
|
|
transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
|
2006-12-06 05:35:19 +08:00
|
|
|
transport->tcp_offset = 0;
|
2006-12-06 05:35:23 +08:00
|
|
|
if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
|
|
|
|
transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
|
|
|
|
transport->tcp_flags |= TCP_RCV_COPY_XID;
|
2006-12-06 05:35:19 +08:00
|
|
|
transport->tcp_copied = 0;
|
2005-08-12 04:25:23 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-12-06 05:35:44 +08:00
|
|
|
static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
|
2005-08-12 04:25:23 +08:00
|
|
|
{
|
|
|
|
size_t len, used;
|
|
|
|
char *p;
|
|
|
|
|
2006-12-06 05:35:19 +08:00
|
|
|
len = sizeof(transport->tcp_xid) - transport->tcp_offset;
|
2007-02-01 01:14:08 +08:00
|
|
|
dprintk("RPC: reading XID (%Zu bytes)\n", len);
|
2006-12-06 05:35:19 +08:00
|
|
|
p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
|
2006-12-06 05:35:41 +08:00
|
|
|
used = xdr_skb_read_bits(desc, p, len);
|
2006-12-06 05:35:19 +08:00
|
|
|
transport->tcp_offset += used;
|
2005-08-12 04:25:23 +08:00
|
|
|
if (used != len)
|
|
|
|
return;
|
2006-12-06 05:35:23 +08:00
|
|
|
transport->tcp_flags &= ~TCP_RCV_COPY_XID;
|
2009-04-01 21:22:54 +08:00
|
|
|
transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
|
2006-12-06 05:35:19 +08:00
|
|
|
transport->tcp_copied = 4;
|
2009-04-01 21:22:53 +08:00
|
|
|
dprintk("RPC: reading %s XID %08x\n",
|
|
|
|
(transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
|
|
|
|
: "request with",
|
2006-12-06 05:35:19 +08:00
|
|
|
ntohl(transport->tcp_xid));
|
|
|
|
xs_tcp_check_fraghdr(transport);
|
2005-08-12 04:25:23 +08:00
|
|
|
}
|
|
|
|
|
2009-04-01 21:22:53 +08:00
|
|
|
static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
|
|
|
|
struct xdr_skb_reader *desc)
|
2005-08-12 04:25:23 +08:00
|
|
|
{
|
2009-04-01 21:22:53 +08:00
|
|
|
size_t len, used;
|
|
|
|
u32 offset;
|
2010-06-17 01:57:32 +08:00
|
|
|
char *p;
|
2009-04-01 21:22:53 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We want transport->tcp_offset to be 8 at the end of this routine
|
|
|
|
* (4 bytes for the xid and 4 bytes for the call/reply flag).
|
|
|
|
* When this function is called for the first time,
|
|
|
|
* transport->tcp_offset is 4 (after having already read the xid).
|
|
|
|
*/
|
|
|
|
offset = transport->tcp_offset - sizeof(transport->tcp_xid);
|
2010-06-17 01:57:32 +08:00
|
|
|
len = sizeof(transport->tcp_calldir) - offset;
|
2009-04-01 21:22:53 +08:00
|
|
|
dprintk("RPC: reading CALL/REPLY flag (%Zu bytes)\n", len);
|
2010-06-17 01:57:32 +08:00
|
|
|
p = ((char *) &transport->tcp_calldir) + offset;
|
|
|
|
used = xdr_skb_read_bits(desc, p, len);
|
2009-04-01 21:22:53 +08:00
|
|
|
transport->tcp_offset += used;
|
|
|
|
if (used != len)
|
|
|
|
return;
|
2009-04-01 21:22:54 +08:00
|
|
|
transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
|
|
|
|
/*
|
|
|
|
* We don't yet have the XDR buffer, so we will write the calldir
|
|
|
|
* out after we get the buffer from the 'struct rpc_rqst'
|
|
|
|
*/
|
2010-06-17 01:57:32 +08:00
|
|
|
switch (ntohl(transport->tcp_calldir)) {
|
|
|
|
case RPC_REPLY:
|
|
|
|
transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
|
|
|
|
transport->tcp_flags |= TCP_RCV_COPY_DATA;
|
2009-04-01 21:22:53 +08:00
|
|
|
transport->tcp_flags |= TCP_RPC_REPLY;
|
2010-06-17 01:57:32 +08:00
|
|
|
break;
|
|
|
|
case RPC_CALL:
|
|
|
|
transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
|
|
|
|
transport->tcp_flags |= TCP_RCV_COPY_DATA;
|
2009-04-01 21:22:53 +08:00
|
|
|
transport->tcp_flags &= ~TCP_RPC_REPLY;
|
2010-06-17 01:57:32 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
dprintk("RPC: invalid request message type\n");
|
|
|
|
xprt_force_disconnect(&transport->xprt);
|
|
|
|
}
|
2009-04-01 21:22:53 +08:00
|
|
|
xs_tcp_check_fraghdr(transport);
|
|
|
|
}
|
|
|
|
|
2009-04-01 21:23:02 +08:00
|
|
|
/*
 * Copy record payload from the stream into the request's private XDR
 * buffer, tracking how much of the record (tcp_offset) and of the
 * buffer (tcp_copied) has been consumed.  Clears TCP_RCV_COPY_DATA
 * once the request's buffer is full or the last fragment has been
 * fully read.
 */
static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
				     struct xdr_skb_reader *desc,
				     struct rpc_rqst *req)
{
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *rcvbuf;
	size_t len;
	ssize_t r;

	rcvbuf = &req->rq_private_buf;

	if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
		/*
		 * Save the RPC direction in the XDR buffer
		 */
		memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
			&transport->tcp_calldir,
			sizeof(transport->tcp_calldir));
		transport->tcp_copied += sizeof(transport->tcp_calldir);
		transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
	}

	len = desc->count;
	if (len > transport->tcp_reclen - transport->tcp_offset) {
		/* The skb holds more than what remains of this record:
		 * copy through a clamped shadow descriptor so we do not
		 * consume bytes belonging to the next record. */
		struct xdr_skb_reader my_desc;

		len = transport->tcp_reclen - transport->tcp_offset;
		memcpy(&my_desc, desc, sizeof(my_desc));
		my_desc.count = len;
		r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
					  &my_desc, xdr_skb_read_bits);
		/* Propagate the shadow descriptor's progress back */
		desc->count -= r;
		desc->offset += r;
	} else
		r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
					  desc, xdr_skb_read_bits);

	if (r > 0) {
		transport->tcp_copied += r;
		transport->tcp_offset += r;
	}
	if (r != len) {
		/* Error when copying to the receive buffer,
		 * usually because we weren't able to allocate
		 * additional buffer pages. All we can do now
		 * is turn off TCP_RCV_COPY_DATA, so the request
		 * will not receive any additional updates,
		 * and time out.
		 * Any remaining data from this record will
		 * be discarded.
		 */
		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
		dprintk("RPC: XID %08x truncated request\n",
				ntohl(transport->tcp_xid));
		dprintk("RPC: xprt = %p, tcp_copied = %lu, "
				"tcp_offset = %u, tcp_reclen = %u\n",
				xprt, transport->tcp_copied,
				transport->tcp_offset, transport->tcp_reclen);
		return;
	}

	dprintk("RPC: XID %08x read %Zd bytes\n",
			ntohl(transport->tcp_xid), r);
	dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, "
			"tcp_reclen = %u\n", xprt, transport->tcp_copied,
			transport->tcp_offset, transport->tcp_reclen);

	/* Stop copying once the reply buffer is full, or when the last
	 * fragment of the record has been consumed. */
	if (transport->tcp_copied == req->rq_private_buf.buflen)
		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
	else if (transport->tcp_offset == transport->tcp_reclen) {
		if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
	}
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Finds the request corresponding to the RPC xid and invokes the common
|
|
|
|
* tcp read code to read the data.
|
|
|
|
*/
|
|
|
|
static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
|
|
|
|
struct xdr_skb_reader *desc)
|
|
|
|
{
|
|
|
|
struct sock_xprt *transport =
|
|
|
|
container_of(xprt, struct sock_xprt, xprt);
|
|
|
|
struct rpc_rqst *req;
|
|
|
|
|
|
|
|
dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid));
|
|
|
|
|
|
|
|
/* Find and lock the request corresponding to this xid */
|
|
|
|
spin_lock(&xprt->transport_lock);
|
|
|
|
req = xprt_lookup_rqst(xprt, transport->tcp_xid);
|
|
|
|
if (!req) {
|
|
|
|
dprintk("RPC: XID %08x request not found!\n",
|
|
|
|
ntohl(transport->tcp_xid));
|
|
|
|
spin_unlock(&xprt->transport_lock);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
xs_tcp_read_common(xprt, desc, req);
|
|
|
|
|
2006-12-06 05:35:23 +08:00
|
|
|
if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
|
2006-12-06 05:35:19 +08:00
|
|
|
xprt_complete_rqst(req->rq_task, transport->tcp_copied);
|
2009-04-01 21:23:02 +08:00
|
|
|
|
2005-08-12 04:25:32 +08:00
|
|
|
spin_unlock(&xprt->transport_lock);
|
2009-04-01 21:23:02 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#if defined(CONFIG_NFS_V4_1)
/*
 * Obtains an rpc_rqst previously allocated and invokes the common
 * tcp read code to read the data.  The result is placed in the callback
 * queue.
 * If we're unable to obtain the rpc_rqst we schedule the closing of the
 * connection and return -1.
 */
static inline int xs_tcp_read_callback(struct rpc_xprt *xprt,
				       struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);
	struct rpc_rqst *req;

	req = xprt_alloc_bc_request(xprt);
	if (req == NULL) {
		printk(KERN_WARNING "Callback slot table overflowed\n");
		xprt_force_disconnect(xprt);
		return -1;
	}

	req->rq_xid = transport->tcp_xid;
	dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid));
	xs_tcp_read_common(xprt, desc, req);

	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) {
		struct svc_serv *bc_serv = xprt->bc_serv;

		/*
		 * Add callback request to callback list. The callback
		 * service sleeps on the sv_cb_waitq waiting for new
		 * requests. Wake it up after adding enqueing the
		 * request.
		 */
		dprintk("RPC: add callback request to list\n");
		spin_lock(&bc_serv->sv_cb_lock);
		list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
		spin_unlock(&bc_serv->sv_cb_lock);
		wake_up(&bc_serv->sv_cb_waitq);
	}

	req->rq_private_buf.len = transport->tcp_copied;

	return 0;
}

/* Dispatch on the call direction recorded in tcp_flags. */
static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
				    struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);

	if (transport->tcp_flags & TCP_RPC_REPLY)
		return xs_tcp_read_reply(xprt, desc);
	return xs_tcp_read_callback(xprt, desc);
}
#else
/* Without backchannel support all inbound data is a reply. */
static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
				    struct xdr_skb_reader *desc)
{
	return xs_tcp_read_reply(xprt, desc);
}
#endif /* CONFIG_NFS_V4_1 */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Read data off the transport. This can be either an RPC_CALL or an
|
|
|
|
* RPC_REPLY. Relay the processing to helper functions.
|
|
|
|
*/
|
|
|
|
static void xs_tcp_read_data(struct rpc_xprt *xprt,
|
|
|
|
struct xdr_skb_reader *desc)
|
|
|
|
{
|
|
|
|
struct sock_xprt *transport =
|
|
|
|
container_of(xprt, struct sock_xprt, xprt);
|
|
|
|
|
|
|
|
if (_xs_tcp_read_data(xprt, desc) == 0)
|
|
|
|
xs_tcp_check_fraghdr(transport);
|
|
|
|
else {
|
|
|
|
/*
|
|
|
|
* The transport_lock protects the request handling.
|
|
|
|
* There's no need to hold it to update the tcp_flags.
|
|
|
|
*/
|
|
|
|
transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
|
|
|
|
}
|
2005-08-12 04:25:23 +08:00
|
|
|
}
|
|
|
|
|
2006-12-06 05:35:44 +08:00
|
|
|
static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
|
2005-08-12 04:25:23 +08:00
|
|
|
{
|
|
|
|
size_t len;
|
|
|
|
|
2006-12-06 05:35:19 +08:00
|
|
|
len = transport->tcp_reclen - transport->tcp_offset;
|
2005-08-12 04:25:23 +08:00
|
|
|
if (len > desc->count)
|
|
|
|
len = desc->count;
|
|
|
|
desc->count -= len;
|
|
|
|
desc->offset += len;
|
2006-12-06 05:35:19 +08:00
|
|
|
transport->tcp_offset += len;
|
2007-02-01 01:14:08 +08:00
|
|
|
dprintk("RPC: discarded %Zu bytes\n", len);
|
2006-12-06 05:35:19 +08:00
|
|
|
xs_tcp_check_fraghdr(transport);
|
2005-08-12 04:25:23 +08:00
|
|
|
}
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/*
 * tcp_read_sock() actor: run the receive state machine over one skb,
 * dispatching to the stage indicated by transport->tcp_flags until
 * the descriptor is drained.  Returns the number of bytes consumed.
 */
static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
{
	struct rpc_xprt *xprt = rd_desc->arg.data;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_skb_reader desc = {
		.skb	= skb,
		.offset	= offset,
		.count	= len,
	};

	dprintk("RPC: xs_tcp_data_recv started\n");
	do {
		/* Read in a new fragment marker if necessary */
		/* Can we ever really expect to get completely empty fragments? */
		if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR)
			xs_tcp_read_fraghdr(xprt, &desc);
		/* Read in the xid if necessary */
		else if (transport->tcp_flags & TCP_RCV_COPY_XID)
			xs_tcp_read_xid(transport, &desc);
		/* Read in the call/reply flag */
		else if (transport->tcp_flags & TCP_RCV_READ_CALLDIR)
			xs_tcp_read_calldir(transport, &desc);
		/* Read in the request data */
		else if (transport->tcp_flags & TCP_RCV_COPY_DATA)
			xs_tcp_read_data(xprt, &desc);
		/* Skip over any trailing bytes on short reads */
		else
			xs_tcp_read_discard(transport, &desc);
	} while (desc.count);
	dprintk("RPC: xs_tcp_data_recv done\n");
	return len - desc.count;
}
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
|
|
|
|
* xs_tcp_data_ready - "data ready" callback for TCP sockets
|
|
|
|
* @sk: socket with data to read
|
|
|
|
* @bytes: how much data to read
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static void xs_tcp_data_ready(struct sock *sk, int bytes)
|
2005-08-12 04:25:23 +08:00
|
|
|
{
|
|
|
|
struct rpc_xprt *xprt;
|
|
|
|
read_descriptor_t rd_desc;
|
2008-02-26 13:40:51 +08:00
|
|
|
int read;
|
2005-08-12 04:25:23 +08:00
|
|
|
|
2007-02-01 01:14:08 +08:00
|
|
|
dprintk("RPC: xs_tcp_data_ready...\n");
|
|
|
|
|
2010-09-22 20:43:39 +08:00
|
|
|
read_lock_bh(&sk->sk_callback_lock);
|
2005-08-12 04:25:26 +08:00
|
|
|
if (!(xprt = xprt_from_sock(sk)))
|
2005-08-12 04:25:23 +08:00
|
|
|
goto out;
|
|
|
|
if (xprt->shutdown)
|
|
|
|
goto out;
|
|
|
|
|
NFS/RPC: fix problems with reestablish_timeout and related code.
[[resending with correct cc: - "vfs.kernel.org" just isn't right!]]
xprt->reestablish_timeout is used to cause TCP connection attempts to
back off if the connection fails so as not to hammer the network,
but to still allow immediate connections when there is no reason to
believe there is a problem.
It is not used for the first connection (when transport->sock is NULL)
but only on reconnects.
It is currently set:
a/ to 0 when xs_tcp_state_change finds a state of TCP_FIN_WAIT1
on the assumption that the client has closed the connection
so the reconnect should be immediate when needed.
b/ to at least XS_TCP_INIT_REEST_TO when xs_tcp_state_change
detects TCP_CLOSING or TCP_CLOSE_WAIT on the assumption that the
server closed the connection so a small delay at least is
required.
c/ as above when xs_tcp_state_change detects TCP_SYN_SENT, so that
it is never 0 while a connection has been attempted, else
the doubling will produce 0 and there will be no backoff.
d/ to double is value (up to a limit) when delaying a connection,
thus providing exponential backoff and
e/ to XS_TCP_INIT_REEST_TO in xs_setup_tcp as simple initialisation.
So you can see it is highly dependant on xs_tcp_state_change being
called as expected. However experimental evidence shows that
xs_tcp_state_change does not see all state changes.
("rpcdebug -m rpc trans" can help show what actually happens).
Results show:
TCP_ESTABLISHED is reported when a connection is made. TCP_SYN_SENT
is never reported, so rule 'c' above is never effective.
When the server closes the connection, TCP_CLOSE_WAIT and
TCP_LAST_ACK *might* be reported, and TCP_CLOSE is always
reported. This rule 'b' above will sometimes be effective, but
not reliably.
When the client closes the connection, it used to result in
TCP_FIN_WAIT1, TCP_FIN_WAIT2, TCP_CLOSE. However since commit
f75e674 (SUNRPC: Fix the problem of EADDRNOTAVAIL syslog floods on
reconnect) we don't see *any* events on client-close. I think this
is because xs_restore_old_callbacks is called to disconnect
xs_tcp_state_change before the socket is closed.
In any case, rule 'a' no longer applies.
So all that is left are rule d, which successfully doubles the
timeout which is never rest, and rule e which initialises the timeout.
Even if the rules worked as expected, there would be a problem because
a successful connection does not reset the timeout, so a sequence
of events where the server closes the connection (e.g. during failover
testing) will cause longer and longer timeouts with no good reason.
This patch:
- sets reestablish_timeout to 0 in xs_close thus effecting rule 'a'
- sets it to 0 in xs_tcp_data_ready to ensure that a successful
connection resets the timeout
- sets it to at least XS_TCP_INIT_REEST_TO after it is doubled,
thus effecting rule c
I have not reimplemented rule b and the new version of rule c
seems sufficient.
I suspect other code in xs_tcp_data_ready needs to be revised as well.
For example I don't think connect_cookie is being incremented as often
as it should be.
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2009-09-24 02:36:37 +08:00
|
|
|
/* Any data means we had a useful conversation, so
|
|
|
|
* the we don't need to delay the next reconnect
|
|
|
|
*/
|
|
|
|
if (xprt->reestablish_timeout)
|
|
|
|
xprt->reestablish_timeout = 0;
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
|
2005-08-12 04:25:23 +08:00
|
|
|
rd_desc.arg.data = xprt;
|
2008-02-26 13:40:51 +08:00
|
|
|
do {
|
|
|
|
rd_desc.count = 65536;
|
|
|
|
read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
|
|
|
|
} while (read > 0);
|
2005-08-12 04:25:23 +08:00
|
|
|
out:
|
2010-09-22 20:43:39 +08:00
|
|
|
read_unlock_bh(&sk->sk_callback_lock);
|
2005-08-12 04:25:23 +08:00
|
|
|
}
|
|
|
|
|
2009-03-12 02:38:03 +08:00
|
|
|
/*
|
|
|
|
* Do the equivalent of linger/linger2 handling for dealing with
|
|
|
|
* broken servers that don't close the socket in a timely
|
|
|
|
* fashion
|
|
|
|
*/
|
|
|
|
static void xs_tcp_schedule_linger_timeout(struct rpc_xprt *xprt,
|
|
|
|
unsigned long timeout)
|
|
|
|
{
|
|
|
|
struct sock_xprt *transport;
|
|
|
|
|
|
|
|
if (xprt_test_and_set_connecting(xprt))
|
|
|
|
return;
|
|
|
|
set_bit(XPRT_CONNECTION_ABORT, &xprt->state);
|
|
|
|
transport = container_of(xprt, struct sock_xprt, xprt);
|
|
|
|
queue_delayed_work(rpciod_workqueue, &transport->connect_worker,
|
|
|
|
timeout);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
|
|
|
|
{
|
|
|
|
struct sock_xprt *transport;
|
|
|
|
|
|
|
|
transport = container_of(xprt, struct sock_xprt, xprt);
|
|
|
|
|
|
|
|
if (!test_bit(XPRT_CONNECTION_ABORT, &xprt->state) ||
|
|
|
|
!cancel_delayed_work(&transport->connect_worker))
|
|
|
|
return;
|
|
|
|
clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
|
|
|
|
xprt_clear_connecting(xprt);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Clear the close-related state bits and notify the generic xprt layer
 * that the transport is now disconnected.  The barriers around the bit
 * clears order them against readers of xprt->state on other CPUs.
 */
static void xs_sock_mark_closed(struct rpc_xprt *xprt)
{
	smp_mb__before_clear_bit();
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	clear_bit(XPRT_CLOSING, &xprt->state);
	smp_mb__after_clear_bit();
	/* Mark transport as closed and wake up all pending tasks */
	xprt_disconnect_done(xprt);
}
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 * Runs in softirq context with sk_callback_lock held for reading.
 */
static void xs_tcp_state_change(struct sock *sk)
{
	struct rpc_xprt *xprt;

	read_lock_bh(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
	dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
			sk->sk_state, xprt_connected(xprt),
			sock_flag(sk, SOCK_DEAD),
			sock_flag(sk, SOCK_ZAPPED),
			sk->sk_shutdown);

	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		spin_lock(&xprt->transport_lock);
		if (!xprt_test_and_set_connected(xprt)) {
			struct sock_xprt *transport = container_of(xprt,
					struct sock_xprt, xprt);

			/* Reset TCP record info */
			transport->tcp_offset = 0;
			transport->tcp_reclen = 0;
			transport->tcp_copied = 0;
			transport->tcp_flags =
				TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;

			xprt_wake_pending_tasks(xprt, -EAGAIN);
		}
		spin_unlock(&xprt->transport_lock);
		break;
	case TCP_FIN_WAIT1:
		/* The client initiated a shutdown of the socket */
		xprt->connect_cookie++;
		xprt->reestablish_timeout = 0;
		set_bit(XPRT_CLOSING, &xprt->state);
		smp_mb__before_clear_bit();
		clear_bit(XPRT_CONNECTED, &xprt->state);
		clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
		smp_mb__after_clear_bit();
		xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
		break;
	case TCP_CLOSE_WAIT:
		/* The server initiated a shutdown of the socket */
		xprt_force_disconnect(xprt);
		xprt->connect_cookie++;
		/* fall through */
	case TCP_CLOSING:
		/*
		 * If the server closed down the connection, make sure that
		 * we back off before reconnecting
		 */
		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
		break;
	case TCP_LAST_ACK:
		set_bit(XPRT_CLOSING, &xprt->state);
		xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
		smp_mb__before_clear_bit();
		clear_bit(XPRT_CONNECTED, &xprt->state);
		smp_mb__after_clear_bit();
		break;
	case TCP_CLOSE:
		xs_tcp_cancel_linger_timeout(xprt);
		xs_sock_mark_closed(xprt);
	}
 out:
	read_unlock_bh(&sk->sk_callback_lock);
}
|
|
|
|
|
2008-10-29 03:21:39 +08:00
|
|
|
/**
 * xs_error_report - callback mainly for catching socket errors
 * @sk: socket
 *
 * Wakes pending tasks with -EAGAIN so they retransmit rather than fail.
 */
static void xs_error_report(struct sock *sk)
{
	struct rpc_xprt *xprt;

	read_lock_bh(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	dprintk("RPC: %s client %p...\n"
			"RPC: error %d\n",
			__func__, xprt, sk->sk_err);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}
|
|
|
|
|
net/sunrpc/xprtsock.c: some common code found
$ diff-funcs xs_udp_write_space net/sunrpc/xprtsock.c
net/sunrpc/xprtsock.c xs_tcp_write_space
--- net/sunrpc/xprtsock.c:xs_udp_write_space()
+++ net/sunrpc/xprtsock.c:xs_tcp_write_space()
@@ -1,4 +1,4 @@
- * xs_udp_write_space - callback invoked when socket buffer space
+ * xs_tcp_write_space - callback invoked when socket buffer space
* becomes available
* @sk: socket whose state has changed
*
@@ -7,12 +7,12 @@
* progress, otherwise we'll waste resources thrashing kernel_sendmsg
* with a bunch of small requests.
*/
-static void xs_udp_write_space(struct sock *sk)
+static void xs_tcp_write_space(struct sock *sk)
{
read_lock(&sk->sk_callback_lock);
- /* from net/core/sock.c:sock_def_write_space */
- if (sock_writeable(sk)) {
+ /* from net/core/stream.c:sk_stream_write_space */
+ if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
struct socket *sock;
struct rpc_xprt *xprt;
$ codiff net/sunrpc/xprtsock.o net/sunrpc/xprtsock.o.new
net/sunrpc/xprtsock.c:
xs_tcp_write_space | -163
xs_udp_write_space | -163
2 functions changed, 326 bytes removed
net/sunrpc/xprtsock.c:
xs_write_space | +179
1 function changed, 179 bytes added
net/sunrpc/xprtsock.o.new:
3 functions changed, 179 bytes added, 326 bytes removed, diff: -147
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
2009-02-07 15:48:33 +08:00
|
|
|
/*
 * Common write-space handler for the UDP and TCP callbacks below.
 * Only notifies the transport layer if someone actually registered
 * interest (SOCK_ASYNC_NOSPACE was set), avoiding spurious wakeups.
 */
static void xs_write_space(struct sock *sk)
{
	struct socket *sock;
	struct rpc_xprt *xprt;

	if (unlikely(!(sock = sk->sk_socket)))
		return;
	clear_bit(SOCK_NOSPACE, &sock->flags);

	if (unlikely(!(xprt = xprt_from_sock(sk))))
		return;
	/* Nobody was waiting for space; nothing to do. */
	if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
		return;

	xprt_write_space(xprt);
}
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
|
2005-08-12 04:25:50 +08:00
|
|
|
* xs_udp_write_space - callback invoked when socket buffer space
|
|
|
|
* becomes available
|
2005-08-12 04:25:26 +08:00
|
|
|
* @sk: socket whose state has changed
|
|
|
|
*
|
2005-08-12 04:25:23 +08:00
|
|
|
* Called when more output buffer space is available for this socket.
|
|
|
|
* We try not to wake our writers until they can make "significant"
|
2005-08-12 04:25:50 +08:00
|
|
|
* progress, otherwise we'll waste resources thrashing kernel_sendmsg
|
2005-08-12 04:25:23 +08:00
|
|
|
* with a bunch of small requests.
|
|
|
|
*/
|
2005-08-12 04:25:50 +08:00
|
|
|
static void xs_udp_write_space(struct sock *sk)
|
2005-08-12 04:25:23 +08:00
|
|
|
{
|
2010-09-22 20:43:39 +08:00
|
|
|
read_lock_bh(&sk->sk_callback_lock);
|
2005-08-12 04:25:23 +08:00
|
|
|
|
2005-08-12 04:25:50 +08:00
|
|
|
/* from net/core/sock.c:sock_def_write_space */
|
net/sunrpc/xprtsock.c: some common code found
$ diff-funcs xs_udp_write_space net/sunrpc/xprtsock.c
net/sunrpc/xprtsock.c xs_tcp_write_space
--- net/sunrpc/xprtsock.c:xs_udp_write_space()
+++ net/sunrpc/xprtsock.c:xs_tcp_write_space()
@@ -1,4 +1,4 @@
- * xs_udp_write_space - callback invoked when socket buffer space
+ * xs_tcp_write_space - callback invoked when socket buffer space
* becomes available
* @sk: socket whose state has changed
*
@@ -7,12 +7,12 @@
* progress, otherwise we'll waste resources thrashing kernel_sendmsg
* with a bunch of small requests.
*/
-static void xs_udp_write_space(struct sock *sk)
+static void xs_tcp_write_space(struct sock *sk)
{
read_lock(&sk->sk_callback_lock);
- /* from net/core/sock.c:sock_def_write_space */
- if (sock_writeable(sk)) {
+ /* from net/core/stream.c:sk_stream_write_space */
+ if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
struct socket *sock;
struct rpc_xprt *xprt;
$ codiff net/sunrpc/xprtsock.o net/sunrpc/xprtsock.o.new
net/sunrpc/xprtsock.c:
xs_tcp_write_space | -163
xs_udp_write_space | -163
2 functions changed, 326 bytes removed
net/sunrpc/xprtsock.c:
xs_write_space | +179
1 function changed, 179 bytes added
net/sunrpc/xprtsock.o.new:
3 functions changed, 179 bytes added, 326 bytes removed, diff: -147
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
2009-02-07 15:48:33 +08:00
|
|
|
if (sock_writeable(sk))
|
|
|
|
xs_write_space(sk);
|
2005-08-12 04:25:23 +08:00
|
|
|
|
2010-09-22 20:43:39 +08:00
|
|
|
read_unlock_bh(&sk->sk_callback_lock);
|
2005-08-12 04:25:50 +08:00
|
|
|
}
|
2005-08-12 04:25:23 +08:00
|
|
|
|
2005-08-12 04:25:50 +08:00
|
|
|
/**
 * xs_tcp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_tcp_write_space(struct sock *sk)
{
	read_lock_bh(&sk->sk_callback_lock);

	/* from net/core/stream.c:sk_stream_write_space */
	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
		xs_write_space(sk);

	read_unlock_bh(&sk->sk_callback_lock);
}
|
|
|
|
|
2005-08-26 07:25:56 +08:00
|
|
|
/*
 * Apply the transport's cached sndsize/rcvsize settings to the live
 * socket.  The *_LOCK userlocks prevent the network stack from
 * auto-tuning the buffers back.
 */
static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct sock *sk = transport->inet;

	if (transport->rcvsize) {
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
	}
	if (transport->sndsize) {
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
		/* Kick anyone waiting on the old, smaller send buffer. */
		sk->sk_write_space(sk);
	}
}
|
|
|
|
|
2005-08-26 07:25:49 +08:00
|
|
|
/**
|
2005-08-26 07:25:56 +08:00
|
|
|
* xs_udp_set_buffer_size - set send and receive limits
|
2005-08-26 07:25:49 +08:00
|
|
|
* @xprt: generic transport
|
2005-08-26 07:25:56 +08:00
|
|
|
* @sndsize: requested size of send buffer, in bytes
|
|
|
|
* @rcvsize: requested size of receive buffer, in bytes
|
2005-08-26 07:25:49 +08:00
|
|
|
*
|
2005-08-26 07:25:56 +08:00
|
|
|
* Set socket send and receive buffer size limits.
|
2005-08-26 07:25:49 +08:00
|
|
|
*/
|
2005-08-26 07:25:56 +08:00
|
|
|
static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
|
2005-08-26 07:25:49 +08:00
|
|
|
{
|
2006-12-06 05:35:30 +08:00
|
|
|
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
|
|
|
|
|
|
|
|
transport->sndsize = 0;
|
2005-08-26 07:25:56 +08:00
|
|
|
if (sndsize)
|
2006-12-06 05:35:30 +08:00
|
|
|
transport->sndsize = sndsize + 1024;
|
|
|
|
transport->rcvsize = 0;
|
2005-08-26 07:25:56 +08:00
|
|
|
if (rcvsize)
|
2006-12-06 05:35:30 +08:00
|
|
|
transport->rcvsize = rcvsize + 1024;
|
2005-08-26 07:25:56 +08:00
|
|
|
|
|
|
|
xs_udp_do_set_buffer_size(xprt);
|
2005-08-26 07:25:49 +08:00
|
|
|
}
|
|
|
|
|
2005-08-26 07:25:52 +08:00
|
|
|
/**
 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
 * @task: task that timed out
 *
 * Adjust the congestion window after a retransmit timeout has occurred.
 */
static void xs_udp_timer(struct rpc_task *task)
{
	/* Treat the timeout as a congestion signal: shrink the window. */
	xprt_adjust_cwnd(task, -ETIMEDOUT);
}
|
|
|
|
|
2006-05-25 13:40:49 +08:00
|
|
|
static unsigned short xs_get_random_port(void)
|
|
|
|
{
|
|
|
|
unsigned short range = xprt_max_resvport - xprt_min_resvport;
|
|
|
|
unsigned short rand = (unsigned short) net_random() % range;
|
|
|
|
return rand + xprt_min_resvport;
|
|
|
|
}
|
|
|
|
|
2006-01-03 16:55:51 +08:00
|
|
|
/**
 * xs_set_port - reset the port number in the remote endpoint address
 * @xprt: generic transport
 * @port: new port number
 *
 * Updates both the stored sockaddr and the printable port strings.
 */
static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
{
	dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);

	rpc_set_port(xs_addr(xprt), port);
	/* Keep the human-readable address strings in sync. */
	xs_update_peer_port(xprt);
}
|
|
|
|
|
2010-10-04 20:51:23 +08:00
|
|
|
static unsigned short xs_get_srcport(struct sock_xprt *transport)
|
2007-11-06 06:40:58 +08:00
|
|
|
{
|
2009-08-10 03:09:46 +08:00
|
|
|
unsigned short port = transport->srcport;
|
2007-11-06 06:40:58 +08:00
|
|
|
|
|
|
|
if (port == 0 && transport->xprt.resvport)
|
|
|
|
port = xs_get_random_port();
|
|
|
|
return port;
|
|
|
|
}
|
|
|
|
|
2010-10-04 20:51:56 +08:00
|
|
|
static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
|
2007-11-06 06:40:58 +08:00
|
|
|
{
|
2009-08-10 03:09:46 +08:00
|
|
|
if (transport->srcport != 0)
|
|
|
|
transport->srcport = 0;
|
2007-11-06 06:40:58 +08:00
|
|
|
if (!transport->xprt.resvport)
|
|
|
|
return 0;
|
|
|
|
if (port <= xprt_min_resvport || port > xprt_max_resvport)
|
|
|
|
return xprt_max_resvport;
|
|
|
|
return --port;
|
|
|
|
}
|
2010-10-05 19:53:08 +08:00
|
|
|
/*
 * Bind @sock to the transport's source address, trying source ports in
 * the reserved range until one is free.  The walk wraps at most twice
 * (nloop) before giving up with -EADDRINUSE.  A successful bind caches
 * the port in transport->srcport for later reconnects.
 */
static int xs_bind(struct sock_xprt *transport, struct socket *sock)
{
	struct sockaddr_storage myaddr;
	int err, nloop = 0;
	unsigned short port = xs_get_srcport(transport);
	unsigned short last;

	memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
	do {
		rpc_set_port((struct sockaddr *)&myaddr, port);
		err = kernel_bind(sock, (struct sockaddr *)&myaddr,
				transport->xprt.addrlen);
		/* port == 0 means "any port": the kernel chose for us. */
		if (port == 0)
			break;
		if (err == 0) {
			transport->srcport = port;
			break;
		}
		last = port;
		port = xs_next_srcport(transport, port);
		/* port > last means the walk wrapped around the range. */
		if (port > last)
			nloop++;
	} while (err == -EADDRINUSE && nloop != 2);

	if (myaddr.ss_family == AF_INET)
		dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__,
				&((struct sockaddr_in *)&myaddr)->sin_addr,
				port, err ? "failed" : "ok", err);
	else
		dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__,
				&((struct sockaddr_in6 *)&myaddr)->sin6_addr,
				port, err ? "failed" : "ok", err);
	return err;
}
|
|
|
|
|
2010-10-05 19:53:08 +08:00
|
|
|
|
2006-12-07 12:35:24 +08:00
|
|
|
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep classes for RPC client sockets: index 0 is IPv4, index 1 is
 * IPv6.  Reclassifying keeps lockdep from conflating RPC socket locks
 * with ordinary userspace socket locks.
 */
static struct lock_class_key xs_key[2];
static struct lock_class_key xs_slock_key[2];

static inline void xs_reclassify_socket4(struct socket *sock)
{
	struct sock *sk = sock->sk;

	/* Must not be called while userspace holds the socket lock. */
	BUG_ON(sock_owned_by_user(sk));
	sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
		&xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
}

static inline void xs_reclassify_socket6(struct socket *sock)
{
	struct sock *sk = sock->sk;

	/* Must not be called while userspace holds the socket lock. */
	BUG_ON(sock_owned_by_user(sk));
	sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
		&xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
}

/* Dispatch on address family; other families are left unclassified. */
static inline void xs_reclassify_socket(int family, struct socket *sock)
{
	switch (family) {
	case AF_INET:
		xs_reclassify_socket4(sock);
		break;
	case AF_INET6:
		xs_reclassify_socket6(sock);
		break;
	}
}
#else
/* No-op stubs when lockdep is not configured. */
static inline void xs_reclassify_socket4(struct socket *sock)
{
}

static inline void xs_reclassify_socket6(struct socket *sock)
{
}

static inline void xs_reclassify_socket(int family, struct socket *sock)
{
}
#endif
|
|
|
|
|
2010-10-04 20:56:38 +08:00
|
|
|
/*
 * Create and bind a kernel socket for this transport.  Returns the new
 * socket, or an ERR_PTR on failure; on bind failure the socket has
 * already been released, so the caller never owns a half-set-up socket.
 */
static struct socket *xs_create_sock(struct rpc_xprt *xprt,
		struct sock_xprt *transport, int family, int type, int protocol)
{
	struct socket *sock;
	int err;

	err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
	if (err < 0) {
		dprintk("RPC: can't create %d transport socket (%d).\n",
				protocol, -err);
		goto out;
	}
	xs_reclassify_socket(family, sock);

	err = xs_bind(transport, sock);
	if (err) {
		sock_release(sock);
		goto out;
	}

	return sock;
out:
	return ERR_PTR(err);
}
|
|
|
|
|
2007-08-06 23:57:38 +08:00
|
|
|
/*
 * Attach a freshly created UDP socket to the transport: install the
 * RPC socket callbacks and publish the socket under sk_callback_lock,
 * then (re)apply the buffer-size settings.  Idempotent if the
 * transport already has a socket attached (transport->inet set).
 */
static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	if (!transport->inet) {
		struct sock *sk = sock->sk;

		write_lock_bh(&sk->sk_callback_lock);

		/* Remember the old callbacks so teardown can restore them. */
		xs_save_old_callbacks(transport, sk);

		sk->sk_user_data = xprt;
		sk->sk_data_ready = xs_udp_data_ready;
		sk->sk_write_space = xs_udp_write_space;
		sk->sk_error_report = xs_error_report;
		sk->sk_no_check = UDP_CSUM_NORCV;
		/* Callbacks run in softirq context: no sleeping allocations. */
		sk->sk_allocation = GFP_ATOMIC;

		xprt_set_connected(xprt);

		/* Reset to new socket */
		transport->sock = sock;
		transport->inet = sk;

		write_unlock_bh(&sk->sk_callback_lock);
	}
	xs_udp_do_set_buffer_size(xprt);
}
|
|
|
|
|
2010-10-04 20:58:02 +08:00
|
|
|
/*
 * Workqueue handler that (re)establishes a UDP transport socket.
 * Tears down any existing socket state, creates and binds a new one,
 * and wakes pending tasks with the final status.
 */
static void xs_udp_setup_socket(struct work_struct *work)
{
	struct sock_xprt *transport =
		container_of(work, struct sock_xprt, connect_worker.work);
	struct rpc_xprt *xprt = &transport->xprt;
	struct socket *sock = transport->sock;
	int status = -EIO;

	/* Transport is being shut down; do nothing. */
	if (xprt->shutdown)
		goto out;

	/* Start by resetting any existing state */
	xs_reset_transport(transport);
	sock = xs_create_sock(xprt, transport,
			xs_addr(xprt)->sa_family, SOCK_DGRAM, IPPROTO_UDP);
	if (IS_ERR(sock))
		goto out;

	dprintk("RPC: worker connecting xprt %p via %s to "
				"%s (port %s)\n", xprt,
			xprt->address_strings[RPC_DISPLAY_PROTO],
			xprt->address_strings[RPC_DISPLAY_ADDR],
			xprt->address_strings[RPC_DISPLAY_PORT]);

	xs_udp_finish_connecting(xprt, sock);
	status = 0;
out:
	xprt_clear_connecting(xprt);
	/* Wake everyone waiting on this connect attempt, pass or fail. */
	xprt_wake_pending_tasks(xprt, status);
}
|
|
|
|
|
2005-08-26 07:25:55 +08:00
|
|
|
/*
 * We need to preserve the port number so the reply cache on the server can
 * find our cached RPC replies when we get around to reconnecting.
 */
static void xs_abort_connection(struct sock_xprt *transport)
{
	int result;
	struct sockaddr any;

	dprintk("RPC: disconnecting xprt %p to reuse port\n", transport);

	/*
	 * Disconnect the transport socket by doing a connect operation
	 * with AF_UNSPEC. This should return immediately...
	 */
	memset(&any, 0, sizeof(any));
	any.sa_family = AF_UNSPEC;
	result = kernel_connect(transport->sock, &any, sizeof(any), 0);
	if (!result)
		xs_sock_mark_closed(&transport->xprt);
	else
		/* NOTE(review): on failure the transport is left in its
		 * previous state; only the debug message records it. */
		dprintk("RPC: AF_UNSPEC connect return code %d\n",
				result);
}
|
|
|
|
|
2010-10-04 20:52:55 +08:00
|
|
|
/*
 * Decide whether the existing TCP socket needs an AF_UNSPEC abort
 * before it can be reused for a reconnect.  Sockets that never saw a
 * shutdown can be reused as-is; anything else is aborted so the local
 * port can be rebound.
 */
static void xs_tcp_reuse_connection(struct sock_xprt *transport)
{
	unsigned int state = transport->inet->sk_state;

	if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED) {
		/* we don't need to abort the connection if the socket
		 * hasn't undergone a shutdown
		 */
		if (transport->inet->sk_shutdown == 0)
			return;
		dprintk("RPC: %s: TCP_CLOSEd and sk_shutdown set to %d\n",
				__func__, transport->inet->sk_shutdown);
	}
	if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT)) {
		/* we don't need to abort the connection if the socket
		 * hasn't undergone a shutdown
		 */
		if (transport->inet->sk_shutdown == 0)
			return;
		dprintk("RPC: %s: ESTABLISHED/SYN_SENT "
				"sk_shutdown set to %d\n",
				__func__, transport->inet->sk_shutdown);
	}
	xs_abort_connection(transport);
}
|
|
|
|
|
2007-08-06 23:57:38 +08:00
|
|
|
/*
 * Wire up a TCP socket to the transport and start a non-blocking
 * connect.  Returns 0 or -EINPROGRESS on a connect in flight, and a
 * negative errno otherwise (-ENOTCONN if the transport isn't bound).
 */
static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	int ret = -ENOTCONN;

	if (!transport->inet) {
		struct sock *sk = sock->sk;

		write_lock_bh(&sk->sk_callback_lock);

		/* Remember the old callbacks so teardown can restore them. */
		xs_save_old_callbacks(transport, sk);

		sk->sk_user_data = xprt;
		sk->sk_data_ready = xs_tcp_data_ready;
		sk->sk_state_change = xs_tcp_state_change;
		sk->sk_write_space = xs_tcp_write_space;
		sk->sk_error_report = xs_error_report;
		/* Callbacks run in softirq context: no sleeping allocations. */
		sk->sk_allocation = GFP_ATOMIC;

		/* socket options */
		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
		sock_reset_flag(sk, SOCK_LINGER);
		tcp_sk(sk)->linger2 = 0;
		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;

		xprt_clear_connected(xprt);

		/* Reset to new socket */
		transport->sock = sock;
		transport->inet = sk;

		write_unlock_bh(&sk->sk_callback_lock);
	}

	if (!xprt_bound(xprt))
		goto out;

	/* Tell the socket layer to start connecting... */
	xprt->stat.connect_count++;
	xprt->stat.connect_start = jiffies;
	ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
	switch (ret) {
	case 0:
	case -EINPROGRESS:
		/* SYN_SENT! */
		xprt->connect_cookie++;
		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
	}
out:
	return ret;
}
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
 * @work: queued work item embedded in the transport's sock_xprt
 *
 * Invoked by a work queue tasklet.  Creates a fresh socket if none
 * exists, otherwise attempts to reuse (or abort) the existing one,
 * then kicks off a non-blocking connect and classifies the result.
 */
static void xs_tcp_setup_socket(struct work_struct *work)
{
	struct sock_xprt *transport =
		container_of(work, struct sock_xprt, connect_worker.work);
	struct socket *sock = transport->sock;
	struct rpc_xprt *xprt = &transport->xprt;
	int status = -EIO;

	/* Transport is being shut down; do nothing. */
	if (xprt->shutdown)
		goto out;

	if (!sock) {
		clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
		sock = xs_create_sock(xprt, transport,
				xs_addr(xprt)->sa_family, SOCK_STREAM, IPPROTO_TCP);
		if (IS_ERR(sock)) {
			status = PTR_ERR(sock);
			goto out;
		}
	} else {
		int abort_and_exit;

		abort_and_exit = test_and_clear_bit(XPRT_CONNECTION_ABORT,
				&xprt->state);
		/* "close" the socket, preserving the local port */
		xs_tcp_reuse_connection(transport);

		if (abort_and_exit)
			goto out_eagain;
	}

	dprintk("RPC: worker connecting xprt %p via %s to "
				"%s (port %s)\n", xprt,
			xprt->address_strings[RPC_DISPLAY_PROTO],
			xprt->address_strings[RPC_DISPLAY_ADDR],
			xprt->address_strings[RPC_DISPLAY_PORT]);

	status = xs_tcp_finish_connecting(xprt, sock);
	dprintk("RPC: %p connect status %d connected %d sock state %d\n",
			xprt, -status, xprt_connected(xprt),
			sock->sk->sk_state);
	switch (status) {
	default:
		printk("%s: connect returned unhandled error %d\n",
			__func__, status);
		/* fall through: treat unknown errors like TIME_WAIT */
	case -EADDRNOTAVAIL:
		/* We're probably in TIME_WAIT. Get rid of existing socket,
		 * and retry
		 */
		set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
		xprt_force_disconnect(xprt);
		break;
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ENETUNREACH:
		/* retry with existing socket, after a delay */
		/* fall through */
	case 0:
	case -EINPROGRESS:
	case -EALREADY:
		/* Connect still in progress: don't wake tasks yet. */
		xprt_clear_connecting(xprt);
		return;
	case -EINVAL:
		/* Happens, for instance, if the user specified a link
		 * local IPv6 address without a scope-id.
		 */
		goto out;
	}
out_eagain:
	status = -EAGAIN;
out:
	xprt_clear_connecting(xprt);
	xprt_wake_pending_tasks(xprt, status);
}
|
2005-08-12 04:25:53 +08:00
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
|
|
|
|
* xs_connect - connect a socket to a remote endpoint
|
|
|
|
* @task: address of RPC task that manages state of connect request
|
|
|
|
*
|
|
|
|
* TCP: If the remote end dropped the connection, delay reconnecting.
|
2005-08-26 07:25:55 +08:00
|
|
|
*
|
|
|
|
* UDP socket connects are synchronous, but we use a work queue anyway
|
|
|
|
* to guarantee that even unprivileged user processes can set up a
|
|
|
|
* socket on a privileged port.
|
|
|
|
*
|
|
|
|
* If a UDP socket connect fails, the delay behavior here prevents
|
|
|
|
* retry floods (hard mounts).
|
2005-08-12 04:25:26 +08:00
|
|
|
*/
|
|
|
|
static void xs_connect(struct rpc_task *task)
|
2005-08-12 04:25:23 +08:00
|
|
|
{
|
|
|
|
struct rpc_xprt *xprt = task->tk_xprt;
|
2006-12-06 05:35:15 +08:00
|
|
|
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
|
2005-08-12 04:25:23 +08:00
|
|
|
|
SUNRPC: Allow RPCs to fail quickly if the server is unreachable
The kernel sometimes makes RPC calls to services that aren't running.
Because the kernel's RPC client always assumes the hard retry semantic
when reconnecting a connection-oriented RPC transport, the underlying
reconnect logic takes a long while to time out, even though the remote
may have responded immediately with ECONNREFUSED.
In certain cases, like upcalls to our local rpcbind daemon, or for NFS
mount requests, we'd like the kernel to fail immediately if the remote
service isn't reachable. This allows another transport to be tried
immediately, or the pending request can be abandoned quickly.
Introduce a per-request flag which controls how call_transmit_status()
behaves when request transmission fails because the server cannot be
reached.
We don't want soft connection semantics to apply to other errors. The
default case of the switch statement in call_transmit_status() no
longer falls through; the fall through code is copied to the default
case, and a "break;" is added.
The transport's connection re-establishment timeout is also ignored for
such requests. We want the request to fail immediately, so the
reconnect delay is skipped. Additionally, we don't want a connect
failure here to further increase the reconnect timeout value, since
this request will not be retried.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2009-12-04 04:58:56 +08:00
|
|
|
if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
|
2007-02-01 01:14:08 +08:00
|
|
|
dprintk("RPC: xs_connect delayed xprt %p for %lu "
|
|
|
|
"seconds\n",
|
2005-08-26 07:25:55 +08:00
|
|
|
xprt, xprt->reestablish_timeout / HZ);
|
2007-06-15 06:00:42 +08:00
|
|
|
queue_delayed_work(rpciod_workqueue,
|
|
|
|
&transport->connect_worker,
|
|
|
|
xprt->reestablish_timeout);
|
2005-08-26 07:25:55 +08:00
|
|
|
xprt->reestablish_timeout <<= 1;
|
NFS/RPC: fix problems with reestablish_timeout and related code.
[[resending with correct cc: - "vfs.kernel.org" just isn't right!]]
xprt->reestablish_timeout is used to cause TCP connection attempts to
back off if the connection fails so as not to hammer the network,
but to still allow immediate connections when there is no reason to
believe there is a problem.
It is not used for the first connection (when transport->sock is NULL)
but only on reconnects.
It is currently set:
a/ to 0 when xs_tcp_state_change finds a state of TCP_FIN_WAIT1
on the assumption that the client has closed the connection
so the reconnect should be immediate when needed.
b/ to at least XS_TCP_INIT_REEST_TO when xs_tcp_state_change
detects TCP_CLOSING or TCP_CLOSE_WAIT on the assumption that the
server closed the connection so a small delay at least is
required.
c/ as above when xs_tcp_state_change detects TCP_SYN_SENT, so that
it is never 0 while a connection has been attempted, else
the doubling will produce 0 and there will be no backoff.
d/ to double is value (up to a limit) when delaying a connection,
thus providing exponential backoff and
e/ to XS_TCP_INIT_REEST_TO in xs_setup_tcp as simple initialisation.
So you can see it is highly dependant on xs_tcp_state_change being
called as expected. However experimental evidence shows that
xs_tcp_state_change does not see all state changes.
("rpcdebug -m rpc trans" can help show what actually happens).
Results show:
TCP_ESTABLISHED is reported when a connection is made. TCP_SYN_SENT
is never reported, so rule 'c' above is never effective.
When the server closes the connection, TCP_CLOSE_WAIT and
TCP_LAST_ACK *might* be reported, and TCP_CLOSE is always
reported. This rule 'b' above will sometimes be effective, but
not reliably.
When the client closes the connection, it used to result in
TCP_FIN_WAIT1, TCP_FIN_WAIT2, TCP_CLOSE. However since commit
f75e674 (SUNRPC: Fix the problem of EADDRNOTAVAIL syslog floods on
reconnect) we don't see *any* events on client-close. I think this
is because xs_restore_old_callbacks is called to disconnect
xs_tcp_state_change before the socket is closed.
In any case, rule 'a' no longer applies.
So all that is left are rule d, which successfully doubles the
timeout which is never rest, and rule e which initialises the timeout.
Even if the rules worked as expected, there would be a problem because
a successful connection does not reset the timeout, so a sequence
of events where the server closes the connection (e.g. during failover
testing) will cause longer and longer timeouts with no good reason.
This patch:
- sets reestablish_timeout to 0 in xs_close thus effecting rule 'a'
- sets it to 0 in xs_tcp_data_ready to ensure that a successful
connection resets the timeout
- sets it to at least XS_TCP_INIT_REEST_TO after it is doubled,
thus effecting rule c
I have not reimplemented rule b and the new version of rule c
seems sufficient.
I suspect other code in xs_tcp_data_ready needs to be revised as well.
For example I don't think connect_cookie is being incremented as often
as it should be.
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2009-09-24 02:36:37 +08:00
|
|
|
if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
|
|
|
|
xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
|
2005-08-26 07:25:55 +08:00
|
|
|
if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
|
|
|
|
xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
|
2005-08-12 04:25:53 +08:00
|
|
|
} else {
|
2007-02-01 01:14:08 +08:00
|
|
|
dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
|
2007-06-15 06:00:42 +08:00
|
|
|
queue_delayed_work(rpciod_workqueue,
|
|
|
|
&transport->connect_worker, 0);
|
2005-08-12 04:25:23 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-03-21 02:44:16 +08:00
|
|
|
/**
|
|
|
|
* xs_udp_print_stats - display UDP socket-specifc stats
|
|
|
|
* @xprt: rpc_xprt struct containing statistics
|
|
|
|
* @seq: output file
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
|
|
|
|
{
|
2006-12-06 05:35:26 +08:00
|
|
|
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
|
|
|
|
|
2006-03-21 02:44:16 +08:00
|
|
|
seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n",
|
2009-08-10 03:09:46 +08:00
|
|
|
transport->srcport,
|
2006-03-21 02:44:16 +08:00
|
|
|
xprt->stat.bind_count,
|
|
|
|
xprt->stat.sends,
|
|
|
|
xprt->stat.recvs,
|
|
|
|
xprt->stat.bad_xids,
|
|
|
|
xprt->stat.req_u,
|
|
|
|
xprt->stat.bklog_u);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* xs_tcp_print_stats - display TCP socket-specifc stats
|
|
|
|
* @xprt: rpc_xprt struct containing statistics
|
|
|
|
* @seq: output file
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
|
|
|
|
{
|
2006-12-06 05:35:26 +08:00
|
|
|
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
|
2006-03-21 02:44:16 +08:00
|
|
|
long idle_time = 0;
|
|
|
|
|
|
|
|
if (xprt_connected(xprt))
|
|
|
|
idle_time = (long)(jiffies - xprt->last_used) / HZ;
|
|
|
|
|
|
|
|
seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n",
|
2009-08-10 03:09:46 +08:00
|
|
|
transport->srcport,
|
2006-03-21 02:44:16 +08:00
|
|
|
xprt->stat.bind_count,
|
|
|
|
xprt->stat.connect_count,
|
|
|
|
xprt->stat.connect_time,
|
|
|
|
idle_time,
|
|
|
|
xprt->stat.sends,
|
|
|
|
xprt->stat.recvs,
|
|
|
|
xprt->stat.bad_xids,
|
|
|
|
xprt->stat.req_u,
|
|
|
|
xprt->stat.bklog_u);
|
|
|
|
}
|
|
|
|
|
2009-09-10 22:32:28 +08:00
|
|
|
/*
|
|
|
|
* Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
|
|
|
|
* we allocate pages instead doing a kmalloc like rpc_malloc is because we want
|
|
|
|
* to use the server side send routines.
|
|
|
|
*/
|
2010-01-15 06:38:31 +08:00
|
|
|
static void *bc_malloc(struct rpc_task *task, size_t size)
|
2009-09-10 22:32:28 +08:00
|
|
|
{
|
|
|
|
struct page *page;
|
|
|
|
struct rpc_buffer *buf;
|
|
|
|
|
|
|
|
BUG_ON(size > PAGE_SIZE - sizeof(struct rpc_buffer));
|
|
|
|
page = alloc_page(GFP_KERNEL);
|
|
|
|
|
|
|
|
if (!page)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
buf = page_address(page);
|
|
|
|
buf->len = PAGE_SIZE;
|
|
|
|
|
|
|
|
return buf->data;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Free the space allocated in the bc_alloc routine
|
|
|
|
*/
|
2010-01-15 06:38:31 +08:00
|
|
|
static void bc_free(void *buffer)
|
2009-09-10 22:32:28 +08:00
|
|
|
{
|
|
|
|
struct rpc_buffer *buf;
|
|
|
|
|
|
|
|
if (!buffer)
|
|
|
|
return;
|
|
|
|
|
|
|
|
buf = container_of(buffer, struct rpc_buffer, data);
|
|
|
|
free_page((unsigned long)buf);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Use the svc_sock to send the callback. Must be called with svsk->sk_mutex
|
|
|
|
* held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
|
|
|
|
*/
|
|
|
|
static int bc_sendto(struct rpc_rqst *req)
|
|
|
|
{
|
|
|
|
int len;
|
|
|
|
struct xdr_buf *xbufp = &req->rq_snd_buf;
|
|
|
|
struct rpc_xprt *xprt = req->rq_xprt;
|
|
|
|
struct sock_xprt *transport =
|
|
|
|
container_of(xprt, struct sock_xprt, xprt);
|
|
|
|
struct socket *sock = transport->sock;
|
|
|
|
unsigned long headoff;
|
|
|
|
unsigned long tailoff;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set up the rpc header and record marker stuff
|
|
|
|
*/
|
|
|
|
xs_encode_tcp_record_marker(xbufp);
|
|
|
|
|
|
|
|
tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
|
|
|
|
headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
|
|
|
|
len = svc_send_common(sock, xbufp,
|
|
|
|
virt_to_page(xbufp->head[0].iov_base), headoff,
|
|
|
|
xbufp->tail[0].iov_base, tailoff);
|
|
|
|
|
|
|
|
if (len != xbufp->len) {
|
|
|
|
printk(KERN_NOTICE "Error sending entire callback!\n");
|
|
|
|
len = -EAGAIN;
|
|
|
|
}
|
|
|
|
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The send routine. Borrows from svc_send
|
|
|
|
*/
|
|
|
|
static int bc_send_request(struct rpc_task *task)
|
|
|
|
{
|
|
|
|
struct rpc_rqst *req = task->tk_rqstp;
|
|
|
|
struct svc_xprt *xprt;
|
|
|
|
struct svc_sock *svsk;
|
|
|
|
u32 len;
|
|
|
|
|
|
|
|
dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
|
|
|
|
/*
|
|
|
|
* Get the server socket associated with this callback xprt
|
|
|
|
*/
|
|
|
|
xprt = req->rq_xprt->bc_xprt;
|
|
|
|
svsk = container_of(xprt, struct svc_sock, sk_xprt);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Grab the mutex to serialize data as the connection is shared
|
|
|
|
* with the fore channel
|
|
|
|
*/
|
|
|
|
if (!mutex_trylock(&xprt->xpt_mutex)) {
|
|
|
|
rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
|
|
|
|
if (!mutex_trylock(&xprt->xpt_mutex))
|
|
|
|
return -EAGAIN;
|
|
|
|
rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
|
|
|
|
}
|
|
|
|
if (test_bit(XPT_DEAD, &xprt->xpt_flags))
|
|
|
|
len = -ENOTCONN;
|
|
|
|
else
|
|
|
|
len = bc_sendto(req);
|
|
|
|
mutex_unlock(&xprt->xpt_mutex);
|
|
|
|
|
|
|
|
if (len > 0)
|
|
|
|
len = 0;
|
|
|
|
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * The close routine. Since this is client initiated, we do nothing
 */
static void bc_close(struct rpc_xprt *xprt)
{
	/* Intentionally empty: the forechannel owns the socket lifetime. */
}
|
|
|
|
|
|
|
|
/*
 * The xprt destroy routine. Again, because this connection is client
 * initiated, we do nothing
 */
static void bc_destroy(struct rpc_xprt *xprt)
{
	/* Intentionally empty: teardown is driven by the svc_xprt side. */
}
|
|
|
|
|
2005-08-12 04:25:56 +08:00
|
|
|
/*
 * RPC transport operations for UDP sockets.  UDP is datagram-based, so
 * slot reservation and request release use the congestion-controlled
 * (*_cong) variants, and the retransmit timeout is RTT-estimated.
 */
static struct rpc_xprt_ops xs_udp_ops = {
	.set_buffer_size	= xs_udp_set_buffer_size,
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong,
	.rpcbind		= rpcb_getport_async,
	.set_port		= xs_set_port,
	.connect		= xs_connect,
	.buf_alloc		= rpc_malloc,
	.buf_free		= rpc_free,
	.send_request		= xs_udp_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_rtt,
	.timer			= xs_udp_timer,
	.release_request	= xprt_release_rqst_cong,
	.close			= xs_close,
	.destroy		= xs_destroy,
	.print_stats		= xs_udp_print_stats,
};
|
|
|
|
|
|
|
|
/*
 * RPC transport operations for TCP sockets.  TCP provides its own flow
 * control, so plain (non-congestion) slot reservation is used and the
 * retransmit timeout is the default exponential backoff.
 */
static struct rpc_xprt_ops xs_tcp_ops = {
	.reserve_xprt		= xprt_reserve_xprt,
	.release_xprt		= xs_tcp_release_xprt,
	.rpcbind		= rpcb_getport_async,
	.set_port		= xs_set_port,
	.connect		= xs_connect,
	.buf_alloc		= rpc_malloc,
	.buf_free		= rpc_free,
	.send_request		= xs_tcp_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
	.close			= xs_tcp_close,
	.destroy		= xs_destroy,
	.print_stats		= xs_tcp_print_stats,
};
|
|
|
|
|
2009-09-10 22:32:28 +08:00
|
|
|
/*
 * The rpc_xprt_ops for the server backchannel
 *
 * The backchannel piggybacks on an existing svc_sock connection, so it
 * has no connect/bind callbacks, uses page-based buffers (bc_malloc),
 * and its close/destroy hooks are no-ops.
 */
static struct rpc_xprt_ops bc_tcp_ops = {
	.reserve_xprt		= xprt_reserve_xprt,
	.release_xprt		= xprt_release_xprt,
	.buf_alloc		= bc_malloc,
	.buf_free		= bc_free,
	.send_request		= bc_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
	.close			= bc_close,
	.destroy		= bc_destroy,
	.print_stats		= xs_tcp_print_stats,
};
|
|
|
|
|
SUNRPC: Properly initialize sock_xprt.srcaddr in all cases
The source address field in the transport's sock_xprt is initialized
ONLY IF the RPC application passed a pointer to a source address
during the call to rpc_create(). However, xs_bind() subsequently uses
the value of this field without regard to whether the source address
was initialized during transport creation or not.
So far we've been lucky: the uninitialized value of this field is
zeroes. xs_bind(), until recently, used only the sin[6]_addr field in
this sockaddr, and all zeroes is a valid value for this: it means
ANYADDR. This is a happy coincidence.
However, xs_bind() now wants to use the sa_family field as well, and
expects it to be initialized to something other than zero.
Therefore, the source address sockaddr field should be fully
initialized at transport create time in _every_ case, not just when
the RPC application wants to use a specific bind address.
Bruce added a workaround for this missing initialization by adjusting
commit 6bc9638a, but the "right" way to do this is to ensure that the
source address sockaddr is always correctly initialized from the
get-go.
This patch doesn't introduce a behavior change. It's simply a
clean-up of Bruce's fix, to prevent future problems of this kind. It
may look like overkill, but
a) it clearly documents the default initial value of this field,
b) it doesn't assume that the sockaddr_storage memory is first
initialized to any particular value, and
c) it will fail verbosely if some unknown address family is passed
in
Originally introduced by commit d3bc9a1d.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
2010-10-20 23:53:01 +08:00
|
|
|
static int xs_init_anyaddr(const int family, struct sockaddr *sap)
|
|
|
|
{
|
|
|
|
static const struct sockaddr_in sin = {
|
|
|
|
.sin_family = AF_INET,
|
|
|
|
.sin_addr.s_addr = htonl(INADDR_ANY),
|
|
|
|
};
|
|
|
|
static const struct sockaddr_in6 sin6 = {
|
|
|
|
.sin6_family = AF_INET6,
|
|
|
|
.sin6_addr = IN6ADDR_ANY_INIT,
|
|
|
|
};
|
|
|
|
|
|
|
|
switch (family) {
|
|
|
|
case AF_INET:
|
|
|
|
memcpy(sap, &sin, sizeof(sin));
|
|
|
|
break;
|
|
|
|
case AF_INET6:
|
|
|
|
memcpy(sap, &sin6, sizeof(sin6));
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
dprintk("RPC: %s: Bad address family\n", __func__);
|
|
|
|
return -EAFNOSUPPORT;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-09-11 01:47:07 +08:00
|
|
|
static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
|
2007-09-11 01:46:39 +08:00
|
|
|
unsigned int slot_table_size)
|
2006-10-18 02:44:27 +08:00
|
|
|
{
|
|
|
|
struct rpc_xprt *xprt;
|
2006-12-06 05:35:11 +08:00
|
|
|
struct sock_xprt *new;
|
2006-10-18 02:44:27 +08:00
|
|
|
|
2007-07-08 19:08:54 +08:00
|
|
|
if (args->addrlen > sizeof(xprt->addr)) {
|
2007-02-01 01:14:08 +08:00
|
|
|
dprintk("RPC: xs_setup_xprt: address too large\n");
|
2006-10-18 02:44:27 +08:00
|
|
|
return ERR_PTR(-EBADF);
|
|
|
|
}
|
|
|
|
|
2010-09-29 20:05:43 +08:00
|
|
|
xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size);
|
2010-09-29 20:02:43 +08:00
|
|
|
if (xprt == NULL) {
|
2007-02-01 01:14:08 +08:00
|
|
|
dprintk("RPC: xs_setup_xprt: couldn't allocate "
|
|
|
|
"rpc_xprt\n");
|
2006-10-18 02:44:27 +08:00
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
}
|
|
|
|
|
2010-09-29 20:02:43 +08:00
|
|
|
new = container_of(xprt, struct sock_xprt, xprt);
|
2007-07-08 19:08:54 +08:00
|
|
|
memcpy(&xprt->addr, args->dstaddr, args->addrlen);
|
|
|
|
xprt->addrlen = args->addrlen;
|
2007-07-10 04:23:35 +08:00
|
|
|
if (args->srcaddr)
|
2009-08-10 03:09:46 +08:00
|
|
|
memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
|
SUNRPC: Properly initialize sock_xprt.srcaddr in all cases
The source address field in the transport's sock_xprt is initialized
ONLY IF the RPC application passed a pointer to a source address
during the call to rpc_create(). However, xs_bind() subsequently uses
the value of this field without regard to whether the source address
was initialized during transport creation or not.
So far we've been lucky: the uninitialized value of this field is
zeroes. xs_bind(), until recently, used only the sin[6]_addr field in
this sockaddr, and all zeroes is a valid value for this: it means
ANYADDR. This is a happy coincidence.
However, xs_bind() now wants to use the sa_family field as well, and
expects it to be initialized to something other than zero.
Therefore, the source address sockaddr field should be fully
initialized at transport create time in _every_ case, not just when
the RPC application wants to use a specific bind address.
Bruce added a workaround for this missing initialization by adjusting
commit 6bc9638a, but the "right" way to do this is to ensure that the
source address sockaddr is always correctly initialized from the
get-go.
This patch doesn't introduce a behavior change. It's simply a
clean-up of Bruce's fix, to prevent future problems of this kind. It
may look like overkill, but
a) it clearly documents the default initial value of this field,
b) it doesn't assume that the sockaddr_storage memory is first
initialized to any particular value, and
c) it will fail verbosely if some unknown address family is passed
in
Originally introduced by commit d3bc9a1d.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
2010-10-20 23:53:01 +08:00
|
|
|
else {
|
|
|
|
int err;
|
|
|
|
err = xs_init_anyaddr(args->dstaddr->sa_family,
|
|
|
|
(struct sockaddr *)&new->srcaddr);
|
|
|
|
if (err != 0)
|
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
2006-10-18 02:44:27 +08:00
|
|
|
|
|
|
|
return xprt;
|
|
|
|
}
|
|
|
|
|
2007-12-21 05:03:54 +08:00
|
|
|
/* Default retransmit parameters for UDP: 5s initial timeout, growing by
 * 5s per retry up to 30s, with at most 5 retransmissions. */
static const struct rpc_timeout xs_udp_default_timeout = {
	.to_initval = 5 * HZ,
	.to_maxval = 30 * HZ,
	.to_increment = 5 * HZ,
	.to_retries = 5,
};
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
|
|
|
|
* xs_setup_udp - Set up transport to use a UDP socket
|
2007-07-08 19:08:54 +08:00
|
|
|
* @args: rpc transport creation arguments
|
2005-08-12 04:25:26 +08:00
|
|
|
*
|
|
|
|
*/
|
2007-10-25 00:24:02 +08:00
|
|
|
static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
|
2005-08-12 04:25:23 +08:00
|
|
|
{
|
2007-08-06 23:57:53 +08:00
|
|
|
struct sockaddr *addr = args->dstaddr;
|
2006-10-18 02:44:27 +08:00
|
|
|
struct rpc_xprt *xprt;
|
2006-12-06 05:35:26 +08:00
|
|
|
struct sock_xprt *transport;
|
2010-05-26 20:42:24 +08:00
|
|
|
struct rpc_xprt *ret;
|
2005-08-12 04:25:23 +08:00
|
|
|
|
2007-07-08 19:08:54 +08:00
|
|
|
xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries);
|
2006-10-18 02:44:27 +08:00
|
|
|
if (IS_ERR(xprt))
|
|
|
|
return xprt;
|
2006-12-06 05:35:26 +08:00
|
|
|
transport = container_of(xprt, struct sock_xprt, xprt);
|
2005-08-12 04:25:23 +08:00
|
|
|
|
2006-08-23 08:06:15 +08:00
|
|
|
xprt->prot = IPPROTO_UDP;
|
2005-08-26 07:25:49 +08:00
|
|
|
xprt->tsh_size = 0;
|
2005-08-12 04:25:23 +08:00
|
|
|
/* XXX: header size can vary due to auth type, IPv6, etc. */
|
|
|
|
xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
|
|
|
|
|
2005-08-26 07:25:55 +08:00
|
|
|
xprt->bind_timeout = XS_BIND_TO;
|
|
|
|
xprt->reestablish_timeout = XS_UDP_REEST_TO;
|
|
|
|
xprt->idle_timeout = XS_IDLE_DISC_TO;
|
2005-08-12 04:25:23 +08:00
|
|
|
|
2005-08-12 04:25:56 +08:00
|
|
|
xprt->ops = &xs_udp_ops;
|
2005-08-12 04:25:23 +08:00
|
|
|
|
2007-12-21 05:03:55 +08:00
|
|
|
xprt->timeout = &xs_udp_default_timeout;
|
2005-08-12 04:25:23 +08:00
|
|
|
|
2007-08-06 23:57:53 +08:00
|
|
|
switch (addr->sa_family) {
|
|
|
|
case AF_INET:
|
|
|
|
if (((struct sockaddr_in *)addr)->sin_port != htons(0))
|
|
|
|
xprt_set_bound(xprt);
|
|
|
|
|
|
|
|
INIT_DELAYED_WORK(&transport->connect_worker,
|
2010-10-04 20:58:02 +08:00
|
|
|
xs_udp_setup_socket);
|
2009-08-10 03:09:46 +08:00
|
|
|
xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
|
2007-08-06 23:57:53 +08:00
|
|
|
break;
|
|
|
|
case AF_INET6:
|
|
|
|
if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
|
|
|
|
xprt_set_bound(xprt);
|
|
|
|
|
|
|
|
INIT_DELAYED_WORK(&transport->connect_worker,
|
2010-10-04 20:58:02 +08:00
|
|
|
xs_udp_setup_socket);
|
2009-08-10 03:09:46 +08:00
|
|
|
xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
|
2007-08-06 23:57:53 +08:00
|
|
|
break;
|
|
|
|
default:
|
2010-05-26 20:42:24 +08:00
|
|
|
ret = ERR_PTR(-EAFNOSUPPORT);
|
|
|
|
goto out_err;
|
2007-08-06 23:57:53 +08:00
|
|
|
}
|
|
|
|
|
2009-08-10 03:09:46 +08:00
|
|
|
if (xprt_bound(xprt))
|
|
|
|
dprintk("RPC: set up xprt to %s (port %s) via %s\n",
|
|
|
|
xprt->address_strings[RPC_DISPLAY_ADDR],
|
|
|
|
xprt->address_strings[RPC_DISPLAY_PORT],
|
|
|
|
xprt->address_strings[RPC_DISPLAY_PROTO]);
|
|
|
|
else
|
|
|
|
dprintk("RPC: set up xprt to %s (autobind) via %s\n",
|
|
|
|
xprt->address_strings[RPC_DISPLAY_ADDR],
|
|
|
|
xprt->address_strings[RPC_DISPLAY_PROTO]);
|
2006-08-23 08:06:18 +08:00
|
|
|
|
2007-09-11 01:46:39 +08:00
|
|
|
if (try_module_get(THIS_MODULE))
|
|
|
|
return xprt;
|
2010-05-26 20:42:24 +08:00
|
|
|
ret = ERR_PTR(-EINVAL);
|
|
|
|
out_err:
|
2010-09-29 20:03:13 +08:00
|
|
|
xprt_free(xprt);
|
2010-05-26 20:42:24 +08:00
|
|
|
return ret;
|
2005-08-12 04:25:23 +08:00
|
|
|
}
|
|
|
|
|
2007-12-21 05:03:54 +08:00
|
|
|
/* Default retransmit parameters for TCP: flat 60s timeout with at most
 * 2 retransmissions (the connection layer handles delivery). */
static const struct rpc_timeout xs_tcp_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
	.to_retries = 2,
};
|
|
|
|
|
2005-08-12 04:25:26 +08:00
|
|
|
/**
|
|
|
|
* xs_setup_tcp - Set up transport to use a TCP socket
|
2007-07-08 19:08:54 +08:00
|
|
|
* @args: rpc transport creation arguments
|
2005-08-12 04:25:26 +08:00
|
|
|
*
|
|
|
|
*/
|
2007-10-25 00:24:02 +08:00
|
|
|
static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
|
2005-08-12 04:25:23 +08:00
|
|
|
{
|
2007-08-06 23:57:53 +08:00
|
|
|
struct sockaddr *addr = args->dstaddr;
|
2006-10-18 02:44:27 +08:00
|
|
|
struct rpc_xprt *xprt;
|
2006-12-06 05:35:26 +08:00
|
|
|
struct sock_xprt *transport;
|
2010-05-26 20:42:24 +08:00
|
|
|
struct rpc_xprt *ret;
|
2005-08-12 04:25:23 +08:00
|
|
|
|
2007-07-08 19:08:54 +08:00
|
|
|
xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
|
2006-10-18 02:44:27 +08:00
|
|
|
if (IS_ERR(xprt))
|
|
|
|
return xprt;
|
2006-12-06 05:35:26 +08:00
|
|
|
transport = container_of(xprt, struct sock_xprt, xprt);
|
2005-08-12 04:25:23 +08:00
|
|
|
|
2006-08-23 08:06:15 +08:00
|
|
|
xprt->prot = IPPROTO_TCP;
|
2005-08-26 07:25:49 +08:00
|
|
|
xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
|
|
|
|
xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
|
2005-08-12 04:25:23 +08:00
|
|
|
|
2005-08-26 07:25:55 +08:00
|
|
|
xprt->bind_timeout = XS_BIND_TO;
|
|
|
|
xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
|
|
|
|
xprt->idle_timeout = XS_IDLE_DISC_TO;
|
2005-08-12 04:25:23 +08:00
|
|
|
|
2005-08-12 04:25:56 +08:00
|
|
|
xprt->ops = &xs_tcp_ops;
|
2007-12-21 05:03:55 +08:00
|
|
|
xprt->timeout = &xs_tcp_default_timeout;
|
2005-08-12 04:25:23 +08:00
|
|
|
|
2007-08-06 23:57:53 +08:00
|
|
|
switch (addr->sa_family) {
|
|
|
|
case AF_INET:
|
|
|
|
if (((struct sockaddr_in *)addr)->sin_port != htons(0))
|
|
|
|
xprt_set_bound(xprt);
|
|
|
|
|
2009-08-10 03:09:46 +08:00
|
|
|
INIT_DELAYED_WORK(&transport->connect_worker,
|
2010-10-04 20:57:40 +08:00
|
|
|
xs_tcp_setup_socket);
|
2009-08-10 03:09:46 +08:00
|
|
|
xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
|
2007-08-06 23:57:53 +08:00
|
|
|
break;
|
|
|
|
case AF_INET6:
|
|
|
|
if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
|
|
|
|
xprt_set_bound(xprt);
|
|
|
|
|
2009-08-10 03:09:46 +08:00
|
|
|
INIT_DELAYED_WORK(&transport->connect_worker,
|
2010-10-04 20:57:40 +08:00
|
|
|
xs_tcp_setup_socket);
|
2009-08-10 03:09:46 +08:00
|
|
|
xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
|
2007-08-06 23:57:53 +08:00
|
|
|
break;
|
|
|
|
default:
|
2010-05-26 20:42:24 +08:00
|
|
|
ret = ERR_PTR(-EAFNOSUPPORT);
|
|
|
|
goto out_err;
|
2007-08-06 23:57:53 +08:00
|
|
|
}
|
|
|
|
|
2009-08-10 03:09:46 +08:00
|
|
|
if (xprt_bound(xprt))
|
|
|
|
dprintk("RPC: set up xprt to %s (port %s) via %s\n",
|
|
|
|
xprt->address_strings[RPC_DISPLAY_ADDR],
|
|
|
|
xprt->address_strings[RPC_DISPLAY_PORT],
|
|
|
|
xprt->address_strings[RPC_DISPLAY_PROTO]);
|
|
|
|
else
|
|
|
|
dprintk("RPC: set up xprt to %s (autobind) via %s\n",
|
|
|
|
xprt->address_strings[RPC_DISPLAY_ADDR],
|
|
|
|
xprt->address_strings[RPC_DISPLAY_PROTO]);
|
|
|
|
|
2006-08-23 08:06:18 +08:00
|
|
|
|
2007-09-11 01:46:39 +08:00
|
|
|
if (try_module_get(THIS_MODULE))
|
|
|
|
return xprt;
|
2010-05-26 20:42:24 +08:00
|
|
|
ret = ERR_PTR(-EINVAL);
|
|
|
|
out_err:
|
2010-09-29 20:03:13 +08:00
|
|
|
xprt_free(xprt);
|
2010-05-26 20:42:24 +08:00
|
|
|
return ret;
|
2005-08-12 04:25:23 +08:00
|
|
|
}
|
2006-12-06 05:35:51 +08:00
|
|
|
|
2009-09-10 22:33:30 +08:00
|
|
|
/**
 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
 * @args: rpc transport creation arguments
 *
 */
static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
{
	struct sockaddr *addr = args->dstaddr;
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;
	struct svc_sock *bc_sock;
	struct rpc_xprt *ret;

	if (args->bc_xprt->xpt_bc_xprt) {
		/*
		 * This server connection already has a backchannel
		 * export; we can't create a new one, as we wouldn't be
		 * able to match replies based on xid any more. So,
		 * reuse the already-existing one:
		 */
		return args->bc_xprt->xpt_bc_xprt;
	}
	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	/* Same framing parameters as the forechannel TCP transport. */
	xprt->prot = IPPROTO_TCP;
	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
	xprt->timeout = &xs_tcp_default_timeout;

	/* backchannel: no binding or reconnecting is ever needed, since
	 * the client side owns the connection. */
	xprt_set_bound(xprt);
	xprt->bind_timeout = 0;
	xprt->reestablish_timeout = 0;
	xprt->idle_timeout = 0;

	xprt->ops = &bc_tcp_ops;

	switch (addr->sa_family) {
	case AF_INET:
		xs_format_peer_addresses(xprt, "tcp",
					 RPCBIND_NETID_TCP);
		break;
	case AF_INET6:
		xs_format_peer_addresses(xprt, "tcp",
					 RPCBIND_NETID_TCP6);
		break;
	default:
		ret = ERR_PTR(-EAFNOSUPPORT);
		goto out_err;
	}

	dprintk("RPC: set up xprt to %s (port %s) via %s\n",
			xprt->address_strings[RPC_DISPLAY_ADDR],
			xprt->address_strings[RPC_DISPLAY_PORT],
			xprt->address_strings[RPC_DISPLAY_PROTO]);

	/*
	 * Once we've associated a backchannel xprt with a connection,
	 * we want to keep it around as long as long as the connection
	 * lasts, in case we need to start using it for a backchannel
	 * again; this reference won't be dropped until bc_xprt is
	 * destroyed.
	 */
	xprt_get(xprt);
	args->bc_xprt->xpt_bc_xprt = xprt;
	xprt->bc_xprt = args->bc_xprt;
	/* Borrow the already-established server-side socket. */
	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
	transport->sock = bc_sock->sk_sock;
	transport->inet = bc_sock->sk_sk;

	/*
	 * Since we don't want connections for the backchannel, we set
	 * the xprt status to connected
	 */
	xprt_set_connected(xprt);


	if (try_module_get(THIS_MODULE))
		return xprt;
	/* Drop the connection's reference taken above before freeing. */
	xprt_put(xprt);
	ret = ERR_PTR(-EINVAL);
out_err:
	xprt_free(xprt);
	return ret;
}
|
|
|
|
|
2007-09-11 01:46:39 +08:00
|
|
|
/* Transport class registrations: one xprt_class per supported socket
 * transport, matched by ident at rpc_create() time. */
static struct xprt_class	xs_udp_transport = {
	.list		= LIST_HEAD_INIT(xs_udp_transport.list),
	.name		= "udp",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_UDP,
	.setup		= xs_setup_udp,
};

static struct xprt_class	xs_tcp_transport = {
	.list		= LIST_HEAD_INIT(xs_tcp_transport.list),
	.name		= "tcp",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_TCP,
	.setup		= xs_setup_tcp,
};

static struct xprt_class	xs_bc_tcp_transport = {
	.list		= LIST_HEAD_INIT(xs_bc_tcp_transport.list),
	.name		= "tcp NFSv4.1 backchannel",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_BC_TCP,
	.setup		= xs_setup_bc_tcp,
};
|
|
|
|
|
2006-12-06 05:35:51 +08:00
|
|
|
/**
|
2007-09-11 01:46:39 +08:00
|
|
|
* init_socket_xprt - set up xprtsock's sysctls, register with RPC client
|
2006-12-06 05:35:51 +08:00
|
|
|
*
|
|
|
|
*/
|
|
|
|
int init_socket_xprt(void)
|
|
|
|
{
|
2006-12-06 05:35:54 +08:00
|
|
|
#ifdef RPC_DEBUG
|
2007-02-14 16:33:24 +08:00
|
|
|
if (!sunrpc_table_header)
|
2007-02-14 16:34:09 +08:00
|
|
|
sunrpc_table_header = register_sysctl_table(sunrpc_table);
|
2006-12-06 05:35:54 +08:00
|
|
|
#endif
|
|
|
|
|
2007-09-11 01:46:39 +08:00
|
|
|
xprt_register_transport(&xs_udp_transport);
|
|
|
|
xprt_register_transport(&xs_tcp_transport);
|
2009-09-10 22:33:30 +08:00
|
|
|
xprt_register_transport(&xs_bc_tcp_transport);
|
2007-09-11 01:46:39 +08:00
|
|
|
|
2006-12-06 05:35:51 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
 *
 * Mirror of init_socket_xprt(): tears down the debug sysctl table (if it
 * was installed) and removes the three transport classes from the RPC
 * client's registry.
 */
void cleanup_socket_xprt(void)
{
#ifdef RPC_DEBUG
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		/* Clear the handle so a later init can re-register cleanly. */
		sunrpc_table_header = NULL;
	}
#endif

	xprt_unregister_transport(&xs_udp_transport);
	xprt_unregister_transport(&xs_tcp_transport);
	xprt_unregister_transport(&xs_bc_tcp_transport);
}
|
2009-08-10 03:06:19 +08:00
|
|
|
|
2010-08-12 13:04:12 +08:00
|
|
|
static int param_set_uint_minmax(const char *val,
|
|
|
|
const struct kernel_param *kp,
|
2009-08-10 03:06:19 +08:00
|
|
|
unsigned int min, unsigned int max)
|
|
|
|
{
|
|
|
|
unsigned long num;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!val)
|
|
|
|
return -EINVAL;
|
|
|
|
ret = strict_strtoul(val, 0, &num);
|
|
|
|
if (ret == -EINVAL || num < min || num > max)
|
|
|
|
return -EINVAL;
|
|
|
|
*((unsigned int *)kp->arg) = num;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-08-12 13:04:12 +08:00
|
|
|
static int param_set_portnr(const char *val, const struct kernel_param *kp)
|
2009-08-10 03:06:19 +08:00
|
|
|
{
|
|
|
|
return param_set_uint_minmax(val, kp,
|
|
|
|
RPC_MIN_RESVPORT,
|
|
|
|
RPC_MAX_RESVPORT);
|
|
|
|
}
|
|
|
|
|
2010-08-12 13:04:12 +08:00
|
|
|
/*
 * kernel_param_ops backing the "portnr" parameter type: writes are
 * validated by param_set_portnr(), reads use the stock unsigned-int
 * formatter.
 */
static struct kernel_param_ops param_ops_portnr = {
	.set = param_set_portnr,
	.get = param_get_uint,
};
|
|
|
|
|
2009-08-10 03:06:19 +08:00
|
|
|
/*
 * Type-check hook consumed by module_param_named(): a "portnr"
 * parameter must be backed by a plain unsigned int.
 */
#define param_check_portnr(name, p) \
	__param_check(name, p, unsigned int);

/*
 * Writable (0644) module parameters; values are validated through
 * param_set_portnr() above.  Presumably these bound the source-port
 * range used when binding a reserved port -- confirm against the
 * xprt_min_resvport/xprt_max_resvport users elsewhere in sunrpc.
 */
module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
|
|
|
|
|
2010-08-12 13:04:12 +08:00
|
|
|
static int param_set_slot_table_size(const char *val,
|
|
|
|
const struct kernel_param *kp)
|
2009-08-10 03:06:19 +08:00
|
|
|
{
|
|
|
|
return param_set_uint_minmax(val, kp,
|
|
|
|
RPC_MIN_SLOT_TABLE,
|
|
|
|
RPC_MAX_SLOT_TABLE);
|
|
|
|
}
|
|
|
|
|
2010-08-12 13:04:12 +08:00
|
|
|
/*
 * kernel_param_ops backing the "slot_table_size" parameter type:
 * writes are validated by param_set_slot_table_size(), reads use the
 * stock unsigned-int formatter.
 */
static struct kernel_param_ops param_ops_slot_table_size = {
	.set = param_set_slot_table_size,
	.get = param_get_uint,
};
|
|
|
|
|
2009-08-10 03:06:19 +08:00
|
|
|
/*
 * Type-check hook consumed by module_param_named(): a
 * "slot_table_size" parameter must be backed by a plain unsigned int.
 */
#define param_check_slot_table_size(name, p) \
	__param_check(name, p, unsigned int);

/*
 * Writable (0644) per-protocol RPC slot-table sizes; values are
 * validated through param_set_slot_table_size() above.
 */
module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
		   slot_table_size, 0644);
module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
		   slot_table_size, 0644);
|
|
|
|
|