2019-05-19 20:08:55 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* linux/net/sunrpc/svcsock.c
|
|
|
|
*
|
|
|
|
* These are the RPC server socket internals.
|
|
|
|
*
|
|
|
|
* The server scheduling algorithm does not always distribute the load
|
|
|
|
* evenly when servicing a single client. May need to modify the
|
2007-12-31 11:07:57 +08:00
|
|
|
* svc_xprt_enqueue procedure...
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
|
|
|
* TCP support is largely untested and may be a little slow. The problem
|
|
|
|
* is that we currently do two separate recvfrom's, one for the 4-byte
|
|
|
|
* record length, and the second for the actual record. This could possibly
|
|
|
|
* be improved by always reading a minimum size of around 100 bytes and
|
|
|
|
* tucking any superfluous bytes away in a temporary store. Still, that
|
|
|
|
* leaves write requests out in the rain. An alternative may be to peek at
|
|
|
|
* the first skb in the queue, and if it matches the next TCP sequence
|
|
|
|
* number, to extract the record marker. Yuck.
|
|
|
|
*
|
|
|
|
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
|
|
|
|
*/
|
|
|
|
|
2007-08-29 06:50:33 +08:00
|
|
|
#include <linux/kernel.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/sched.h>
|
2011-05-27 21:12:25 +08:00
|
|
|
#include <linux/module.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/fcntl.h>
|
|
|
|
#include <linux/net.h>
|
|
|
|
#include <linux/in.h>
|
|
|
|
#include <linux/inet.h>
|
|
|
|
#include <linux/udp.h>
|
2005-08-10 11:20:07 +08:00
|
|
|
#include <linux/tcp.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/unistd.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/skbuff.h>
|
2006-10-02 17:17:48 +08:00
|
|
|
#include <linux/file.h>
|
2006-12-07 12:34:23 +08:00
|
|
|
#include <linux/freezer.h>
|
2023-07-20 02:31:03 +08:00
|
|
|
#include <linux/bvec.h>
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <net/sock.h>
|
|
|
|
#include <net/checksum.h>
|
|
|
|
#include <net/ip.h>
|
2007-02-12 16:53:36 +08:00
|
|
|
#include <net/ipv6.h>
|
2016-10-21 19:55:47 +08:00
|
|
|
#include <net/udp.h>
|
2008-04-15 00:27:01 +08:00
|
|
|
#include <net/tcp.h>
|
2005-08-10 11:08:28 +08:00
|
|
|
#include <net/tcp_states.h>
|
2023-07-28 01:35:23 +08:00
|
|
|
#include <net/tls_prot.h>
|
2023-04-21 01:56:24 +08:00
|
|
|
#include <net/handshake.h>
|
2016-12-25 03:46:01 +08:00
|
|
|
#include <linux/uaccess.h>
|
2020-06-15 14:25:23 +08:00
|
|
|
#include <linux/highmem.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <asm/ioctls.h>
|
2023-04-21 01:56:24 +08:00
|
|
|
#include <linux/key.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
#include <linux/sunrpc/types.h>
|
2007-02-12 16:53:32 +08:00
|
|
|
#include <linux/sunrpc/clnt.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/sunrpc/xdr.h>
|
2008-04-15 00:27:30 +08:00
|
|
|
#include <linux/sunrpc/msg_prot.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/sunrpc/svcsock.h>
|
|
|
|
#include <linux/sunrpc/stats.h>
|
2009-09-10 22:32:28 +08:00
|
|
|
#include <linux/sunrpc/xprt.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2023-01-20 08:45:16 +08:00
|
|
|
#include <trace/events/sock.h>
|
2020-03-18 03:06:31 +08:00
|
|
|
#include <trace/events/sunrpc.h>
|
|
|
|
|
2020-03-03 04:19:54 +08:00
|
|
|
#include "socklib.h"
|
2011-06-21 08:54:51 +08:00
|
|
|
#include "sunrpc.h"
|
|
|
|
|
2007-12-31 11:07:17 +08:00
|
|
|
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
|
2005-04-17 06:20:36 +08:00
|
|
|
|
/* To-do: to avoid tying up an nfsd thread while waiting for a
 * handshake request, the request could instead be deferred.
 */
enum {
	/* How long to wait for a TLS handshake to complete (jiffies) */
	SVC_HANDSHAKE_TO	= 5U * HZ
};
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
|
2012-08-14 05:46:17 +08:00
|
|
|
int flags);
|
2005-04-17 06:20:36 +08:00
|
|
|
static int svc_udp_recvfrom(struct svc_rqst *);
|
|
|
|
static int svc_udp_sendto(struct svc_rqst *);
|
2007-12-31 11:07:27 +08:00
|
|
|
static void svc_sock_detach(struct svc_xprt *);
|
2008-12-24 05:30:11 +08:00
|
|
|
static void svc_tcp_sock_detach(struct svc_xprt *);
|
2007-12-31 11:07:27 +08:00
|
|
|
static void svc_sock_free(struct svc_xprt *);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-12-31 11:07:42 +08:00
|
|
|
static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
|
2010-09-29 20:04:18 +08:00
|
|
|
struct net *, struct sockaddr *,
|
|
|
|
int, int);
|
2006-12-07 12:35:24 +08:00
|
|
|
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];

/* Give NFSD server sockets their own lockdep classes so lockdep does
 * not conflate their locking with that of ordinary client sockets.
 * Index [0] is IPv4, [1] is IPv6.
 */
static void svc_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;

	/* Reclassifying a live socket would corrupt lockdep state */
	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
		return;

	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
					      &svc_slock_key[0],
					      "sk_xprt.xpt_lock-AF_INET-NFSD",
					      &svc_key[0]);
		break;

	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
					      &svc_slock_key[1],
					      "sk_xprt.xpt_lock-AF_INET6-NFSD",
					      &svc_key[1]);
		break;

	default:
		BUG();
	}
}
#else
/* No-op when lockdep is not configured */
static void svc_reclassify_socket(struct socket *sock)
{
}
#endif
|
|
|
|
|
/**
 * svc_tcp_release_ctxt - Release transport-related resources
 * @xprt: the transport which owned the context
 * @ctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
 *
 * TCP receives copy the payload into rq_pages, so there is no
 * per-request transport context to release here.
 */
static void svc_tcp_release_ctxt(struct svc_xprt *xprt, void *ctxt)
{
}
|
|
|
|
|
/**
 * svc_udp_release_ctxt - Release transport-related resources
 * @xprt: the transport which owned the context
 * @ctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
 *
 * For UDP the context is the received datagram's sk_buff (saved by
 * svc_udp_recvfrom() when the data is used in place); drop our
 * reference to it. consume_skb() tolerates a NULL argument, so the
 * explicit NULL guard the old code carried is unnecessary.
 */
static void svc_udp_release_ctxt(struct svc_xprt *xprt, void *ctxt)
{
	consume_skb(ctxt);
}
|
|
|
|
|
union svc_pktinfo_u {
	struct in_pktinfo pkti;
	struct in6_pktinfo pkti6;
};
#define SVC_PKTINFO_SPACE \
	CMSG_SPACE(sizeof(union svc_pktinfo_u))

/* Build an IP_PKTINFO/IPV6_PKTINFO control message so the UDP reply
 * leaves from the same local address the request arrived on (saved
 * earlier in @rqstp's daddr) — needed on multihomed hosts.
 */
static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
{
	struct svc_sock *svsk =
		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
	switch (svsk->sk_sk->sk_family) {
	case AF_INET: {
			struct in_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IP;
			cmh->cmsg_type = IP_PKTINFO;
			pki->ipi_ifindex = 0;
			pki->ipi_spec_dst.s_addr =
				 svc_daddr_in(rqstp)->sin_addr.s_addr;
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;

	case AF_INET6: {
			struct in6_pktinfo *pki = CMSG_DATA(cmh);
			struct sockaddr_in6 *daddr = svc_daddr_in6(rqstp);

			cmh->cmsg_level = SOL_IPV6;
			cmh->cmsg_type = IPV6_PKTINFO;
			/* scope id matters for link-local addresses */
			pki->ipi6_ifindex = daddr->sin6_scope_id;
			pki->ipi6_addr = daddr->sin6_addr;
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;
	}
}
|
|
|
|
|
/* Socket transports send the whole reply in one message, so marking a
 * result payload is a no-op here (unlike RDMA, which uses the marked
 * ranges). Always succeeds.
 */
static int svc_sock_result_payload(struct svc_rqst *rqstp, unsigned int offset,
				   unsigned int length)
{
	return 0;
}
|
|
|
|
|
/*
 * Report socket names for nfsdfs
 *
 * Formats one "<family> <proto> <addr> <port>" line into @buf.
 * Returns the number of bytes written, or -ENAMETOOLONG (with @buf
 * set to the empty string) if the line does not fit in @remaining.
 */
static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining)
{
	const struct sock *sk = svsk->sk_sk;
	const char *proto_name = sk->sk_protocol == IPPROTO_UDP ?
							"udp" : "tcp";
	int len;

	switch (sk->sk_family) {
	case PF_INET:
		len = snprintf(buf, remaining, "ipv4 %s %pI4 %d\n",
				proto_name,
				&inet_sk(sk)->inet_rcv_saddr,
				inet_sk(sk)->inet_num);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		len = snprintf(buf, remaining, "ipv6 %s %pI6 %d\n",
				proto_name,
				&sk->sk_v6_rcv_saddr,
				inet_sk(sk)->inet_num);
		break;
#endif
	default:
		len = snprintf(buf, remaining, "*unknown-%d*\n",
				sk->sk_family);
	}

	if (len >= remaining) {
		/* truncated: report nothing rather than a partial line */
		*buf = '\0';
		return -ENAMETOOLONG;
	}
	return len;
}
|
|
|
|
|
/* Interpret a TLS control message that accompanied received data.
 *
 * Returns @ret unchanged for plain data (content type 0) and for
 * application-data records; -EAGAIN to skip non-fatal/unknown record
 * types; -ENOTCONN when a fatal TLS alert was received.
 */
static int
svc_tcp_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
			  struct cmsghdr *cmsg, int ret)
{
	u8 content_type = tls_get_record_type(sock->sk, cmsg);
	u8 level, description;

	switch (content_type) {
	case 0:
		break;
	case TLS_RECORD_TYPE_DATA:
		/* TLS sets EOR at the end of each application data
		 * record, even though there might be more frames
		 * waiting to be decrypted.
		 */
		msg->msg_flags &= ~MSG_EOR;
		break;
	case TLS_RECORD_TYPE_ALERT:
		tls_alert_recv(sock->sk, msg, &level, &description);
		ret = (level == TLS_ALERT_LEVEL_FATAL) ?
			-ENOTCONN : -EAGAIN;
		break;
	default:
		/* discard this record type */
		ret = -EAGAIN;
	}
	return ret;
}
|
|
|
|
|
|
|
|
/* Non-blocking receive from the transport socket, with room for one
 * TLS record-type control message. If a control message arrived
 * (msg_controllen was consumed), hand it to
 * svc_tcp_sock_process_cmsg() to adjust the return value.
 */
static int
svc_tcp_sock_recv_cmsg(struct svc_sock *svsk, struct msghdr *msg)
{
	union {
		struct cmsghdr	cmsg;
		u8		buf[CMSG_SPACE(sizeof(u8))];
	} u;
	struct socket *sock = svsk->sk_sock;
	int ret;

	msg->msg_control = &u;
	msg->msg_controllen = sizeof(u);
	ret = sock_recvmsg(sock, msg, MSG_DONTWAIT);
	if (unlikely(msg->msg_controllen != sizeof(u)))
		ret = svc_tcp_sock_process_cmsg(sock, msg, &u.cmsg, ret);
	return ret;
}
|
|
|
|
|
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/* Flush the data cache for each page that just received message bytes.
 * @seek bytes were received earlier; whole pages already covered by
 * that earlier receive are skipped (seek & PAGE_MASK).
 */
static void svc_flush_bvec(const struct bio_vec *bvec, size_t size, size_t seek)
{
	struct bvec_iter bi = {
		.bi_size	= size + seek,
	};
	struct bio_vec bv;

	bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
	for_each_bvec(bv, bvec, bi, bi)
		flush_dcache_page(bv.bv_page);
}
#else
/* No-op on architectures without a data-cache flush requirement */
static inline void svc_flush_bvec(const struct bio_vec *bvec, size_t size,
				  size_t seek)
{
}
#endif
|
|
|
|
|
/*
 * Read from @rqstp's transport socket. The incoming message fills whole
 * pages in @rqstp's rq_pages array until the last page of the message
 * has been received into a partial page.
 *
 * Returns the (non-blocking) recvmsg result: bytes received, or a
 * negative errno.
 */
static ssize_t svc_tcp_read_msg(struct svc_rqst *rqstp, size_t buflen,
				size_t seek)
{
	struct svc_sock *svsk =
		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
	struct bio_vec *bvec = rqstp->rq_bvec;
	struct msghdr msg = { NULL };
	unsigned int i;
	ssize_t len;
	size_t t;

	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);

	/* Map enough whole pages from rq_pages to cover @buflen bytes */
	for (i = 0, t = 0; t < buflen; i++, t += PAGE_SIZE)
		bvec_set_page(&bvec[i], rqstp->rq_pages[i], PAGE_SIZE, 0);
	rqstp->rq_respages = &rqstp->rq_pages[i];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	iov_iter_bvec(&msg.msg_iter, ITER_DEST, bvec, i, buflen);
	if (seek) {
		/* Resuming a partially-received record: skip what we
		 * already have and only ask for the remainder.
		 */
		iov_iter_advance(&msg.msg_iter, seek);
		buflen -= seek;
	}
	len = svc_tcp_sock_recv_cmsg(svsk, &msg);
	if (len > 0)
		svc_flush_bvec(bvec, len, seek);

	/* If we read a full record, then assume there may be more
	 * data to read (stream based sockets only!)
	 */
	if (len == buflen)
		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);

	return len;
}
|
|
|
|
|
|
|
|
/*
 * Set socket snd and rcv buffer lengths
 *
 * Sizes both buffers to hold roughly @nreqs maximum-sized messages
 * (times two for slack). The clamp keeps nreqs * max_mesg * 2 from
 * overflowing the int-typed sk_sndbuf/sk_rcvbuf fields.
 */
static void svc_sock_setbufsize(struct svc_sock *svsk, unsigned int nreqs)
{
	unsigned int max_mesg = svsk->sk_xprt.xpt_server->sv_max_mesg;
	struct socket *sock = svsk->sk_sock;

	nreqs = min(nreqs, INT_MAX / 2 / max_mesg);

	lock_sock(sock->sk);
	sock->sk->sk_sndbuf = nreqs * max_mesg * 2;
	sock->sk->sk_rcvbuf = nreqs * max_mesg * 2;
	/* wake any writer that may now have space */
	sock->sk->sk_write_space(sock->sk);
	release_sock(sock->sk);
}
|
2014-05-20 01:40:22 +08:00
|
|
|
|
2018-03-27 22:49:38 +08:00
|
|
|
static void svc_sock_secure_port(struct svc_rqst *rqstp)
|
2014-05-20 01:40:22 +08:00
|
|
|
{
|
2018-03-27 22:49:38 +08:00
|
|
|
if (svc_port_is_privileged(svc_addr(rqstp)))
|
2023-01-07 01:43:37 +08:00
|
|
|
set_bit(RQ_SECURE, &rqstp->rq_flags);
|
2018-03-27 22:49:38 +08:00
|
|
|
else
|
2023-01-07 01:43:37 +08:00
|
|
|
clear_bit(RQ_SECURE, &rqstp->rq_flags);
|
2014-05-20 01:40:22 +08:00
|
|
|
}
|
|
|
|
|
/*
 * INET callback when data has been received on the socket.
 */
static void svc_data_ready(struct sock *sk)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

	trace_sk_data_ready(sk);

	if (svsk) {
		/* Refer to svc_setup_socket() for details. */
		rmb();
		/* chain to the socket's original data_ready callback */
		svsk->sk_odata(sk);
		trace_svcsock_data_ready(&svsk->sk_xprt, 0);
		/* while a TLS handshake is in flight, the handshake
		 * code owns the data stream — don't enqueue */
		if (test_bit(XPT_HANDSHAKE, &svsk->sk_xprt.xpt_flags))
			return;
		/* enqueue only on the 0->1 transition of XPT_DATA */
		if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags))
			svc_xprt_enqueue(&svsk->sk_xprt);
	}
}
|
|
|
|
|
|
|
|
/*
 * INET callback when space is newly available on the socket.
 */
static void svc_write_space(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)(sk->sk_user_data);

	if (svsk) {
		/* Refer to svc_setup_socket() for details. */
		rmb();
		trace_svcsock_write_space(&svsk->sk_xprt, 0);
		/* chain to the socket's original write_space callback */
		svsk->sk_owspace(sk);
		svc_xprt_enqueue(&svsk->sk_xprt);
	}
}
|
|
|
|
|
2014-07-25 11:59:32 +08:00
|
|
|
static int svc_tcp_has_wspace(struct svc_xprt *xprt)
|
|
|
|
{
|
2016-06-24 22:55:51 +08:00
|
|
|
struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
|
2014-07-25 11:59:32 +08:00
|
|
|
|
|
|
|
if (test_bit(XPT_LISTENER, &xprt->xpt_flags))
|
|
|
|
return 1;
|
2016-06-24 22:55:51 +08:00
|
|
|
return !test_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
|
2014-07-25 11:59:33 +08:00
|
|
|
}
|
|
|
|
|
/* Disable lingering on close so this temporary transport is torn
 * down without waiting for unsent data.
 */
static void svc_tcp_kill_temp_xprt(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);

	sock_no_linger(svsk->sk_sock->sk);
}
|
|
|
|
|
/**
 * svc_tcp_handshake_done - Handshake completion handler
 * @data: address of xprt to wake
 * @status: status of handshake
 * @peerid: serial number of key containing the remote peer's identity
 *
 * If a security policy is specified as an export option, we don't
 * have a specific export here to check. So we set a "TLS session
 * is present" flag on the xprt and let an upper layer enforce local
 * security policy.
 */
static void svc_tcp_handshake_done(void *data, int status, key_serial_t peerid)
{
	struct svc_xprt *xprt = data;
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);

	if (!status) {
		if (peerid != TLS_NO_PEERID)
			set_bit(XPT_PEER_AUTH, &xprt->xpt_flags);
		set_bit(XPT_TLS_SESSION, &xprt->xpt_flags);
	}
	/* clear the gate before waking the waiter in svc_tcp_handshake() */
	clear_bit(XPT_HANDSHAKE, &xprt->xpt_flags);
	complete_all(&svsk->sk_handshake_done);
}
|
|
|
|
|
|
|
|
/**
 * svc_tcp_handshake - Perform a transport-layer security handshake
 * @xprt: connected transport endpoint
 *
 * Kicks a TLS server-hello upcall to user space and waits (bounded
 * by SVC_HANDSHAKE_TO) for svc_tcp_handshake_done() to complete it.
 * On failure or timeout the transport is marked for close.
 */
static void svc_tcp_handshake(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct sock *sk = svsk->sk_sock->sk;
	struct tls_handshake_args args = {
		.ta_sock	= svsk->sk_sock,
		.ta_done	= svc_tcp_handshake_done,
		.ta_data	= xprt,
	};
	int ret;

	trace_svc_tls_upcall(xprt);

	clear_bit(XPT_TLS_SESSION, &xprt->xpt_flags);
	init_completion(&svsk->sk_handshake_done);

	ret = tls_server_hello_x509(&args, GFP_KERNEL);
	if (ret) {
		trace_svc_tls_not_started(xprt);
		goto out_failed;
	}

	ret = wait_for_completion_interruptible_timeout(&svsk->sk_handshake_done,
							SVC_HANDSHAKE_TO);
	if (ret <= 0) {
		/* If cancel fails, the done callback already ran (or is
		 * running), so fall through and check the session flag.
		 */
		if (tls_handshake_cancel(sk)) {
			trace_svc_tls_timed_out(xprt);
			goto out_close;
		}
	}

	if (!test_bit(XPT_TLS_SESSION, &xprt->xpt_flags)) {
		trace_svc_tls_unavailable(xprt);
		goto out_close;
	}

	/* Mark the transport ready in case the remote sent RPC
	 * traffic before the kernel received the handshake
	 * completion downcall.
	 */
	set_bit(XPT_DATA, &xprt->xpt_flags);
	svc_xprt_enqueue(xprt);
	return;

out_close:
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
out_failed:
	clear_bit(XPT_HANDSHAKE, &xprt->xpt_flags);
	set_bit(XPT_DATA, &xprt->xpt_flags);
	svc_xprt_enqueue(xprt);
}
|
|
|
|
|
/*
 * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo
 *
 * Extract the IPv4 destination address from an IP_PKTINFO control
 * message into @rqstp's daddr. Returns 1 on success, 0 if the cmsg
 * is not an IP_PKTINFO.
 */
static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
				     struct cmsghdr *cmh)
{
	struct in_pktinfo *pki = CMSG_DATA(cmh);
	struct sockaddr_in *daddr = svc_daddr_in(rqstp);

	if (cmh->cmsg_type != IP_PKTINFO)
		return 0;

	daddr->sin_family = AF_INET;
	daddr->sin_addr.s_addr = pki->ipi_spec_dst.s_addr;
	return 1;
}
|
|
|
|
|
|
|
|
/*
 * See net/ipv6/datagram.c : ip6_datagram_recv_ctl
 *
 * Extract the IPv6 destination address (and scope id) from an
 * IPV6_PKTINFO control message into @rqstp's daddr. Returns 1 on
 * success, 0 if the cmsg is not an IPV6_PKTINFO.
 */
static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
				     struct cmsghdr *cmh)
{
	struct in6_pktinfo *pki = CMSG_DATA(cmh);
	struct sockaddr_in6 *daddr = svc_daddr_in6(rqstp);

	if (cmh->cmsg_type != IPV6_PKTINFO)
		return 0;

	daddr->sin6_family = AF_INET6;
	daddr->sin6_addr = pki->ipi6_addr;
	daddr->sin6_scope_id = pki->ipi6_ifindex;
	return 1;
}
|
|
|
|
|
2007-12-31 11:08:12 +08:00
|
|
|
/*
|
|
|
|
* Copy the UDP datagram's destination address to the rqstp structure.
|
|
|
|
* The 'destination' address in this case is the address to which the
|
|
|
|
* peer sent the datagram, i.e. our local address. For multihomed
|
|
|
|
* hosts, this can change from msg to msg. Note that only the IP
|
|
|
|
* address changes, the port number should remain the same.
|
|
|
|
*/
|
2009-07-13 22:54:26 +08:00
|
|
|
static int svc_udp_get_dest_address(struct svc_rqst *rqstp,
|
|
|
|
struct cmsghdr *cmh)
|
2007-02-12 16:53:38 +08:00
|
|
|
{
|
2009-07-13 22:54:26 +08:00
|
|
|
switch (cmh->cmsg_level) {
|
|
|
|
case SOL_IP:
|
|
|
|
return svc_udp_get_dest_address4(rqstp, cmh);
|
|
|
|
case SOL_IPV6:
|
|
|
|
return svc_udp_get_dest_address6(rqstp, cmh);
|
2007-02-12 16:53:38 +08:00
|
|
|
}
|
2009-07-13 22:54:26 +08:00
|
|
|
|
|
|
|
return 0;
|
2007-02-12 16:53:38 +08:00
|
|
|
}
|
|
|
|
|
/**
 * svc_udp_recvfrom - Receive a datagram from a UDP socket.
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Called in a loop when XPT_DATA has been set.
 *
 * Returns:
 *   On success, the number of bytes in a received RPC Call, or
 *   %0 if a complete RPC Call message was not ready to return
 */
static int svc_udp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk =
		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
	struct sk_buff	*skb;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	struct msghdr msg = {
		.msg_name = svc_addr(rqstp),
		.msg_control = cmh,
		.msg_controllen = sizeof(buffer),
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len;
	int err;

	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
		/* udp sockets need large rcvbuf as all pending
		 * requests are still in that buffer.  sndbuf must
		 * also be large enough that there is enough space
		 * for one reply per thread.  We count all threads
		 * rather than threads in a particular pool, which
		 * provides an upper bound on the number of threads
		 * which will access the socket.
		 */
		svc_sock_setbufsize(svsk, serv->sv_nrthreads + 3);

	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
	/* Peek first to collect source address and pktinfo cmsg;
	 * the datagram itself is pulled via skb_recv_udp() below.
	 */
	err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
			     0, 0, MSG_PEEK | MSG_DONTWAIT);
	if (err < 0)
		goto out_recv_err;
	skb = skb_recv_udp(svsk->sk_sk, MSG_DONTWAIT, &err);
	if (!skb)
		goto out_recv_err;

	len = svc_addr_len(svc_addr(rqstp));
	rqstp->rq_addrlen = len;
	if (skb->tstamp == 0) {
		skb->tstamp = ktime_get_real();
		/* Don't enable netstamp, sunrpc doesn't
		   need that much accuracy */
	}
	sock_write_timestamp(svsk->sk_sk, skb->tstamp);
	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */

	len = skb->len;
	rqstp->rq_arg.len = len;
	trace_svcsock_udp_recv(&svsk->sk_xprt, len);

	rqstp->rq_prot = IPPROTO_UDP;

	if (!svc_udp_get_dest_address(rqstp, cmh))
		goto out_cmsg_err;
	rqstp->rq_daddrlen = svc_addr_len(svc_daddr(rqstp));

	if (skb_is_nonlinear(skb)) {
		/* we have to copy */
		local_bh_disable();
		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb))
			goto out_bh_enable;
		local_bh_enable();
		consume_skb(skb);
	} else {
		/* we can use it in-place; the skb is released later
		 * via svc_udp_release_ctxt()
		 */
		rqstp->rq_arg.head[0].iov_base = skb->data;
		rqstp->rq_arg.head[0].iov_len = len;
		if (skb_checksum_complete(skb))
			goto out_free;
		rqstp->rq_xprt_ctxt = skb;
	}

	/* Lay out rq_arg: head first, remainder (if any) in pages */
	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
		rqstp->rq_respages = rqstp->rq_pages+1;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
		rqstp->rq_respages = rqstp->rq_pages + 1 +
			DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE);
	}
	rqstp->rq_next_page = rqstp->rq_respages+1;

	if (serv->sv_stats)
		serv->sv_stats->netudpcnt++;

	svc_sock_secure_port(rqstp);
	svc_xprt_received(rqstp->rq_xprt);
	return len;

out_recv_err:
	if (err != -EAGAIN) {
		/* possibly an icmp error */
		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
	}
	trace_svcsock_udp_recv_err(&svsk->sk_xprt, err);
	goto out_clear_busy;
out_cmsg_err:
	net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n",
			     cmh->cmsg_level, cmh->cmsg_type);
	goto out_free;
out_bh_enable:
	local_bh_enable();
out_free:
	kfree_skb(skb);
out_clear_busy:
	svc_xprt_received(rqstp->rq_xprt);
	return 0;
}
|
|
|
|
|
2020-03-03 04:20:33 +08:00
|
|
|
/**
 * svc_udp_sendto - Send out a reply on a UDP socket
 * @rqstp: completed svc_rqst
 *
 * xpt_mutex ensures @rqstp's whole message is written to the socket
 * without interruption.
 *
 * Returns the number of bytes sent, or a negative errno.
 */
static int svc_udp_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svc_sock	*svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct xdr_buf *xdr = &rqstp->rq_res;
	/* Control-message buffer sized and aligned for packet-info data */
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	struct msghdr msg = {
		.msg_name	= &rqstp->rq_addr,
		.msg_namelen	= rqstp->rq_addrlen,
		.msg_control	= cmh,
		.msg_flags	= MSG_SPLICE_PAGES,
		.msg_controllen	= sizeof(buffer),
	};
	unsigned int count;
	int err;

	/* Release any receive-side context before replying */
	svc_udp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
	rqstp->rq_xprt_ctxt = NULL;

	svc_set_cmsg_data(rqstp, cmh);

	mutex_lock(&xprt->xpt_mutex);
	if (svc_xprt_is_dead(xprt))
		goto out_notconn;

	count = xdr_buf_to_bvec(rqstp->rq_bvec,
				ARRAY_SIZE(rqstp->rq_bvec), xdr);

	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
		      count, rqstp->rq_res.len);
	err = sock_sendmsg(svsk->sk_sock, &msg);
	if (err == -ECONNREFUSED) {
		/* ICMP error on earlier request; retry the send once
		 * after rebuilding the (already-consumed) iterator.
		 */
		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
			      count, rqstp->rq_res.len);
		err = sock_sendmsg(svsk->sk_sock, &msg);
	}

	trace_svcsock_udp_send(xprt, err);

	mutex_unlock(&xprt->xpt_mutex);
	return err;

out_notconn:
	mutex_unlock(&xprt->xpt_mutex);
	return -ENOTCONN;
}
|
|
|
|
|
2007-12-31 11:07:31 +08:00
|
|
|
static int svc_udp_has_wspace(struct svc_xprt *xprt)
|
|
|
|
{
|
|
|
|
struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
|
2007-12-31 11:07:50 +08:00
|
|
|
struct svc_serv *serv = xprt->xpt_server;
|
2007-12-31 11:07:31 +08:00
|
|
|
unsigned long required;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set the SOCK_NOSPACE flag before checking the available
|
|
|
|
* sock space.
|
|
|
|
*/
|
|
|
|
set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
|
2007-12-31 11:07:55 +08:00
|
|
|
required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
|
2007-12-31 11:07:31 +08:00
|
|
|
if (required*2 > sock_wspace(svsk->sk_sk))
|
|
|
|
return 0;
|
|
|
|
clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2007-12-31 11:07:36 +08:00
|
|
|
/* UDP is connectionless; accept must never be invoked on a UDP
 * transport, so this handler traps with BUG().
 */
static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt)
{
	BUG();
	return NULL;
}
|
|
|
|
|
2016-11-12 02:16:22 +08:00
|
|
|
/* UDP transports have no temporary (per-connection) state to tear
 * down, so this xpo_kill_temp_xprt handler is intentionally empty.
 */
static void svc_udp_kill_temp_xprt(struct svc_xprt *xprt)
{
}
|
|
|
|
|
2007-12-31 11:07:42 +08:00
|
|
|
static struct svc_xprt *svc_udp_create(struct svc_serv *serv,
|
2010-09-29 20:04:18 +08:00
|
|
|
struct net *net,
|
2007-12-31 11:07:42 +08:00
|
|
|
struct sockaddr *sa, int salen,
|
|
|
|
int flags)
|
|
|
|
{
|
2010-09-29 20:04:18 +08:00
|
|
|
return svc_create_socket(serv, IPPROTO_UDP, net, sa, salen, flags);
|
2007-12-31 11:07:42 +08:00
|
|
|
}
|
|
|
|
|
2017-08-01 23:59:49 +08:00
|
|
|
/* Transport method table for UDP svc sockets */
static const struct svc_xprt_ops svc_udp_ops = {
	.xpo_create = svc_udp_create,
	.xpo_recvfrom = svc_udp_recvfrom,
	.xpo_sendto = svc_udp_sendto,
	.xpo_result_payload = svc_sock_result_payload,
	.xpo_release_ctxt = svc_udp_release_ctxt,
	.xpo_detach = svc_sock_detach,
	.xpo_free = svc_sock_free,
	.xpo_has_wspace = svc_udp_has_wspace,
	.xpo_accept = svc_udp_accept,
	.xpo_kill_temp_xprt = svc_udp_kill_temp_xprt,
};
|
|
|
|
|
|
|
|
/* Transport class registration record for "udp" */
static struct svc_xprt_class svc_udp_class = {
	.xcl_name = "udp",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_udp_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP,
	.xcl_ident = XPRT_TRANSPORT_UDP,
};
|
|
|
|
|
2007-12-31 11:07:50 +08:00
|
|
|
/* Initialize a freshly created UDP svc socket: wire up transport
 * state, socket callbacks, buffer sizing, and per-packet destination
 * address reporting.
 */
static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
{
	svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_udp_class,
		      &svsk->sk_xprt, serv);
	/* Per-connection auth caching is switched off for UDP */
	clear_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
	svsk->sk_sk->sk_data_ready = svc_data_ready;
	svsk->sk_sk->sk_write_space = svc_write_space;

	/* initialise setting must have enough space to
	 * receive and respond to one request.
	 * svc_udp_recvfrom will re-adjust if necessary
	 */
	svc_sock_setbufsize(svsk, 3);

	/* data might have come in before data_ready set up */
	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
	set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);

	/* make sure we get destination address info */
	switch (svsk->sk_sk->sk_family) {
	case AF_INET:
		ip_sock_set_pktinfo(svsk->sk_sock->sk);
		break;
	case AF_INET6:
		ip6_sock_set_recvpktinfo(svsk->sk_sock->sk);
		break;
	default:
		/* svc sockets are only created for INET families */
		BUG();
	}
}
|
|
|
|
|
|
|
|
/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void svc_tcp_listen_data_ready(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	trace_sk_data_ready(sk);

	/*
	 * This callback may called twice when a new connection
	 * is established as a child socket inherits everything
	 * from a parent LISTEN socket.
	 * 1) data_ready method of the parent socket will be called
	 *    when one of child sockets become ESTABLISHED.
	 * 2) data_ready method of the child socket may be called
	 *    when it receives data before the socket is accepted.
	 * In case of 2, we should ignore it silently and DO NOT
	 * dereference svsk.
	 */
	if (sk->sk_state != TCP_LISTEN)
		return;

	if (svsk) {
		/* Refer to svc_setup_socket() for details. */
		rmb();
		/* Chain to the callback saved from before setup */
		svsk->sk_odata(sk);
		set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
		svc_xprt_enqueue(&svsk->sk_xprt);
	}
}
|
|
|
|
|
|
|
|
/*
 * A state change on a connected socket means it's dying or dead.
 */
static void svc_tcp_state_change(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	if (svsk) {
		/* Refer to svc_setup_socket() for details. */
		rmb();
		/* Chain to the callback saved from before setup */
		svsk->sk_ostate(sk);
		trace_svcsock_tcp_state(&svsk->sk_xprt, svsk->sk_sock);
		/* Any state other than ESTABLISHED schedules a close */
		if (sk->sk_state != TCP_ESTABLISHED)
			svc_xprt_deferred_close(&svsk->sk_xprt);
	}
}
|
|
|
|
|
|
|
|
/*
 * Accept a TCP connection
 *
 * Returns the new child transport, or NULL when no connection could
 * be accepted or set up.
 */
static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct sockaddr_storage addr;
	struct sockaddr	*sin = (struct sockaddr *) &addr;
	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
	struct socket	*sock = svsk->sk_sock;
	struct socket	*newsock;
	struct svc_sock	*newsvsk;
	int		err, slen;

	if (!sock)
		return NULL;

	clear_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
	err = kernel_accept(sock, &newsock, O_NONBLOCK);
	if (err < 0) {
		/* -EAGAIN simply means no connection was pending */
		if (err != -EAGAIN)
			trace_svcsock_accept_err(xprt, serv->sv_name, err);
		return NULL;
	}
	/* Attach a struct file so the socket participates in fd-based
	 * lifetime management (released via sockfd_put below).
	 */
	if (IS_ERR(sock_alloc_file(newsock, O_NONBLOCK, NULL)))
		return NULL;

	/* More connections may be pending; re-arm XPT_CONN */
	set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);

	err = kernel_getpeername(newsock, sin);
	if (err < 0) {
		trace_svcsock_getpeername_err(xprt, serv->sv_name, err);
		goto failed;		/* aborted connection or whatever */
	}
	slen = err;

	/* Reset the inherited callbacks before calling svc_setup_socket */
	newsock->sk->sk_state_change = svsk->sk_ostate;
	newsock->sk->sk_data_ready = svsk->sk_odata;
	newsock->sk->sk_write_space = svsk->sk_owspace;

	/* make sure that a write doesn't block forever when
	 * low on memory
	 */
	newsock->sk->sk_sndtimeo = HZ*30;

	newsvsk = svc_setup_socket(serv, newsock,
				 (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY));
	if (IS_ERR(newsvsk))
		goto failed;
	svc_xprt_set_remote(&newsvsk->sk_xprt, sin, slen);
	err = kernel_getsockname(newsock, sin);
	slen = err;
	if (unlikely(err < 0))
		/* fall back to a minimal local-address length */
		slen = offsetof(struct sockaddr, sa_data);
	svc_xprt_set_local(&newsvsk->sk_xprt, sin, slen);

	if (sock_is_loopback(newsock->sk))
		set_bit(XPT_LOCAL, &newsvsk->sk_xprt.xpt_flags);
	else
		clear_bit(XPT_LOCAL, &newsvsk->sk_xprt.xpt_flags);
	if (serv->sv_stats)
		serv->sv_stats->nettcpconn++;

	return &newsvsk->sk_xprt;

failed:
	sockfd_put(newsock);
	return NULL;
}
|
|
|
|
|
2020-05-21 05:30:12 +08:00
|
|
|
/* Move pages holding a previously received partial record from the
 * svc_sock back into @rqstp so reception can continue where it left
 * off.  Returns the number of bytes already received (0 if none).
 */
static size_t svc_tcp_restore_pages(struct svc_sock *svsk,
				    struct svc_rqst *rqstp)
{
	size_t len = svsk->sk_datalen;
	unsigned int i, npages;

	if (!len)
		return 0;
	npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		/* Drop the rqst's own page before taking ownership of
		 * the saved one.
		 */
		if (rqstp->rq_pages[i] != NULL)
			put_page(rqstp->rq_pages[i]);
		BUG_ON(svsk->sk_pages[i] == NULL);
		rqstp->rq_pages[i] = svsk->sk_pages[i];
		svsk->sk_pages[i] = NULL;
	}
	/* Point the head iovec at the restored first page */
	rqstp->rq_arg.head[0].iov_base = page_address(rqstp->rq_pages[0]);
	return len;
}
|
|
|
|
|
|
|
|
static void svc_tcp_save_pages(struct svc_sock *svsk, struct svc_rqst *rqstp)
|
|
|
|
{
|
|
|
|
unsigned int i, len, npages;
|
|
|
|
|
2012-12-04 05:45:35 +08:00
|
|
|
if (svsk->sk_datalen == 0)
|
2011-02-25 03:25:33 +08:00
|
|
|
return;
|
2012-12-04 05:45:35 +08:00
|
|
|
len = svsk->sk_datalen;
|
2011-02-25 03:25:33 +08:00
|
|
|
npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
|
|
|
for (i = 0; i < npages; i++) {
|
|
|
|
svsk->sk_pages[i] = rqstp->rq_pages[i];
|
|
|
|
rqstp->rq_pages[i] = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Release any pages stashed for a partial record and reset the
 * stream-reassembly counters on @svsk.
 */
static void svc_tcp_clear_pages(struct svc_sock *svsk)
{
	unsigned int i, len, npages;

	if (svsk->sk_datalen == 0)
		goto out;
	len = svsk->sk_datalen;
	npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		/* A NULL slot here indicates corrupted bookkeeping;
		 * warn once and keep freeing the rest.
		 */
		if (svsk->sk_pages[i] == NULL) {
			WARN_ON_ONCE(1);
			continue;
		}
		put_page(svsk->sk_pages[i]);
		svsk->sk_pages[i] = NULL;
	}
out:
	svsk->sk_tcplen = 0;
	svsk->sk_datalen = 0;
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Receive fragment record header into sk_marker.
 *
 * Returns the fragment length from the (complete) record marker, or
 * -EAGAIN when the marker is still partial, or a negative errno from
 * the socket read.  Oversized fragments schedule a transport close.
 */
static ssize_t svc_tcp_read_marker(struct svc_sock *svsk,
				   struct svc_rqst *rqstp)
{
	ssize_t want, len;

	/* If we haven't gotten the record length yet,
	 * get the next four bytes.
	 */
	if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) {
		struct msghdr	msg = { NULL };
		struct kvec	iov;

		want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
		/* Resume in the middle of sk_marker if a previous call
		 * got only some of the four bytes.
		 */
		iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen;
		iov.iov_len  = want;
		iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, want);
		len = svc_tcp_sock_recv_cmsg(svsk, &msg);
		if (len < 0)
			return len;
		svsk->sk_tcplen += len;
		if (len < want) {
			/* call again to read the remaining bytes */
			goto err_short;
		}
		trace_svcsock_marker(&svsk->sk_xprt, svsk->sk_marker);
		if (svc_sock_reclen(svsk) + svsk->sk_datalen >
		    svsk->sk_xprt.xpt_server->sv_max_mesg)
			goto err_too_large;
	}
	return svc_sock_reclen(svsk);

err_too_large:
	net_notice_ratelimited("svc: %s %s RPC fragment too large: %d\n",
			       __func__, svsk->sk_xprt.xpt_server->sv_name,
			       svc_sock_reclen(svsk));
	svc_xprt_deferred_close(&svsk->sk_xprt);
err_short:
	return -EAGAIN;
}
|
|
|
|
|
2009-05-19 05:47:56 +08:00
|
|
|
/* Handle an RPC Reply arriving on the server socket: it belongs to a
 * backchannel request issued on the associated rpc_xprt.  Match it to
 * the pending request by XID and complete that request.
 *
 * Returns 0 on success, or -EAGAIN when there is no backchannel or no
 * matching request (the caller drops the data).
 */
static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
{
	struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt;
	struct rpc_rqst *req = NULL;
	struct kvec *src, *dst;
	__be32 *p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
	__be32 xid = *p;

	if (!bc_xprt)
		return -EAGAIN;
	/* queue_lock protects the rqst lookup and completion */
	spin_lock(&bc_xprt->queue_lock);
	req = xprt_lookup_rqst(bc_xprt, xid);
	if (!req)
		goto unlock_eagain;

	memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
	/*
	 * XXX!: cheating for now!  Only copying HEAD.
	 * But we know this is good enough for now (in fact, for any
	 * callback reply in the forseeable future).
	 */
	dst = &req->rq_private_buf.head[0];
	src = &rqstp->rq_arg.head[0];
	if (dst->iov_len < src->iov_len)
		goto unlock_eagain; /* whatever; just giving up. */
	memcpy(dst->iov_base, src->iov_base, src->iov_len);
	xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
	/* Consumed here; the svc layer must not process it as a Call */
	rqstp->rq_arg.len = 0;
	spin_unlock(&bc_xprt->queue_lock);
	return 0;
unlock_eagain:
	spin_unlock(&bc_xprt->queue_lock);
	return -EAGAIN;
}
|
|
|
|
|
2012-12-04 04:50:38 +08:00
|
|
|
/* Reset per-fragment reassembly state after a complete fragment has
 * been consumed, and restore the default receive low-water mark.
 */
static void svc_tcp_fragment_received(struct svc_sock *svsk)
{
	/* If we have more data, signal svc_xprt_enqueue() to try again */
	svsk->sk_tcplen = 0;
	svsk->sk_marker = xdr_zero;

	/* Publish the reset state before re-enabling early wakeups */
	smp_wmb();
	tcp_set_rcvlowat(svsk->sk_sk, 1);
}
|
2011-02-25 03:25:33 +08:00
|
|
|
|
2020-05-21 05:30:12 +08:00
|
|
|
/**
 * svc_tcp_recvfrom - Receive data from a TCP socket
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Called in a loop when XPT_DATA has been set.
 *
 * Read the 4-byte stream record marker, then use the record length
 * in that marker to set up exactly the resources needed to receive
 * the next RPC message into @rqstp.
 *
 * Returns:
 *	On success, the number of bytes in a received RPC Call, or
 *	%0 if a complete RPC Call message was not ready to return
 *
 * The zero return case handles partial receives and callback Replies.
 * The state of a partial receive is preserved in the svc_sock for
 * the next call to svc_tcp_recvfrom.
 */
static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk =
		container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
	struct svc_serv	*serv = svsk->sk_xprt.xpt_server;
	size_t want, base;
	ssize_t len;
	__be32 *p;
	__be32 calldir;

	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
	len = svc_tcp_read_marker(svsk, rqstp);
	if (len < 0)
		goto error;

	/* Resume any partial receive, then read the remainder of the
	 * fragment announced by the record marker.
	 */
	base = svc_tcp_restore_pages(svsk, rqstp);
	want = len - (svsk->sk_tcplen - sizeof(rpc_fraghdr));
	len = svc_tcp_read_msg(rqstp, base + want, base);
	if (len >= 0) {
		trace_svcsock_tcp_recv(&svsk->sk_xprt, len);
		svsk->sk_tcplen += len;
		svsk->sk_datalen += len;
	}
	if (len != want || !svc_sock_final_rec(svsk))
		goto err_incomplete;
	/* A Call shorter than XID + msg-direction cannot be valid */
	if (svsk->sk_datalen < 8)
		goto err_nuts;

	rqstp->rq_arg.len = svsk->sk_datalen;
	rqstp->rq_arg.page_base = 0;
	if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
		rqstp->rq_arg.page_len = 0;
	} else
		rqstp->rq_arg.page_len = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;

	rqstp->rq_xprt_ctxt = NULL;
	rqstp->rq_prot = IPPROTO_TCP;
	if (test_bit(XPT_LOCAL, &svsk->sk_xprt.xpt_flags))
		set_bit(RQ_LOCAL, &rqstp->rq_flags);
	else
		clear_bit(RQ_LOCAL, &rqstp->rq_flags);

	/* Word 1 of the message is the direction; non-zero means this
	 * is a Reply to a backchannel Call.
	 */
	p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
	calldir = p[1];
	if (calldir)
		len = receive_cb_reply(svsk, rqstp);

	/* Reset TCP read info */
	svsk->sk_datalen = 0;
	svc_tcp_fragment_received(svsk);

	if (len < 0)
		goto error;

	svc_xprt_copy_addrs(rqstp, &svsk->sk_xprt);
	if (serv->sv_stats)
		serv->sv_stats->nettcpcnt++;

	svc_sock_secure_port(rqstp);
	svc_xprt_received(rqstp->rq_xprt);
	return rqstp->rq_arg.len;

err_incomplete:
	svc_tcp_save_pages(svsk, rqstp);
	if (len < 0 && len != -EAGAIN)
		goto err_delete;
	if (len == want)
		svc_tcp_fragment_received(svsk);
	else {
		/* Avoid more ->sk_data_ready() calls until the rest
		 * of the message has arrived. This reduces service
		 * thread wake-ups on large incoming messages. */
		tcp_set_rcvlowat(svsk->sk_sk,
				 svc_sock_reclen(svsk) - svsk->sk_tcplen);

		trace_svcsock_tcp_recv_short(&svsk->sk_xprt,
				svc_sock_reclen(svsk),
				svsk->sk_tcplen - sizeof(rpc_fraghdr));
	}
	goto err_noclose;
error:
	if (len != -EAGAIN)
		goto err_delete;
	trace_svcsock_tcp_recv_eagain(&svsk->sk_xprt, 0);
	goto err_noclose;
err_nuts:
	svsk->sk_datalen = 0;
err_delete:
	trace_svcsock_tcp_recv_err(&svsk->sk_xprt, len);
	svc_xprt_deferred_close(&svsk->sk_xprt);
err_noclose:
	svc_xprt_received(rqstp->rq_xprt);
	return 0;	/* record not complete */
}
|
|
|
|
|
2020-12-19 01:28:41 +08:00
|
|
|
/*
 * MSG_SPLICE_PAGES is used exclusively to reduce the number of
 * copy operations in this path. Therefore the caller must ensure
 * that the pages backing @xdr are unchanging.
 *
 * Note that the send is non-blocking. The caller has incremented
 * the reference count on each page backing the RPC message, and
 * the network layer will "put" these pages when transmission is
 * complete.
 *
 * This is safe for our RPC services because the memory backing
 * the head and tail components is never kmalloc'd. These always
 * come from pages in the svc_rqst::rq_pages array.
 *
 * Returns 0 on success (with the byte count in *@sentp), or a
 * negative errno.
 */
static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
			   rpc_fraghdr marker, unsigned int *sentp)
{
	struct msghdr msg = {
		.msg_flags	= MSG_SPLICE_PAGES,
	};
	unsigned int count;
	void *buf;
	int ret;

	*sentp = 0;

	/* The stream record marker is copied into a temporary page
	 * fragment buffer so that it can be included in rq_bvec.
	 */
	buf = page_frag_alloc(&svsk->sk_frag_cache, sizeof(marker),
			      GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memcpy(buf, &marker, sizeof(marker));
	bvec_set_virt(rqstp->rq_bvec, buf, sizeof(marker));

	/* The message body starts at rq_bvec[1], after the marker */
	count = xdr_buf_to_bvec(rqstp->rq_bvec + 1,
				ARRAY_SIZE(rqstp->rq_bvec) - 1, &rqstp->rq_res);

	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
		      1 + count, sizeof(marker) + rqstp->rq_res.len);
	ret = sock_sendmsg(svsk->sk_sock, &msg);
	if (ret < 0)
		return ret;
	*sentp += ret;
	return 0;
}
|
|
|
|
|
2020-03-03 04:20:33 +08:00
|
|
|
/**
 * svc_tcp_sendto - Send out a reply on a TCP socket
 * @rqstp: completed svc_rqst
 *
 * xpt_mutex ensures @rqstp's whole message is written to the socket
 * without interruption.
 *
 * Returns the number of bytes sent, or a negative errno.
 */
static int svc_tcp_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct xdr_buf *xdr = &rqstp->rq_res;
	/* Record marker: "last fragment" bit plus the reply length */
	rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT |
					 (u32)xdr->len);
	unsigned int sent;
	int err;

	/* Release any receive-side context before replying */
	svc_tcp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
	rqstp->rq_xprt_ctxt = NULL;

	mutex_lock(&xprt->xpt_mutex);
	if (svc_xprt_is_dead(xprt))
		goto out_notconn;
	err = svc_tcp_sendmsg(svsk, rqstp, marker, &sent);
	trace_svcsock_tcp_send(xprt, err < 0 ? (long)err : sent);
	/* A short send would corrupt the record stream; close instead */
	if (err < 0 || sent != (xdr->len + sizeof(marker)))
		goto out_close;
	mutex_unlock(&xprt->xpt_mutex);
	return sent;

out_notconn:
	mutex_unlock(&xprt->xpt_mutex);
	return -ENOTCONN;
out_close:
	pr_notice("rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
		  xprt->xpt_server->sv_name,
		  (err < 0) ? "got error" : "sent",
		  (err < 0) ? err : sent, xdr->len);
	svc_xprt_deferred_close(xprt);
	mutex_unlock(&xprt->xpt_mutex);
	return -EAGAIN;
}
|
|
|
|
|
2007-12-31 11:07:42 +08:00
|
|
|
static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
|
2010-09-29 20:04:18 +08:00
|
|
|
struct net *net,
|
2007-12-31 11:07:42 +08:00
|
|
|
struct sockaddr *sa, int salen,
|
|
|
|
int flags)
|
|
|
|
{
|
2010-09-29 20:04:18 +08:00
|
|
|
return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
|
2007-12-31 11:07:42 +08:00
|
|
|
}
|
|
|
|
|
2017-08-01 23:59:49 +08:00
|
|
|
/* Transport method table for TCP svc sockets */
static const struct svc_xprt_ops svc_tcp_ops = {
	.xpo_create = svc_tcp_create,
	.xpo_recvfrom = svc_tcp_recvfrom,
	.xpo_sendto = svc_tcp_sendto,
	.xpo_result_payload = svc_sock_result_payload,
	.xpo_release_ctxt = svc_tcp_release_ctxt,
	.xpo_detach = svc_tcp_sock_detach,
	.xpo_free = svc_sock_free,
	.xpo_has_wspace = svc_tcp_has_wspace,
	.xpo_accept = svc_tcp_accept,
	.xpo_kill_temp_xprt = svc_tcp_kill_temp_xprt,
	.xpo_handshake = svc_tcp_handshake,
};
|
|
|
|
|
|
|
|
/* Transport class registered under the name "tcp". */
static struct svc_xprt_class svc_tcp_class = {
	.xcl_name = "tcp",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_tcp_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
	.xcl_ident = XPRT_TRANSPORT_TCP,
};
|
|
|
|
|
|
|
|
/*
 * Register the TCP and UDP socket transport classes with the
 * server-side transport switch.
 */
void svc_init_xprt_sock(void)
{
	svc_reg_xprt_class(&svc_tcp_class);
	svc_reg_xprt_class(&svc_udp_class);
}
|
|
|
|
|
|
|
|
/*
 * Unregister the TCP and UDP socket transport classes from the
 * server-side transport switch (mirror of svc_init_xprt_sock()).
 */
void svc_cleanup_xprt_sock(void)
{
	svc_unreg_xprt_class(&svc_tcp_class);
	svc_unreg_xprt_class(&svc_udp_class);
}
|
|
|
|
|
2007-12-31 11:07:50 +08:00
|
|
|
/*
 * svc_tcp_init - prepare a TCP svc_sock for use by @serv
 * @svsk: the svc_sock wrapping an already-created TCP socket
 * @serv: owning RPC service
 *
 * Attaches the transport to the "tcp" class, installs the svc
 * callbacks on the underlying struct sock, and resets the stream
 * parsing state for connected sockets.
 */
static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
{
	struct sock *sk = svsk->sk_sk;

	svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_tcp_class,
		      &svsk->sk_xprt, serv);
	set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
	set_bit(XPT_CONG_CTRL, &svsk->sk_xprt.xpt_flags);
	if (sk->sk_state == TCP_LISTEN) {
		/* Listener: wake on incoming connections, not data */
		strcpy(svsk->sk_xprt.xpt_remotebuf, "listener");
		set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags);
		sk->sk_data_ready = svc_tcp_listen_data_ready;
		set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
	} else {
		/* Connected socket: install data/state/write callbacks */
		sk->sk_state_change = svc_tcp_state_change;
		sk->sk_data_ready = svc_data_ready;
		sk->sk_write_space = svc_write_space;

		/* Reset record-marker and payload accounting state */
		svsk->sk_marker = xdr_zero;
		svsk->sk_tcplen = 0;
		svsk->sk_datalen = 0;
		memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages));

		tcp_sock_set_nodelay(sk);

		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
		switch (sk->sk_state) {
		case TCP_SYN_RECV:
		case TCP_ESTABLISHED:
			break;
		default:
			/* Any other state is unusable; schedule closure */
			svc_xprt_deferred_close(&svsk->sk_xprt);
		}
	}
}
|
|
|
|
|
2007-12-31 11:08:27 +08:00
|
|
|
void svc_sock_update_bufs(struct svc_serv *serv)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* The number of server threads has changed. Update
|
|
|
|
* rcvbuf and sndbuf accordingly on all sockets
|
|
|
|
*/
|
2010-10-06 03:30:19 +08:00
|
|
|
struct svc_sock *svsk;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
spin_lock_bh(&serv->sv_lock);
|
2010-10-06 03:30:19 +08:00
|
|
|
list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list)
|
2007-12-31 11:07:48 +08:00
|
|
|
set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
|
2005-04-17 06:20:36 +08:00
|
|
|
spin_unlock_bh(&serv->sv_lock);
|
|
|
|
}
|
2008-12-24 05:30:12 +08:00
|
|
|
EXPORT_SYMBOL_GPL(svc_sock_update_bufs);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
 * Initialize socket for RPC use and create svc_sock struct
 *
 * Allocates a svc_sock for @sock, optionally calls svc_register()
 * (unless SVC_SOCK_ANONYMOUS is set in @flags), saves the socket's
 * original callbacks, and hands off to the UDP or TCP initializer.
 * Returns the new svc_sock or an ERR_PTR; on failure @sock is NOT
 * released here — callers own that cleanup.
 */
static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
					 struct socket *sock,
					 int flags)
{
	struct svc_sock *svsk;
	struct sock *inet;
	int pmap_register = !(flags & SVC_SOCK_ANONYMOUS);

	svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
	if (!svsk)
		return ERR_PTR(-ENOMEM);

	inet = sock->sk;

	if (pmap_register) {
		int err;

		err = svc_register(serv, sock_net(sock->sk), inet->sk_family,
				   inet->sk_protocol,
				   ntohs(inet_sk(inet)->inet_sport));
		if (err < 0) {
			kfree(svsk);
			return ERR_PTR(err);
		}
	}

	/* Save the original callbacks so svc_sock_detach() can restore
	 * them later.
	 */
	svsk->sk_sock = sock;
	svsk->sk_sk = inet;
	svsk->sk_ostate = inet->sk_state_change;
	svsk->sk_odata = inet->sk_data_ready;
	svsk->sk_owspace = inet->sk_write_space;
	/*
	 * This barrier is necessary in order to prevent race condition
	 * with svc_data_ready(), svc_tcp_listen_data_ready(), and others
	 * when calling callbacks above.
	 */
	wmb();
	inet->sk_user_data = svsk;

	/* Initialize the socket */
	if (sock->type == SOCK_DGRAM)
		svc_udp_init(svsk, serv);
	else
		svc_tcp_init(svsk, serv);

	trace_svcsock_new(svsk, sock);
	return svsk;
}
|
|
|
|
|
2009-04-24 07:32:33 +08:00
|
|
|
/**
 * svc_addsock - add a listener socket to an RPC service
 * @serv: pointer to RPC service to which to add a new listener
 * @net: caller's network namespace
 * @fd: file descriptor of the new listener
 * @name_return: pointer to buffer to fill in with name of listener
 * @len: size of the buffer
 * @cred: credential
 *
 * Fills in socket name and returns positive length of name if successful.
 * Name is terminated with '\n'. On error, returns a negative errno
 * value.
 */
int svc_addsock(struct svc_serv *serv, struct net *net, const int fd,
		char *name_return, const size_t len, const struct cred *cred)
{
	int err = 0;
	struct socket *so = sockfd_lookup(fd, &err);
	struct svc_sock *svsk = NULL;
	struct sockaddr_storage addr;
	struct sockaddr *sin = (struct sockaddr *)&addr;
	int salen;

	if (!so)
		return err;
	/* The descriptor must belong to the caller's namespace and be
	 * an unconnected IPv4/IPv6 TCP or UDP socket.
	 */
	err = -EINVAL;
	if (sock_net(so->sk) != net)
		goto out;
	err = -EAFNOSUPPORT;
	if ((so->sk->sk_family != PF_INET) && (so->sk->sk_family != PF_INET6))
		goto out;
	err = -EPROTONOSUPPORT;
	if (so->sk->sk_protocol != IPPROTO_TCP &&
	    so->sk->sk_protocol != IPPROTO_UDP)
		goto out;
	err = -EISCONN;
	if (so->state > SS_UNCONNECTED)
		goto out;
	err = -ENOENT;
	if (!try_module_get(THIS_MODULE))
		goto out;
	svsk = svc_setup_socket(serv, so, SVC_SOCK_DEFAULTS);
	if (IS_ERR(svsk)) {
		/* Drop the module reference taken above */
		module_put(THIS_MODULE);
		err = PTR_ERR(svsk);
		goto out;
	}
	salen = kernel_getsockname(svsk->sk_sock, sin);
	if (salen >= 0)
		svc_xprt_set_local(&svsk->sk_xprt, sin, salen);
	svsk->sk_xprt.xpt_cred = get_cred(cred);
	svc_add_new_perm_xprt(serv, &svsk->sk_xprt);
	return svc_one_sock_name(svsk, name_return, len);
out:
	sockfd_put(so);
	return err;
}
EXPORT_SYMBOL_GPL(svc_addsock);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Create socket for RPC service.
|
|
|
|
*/
|
2007-12-31 11:07:42 +08:00
|
|
|
static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
|
|
|
|
int protocol,
|
2010-09-29 20:04:18 +08:00
|
|
|
struct net *net,
|
2007-12-31 11:07:42 +08:00
|
|
|
struct sockaddr *sin, int len,
|
|
|
|
int flags)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct svc_sock *svsk;
|
|
|
|
struct socket *sock;
|
|
|
|
int error;
|
|
|
|
int type;
|
2007-12-31 11:08:12 +08:00
|
|
|
struct sockaddr_storage addr;
|
|
|
|
struct sockaddr *newsin = (struct sockaddr *)&addr;
|
|
|
|
int newlen;
|
2009-03-31 06:59:17 +08:00
|
|
|
int family;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
|
|
|
|
printk(KERN_WARNING "svc: only UDP and TCP "
|
|
|
|
"sockets supported\n");
|
2007-12-31 11:07:42 +08:00
|
|
|
return ERR_PTR(-EINVAL);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2009-03-31 06:59:17 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;
|
2009-03-31 06:59:17 +08:00
|
|
|
switch (sin->sa_family) {
|
|
|
|
case AF_INET6:
|
|
|
|
family = PF_INET6;
|
|
|
|
break;
|
|
|
|
case AF_INET:
|
|
|
|
family = PF_INET;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-09-29 20:06:57 +08:00
|
|
|
error = __sock_create(net, family, type, protocol, &sock, 1);
|
2007-02-12 16:53:39 +08:00
|
|
|
if (error < 0)
|
2007-12-31 11:07:42 +08:00
|
|
|
return ERR_PTR(error);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-12-07 12:35:24 +08:00
|
|
|
svc_reclassify_socket(sock);
|
|
|
|
|
2009-03-31 06:59:17 +08:00
|
|
|
/*
|
|
|
|
* If this is an PF_INET6 listener, we want to avoid
|
|
|
|
* getting requests from IPv4 remotes. Those should
|
|
|
|
* be shunted to a PF_INET listener via rpcbind.
|
|
|
|
*/
|
|
|
|
if (family == PF_INET6)
|
2020-05-28 13:12:31 +08:00
|
|
|
ip6_sock_set_v6only(sock->sk);
|
2006-09-29 05:37:07 +08:00
|
|
|
if (type == SOCK_STREAM)
|
2012-04-19 11:39:36 +08:00
|
|
|
sock->sk->sk_reuse = SK_CAN_REUSE; /* allow address reuse */
|
2007-02-12 16:53:39 +08:00
|
|
|
error = kernel_bind(sock, sin, len);
|
2006-09-29 05:37:07 +08:00
|
|
|
if (error < 0)
|
|
|
|
goto bummer;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-02-13 03:00:20 +08:00
|
|
|
error = kernel_getsockname(sock, newsin);
|
2007-12-31 11:08:12 +08:00
|
|
|
if (error < 0)
|
|
|
|
goto bummer;
|
2018-02-13 03:00:20 +08:00
|
|
|
newlen = error;
|
2007-12-31 11:08:12 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (protocol == IPPROTO_TCP) {
|
2006-08-08 11:58:01 +08:00
|
|
|
if ((error = kernel_listen(sock, 64)) < 0)
|
2005-04-17 06:20:36 +08:00
|
|
|
goto bummer;
|
|
|
|
}
|
|
|
|
|
2012-08-14 05:46:17 +08:00
|
|
|
svsk = svc_setup_socket(serv, sock, flags);
|
2012-08-14 06:01:03 +08:00
|
|
|
if (IS_ERR(svsk)) {
|
|
|
|
error = PTR_ERR(svsk);
|
|
|
|
goto bummer;
|
2007-02-12 16:53:30 +08:00
|
|
|
}
|
2012-08-14 06:01:03 +08:00
|
|
|
svc_xprt_set_local(&svsk->sk_xprt, newsin, newlen);
|
|
|
|
return (struct svc_xprt *)svsk;
|
2005-04-17 06:20:36 +08:00
|
|
|
bummer:
|
|
|
|
sock_release(sock);
|
2007-12-31 11:07:42 +08:00
|
|
|
return ERR_PTR(error);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2007-12-31 11:07:27 +08:00
|
|
|
/*
|
|
|
|
* Detach the svc_sock from the socket so that no
|
|
|
|
* more callbacks occur.
|
|
|
|
*/
|
|
|
|
static void svc_sock_detach(struct svc_xprt *xprt)
|
|
|
|
{
|
|
|
|
struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
|
|
|
|
struct sock *sk = svsk->sk_sk;
|
|
|
|
|
|
|
|
/* put back the old socket callbacks */
|
2016-06-24 22:55:47 +08:00
|
|
|
lock_sock(sk);
|
2007-12-31 11:07:27 +08:00
|
|
|
sk->sk_state_change = svsk->sk_ostate;
|
|
|
|
sk->sk_data_ready = svsk->sk_odata;
|
|
|
|
sk->sk_write_space = svsk->sk_owspace;
|
2016-06-24 22:55:47 +08:00
|
|
|
sk->sk_user_data = NULL;
|
|
|
|
release_sock(sk);
|
2008-12-24 05:30:11 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Disconnect the socket, and reset the callbacks
 *
 * TCP variant of xpo_detach: ends any TLS handshake on the socket,
 * restores the saved callbacks, and for connected (non-listener)
 * sockets frees partially-received pages and shuts down both
 * directions of the connection.
 */
static void svc_tcp_sock_detach(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);

	tls_handshake_close(svsk->sk_sock);

	svc_sock_detach(xprt);

	if (!test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
		svc_tcp_clear_pages(svsk);
		kernel_sock_shutdown(svsk->sk_sock, SHUT_RDWR);
	}
}
|
|
|
|
|
|
|
|
/*
 * Free the svc_sock's socket resources and the svc_sock itself.
 */
static void svc_sock_free(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct page_frag_cache *pfc = &svsk->sk_frag_cache;
	struct socket *sock = svsk->sk_sock;

	trace_svcsock_free(svsk, sock);

	/* Abort any in-flight TLS handshake before releasing the socket */
	tls_handshake_cancel(sock->sk);
	/* A socket with a struct file (added via svc_addsock()) is
	 * released through its fd reference; otherwise release directly.
	 */
	if (sock->file)
		sockfd_put(sock);
	else
		sock_release(sock);
	/* Drain any pages still held by the send fragment cache */
	if (pfc->va)
		__page_frag_cache_drain(virt_to_head_page(pfc->va),
					pfc->pagecnt_bias);
	kfree(svsk);
}
|