/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 * Copyright (c) 2001-2002 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions interface with the sockets layer to implement the
 * SCTP Extensions for the Sockets API.
 *
 * Note that the descriptions from the specification are USER level
 * functions--this file is the functions which populate the struct proto
 * for SCTP which is the BOTTOM of the sockets interface.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Narasimha Budihal <narsi@refcode.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *    Jon Grimm <jgrimm@us.ibm.com>
 *    Xingang Guo <xingang.guo@intel.com>
 *    Daisy Chang <daisyc@us.ibm.com>
 *    Sridhar Samudrala <samudrala@us.ibm.com>
 *    Inaky Perez-Gonzalez <inaky.gonzalez@intel.com>
 *    Ardelle Fan <ardelle.fan@intel.com>
 *    Ryan Layer <rmlayer@us.ibm.com>
 *    Anup Pemmaiah <pemmaiah@cc.usu.edu>
 *    Kevin Gao <kevin.gao@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <crypto/hash.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/sched/signal.h>
#include <linux/ip.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/rhashtable.h>

#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/busy_poll.h>

#include <linux/socket.h> /* for sa_family_t */
#include <linux/export.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>

/* Forward declarations for internal helper functions. */
static bool sctp_writeable(struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
				size_t msg_len);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
static void sctp_wait_for_close(struct sock *sk, long timeo);
static void sctp_destruct_sock(struct sock *sk);
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len);
static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk);
static int sctp_do_bind(struct sock *, union sctp_addr *, int);
static int sctp_autobind(struct sock *sk);
static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
			      struct sctp_association *assoc,
			      enum sctp_socket_type type);

static unsigned long sctp_memory_pressure;
static atomic_long_t sctp_memory_allocated;
struct percpu_counter sctp_sockets_allocated;

static void sctp_enter_memory_pressure(struct sock *sk)
{
	sctp_memory_pressure = 1;
}

/* Get the sndbuf space available at the time on the association.  */
static inline int sctp_wspace(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;

	return asoc->ep->sndbuf_policy ? sk->sk_sndbuf - asoc->sndbuf_used
				       : sk_stream_wspace(sk);
}

/* Increment the used sndbuf space count of the corresponding association by
 * the size of the outgoing data chunk.
 * Also, set the skb destructor for sndbuf accounting later.
 *
 * Since it is always 1-1 between chunk and skb, and also a new skb is always
 * allocated for chunk bundling in sctp_packet_transmit(), we can use the
 * destructor in the data chunk skb for the purpose of the sndbuf space
 * tracking.
 */
static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = chunk->asoc;
	struct sock *sk = asoc->base.sk;

	/* The sndbuf space is tracked per association.  */
	sctp_association_hold(asoc);

	if (chunk->shkey)
		sctp_auth_shkey_hold(chunk->shkey);

	skb_set_owner_w(chunk->skb, sk);

	chunk->skb->destructor = sctp_wfree;
	/* Save the chunk pointer in skb for sctp_wfree to use later.  */
	skb_shinfo(chunk->skb)->destructor_arg = chunk;

	refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
	asoc->sndbuf_used += chunk->skb->truesize + sizeof(struct sctp_chunk);
	sk->sk_wmem_queued += chunk->skb->truesize + sizeof(struct sctp_chunk);
	sk_mem_charge(sk, chunk->skb->truesize);
}
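
/* Editorial note, an illustrative sketch rather than authoritative kernel
 * documentation: the charge taken above for every queued DATA chunk is
 *
 *	chunk->skb->truesize + sizeof(struct sctp_chunk)
 *
 * added to both asoc->sndbuf_used and sk->sk_wmem_queued.  The destructor
 * installed above, sctp_wfree(), is expected to release the same amount
 * when the skb is freed, so that sctp_wspace() stays consistent.
 */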

static void sctp_clear_owner_w(struct sctp_chunk *chunk)
{
	skb_orphan(chunk->skb);
}

static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
				       void (*cb)(struct sctp_chunk *))
{
	struct sctp_outq *q = &asoc->outqueue;
	struct sctp_transport *t;
	struct sctp_chunk *chunk;

	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
		list_for_each_entry(chunk, &t->transmitted, transmitted_list)
			cb(chunk);

	list_for_each_entry(chunk, &q->retransmit, transmitted_list)
		cb(chunk);

	list_for_each_entry(chunk, &q->sacked, transmitted_list)
		cb(chunk);

	list_for_each_entry(chunk, &q->abandoned, transmitted_list)
		cb(chunk);

	list_for_each_entry(chunk, &q->out_chunk_list, list)
		cb(chunk);
}

static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
				 void (*cb)(struct sk_buff *, struct sock *))
{
	struct sk_buff *skb, *tmp;

	sctp_skb_for_each(skb, &asoc->ulpq.lobby, tmp)
		cb(skb, sk);

	sctp_skb_for_each(skb, &asoc->ulpq.reasm, tmp)
		cb(skb, sk);

	sctp_skb_for_each(skb, &asoc->ulpq.reasm_uo, tmp)
		cb(skb, sk);
}

/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
				   int len)
{
	struct sctp_af *af;

	/* Verify basic sockaddr. */
	af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
	if (!af)
		return -EINVAL;

	/* Is this a valid SCTP address?  */
	if (!af->addr_valid(addr, sctp_sk(sk), NULL))
		return -EINVAL;

	if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
		return -EINVAL;

	return 0;
}

/* Look up the association by its id.  If this is not a UDP-style
 * socket, the ID field is always ignored.
 */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
{
	struct sctp_association *asoc = NULL;

	/* If this is not a UDP-style socket, assoc id should be ignored. */
	if (!sctp_style(sk, UDP)) {
		/* Return NULL if the socket state is not ESTABLISHED. It
		 * could be a TCP-style listening socket or a socket which
		 * hasn't yet called connect() to establish an association.
		 */
		if (!sctp_sstate(sk, ESTABLISHED) && !sctp_sstate(sk, CLOSING))
			return NULL;

		/* Get the first and the only association from the list. */
		if (!list_empty(&sctp_sk(sk)->ep->asocs))
			asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
					  struct sctp_association, asocs);
		return asoc;
	}

	/* Otherwise this is a UDP-style socket. */
	if (id <= SCTP_ALL_ASSOC)
		return NULL;

	spin_lock_bh(&sctp_assocs_id_lock);
	asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
	if (asoc && (asoc->base.sk != sk || asoc->base.dead))
		asoc = NULL;
	spin_unlock_bh(&sctp_assocs_id_lock);

	return asoc;
}
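
/* Illustrative userspace sketch (an editorial addition, not kernel code; the
 * variable names are only an example): on a one-to-many (UDP-style) socket,
 * the sctp_assoc_t that ends up here as 'id' typically comes from an
 * SCTP_ASSOC_CHANGE notification or from sctp_sndrcvinfo.sinfo_assoc_id, and
 * is then passed back through socket options, e.g.:
 *
 *	struct sctp_status status = { .sstat_assoc_id = assoc_id };
 *	socklen_t len = sizeof(status);
 *
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_STATUS, &status, &len) == 0)
 *		printf("association state: %d\n", status.sstat_state);
 *
 * On a one-to-one (TCP-style) socket the id argument is ignored, as the
 * lookup above implements.
 */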

/* Look up the transport from an address and an assoc id. If both address and
 * id are specified, the associations matching the address and the id should be
 * the same.
 */
static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
					      struct sockaddr_storage *addr,
					      sctp_assoc_t id)
{
	struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
	struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
	union sctp_addr *laddr = (union sctp_addr *)addr;
	struct sctp_transport *transport;

	if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
		return NULL;

	addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
					       laddr,
					       &transport);

	if (!addr_asoc)
		return NULL;

	id_asoc = sctp_id2assoc(sk, id);
	if (id_asoc && (id_asoc != addr_asoc))
		return NULL;

	sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
						(union sctp_addr *)addr);

	return transport;
}

/* API 3.1.2 bind() - UDP Style Syntax
 * The syntax of bind() is,
 *
 *   ret = bind(int sd, struct sockaddr *addr, int addrlen);
 *
 *   sd       - the socket descriptor returned by socket().
 *   addr     - the address structure (struct sockaddr_in or struct
 *              sockaddr_in6 [RFC 2553]),
 *   addr_len - the size of the address structure.
 */
static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
{
	int retval = 0;

	lock_sock(sk);

	pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
		 addr, addr_len);

	/* Disallow binding twice. */
	if (!sctp_sk(sk)->ep->base.bind_addr.port)
		retval = sctp_do_bind(sk, (union sctp_addr *)addr,
				      addr_len);
	else
		retval = -EINVAL;

	release_sock(sk);

	return retval;
}
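
/* Illustrative userspace sketch (an editorial addition, not part of the
 * kernel sources): a minimal one-to-many (UDP-style) SCTP socket bound to a
 * local IPv4 address, which lands in sctp_bind()/sctp_do_bind() above.  The
 * port number is only an example value:
 *
 *	int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in addr = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(5000),
 *		.sin_addr   = { .s_addr = htonl(INADDR_ANY) },
 *	};
 *
 *	if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *		perror("bind");
 *
 * A second bind() on the same socket fails with EINVAL, as enforced above.
 */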

static long sctp_get_port_local(struct sock *, union sctp_addr *);

/* Verify this is a valid sockaddr. */
static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
					union sctp_addr *addr, int len)
{
	struct sctp_af *af;

	/* Check minimum size.  */
	if (len < sizeof (struct sockaddr))
		return NULL;

	if (!opt->pf->af_supported(addr->sa.sa_family, opt))
		return NULL;

	if (addr->sa.sa_family == AF_INET6) {
		if (len < SIN6_LEN_RFC2133)
			return NULL;
		/* V4 mapped address are really of AF_INET family */
		if (ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
		    !opt->pf->af_supported(AF_INET, opt))
			return NULL;
	}

	/* If we get this far, af is valid. */
	af = sctp_get_af_specific(addr->sa.sa_family);

	if (len < af->sockaddr_len)
		return NULL;

	return af;
}

/* Bind a local address either to an endpoint or to an association.  */
static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	struct sctp_af *af;
	unsigned short snum;
	int ret = 0;

	/* Common sockaddr verification. */
	af = sctp_sockaddr_af(sp, addr, len);
	if (!af) {
		pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
			 __func__, sk, addr, len);
		return -EINVAL;
	}

	snum = ntohs(addr->v4.sin_port);

	pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
		 __func__, sk, &addr->sa, bp->port, snum, len);

	/* PF specific bind() address verification. */
	if (!sp->pf->bind_verify(sp, addr))
		return -EADDRNOTAVAIL;

	/* We must either be unbound, or bind to the same port.
	 * It's OK to allow 0 ports if we are already bound.
	 * We'll just inherit an already bound port in this case.
	 */
	if (bp->port) {
		if (!snum)
			snum = bp->port;
		else if (snum != bp->port) {
			pr_debug("%s: new port %d doesn't match existing port "
				 "%d\n", __func__, snum, bp->port);
			return -EINVAL;
		}
	}

	if (snum && snum < inet_prot_sock(net) &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		return -EACCES;

	/* See if the address matches any of the addresses we may have
	 * already bound before checking against other endpoints.
	 */
	if (sctp_bind_addr_match(bp, addr, sp))
		return -EINVAL;

	/* Make sure we are allowed to bind here.
	 * The function sctp_get_port_local() does duplicate address
	 * detection.
	 */
	addr->v4.sin_port = htons(snum);
	if ((ret = sctp_get_port_local(sk, addr))) {
		return -EADDRINUSE;
	}

	/* Refresh ephemeral port.  */
	if (!bp->port)
		bp->port = inet_sk(sk)->inet_num;

	/* Add the address to the bind address list.
	 * Use GFP_ATOMIC since BHs will be disabled.
	 */
	ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len,
				 SCTP_ADDR_SRC, GFP_ATOMIC);

	/* Copy back into socket for getsockname() use. */
	if (!ret) {
		inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
		sp->pf->to_sk_saddr(addr, sk);
	}

	return ret;
}

/* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
 *
 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
 * at any one time.  If a sender, after sending an ASCONF chunk, decides
 * it needs to transfer another ASCONF Chunk, it MUST wait until the
 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
 * subsequent ASCONF. Note this restriction binds each side, so at any
 * time two ASCONF may be in-transit on any given association (one sent
 * from each endpoint).
 */
static int sctp_send_asconf(struct sctp_association *asoc,
			    struct sctp_chunk *chunk)
{
	struct net *net = sock_net(asoc->base.sk);
	int retval = 0;

	/* If there is an outstanding ASCONF chunk, queue it for later
	 * transmission.
	 */
	if (asoc->addip_last_asconf) {
		list_add_tail(&chunk->list, &asoc->addip_chunk_list);
		goto out;
	}

	/* Hold the chunk until an ASCONF_ACK is received. */
	sctp_chunk_hold(chunk);
	retval = sctp_primitive_ASCONF(net, asoc, chunk);
	if (retval)
		sctp_chunk_free(chunk);
	else
		asoc->addip_last_asconf = chunk;

out:
	return retval;
}

/* Add a list of addresses as bind addresses to local endpoint or
 * association.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_do_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were added will be removed.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	int cnt;
	int retval = 0;
	void *addr_buf;
	struct sockaddr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
		 addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* The list may contain either IPv4 or IPv6 address;
		 * determine the address length for walking thru the list.
		 */
		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa_family);
		if (!af) {
			retval = -EINVAL;
			goto err_bindx_add;
		}

		retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
				      af->sockaddr_len);

		addr_buf += af->sockaddr_len;

err_bindx_add:
		if (retval < 0) {
			/* Failed. Cleanup the ones that have been added */
			if (cnt > 0)
				sctp_bindx_rem(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}
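
/* Illustrative userspace sketch (an editorial addition; the sctp_bindx()
 * wrapper from lksctp-tools and example addresses/port are assumed): the
 * "packed array" walked above is a back-to-back sequence of sockaddr
 * structures, each sized according to its own family.  For same-family
 * entries a plain array is already packed:
 *
 *	struct sockaddr_in a[2] = {
 *		{ .sin_family = AF_INET, .sin_port = htons(5000) },
 *		{ .sin_family = AF_INET, .sin_port = htons(5000) },
 *	};
 *
 *	inet_pton(AF_INET, "192.0.2.1",    &a[0].sin_addr);
 *	inet_pton(AF_INET, "198.51.100.1", &a[1].sin_addr);
 *
 *	if (sctp_bindx(sd, (struct sockaddr *)a, 2, SCTP_BINDX_ADD_ADDR))
 *		perror("sctp_bindx");
 *
 * Mixed IPv4/IPv6 lists must be packed back-to-back at their native sizes,
 * which is exactly what the per-entry af->sockaddr_len walk above expects.
 * SCTP_BINDX_REM_ADDR removes addresses the same way, subject to the
 * "don't remove the last address" rule in sctp_bindx_rem() below.
 */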

/* Send an ASCONF chunk with Add IP address parameters to all the peers of the
 * associations that are part of the endpoint indicating that a list of local
 * addresses are added to the endpoint.
 *
 * If any of the addresses is already in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_add_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	struct sctp_sockaddr_entry *laddr;
	union sctp_addr *addr;
	union sctp_addr saveaddr;
	void *addr_buf;
	struct sctp_af *af;
	struct list_head *p;
	int i;
	int retval = 0;

	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {
		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * in the bind address list of the association. If so,
		 * do not send the asconf chunk to its peer, but continue with
		 * other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (sctp_assoc_lookup_laddr(asoc, addr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Use the first valid address in bind addr list of
		 * association as Address Parameter of ASCONF CHUNK.
		 */
		bp = &asoc->base.bind_addr;
		p = bp->address_list.next;
		laddr = list_entry(p, struct sctp_sockaddr_entry, list);
		chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
						   addrcnt, SCTP_PARAM_ADD_IP);
		if (!chunk) {
			retval = -ENOMEM;
			goto out;
		}

		/* Add the new addresses to the bind address list with
		 * use_as_src set to 0.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			addr = addr_buf;
			af = sctp_get_af_specific(addr->v4.sin_family);
			memcpy(&saveaddr, addr, af->sockaddr_len);
			retval = sctp_add_bind_addr(bp, &saveaddr,
						    sizeof(saveaddr),
						    SCTP_ADDR_NEW, GFP_ATOMIC);
			addr_buf += af->sockaddr_len;
		}
		if (asoc->src_out_of_asoc_ok) {
			struct sctp_transport *trans;

			list_for_each_entry(trans,
			    &asoc->peer.transport_addr_list, transports) {
				trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
				    2*asoc->pathmtu, 4380));
				trans->ssthresh = asoc->peer.i.a_rwnd;
				trans->rto = asoc->rto_initial;
				sctp_max_rto(asoc, trans);
				trans->rtt = trans->srtt = trans->rttvar = 0;
				/* Clear the source and route cache */
				sctp_transport_route(trans, NULL,
						     sctp_sk(asoc->base.sk));
			}
		}
		retval = sctp_send_asconf(asoc, chunk);
	}

out:
	return retval;
}

/* Remove a list of addresses from bind addresses list.  Do not remove the
 * last address.
 *
 * Basically run through each address specified in the addrs/addrcnt
 * array/length pair, determine if it is IPv6 or IPv4 and call
 * sctp_del_bind() on it.
 *
 * If any of them fails, then the operation will be reversed and the
 * ones that were removed will be added back.
 *
 * At least one address has to be left; if only one address is
 * available, the operation will return -EBUSY.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_endpoint *ep = sp->ep;
	int cnt;
	struct sctp_bind_addr *bp = &ep->base.bind_addr;
	int retval = 0;
	void *addr_buf;
	union sctp_addr *sa_addr;
	struct sctp_af *af;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	addr_buf = addrs;
	for (cnt = 0; cnt < addrcnt; cnt++) {
		/* If the bind address list is empty or if there is only one
		 * bind address, there is nothing more to be removed (we need
		 * at least one address here).
		 */
		if (list_empty(&bp->address_list) ||
		    (sctp_list_single_entry(&bp->address_list))) {
			retval = -EBUSY;
			goto err_bindx_rem;
		}

		sa_addr = addr_buf;
		af = sctp_get_af_specific(sa_addr->sa.sa_family);
		if (!af) {
			retval = -EINVAL;
			goto err_bindx_rem;
		}

		if (!af->addr_valid(sa_addr, sp, NULL)) {
			retval = -EADDRNOTAVAIL;
			goto err_bindx_rem;
		}

		if (sa_addr->v4.sin_port &&
		    sa_addr->v4.sin_port != htons(bp->port)) {
			retval = -EINVAL;
			goto err_bindx_rem;
		}

		if (!sa_addr->v4.sin_port)
			sa_addr->v4.sin_port = htons(bp->port);

		/* FIXME - There is probably a need to check if sk->sk_saddr and
		 * sk->sk_rcv_addr are currently set to one of the addresses to
		 * be removed. This is something which needs to be looked into
		 * when we are fixing the outstanding issues with multi-homing
		 * socket routing and failover schemes. Refer to comments in
		 * sctp_do_bind(). -daisy
		 */
		retval = sctp_del_bind_addr(bp, sa_addr);

		addr_buf += af->sockaddr_len;
err_bindx_rem:
		if (retval < 0) {
			/* Failed. Add the ones that have been removed back. */
			if (cnt > 0)
				sctp_bindx_add(sk, addrs, cnt);
			return retval;
		}
	}

	return retval;
}

/* Send an ASCONF chunk with Delete IP address parameters to all the peers of
 * the associations that are part of the endpoint indicating that a list of
 * local addresses are removed from the endpoint.
 *
 * If any of the addresses is not in the bind address list of the
 * association, we do not send the chunk for that association.  But it will not
 * affect other associations.
 *
 * Only sctp_setsockopt_bindx() is supposed to call this function.
 */
static int sctp_send_asconf_del_ip(struct sock *sk,
				   struct sockaddr *addrs,
				   int addrcnt)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sctp_association *asoc;
	struct sctp_transport *transport;
	struct sctp_bind_addr *bp;
	struct sctp_chunk *chunk;
	union sctp_addr *laddr;
	void *addr_buf;
	struct sctp_af *af;
	struct sctp_sockaddr_entry *saddr;
	int i;
	int retval = 0;
	int stored = 0;

	chunk = NULL;
	if (!net->sctp.addip_enable)
		return retval;

	sp = sctp_sk(sk);
	ep = sp->ep;

	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
		 __func__, sk, addrs, addrcnt);

	list_for_each_entry(asoc, &ep->asocs, asocs) {

		if (!asoc->peer.asconf_capable)
			continue;

		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
			continue;

		if (!sctp_state(asoc, ESTABLISHED))
			continue;

		/* Check if any address in the packed array of addresses is
		 * not present in the bind address list of the association.
		 * If so, do not send the asconf chunk to its peer, but
		 * continue with other associations.
		 */
		addr_buf = addrs;
		for (i = 0; i < addrcnt; i++) {
			laddr = addr_buf;
			af = sctp_get_af_specific(laddr->v4.sin_family);
			if (!af) {
				retval = -EINVAL;
				goto out;
			}

			if (!sctp_assoc_lookup_laddr(asoc, laddr))
				break;

			addr_buf += af->sockaddr_len;
		}
		if (i < addrcnt)
			continue;

		/* Find one address in the association's bind address list
		 * that is not in the packed array of addresses. This is to
		 * make sure that we do not delete all the addresses in the
		 * association.
		 */
		bp = &asoc->base.bind_addr;
		laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
					       addrcnt, sp);
		if ((laddr == NULL) && (addrcnt == 1)) {
			if (asoc->asconf_addr_del_pending)
				continue;
			asoc->asconf_addr_del_pending =
			    kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
			if (asoc->asconf_addr_del_pending == NULL) {
				retval = -ENOMEM;
				goto out;
			}
			asoc->asconf_addr_del_pending->sa.sa_family =
				    addrs->sa_family;
			asoc->asconf_addr_del_pending->v4.sin_port =
				    htons(bp->port);
			if (addrs->sa_family == AF_INET) {
				struct sockaddr_in *sin;

				sin = (struct sockaddr_in *)addrs;
				asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
			} else if (addrs->sa_family == AF_INET6) {
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)addrs;
				asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
			}
|
|
|
|
|
|
|
pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
|
|
|
|
__func__, asoc, &asoc->asconf_addr_del_pending->sa,
|
|
|
|
asoc->asconf_addr_del_pending);
|
|
|
|
|
2011-04-26 19:19:36 +08:00
|
|
|
asoc->src_out_of_asoc_ok = 1;
|
|
|
|
stored = 1;
|
|
|
|
goto skip_mkasconf;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-09-08 02:51:21 +08:00
|
|
|
if (laddr == NULL)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2007-09-17 07:03:28 +08:00
|
|
|
/* We do not need RCU protection throughout this loop
|
|
|
|
* because this is done under a socket lock from the
|
|
|
|
* setsockopt call.
|
|
|
|
*/
|
2005-04-17 06:20:36 +08:00
|
|
|
chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
|
|
|
|
SCTP_PARAM_DEL_IP);
|
|
|
|
if (!chunk) {
|
|
|
|
retval = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2011-04-26 19:19:36 +08:00
|
|
|
skip_mkasconf:
|
2006-07-22 05:49:25 +08:00
|
|
|
/* Reset use_as_src flag for the addresses in the bind address
|
|
|
|
* list that are to be deleted.
|
|
|
|
*/
|
|
|
|
addr_buf = addrs;
|
|
|
|
for (i = 0; i < addrcnt; i++) {
|
2011-06-14 00:21:26 +08:00
|
|
|
laddr = addr_buf;
|
2006-07-22 05:49:25 +08:00
|
|
|
af = sctp_get_af_specific(laddr->v4.sin_family);
|
2007-09-17 07:03:28 +08:00
|
|
|
list_for_each_entry(saddr, &bp->address_list, list) {
|
2006-11-21 09:05:23 +08:00
|
|
|
if (sctp_cmp_addr_exact(&saddr->a, laddr))
|
2007-12-21 06:12:24 +08:00
|
|
|
saddr->state = SCTP_ADDR_DEL;
|
2006-07-22 05:49:25 +08:00
|
|
|
}
|
|
|
|
addr_buf += af->sockaddr_len;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-07-22 05:49:25 +08:00
|
|
|
/* Update the route and saddr entries for all the transports
|
|
|
|
* as some of the addresses in the bind address list are
|
|
|
|
* about to be deleted and cannot be used as source addresses.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2008-04-13 09:54:24 +08:00
|
|
|
list_for_each_entry(transport, &asoc->peer.transport_addr_list,
|
|
|
|
transports) {
|
2006-07-22 05:49:25 +08:00
|
|
|
sctp_transport_route(transport, NULL,
|
|
|
|
sctp_sk(asoc->base.sk));
|
|
|
|
}
|
|
|
|
|
2011-04-26 19:19:36 +08:00
|
|
|
if (stored)
|
|
|
|
/* We don't need to transmit ASCONF */
|
|
|
|
continue;
|
2006-07-22 05:49:25 +08:00
|
|
|
retval = sctp_send_asconf(asoc, chunk);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
out:
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2011-04-26 18:32:51 +08:00
|
|
|
/* set addr events to assocs in the endpoint. ep and addr_wq must be locked */
|
|
|
|
int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
|
|
|
|
{
|
|
|
|
struct sock *sk = sctp_opt2sk(sp);
|
|
|
|
union sctp_addr *addr;
|
|
|
|
struct sctp_af *af;
|
|
|
|
|
|
|
|
/* It is safe to write port space in caller. */
|
|
|
|
addr = &addrw->a;
|
|
|
|
addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
|
|
|
|
af = sctp_get_af_specific(addr->sa.sa_family);
|
|
|
|
if (!af)
|
|
|
|
return -EINVAL;
|
|
|
|
if (sctp_verify_addr(sk, addr, af->sockaddr_len))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (addrw->state == SCTP_ADDR_NEW)
|
|
|
|
return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
|
|
|
|
else
|
|
|
|
return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
|
|
|
|
*
|
|
|
|
* API 8.1
|
|
|
|
* int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
|
|
|
|
* int flags);
|
|
|
|
*
|
|
|
|
* If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
|
|
|
|
* If the sd is an IPv6 socket, the addresses passed can either be IPv4
|
|
|
|
* or IPv6 addresses.
|
|
|
|
*
|
|
|
|
* A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
|
|
|
|
* Section 3.1.2 for this usage.
|
|
|
|
*
|
|
|
|
* addrs is a pointer to an array of one or more socket addresses. Each
|
|
|
|
* address is contained in its appropriate structure (i.e. struct
|
|
|
|
 * sockaddr_in or struct sockaddr_in6); the family of the address type
|
2006-10-17 13:08:28 +08:00
|
|
|
* must be used to distinguish the address length (note that this
|
2005-04-17 06:20:36 +08:00
|
|
|
* representation is termed a "packed array" of addresses). The caller
|
|
|
|
* specifies the number of addresses in the array with addrcnt.
|
|
|
|
*
|
|
|
|
* On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
|
|
|
|
* -1, and sets errno to the appropriate error code.
|
|
|
|
*
|
|
|
|
* For SCTP, the port given in each socket address must be the same, or
|
|
|
|
* sctp_bindx() will fail, setting errno to EINVAL.
|
|
|
|
*
|
|
|
|
* The flags parameter is formed from the bitwise OR of zero or more of
|
|
|
|
* the following currently defined flags:
|
|
|
|
*
|
|
|
|
* SCTP_BINDX_ADD_ADDR
|
|
|
|
*
|
|
|
|
* SCTP_BINDX_REM_ADDR
|
|
|
|
*
|
|
|
|
* SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
|
|
|
|
* association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
|
|
|
|
* addresses from the association. The two flags are mutually exclusive;
|
|
|
|
* if both are given, sctp_bindx() will fail with EINVAL. A caller may
|
|
|
|
* not remove all addresses from an association; sctp_bindx() will
|
|
|
|
* reject such an attempt with EINVAL.
|
|
|
|
*
|
|
|
|
* An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
|
|
|
|
* additional addresses with an endpoint after calling bind(). Or use
|
|
|
|
* sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
|
|
|
|
* socket is associated with so that no new association accepted will be
|
|
|
|
* associated with those addresses. If the endpoint supports dynamic
|
|
|
|
 * address reconfiguration, an SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR may cause an
|
|
|
|
* endpoint to send the appropriate message to the peer to change the
|
|
|
|
 * peer's address lists.
|
|
|
|
*
|
|
|
|
* Adding and removing addresses from a connected association is
|
|
|
|
* optional functionality. Implementations that do not support this
|
|
|
|
* functionality should return EOPNOTSUPP.
|
|
|
|
*
|
|
|
|
 * Basically do nothing but copy the addresses from user to kernel
|
|
|
|
* land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk.
|
2005-06-21 04:14:57 +08:00
|
|
|
* This is used for tunneling the sctp_bindx() request through sctp_setsockopt()
|
|
|
|
* from userspace.
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
|
|
|
* On exit there is no need to do sockfd_put(), sys_setsockopt() does
|
|
|
|
* it.
|
|
|
|
*
|
|
|
|
* sk The sk of the socket
|
|
|
|
* addrs The pointer to the addresses in user land
|
|
|
|
* addrssize Size of the addrs buffer
|
|
|
|
* op Operation to perform (add or remove, see the flags of
|
|
|
|
* sctp_bindx)
|
|
|
|
*
|
|
|
|
* Returns 0 if ok, <0 errno code on error.
|
|
|
|
*/
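As a rough user-space illustration of the call being tunneled here (not part of this file; it assumes the sctp_bindx() wrapper and headers from lksctp-tools, and the helper name, port and addresses are made up):

#include <string.h>
#include <arpa/inet.h>
#include <netinet/sctp.h>

/* Sketch: add two IPv4 addresses to a socket already bound to port 5000.
 * Both entries carry the same port, as the API requires, and they form a
 * "packed array" because both happen to be struct sockaddr_in.
 */
static int example_bindx_add(int sd)
{
	struct sockaddr_in addrs[2];

	memset(addrs, 0, sizeof(addrs));
	addrs[0].sin_family = AF_INET;
	addrs[0].sin_port = htons(5000);
	inet_pton(AF_INET, "192.0.2.1", &addrs[0].sin_addr);
	addrs[1] = addrs[0];
	inet_pton(AF_INET, "198.51.100.1", &addrs[1].sin_addr);

	return sctp_bindx(sd, (struct sockaddr *)addrs, 2, SCTP_BINDX_ADD_ADDR);
}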
|
2013-12-23 12:16:51 +08:00
|
|
|
static int sctp_setsockopt_bindx(struct sock *sk,
|
2013-06-17 17:40:05 +08:00
|
|
|
struct sockaddr __user *addrs,
|
|
|
|
int addrs_size, int op)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct sockaddr *kaddrs;
|
|
|
|
int err;
|
|
|
|
int addrcnt = 0;
|
|
|
|
int walk_size = 0;
|
|
|
|
struct sockaddr *sa_addr;
|
|
|
|
void *addr_buf;
|
|
|
|
struct sctp_af *af;
|
|
|
|
|
2013-06-29 01:49:40 +08:00
|
|
|
pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
|
|
|
|
__func__, sk, addrs, addrs_size, op);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (unlikely(addrs_size <= 0))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2018-01-08 02:19:09 +08:00
|
|
|
kaddrs = vmemdup_user(addrs, addrs_size);
|
|
|
|
if (unlikely(IS_ERR(kaddrs)))
|
|
|
|
return PTR_ERR(kaddrs);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-02-09 22:25:18 +08:00
|
|
|
/* Walk through the addrs buffer and count the number of addresses. */
|
2005-04-17 06:20:36 +08:00
|
|
|
addr_buf = kaddrs;
|
|
|
|
while (walk_size < addrs_size) {
|
2010-10-01 19:16:58 +08:00
|
|
|
if (walk_size + sizeof(sa_family_t) > addrs_size) {
|
2018-01-08 02:19:09 +08:00
|
|
|
kvfree(kaddrs);
|
2010-10-01 19:16:58 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2011-06-14 00:21:26 +08:00
|
|
|
sa_addr = addr_buf;
|
2005-04-17 06:20:36 +08:00
|
|
|
af = sctp_get_af_specific(sa_addr->sa_family);
|
|
|
|
|
|
|
|
/* If the address family is not supported or if this address
|
|
|
|
* causes the address buffer to overflow return EINVAL.
|
2007-02-09 22:25:18 +08:00
|
|
|
*/
|
2005-04-17 06:20:36 +08:00
|
|
|
if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
|
2018-01-08 02:19:09 +08:00
|
|
|
kvfree(kaddrs);
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
addrcnt++;
|
|
|
|
addr_buf += af->sockaddr_len;
|
|
|
|
walk_size += af->sockaddr_len;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Do the work. */
|
|
|
|
switch (op) {
|
|
|
|
case SCTP_BINDX_ADD_ADDR:
|
2018-02-14 04:56:24 +08:00
|
|
|
/* Allow security module to validate bindx addresses. */
|
|
|
|
err = security_sctp_bind_connect(sk, SCTP_SOCKOPT_BINDX_ADD,
|
|
|
|
(struct sockaddr *)kaddrs,
|
|
|
|
addrs_size);
|
|
|
|
if (err)
|
|
|
|
goto out;
|
2005-04-17 06:20:36 +08:00
|
|
|
err = sctp_bindx_add(sk, kaddrs, addrcnt);
|
|
|
|
if (err)
|
|
|
|
goto out;
|
|
|
|
err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case SCTP_BINDX_REM_ADDR:
|
|
|
|
err = sctp_bindx_rem(sk, kaddrs, addrcnt);
|
|
|
|
if (err)
|
|
|
|
goto out;
|
|
|
|
err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
err = -EINVAL;
|
|
|
|
break;
|
2007-04-21 08:09:22 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
out:
|
2018-01-08 02:19:09 +08:00
|
|
|
kvfree(kaddrs);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2005-06-21 04:14:57 +08:00
|
|
|
/* __sctp_connect(struct sock *sk, struct sockaddr *kaddrs, int addrs_size,
 *		   int flags, sctp_assoc_t *assoc_id)
|
|
|
|
*
|
|
|
|
* Common routine for handling connect() and sctp_connectx().
|
|
|
|
* Connect will come in with just a single address.
|
|
|
|
*/
|
2013-12-23 12:16:51 +08:00
|
|
|
static int __sctp_connect(struct sock *sk,
|
2005-06-21 04:14:57 +08:00
|
|
|
struct sockaddr *kaddrs,
|
sctp: fix the issue that flags are ignored when using kernel_connect
Now sctp uses inet_dgram_connect as its proto_ops .connect, and the flags
param can't be passed into its proto .connect, where these flags are really
needed.
sctp works around it by getting the flags from the socket file in __sctp_connect.
That works for connecting from userspace, as the user sock inherently has a
socket file and passes f_flags as the flags param into the proto_ops
.connect.
However, a sock created by sock_create_kern doesn't have a socket file,
and it passes the flags (like O_NONBLOCK) by using the flags param of
kernel_connect, which calls proto_ops .connect later.
So to fix it, this patch defines a new proto_ops .connect for sctp,
sctp_inet_connect, which calls __sctp_connect() directly with this
flags param. After this, sctp's proto .connect can be removed.
Note that sctp_inet_connect doesn't need to do some checks that are not
needed for sctp, which makes things better than with inet_dgram_connect.
Suggested-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: Xin Long <lucien.xin@gmail.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Reviewed-by: Michal Kubecek <mkubecek@suse.cz>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-05-20 16:39:10 +08:00
|
|
|
int addrs_size, int flags,
|
2008-05-10 06:14:11 +08:00
|
|
|
sctp_assoc_t *assoc_id)
|
2005-06-21 04:14:57 +08:00
|
|
|
{
|
2012-08-07 15:25:24 +08:00
|
|
|
struct net *net = sock_net(sk);
|
2005-06-21 04:14:57 +08:00
|
|
|
struct sctp_sock *sp;
|
|
|
|
struct sctp_endpoint *ep;
|
|
|
|
struct sctp_association *asoc = NULL;
|
|
|
|
struct sctp_association *asoc2;
|
|
|
|
struct sctp_transport *transport;
|
|
|
|
union sctp_addr to;
|
2017-08-05 19:59:54 +08:00
|
|
|
enum sctp_scope scope;
|
2005-06-21 04:14:57 +08:00
|
|
|
long timeo;
|
|
|
|
int err = 0;
|
|
|
|
int addrcnt = 0;
|
|
|
|
int walk_size = 0;
|
2007-08-01 22:56:43 +08:00
|
|
|
union sctp_addr *sa_addr = NULL;
|
2005-06-21 04:14:57 +08:00
|
|
|
void *addr_buf;
|
2007-05-05 04:34:09 +08:00
|
|
|
unsigned short port;
|
2005-06-21 04:14:57 +08:00
|
|
|
|
|
|
|
sp = sctp_sk(sk);
|
|
|
|
ep = sp->ep;
|
|
|
|
|
|
|
|
/* connect() cannot be done on a socket that is already in ESTABLISHED
|
|
|
|
* state - UDP-style peeled off socket or a TCP-style socket that
|
|
|
|
* is already connected.
|
|
|
|
* It cannot be done even on a TCP-style listening socket.
|
|
|
|
*/
|
2016-07-16 03:38:19 +08:00
|
|
|
if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) ||
|
2005-06-21 04:14:57 +08:00
|
|
|
(sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) {
|
|
|
|
err = -EISCONN;
|
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Walk through the addrs buffer and count the number of addresses. */
|
|
|
|
addr_buf = kaddrs;
|
|
|
|
while (walk_size < addrs_size) {
|
2014-07-31 02:40:53 +08:00
|
|
|
struct sctp_af *af;
|
|
|
|
|
2010-10-01 19:16:58 +08:00
|
|
|
if (walk_size + sizeof(sa_family_t) > addrs_size) {
|
|
|
|
err = -EINVAL;
|
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
|
2011-06-14 00:21:26 +08:00
|
|
|
sa_addr = addr_buf;
|
2006-11-21 09:10:20 +08:00
|
|
|
af = sctp_get_af_specific(sa_addr->sa.sa_family);
|
2005-06-21 04:14:57 +08:00
|
|
|
|
|
|
|
/* If the address family is not supported or if this address
|
|
|
|
* causes the address buffer to overflow return EINVAL.
|
|
|
|
*/
|
|
|
|
if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
|
|
|
|
err = -EINVAL;
|
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
|
2010-10-01 19:16:58 +08:00
|
|
|
port = ntohs(sa_addr->v4.sin_port);
|
|
|
|
|
2007-08-01 22:56:43 +08:00
|
|
|
/* Save current address so we can work with it */
|
|
|
|
memcpy(&to, sa_addr, af->sockaddr_len);
|
|
|
|
|
|
|
|
err = sctp_verify_addr(sk, &to, af->sockaddr_len);
|
2005-06-21 04:14:57 +08:00
|
|
|
if (err)
|
|
|
|
goto out_free;
|
|
|
|
|
2007-05-05 04:34:09 +08:00
|
|
|
/* Make sure the destination port is correctly set
|
|
|
|
* in all addresses.
|
|
|
|
*/
|
2013-04-03 11:02:28 +08:00
|
|
|
if (asoc && asoc->peer.port && asoc->peer.port != port) {
|
|
|
|
err = -EINVAL;
|
2007-05-05 04:34:09 +08:00
|
|
|
goto out_free;
|
2013-04-03 11:02:28 +08:00
|
|
|
}
|
2005-06-21 04:14:57 +08:00
|
|
|
|
|
|
|
/* Check if there already is a matching association on the
|
|
|
|
* endpoint (other than the one created here).
|
|
|
|
*/
|
2007-08-01 22:56:43 +08:00
|
|
|
asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
|
2005-06-21 04:14:57 +08:00
|
|
|
if (asoc2 && asoc2 != asoc) {
|
|
|
|
if (asoc2->state >= SCTP_STATE_ESTABLISHED)
|
|
|
|
err = -EISCONN;
|
|
|
|
else
|
|
|
|
err = -EALREADY;
|
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If we could not find a matching association on the endpoint,
|
|
|
|
* make sure that there is no peeled-off association matching
|
|
|
|
* the peer address even on another socket.
|
|
|
|
*/
|
2007-08-01 22:56:43 +08:00
|
|
|
if (sctp_endpoint_is_peeled_off(ep, &to)) {
|
2005-06-21 04:14:57 +08:00
|
|
|
err = -EADDRNOTAVAIL;
|
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!asoc) {
|
|
|
|
/* If a bind() or sctp_bindx() is not called prior to
|
|
|
|
* an sctp_connectx() call, the system picks an
|
|
|
|
* ephemeral port and will choose an address set
|
|
|
|
* equivalent to binding with a wildcard address.
|
|
|
|
*/
|
|
|
|
if (!ep->base.bind_addr.port) {
|
|
|
|
if (sctp_autobind(sk)) {
|
|
|
|
err = -EAGAIN;
|
|
|
|
goto out_free;
|
|
|
|
}
|
2005-10-29 06:39:02 +08:00
|
|
|
} else {
|
|
|
|
/*
|
2007-02-09 22:25:18 +08:00
|
|
|
* If an unprivileged user inherits a 1-many
|
|
|
|
* style socket with open associations on a
|
|
|
|
* privileged port, it MAY be permitted to
|
|
|
|
* accept new associations, but it SHOULD NOT
|
2005-10-29 06:39:02 +08:00
|
|
|
* be permitted to open new associations.
|
|
|
|
*/
|
2017-01-21 09:49:11 +08:00
|
|
|
if (ep->base.bind_addr.port <
|
|
|
|
inet_prot_sock(net) &&
|
|
|
|
!ns_capable(net->user_ns,
|
|
|
|
CAP_NET_BIND_SERVICE)) {
|
2005-10-29 06:39:02 +08:00
|
|
|
err = -EACCES;
|
|
|
|
goto out_free;
|
|
|
|
}
|
2005-06-21 04:14:57 +08:00
|
|
|
}
|
|
|
|
|
2007-08-01 22:56:43 +08:00
|
|
|
scope = sctp_scope(&to);
|
2005-06-21 04:14:57 +08:00
|
|
|
asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
|
|
|
|
if (!asoc) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto out_free;
|
|
|
|
}
|
2009-11-10 16:57:34 +08:00
|
|
|
|
|
|
|
err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (err < 0) {
|
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
|
2005-06-21 04:14:57 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Prime the peer's transport structures. */
|
2007-08-01 22:56:43 +08:00
|
|
|
transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
|
2005-06-21 04:14:57 +08:00
|
|
|
SCTP_UNKNOWN);
|
|
|
|
if (!transport) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
|
|
|
|
addrcnt++;
|
|
|
|
addr_buf += af->sockaddr_len;
|
|
|
|
walk_size += af->sockaddr_len;
|
|
|
|
}
|
|
|
|
|
2009-06-02 00:41:15 +08:00
|
|
|
/* In case the user of sctp_connectx() wants an association
|
|
|
|
* id back, assign one now.
|
|
|
|
*/
|
|
|
|
if (assoc_id) {
|
|
|
|
err = sctp_assoc_set_id(asoc, GFP_KERNEL);
|
|
|
|
if (err < 0)
|
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
|
2012-08-07 15:25:24 +08:00
|
|
|
err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
|
2005-06-21 04:14:57 +08:00
|
|
|
if (err < 0) {
|
|
|
|
goto out_free;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Initialize sk's dport and daddr for getpeername() */
|
2009-10-15 14:30:45 +08:00
|
|
|
inet_sk(sk)->inet_dport = htons(asoc->peer.port);
|
2014-07-31 02:40:53 +08:00
|
|
|
sp->pf->to_sk_daddr(sa_addr, sk);
|
2006-05-20 01:58:12 +08:00
|
|
|
sk->sk_err = 0;
|
2005-06-21 04:14:57 +08:00
|
|
|
|
2018-05-20 16:39:10 +08:00
|
|
|
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
|
2007-07-04 00:47:40 +08:00
|
|
|
|
2016-11-04 03:03:41 +08:00
|
|
|
if (assoc_id)
|
2008-05-10 06:14:11 +08:00
|
|
|
*assoc_id = asoc->assoc_id;
|
2018-02-14 04:56:24 +08:00
|
|
|
|
2016-11-04 03:03:41 +08:00
|
|
|
err = sctp_wait_for_connect(asoc, &timeo);
|
|
|
|
/* Note: the asoc may be freed after the return of
|
|
|
|
* sctp_wait_for_connect.
|
|
|
|
*/
|
2005-06-21 04:14:57 +08:00
|
|
|
|
|
|
|
/* Don't free association on exit. */
|
|
|
|
asoc = NULL;
|
|
|
|
|
|
|
|
out_free:
|
2013-06-29 01:49:40 +08:00
|
|
|
pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
|
|
|
|
__func__, asoc, kaddrs, err);
|
2005-06-21 04:14:57 +08:00
|
|
|
|
sctp: Fix list corruption resulting from freeing an association on a list
A few days ago Dave Jones reported this oops:
[22766.294255] general protection fault: 0000 [#1] PREEMPT SMP
[22766.295376] CPU 0
[22766.295384] Modules linked in:
[22766.387137] ffffffffa169f292 6b6b6b6b6b6b6b6b ffff880147c03a90
ffff880147c03a74
[22766.387135] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 00000000000
[22766.387136] Process trinity-watchdo (pid: 10896, threadinfo ffff88013e7d2000,
[22766.387137] Stack:
[22766.387140] ffff880147c03a10
[22766.387140] ffffffffa169f2b6
[22766.387140] ffff88013ed95728
[22766.387143] 0000000000000002
[22766.387143] 0000000000000000
[22766.387143] ffff880003fad062
[22766.387144] ffff88013c120000
[22766.387144]
[22766.387145] Call Trace:
[22766.387145] <IRQ>
[22766.387150] [<ffffffffa169f292>] ? __sctp_lookup_association+0x62/0xd0
[sctp]
[22766.387154] [<ffffffffa169f2b6>] __sctp_lookup_association+0x86/0xd0 [sctp]
[22766.387157] [<ffffffffa169f597>] sctp_rcv+0x207/0xbb0 [sctp]
[22766.387161] [<ffffffff810d4da8>] ? trace_hardirqs_off_caller+0x28/0xd0
[22766.387163] [<ffffffff815827e3>] ? nf_hook_slow+0x133/0x210
[22766.387166] [<ffffffff815902fc>] ? ip_local_deliver_finish+0x4c/0x4c0
[22766.387168] [<ffffffff8159043d>] ip_local_deliver_finish+0x18d/0x4c0
[22766.387169] [<ffffffff815902fc>] ? ip_local_deliver_finish+0x4c/0x4c0
[22766.387171] [<ffffffff81590a07>] ip_local_deliver+0x47/0x80
[22766.387172] [<ffffffff8158fd80>] ip_rcv_finish+0x150/0x680
[22766.387174] [<ffffffff81590c54>] ip_rcv+0x214/0x320
[22766.387176] [<ffffffff81558c07>] __netif_receive_skb+0x7b7/0x910
[22766.387178] [<ffffffff8155856c>] ? __netif_receive_skb+0x11c/0x910
[22766.387180] [<ffffffff810d423e>] ? put_lock_stats.isra.25+0xe/0x40
[22766.387182] [<ffffffff81558f83>] netif_receive_skb+0x23/0x1f0
[22766.387183] [<ffffffff815596a9>] ? dev_gro_receive+0x139/0x440
[22766.387185] [<ffffffff81559280>] napi_skb_finish+0x70/0xa0
[22766.387187] [<ffffffff81559cb5>] napi_gro_receive+0xf5/0x130
[22766.387218] [<ffffffffa01c4679>] e1000_receive_skb+0x59/0x70 [e1000e]
[22766.387242] [<ffffffffa01c5aab>] e1000_clean_rx_irq+0x28b/0x460 [e1000e]
[22766.387266] [<ffffffffa01c9c18>] e1000e_poll+0x78/0x430 [e1000e]
[22766.387268] [<ffffffff81559fea>] net_rx_action+0x1aa/0x3d0
[22766.387270] [<ffffffff810a495f>] ? account_system_vtime+0x10f/0x130
[22766.387273] [<ffffffff810734d0>] __do_softirq+0xe0/0x420
[22766.387275] [<ffffffff8169826c>] call_softirq+0x1c/0x30
[22766.387278] [<ffffffff8101db15>] do_softirq+0xd5/0x110
[22766.387279] [<ffffffff81073bc5>] irq_exit+0xd5/0xe0
[22766.387281] [<ffffffff81698b03>] do_IRQ+0x63/0xd0
[22766.387283] [<ffffffff8168ee2f>] common_interrupt+0x6f/0x6f
[22766.387283] <EOI>
[22766.387284]
[22766.387285] [<ffffffff8168eed9>] ? retint_swapgs+0x13/0x1b
[22766.387285] Code: c0 90 5d c3 66 0f 1f 44 00 00 4c 89 c8 5d c3 0f 1f 00 55 48
89 e5 48 83
ec 20 48 89 5d e8 4c 89 65 f0 4c 89 6d f8 66 66 66 66 90 <0f> b7 87 98 00 00 00
48 89 fb
49 89 f5 66 c1 c0 08 66 39 46 02
[22766.387307]
[22766.387307] RIP
[22766.387311] [<ffffffffa168a2c9>] sctp_assoc_is_match+0x19/0x90 [sctp]
[22766.387311] RSP <ffff880147c039b0>
[22766.387142] ffffffffa16ab120
[22766.599537] ---[ end trace 3f6dae82e37b17f5 ]---
[22766.601221] Kernel panic - not syncing: Fatal exception in interrupt
It appears from his analysis and some staring at the code that this is likely
occurring because an association is getting freed while still on the
sctp_assoc_hashtable. As a result, we get a gpf when traversing the hashtable
while a freed node corrupts part of the list.
Nominally I would think that an imbalanced refcount was responsible for this,
but I can't seem to find any obvious imbalance. What I did note, however, was
that the two places where we create an association using
sctp_primitive_ASSOCIATE (__sctp_connect and sctp_sendmsg) have failure paths
which free a newly created association after calling sctp_primitive_ASSOCIATE.
sctp_primitive_ASSOCIATE brings us into the sctp_sf_do_prm_asoc path, which
issues a SCTP_CMD_NEW_ASOC side effect, which in turn adds a new association to
the aforementioned hash table. The sctp command interpreter that processes side
effects has no way to unwind previously processed commands, so freeing the
association from the __sctp_connect or sctp_sendmsg error path would lead to a
freed association remaining on this hash table.
I've fixed this by modifying sctp_[un]hash_established to use hlist_del_init,
which allows us to properly use hlist_unhashed to check if the node is on a
hashlist safely during a delete. That in turn allows us to safely call
sctp_unhash_established in the __sctp_connect and sctp_sendmsg error paths
before freeing them, regardless of what the association's state is on the hash
list.
I noted, while I was doing this, that __sctp_unhash_endpoint was using
hlist_unhashed in a similar fashion, but never nullified any removed nodes'
pointers to make that function work properly, so I fixed that up in a similar
fashion.
I attempted to test this using a virtual guest running the SCTP_RR test from
netperf while running the trinity fuzzer, both in a loop. I wasn't
able to recreate the problem prior to this fix, nor was I able to trigger the
failure after (neither of which I suppose is surprising). Given the trace above,
however, I think it's likely that this is what we hit.
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
Reported-by: davej@redhat.com
CC: davej@redhat.com
CC: "David S. Miller" <davem@davemloft.net>
CC: Vlad Yasevich <vyasevich@gmail.com>
CC: Sridhar Samudrala <sri@us.ibm.com>
CC: linux-sctp@vger.kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net>
2012-07-16 17:13:51 +08:00
|
|
|
if (asoc) {
|
|
|
|
/* sctp_primitive_ASSOCIATE may have added this association
|
|
|
|
		 * to the hash table; try to unhash it just in case. It's a noop
|
|
|
|
		 * if it wasn't hashed, so we're safe.
|
|
|
|
*/
|
2005-06-21 04:14:57 +08:00
|
|
|
sctp_association_free(asoc);
|
2012-07-16 17:13:51 +08:00
|
|
|
}
|
2005-06-21 04:14:57 +08:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
|
|
|
|
*
|
|
|
|
* API 8.9
|
2008-05-10 06:14:11 +08:00
|
|
|
* int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
|
|
|
|
* sctp_assoc_t *asoc);
|
2005-06-21 04:14:57 +08:00
|
|
|
*
|
|
|
|
* If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
|
|
|
|
* If the sd is an IPv6 socket, the addresses passed can either be IPv4
|
|
|
|
* or IPv6 addresses.
|
|
|
|
*
|
|
|
|
* A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
|
|
|
|
* Section 3.1.2 for this usage.
|
|
|
|
*
|
|
|
|
* addrs is a pointer to an array of one or more socket addresses. Each
|
|
|
|
* address is contained in its appropriate structure (i.e. struct
|
|
|
|
 * sockaddr_in or struct sockaddr_in6); the family of the address type
|
|
|
|
 * must be used to distinguish the address length (note that this
|
|
|
|
* representation is termed a "packed array" of addresses). The caller
|
|
|
|
* specifies the number of addresses in the array with addrcnt.
|
|
|
|
*
|
2008-05-10 06:14:11 +08:00
|
|
|
* On success, sctp_connectx() returns 0. It also sets the assoc_id to
|
|
|
|
* the association id of the new association. On failure, sctp_connectx()
|
|
|
|
* returns -1, and sets errno to the appropriate error code. The assoc_id
|
|
|
|
* is not touched by the kernel.
|
2005-06-21 04:14:57 +08:00
|
|
|
*
|
|
|
|
* For SCTP, the port given in each socket address must be the same, or
|
|
|
|
* sctp_connectx() will fail, setting errno to EINVAL.
|
|
|
|
*
|
|
|
|
* An application can use sctp_connectx to initiate an association with
|
|
|
|
* an endpoint that is multi-homed. Much like sctp_bindx() this call
|
|
|
|
* allows a caller to specify multiple addresses at which a peer can be
|
|
|
|
* reached. The way the SCTP stack uses the list of addresses to set up
|
2011-03-31 09:57:33 +08:00
|
|
|
* the association is implementation dependent. This function only
|
2005-06-21 04:14:57 +08:00
|
|
|
* specifies that the stack will try to make use of all the addresses in
|
|
|
|
* the list when needed.
|
|
|
|
*
|
|
|
|
* Note that the list of addresses passed in is only used for setting up
|
|
|
|
* the association. It does not necessarily equal the set of addresses
|
|
|
|
* the peer uses for the resulting association. If the caller wants to
|
|
|
|
* find out the set of peer addresses, it must use sctp_getpaddrs() to
|
|
|
|
* retrieve them after the association has been set up.
|
|
|
|
*
|
|
|
|
 * Basically do nothing but copy the addresses from user to kernel
|
|
|
|
 * land and invoking sctp_connectx(). This is used for tunneling
|
|
|
|
* the sctp_connectx() request through sctp_setsockopt() from userspace.
|
|
|
|
*
|
|
|
|
* On exit there is no need to do sockfd_put(), sys_setsockopt() does
|
|
|
|
* it.
|
|
|
|
*
|
|
|
|
* sk The sk of the socket
|
|
|
|
* addrs The pointer to the addresses in user land
|
|
|
|
* addrssize Size of the addrs buffer
|
|
|
|
*
|
2008-05-10 06:14:11 +08:00
|
|
|
* Returns >=0 if ok, <0 errno code on error.
|
2005-06-21 04:14:57 +08:00
|
|
|
*/
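For illustration only (again assuming the lksctp-tools sctp_connectx() wrapper; the helper name, port and addresses are invented), a multi-homed peer could be connected to like this:

#include <string.h>
#include <arpa/inet.h>
#include <netinet/sctp.h>

/* Sketch: set up an association with a peer reachable at two IPv4
 * addresses on port 5000 and return the new association id via *asoc_id.
 */
static int example_connectx(int sd, sctp_assoc_t *asoc_id)
{
	struct sockaddr_in peers[2];

	memset(peers, 0, sizeof(peers));
	peers[0].sin_family = AF_INET;
	peers[0].sin_port = htons(5000);
	inet_pton(AF_INET, "203.0.113.10", &peers[0].sin_addr);
	peers[1] = peers[0];
	inet_pton(AF_INET, "203.0.113.20", &peers[1].sin_addr);

	return sctp_connectx(sd, (struct sockaddr *)peers, 2, asoc_id);
}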
|
2013-12-23 12:16:51 +08:00
|
|
|
static int __sctp_setsockopt_connectx(struct sock *sk,
|
2005-06-21 04:14:57 +08:00
|
|
|
struct sockaddr __user *addrs,
|
2008-05-10 06:14:11 +08:00
|
|
|
int addrs_size,
|
|
|
|
sctp_assoc_t *assoc_id)
|
2005-06-21 04:14:57 +08:00
|
|
|
{
|
|
|
|
struct sockaddr *kaddrs;
|
2018-05-20 16:39:10 +08:00
|
|
|
int err = 0, flags = 0;
|
2005-06-21 04:14:57 +08:00
|
|
|
|
2013-06-29 01:49:40 +08:00
|
|
|
pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
|
|
|
|
__func__, sk, addrs, addrs_size);
|
2005-06-21 04:14:57 +08:00
|
|
|
|
|
|
|
if (unlikely(addrs_size <= 0))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2018-01-08 02:19:09 +08:00
|
|
|
kaddrs = vmemdup_user(addrs, addrs_size);
|
|
|
|
if (unlikely(IS_ERR(kaddrs)))
|
|
|
|
return PTR_ERR(kaddrs);
|
2005-06-21 04:14:57 +08:00
|
|
|
|
2018-02-14 04:56:24 +08:00
|
|
|
/* Allow security module to validate connectx addresses. */
|
|
|
|
err = security_sctp_bind_connect(sk, SCTP_SOCKOPT_CONNECTX,
|
|
|
|
(struct sockaddr *)kaddrs,
|
|
|
|
addrs_size);
|
|
|
|
if (err)
|
|
|
|
goto out_free;
|
|
|
|
|
2018-05-20 16:39:10 +08:00
|
|
|
/* in-kernel sockets don't generally have a file allocated to them
|
|
|
|
* if all they do is call sock_create_kern().
|
|
|
|
*/
|
|
|
|
if (sk->sk_socket->file)
|
|
|
|
flags = sk->sk_socket->file->f_flags;
|
|
|
|
|
|
|
|
err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
|
2018-02-14 04:56:24 +08:00
|
|
|
|
|
|
|
out_free:
|
2018-01-08 02:19:09 +08:00
|
|
|
kvfree(kaddrs);
|
2008-05-10 06:14:11 +08:00
|
|
|
|
2005-06-21 04:14:57 +08:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2008-05-10 06:14:11 +08:00
|
|
|
/*
|
|
|
|
* This is an older interface. It's kept for backward compatibility
|
|
|
|
 * with the option that doesn't provide an association id.
|
|
|
|
*/
|
2013-12-23 12:16:51 +08:00
|
|
|
static int sctp_setsockopt_connectx_old(struct sock *sk,
|
2013-06-17 17:40:05 +08:00
|
|
|
struct sockaddr __user *addrs,
|
|
|
|
int addrs_size)
|
2008-05-10 06:14:11 +08:00
|
|
|
{
|
|
|
|
return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
 * New interface for the API. Since the API is done with a socket
|
|
|
|
 * option, to make it simple we feed back the association id as a return
|
|
|
|
* indication to the call. Error is always negative and association id is
|
|
|
|
* always positive.
|
|
|
|
*/
|
2013-12-23 12:16:51 +08:00
|
|
|
static int sctp_setsockopt_connectx(struct sock *sk,
|
2013-06-17 17:40:05 +08:00
|
|
|
struct sockaddr __user *addrs,
|
|
|
|
int addrs_size)
|
2008-05-10 06:14:11 +08:00
|
|
|
{
|
|
|
|
sctp_assoc_t assoc_id = 0;
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);
|
|
|
|
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
else
|
|
|
|
return assoc_id;
|
|
|
|
}
|
|
|
|
|
2009-06-02 00:41:15 +08:00
|
|
|
/*
|
2009-11-11 16:19:24 +08:00
|
|
|
* New (hopefully final) interface for the API.
|
|
|
|
 * We use the sctp_getaddrs_old structure so that the user-space library
|
2014-02-17 19:11:11 +08:00
|
|
|
 * can avoid any unnecessary allocations. The only difference
|
2009-11-11 16:19:24 +08:00
|
|
|
* is that we store the actual length of the address buffer into the
|
2014-02-17 19:11:11 +08:00
|
|
|
 * addr_num structure member. That way we can re-use the existing
|
2009-11-11 16:19:24 +08:00
|
|
|
* code.
|
2009-06-02 00:41:15 +08:00
|
|
|
*/
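A hedged sketch of what the user-space side of this tunneling is expected to look like (not code from this file; it assumes the SCTP_SOCKOPT_CONNECTX3 option name and the struct sctp_getaddrs_old layout from the uapi headers, and the helper name is invented):

#include <errno.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

/* Sketch: tunnel a connectx request through getsockopt().  'addrs' is a
 * packed array of sockaddrs, 'addrs_len' its length in bytes.  On success
 * (or EINPROGRESS) the kernel writes the association id back into the
 * start of the option buffer, i.e. into param.assoc_id.
 */
static int example_connectx3(int sd, struct sockaddr *addrs,
			     socklen_t addrs_len, sctp_assoc_t *id)
{
	struct sctp_getaddrs_old param;
	socklen_t opt_len = sizeof(param);
	int err;

	memset(&param, 0, sizeof(param));
	param.addr_num = addrs_len;	/* actual buffer length, see above */
	param.addrs = addrs;

	err = getsockopt(sd, IPPROTO_SCTP, SCTP_SOCKOPT_CONNECTX3,
			 &param, &opt_len);
	if (err == 0 || errno == EINPROGRESS)
		*id = param.assoc_id;
	return err;
}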
|
2014-02-17 19:11:11 +08:00
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
struct compat_sctp_getaddrs_old {
|
|
|
|
sctp_assoc_t assoc_id;
|
|
|
|
s32 addr_num;
|
|
|
|
compat_uptr_t addrs; /* struct sockaddr * */
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
2013-12-23 12:16:51 +08:00
|
|
|
static int sctp_getsockopt_connectx3(struct sock *sk, int len,
|
2013-06-17 17:40:05 +08:00
|
|
|
char __user *optval,
|
|
|
|
int __user *optlen)
|
2009-06-02 00:41:15 +08:00
|
|
|
{
|
2009-11-11 16:19:24 +08:00
|
|
|
struct sctp_getaddrs_old param;
|
2009-06-02 00:41:15 +08:00
|
|
|
sctp_assoc_t assoc_id = 0;
|
|
|
|
int err = 0;
|
|
|
|
|
2014-02-17 19:11:11 +08:00
|
|
|
#ifdef CONFIG_COMPAT
|
2016-03-23 05:25:07 +08:00
|
|
|
if (in_compat_syscall()) {
|
2014-02-17 19:11:11 +08:00
|
|
|
struct compat_sctp_getaddrs_old param32;
|
2009-06-02 00:41:15 +08:00
|
|
|
|
2014-02-17 19:11:11 +08:00
|
|
|
if (len < sizeof(param32))
|
|
|
|
return -EINVAL;
|
|
|
|
if (copy_from_user(¶m32, optval, sizeof(param32)))
|
|
|
|
return -EFAULT;
|
2009-11-11 16:19:24 +08:00
|
|
|
|
2014-02-17 19:11:11 +08:00
|
|
|
param.assoc_id = param32.assoc_id;
|
|
|
|
param.addr_num = param32.addr_num;
|
|
|
|
param.addrs = compat_ptr(param32.addrs);
|
|
|
|
} else
|
|
|
|
#endif
|
|
|
|
{
|
|
|
|
if (len < sizeof(param))
|
|
|
|
return -EINVAL;
|
|
|
|
if (copy_from_user(¶m, optval, sizeof(param)))
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
2009-06-02 00:41:15 +08:00
|
|
|
|
2014-02-17 19:11:11 +08:00
|
|
|
err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
|
|
|
|
param.addrs, param.addr_num,
|
|
|
|
&assoc_id);
|
2009-06-02 00:41:15 +08:00
|
|
|
if (err == 0 || err == -EINPROGRESS) {
|
|
|
|
if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
|
|
|
|
return -EFAULT;
|
|
|
|
if (put_user(sizeof(assoc_id), optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* API 3.1.4 close() - UDP Style Syntax
|
|
|
|
* Applications use close() to perform graceful shutdown (as described in
|
|
|
|
* Section 10.1 of [SCTP]) on ALL the associations currently represented
|
|
|
|
* by a UDP-style socket.
|
|
|
|
*
|
|
|
|
* The syntax is
|
|
|
|
*
|
|
|
|
* ret = close(int sd);
|
|
|
|
*
|
|
|
|
* sd - the socket descriptor of the associations to be closed.
|
|
|
|
*
|
|
|
|
* To gracefully shutdown a specific association represented by the
|
|
|
|
* UDP-style socket, an application should use the sendmsg() call,
|
|
|
|
* passing no user data, but including the appropriate flag in the
|
|
|
|
* ancillary data (see Section xxxx).
|
|
|
|
*
|
|
|
|
* If sd in the close() call is a branched-off socket representing only
|
|
|
|
* one association, the shutdown is performed on that association only.
|
|
|
|
*
|
|
|
|
* 4.1.6 close() - TCP Style Syntax
|
|
|
|
*
|
|
|
|
* Applications use close() to gracefully close down an association.
|
|
|
|
*
|
|
|
|
* The syntax is:
|
|
|
|
*
|
|
|
|
* int close(int sd);
|
|
|
|
*
|
|
|
|
* sd - the socket descriptor of the association to be closed.
|
|
|
|
*
|
|
|
|
* After an application calls close() on a socket descriptor, no further
|
|
|
|
* socket operations will succeed on that descriptor.
|
|
|
|
*
|
|
|
|
* API 7.1.4 SO_LINGER
|
|
|
|
*
|
|
|
|
* An application using the TCP-style socket can use this option to
|
|
|
|
* perform the SCTP ABORT primitive. The linger option structure is:
|
|
|
|
*
|
|
|
|
* struct linger {
|
|
|
|
* int l_onoff; // option on/off
|
|
|
|
* int l_linger; // linger time
|
|
|
|
* };
|
|
|
|
*
|
|
|
|
* To enable the option, set l_onoff to 1. If the l_linger value is set
|
|
|
|
* to 0, calling close() is the same as the ABORT primitive. If the
|
|
|
|
* value is set to a negative value, the setsockopt() call will return
|
|
|
|
* an error. If the value is set to a positive value linger_time, the
|
|
|
|
* close() can be blocked for at most linger_time ms. If the graceful
|
|
|
|
* shutdown phase does not finish during this period, close() will
|
|
|
|
* return but the graceful shutdown phase continues in the system.
|
|
|
|
*/
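To make the SO_LINGER behaviour above concrete, here is a minimal user-space sketch (helper name invented) that turns close() into an ABORT on a TCP-style socket:

#include <sys/socket.h>
#include <unistd.h>

/* Sketch: enable SO_LINGER with a zero linger time so that close()
 * aborts the association instead of starting a graceful shutdown.
 */
static void example_abort_on_close(int sd)
{
	struct linger lin = { .l_onoff = 1, .l_linger = 0 };

	setsockopt(sd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
	close(sd);
}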
|
2013-06-17 17:40:05 +08:00
|
|
|
static void sctp_close(struct sock *sk, long timeout)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2012-08-07 15:25:24 +08:00
|
|
|
struct net *net = sock_net(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
struct sctp_endpoint *ep;
|
|
|
|
struct sctp_association *asoc;
|
|
|
|
struct list_head *pos, *temp;
|
2011-07-08 12:37:46 +08:00
|
|
|
unsigned int data_was_unread;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-06-29 01:49:40 +08:00
|
|
|
pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2017-06-10 14:56:56 +08:00
|
|
|
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
|
2005-04-17 06:20:36 +08:00
|
|
|
sk->sk_shutdown = SHUTDOWN_MASK;
|
2017-12-20 11:12:54 +08:00
|
|
|
inet_sk_set_state(sk, SCTP_SS_CLOSING);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
ep = sctp_sk(sk)->ep;
|
|
|
|
|
2011-07-08 12:37:46 +08:00
|
|
|
/* Clean up any skbs sitting on the receive queue. */
|
|
|
|
data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
|
|
|
|
data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
|
|
|
|
|
2006-05-20 02:01:18 +08:00
|
|
|
/* Walk all associations on an endpoint. */
|
2005-04-17 06:20:36 +08:00
|
|
|
list_for_each_safe(pos, temp, &ep->asocs) {
|
|
|
|
asoc = list_entry(pos, struct sctp_association, asocs);
|
|
|
|
|
|
|
|
if (sctp_style(sk, TCP)) {
|
|
|
|
/* A closed association can still be in the list if
|
|
|
|
* it belongs to a TCP-style listening socket that is
|
|
|
|
* not yet accepted. If so, free it. If not, send an
|
|
|
|
* ABORT or SHUTDOWN based on the linger options.
|
|
|
|
*/
|
|
|
|
if (sctp_state(asoc, CLOSED)) {
|
|
|
|
sctp_association_free(asoc);
|
2006-05-20 05:32:06 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-07-08 12:37:46 +08:00
|
|
|
if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
|
|
|
|
!skb_queue_empty(&asoc->ulpq.reasm) ||
|
2017-12-08 21:04:09 +08:00
|
|
|
!skb_queue_empty(&asoc->ulpq.reasm_uo) ||
|
2011-07-08 12:37:46 +08:00
|
|
|
(sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
|
2006-08-29 04:53:01 +08:00
|
|
|
struct sctp_chunk *chunk;
|
|
|
|
|
|
|
|
chunk = sctp_make_abort_user(asoc, NULL, 0);
|
2015-12-29 17:49:25 +08:00
|
|
|
sctp_primitive_ABORT(net, asoc, chunk);
|
2006-08-29 04:53:01 +08:00
|
|
|
} else
|
2012-08-07 15:25:24 +08:00
|
|
|
sctp_primitive_SHUTDOWN(net, asoc, NULL);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* On a TCP-style socket, block for at most linger_time if set. */
|
|
|
|
if (sctp_style(sk, TCP) && timeout)
|
|
|
|
sctp_wait_for_close(sk, timeout);
|
|
|
|
|
|
|
|
/* This will run the backlog queue. */
|
2014-01-21 15:44:11 +08:00
|
|
|
release_sock(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Supposedly, no process has access to the socket, but
|
|
|
|
* the net layers still may.
|
2015-06-12 21:16:41 +08:00
|
|
|
* Also, sctp_destroy_sock() needs to be called with addr_wq_lock
|
|
|
|
* held and that should be grabbed before socket lock.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2015-06-12 21:16:41 +08:00
|
|
|
spin_lock_bh(&net->sctp.addr_wq_lock);
|
2017-06-10 14:56:56 +08:00
|
|
|
bh_lock_sock_nested(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
	/* Hold the sock, since sk_common_release() will call sock_put()
|
|
|
|
* and we have just a little more cleanup.
|
|
|
|
*/
|
|
|
|
sock_hold(sk);
|
|
|
|
sk_common_release(sk);
|
|
|
|
|
2014-01-21 15:44:12 +08:00
|
|
|
bh_unlock_sock(sk);
|
2015-06-12 21:16:41 +08:00
|
|
|
spin_unlock_bh(&net->sctp.addr_wq_lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
sock_put(sk);
|
|
|
|
|
|
|
|
SCTP_DBG_OBJCNT_DEC(sock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Handle EPIPE error. */
|
|
|
|
static int sctp_error(struct sock *sk, int flags, int err)
|
|
|
|
{
|
|
|
|
if (err == -EPIPE)
|
|
|
|
err = sock_error(sk) ? : -EPIPE;
|
|
|
|
if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
|
|
|
|
send_sig(SIGPIPE, current, 0);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* API 3.1.3 sendmsg() - UDP Style Syntax
|
|
|
|
*
|
|
|
|
* An application uses sendmsg() and recvmsg() calls to transmit data to
|
|
|
|
* and receive data from its peer.
|
|
|
|
*
|
|
|
|
* ssize_t sendmsg(int socket, const struct msghdr *message,
|
|
|
|
* int flags);
|
|
|
|
*
|
|
|
|
* socket - the socket descriptor of the endpoint.
|
|
|
|
* message - pointer to the msghdr structure which contains a single
|
|
|
|
* user message and possibly some ancillary data.
|
|
|
|
*
|
|
|
|
* See Section 5 for complete description of the data
|
|
|
|
* structures.
|
|
|
|
*
|
|
|
|
* flags - flags sent or received with the user message, see Section
|
|
|
|
* 5 for complete description of the flags.
|
|
|
|
*
|
|
|
|
* Note: This function could use a rewrite especially when explicit
|
|
|
|
* connect support comes in.
|
|
|
|
*/
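As a user-space counterpart to the ancillary-data handling below (illustrative only; the helper name is invented and it assumes the SCTP_SNDRCV cmsg type and struct sctp_sndrcvinfo from the SCTP headers), a message can be sent on a chosen stream like this:

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Sketch: send one message on a given stream of a UDP-style socket,
 * passing a struct sctp_sndrcvinfo as SCTP_SNDRCV ancillary data.
 */
static ssize_t example_sendmsg(int sd, const struct sockaddr_in *to,
			       const void *data, size_t len, uint16_t stream)
{
	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))] = { 0 };
	struct iovec iov = { .iov_base = (void *)data, .iov_len = len };
	struct sctp_sndrcvinfo *sinfo;
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;

	msg.msg_name = (void *)to;
	msg.msg_namelen = sizeof(*to);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_SNDRCV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(*sinfo));
	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
	memset(sinfo, 0, sizeof(*sinfo));
	sinfo->sinfo_stream = stream;

	return sendmsg(sd, &msg, 0);
}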
|
|
|
|
/* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */
|
|
|
|
|
2017-08-11 10:23:48 +08:00
|
|
|
static int sctp_msghdr_parse(const struct msghdr *msg,
|
|
|
|
struct sctp_cmsgs *cmsgs);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-03-01 23:05:14 +08:00
|
|
|
static int sctp_sendmsg_parse(struct sock *sk, struct sctp_cmsgs *cmsgs,
|
|
|
|
struct sctp_sndrcvinfo *srinfo,
|
|
|
|
const struct msghdr *msg, size_t msg_len)
|
|
|
|
{
|
|
|
|
__u16 sflags;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (sctp_sstate(sk, LISTENING) && sctp_style(sk, TCP))
|
|
|
|
return -EPIPE;
|
|
|
|
|
|
|
|
if (msg_len > sk->sk_sndbuf)
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
memset(cmsgs, 0, sizeof(*cmsgs));
|
|
|
|
err = sctp_msghdr_parse(msg, cmsgs);
|
|
|
|
if (err) {
|
|
|
|
pr_debug("%s: msghdr parse err:%x\n", __func__, err);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
memset(srinfo, 0, sizeof(*srinfo));
|
|
|
|
if (cmsgs->srinfo) {
|
|
|
|
srinfo->sinfo_stream = cmsgs->srinfo->sinfo_stream;
|
|
|
|
srinfo->sinfo_flags = cmsgs->srinfo->sinfo_flags;
|
|
|
|
srinfo->sinfo_ppid = cmsgs->srinfo->sinfo_ppid;
|
|
|
|
srinfo->sinfo_context = cmsgs->srinfo->sinfo_context;
|
|
|
|
srinfo->sinfo_assoc_id = cmsgs->srinfo->sinfo_assoc_id;
|
|
|
|
srinfo->sinfo_timetolive = cmsgs->srinfo->sinfo_timetolive;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cmsgs->sinfo) {
|
|
|
|
srinfo->sinfo_stream = cmsgs->sinfo->snd_sid;
|
|
|
|
srinfo->sinfo_flags = cmsgs->sinfo->snd_flags;
|
|
|
|
srinfo->sinfo_ppid = cmsgs->sinfo->snd_ppid;
|
|
|
|
srinfo->sinfo_context = cmsgs->sinfo->snd_context;
|
|
|
|
srinfo->sinfo_assoc_id = cmsgs->sinfo->snd_assoc_id;
|
|
|
|
}
|
|
|
|
|
2018-03-05 20:44:18 +08:00
|
|
|
if (cmsgs->prinfo) {
|
|
|
|
srinfo->sinfo_timetolive = cmsgs->prinfo->pr_value;
|
|
|
|
SCTP_PR_SET_POLICY(srinfo->sinfo_flags,
|
|
|
|
cmsgs->prinfo->pr_policy);
|
|
|
|
}
|
|
|
|
|
2018-03-01 23:05:14 +08:00
|
|
|
sflags = srinfo->sinfo_flags;
|
|
|
|
if (!sflags && msg_len)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (sctp_style(sk, TCP) && (sflags & (SCTP_EOF | SCTP_ABORT)))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (((sflags & SCTP_EOF) && msg_len > 0) ||
|
|
|
|
(!(sflags & (SCTP_EOF | SCTP_ABORT)) && msg_len == 0))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if ((sflags & SCTP_ADDR_OVER) && !msg->msg_name)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-03-01 23:05:11 +08:00
|
|
|
static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags,
|
|
|
|
struct sctp_cmsgs *cmsgs,
|
|
|
|
union sctp_addr *daddr,
|
|
|
|
struct sctp_transport **tp)
|
|
|
|
{
|
|
|
|
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
|
|
|
|
struct net *net = sock_net(sk);
|
|
|
|
struct sctp_association *asoc;
|
|
|
|
enum sctp_scope scope;
|
2018-03-05 20:44:19 +08:00
|
|
|
struct cmsghdr *cmsg;
|
2018-07-02 18:21:14 +08:00
|
|
|
__be32 flowinfo = 0;
|
2018-04-07 06:39:26 +08:00
|
|
|
struct sctp_af *af;
|
2018-03-13 11:03:30 +08:00
|
|
|
int err;
|
2018-03-01 23:05:11 +08:00
|
|
|
|
|
|
|
*tp = NULL;
|
|
|
|
|
|
|
|
if (sflags & (SCTP_EOF | SCTP_ABORT))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (sctp_style(sk, TCP) && (sctp_sstate(sk, ESTABLISHED) ||
|
|
|
|
sctp_sstate(sk, CLOSING)))
|
|
|
|
return -EADDRNOTAVAIL;
|
|
|
|
|
|
|
|
if (sctp_endpoint_is_peeled_off(ep, daddr))
|
|
|
|
return -EADDRNOTAVAIL;
|
|
|
|
|
|
|
|
if (!ep->base.bind_addr.port) {
|
|
|
|
if (sctp_autobind(sk))
|
|
|
|
return -EAGAIN;
|
|
|
|
} else {
|
|
|
|
if (ep->base.bind_addr.port < inet_prot_sock(net) &&
|
|
|
|
!ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
|
|
|
|
return -EACCES;
|
|
|
|
}
|
|
|
|
|
|
|
|
scope = sctp_scope(daddr);
|
|
|
|
|
2018-04-07 06:39:26 +08:00
|
|
|
/* Label connection socket for first association 1-to-many
|
|
|
|
* style for client sequence socket()->sendmsg(). This
|
|
|
|
* needs to be done before sctp_assoc_add_peer() as that will
|
|
|
|
* set up the initial packet that needs to account for any
|
|
|
|
* security ip options (CIPSO/CALIPSO) added to the packet.
|
|
|
|
*/
|
|
|
|
af = sctp_get_af_specific(daddr->sa.sa_family);
|
|
|
|
if (!af)
|
|
|
|
return -EINVAL;
|
|
|
|
err = security_sctp_bind_connect(sk, SCTP_SENDMSG_CONNECT,
|
|
|
|
(struct sockaddr *)daddr,
|
|
|
|
af->sockaddr_len);
|
|
|
|
if (err < 0)
|
|
|
|
return err;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-03-01 23:05:11 +08:00
|
|
|
asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
|
|
|
|
if (!asoc)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
if (sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL) < 0) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto free;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cmsgs->init) {
|
|
|
|
struct sctp_initmsg *init = cmsgs->init;
|
|
|
|
|
|
|
|
if (init->sinit_num_ostreams) {
|
|
|
|
__u16 outcnt = init->sinit_num_ostreams;
|
|
|
|
|
|
|
|
asoc->c.sinit_num_ostreams = outcnt;
|
|
|
|
/* outcnt has been changed, need to re-init stream */
|
|
|
|
err = sctp_stream_init(&asoc->stream, outcnt, 0,
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (err)
|
|
|
|
goto free;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (init->sinit_max_instreams)
|
|
|
|
asoc->c.sinit_max_instreams = init->sinit_max_instreams;
|
|
|
|
|
|
|
|
if (init->sinit_max_attempts)
|
|
|
|
asoc->max_init_attempts = init->sinit_max_attempts;
|
|
|
|
|
|
|
|
if (init->sinit_max_init_timeo)
|
|
|
|
asoc->max_init_timeo =
|
|
|
|
msecs_to_jiffies(init->sinit_max_init_timeo);
|
|
|
|
}
|
|
|
|
|
|
|
|
*tp = sctp_assoc_add_peer(asoc, daddr, GFP_KERNEL, SCTP_UNKNOWN);
|
|
|
|
if (!*tp) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto free;
|
|
|
|
}
|
|
|
|
|
2018-03-05 20:44:19 +08:00
|
|
|
if (!cmsgs->addrs_msg)
|
|
|
|
return 0;
|
|
|
|
|
2018-07-02 18:21:14 +08:00
|
|
|
if (daddr->sa.sa_family == AF_INET6)
|
|
|
|
flowinfo = daddr->v6.sin6_flowinfo;
|
|
|
|
|
2018-03-05 20:44:19 +08:00
|
|
|
/* sendv addr list parse */
|
|
|
|
for_each_cmsghdr(cmsg, cmsgs->addrs_msg) {
|
|
|
|
struct sctp_transport *transport;
|
|
|
|
struct sctp_association *old;
|
|
|
|
union sctp_addr _daddr;
|
|
|
|
int dlen;
|
|
|
|
|
|
|
|
if (cmsg->cmsg_level != IPPROTO_SCTP ||
|
|
|
|
(cmsg->cmsg_type != SCTP_DSTADDRV4 &&
|
|
|
|
cmsg->cmsg_type != SCTP_DSTADDRV6))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
daddr = &_daddr;
|
|
|
|
memset(daddr, 0, sizeof(*daddr));
|
|
|
|
dlen = cmsg->cmsg_len - sizeof(struct cmsghdr);
|
|
|
|
if (cmsg->cmsg_type == SCTP_DSTADDRV4) {
|
2018-03-13 11:03:30 +08:00
|
|
|
if (dlen < sizeof(struct in_addr)) {
|
|
|
|
err = -EINVAL;
|
2018-03-05 20:44:19 +08:00
|
|
|
goto free;
|
2018-03-13 11:03:30 +08:00
|
|
|
}
|
2018-03-05 20:44:19 +08:00
|
|
|
|
|
|
|
dlen = sizeof(struct in_addr);
|
|
|
|
daddr->v4.sin_family = AF_INET;
|
|
|
|
daddr->v4.sin_port = htons(asoc->peer.port);
|
|
|
|
memcpy(&daddr->v4.sin_addr, CMSG_DATA(cmsg), dlen);
|
|
|
|
} else {
|
2018-03-13 11:03:30 +08:00
|
|
|
if (dlen < sizeof(struct in6_addr)) {
|
|
|
|
err = -EINVAL;
|
2018-03-05 20:44:19 +08:00
|
|
|
goto free;
|
2018-03-13 11:03:30 +08:00
|
|
|
}
|
2018-03-05 20:44:19 +08:00
|
|
|
|
|
|
|
dlen = sizeof(struct in6_addr);
|
2018-07-02 18:21:14 +08:00
|
|
|
daddr->v6.sin6_flowinfo = flowinfo;
|
2018-03-05 20:44:19 +08:00
|
|
|
daddr->v6.sin6_family = AF_INET6;
|
|
|
|
daddr->v6.sin6_port = htons(asoc->peer.port);
|
|
|
|
memcpy(&daddr->v6.sin6_addr, CMSG_DATA(cmsg), dlen);
|
|
|
|
}
|
|
|
|
err = sctp_verify_addr(sk, daddr, sizeof(*daddr));
|
|
|
|
if (err)
|
|
|
|
goto free;
|
|
|
|
|
|
|
|
old = sctp_endpoint_lookup_assoc(ep, daddr, &transport);
|
|
|
|
if (old && old != asoc) {
|
|
|
|
if (old->state >= SCTP_STATE_ESTABLISHED)
|
|
|
|
err = -EISCONN;
|
|
|
|
else
|
|
|
|
err = -EALREADY;
|
|
|
|
goto free;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sctp_endpoint_is_peeled_off(ep, daddr)) {
|
|
|
|
err = -EADDRNOTAVAIL;
|
|
|
|
goto free;
|
|
|
|
}
|
|
|
|
|
|
|
|
transport = sctp_assoc_add_peer(asoc, daddr, GFP_KERNEL,
|
|
|
|
SCTP_UNKNOWN);
|
|
|
|
if (!transport) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto free;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-01 23:05:11 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
free:
|
|
|
|
sctp_association_free(asoc);
|
|
|
|
return err;
|
|
|
|
}
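/* A hedged sketch of the userspace side of the SCTP_DSTADDRV4 cmsgs that
 * the address-list loop above parses. Each cmsg carries only the 4-byte
 * IPv4 address; the peer port is filled in by the kernel as shown above.
 * In practice these cmsgs are usually built by sctp_sendv() in
 * lksctp-tools; the constants are assumed to come from a recent UAPI
 * <linux/sctp.h>.
 *
 *   #include <string.h>
 *   #include <sys/socket.h>
 *   #include <netinet/in.h>
 *   #include <linux/sctp.h>
 *
 *   static void fill_dstaddr_cmsg(struct cmsghdr *cmsg, struct in_addr addr)
 *   {
 *           cmsg->cmsg_level = IPPROTO_SCTP;
 *           cmsg->cmsg_type  = SCTP_DSTADDRV4;
 *           cmsg->cmsg_len   = CMSG_LEN(sizeof(addr));
 *           memcpy(CMSG_DATA(cmsg), &addr, sizeof(addr));
 *   }
 */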
|
|
|
|
|
2018-03-01 23:05:12 +08:00
|
|
|
static int sctp_sendmsg_check_sflags(struct sctp_association *asoc,
|
|
|
|
__u16 sflags, struct msghdr *msg,
|
|
|
|
size_t msg_len)
|
|
|
|
{
|
|
|
|
struct sock *sk = asoc->base.sk;
|
|
|
|
struct net *net = sock_net(sk);
|
|
|
|
|
|
|
|
if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP))
|
|
|
|
return -EPIPE;
|
|
|
|
|
2018-03-05 20:44:20 +08:00
|
|
|
if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP) &&
|
|
|
|
!sctp_state(asoc, ESTABLISHED))
|
|
|
|
return 0;
|
|
|
|
|
2018-03-01 23:05:12 +08:00
|
|
|
if (sflags & SCTP_EOF) {
|
|
|
|
pr_debug("%s: shutting down association:%p\n", __func__, asoc);
|
|
|
|
sctp_primitive_SHUTDOWN(net, asoc, NULL);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sflags & SCTP_ABORT) {
|
|
|
|
struct sctp_chunk *chunk;
|
|
|
|
|
|
|
|
chunk = sctp_make_abort_user(asoc, msg, msg_len);
|
|
|
|
if (!chunk)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
pr_debug("%s: aborting association:%p\n", __func__, asoc);
|
|
|
|
sctp_primitive_ABORT(net, asoc, chunk);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2018-03-01 23:05:10 +08:00
|
|
|
static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
|
|
|
|
struct msghdr *msg, size_t msg_len,
|
|
|
|
struct sctp_transport *transport,
|
|
|
|
struct sctp_sndrcvinfo *sinfo)
|
|
|
|
{
|
|
|
|
struct sock *sk = asoc->base.sk;
|
2018-04-27 03:59:00 +08:00
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
2018-03-01 23:05:10 +08:00
|
|
|
struct net *net = sock_net(sk);
|
|
|
|
struct sctp_datamsg *datamsg;
|
|
|
|
bool wait_connect = false;
|
|
|
|
struct sctp_chunk *chunk;
|
|
|
|
long timeo;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (sinfo->sinfo_stream >= asoc->stream.outcnt) {
|
|
|
|
err = -EINVAL;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2018-08-11 01:11:42 +08:00
|
|
|
if (unlikely(!SCTP_SO(&asoc->stream, sinfo->sinfo_stream)->ext)) {
|
2018-03-01 23:05:10 +08:00
|
|
|
err = sctp_stream_init_ext(&asoc->stream, sinfo->sinfo_stream);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2018-04-27 03:59:00 +08:00
|
|
|
if (sp->disable_fragments && msg_len > asoc->frag_point) {
|
2018-03-01 23:05:10 +08:00
|
|
|
err = -EMSGSIZE;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2018-04-27 03:58:56 +08:00
|
|
|
if (asoc->pmtu_pending) {
|
2018-04-27 03:59:00 +08:00
|
|
|
if (sp->param_flags & SPP_PMTUD_ENABLE)
|
|
|
|
sctp_assoc_sync_pmtu(asoc);
|
2018-04-27 03:58:56 +08:00
|
|
|
asoc->pmtu_pending = 0;
|
|
|
|
}
|
2018-03-13 02:15:25 +08:00
|
|
|
|
2018-10-17 03:07:51 +08:00
|
|
|
if (sctp_wspace(asoc) < (int)msg_len)
|
2018-03-13 02:15:25 +08:00
|
|
|
sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));
|
|
|
|
|
2018-10-17 03:07:51 +08:00
|
|
|
if (sctp_wspace(asoc) <= 0) {
|
2018-03-13 02:15:25 +08:00
|
|
|
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
|
|
|
|
err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2018-03-01 23:05:10 +08:00
|
|
|
if (sctp_state(asoc, CLOSED)) {
|
|
|
|
err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
|
|
|
|
2018-04-27 03:59:00 +08:00
|
|
|
if (sp->strm_interleave) {
|
2018-03-01 23:05:10 +08:00
|
|
|
timeo = sock_sndtimeo(sk, 0);
|
|
|
|
err = sctp_wait_for_connect(asoc, &timeo);
|
2018-10-17 03:06:12 +08:00
|
|
|
if (err) {
|
|
|
|
err = -ESRCH;
|
2018-03-01 23:05:10 +08:00
|
|
|
goto err;
|
2018-10-17 03:06:12 +08:00
|
|
|
}
|
2018-03-01 23:05:10 +08:00
|
|
|
} else {
|
|
|
|
wait_connect = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
pr_debug("%s: we associated primitively\n", __func__);
|
|
|
|
}
|
|
|
|
|
|
|
|
datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter);
|
|
|
|
if (IS_ERR(datamsg)) {
|
|
|
|
err = PTR_ERR(datamsg);
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
asoc->force_delay = !!(msg->msg_flags & MSG_MORE);
|
|
|
|
|
|
|
|
list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
|
|
|
|
sctp_chunk_hold(chunk);
|
|
|
|
sctp_set_owner_w(chunk);
|
|
|
|
chunk->transport = transport;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = sctp_primitive_SEND(net, asoc, datamsg);
|
|
|
|
if (err) {
|
|
|
|
sctp_datamsg_free(datamsg);
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
pr_debug("%s: we sent primitively\n", __func__);
|
|
|
|
|
|
|
|
sctp_datamsg_put(datamsg);
|
|
|
|
|
|
|
|
if (unlikely(wait_connect)) {
|
|
|
|
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
|
|
|
|
sctp_wait_for_connect(asoc, &timeo);
|
|
|
|
}
|
|
|
|
|
|
|
|
err = msg_len;
|
|
|
|
|
|
|
|
err:
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-03-01 23:05:13 +08:00
|
|
|
static union sctp_addr *sctp_sendmsg_get_daddr(struct sock *sk,
|
|
|
|
const struct msghdr *msg,
|
|
|
|
struct sctp_cmsgs *cmsgs)
|
|
|
|
{
|
|
|
|
union sctp_addr *daddr = NULL;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
|
|
|
|
int len = msg->msg_namelen;
|
|
|
|
|
|
|
|
if (len > sizeof(*daddr))
|
|
|
|
len = sizeof(*daddr);
|
|
|
|
|
|
|
|
daddr = (union sctp_addr *)msg->msg_name;
|
|
|
|
|
|
|
|
err = sctp_verify_addr(sk, daddr, len);
|
|
|
|
if (err)
|
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
|
|
|
|
|
|
|
return daddr;
|
|
|
|
}
|
|
|
|
|
2018-03-01 23:05:15 +08:00
|
|
|
static void sctp_sendmsg_update_sinfo(struct sctp_association *asoc,
|
|
|
|
struct sctp_sndrcvinfo *sinfo,
|
|
|
|
struct sctp_cmsgs *cmsgs)
|
|
|
|
{
|
|
|
|
if (!cmsgs->srinfo && !cmsgs->sinfo) {
|
|
|
|
sinfo->sinfo_stream = asoc->default_stream;
|
|
|
|
sinfo->sinfo_ppid = asoc->default_ppid;
|
|
|
|
sinfo->sinfo_context = asoc->default_context;
|
|
|
|
sinfo->sinfo_assoc_id = sctp_assoc2id(asoc);
|
2018-03-05 20:44:18 +08:00
|
|
|
|
|
|
|
if (!cmsgs->prinfo)
|
|
|
|
sinfo->sinfo_flags = asoc->default_flags;
|
2018-03-01 23:05:15 +08:00
|
|
|
}
|
|
|
|
|
2018-03-05 20:44:18 +08:00
|
|
|
if (!cmsgs->srinfo && !cmsgs->prinfo)
|
2018-03-01 23:05:15 +08:00
|
|
|
sinfo->sinfo_timetolive = asoc->default_timetolive;
|
2018-03-14 19:05:31 +08:00
|
|
|
|
|
|
|
if (cmsgs->authinfo) {
|
|
|
|
/* Reuse sinfo_tsn to indicate that authinfo was set and
|
|
|
|
* sinfo_ssn to save the keyid on tx path.
|
|
|
|
*/
|
|
|
|
sinfo->sinfo_tsn = 1;
|
|
|
|
sinfo->sinfo_ssn = cmsgs->authinfo->auth_keynumber;
|
|
|
|
}
|
2018-03-01 23:05:15 +08:00
|
|
|
}
|
|
|
|
|
2015-03-02 15:37:48 +08:00
|
|
|
static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2018-03-01 23:05:14 +08:00
|
|
|
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
|
2018-03-01 23:05:16 +08:00
|
|
|
struct sctp_transport *transport = NULL;
|
2018-03-01 23:05:14 +08:00
|
|
|
struct sctp_sndrcvinfo _sinfo, *sinfo;
|
2018-03-01 23:05:17 +08:00
|
|
|
struct sctp_association *asoc;
|
|
|
|
struct sctp_cmsgs cmsgs;
|
2018-03-01 23:05:13 +08:00
|
|
|
union sctp_addr *daddr;
|
2018-03-01 23:05:17 +08:00
|
|
|
bool new = false;
|
|
|
|
__u16 sflags;
|
2014-07-13 02:30:36 +08:00
|
|
|
int err;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-03-01 23:05:14 +08:00
|
|
|
/* Parse and get snd_info */
|
|
|
|
err = sctp_sendmsg_parse(sk, &cmsgs, &_sinfo, msg, msg_len);
|
|
|
|
if (err)
|
2018-03-01 23:05:17 +08:00
|
|
|
goto out;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-03-01 23:05:14 +08:00
|
|
|
sinfo = &_sinfo;
|
2018-03-01 23:05:17 +08:00
|
|
|
sflags = sinfo->sinfo_flags;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-03-01 23:05:13 +08:00
|
|
|
/* Get daddr from msg */
|
|
|
|
daddr = sctp_sendmsg_get_daddr(sk, msg, &cmsgs);
|
|
|
|
if (IS_ERR(daddr)) {
|
|
|
|
err = PTR_ERR(daddr);
|
2018-03-01 23:05:17 +08:00
|
|
|
goto out;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2014-01-21 15:44:11 +08:00
|
|
|
lock_sock(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-03-05 20:44:20 +08:00
|
|
|
/* SCTP_SENDALL process */
|
|
|
|
if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) {
|
|
|
|
list_for_each_entry(asoc, &ep->asocs, asocs) {
|
|
|
|
err = sctp_sendmsg_check_sflags(asoc, sflags, msg,
|
|
|
|
msg_len);
|
|
|
|
if (err == 0)
|
|
|
|
continue;
|
|
|
|
if (err < 0)
|
|
|
|
goto out_unlock;
|
|
|
|
|
|
|
|
sctp_sendmsg_update_sinfo(asoc, sinfo, &cmsgs);
|
|
|
|
|
|
|
|
err = sctp_sendmsg_to_asoc(asoc, msg, msg_len,
|
|
|
|
NULL, sinfo);
|
|
|
|
if (err < 0)
|
|
|
|
goto out_unlock;
|
|
|
|
|
|
|
|
iov_iter_revert(&msg->msg_iter, err);
|
|
|
|
}
|
|
|
|
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
2018-03-01 23:05:18 +08:00
|
|
|
/* Get and check or create asoc */
|
2018-03-01 23:05:13 +08:00
|
|
|
if (daddr) {
|
|
|
|
asoc = sctp_endpoint_lookup_assoc(ep, daddr, &transport);
|
2018-03-01 23:05:18 +08:00
|
|
|
if (asoc) {
|
|
|
|
err = sctp_sendmsg_check_sflags(asoc, sflags, msg,
|
|
|
|
msg_len);
|
|
|
|
if (err <= 0)
|
|
|
|
goto out_unlock;
|
|
|
|
} else {
|
|
|
|
err = sctp_sendmsg_new_asoc(sk, sflags, &cmsgs, daddr,
|
|
|
|
&transport);
|
|
|
|
if (err)
|
|
|
|
goto out_unlock;
|
|
|
|
|
|
|
|
asoc = transport->asoc;
|
|
|
|
new = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!sctp_style(sk, TCP) && !(sflags & SCTP_ADDR_OVER))
|
|
|
|
transport = NULL;
|
2005-04-17 06:20:36 +08:00
|
|
|
} else {
|
2018-03-01 23:05:17 +08:00
|
|
|
asoc = sctp_id2assoc(sk, sinfo->sinfo_assoc_id);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (!asoc) {
|
|
|
|
err = -EPIPE;
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
2018-03-01 23:05:17 +08:00
|
|
|
err = sctp_sendmsg_check_sflags(asoc, sflags, msg, msg_len);
|
2018-03-01 23:05:12 +08:00
|
|
|
if (err <= 0)
|
2005-04-17 06:20:36 +08:00
|
|
|
goto out_unlock;
|
|
|
|
}
|
|
|
|
|
2018-03-01 23:05:15 +08:00
|
|
|
/* Update snd_info with the asoc */
|
|
|
|
sctp_sendmsg_update_sinfo(asoc, sinfo, &cmsgs);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-03-01 23:05:10 +08:00
|
|
|
/* Send msg to the asoc */
|
2018-03-01 23:05:16 +08:00
|
|
|
err = sctp_sendmsg_to_asoc(asoc, msg, msg_len, transport, sinfo);
|
2018-03-01 23:05:17 +08:00
|
|
|
if (err < 0 && err != -ESRCH && new)
|
2005-04-17 06:20:36 +08:00
|
|
|
sctp_association_free(asoc);
|
2018-03-01 23:05:16 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
out_unlock:
|
2014-01-21 15:44:11 +08:00
|
|
|
release_sock(sk);
|
2018-03-01 23:05:17 +08:00
|
|
|
out:
|
2018-03-01 23:05:10 +08:00
|
|
|
return sctp_error(sk, msg->msg_flags, err);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
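/* For contrast with the kernel-side flow above, a hedged sketch using the
 * lksctp-tools helper sctp_sendmsg(3), which builds the ancillary data for
 * this path internally. Link with -lsctp; the descriptor, destination and
 * payload are placeholders.
 *
 *   #include <netinet/in.h>
 *   #include <netinet/sctp.h>
 *
 *   static ssize_t send_ping(int fd, struct sockaddr_in *dst)
 *   {
 *           return sctp_sendmsg(fd, "ping", 4,
 *                               (struct sockaddr *)dst, sizeof(*dst),
 *                               0,    // ppid
 *                               0,    // flags
 *                               1,    // stream number
 *                               0,    // time to live
 *                               0);   // context
 *   }
 */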
|
|
|
|
|
|
|
|
/* This is an extended version of skb_pull() that removes the data from the
|
|
|
|
 * start of a skb even when data is spread across the list of skbs in the
|
|
|
|
* frag_list. len specifies the total amount of data that needs to be removed.
|
|
|
|
 * When 'len' bytes could be removed from the skb, it returns 0.
|
|
|
|
* If 'len' exceeds the total skb length, it returns the no. of bytes that
|
|
|
|
* could not be removed.
|
|
|
|
*/
|
|
|
|
static int sctp_skb_pull(struct sk_buff *skb, int len)
|
|
|
|
{
|
|
|
|
struct sk_buff *list;
|
|
|
|
int skb_len = skb_headlen(skb);
|
|
|
|
int rlen;
|
|
|
|
|
|
|
|
if (len <= skb_len) {
|
|
|
|
__skb_pull(skb, len);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
len -= skb_len;
|
|
|
|
__skb_pull(skb, skb_len);
|
|
|
|
|
2009-06-09 15:22:35 +08:00
|
|
|
skb_walk_frags(skb, list) {
|
2005-04-17 06:20:36 +08:00
|
|
|
rlen = sctp_skb_pull(list, len);
|
|
|
|
skb->len -= (len-rlen);
|
|
|
|
skb->data_len -= (len-rlen);
|
|
|
|
|
|
|
|
if (!rlen)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
len = rlen;
|
|
|
|
}
|
|
|
|
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* API 3.1.3 recvmsg() - UDP Style Syntax
|
|
|
|
*
|
|
|
|
* ssize_t recvmsg(int socket, struct msghdr *message,
|
|
|
|
* int flags);
|
|
|
|
*
|
|
|
|
* socket - the socket descriptor of the endpoint.
|
|
|
|
* message - pointer to the msghdr structure which contains a single
|
|
|
|
* user message and possibly some ancillary data.
|
|
|
|
*
|
|
|
|
* See Section 5 for complete description of the data
|
|
|
|
* structures.
|
|
|
|
*
|
|
|
|
* flags - flags sent or received with the user message, see Section
|
|
|
|
* 5 for complete description of the flags.
|
|
|
|
*/
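/* A hedged userspace counterpart to the recvmsg() path implemented below:
 * read one message or notification from a one-to-many socket and tell the
 * two apart via MSG_NOTIFICATION, using MSG_EOR to detect a complete
 * message. The buffer size and descriptor are placeholders.
 *
 *   #include <stdio.h>
 *   #include <sys/socket.h>
 *   #include <netinet/in.h>
 *   #include <netinet/sctp.h>
 *
 *   static ssize_t recv_one(int fd)
 *   {
 *           char buf[2048];
 *           struct sockaddr_storage from;
 *           struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *           struct msghdr msg = {
 *                   .msg_name    = &from,
 *                   .msg_namelen = sizeof(from),
 *                   .msg_iov     = &iov,
 *                   .msg_iovlen  = 1,
 *           };
 *           ssize_t n = recvmsg(fd, &msg, 0);
 *
 *           if (n < 0)
 *                   return n;
 *           if (msg.msg_flags & MSG_NOTIFICATION)
 *                   printf("notification, %zd bytes\n", n);
 *           else if (!(msg.msg_flags & MSG_EOR))
 *                   printf("partial message, more to read\n");
 *           return n;
 *   }
 */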
|
2015-03-02 15:37:48 +08:00
|
|
|
static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
|
|
|
|
int noblock, int flags, int *addr_len)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct sctp_ulpevent *event = NULL;
|
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
2016-07-14 02:08:57 +08:00
|
|
|
struct sk_buff *skb, *head_skb;
|
2005-04-17 06:20:36 +08:00
|
|
|
int copied;
|
|
|
|
int err = 0;
|
|
|
|
int skb_len;
|
|
|
|
|
2013-06-29 01:49:40 +08:00
|
|
|
pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, "
|
|
|
|
"addr_len:%p)\n", __func__, sk, msg, len, noblock, flags,
|
|
|
|
addr_len);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2014-01-21 15:44:11 +08:00
|
|
|
lock_sock(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2016-07-16 03:38:19 +08:00
|
|
|
if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED) &&
|
2016-07-30 14:14:41 +08:00
|
|
|
!sctp_sstate(sk, CLOSING) && !sctp_sstate(sk, CLOSED)) {
|
2005-04-17 06:20:36 +08:00
|
|
|
err = -ENOTCONN;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
|
|
|
|
if (!skb)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/* Get the total length of the skb including any skb's in the
|
|
|
|
* frag_list.
|
|
|
|
*/
|
|
|
|
skb_len = skb->len;
|
|
|
|
|
|
|
|
copied = skb_len;
|
|
|
|
if (copied > len)
|
|
|
|
copied = len;
|
|
|
|
|
2014-11-06 05:46:40 +08:00
|
|
|
err = skb_copy_datagram_msg(skb, 0, msg, copied);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
event = sctp_skb2event(skb);
|
|
|
|
|
|
|
|
if (err)
|
|
|
|
goto out_free;
|
|
|
|
|
2016-07-14 02:08:57 +08:00
|
|
|
if (event->chunk && event->chunk->head_skb)
|
|
|
|
head_skb = event->chunk->head_skb;
|
|
|
|
else
|
|
|
|
head_skb = skb;
|
|
|
|
sock_recv_ts_and_drops(msg, sk, head_skb);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (sctp_ulpevent_is_notification(event)) {
|
|
|
|
msg->msg_flags |= MSG_NOTIFICATION;
|
|
|
|
sp->pf->event_msgname(event, msg->msg_name, addr_len);
|
|
|
|
} else {
|
2016-07-14 02:08:57 +08:00
|
|
|
sp->pf->skb_msgname(head_skb, msg->msg_name, addr_len);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2014-07-13 02:30:38 +08:00
|
|
|
/* Check if we allow SCTP_NXTINFO. */
|
|
|
|
if (sp->recvnxtinfo)
|
|
|
|
sctp_ulpevent_read_nxtinfo(event, msg, sk);
|
2014-07-13 02:30:37 +08:00
|
|
|
/* Check if we allow SCTP_RCVINFO. */
|
|
|
|
if (sp->recvrcvinfo)
|
|
|
|
sctp_ulpevent_read_rcvinfo(event, msg);
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Check if we allow SCTP_SNDRCVINFO. */
|
2018-11-18 16:08:51 +08:00
|
|
|
if (sctp_ulpevent_type_enabled(sp->subscribe, SCTP_DATA_IO_EVENT))
|
2005-04-17 06:20:36 +08:00
|
|
|
sctp_ulpevent_read_sndrcvinfo(event, msg);
|
2014-07-13 02:30:37 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
err = copied;
|
|
|
|
|
|
|
|
/* If skb's length exceeds the user's buffer, update the skb and
|
|
|
|
* push it back to the receive_queue so that the next call to
|
|
|
|
* recvmsg() will return the remaining data. Don't set MSG_EOR.
|
|
|
|
*/
|
|
|
|
if (skb_len > copied) {
|
|
|
|
msg->msg_flags &= ~MSG_EOR;
|
|
|
|
if (flags & MSG_PEEK)
|
|
|
|
goto out_free;
|
|
|
|
sctp_skb_pull(skb, copied);
|
|
|
|
skb_queue_head(&sk->sk_receive_queue, skb);
|
|
|
|
|
2014-04-15 03:45:17 +08:00
|
|
|
/* When only a partial message is copied to the user, increase
|
|
|
|
* rwnd by that amount. If all the data in the skb is read,
|
|
|
|
* rwnd is updated when the event is freed.
|
|
|
|
*/
|
|
|
|
if (!sctp_ulpevent_is_notification(event))
|
|
|
|
sctp_assoc_rwnd_increase(event->asoc, copied);
|
2005-04-17 06:20:36 +08:00
|
|
|
goto out;
|
|
|
|
} else if ((event->msg_flags & MSG_NOTIFICATION) ||
|
|
|
|
(event->msg_flags & MSG_EOR))
|
|
|
|
msg->msg_flags |= MSG_EOR;
|
|
|
|
else
|
|
|
|
msg->msg_flags &= ~MSG_EOR;
|
|
|
|
|
|
|
|
out_free:
|
|
|
|
if (flags & MSG_PEEK) {
|
|
|
|
/* Release the skb reference acquired after peeking the skb in
|
|
|
|
* sctp_skb_recv_datagram().
|
|
|
|
*/
|
|
|
|
kfree_skb(skb);
|
|
|
|
} else {
|
|
|
|
/* Free the event which includes releasing the reference to
|
|
|
|
* the owner of the skb, freeing the skb and updating the
|
|
|
|
* rwnd.
|
|
|
|
*/
|
|
|
|
sctp_ulpevent_free(event);
|
|
|
|
}
|
|
|
|
out:
|
2014-01-21 15:44:11 +08:00
|
|
|
release_sock(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
|
|
|
|
*
|
|
|
|
 * This option is an on/off flag. If enabled, no SCTP message
|
|
|
|
* fragmentation will be performed. Instead if a message being sent
|
|
|
|
* exceeds the current PMTU size, the message will NOT be sent and
|
|
|
|
 * instead an error will be indicated to the user.
|
|
|
|
*/
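/* For illustration, a userspace sketch of setting this option; the
 * descriptor is a placeholder and <netinet/sctp.h> from lksctp-tools is
 * assumed. With fragmentation disabled, a send larger than the current
 * fragmentation point fails with EMSGSIZE, as implemented above in
 * sctp_sendmsg_to_asoc().
 *
 *   #include <sys/socket.h>
 *   #include <netinet/in.h>
 *   #include <netinet/sctp.h>
 *
 *   static int disable_frag(int fd)
 *   {
 *           int val = 1;
 *
 *           return setsockopt(fd, IPPROTO_SCTP, SCTP_DISABLE_FRAGMENTS,
 *                             &val, sizeof(val));
 *   }
 */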
|
|
|
|
static int sctp_setsockopt_disable_fragments(struct sock *sk,
|
2009-10-01 07:12:20 +08:00
|
|
|
char __user *optval,
|
|
|
|
unsigned int optlen)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
int val;
|
|
|
|
|
|
|
|
if (optlen < sizeof(int))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (get_user(val, (int __user *)optval))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
|
2009-10-01 07:12:20 +08:00
|
|
|
unsigned int optlen)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2018-11-18 16:08:51 +08:00
|
|
|
struct sctp_event_subscribe subscribe;
|
|
|
|
__u8 *sn_type = (__u8 *)&subscribe;
|
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
2018-11-18 16:08:52 +08:00
|
|
|
struct sctp_association *asoc;
|
2018-11-18 16:08:51 +08:00
|
|
|
int i;
|
2011-07-02 17:28:04 +08:00
|
|
|
|
2008-02-28 05:04:52 +08:00
|
|
|
if (optlen > sizeof(struct sctp_event_subscribe))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
2018-11-18 16:08:51 +08:00
|
|
|
|
|
|
|
if (copy_from_user(&subscribe, optval, optlen))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EFAULT;
|
2011-07-02 17:28:04 +08:00
|
|
|
|
2018-11-18 16:08:51 +08:00
|
|
|
for (i = 0; i < optlen; i++)
|
|
|
|
sctp_ulpevent_type_set(&sp->subscribe, SCTP_SN_TYPE_BASE + i,
|
|
|
|
sn_type[i]);
|
|
|
|
|
2018-11-18 16:08:52 +08:00
|
|
|
list_for_each_entry(asoc, &sp->ep->asocs, asocs)
|
|
|
|
asoc->subscribe = sctp_sk(sk)->subscribe;
|
|
|
|
|
2014-07-13 02:30:40 +08:00
|
|
|
/* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
|
2011-07-02 17:28:04 +08:00
|
|
|
 * if there is no data to be sent or retransmitted, the stack will
|
|
|
|
* immediately send up this notification.
|
|
|
|
*/
|
2018-11-18 16:08:51 +08:00
|
|
|
if (sctp_ulpevent_type_enabled(sp->subscribe, SCTP_SENDER_DRY_EVENT)) {
|
|
|
|
struct sctp_ulpevent *event;
|
2011-07-02 17:28:04 +08:00
|
|
|
|
2018-11-18 16:08:52 +08:00
|
|
|
asoc = sctp_id2assoc(sk, 0);
|
2011-07-02 17:28:04 +08:00
|
|
|
if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
|
|
|
|
event = sctp_ulpevent_make_sender_dry_event(asoc,
|
2018-01-09 05:02:27 +08:00
|
|
|
GFP_USER | __GFP_NOWARN);
|
2011-07-02 17:28:04 +08:00
|
|
|
if (!event)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2017-12-08 21:04:05 +08:00
|
|
|
asoc->stream.si->enqueue_event(&asoc->ulpq, event);
|
2011-07-02 17:28:04 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
}
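/* A hedged userspace sketch of the subscription consumed by
 * sctp_setsockopt_events() above: enable data-io and association-change
 * notifications on a one-to-many socket. The descriptor is a placeholder;
 * fields not mentioned are left disabled by the memset.
 *
 *   #include <string.h>
 *   #include <sys/socket.h>
 *   #include <netinet/in.h>
 *   #include <netinet/sctp.h>
 *
 *   static int enable_events(int fd)
 *   {
 *           struct sctp_event_subscribe ev;
 *
 *           memset(&ev, 0, sizeof(ev));
 *           ev.sctp_data_io_event     = 1;
 *           ev.sctp_association_event = 1;
 *
 *           return setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS,
 *                             &ev, sizeof(ev));
 *   }
 */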
|
|
|
|
|
|
|
|
/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
|
|
|
|
*
|
|
|
|
* This socket option is applicable to the UDP-style socket only. When
|
|
|
|
* set it will cause associations that are idle for more than the
|
|
|
|
* specified number of seconds to automatically close. An association
|
|
|
|
 * being idle is defined as an association that has NOT sent or received
|
|
|
|
* user data. The special value of '0' indicates that no automatic
|
|
|
|
* close of any associations should be performed. The option expects an
|
|
|
|
* integer defining the number of seconds of idle time before an
|
|
|
|
* association is closed.
|
|
|
|
*/
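/* A minimal userspace sketch of the option described above (one-to-many
 * socket assumed, descriptor is a placeholder): associations idle for
 * longer than the given number of seconds are closed automatically, and 0
 * disables the behaviour.
 *
 *   #include <sys/socket.h>
 *   #include <netinet/in.h>
 *   #include <netinet/sctp.h>
 *
 *   static int set_autoclose(int fd, int seconds)
 *   {
 *           return setsockopt(fd, IPPROTO_SCTP, SCTP_AUTOCLOSE,
 *                             &seconds, sizeof(seconds));
 *   }
 */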
|
|
|
|
static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
|
2009-10-01 07:12:20 +08:00
|
|
|
unsigned int optlen)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
2013-12-10 19:48:15 +08:00
|
|
|
struct net *net = sock_net(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Applicable to UDP-style socket only */
|
|
|
|
if (sctp_style(sk, TCP))
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
if (optlen != sizeof(int))
|
|
|
|
return -EINVAL;
|
|
|
|
if (copy_from_user(&sp->autoclose, optval, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
|
2013-12-10 19:48:15 +08:00
|
|
|
if (sp->autoclose > net->sctp.max_autoclose)
|
|
|
|
sp->autoclose = net->sctp.max_autoclose;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
|
|
|
|
*
|
|
|
|
* Applications can enable or disable heartbeats for any peer address of
|
|
|
|
* an association, modify an address's heartbeat interval, force a
|
|
|
|
* heartbeat to be sent immediately, and adjust the address's maximum
|
|
|
|
* number of retransmissions sent before an address is considered
|
|
|
|
* unreachable. The following structure is used to access and modify an
|
|
|
|
* address's parameters:
|
|
|
|
*
|
|
|
|
* struct sctp_paddrparams {
|
2005-12-23 03:36:46 +08:00
|
|
|
* sctp_assoc_t spp_assoc_id;
|
|
|
|
* struct sockaddr_storage spp_address;
|
|
|
|
* uint32_t spp_hbinterval;
|
|
|
|
* uint16_t spp_pathmaxrxt;
|
|
|
|
* uint32_t spp_pathmtu;
|
|
|
|
* uint32_t spp_sackdelay;
|
|
|
|
* uint32_t spp_flags;
|
2018-07-02 18:21:13 +08:00
|
|
|
* uint32_t spp_ipv6_flowlabel;
|
|
|
|
* uint8_t spp_dscp;
|
2005-12-23 03:36:46 +08:00
|
|
|
* };
|
|
|
|
*
|
|
|
|
* spp_assoc_id - (one-to-many style socket) This is filled in the
|
|
|
|
* application, and identifies the association for
|
|
|
|
* this query.
|
2005-04-17 06:20:36 +08:00
|
|
|
* spp_address - This specifies which address is of interest.
|
|
|
|
* spp_hbinterval - This contains the value of the heartbeat interval,
|
2005-12-23 03:36:46 +08:00
|
|
|
* in milliseconds. If a value of zero
|
|
|
|
* is present in this field then no changes are to
|
|
|
|
* be made to this parameter.
|
2005-04-17 06:20:36 +08:00
|
|
|
* spp_pathmaxrxt - This contains the maximum number of
|
|
|
|
* retransmissions before this address shall be
|
2005-12-23 03:36:46 +08:00
|
|
|
* considered unreachable. If a value of zero
|
|
|
|
* is present in this field then no changes are to
|
|
|
|
* be made to this parameter.
|
|
|
|
* spp_pathmtu - When Path MTU discovery is disabled the value
|
|
|
|
* specified here will be the "fixed" path mtu.
|
|
|
|
* Note that if the spp_address field is empty
|
|
|
|
* then all associations on this address will
|
|
|
|
* have this fixed path mtu set upon them.
|
|
|
|
*
|
|
|
|
* spp_sackdelay - When delayed sack is enabled, this value specifies
|
|
|
|
* the number of milliseconds that sacks will be delayed
|
|
|
|
* for. This value will apply to all addresses of an
|
|
|
|
* association if the spp_address field is empty. Note
|
|
|
|
* also, that if delayed sack is enabled and this
|
|
|
|
* value is set to 0, no change is made to the last
|
|
|
|
* recorded delayed sack timer value.
|
|
|
|
*
|
|
|
|
* spp_flags - These flags are used to control various features
|
|
|
|
* on an association. The flag field may contain
|
|
|
|
* zero or more of the following options.
|
|
|
|
*
|
|
|
|
* SPP_HB_ENABLE - Enable heartbeats on the
|
|
|
|
* specified address. Note that if the address
|
|
|
|
* field is empty all addresses for the association
|
|
|
|
* have heartbeats enabled upon them.
|
|
|
|
*
|
|
|
|
* SPP_HB_DISABLE - Disable heartbeats on the
|
|
|
|
 * specified address. Note that if the address
|
|
|
|
* field is empty all addresses for the association
|
|
|
|
* will have their heartbeats disabled. Note also
|
|
|
|
* that SPP_HB_ENABLE and SPP_HB_DISABLE are
|
|
|
|
* mutually exclusive, only one of these two should
|
|
|
|
* be specified. Enabling both fields will have
|
|
|
|
* undetermined results.
|
|
|
|
*
|
|
|
|
* SPP_HB_DEMAND - Request a user initiated heartbeat
|
|
|
|
* to be made immediately.
|
|
|
|
*
|
2007-03-24 02:33:12 +08:00
|
|
|
 * SPP_HB_TIME_IS_ZERO - Specifies that the time for
|
|
|
|
 * heartbeat delay is to be set to the value of 0
|
|
|
|
* milliseconds.
|
|
|
|
*
|
2005-12-23 03:36:46 +08:00
|
|
|
* SPP_PMTUD_ENABLE - This field will enable PMTU
|
|
|
|
* discovery upon the specified address. Note that
|
|
|
|
 * if the address field is empty then all addresses
|
|
|
|
 * on the association are affected.
|
|
|
|
*
|
|
|
|
* SPP_PMTUD_DISABLE - This field will disable PMTU
|
|
|
|
* discovery upon the specified address. Note that
|
|
|
|
 * if the address field is empty then all addresses
|
|
|
|
 * on the association are affected. Note also that
|
|
|
|
* SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
|
|
|
|
* exclusive. Enabling both will have undetermined
|
|
|
|
* results.
|
|
|
|
*
|
|
|
|
* SPP_SACKDELAY_ENABLE - Setting this flag turns
|
|
|
|
* on delayed sack. The time specified in spp_sackdelay
|
|
|
|
* is used to specify the sack delay for this address. Note
|
|
|
|
* that if spp_address is empty then all addresses will
|
|
|
|
* enable delayed sack and take on the sack delay
|
|
|
|
* value specified in spp_sackdelay.
|
|
|
|
* SPP_SACKDELAY_DISABLE - Setting this flag turns
|
|
|
|
* off delayed sack. If the spp_address field is blank then
|
|
|
|
* delayed sack is disabled for the entire association. Note
|
|
|
|
* also that this field is mutually exclusive to
|
|
|
|
* SPP_SACKDELAY_ENABLE, setting both will have undefined
|
|
|
|
* results.
|
2018-07-02 18:21:13 +08:00
|
|
|
*
|
|
|
|
* SPP_IPV6_FLOWLABEL: Setting this flag enables the
|
|
|
|
* setting of the IPV6 flow label value. The value is
|
|
|
|
* contained in the spp_ipv6_flowlabel field.
|
|
|
|
* Upon retrieval, this flag will be set to indicate that
|
|
|
|
* the spp_ipv6_flowlabel field has a valid value returned.
|
|
|
|
* If a specific destination address is set (in the
|
|
|
|
* spp_address field), then the value returned is that of
|
|
|
|
* the address. If just an association is specified (and
|
|
|
|
* no address), then the association's default flow label
|
|
|
|
* is returned. If neither an association nor a destination
|
|
|
|
* is specified, then the socket's default flow label is
|
|
|
|
* returned. For non-IPv6 sockets, this flag will be left
|
|
|
|
* cleared.
|
|
|
|
*
|
|
|
|
* SPP_DSCP: Setting this flag enables the setting of the
|
|
|
|
* Differentiated Services Code Point (DSCP) value
|
|
|
|
* associated with either the association or a specific
|
|
|
|
* address. The value is obtained in the spp_dscp field.
|
|
|
|
* Upon retrieval, this flag will be set to indicate that
|
|
|
|
* the spp_dscp field has a valid value returned. If a
|
|
|
|
* specific destination address is set when called (in the
|
|
|
|
* spp_address field), then that specific destination
|
|
|
|
* address's DSCP value is returned. If just an association
|
|
|
|
* is specified, then the association's default DSCP is
|
|
|
|
* returned. If neither an association nor a destination is
|
|
|
|
* specified, then the socket's default DSCP is returned.
|
|
|
|
*
|
|
|
|
* spp_ipv6_flowlabel
|
|
|
|
* - This field is used in conjunction with the
|
|
|
|
* SPP_IPV6_FLOWLABEL flag and contains the IPv6 flow label.
|
|
|
|
* The 20 least significant bits are used for the flow
|
|
|
|
* label. This setting has precedence over any IPv6-layer
|
|
|
|
* setting.
|
|
|
|
*
|
|
|
|
* spp_dscp - This field is used in conjunction with the SPP_DSCP flag
|
|
|
|
* and contains the DSCP. The 6 most significant bits are
|
|
|
|
* used for the DSCP. This setting has precedence over any
|
|
|
|
* IPv4- or IPv6- layer setting.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
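/* A hedged userspace sketch of the option described above: enable
 * heartbeats with a 5 second interval on every peer address of an
 * existing association. The descriptor and assoc_id are placeholders;
 * spp_address is left as the wildcard so the whole association is
 * affected, matching the per-transport loop further below.
 *
 *   #include <string.h>
 *   #include <sys/socket.h>
 *   #include <netinet/in.h>
 *   #include <netinet/sctp.h>
 *
 *   static int set_hb(int fd, sctp_assoc_t assoc_id)
 *   {
 *           struct sctp_paddrparams p;
 *
 *           memset(&p, 0, sizeof(p));
 *           p.spp_assoc_id   = assoc_id;
 *           p.spp_flags      = SPP_HB_ENABLE;
 *           p.spp_hbinterval = 5000;   // milliseconds
 *
 *           return setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *                             &p, sizeof(p));
 *   }
 */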
|
2006-09-18 15:40:38 +08:00
|
|
|
static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
|
|
|
|
struct sctp_transport *trans,
|
|
|
|
struct sctp_association *asoc,
|
|
|
|
struct sctp_sock *sp,
|
|
|
|
int hb_change,
|
|
|
|
int pmtud_change,
|
|
|
|
int sackdelay_change)
|
2005-12-23 03:36:46 +08:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
|
|
|
|
if (params->spp_flags & SPP_HB_DEMAND && trans) {
|
2012-08-07 15:25:24 +08:00
|
|
|
struct net *net = sock_net(trans->asoc->base.sk);
|
|
|
|
|
|
|
|
error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
|
2005-12-23 03:36:46 +08:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2007-03-24 02:33:12 +08:00
|
|
|
/* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
|
|
|
|
* this field is ignored. Note also that a value of zero indicates
|
|
|
|
* the current setting should be left unchanged.
|
|
|
|
*/
|
|
|
|
if (params->spp_flags & SPP_HB_ENABLE) {
|
|
|
|
|
|
|
|
/* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
|
|
|
|
 * set. This lets us use a value of 0 when this flag
|
|
|
|
* is set.
|
|
|
|
*/
|
|
|
|
if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
|
|
|
|
params->spp_hbinterval = 0;
|
|
|
|
|
|
|
|
if (params->spp_hbinterval ||
|
|
|
|
(params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
|
|
|
|
if (trans) {
|
|
|
|
trans->hbinterval =
|
|
|
|
msecs_to_jiffies(params->spp_hbinterval);
|
|
|
|
} else if (asoc) {
|
|
|
|
asoc->hbinterval =
|
|
|
|
msecs_to_jiffies(params->spp_hbinterval);
|
|
|
|
} else {
|
|
|
|
sp->hbinterval = params->spp_hbinterval;
|
|
|
|
}
|
2005-12-23 03:36:46 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (hb_change) {
|
|
|
|
if (trans) {
|
|
|
|
trans->param_flags =
|
|
|
|
(trans->param_flags & ~SPP_HB) | hb_change;
|
|
|
|
} else if (asoc) {
|
|
|
|
asoc->param_flags =
|
|
|
|
(asoc->param_flags & ~SPP_HB) | hb_change;
|
|
|
|
} else {
|
|
|
|
sp->param_flags =
|
|
|
|
(sp->param_flags & ~SPP_HB) | hb_change;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-03-24 02:33:12 +08:00
|
|
|
/* When Path MTU discovery is disabled the value specified here will
|
|
|
|
* be the "fixed" path mtu (i.e. the value of the spp_flags field must
|
|
|
|
* include the flag SPP_PMTUD_DISABLE for this field to have any
|
|
|
|
* effect).
|
|
|
|
*/
|
|
|
|
if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
|
2005-12-23 03:36:46 +08:00
|
|
|
if (trans) {
|
|
|
|
trans->pathmtu = params->spp_pathmtu;
|
2017-04-04 13:39:55 +08:00
|
|
|
sctp_assoc_sync_pmtu(asoc);
|
2005-12-23 03:36:46 +08:00
|
|
|
} else if (asoc) {
|
2018-04-27 03:58:53 +08:00
|
|
|
sctp_assoc_set_pmtu(asoc, params->spp_pathmtu);
|
2005-12-23 03:36:46 +08:00
|
|
|
} else {
|
|
|
|
sp->pathmtu = params->spp_pathmtu;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (pmtud_change) {
|
|
|
|
if (trans) {
|
|
|
|
int update = (trans->param_flags & SPP_PMTUD_DISABLE) &&
|
|
|
|
(params->spp_flags & SPP_PMTUD_ENABLE);
|
|
|
|
trans->param_flags =
|
|
|
|
(trans->param_flags & ~SPP_PMTUD) | pmtud_change;
|
|
|
|
if (update) {
|
2011-04-27 05:51:31 +08:00
|
|
|
sctp_transport_pmtu(trans, sctp_opt2sk(sp));
|
2017-04-04 13:39:55 +08:00
|
|
|
sctp_assoc_sync_pmtu(asoc);
|
2005-12-23 03:36:46 +08:00
|
|
|
}
|
|
|
|
} else if (asoc) {
|
|
|
|
asoc->param_flags =
|
|
|
|
(asoc->param_flags & ~SPP_PMTUD) | pmtud_change;
|
|
|
|
} else {
|
|
|
|
sp->param_flags =
|
|
|
|
(sp->param_flags & ~SPP_PMTUD) | pmtud_change;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-03-24 02:33:12 +08:00
|
|
|
/* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
|
|
|
|
* value of this field is ignored. Note also that a value of zero
|
|
|
|
* indicates the current setting should be left unchanged.
|
|
|
|
*/
|
|
|
|
if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) {
|
2005-12-23 03:36:46 +08:00
|
|
|
if (trans) {
|
|
|
|
trans->sackdelay =
|
|
|
|
msecs_to_jiffies(params->spp_sackdelay);
|
|
|
|
} else if (asoc) {
|
|
|
|
asoc->sackdelay =
|
|
|
|
msecs_to_jiffies(params->spp_sackdelay);
|
|
|
|
} else {
|
|
|
|
sp->sackdelay = params->spp_sackdelay;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sackdelay_change) {
|
|
|
|
if (trans) {
|
|
|
|
trans->param_flags =
|
|
|
|
(trans->param_flags & ~SPP_SACKDELAY) |
|
|
|
|
sackdelay_change;
|
|
|
|
} else if (asoc) {
|
|
|
|
asoc->param_flags =
|
|
|
|
(asoc->param_flags & ~SPP_SACKDELAY) |
|
|
|
|
sackdelay_change;
|
|
|
|
} else {
|
|
|
|
sp->param_flags =
|
|
|
|
(sp->param_flags & ~SPP_SACKDELAY) |
|
|
|
|
sackdelay_change;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-11-24 04:53:57 +08:00
|
|
|
/* Note that a value of zero indicates the current setting should be
|
|
|
|
left unchanged.
|
2007-03-24 02:33:12 +08:00
|
|
|
*/
|
2009-11-24 04:53:57 +08:00
|
|
|
if (params->spp_pathmaxrxt) {
|
2005-12-23 03:36:46 +08:00
|
|
|
if (trans) {
|
|
|
|
trans->pathmaxrxt = params->spp_pathmaxrxt;
|
|
|
|
} else if (asoc) {
|
|
|
|
asoc->pathmaxrxt = params->spp_pathmaxrxt;
|
|
|
|
} else {
|
|
|
|
sp->pathmaxrxt = params->spp_pathmaxrxt;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-07-02 18:21:13 +08:00
|
|
|
if (params->spp_flags & SPP_IPV6_FLOWLABEL) {
|
2018-09-03 15:47:11 +08:00
|
|
|
if (trans) {
|
|
|
|
if (trans->ipaddr.sa.sa_family == AF_INET6) {
|
|
|
|
trans->flowlabel = params->spp_ipv6_flowlabel &
|
|
|
|
SCTP_FLOWLABEL_VAL_MASK;
|
|
|
|
trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
|
|
|
|
}
|
2018-07-02 18:21:13 +08:00
|
|
|
} else if (asoc) {
|
2018-09-03 15:47:10 +08:00
|
|
|
struct sctp_transport *t;
|
|
|
|
|
|
|
|
list_for_each_entry(t, &asoc->peer.transport_addr_list,
|
2018-07-02 18:21:13 +08:00
|
|
|
transports) {
|
2018-09-03 15:47:10 +08:00
|
|
|
if (t->ipaddr.sa.sa_family != AF_INET6)
|
2018-07-02 18:21:13 +08:00
|
|
|
continue;
|
2018-09-03 15:47:10 +08:00
|
|
|
t->flowlabel = params->spp_ipv6_flowlabel &
|
|
|
|
SCTP_FLOWLABEL_VAL_MASK;
|
|
|
|
t->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
|
2018-07-02 18:21:13 +08:00
|
|
|
}
|
|
|
|
asoc->flowlabel = params->spp_ipv6_flowlabel &
|
|
|
|
SCTP_FLOWLABEL_VAL_MASK;
|
|
|
|
asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
|
|
|
|
} else if (sctp_opt2sk(sp)->sk_family == AF_INET6) {
|
|
|
|
sp->flowlabel = params->spp_ipv6_flowlabel &
|
|
|
|
SCTP_FLOWLABEL_VAL_MASK;
|
|
|
|
sp->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (params->spp_flags & SPP_DSCP) {
|
|
|
|
if (trans) {
|
|
|
|
trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
|
|
|
|
trans->dscp |= SCTP_DSCP_SET_MASK;
|
|
|
|
} else if (asoc) {
|
2018-09-03 15:47:10 +08:00
|
|
|
struct sctp_transport *t;
|
|
|
|
|
|
|
|
list_for_each_entry(t, &asoc->peer.transport_addr_list,
|
2018-07-02 18:21:13 +08:00
|
|
|
transports) {
|
2018-09-03 15:47:10 +08:00
|
|
|
t->dscp = params->spp_dscp &
|
|
|
|
SCTP_DSCP_VAL_MASK;
|
|
|
|
t->dscp |= SCTP_DSCP_SET_MASK;
|
2018-07-02 18:21:13 +08:00
|
|
|
}
|
|
|
|
asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
|
|
|
|
asoc->dscp |= SCTP_DSCP_SET_MASK;
|
|
|
|
} else {
|
|
|
|
sp->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
|
|
|
|
sp->dscp |= SCTP_DSCP_SET_MASK;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-12-23 03:36:46 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static int sctp_setsockopt_peer_addr_params(struct sock *sk,
|
2009-10-01 07:12:20 +08:00
|
|
|
char __user *optval,
|
|
|
|
unsigned int optlen)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2005-12-23 03:36:46 +08:00
|
|
|
struct sctp_paddrparams params;
|
|
|
|
struct sctp_transport *trans = NULL;
|
|
|
|
struct sctp_association *asoc = NULL;
|
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
int error;
|
2005-12-23 03:36:46 +08:00
|
|
|
int hb_change, pmtud_change, sackdelay_change;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-07-02 18:21:13 +08:00
|
|
|
if (optlen == sizeof(params)) {
|
|
|
|
if (copy_from_user(&params, optval, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
} else if (optlen == ALIGN(offsetof(struct sctp_paddrparams,
|
|
|
|
spp_ipv6_flowlabel), 4)) {
|
|
|
|
if (copy_from_user(&params, optval, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
if (params.spp_flags & (SPP_DSCP | SPP_IPV6_FLOWLABEL))
|
|
|
|
return -EINVAL;
|
|
|
|
} else {
|
2013-12-23 12:16:50 +08:00
|
|
|
return -EINVAL;
|
2018-07-02 18:21:13 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-12-23 03:36:46 +08:00
|
|
|
/* Validate flags and value parameters. */
|
|
|
|
hb_change = params.spp_flags & SPP_HB;
|
|
|
|
pmtud_change = params.spp_flags & SPP_PMTUD;
|
|
|
|
sackdelay_change = params.spp_flags & SPP_SACKDELAY;
|
|
|
|
|
|
|
|
if (hb_change == SPP_HB ||
|
|
|
|
pmtud_change == SPP_PMTUD ||
|
|
|
|
sackdelay_change == SPP_SACKDELAY ||
|
|
|
|
params.spp_sackdelay > 500 ||
|
2009-11-30 08:55:45 +08:00
|
|
|
(params.spp_pathmtu &&
|
|
|
|
params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
|
2005-12-23 03:36:46 +08:00
|
|
|
return -EINVAL;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-12-23 03:36:46 +08:00
|
|
|
/* If an address other than INADDR_ANY is specified, and
|
|
|
|
* no transport is found, then the request is invalid.
|
|
|
|
*/
|
2013-12-23 12:16:50 +08:00
|
|
|
if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
|
2005-12-23 03:36:46 +08:00
|
|
|
trans = sctp_addr_id2transport(sk, &params.spp_address,
|
|
|
|
params.spp_assoc_id);
|
|
|
|
if (!trans)
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2019-01-28 15:08:24 +08:00
|
|
|
/* Get association, if assoc_id != SCTP_FUTURE_ASSOC and the
|
|
|
|
* socket is a one to many style socket, and an association
|
|
|
|
* was not found, then the id was invalid.
|
2005-12-23 03:36:46 +08:00
|
|
|
*/
|
|
|
|
asoc = sctp_id2assoc(sk, params.spp_assoc_id);
|
2019-01-28 15:08:24 +08:00
|
|
|
if (!asoc && params.spp_assoc_id != SCTP_FUTURE_ASSOC &&
|
|
|
|
sctp_style(sk, UDP))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2005-12-23 03:36:46 +08:00
|
|
|
/* Heartbeat demand can only be sent on a transport or
|
|
|
|
* association, but not a socket.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2005-12-23 03:36:46 +08:00
|
|
|
if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* Process parameters. */
|
|
|
|
error = sctp_apply_peer_addr_params(&params, trans, asoc, sp,
|
|
|
|
hb_change, pmtud_change,
|
|
|
|
sackdelay_change);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-12-23 03:36:46 +08:00
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
|
|
|
/* If changes are for association, also apply parameters to each
|
|
|
|
* transport.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2005-12-23 03:36:46 +08:00
|
|
|
if (!trans && asoc) {
|
2008-04-13 09:54:24 +08:00
|
|
|
list_for_each_entry(trans, &asoc->peer.transport_addr_list,
|
|
|
|
transports) {
|
2005-12-23 03:36:46 +08:00
|
|
|
sctp_apply_peer_addr_params(&params, trans, asoc, sp,
|
|
|
|
hb_change, pmtud_change,
|
|
|
|
sackdelay_change);
|
|
|
|
}
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-01-15 17:24:01 +08:00
|
|
|
static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags)
|
|
|
|
{
|
|
|
|
return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags)
|
|
|
|
{
|
|
|
|
return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE;
|
|
|
|
}
|
|
|
|
|
2019-01-28 15:08:34 +08:00
|
|
|
static void sctp_apply_asoc_delayed_ack(struct sctp_sack_info *params,
|
|
|
|
struct sctp_association *asoc)
|
|
|
|
{
|
|
|
|
struct sctp_transport *trans;
|
|
|
|
|
|
|
|
if (params->sack_delay) {
|
|
|
|
asoc->sackdelay = msecs_to_jiffies(params->sack_delay);
|
|
|
|
asoc->param_flags =
|
|
|
|
sctp_spp_sackdelay_enable(asoc->param_flags);
|
|
|
|
}
|
|
|
|
if (params->sack_freq == 1) {
|
|
|
|
asoc->param_flags =
|
|
|
|
sctp_spp_sackdelay_disable(asoc->param_flags);
|
|
|
|
} else if (params->sack_freq > 1) {
|
|
|
|
asoc->sackfreq = params->sack_freq;
|
|
|
|
asoc->param_flags =
|
|
|
|
sctp_spp_sackdelay_enable(asoc->param_flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
list_for_each_entry(trans, &asoc->peer.transport_addr_list,
|
|
|
|
transports) {
|
|
|
|
if (params->sack_delay) {
|
|
|
|
trans->sackdelay = msecs_to_jiffies(params->sack_delay);
|
|
|
|
trans->param_flags =
|
|
|
|
sctp_spp_sackdelay_enable(trans->param_flags);
|
|
|
|
}
|
|
|
|
if (params->sack_freq == 1) {
|
|
|
|
trans->param_flags =
|
|
|
|
sctp_spp_sackdelay_disable(trans->param_flags);
|
|
|
|
} else if (params->sack_freq > 1) {
|
|
|
|
trans->sackfreq = params->sack_freq;
|
|
|
|
trans->param_flags =
|
|
|
|
sctp_spp_sackdelay_enable(trans->param_flags);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-05-10 06:13:26 +08:00
|
|
|
/*
|
|
|
|
* 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
|
|
|
|
*
|
|
|
|
 * This option will affect the way delayed acks are performed. This
|
|
|
|
* option allows you to get or set the delayed ack time, in
|
|
|
|
* milliseconds. It also allows changing the delayed ack frequency.
|
|
|
|
* Changing the frequency to 1 disables the delayed sack algorithm. If
|
|
|
|
 * the assoc_id is 0, then this sets or gets the endpoint's default
|
|
|
|
* values. If the assoc_id field is non-zero, then the set or get
|
|
|
|
 * affects the specified association for the one to many model (the
|
|
|
|
* assoc_id field is ignored by the one to one model). Note that if
|
|
|
|
* sack_delay or sack_freq are 0 when setting this option, then the
|
|
|
|
* current values will remain unchanged.
|
|
|
|
*
|
|
|
|
* struct sctp_sack_info {
|
|
|
|
* sctp_assoc_t sack_assoc_id;
|
|
|
|
* uint32_t sack_delay;
|
|
|
|
* uint32_t sack_freq;
|
|
|
|
* };
|
|
|
|
*
|
|
|
|
* sack_assoc_id - This parameter, indicates which association the user
|
|
|
|
* is performing an action upon. Note that if this field's value is
|
|
|
|
 * zero then the endpoint's default value is changed (affecting future
|
|
|
|
* associations only).
|
|
|
|
*
|
|
|
|
* sack_delay - This parameter contains the number of milliseconds that
|
|
|
|
* the user is requesting the delayed ACK timer be set to. Note that
|
|
|
|
* this value is defined in the standard to be between 200 and 500
|
|
|
|
* milliseconds.
|
|
|
|
*
|
|
|
|
* sack_freq - This parameter contains the number of packets that must
|
|
|
|
* be received before a sack is sent without waiting for the delay
|
|
|
|
* timer to expire. The default value for this is 2, setting this
|
|
|
|
* value to 1 will disable the delayed sack algorithm.
|
2005-12-23 03:37:30 +08:00
|
|
|
*/
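/* A userspace sketch of the sctp_sack_info form of this option: a 200 ms
 * delayed-ack timer with the default frequency of 2, applied to the
 * endpoint defaults (assoc_id 0). The descriptor is a placeholder and
 * the SCTP_DELAYED_SACK name is assumed to be present in the installed
 * <netinet/sctp.h>.
 *
 *   #include <string.h>
 *   #include <sys/socket.h>
 *   #include <netinet/in.h>
 *   #include <netinet/sctp.h>
 *
 *   static int set_delayed_sack(int fd)
 *   {
 *           struct sctp_sack_info si;
 *
 *           memset(&si, 0, sizeof(si));
 *           si.sack_assoc_id = 0;     // endpoint defaults
 *           si.sack_delay    = 200;   // milliseconds
 *           si.sack_freq     = 2;
 *
 *           return setsockopt(fd, IPPROTO_SCTP, SCTP_DELAYED_SACK,
 *                             &si, sizeof(si));
 *   }
 */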
|
|
|
|
|
2008-05-10 06:13:26 +08:00
|
|
|
static int sctp_setsockopt_delayed_ack(struct sock *sk,
|
2009-10-01 07:12:20 +08:00
|
|
|
char __user *optval, unsigned int optlen)
|
2005-12-23 03:37:30 +08:00
|
|
|
{
|
2019-01-28 15:08:34 +08:00
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
|
|
|
struct sctp_association *asoc;
|
|
|
|
struct sctp_sack_info params;
|
2005-12-23 03:37:30 +08:00
|
|
|
|
2008-05-10 06:13:26 +08:00
|
|
|
if (optlen == sizeof(struct sctp_sack_info)) {
|
|
|
|
if (copy_from_user(&params, optval, optlen))
|
|
|
|
return -EFAULT;
|
2005-12-23 03:37:30 +08:00
|
|
|
|
2008-05-10 06:13:26 +08:00
|
|
|
if (params.sack_delay == 0 && params.sack_freq == 0)
|
|
|
|
return 0;
|
|
|
|
} else if (optlen == sizeof(struct sctp_assoc_value)) {
|
2013-12-23 21:29:43 +08:00
|
|
|
pr_warn_ratelimited(DEPRECATED
|
2014-01-03 01:54:27 +08:00
|
|
|
"%s (pid %d) "
|
2013-12-23 21:29:43 +08:00
|
|
|
"Use of struct sctp_assoc_value in delayed_ack socket option.\n"
|
2014-01-03 01:54:27 +08:00
|
|
|
"Use struct sctp_sack_info instead\n",
|
|
|
|
current->comm, task_pid_nr(current));
|
2008-05-10 06:13:26 +08:00
|
|
|
if (copy_from_user(&params, optval, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
if (params.sack_delay == 0)
|
|
|
|
params.sack_freq = 1;
|
|
|
|
else
|
|
|
|
params.sack_freq = 0;
|
|
|
|
} else
|
2013-12-23 12:16:50 +08:00
|
|
|
return -EINVAL;
|
2005-12-23 03:37:30 +08:00
|
|
|
|
|
|
|
/* Validate value parameter. */
|
2008-05-10 06:13:26 +08:00
|
|
|
if (params.sack_delay > 500)
|
2005-12-23 03:37:30 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2019-01-28 15:08:34 +08:00
|
|
|
/* Get association, if sack_assoc_id != SCTP_FUTURE_ASSOC and the
|
|
|
|
* socket is a one to many style socket, and an association
|
|
|
|
* was not found, then the id was invalid.
|
2007-02-09 22:25:18 +08:00
|
|
|
*/
|
2008-05-10 06:13:26 +08:00
|
|
|
asoc = sctp_id2assoc(sk, params.sack_assoc_id);
|
2019-01-28 15:08:34 +08:00
|
|
|
if (!asoc && params.sack_assoc_id > SCTP_ALL_ASSOC &&
|
|
|
|
sctp_style(sk, UDP))
|
2005-12-23 03:37:30 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2019-01-28 15:08:34 +08:00
|
|
|
if (asoc) {
|
|
|
|
sctp_apply_asoc_delayed_ack(&params, asoc);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (params.sack_assoc_id == SCTP_FUTURE_ASSOC ||
|
|
|
|
params.sack_assoc_id == SCTP_ALL_ASSOC) {
|
|
|
|
if (params.sack_delay) {
|
2008-05-10 06:13:26 +08:00
|
|
|
sp->sackdelay = params.sack_delay;
|
2007-02-09 22:25:18 +08:00
|
|
|
sp->param_flags =
|
2014-01-15 17:24:01 +08:00
|
|
|
sctp_spp_sackdelay_enable(sp->param_flags);
|
2005-12-23 03:37:30 +08:00
|
|
|
}
|
2019-01-28 15:08:34 +08:00
|
|
|
if (params.sack_freq == 1) {
|
2007-02-09 22:25:18 +08:00
|
|
|
sp->param_flags =
|
2014-01-15 17:24:01 +08:00
|
|
|
sctp_spp_sackdelay_disable(sp->param_flags);
|
2019-01-28 15:08:34 +08:00
|
|
|
} else if (params.sack_freq > 1) {
|
2008-05-10 06:13:26 +08:00
|
|
|
sp->sackfreq = params.sack_freq;
|
|
|
|
sp->param_flags =
|
2014-01-15 17:24:01 +08:00
|
|
|
sctp_spp_sackdelay_enable(sp->param_flags);
|
2008-05-10 06:13:26 +08:00
|
|
|
}
|
2005-12-23 03:37:30 +08:00
|
|
|
}
|
|
|
|
|
2019-01-28 15:08:34 +08:00
|
|
|
if (params.sack_assoc_id == SCTP_CURRENT_ASSOC ||
|
|
|
|
params.sack_assoc_id == SCTP_ALL_ASSOC)
|
|
|
|
list_for_each_entry(asoc, &sp->ep->asocs, asocs)
|
|
|
|
sctp_apply_asoc_delayed_ack(¶ms, asoc);
|
2007-02-09 22:25:18 +08:00
|
|
|
|
2005-12-23 03:37:30 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
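/* Illustrative userspace sketch (not part of this file): the handler above
 * is normally reached through setsockopt() with struct sctp_sack_info on a
 * hypothetical SCTP socket "fd":
 *
 *	struct sctp_sack_info sack = {
 *		.sack_assoc_id = SCTP_FUTURE_ASSOC,
 *		.sack_delay    = 200,
 *		.sack_freq     = 2,
 *	};
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &sack, sizeof(sack));
 *
 * sack_delay is in milliseconds and is rejected above when larger than 500;
 * a sack_freq of 1 disables delayed SACKs.
 */
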
/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
 *
 * Applications can specify protocol parameters for the default association
 * initialization. The option name argument to setsockopt() and getsockopt()
 * is SCTP_INITMSG.
 *
 * Setting initialization parameters is effective only on an unconnected
 * socket (for UDP-style sockets only future associations are affected
 * by the change). With TCP-style sockets, this option is inherited by
 * sockets derived from a listener socket.
 */
static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen)
{
	struct sctp_initmsg sinit;
	struct sctp_sock *sp = sctp_sk(sk);

	if (optlen != sizeof(struct sctp_initmsg))
		return -EINVAL;
	if (copy_from_user(&sinit, optval, optlen))
		return -EFAULT;

	if (sinit.sinit_num_ostreams)
		sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams;
	if (sinit.sinit_max_instreams)
		sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams;
	if (sinit.sinit_max_attempts)
		sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts;
	if (sinit.sinit_max_init_timeo)
		sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo;

	return 0;
}

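/* Illustrative userspace sketch (not part of this file), assuming "fd" is an
 * unconnected SCTP socket; requests stream counts for future associations:
 *
 *	struct sctp_initmsg init = {
 *		.sinit_num_ostreams  = 10,
 *		.sinit_max_instreams = 10,
 *		.sinit_max_attempts  = 4,
 *	};
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG, &init, sizeof(init));
 *
 * Fields left at zero keep their current values, mirroring the checks in
 * sctp_setsockopt_initmsg() above.
 */
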
/*
 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
 *
 * Applications that wish to use the sendto() system call may wish to
 * specify a default set of parameters that would normally be supplied
 * through the inclusion of ancillary data. This socket option allows
 * such an application to set the default sctp_sndrcvinfo structure.
 * The application that wishes to use this socket option simply passes
 * in to this call the sctp_sndrcvinfo structure defined in Section
 * 5.2.2) The input parameters accepted by this call include
 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in
 * to this call if the caller is using the UDP model.
 */
static int sctp_setsockopt_default_send_param(struct sock *sk,
					      char __user *optval,
					      unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_sndrcvinfo info;

	if (optlen != sizeof(info))
		return -EINVAL;
	if (copy_from_user(&info, optval, optlen))
		return -EFAULT;
	if (info.sinfo_flags &
	    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
	      SCTP_ABORT | SCTP_EOF))
		return -EINVAL;

	asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
	if (!asoc && info.sinfo_assoc_id > SCTP_ALL_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		asoc->default_stream = info.sinfo_stream;
		asoc->default_flags = info.sinfo_flags;
		asoc->default_ppid = info.sinfo_ppid;
		asoc->default_context = info.sinfo_context;
		asoc->default_timetolive = info.sinfo_timetolive;

		return 0;
	}

	if (info.sinfo_assoc_id == SCTP_FUTURE_ASSOC ||
	    info.sinfo_assoc_id == SCTP_ALL_ASSOC) {
		sp->default_stream = info.sinfo_stream;
		sp->default_flags = info.sinfo_flags;
		sp->default_ppid = info.sinfo_ppid;
		sp->default_context = info.sinfo_context;
		sp->default_timetolive = info.sinfo_timetolive;
	}

	if (info.sinfo_assoc_id == SCTP_CURRENT_ASSOC ||
	    info.sinfo_assoc_id == SCTP_ALL_ASSOC) {
		list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
			asoc->default_stream = info.sinfo_stream;
			asoc->default_flags = info.sinfo_flags;
			asoc->default_ppid = info.sinfo_ppid;
			asoc->default_context = info.sinfo_context;
			asoc->default_timetolive = info.sinfo_timetolive;
		}
	}

	return 0;
}

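/* Illustrative userspace sketch (not part of this file): set per-socket send
 * defaults so ancillary data is not needed on every send; "fd" is a
 * hypothetical SCTP socket:
 *
 *	struct sctp_sndrcvinfo def = {
 *		.sinfo_stream     = 1,
 *		.sinfo_ppid       = htonl(42),
 *		.sinfo_timetolive = 5000,
 *		.sinfo_assoc_id   = SCTP_FUTURE_ASSOC,
 *	};
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM, &def, sizeof(def));
 *
 * Only SCTP_UNORDERED, SCTP_ADDR_OVER, SCTP_ABORT and SCTP_EOF are accepted
 * in sinfo_flags, as enforced above.
 */
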
/* RFC6458, Section 8.1.31. Set/get Default Send Parameters
 * (SCTP_DEFAULT_SNDINFO)
 */
static int sctp_setsockopt_default_sndinfo(struct sock *sk,
					   char __user *optval,
					   unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_sndinfo info;

	if (optlen != sizeof(info))
		return -EINVAL;
	if (copy_from_user(&info, optval, optlen))
		return -EFAULT;
	if (info.snd_flags &
	    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
	      SCTP_ABORT | SCTP_EOF))
		return -EINVAL;

	asoc = sctp_id2assoc(sk, info.snd_assoc_id);
	if (!asoc && info.snd_assoc_id > SCTP_ALL_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		asoc->default_stream = info.snd_sid;
		asoc->default_flags = info.snd_flags;
		asoc->default_ppid = info.snd_ppid;
		asoc->default_context = info.snd_context;

		return 0;
	}

	if (info.snd_assoc_id == SCTP_FUTURE_ASSOC ||
	    info.snd_assoc_id == SCTP_ALL_ASSOC) {
		sp->default_stream = info.snd_sid;
		sp->default_flags = info.snd_flags;
		sp->default_ppid = info.snd_ppid;
		sp->default_context = info.snd_context;
	}

	if (info.snd_assoc_id == SCTP_CURRENT_ASSOC ||
	    info.snd_assoc_id == SCTP_ALL_ASSOC) {
		list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
			asoc->default_stream = info.snd_sid;
			asoc->default_flags = info.snd_flags;
			asoc->default_ppid = info.snd_ppid;
			asoc->default_context = info.snd_context;
		}
	}

	return 0;
}

/* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
 *
 * Requests that the local SCTP stack use the enclosed peer address as
 * the association primary. The enclosed address must be one of the
 * association peer's addresses.
 */
static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
					unsigned int optlen)
{
	struct sctp_prim prim;
	struct sctp_transport *trans;
	struct sctp_af *af;
	int err;

	if (optlen != sizeof(struct sctp_prim))
		return -EINVAL;

	if (copy_from_user(&prim, optval, sizeof(struct sctp_prim)))
		return -EFAULT;

	/* Allow security module to validate address but need address len. */
	af = sctp_get_af_specific(prim.ssp_addr.ss_family);
	if (!af)
		return -EINVAL;

	err = security_sctp_bind_connect(sk, SCTP_PRIMARY_ADDR,
					 (struct sockaddr *)&prim.ssp_addr,
					 af->sockaddr_len);
	if (err)
		return err;

	trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id);
	if (!trans)
		return -EINVAL;

	sctp_assoc_set_primary(trans->asoc, trans);

	return 0;
}

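/* Illustrative userspace sketch (not part of this file): pick one of the
 * peer's known addresses as the primary path; "fd", "assoc_id" and "peer_sa"
 * (a struct sockaddr_in or sockaddr_in6 of the peer) are hypothetical:
 *
 *	struct sctp_prim prim = { .ssp_assoc_id = assoc_id };
 *
 *	memcpy(&prim.ssp_addr, &peer_sa, sizeof(peer_sa));
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_PRIMARY_ADDR, &prim, sizeof(prim));
 *
 * The address must already be a transport of the association, otherwise
 * sctp_addr_id2transport() above fails and -EINVAL is returned.
 */
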
/*
 * 7.1.5 SCTP_NODELAY
 *
 * Turn on/off any Nagle-like algorithm. This means that packets are
 * generally sent as soon as possible and no unnecessary delays are
 * introduced, at the cost of more packets in the network. Expects an
 * integer boolean flag.
 */
static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
				   unsigned int optlen)
{
	int val;

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1;
	return 0;
}

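/* Illustrative userspace sketch (not part of this file): disable the
 * Nagle-like delay on a hypothetical SCTP socket "fd":
 *
 *	int on = 1;
 *
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on));
 */
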
/*
 *
 * 7.1.1 SCTP_RTOINFO
 *
 * The protocol parameters used to initialize and bound retransmission
 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
 * and modify these parameters.
 * All parameters are time values, in milliseconds. A value of 0, when
 * modifying the parameters, indicates that the current value should not
 * be changed.
 *
 */
static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen)
{
	struct sctp_rtoinfo rtoinfo;
	struct sctp_association *asoc;
	unsigned long rto_min, rto_max;
	struct sctp_sock *sp = sctp_sk(sk);

	if (optlen != sizeof (struct sctp_rtoinfo))
		return -EINVAL;

	if (copy_from_user(&rtoinfo, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);

	/* Set the values to the specific association */
	if (!asoc && rtoinfo.srto_assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	rto_max = rtoinfo.srto_max;
	rto_min = rtoinfo.srto_min;

	if (rto_max)
		rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max;
	else
		rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max;

	if (rto_min)
		rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min;
	else
		rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min;

	if (rto_min > rto_max)
		return -EINVAL;

	if (asoc) {
		if (rtoinfo.srto_initial != 0)
			asoc->rto_initial =
				msecs_to_jiffies(rtoinfo.srto_initial);
		asoc->rto_max = rto_max;
		asoc->rto_min = rto_min;
	} else {
		/* If there is no association or the association-id = 0
		 * set the values to the endpoint.
		 */
		if (rtoinfo.srto_initial != 0)
			sp->rtoinfo.srto_initial = rtoinfo.srto_initial;
		sp->rtoinfo.srto_max = rto_max;
		sp->rtoinfo.srto_min = rto_min;
	}

	return 0;
}

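/* Illustrative userspace sketch (not part of this file): tighten the RTO
 * bounds for future associations on a hypothetical SCTP socket "fd"; all
 * values are in milliseconds and 0 means "leave unchanged":
 *
 *	struct sctp_rtoinfo rto = {
 *		.srto_assoc_id = SCTP_FUTURE_ASSOC,
 *		.srto_initial  = 500,
 *		.srto_min      = 200,
 *		.srto_max      = 2000,
 *	};
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto));
 *
 * srto_min must not exceed srto_max, as validated above.
 */
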
/*
 *
 * 7.1.2 SCTP_ASSOCINFO
 *
 * This option is used to tune the maximum retransmission attempts
 * of the association.
 * Returns an error if the new association retransmission value is
 * greater than the sum of the retransmission value of the peer.
 * See [SCTP] for more information.
 *
 */
static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen)
{

	struct sctp_assocparams assocparams;
	struct sctp_association *asoc;

	if (optlen != sizeof(struct sctp_assocparams))
		return -EINVAL;
	if (copy_from_user(&assocparams, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);

	if (!asoc && assocparams.sasoc_assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	/* Set the values to the specific association */
	if (asoc) {
		if (assocparams.sasoc_asocmaxrxt != 0) {
			__u32 path_sum = 0;
			int   paths = 0;
			struct sctp_transport *peer_addr;

			list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list,
					transports) {
				path_sum += peer_addr->pathmaxrxt;
				paths++;
			}

			/* Only validate asocmaxrxt if we have more than
			 * one path/transport.  We do this because path
			 * retransmissions are only counted when we have more
			 * than one path.
			 */
			if (paths > 1 &&
			    assocparams.sasoc_asocmaxrxt > path_sum)
				return -EINVAL;

			asoc->max_retrans = assocparams.sasoc_asocmaxrxt;
		}

		if (assocparams.sasoc_cookie_life != 0)
			asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life);
	} else {
		/* Set the values to the endpoint */
		struct sctp_sock *sp = sctp_sk(sk);

		if (assocparams.sasoc_asocmaxrxt != 0)
			sp->assocparams.sasoc_asocmaxrxt =
				assocparams.sasoc_asocmaxrxt;
		if (assocparams.sasoc_cookie_life != 0)
			sp->assocparams.sasoc_cookie_life =
				assocparams.sasoc_cookie_life;
	}
	return 0;
}

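/* Illustrative userspace sketch (not part of this file): raise the
 * association-level retransmission limit and cookie lifetime for future
 * associations on a hypothetical SCTP socket "fd"; sasoc_cookie_life is
 * given in milliseconds:
 *
 *	struct sctp_assocparams ap = {
 *		.sasoc_assoc_id    = SCTP_FUTURE_ASSOC,
 *		.sasoc_asocmaxrxt  = 8,
 *		.sasoc_cookie_life = 60000,
 *	};
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_ASSOCINFO, &ap, sizeof(ap));
 */
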
/*
 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
 *
 * This socket option is a boolean flag which turns on or off mapped V4
 * addresses. If this option is turned on and the socket is type
 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
 * If this option is turned off, then no mapping will be done of V4
 * addresses and a user will receive both PF_INET6 and PF_INET type
 * addresses on the socket.
 */
static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen)
{
	int val;
	struct sctp_sock *sp = sctp_sk(sk);

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;
	if (val)
		sp->v4mapped = 1;
	else
		sp->v4mapped = 0;

	return 0;
}

/*
 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
 * This option will get or set the maximum size to put in any outgoing
 * SCTP DATA chunk. If a message is larger than this size it will be
 * fragmented by SCTP into the specified size. Note that the underlying
 * SCTP implementation may fragment into smaller sized chunks when the
 * PMTU of the underlying association is smaller than the value set by
 * the user. The default value for this option is '0' which indicates
 * the user is NOT limiting fragmentation and only the PMTU will affect
 * SCTP's choice of DATA chunk size. Note also that values set larger
 * than the maximum size of an IP datagram will effectively let SCTP
 * control fragmentation (i.e. the same as setting this option to 0).
 *
 * The following structure is used to access and modify this parameter:
 *
 * struct sctp_assoc_value {
 *	sctp_assoc_t assoc_id;
 *	uint32_t assoc_value;
 * };
 *
 * assoc_id: This parameter is ignored for one-to-one style sockets.
 *	For one-to-many style sockets this parameter indicates which
 *	association the user is performing an action upon. Note that if
 *	this field's value is zero then the endpoints default value is
 *	changed (affecting future associations only).
 * assoc_value: This parameter specifies the maximum size in bytes.
 */
static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_assoc_value params;
	struct sctp_association *asoc;
	int val;

	if (optlen == sizeof(int)) {
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of int in maxseg socket option.\n"
				    "Use struct sctp_assoc_value instead\n",
				    current->comm, task_pid_nr(current));
		if (copy_from_user(&val, optval, optlen))
			return -EFAULT;
		params.assoc_id = SCTP_FUTURE_ASSOC;
	} else if (optlen == sizeof(struct sctp_assoc_value)) {
		if (copy_from_user(&params, optval, optlen))
			return -EFAULT;
		val = params.assoc_value;
	} else {
		return -EINVAL;
	}

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (val) {
		int min_len, max_len;
		__u16 datasize = asoc ? sctp_datachk_len(&asoc->stream) :
				 sizeof(struct sctp_data_chunk);

		min_len = sctp_min_frag_point(sp, datasize);
		max_len = SCTP_MAX_CHUNK_LEN - datasize;

		if (val < min_len || val > max_len)
			return -EINVAL;
	}

	if (asoc) {
		asoc->user_frag = val;
		sctp_assoc_update_frag_point(asoc);
	} else {
		sp->user_frag = val;
	}

	return 0;
}

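/* Illustrative userspace sketch (not part of this file): cap outgoing DATA
 * chunks at 1200 bytes for future associations on a hypothetical SCTP
 * socket "fd", using the non-deprecated struct form:
 *
 *	struct sctp_assoc_value av = {
 *		.assoc_id    = SCTP_FUTURE_ASSOC,
 *		.assoc_value = 1200,
 *	};
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));
 *
 * A value of 0 removes the limit and lets the PMTU alone govern
 * fragmentation, as described in the comment above.
 */
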
/*
 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
 *
 * Requests that the peer mark the enclosed address as the association
 * primary. The enclosed address must be one of the association's
 * locally bound addresses. The following structure is used to make a
 * set primary request:
 */
static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
					     unsigned int optlen)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	struct sctp_association *asoc = NULL;
	struct sctp_setpeerprim prim;
	struct sctp_chunk *chunk;
	struct sctp_af *af;
	int err;

	sp = sctp_sk(sk);

	if (!net->sctp.addip_enable)
		return -EPERM;

	if (optlen != sizeof(struct sctp_setpeerprim))
		return -EINVAL;

	if (copy_from_user(&prim, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
	if (!asoc)
		return -EINVAL;

	if (!asoc->peer.asconf_capable)
		return -EPERM;

	if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY)
		return -EPERM;

	if (!sctp_state(asoc, ESTABLISHED))
		return -ENOTCONN;

	af = sctp_get_af_specific(prim.sspp_addr.ss_family);
	if (!af)
		return -EINVAL;

	if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL))
		return -EADDRNOTAVAIL;

	if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr))
		return -EADDRNOTAVAIL;

	/* Allow security module to validate address. */
	err = security_sctp_bind_connect(sk, SCTP_SET_PEER_PRIMARY_ADDR,
					 (struct sockaddr *)&prim.sspp_addr,
					 af->sockaddr_len);
	if (err)
		return err;

	/* Create an ASCONF chunk with SET_PRIMARY parameter */
	chunk = sctp_make_asconf_set_prim(asoc,
					  (union sctp_addr *)&prim.sspp_addr);
	if (!chunk)
		return -ENOMEM;

	err = sctp_send_asconf(asoc, chunk);

	pr_debug("%s: we set peer primary addr primitively\n", __func__);

	return err;
}

static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
					    unsigned int optlen)
{
	struct sctp_setadaptation adaptation;

	if (optlen != sizeof(struct sctp_setadaptation))
		return -EINVAL;
	if (copy_from_user(&adaptation, optval, optlen))
		return -EFAULT;

	sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;

	return 0;
}

/*
 * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
 *
 * The context field in the sctp_sndrcvinfo structure is normally only
 * used when a failed message is retrieved holding the value that was
 * sent down on the actual send call. This option allows the setting of
 * a default context on an association basis that will be received on
 * reading messages from the peer. This is especially helpful in the
 * one-2-many model for an application to keep some reference to an
 * internal state machine that is processing messages on the
 * association. Note that the setting of this value only affects
 * received messages from the peer and does not affect the value that is
 * saved with outbound messages.
 */
static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
				   unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_assoc_value params;
	struct sctp_association *asoc;

	if (optlen != sizeof(struct sctp_assoc_value))
		return -EINVAL;
	if (copy_from_user(&params, optval, optlen))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id > SCTP_ALL_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		asoc->default_rcv_context = params.assoc_value;

		return 0;
	}

	if (params.assoc_id == SCTP_FUTURE_ASSOC ||
	    params.assoc_id == SCTP_ALL_ASSOC)
		sp->default_rcv_context = params.assoc_value;

	if (params.assoc_id == SCTP_CURRENT_ASSOC ||
	    params.assoc_id == SCTP_ALL_ASSOC)
		list_for_each_entry(asoc, &sp->ep->asocs, asocs)
			asoc->default_rcv_context = params.assoc_value;

	return 0;
}

/*
 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
 *
 * This option will at a minimum specify if the implementation is doing
 * fragmented interleave. Fragmented interleave, for a one to many
 * socket, is when subsequent calls to receive a message may return
 * parts of messages from different associations. Some implementations
 * may allow you to turn this value on or off. If so, when turned off,
 * no fragment interleave will occur (which will cause a head of line
 * blocking amongst multiple associations sharing the same one to many
 * socket). When this option is turned on, then each receive call may
 * come from a different association (thus the user must receive data
 * with the extended calls (e.g. sctp_recvmsg) to keep track of which
 * association each receive belongs to).
 *
 * This option takes a boolean value. A non-zero value indicates that
 * fragmented interleave is on. A value of zero indicates that
 * fragmented interleave is off.
 *
 * Note that it is important that an implementation that allows this
 * option to be turned on, have it off by default. Otherwise an unaware
 * application using the one to many model may become confused and act
 * incorrectly.
 */
static int sctp_setsockopt_fragment_interleave(struct sock *sk,
					       char __user *optval,
					       unsigned int optlen)
{
	int val;

	if (optlen != sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	sctp_sk(sk)->frag_interleave = !!val;

	if (!sctp_sk(sk)->frag_interleave)
		sctp_sk(sk)->strm_interleave = 0;

	return 0;
}

/*
 * 8.1.21. Set or Get the SCTP Partial Delivery Point
 *       (SCTP_PARTIAL_DELIVERY_POINT)
 *
 * This option will set or get the SCTP partial delivery point. This
 * point is the size of a message where the partial delivery API will be
 * invoked to help free up rwnd space for the peer. Setting this to a
 * lower value will cause partial deliveries to happen more often. The
 * calls argument is an integer that sets or gets the partial delivery
 * point. Note also that the call will fail if the user attempts to set
 * this value larger than the socket receive buffer size.
 *
 * Note that any single message having a length smaller than or equal to
 * the SCTP partial delivery point will be delivered in one single read
 * call as long as the user provided buffer is large enough to hold the
 * message.
 */
static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
						  char __user *optval,
						  unsigned int optlen)
{
	u32 val;

	if (optlen != sizeof(u32))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	/* Note: We double the receive buffer from what the user sets
	 * it to be, also initial rwnd is based on rcvbuf/2.
	 */
	if (val > (sk->sk_rcvbuf >> 1))
		return -EINVAL;

	sctp_sk(sk)->pd_point = val;

	return 0; /* is this the right error code? */
}

/*
 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
 *
 * This option will allow a user to change the maximum burst of packets
 * that can be emitted by this association. Note that the default value
 * is 4, and some implementations may restrict this setting so that it
 * can only be lowered.
 *
 * NOTE: This text doesn't seem right. Do this on a socket basis with
 * future associations inheriting the socket value.
 */
static int sctp_setsockopt_maxburst(struct sock *sk,
				    char __user *optval,
				    unsigned int optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_assoc_value params;
	struct sctp_association *asoc;

	if (optlen == sizeof(int)) {
		pr_warn_ratelimited(DEPRECATED
				    "%s (pid %d) "
				    "Use of int in max_burst socket option deprecated.\n"
				    "Use struct sctp_assoc_value instead\n",
				    current->comm, task_pid_nr(current));
		if (copy_from_user(&params.assoc_value, optval, optlen))
			return -EFAULT;
		params.assoc_id = SCTP_FUTURE_ASSOC;
	} else if (optlen == sizeof(struct sctp_assoc_value)) {
		if (copy_from_user(&params, optval, optlen))
			return -EFAULT;
	} else
		return -EINVAL;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id > SCTP_ALL_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		asoc->max_burst = params.assoc_value;

		return 0;
	}

	if (params.assoc_id == SCTP_FUTURE_ASSOC ||
	    params.assoc_id == SCTP_ALL_ASSOC)
		sp->max_burst = params.assoc_value;

	if (params.assoc_id == SCTP_CURRENT_ASSOC ||
	    params.assoc_id == SCTP_ALL_ASSOC)
		list_for_each_entry(asoc, &sp->ep->asocs, asocs)
			asoc->max_burst = params.assoc_value;

	return 0;
}

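/* Illustrative userspace sketch (not part of this file): lower the burst
 * limit for every current and future association on a hypothetical
 * one-to-many SCTP socket "fd":
 *
 *	struct sctp_assoc_value av = {
 *		.assoc_id    = SCTP_ALL_ASSOC,
 *		.assoc_value = 2,
 *	};
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_MAX_BURST, &av, sizeof(av));
 */
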
/*
 * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK)
 *
 * This set option adds a chunk type that the user is requesting to be
 * received only in an authenticated way. Changes to the list of chunks
 * will only affect future associations on the socket.
 */
static int sctp_setsockopt_auth_chunk(struct sock *sk,
				      char __user *optval,
				      unsigned int optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authchunk val;

	if (!ep->auth_enable)
		return -EACCES;

	if (optlen != sizeof(struct sctp_authchunk))
		return -EINVAL;
	if (copy_from_user(&val, optval, optlen))
		return -EFAULT;

	switch (val.sauth_chunk) {
	case SCTP_CID_INIT:
	case SCTP_CID_INIT_ACK:
	case SCTP_CID_SHUTDOWN_COMPLETE:
	case SCTP_CID_AUTH:
		return -EINVAL;
	}

	/* add this chunk id to the endpoint */
	return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk);
}

/*
 * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT)
 *
 * This option gets or sets the list of HMAC algorithms that the local
 * endpoint requires the peer to use.
 */
static int sctp_setsockopt_hmac_ident(struct sock *sk,
				      char __user *optval,
				      unsigned int optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_hmacalgo *hmacs;
	u32 idents;
	int err;

	if (!ep->auth_enable)
		return -EACCES;

	if (optlen < sizeof(struct sctp_hmacalgo))
		return -EINVAL;
	optlen = min_t(unsigned int, optlen, sizeof(struct sctp_hmacalgo) +
					     SCTP_AUTH_NUM_HMACS * sizeof(u16));

	hmacs = memdup_user(optval, optlen);
	if (IS_ERR(hmacs))
		return PTR_ERR(hmacs);

	idents = hmacs->shmac_num_idents;
	if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
	    (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) {
		err = -EINVAL;
		goto out;
	}

	err = sctp_auth_ep_set_hmacs(ep, hmacs);
out:
	kfree(hmacs);
	return err;
}

/*
 * 7.1.20. Set a shared key (SCTP_AUTH_KEY)
 *
 * This option will set a shared secret key which is used to build an
 * association shared key.
 */
static int sctp_setsockopt_auth_key(struct sock *sk,
				    char __user *optval,
				    unsigned int optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authkey *authkey;
	struct sctp_association *asoc;
	int ret = -EINVAL;

	if (!ep->auth_enable)
		return -EACCES;

	if (optlen <= sizeof(struct sctp_authkey))
		return -EINVAL;
	/* authkey->sca_keylength is u16, so optlen can't be bigger than
	 * this.
	 */
	optlen = min_t(unsigned int, optlen, USHRT_MAX + sizeof(*authkey));

	authkey = memdup_user(optval, optlen);
	if (IS_ERR(authkey))
		return PTR_ERR(authkey);

	if (authkey->sca_keylength > optlen - sizeof(*authkey))
		goto out;

	asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
	if (!asoc && authkey->sca_assoc_id > SCTP_ALL_ASSOC &&
	    sctp_style(sk, UDP))
		goto out;

	if (asoc) {
		ret = sctp_auth_set_key(ep, asoc, authkey);
		goto out;
	}

	if (authkey->sca_assoc_id == SCTP_FUTURE_ASSOC ||
	    authkey->sca_assoc_id == SCTP_ALL_ASSOC) {
		ret = sctp_auth_set_key(ep, asoc, authkey);
		if (ret)
			goto out;
	}

	ret = 0;

	if (authkey->sca_assoc_id == SCTP_CURRENT_ASSOC ||
	    authkey->sca_assoc_id == SCTP_ALL_ASSOC) {
		list_for_each_entry(asoc, &ep->asocs, asocs) {
			int res = sctp_auth_set_key(ep, asoc, authkey);

			if (res && !ret)
				ret = res;
		}
	}

out:
	kzfree(authkey);
	return ret;
}

/*
|
|
|
|
* 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY)
|
|
|
|
*
|
|
|
|
* This option will get or set the active shared key to be used to build
|
|
|
|
* the association shared key.
|
|
|
|
*/
|
|
|
|
static int sctp_setsockopt_active_key(struct sock *sk,
|
2009-10-01 07:12:20 +08:00
|
|
|
char __user *optval,
|
|
|
|
unsigned int optlen)
|
2007-09-17 10:34:00 +08:00
|
|
|
{
|
2014-04-17 23:26:50 +08:00
|
|
|
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
|
2007-09-17 10:34:00 +08:00
|
|
|
struct sctp_association *asoc;
|
2019-01-28 15:08:40 +08:00
|
|
|
struct sctp_authkeyid val;
|
|
|
|
int ret = 0;
|
2007-09-17 10:34:00 +08:00
|
|
|
|
2014-04-17 23:26:50 +08:00
|
|
|
if (!ep->auth_enable)
|
2008-08-21 18:34:25 +08:00
|
|
|
return -EACCES;
|
|
|
|
|
2007-09-17 10:34:00 +08:00
|
|
|
if (optlen != sizeof(struct sctp_authkeyid))
|
|
|
|
return -EINVAL;
|
|
|
|
if (copy_from_user(&val, optval, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
asoc = sctp_id2assoc(sk, val.scact_assoc_id);
|
2019-01-28 15:08:40 +08:00
|
|
|
if (!asoc && val.scact_assoc_id > SCTP_ALL_ASSOC &&
|
|
|
|
sctp_style(sk, UDP))
|
2007-09-17 10:34:00 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2019-01-28 15:08:40 +08:00
|
|
|
if (asoc)
|
|
|
|
return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
|
|
|
|
|
|
|
|
if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
|
|
|
|
val.scact_assoc_id == SCTP_ALL_ASSOC) {
|
|
|
|
ret = sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (val.scact_assoc_id == SCTP_CURRENT_ASSOC ||
|
|
|
|
val.scact_assoc_id == SCTP_ALL_ASSOC) {
|
|
|
|
list_for_each_entry(asoc, &ep->asocs, asocs) {
|
|
|
|
int res = sctp_auth_set_active_key(ep, asoc,
|
|
|
|
val.scact_keynumber);
|
|
|
|
|
|
|
|
if (res && !ret)
|
|
|
|
ret = res;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
2007-09-17 10:34:00 +08:00
|
|
|
}
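For reference, a user-space caller reaches the handler above roughly as
follows. This is only a sketch: it assumes an already created SCTP socket
"fd" and that <netinet/sctp.h> (lksctp-tools / recent uapi headers) provides
struct sctp_authkeyid, SCTP_AUTH_ACTIVE_KEY and SCTP_FUTURE_ASSOC.

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/sctp.h>

	/* Sketch: make shared key number 1 the endpoint-wide active key. */
	static int set_active_auth_key(int fd)
	{
		struct sctp_authkeyid keyid;

		memset(&keyid, 0, sizeof(keyid));
		keyid.scact_assoc_id  = SCTP_FUTURE_ASSOC;
		keyid.scact_keynumber = 1;

		return setsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY,
				  &keyid, sizeof(keyid));
	}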
|
|
|
|
|
|
|
|
/*
|
|
|
|
* 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY)
|
|
|
|
*
|
|
|
|
* This set option will delete a shared secret key from use.
|
|
|
|
*/
|
|
|
|
static int sctp_setsockopt_del_key(struct sock *sk,
|
2009-10-01 07:12:20 +08:00
|
|
|
char __user *optval,
|
|
|
|
unsigned int optlen)
|
2007-09-17 10:34:00 +08:00
|
|
|
{
|
2014-04-17 23:26:50 +08:00
|
|
|
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
|
2007-09-17 10:34:00 +08:00
|
|
|
struct sctp_association *asoc;
|
2019-01-28 15:08:41 +08:00
|
|
|
struct sctp_authkeyid val;
|
|
|
|
int ret = 0;
|
2007-09-17 10:34:00 +08:00
|
|
|
|
2014-04-17 23:26:50 +08:00
|
|
|
if (!ep->auth_enable)
|
2008-08-21 18:34:25 +08:00
|
|
|
return -EACCES;
|
|
|
|
|
2007-09-17 10:34:00 +08:00
|
|
|
if (optlen != sizeof(struct sctp_authkeyid))
|
|
|
|
return -EINVAL;
|
|
|
|
if (copy_from_user(&val, optval, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
asoc = sctp_id2assoc(sk, val.scact_assoc_id);
|
2019-01-28 15:08:41 +08:00
|
|
|
if (!asoc && val.scact_assoc_id > SCTP_ALL_ASSOC &&
|
|
|
|
sctp_style(sk, UDP))
|
2007-09-17 10:34:00 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2019-01-28 15:08:41 +08:00
|
|
|
if (asoc)
|
|
|
|
return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
|
2007-09-17 10:34:00 +08:00
|
|
|
|
2019-01-28 15:08:41 +08:00
|
|
|
if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
|
|
|
|
val.scact_assoc_id == SCTP_ALL_ASSOC) {
|
|
|
|
ret = sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (val.scact_assoc_id == SCTP_CURRENT_ASSOC ||
|
|
|
|
val.scact_assoc_id == SCTP_ALL_ASSOC) {
|
|
|
|
list_for_each_entry(asoc, &ep->asocs, asocs) {
|
|
|
|
int res = sctp_auth_del_key_id(ep, asoc,
|
|
|
|
val.scact_keynumber);
|
|
|
|
|
|
|
|
if (res && !ret)
|
|
|
|
ret = res;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
2007-09-17 10:34:00 +08:00
|
|
|
}
|
|
|
|
|
2018-03-14 19:05:32 +08:00
|
|
|
/*
|
|
|
|
* 8.3.4 Deactivate a Shared Key (SCTP_AUTH_DEACTIVATE_KEY)
|
|
|
|
*
|
|
|
|
* This set option will deactivate a shared secret key.
|
|
|
|
*/
|
|
|
|
static int sctp_setsockopt_deactivate_key(struct sock *sk, char __user *optval,
|
|
|
|
unsigned int optlen)
|
|
|
|
{
|
|
|
|
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
|
|
|
|
struct sctp_association *asoc;
|
2019-01-28 15:08:42 +08:00
|
|
|
struct sctp_authkeyid val;
|
|
|
|
int ret = 0;
|
2018-03-14 19:05:32 +08:00
|
|
|
|
|
|
|
if (!ep->auth_enable)
|
|
|
|
return -EACCES;
|
|
|
|
|
|
|
|
if (optlen != sizeof(struct sctp_authkeyid))
|
|
|
|
return -EINVAL;
|
|
|
|
if (copy_from_user(&val, optval, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
asoc = sctp_id2assoc(sk, val.scact_assoc_id);
|
2019-01-28 15:08:42 +08:00
|
|
|
if (!asoc && val.scact_assoc_id > SCTP_ALL_ASSOC &&
|
|
|
|
sctp_style(sk, UDP))
|
2018-03-14 19:05:32 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2019-01-28 15:08:42 +08:00
|
|
|
if (asoc)
|
|
|
|
return sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);
|
|
|
|
|
|
|
|
if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
|
|
|
|
val.scact_assoc_id == SCTP_ALL_ASSOC) {
|
|
|
|
ret = sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (val.scact_assoc_id == SCTP_CURRENT_ASSOC ||
|
|
|
|
val.scact_assoc_id == SCTP_ALL_ASSOC) {
|
|
|
|
list_for_each_entry(asoc, &ep->asocs, asocs) {
|
|
|
|
int res = sctp_auth_deact_key_id(ep, asoc,
|
|
|
|
val.scact_keynumber);
|
|
|
|
|
|
|
|
if (res && !ret)
|
|
|
|
ret = res;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
2018-03-14 19:05:32 +08:00
|
|
|
}
|
|
|
|
|
2011-04-26 19:16:31 +08:00
|
|
|
/*
|
|
|
|
* 8.1.23 SCTP_AUTO_ASCONF
|
|
|
|
*
|
|
|
|
* This option will enable or disable the use of the automatic generation of
|
|
|
|
* ASCONF chunks to add and delete addresses to an existing association. Note
|
|
|
|
* that this option has two caveats namely: a) it only affects sockets that
|
|
|
|
* are bound to all addresses available to the SCTP stack, and b) the system
|
|
|
|
* administrator may have an overriding control that turns the ASCONF feature
|
|
|
|
* off no matter what setting the socket option may have.
|
|
|
|
* This option expects an integer boolean flag, where a non-zero value turns on
|
|
|
|
* the option, and a zero value turns off the option.
|
|
|
|
* Note. In this implementation, the socket option overrides the default
|
|
|
|
* parameter set by sysctl, as the FreeBSD implementation does as well.
|
|
|
|
*/
|
|
|
|
static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
|
|
|
|
unsigned int optlen)
|
|
|
|
{
|
|
|
|
int val;
|
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
|
|
|
|
|
|
|
if (optlen < sizeof(int))
|
|
|
|
return -EINVAL;
|
|
|
|
if (get_user(val, (int __user *)optval))
|
|
|
|
return -EFAULT;
|
|
|
|
if (!sctp_is_ep_boundall(sk) && val)
|
|
|
|
return -EINVAL;
|
|
|
|
if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
|
|
|
|
return 0;
|
|
|
|
|
2015-06-12 21:16:41 +08:00
|
|
|
spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
|
2011-04-26 19:16:31 +08:00
|
|
|
if (val == 0 && sp->do_auto_asconf) {
|
|
|
|
list_del(&sp->auto_asconf_list);
|
|
|
|
sp->do_auto_asconf = 0;
|
|
|
|
} else if (val && !sp->do_auto_asconf) {
|
|
|
|
list_add_tail(&sp->auto_asconf_list,
|
2012-08-06 16:42:04 +08:00
|
|
|
&sock_net(sk)->sctp.auto_asconf_splist);
|
2011-04-26 19:16:31 +08:00
|
|
|
sp->do_auto_asconf = 1;
|
|
|
|
}
|
2015-06-12 21:16:41 +08:00
|
|
|
spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
|
2011-04-26 19:16:31 +08:00
|
|
|
return 0;
|
|
|
|
}
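As the comment above notes, the option takes a plain integer boolean. A
hedged user-space sketch (assuming "fd" is an SCTP socket bound to the
wildcard address, since the handler rejects enabling auto ASCONF otherwise):

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/sctp.h>

	/* Sketch: ask the stack to generate ASCONF chunks automatically. */
	static int enable_auto_asconf(int fd)
	{
		int on = 1;

		return setsockopt(fd, IPPROTO_SCTP, SCTP_AUTO_ASCONF,
				  &on, sizeof(on));
	}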
|
|
|
|
|
2012-07-21 15:56:07 +08:00
|
|
|
/*
|
|
|
|
* SCTP_PEER_ADDR_THLDS
|
|
|
|
*
|
|
|
|
* This option allows us to alter the partially failed threshold for one or all
|
|
|
|
* transports in an association. See Section 6.1 of:
|
|
|
|
* http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
|
|
|
|
*/
|
|
|
|
static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
|
|
|
|
char __user *optval,
|
|
|
|
unsigned int optlen)
|
|
|
|
{
|
|
|
|
struct sctp_paddrthlds val;
|
|
|
|
struct sctp_transport *trans;
|
|
|
|
struct sctp_association *asoc;
|
|
|
|
|
|
|
|
if (optlen < sizeof(struct sctp_paddrthlds))
|
|
|
|
return -EINVAL;
|
|
|
|
if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval,
|
|
|
|
sizeof(struct sctp_paddrthlds)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
2019-01-28 15:08:29 +08:00
|
|
|
if (!sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
|
|
|
|
trans = sctp_addr_id2transport(sk, &val.spt_address,
|
|
|
|
val.spt_assoc_id);
|
|
|
|
if (!trans)
|
2012-07-21 15:56:07 +08:00
|
|
|
return -ENOENT;
|
2019-01-28 15:08:29 +08:00
|
|
|
|
|
|
|
if (val.spt_pathmaxrxt)
|
|
|
|
trans->pathmaxrxt = val.spt_pathmaxrxt;
|
|
|
|
trans->pf_retrans = val.spt_pathpfthld;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
asoc = sctp_id2assoc(sk, val.spt_assoc_id);
|
|
|
|
if (!asoc && val.spt_assoc_id != SCTP_FUTURE_ASSOC &&
|
|
|
|
sctp_style(sk, UDP))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (asoc) {
|
2012-07-21 15:56:07 +08:00
|
|
|
list_for_each_entry(trans, &asoc->peer.transport_addr_list,
|
|
|
|
transports) {
|
|
|
|
if (val.spt_pathmaxrxt)
|
|
|
|
trans->pathmaxrxt = val.spt_pathmaxrxt;
|
|
|
|
trans->pf_retrans = val.spt_pathpfthld;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (val.spt_pathmaxrxt)
|
|
|
|
asoc->pathmaxrxt = val.spt_pathmaxrxt;
|
|
|
|
asoc->pf_retrans = val.spt_pathpfthld;
|
|
|
|
} else {
|
2019-01-28 15:08:29 +08:00
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
2012-07-21 15:56:07 +08:00
|
|
|
|
|
|
|
if (val.spt_pathmaxrxt)
|
2019-01-28 15:08:29 +08:00
|
|
|
sp->pathmaxrxt = val.spt_pathmaxrxt;
|
|
|
|
sp->pf_retrans = val.spt_pathpfthld;
|
2012-07-21 15:56:07 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
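A hedged user-space sketch of the option handled above. The field names are
taken from the handler; the layout is assumed to match the kernel's uapi
struct sctp_paddrthlds, and SCTP_FUTURE_ASSOC is assumed to be available in
the headers:

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/sctp.h>

	/* Sketch: set socket-wide Path.Max.Retrans and the PF threshold. */
	static int set_paddr_thresholds(int fd)
	{
		struct sctp_paddrthlds thlds;

		memset(&thlds, 0, sizeof(thlds));
		thlds.spt_assoc_id   = SCTP_FUTURE_ASSOC;
		thlds.spt_pathmaxrxt = 5;	/* Path.Max.Retrans */
		thlds.spt_pathpfthld = 2;	/* potentially-failed threshold */
		/* spt_address left zeroed: apply to the socket/association
		 * rather than to a single transport.
		 */

		return setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS,
				  &thlds, sizeof(thlds));
	}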
|
|
|
|
|
2014-07-13 02:30:37 +08:00
|
|
|
static int sctp_setsockopt_recvrcvinfo(struct sock *sk,
|
|
|
|
char __user *optval,
|
|
|
|
unsigned int optlen)
|
|
|
|
{
|
|
|
|
int val;
|
|
|
|
|
|
|
|
if (optlen < sizeof(int))
|
|
|
|
return -EINVAL;
|
|
|
|
if (get_user(val, (int __user *) optval))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-07-13 02:30:38 +08:00
|
|
|
static int sctp_setsockopt_recvnxtinfo(struct sock *sk,
|
|
|
|
char __user *optval,
|
|
|
|
unsigned int optlen)
|
|
|
|
{
|
|
|
|
int val;
|
|
|
|
|
|
|
|
if (optlen < sizeof(int))
|
|
|
|
return -EINVAL;
|
|
|
|
if (get_user(val, (int __user *) optval))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-07-09 19:47:40 +08:00
|
|
|
static int sctp_setsockopt_pr_supported(struct sock *sk,
|
|
|
|
char __user *optval,
|
|
|
|
unsigned int optlen)
|
|
|
|
{
|
|
|
|
struct sctp_assoc_value params;
|
2019-01-28 15:08:30 +08:00
|
|
|
struct sctp_association *asoc;
|
2016-07-09 19:47:40 +08:00
|
|
|
|
|
|
|
if (optlen != sizeof(params))
|
2018-11-18 15:21:53 +08:00
|
|
|
return -EINVAL;
|
2016-07-09 19:47:40 +08:00
|
|
|
|
2018-11-18 15:21:53 +08:00
|
|
|
if (copy_from_user(&params, optval, optlen))
|
|
|
|
return -EFAULT;
|
2016-07-09 19:47:40 +08:00
|
|
|
|
2019-01-28 15:08:30 +08:00
|
|
|
asoc = sctp_id2assoc(sk, params.assoc_id);
|
|
|
|
if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
|
|
|
|
sctp_style(sk, UDP))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2018-11-18 15:21:53 +08:00
|
|
|
sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value;
|
2016-07-09 19:47:40 +08:00
|
|
|
|
2018-11-18 15:21:53 +08:00
|
|
|
return 0;
|
2016-07-09 19:47:40 +08:00
|
|
|
}
|
|
|
|
|
2016-07-09 19:47:41 +08:00
|
|
|
static int sctp_setsockopt_default_prinfo(struct sock *sk,
|
|
|
|
char __user *optval,
|
|
|
|
unsigned int optlen)
|
|
|
|
{
|
|
|
|
struct sctp_default_prinfo info;
|
|
|
|
struct sctp_association *asoc;
|
|
|
|
int retval = -EINVAL;
|
|
|
|
|
|
|
|
if (optlen != sizeof(info))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (copy_from_user(&info, optval, sizeof(info))) {
|
|
|
|
retval = -EFAULT;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (info.pr_policy & ~SCTP_PR_SCTP_MASK)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (info.pr_policy == SCTP_PR_SCTP_NONE)
|
|
|
|
info.pr_value = 0;
|
|
|
|
|
|
|
|
asoc = sctp_id2assoc(sk, info.pr_assoc_id);
|
|
|
|
if (asoc) {
|
|
|
|
SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy);
|
|
|
|
asoc->default_timetolive = info.pr_value;
|
|
|
|
} else if (!info.pr_assoc_id) {
|
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
|
|
|
|
|
|
|
SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy);
|
|
|
|
sp->default_timetolive = info.pr_value;
|
|
|
|
} else {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
retval = 0;
|
|
|
|
|
|
|
|
out:
|
|
|
|
return retval;
|
|
|
|
}
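A hedged user-space sketch of setting the socket-wide default PR-SCTP policy
via the handler above (field names from the handler; SCTP_PR_SCTP_TTL and
struct sctp_default_prinfo are assumed to come from <netinet/sctp.h>):

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/sctp.h>

	/* Sketch: abandon unsent user messages after a 3000 ms lifetime. */
	static int set_default_prinfo(int fd)
	{
		struct sctp_default_prinfo info;

		memset(&info, 0, sizeof(info));
		info.pr_assoc_id = 0;			/* socket defaults */
		info.pr_policy   = SCTP_PR_SCTP_TTL;	/* timed reliability */
		info.pr_value    = 3000;		/* lifetime in ms */

		return setsockopt(fd, IPPROTO_SCTP, SCTP_DEFAULT_PRINFO,
				  &info, sizeof(info));
	}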
|
|
|
|
|
2017-03-10 12:11:12 +08:00
|
|
|
static int sctp_setsockopt_reconfig_supported(struct sock *sk,
|
|
|
|
char __user *optval,
|
|
|
|
unsigned int optlen)
|
|
|
|
{
|
|
|
|
struct sctp_assoc_value params;
|
|
|
|
struct sctp_association *asoc;
|
|
|
|
int retval = -EINVAL;
|
|
|
|
|
|
|
|
if (optlen != sizeof(params))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (copy_from_user(&params, optval, optlen)) {
|
|
|
|
retval = -EFAULT;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
asoc = sctp_id2assoc(sk, params.assoc_id);
|
2019-01-28 15:08:31 +08:00
|
|
|
if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
|
|
|
|
sctp_style(sk, UDP))
|
2017-03-10 12:11:12 +08:00
|
|
|
goto out;
|
2019-01-28 15:08:31 +08:00
|
|
|
|
|
|
|
if (asoc)
|
|
|
|
asoc->reconf_enable = !!params.assoc_value;
|
|
|
|
else
|
|
|
|
sctp_sk(sk)->ep->reconf_enable = !!params.assoc_value;
|
2017-03-10 12:11:12 +08:00
|
|
|
|
|
|
|
retval = 0;
|
|
|
|
|
|
|
|
out:
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2017-01-18 00:44:46 +08:00
|
|
|
static int sctp_setsockopt_enable_strreset(struct sock *sk,
|
|
|
|
char __user *optval,
|
|
|
|
unsigned int optlen)
|
|
|
|
{
|
|
|
|
struct sctp_assoc_value params;
|
|
|
|
struct sctp_association *asoc;
|
|
|
|
int retval = -EINVAL;
|
|
|
|
|
|
|
|
if (optlen != sizeof(params))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (copy_from_user(&params, optval, optlen)) {
|
|
|
|
retval = -EFAULT;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (params.assoc_value & (~SCTP_ENABLE_STRRESET_MASK))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
asoc = sctp_id2assoc(sk, params.assoc_id);
|
|
|
|
if (asoc) {
|
|
|
|
asoc->strreset_enable = params.assoc_value;
|
|
|
|
} else if (!params.assoc_id) {
|
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
|
|
|
|
|
|
|
sp->ep->strreset_enable = params.assoc_value;
|
|
|
|
} else {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
retval = 0;
|
|
|
|
|
|
|
|
out:
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2017-01-18 00:44:47 +08:00
|
|
|
static int sctp_setsockopt_reset_streams(struct sock *sk,
|
|
|
|
char __user *optval,
|
|
|
|
unsigned int optlen)
|
|
|
|
{
|
|
|
|
struct sctp_reset_streams *params;
|
|
|
|
struct sctp_association *asoc;
|
|
|
|
int retval = -EINVAL;
|
|
|
|
|
2017-12-10 15:40:51 +08:00
|
|
|
if (optlen < sizeof(*params))
|
2017-01-18 00:44:47 +08:00
|
|
|
return -EINVAL;
|
2018-01-09 05:02:28 +08:00
|
|
|
/* srs_number_streams is u16, so optlen can't be bigger than this. */
|
|
|
|
optlen = min_t(unsigned int, optlen, USHRT_MAX +
|
|
|
|
sizeof(__u16) * sizeof(*params));
|
2017-01-18 00:44:47 +08:00
|
|
|
|
|
|
|
params = memdup_user(optval, optlen);
|
|
|
|
if (IS_ERR(params))
|
|
|
|
return PTR_ERR(params);
|
|
|
|
|
2017-12-10 15:40:51 +08:00
|
|
|
if (params->srs_number_streams * sizeof(__u16) >
|
|
|
|
optlen - sizeof(*params))
|
|
|
|
goto out;
|
|
|
|
|
2017-01-18 00:44:47 +08:00
|
|
|
asoc = sctp_id2assoc(sk, params->srs_assoc_id);
|
|
|
|
if (!asoc)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
retval = sctp_send_reset_streams(asoc, params);
|
|
|
|
|
|
|
|
out:
|
|
|
|
kfree(params);
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2017-02-09 01:18:18 +08:00
|
|
|
static int sctp_setsockopt_reset_assoc(struct sock *sk,
|
|
|
|
char __user *optval,
|
|
|
|
unsigned int optlen)
|
|
|
|
{
|
|
|
|
struct sctp_association *asoc;
|
|
|
|
sctp_assoc_t associd;
|
|
|
|
int retval = -EINVAL;
|
|
|
|
|
|
|
|
if (optlen != sizeof(associd))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (copy_from_user(&associd, optval, optlen)) {
|
|
|
|
retval = -EFAULT;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
asoc = sctp_id2assoc(sk, associd);
|
|
|
|
if (!asoc)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
retval = sctp_send_reset_assoc(asoc);
|
|
|
|
|
|
|
|
out:
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2017-02-09 01:18:20 +08:00
|
|
|
static int sctp_setsockopt_add_streams(struct sock *sk,
|
|
|
|
char __user *optval,
|
|
|
|
unsigned int optlen)
|
|
|
|
{
|
|
|
|
struct sctp_association *asoc;
|
|
|
|
struct sctp_add_streams params;
|
|
|
|
int retval = -EINVAL;
|
|
|
|
|
|
|
|
if (optlen != sizeof(params))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (copy_from_user(&params, optval, optlen)) {
|
|
|
|
retval = -EFAULT;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
asoc = sctp_id2assoc(sk, params.sas_assoc_id);
|
|
|
|
if (!asoc)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
retval = sctp_send_add_streams(asoc, &params);
|
|
|
|
|
|
|
|
out:
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2017-10-04 06:20:14 +08:00
|
|
|
static int sctp_setsockopt_scheduler(struct sock *sk,
|
|
|
|
char __user *optval,
|
|
|
|
unsigned int optlen)
|
|
|
|
{
|
|
|
|
struct sctp_association *asoc;
|
|
|
|
struct sctp_assoc_value params;
|
|
|
|
int retval = -EINVAL;
|
|
|
|
|
|
|
|
if (optlen < sizeof(params))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
optlen = sizeof(params);
|
|
|
|
if (copy_from_user(&params, optval, optlen)) {
|
|
|
|
retval = -EFAULT;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (params.assoc_value > SCTP_SS_MAX)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
asoc = sctp_id2assoc(sk, params.assoc_id);
|
|
|
|
if (!asoc)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
retval = sctp_sched_set_sched(asoc, params.assoc_value);
|
|
|
|
|
|
|
|
out:
|
|
|
|
return retval;
|
|
|
|
}
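A hedged user-space sketch of selecting a stream scheduler through the
handler above. It assumes a one-to-one (TCP-style) socket with an
established association, for which the assoc id is ignored, and that
SCTP_STREAM_SCHEDULER and SCTP_SS_PRIO are provided by <netinet/sctp.h>:

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/sctp.h>

	/* Sketch: switch the association to the priority-based scheduler. */
	static int use_priority_scheduler(int fd)
	{
		struct sctp_assoc_value av;

		memset(&av, 0, sizeof(av));
		av.assoc_id    = 0;		/* ignored on TCP-style sockets */
		av.assoc_value = SCTP_SS_PRIO;

		return setsockopt(fd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER,
				  &av, sizeof(av));
	}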
|
|
|
|
|
2017-10-04 06:20:15 +08:00
|
|
|
static int sctp_setsockopt_scheduler_value(struct sock *sk,
|
|
|
|
char __user *optval,
|
|
|
|
unsigned int optlen)
|
|
|
|
{
|
|
|
|
struct sctp_stream_value params;
|
2019-01-28 15:08:33 +08:00
|
|
|
struct sctp_association *asoc;
|
2017-10-04 06:20:15 +08:00
|
|
|
int retval = -EINVAL;
|
|
|
|
|
|
|
|
if (optlen < sizeof(params))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
optlen = sizeof(params);
|
|
|
|
if (copy_from_user(&params, optval, optlen)) {
|
|
|
|
retval = -EFAULT;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
asoc = sctp_id2assoc(sk, params.assoc_id);
|
2019-01-28 15:08:33 +08:00
|
|
|
if (!asoc && params.assoc_id != SCTP_CURRENT_ASSOC &&
|
|
|
|
sctp_style(sk, UDP))
|
2017-10-04 06:20:15 +08:00
|
|
|
goto out;
|
|
|
|
|
2019-01-28 15:08:33 +08:00
|
|
|
if (asoc) {
|
|
|
|
retval = sctp_sched_set_value(asoc, params.stream_id,
|
|
|
|
params.stream_value, GFP_KERNEL);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
retval = 0;
|
|
|
|
|
|
|
|
list_for_each_entry(asoc, &sctp_sk(sk)->ep->asocs, asocs) {
|
|
|
|
int ret = sctp_sched_set_value(asoc, params.stream_id,
|
|
|
|
params.stream_value, GFP_KERNEL);
|
|
|
|
if (ret && !retval) /* try to return the 1st error. */
|
|
|
|
retval = ret;
|
|
|
|
}
|
2017-10-04 06:20:15 +08:00
|
|
|
|
|
|
|
out:
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2017-12-08 21:03:58 +08:00
|
|
|
static int sctp_setsockopt_interleaving_supported(struct sock *sk,
|
|
|
|
char __user *optval,
|
|
|
|
unsigned int optlen)
|
|
|
|
{
|
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
|
|
|
struct sctp_assoc_value params;
|
2019-01-28 15:08:32 +08:00
|
|
|
struct sctp_association *asoc;
|
2017-12-08 21:03:58 +08:00
|
|
|
int retval = -EINVAL;
|
|
|
|
|
|
|
|
if (optlen < sizeof(params))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
optlen = sizeof(params);
|
|
|
|
if (copy_from_user(&params, optval, optlen)) {
|
|
|
|
retval = -EFAULT;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2019-01-28 15:08:32 +08:00
|
|
|
asoc = sctp_id2assoc(sk, params.assoc_id);
|
|
|
|
if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
|
|
|
|
sctp_style(sk, UDP))
|
2017-12-08 21:03:58 +08:00
|
|
|
goto out;
|
|
|
|
|
2019-01-28 15:08:32 +08:00
|
|
|
if (!sock_net(sk)->sctp.intl_enable || !sp->frag_interleave) {
|
2017-12-08 21:03:58 +08:00
|
|
|
retval = -EPERM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
sp->strm_interleave = !!params.assoc_value;
|
|
|
|
|
|
|
|
retval = 0;
|
|
|
|
|
|
|
|
out:
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2018-06-28 15:31:00 +08:00
|
|
|
static int sctp_setsockopt_reuse_port(struct sock *sk, char __user *optval,
|
|
|
|
unsigned int optlen)
|
|
|
|
{
|
|
|
|
int val;
|
|
|
|
|
|
|
|
if (!sctp_style(sk, TCP))
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
if (sctp_sk(sk)->ep->base.bind_addr.port)
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
if (optlen < sizeof(int))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (get_user(val, (int __user *)optval))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
sctp_sk(sk)->reuse = !!val;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-11-18 16:08:54 +08:00
|
|
|
static int sctp_setsockopt_event(struct sock *sk, char __user *optval,
|
|
|
|
unsigned int optlen)
|
|
|
|
{
|
|
|
|
struct sctp_association *asoc;
|
|
|
|
struct sctp_ulpevent *event;
|
|
|
|
struct sctp_event param;
|
|
|
|
int retval = 0;
|
|
|
|
|
|
|
|
if (optlen < sizeof(param)) {
|
|
|
|
retval = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
optlen = sizeof(param);
|
|
|
|
if (copy_from_user(&param, optval, optlen)) {
|
|
|
|
retval = -EFAULT;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (param.se_type < SCTP_SN_TYPE_BASE ||
|
|
|
|
param.se_type > SCTP_SN_TYPE_MAX) {
|
|
|
|
retval = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
asoc = sctp_id2assoc(sk, param.se_assoc_id);
|
|
|
|
if (!asoc) {
|
|
|
|
sctp_ulpevent_type_set(&sctp_sk(sk)->subscribe,
|
|
|
|
param.se_type, param.se_on);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
sctp_ulpevent_type_set(&asoc->subscribe, param.se_type, param.se_on);
|
|
|
|
|
|
|
|
if (param.se_type == SCTP_SENDER_DRY_EVENT && param.se_on) {
|
|
|
|
if (sctp_outq_is_empty(&asoc->outqueue)) {
|
|
|
|
event = sctp_ulpevent_make_sender_dry_event(asoc,
|
|
|
|
GFP_USER | __GFP_NOWARN);
|
|
|
|
if (!event) {
|
|
|
|
retval = -ENOMEM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
asoc->stream.si->enqueue_event(&asoc->ulpq, event);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
return retval;
|
|
|
|
}
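A hedged user-space sketch of the per-event subscription interface handled
above (struct sctp_event and SCTP_EVENT are assumed to be exposed by recent
<netinet/sctp.h> / uapi headers; field names match the handler):

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/sctp.h>

	/* Sketch: subscribe the socket (future associations) to dry events. */
	static int enable_sender_dry_event(int fd)
	{
		struct sctp_event ev;

		memset(&ev, 0, sizeof(ev));
		ev.se_assoc_id = 0;	/* no association: socket-level setting */
		ev.se_type     = SCTP_SENDER_DRY_EVENT;
		ev.se_on       = 1;

		return setsockopt(fd, IPPROTO_SCTP, SCTP_EVENT, &ev, sizeof(ev));
	}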
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* API 6.2 setsockopt(), getsockopt()
|
|
|
|
*
|
|
|
|
* Applications use setsockopt() and getsockopt() to set or retrieve
|
|
|
|
* socket options. Socket options are used to change the default
|
|
|
|
* behavior of sockets calls. They are described in Section 7.
|
|
|
|
*
|
|
|
|
* The syntax is:
|
|
|
|
*
|
|
|
|
* ret = getsockopt(int sd, int level, int optname, void __user *optval,
|
|
|
|
* int __user *optlen);
|
|
|
|
* ret = setsockopt(int sd, int level, int optname, const void __user *optval,
|
|
|
|
* int optlen);
|
|
|
|
*
|
|
|
|
* sd - the socket descriptor.
|
|
|
|
* level - set to IPPROTO_SCTP for all SCTP options.
|
|
|
|
* optname - the option name.
|
|
|
|
* optval - the buffer to store the value of the option.
|
|
|
|
* optlen - the size of the buffer.
|
|
|
|
*/
|
2013-06-17 17:40:05 +08:00
|
|
|
static int sctp_setsockopt(struct sock *sk, int level, int optname,
|
|
|
|
char __user *optval, unsigned int optlen)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
int retval = 0;
|
|
|
|
|
net: sctp: rework debugging framework to use pr_debug and friends
We should get rid of all own SCTP debug printk macros and use the ones
that the kernel offers anyway instead. This makes the code more readable
and conform to the kernel code, and offers all the features of dynamic
debugging that pr_debug() et al. has, such as only turning on/off portions
of debug messages at runtime through debugfs. The runtime cost of having
CONFIG_DYNAMIC_DEBUG enabled, but none of the debug statements printing,
is negligible [1]. If kernel debugging is completely turned off, then these
statements will also compile into "empty" functions.
While we're at it, we also need to change the Kconfig option as it /now/
only refers to the ifdef'ed code portions in outqueue.c that enable further
debugging/tracing of SCTP transaction fields. Also, since SCTP_ASSERT code
was enabled with this Kconfig option and has now been removed, we
transform those code parts into WARNs or, where appropriate, BUG_ONs so
that those bugs can be more easily detected as probably not many people
have SCTP debugging permanently turned on.
To turn on all SCTP debugging, the following steps are needed:
# mount -t debugfs none /sys/kernel/debug
# echo -n 'module sctp +p' > /sys/kernel/debug/dynamic_debug/control
This can be done more fine-grained on a per file, per line basis and others
as described in [2].
[1] https://www.kernel.org/doc/ols/2009/ols2009-pages-39-46.pdf
[2] Documentation/dynamic-debug-howto.txt
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2013-06-29 01:49:40 +08:00
|
|
|
pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* I can hardly begin to describe how wrong this is. This is
|
|
|
|
* so broken as to be worse than useless. The API draft
|
|
|
|
* REALLY is NOT helpful here... I am not convinced that the
|
|
|
|
* semantics of setsockopt() with a level OTHER THAN SOL_SCTP
|
|
|
|
* are at all well-founded.
|
|
|
|
*/
|
|
|
|
if (level != SOL_SCTP) {
|
|
|
|
struct sctp_af *af = sctp_sk(sk)->pf->af;
|
|
|
|
retval = af->setsockopt(sk, level, optname, optval, optlen);
|
|
|
|
goto out_nounlock;
|
|
|
|
}
|
|
|
|
|
2014-01-21 15:44:11 +08:00
|
|
|
lock_sock(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
switch (optname) {
|
|
|
|
case SCTP_SOCKOPT_BINDX_ADD:
|
|
|
|
/* 'optlen' is the size of the addresses buffer. */
|
|
|
|
retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
|
|
|
|
optlen, SCTP_BINDX_ADD_ADDR);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case SCTP_SOCKOPT_BINDX_REM:
|
|
|
|
/* 'optlen' is the size of the addresses buffer. */
|
|
|
|
retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
|
|
|
|
optlen, SCTP_BINDX_REM_ADDR);
|
|
|
|
break;
|
|
|
|
|
2008-05-10 06:14:11 +08:00
|
|
|
case SCTP_SOCKOPT_CONNECTX_OLD:
|
|
|
|
/* 'optlen' is the size of the addresses buffer. */
|
|
|
|
retval = sctp_setsockopt_connectx_old(sk,
|
|
|
|
(struct sockaddr __user *)optval,
|
|
|
|
optlen);
|
|
|
|
break;
|
|
|
|
|
2005-06-21 04:14:57 +08:00
|
|
|
case SCTP_SOCKOPT_CONNECTX:
|
|
|
|
/* 'optlen' is the size of the addresses buffer. */
|
2008-05-10 06:14:11 +08:00
|
|
|
retval = sctp_setsockopt_connectx(sk,
|
|
|
|
(struct sockaddr __user *)optval,
|
|
|
|
optlen);
|
2005-06-21 04:14:57 +08:00
|
|
|
break;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
case SCTP_DISABLE_FRAGMENTS:
|
|
|
|
retval = sctp_setsockopt_disable_fragments(sk, optval, optlen);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case SCTP_EVENTS:
|
|
|
|
retval = sctp_setsockopt_events(sk, optval, optlen);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case SCTP_AUTOCLOSE:
|
|
|
|
retval = sctp_setsockopt_autoclose(sk, optval, optlen);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case SCTP_PEER_ADDR_PARAMS:
|
|
|
|
retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
|
|
|
|
break;
|
|
|
|
|
2011-01-19 06:39:00 +08:00
|
|
|
case SCTP_DELAYED_SACK:
|
2008-05-10 06:13:26 +08:00
|
|
|
retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
|
2005-12-23 03:37:30 +08:00
|
|
|
break;
|
2007-03-24 02:32:00 +08:00
|
|
|
case SCTP_PARTIAL_DELIVERY_POINT:
|
|
|
|
retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen);
|
|
|
|
break;
|
2005-12-23 03:37:30 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
case SCTP_INITMSG:
|
|
|
|
retval = sctp_setsockopt_initmsg(sk, optval, optlen);
|
|
|
|
break;
|
|
|
|
case SCTP_DEFAULT_SEND_PARAM:
|
|
|
|
retval = sctp_setsockopt_default_send_param(sk, optval,
|
|
|
|
optlen);
|
|
|
|
break;
|
2014-07-13 02:30:39 +08:00
|
|
|
case SCTP_DEFAULT_SNDINFO:
|
|
|
|
retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen);
|
|
|
|
break;
|
2005-04-17 06:20:36 +08:00
|
|
|
case SCTP_PRIMARY_ADDR:
|
|
|
|
retval = sctp_setsockopt_primary_addr(sk, optval, optlen);
|
|
|
|
break;
|
|
|
|
case SCTP_SET_PEER_PRIMARY_ADDR:
|
|
|
|
retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen);
|
|
|
|
break;
|
|
|
|
case SCTP_NODELAY:
|
|
|
|
retval = sctp_setsockopt_nodelay(sk, optval, optlen);
|
|
|
|
break;
|
|
|
|
case SCTP_RTOINFO:
|
|
|
|
retval = sctp_setsockopt_rtoinfo(sk, optval, optlen);
|
|
|
|
break;
|
|
|
|
case SCTP_ASSOCINFO:
|
|
|
|
retval = sctp_setsockopt_associnfo(sk, optval, optlen);
|
|
|
|
break;
|
|
|
|
case SCTP_I_WANT_MAPPED_V4_ADDR:
|
|
|
|
retval = sctp_setsockopt_mappedv4(sk, optval, optlen);
|
|
|
|
break;
|
|
|
|
case SCTP_MAXSEG:
|
|
|
|
retval = sctp_setsockopt_maxseg(sk, optval, optlen);
|
|
|
|
break;
|
2006-12-21 08:07:04 +08:00
|
|
|
case SCTP_ADAPTATION_LAYER:
|
|
|
|
retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen);
|
2005-04-17 06:20:36 +08:00
|
|
|
break;
|
2006-12-14 08:34:22 +08:00
|
|
|
case SCTP_CONTEXT:
|
|
|
|
retval = sctp_setsockopt_context(sk, optval, optlen);
|
|
|
|
break;
|
2007-04-21 03:23:15 +08:00
|
|
|
case SCTP_FRAGMENT_INTERLEAVE:
|
|
|
|
retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen);
|
|
|
|
break;
|
2007-03-24 02:34:36 +08:00
|
|
|
case SCTP_MAX_BURST:
|
|
|
|
retval = sctp_setsockopt_maxburst(sk, optval, optlen);
|
|
|
|
break;
|
2007-09-17 10:34:00 +08:00
|
|
|
case SCTP_AUTH_CHUNK:
|
|
|
|
retval = sctp_setsockopt_auth_chunk(sk, optval, optlen);
|
|
|
|
break;
|
|
|
|
case SCTP_HMAC_IDENT:
|
|
|
|
retval = sctp_setsockopt_hmac_ident(sk, optval, optlen);
|
|
|
|
break;
|
|
|
|
case SCTP_AUTH_KEY:
|
|
|
|
retval = sctp_setsockopt_auth_key(sk, optval, optlen);
|
|
|
|
break;
|
|
|
|
case SCTP_AUTH_ACTIVE_KEY:
|
|
|
|
retval = sctp_setsockopt_active_key(sk, optval, optlen);
|
|
|
|
break;
|
|
|
|
case SCTP_AUTH_DELETE_KEY:
|
|
|
|
retval = sctp_setsockopt_del_key(sk, optval, optlen);
|
|
|
|
break;
|
2018-03-14 19:05:32 +08:00
|
|
|
case SCTP_AUTH_DEACTIVATE_KEY:
|
|
|
|
retval = sctp_setsockopt_deactivate_key(sk, optval, optlen);
|
|
|
|
break;
|
2011-04-26 19:16:31 +08:00
|
|
|
case SCTP_AUTO_ASCONF:
|
|
|
|
retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
|
|
|
|
break;
|
2012-07-21 15:56:07 +08:00
|
|
|
case SCTP_PEER_ADDR_THLDS:
|
|
|
|
retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen);
|
|
|
|
break;
|
2014-07-13 02:30:37 +08:00
|
|
|
case SCTP_RECVRCVINFO:
|
|
|
|
retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen);
|
|
|
|
break;
|
2014-07-13 02:30:38 +08:00
|
|
|
case SCTP_RECVNXTINFO:
|
|
|
|
retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen);
|
|
|
|
break;
|
2016-07-09 19:47:40 +08:00
|
|
|
case SCTP_PR_SUPPORTED:
|
|
|
|
retval = sctp_setsockopt_pr_supported(sk, optval, optlen);
|
|
|
|
break;
|
2016-07-09 19:47:41 +08:00
|
|
|
case SCTP_DEFAULT_PRINFO:
|
|
|
|
retval = sctp_setsockopt_default_prinfo(sk, optval, optlen);
|
|
|
|
break;
|
2017-03-10 12:11:12 +08:00
|
|
|
case SCTP_RECONFIG_SUPPORTED:
|
|
|
|
retval = sctp_setsockopt_reconfig_supported(sk, optval, optlen);
|
|
|
|
break;
|
2017-01-18 00:44:46 +08:00
|
|
|
case SCTP_ENABLE_STREAM_RESET:
|
|
|
|
retval = sctp_setsockopt_enable_strreset(sk, optval, optlen);
|
|
|
|
break;
|
2017-01-18 00:44:47 +08:00
|
|
|
case SCTP_RESET_STREAMS:
|
|
|
|
retval = sctp_setsockopt_reset_streams(sk, optval, optlen);
|
|
|
|
break;
|
2017-02-09 01:18:18 +08:00
|
|
|
case SCTP_RESET_ASSOC:
|
|
|
|
retval = sctp_setsockopt_reset_assoc(sk, optval, optlen);
|
|
|
|
break;
|
2017-02-09 01:18:20 +08:00
|
|
|
case SCTP_ADD_STREAMS:
|
|
|
|
retval = sctp_setsockopt_add_streams(sk, optval, optlen);
|
|
|
|
break;
|
2017-10-04 06:20:14 +08:00
|
|
|
case SCTP_STREAM_SCHEDULER:
|
|
|
|
retval = sctp_setsockopt_scheduler(sk, optval, optlen);
|
|
|
|
break;
|
2017-10-04 06:20:15 +08:00
|
|
|
case SCTP_STREAM_SCHEDULER_VALUE:
|
|
|
|
retval = sctp_setsockopt_scheduler_value(sk, optval, optlen);
|
|
|
|
break;
|
2017-12-08 21:03:58 +08:00
|
|
|
case SCTP_INTERLEAVING_SUPPORTED:
|
|
|
|
retval = sctp_setsockopt_interleaving_supported(sk, optval,
|
|
|
|
optlen);
|
|
|
|
break;
|
2018-06-28 15:31:00 +08:00
|
|
|
case SCTP_REUSE_PORT:
|
|
|
|
retval = sctp_setsockopt_reuse_port(sk, optval, optlen);
|
|
|
|
break;
|
2018-11-18 16:08:54 +08:00
|
|
|
case SCTP_EVENT:
|
|
|
|
retval = sctp_setsockopt_event(sk, optval, optlen);
|
|
|
|
break;
|
2005-04-17 06:20:36 +08:00
|
|
|
default:
|
|
|
|
retval = -ENOPROTOOPT;
|
|
|
|
break;
|
2007-04-21 08:09:22 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2014-01-21 15:44:11 +08:00
|
|
|
release_sock(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
out_nounlock:
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* API 3.1.6 connect() - UDP Style Syntax
|
|
|
|
*
|
|
|
|
* An application may use the connect() call in the UDP model to initiate an
|
|
|
|
* association without sending data.
|
|
|
|
*
|
|
|
|
* The syntax is:
|
|
|
|
*
|
|
|
|
* ret = connect(int sd, const struct sockaddr *nam, socklen_t len);
|
|
|
|
*
|
|
|
|
* sd: the socket descriptor to have a new association added to.
|
|
|
|
*
|
|
|
|
* nam: the address structure (either struct sockaddr_in or struct
|
|
|
|
* sockaddr_in6 defined in RFC2553 [7]).
|
|
|
|
*
|
|
|
|
* len: the size of the address.
|
|
|
|
*/
|
2013-06-17 17:40:05 +08:00
|
|
|
static int sctp_connect(struct sock *sk, struct sockaddr *addr,
|
sctp: fix the issue that flags are ignored when using kernel_connect
Now sctp uses inet_dgram_connect as its proto_ops .connect, and the flags
param can't be passed into its proto .connect where this flags is really
needed.
sctp works around it by getting flags from socket file in __sctp_connect.
It works for connecting from userspace, as inherently the user sock has
socket file and it passes f_flags as the flags param into the proto_ops
.connect.
However, the sock created by sock_create_kern doesn't have a socket file,
and it passes the flags (like O_NONBLOCK) by using the flags param in
kernel_connect, which calls proto_ops .connect later.
So to fix it, this patch defines a new proto_ops .connect for sctp,
sctp_inet_connect, which calls __sctp_connect() directly with this
flags param. After this, the sctp's proto .connect can be removed.
Note that sctp_inet_connect doesn't need to do some checks that are not
needed for sctp, which makes thing better than with inet_dgram_connect.
Suggested-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: Xin Long <lucien.xin@gmail.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Reviewed-by: Michal Kubecek <mkubecek@suse.cz>
Signed-off-by: David S. Miller <davem@davemloft.net>
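A rough in-kernel sketch of the caller this message is about (a hypothetical
helper, not code from this file; it assumes the usual declarations of
sock_create_kern(), kernel_connect() and init_net from net/net_namespace.h
and linux/net.h):

	/* Sketch: a kernel socket has no struct file, so O_NONBLOCK can only
	 * reach SCTP through the flags argument of kernel_connect(), which the
	 * new sctp_inet_connect() below finally passes on to __sctp_connect().
	 */
	static int sctp_kernel_connect_sketch(struct sockaddr *addr, int addrlen)
	{
		struct socket *sock;
		int err;

		err = sock_create_kern(&init_net, addr->sa_family, SOCK_STREAM,
				       IPPROTO_SCTP, &sock);
		if (err)
			return err;

		err = kernel_connect(sock, addr, addrlen, O_NONBLOCK);
		if (err && err != -EINPROGRESS)
			sock_release(sock);

		return err;
	}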
2018-05-20 16:39:10 +08:00
|
|
|
int addr_len, int flags)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2018-05-20 16:39:10 +08:00
|
|
|
struct inet_sock *inet = inet_sk(sk);
|
2005-06-21 04:14:57 +08:00
|
|
|
struct sctp_af *af;
|
2018-05-20 16:39:10 +08:00
|
|
|
int err = 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2014-01-21 15:44:11 +08:00
|
|
|
lock_sock(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-06-29 01:49:40 +08:00
|
|
|
pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
|
|
|
|
addr, addr_len);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-05-20 16:39:10 +08:00
|
|
|
/* We may need to bind the socket. */
|
|
|
|
if (!inet->inet_num) {
|
|
|
|
if (sk->sk_prot->get_port(sk, 0)) {
|
|
|
|
release_sock(sk);
|
|
|
|
return -EAGAIN;
|
|
|
|
}
|
|
|
|
inet->inet_sport = htons(inet->inet_num);
|
|
|
|
}
|
|
|
|
|
2005-06-21 04:14:57 +08:00
|
|
|
/* Validate addr_len before calling common connect/connectx routine. */
|
|
|
|
af = sctp_get_af_specific(addr->sa_family);
|
|
|
|
if (!af || addr_len < af->sockaddr_len) {
|
|
|
|
err = -EINVAL;
|
|
|
|
} else {
|
|
|
|
/* Pass correct addr len to common routine (so it knows there
|
|
|
|
* is only one address being passed).
|
|
|
|
*/
|
2018-05-20 16:39:10 +08:00
|
|
|
err = __sctp_connect(sk, addr, af->sockaddr_len, flags, NULL);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2014-01-21 15:44:11 +08:00
|
|
|
release_sock(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
/* From commit "sctp: fix the issue that flags are ignored when using
 * kernel_connect":
 *
 * SCTP used inet_dgram_connect as its proto_ops .connect, so the flags
 * parameter could not reach its proto .connect, where it is actually
 * needed.  SCTP worked around this by taking the flags from the socket
 * file in __sctp_connect().  That works for connects from user space,
 * since a user sock always has a socket file and passes f_flags as the
 * flags parameter into proto_ops .connect.
 *
 * A sock created by sock_create_kern(), however, has no socket file; it
 * passes flags such as O_NONBLOCK via the flags parameter of
 * kernel_connect(), which calls proto_ops .connect.
 *
 * The fix is the function below, sctp_inet_connect, a dedicated proto_ops
 * .connect for SCTP that calls __sctp_connect() with the flags parameter
 * directly, after which SCTP's proto .connect could be removed.  It also
 * skips the checks that inet_dgram_connect performs but SCTP does not
 * need, which makes things better than with inet_dgram_connect.
 */
int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
		      int addr_len, int flags)
{
	if (addr_len < sizeof(uaddr->sa_family))
		return -EINVAL;

	if (uaddr->sa_family == AF_UNSPEC)
		return -EOPNOTSUPP;

	return sctp_connect(sock->sk, uaddr, addr_len, flags);
}

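/* A minimal in-kernel sketch (not part of this file) of the path the commit
 * quoted above sctp_inet_connect() describes: a kernel socket has no struct
 * file, so O_NONBLOCK can only reach this code through the flags argument of
 * kernel_connect().  The address and port below are illustrative only.
 *
 *	struct sockaddr_in daddr = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(5000),
 *		.sin_addr   = { .s_addr = htonl(INADDR_LOOPBACK) },
 *	};
 *	struct socket *sock;
 *	int err;
 *
 *	err = sock_create_kern(&init_net, AF_INET, SOCK_STREAM,
 *			       IPPROTO_SCTP, &sock);
 *	if (!err)
 *		err = kernel_connect(sock, (struct sockaddr *)&daddr,
 *				     sizeof(daddr), O_NONBLOCK);
 */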
/* FIXME: Write comments. */
static int sctp_disconnect(struct sock *sk, int flags)
{
	return -EOPNOTSUPP; /* STUB */
}

/* 4.1.4 accept() - TCP Style Syntax
 *
 * Applications use accept() call to remove an established SCTP
 * association from the accept queue of the endpoint.  A new socket
 * descriptor will be returned from accept() to represent the newly
 * formed association.
 */
static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
{
	struct sctp_sock *sp;
	struct sctp_endpoint *ep;
	struct sock *newsk = NULL;
	struct sctp_association *asoc;
	long timeo;
	int error = 0;

	lock_sock(sk);

	sp = sctp_sk(sk);
	ep = sp->ep;

	if (!sctp_style(sk, TCP)) {
		error = -EOPNOTSUPP;
		goto out;
	}

	if (!sctp_sstate(sk, LISTENING)) {
		error = -EINVAL;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	error = sctp_wait_for_accept(sk, timeo);
	if (error)
		goto out;

	/* We treat the list of associations on the endpoint as the accept
	 * queue and pick the first association on the list.
	 */
	asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);

	newsk = sp->pf->create_accept_sk(sk, asoc, kern);
	if (!newsk) {
		error = -ENOMEM;
		goto out;
	}

	/* Populate the fields of the newsk from the oldsk and migrate the
	 * asoc to the newsk.
	 */
	sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);

out:
	release_sock(sk);
	*err = error;
	return newsk;
}

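/* A user-space sketch (assuming the usual sockets API headers; not part of
 * this file) of the TCP-style accept() described above.  Error handling is
 * omitted and the port is illustrative.
 *
 *	int lfd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *	struct sockaddr_in a = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(5000),
 *		.sin_addr   = { .s_addr = htonl(INADDR_ANY) },
 *	};
 *
 *	bind(lfd, (struct sockaddr *)&a, sizeof(a));
 *	listen(lfd, 5);
 *	int cfd = accept(lfd, NULL, NULL);	// one fd per SCTP association
 *	close(cfd);
 *	close(lfd);
 */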
/* The SCTP ioctl handler. */
static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	lock_sock(sk);

	/*
	 * SEQPACKET-style sockets in LISTENING state are valid, for
	 * SCTP, so only discard TCP-style sockets in LISTENING state.
	 */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
		goto out;

	switch (cmd) {
	case SIOCINQ: {
		struct sk_buff *skb;
		unsigned int amount = 0;

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		}
		rc = put_user(amount, (int __user *)arg);
		break;
	}
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	release_sock(sk);
	return rc;
}

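/* A user-space sketch (not part of this file) of the SIOCINQ handling above:
 * the returned count is the length of the skb at the head of the receive
 * queue, i.e. of the next message recvmsg() would return.  SIOCINQ comes
 * from <linux/sockios.h>.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	int avail = 0;
 *
 *	if (ioctl(fd, SIOCINQ, &avail) == 0)
 *		printf("next message: %d bytes\n", avail);
 */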
/* From commit "net: sctp: rework debugging framework to use pr_debug and
 * friends": SCTP's private debug printk macros were dropped in favour of the
 * kernel-wide pr_debug() et al, which are more readable and gain all of the
 * dynamic debug features (enabling and disabling individual statements at
 * runtime through debugfs) at negligible runtime cost while nothing is
 * printing.  To turn on all SCTP debugging:
 *
 *   # mount -t debugfs none /sys/kernel/debug
 *   # echo -n 'module sctp +p' > /sys/kernel/debug/dynamic_debug/control
 *
 * This can also be done per file or per line, see
 * Documentation/dynamic-debug-howto.txt.
 */

/* This is the function which gets called during socket creation to
 * initialize the SCTP-specific portion of the sock.
 * The sock structure should already be zero-filled memory.
 */
static int sctp_init_sock(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;

	pr_debug("%s: sk:%p\n", __func__, sk);

	sp = sctp_sk(sk);

	/* Initialize the SCTP per socket area. */
	switch (sk->sk_type) {
	case SOCK_SEQPACKET:
		sp->type = SCTP_SOCKET_UDP;
		break;
	case SOCK_STREAM:
		sp->type = SCTP_SOCKET_TCP;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk->sk_gso_type = SKB_GSO_SCTP;

	/* Initialize default send parameters. These parameters can be
	 * modified with the SCTP_DEFAULT_SEND_PARAM socket option.
	 */
	sp->default_stream = 0;
	sp->default_ppid = 0;
	sp->default_flags = 0;
	sp->default_context = 0;
	sp->default_timetolive = 0;

	sp->default_rcv_context = 0;
	sp->max_burst = net->sctp.max_burst;

	sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg;

	/* Initialize default setup parameters. These parameters
	 * can be modified with the SCTP_INITMSG socket option or
	 * overridden by the SCTP_INIT CMSG.
	 */
	sp->initmsg.sinit_num_ostreams   = sctp_max_outstreams;
	sp->initmsg.sinit_max_instreams  = sctp_max_instreams;
	sp->initmsg.sinit_max_attempts   = net->sctp.max_retrans_init;
	sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;

	/* Initialize default RTO related parameters. These parameters can
	 * be modified with the SCTP_RTOINFO socket option.
	 */
	sp->rtoinfo.srto_initial = net->sctp.rto_initial;
	sp->rtoinfo.srto_max     = net->sctp.rto_max;
	sp->rtoinfo.srto_min     = net->sctp.rto_min;

	/* Initialize default association related parameters. These parameters
	 * can be modified with the SCTP_ASSOCINFO socket option.
	 */
	sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association;
	sp->assocparams.sasoc_number_peer_destinations = 0;
	sp->assocparams.sasoc_peer_rwnd = 0;
	sp->assocparams.sasoc_local_rwnd = 0;
	sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;

	/* Initialize default event subscriptions. By default, all the
	 * options are off.
	 */
	sp->subscribe = 0;

	/* Default Peer Address Parameters.  These defaults can
	 * be modified via SCTP_PEER_ADDR_PARAMS
	 */
	sp->hbinterval  = net->sctp.hb_interval;
	sp->pathmaxrxt  = net->sctp.max_retrans_path;
	sp->pf_retrans  = net->sctp.pf_retrans;
	sp->pathmtu     = 0; /* allow default discovery */
	sp->sackdelay   = net->sctp.sack_timeout;
	sp->sackfreq    = 2;
	sp->param_flags = SPP_HB_ENABLE |
			  SPP_PMTUD_ENABLE |
			  SPP_SACKDELAY_ENABLE;

	/* If enabled no SCTP message fragmentation will be performed.
	 * Configure through SCTP_DISABLE_FRAGMENTS socket option.
	 */
	sp->disable_fragments = 0;

	/* Enable Nagle algorithm by default.  */
	sp->nodelay = 0;

	sp->recvrcvinfo = 0;
	sp->recvnxtinfo = 0;

	/* Enable by default. */
	sp->v4mapped = 1;

	/* Auto-close idle associations after the configured
	 * number of seconds.  A value of 0 disables this
	 * feature.  Configure through the SCTP_AUTOCLOSE socket option,
	 * for UDP-style sockets only.
	 */
	sp->autoclose = 0;

	/* User specified fragmentation limit. */
	sp->user_frag = 0;

	sp->adaptation_ind = 0;

	sp->pf = sctp_get_pf_specific(sk->sk_family);

	/* Control variables for partial data delivery. */
	atomic_set(&sp->pd_mode, 0);
	skb_queue_head_init(&sp->pd_lobby);
	sp->frag_interleave = 0;

	/* Create a per socket endpoint structure.  Even if we
	 * change the data structure relationships, this may still
	 * be useful for storing pre-connect address information.
	 */
	sp->ep = sctp_endpoint_new(sk, GFP_KERNEL);
	if (!sp->ep)
		return -ENOMEM;

	sp->hmac = NULL;

	sk->sk_destruct = sctp_destruct_sock;

	SCTP_DBG_OBJCNT_INC(sock);

	local_bh_disable();
	sk_sockets_allocated_inc(sk);
	sock_prot_inuse_add(net, sk->sk_prot, 1);

	/* Nothing can fail after this block, otherwise
	 * sctp_destroy_sock() will be called without addr_wq_lock held
	 */
	if (net->sctp.default_auto_asconf) {
		spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
		list_add_tail(&sp->auto_asconf_list,
			      &net->sctp.auto_asconf_splist);
		sp->do_auto_asconf = 1;
		spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
	} else {
		sp->do_auto_asconf = 0;
	}

	local_bh_enable();

	return 0;
}

/* From commit "net: sctp: fix NULL pointer dereference in socket destruction":
 * a stress test that rapidly allocates, binds, listens on and kills SCTP
 * sockets (with /proc/sys/net/sctp/auth_enable set to 1) hit a NULL pointer
 * dereference in sctp_endpoint_free() via sctp_destroy_sock().  The cause:
 * in sctp_endpoint_init() the sctp_auth_init_hmacs() path can fail when
 * crypto_alloc_hash() returns -EINTR from crypto_larval_wait(), so the
 * socket is released through sk_common_release() and sctp_destroy_sock()
 * while the endpoint is still NULL.  In that case there is nothing to clean
 * up and the destruction handler must simply bail out early.
 */

/* Cleanup any SCTP per socket resources. Must be called with
 * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
 */
static void sctp_destroy_sock(struct sock *sk)
{
	struct sctp_sock *sp;

	pr_debug("%s: sk:%p\n", __func__, sk);

	/* Release our hold on the endpoint. */
	sp = sctp_sk(sk);
	/* This could happen during socket init, thus we bail out
	 * early, since the rest of the below is not setup either.
	 */
	if (sp->ep == NULL)
		return;

	if (sp->do_auto_asconf) {
		sp->do_auto_asconf = 0;
		list_del(&sp->auto_asconf_list);
	}
	sctp_endpoint_free(sp->ep);
	local_bh_disable();
	sk_sockets_allocated_dec(sk);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	local_bh_enable();
}

/* Triggered when there are no references on the socket anymore */
static void sctp_destruct_sock(struct sock *sk)
{
	struct sctp_sock *sp = sctp_sk(sk);

	/* Free up the HMAC transform. */
	crypto_free_shash(sp->hmac);

	inet_sock_destruct(sk);
}

/* API 4.1.7 shutdown() - TCP Style Syntax
 *     int shutdown(int socket, int how);
 *
 *     sd      - the socket descriptor of the association to be closed.
 *     how     - Specifies the type of shutdown.  The values are
 *               as follows:
 *               SHUT_RD
 *                     Disables further receive operations. No SCTP
 *                     protocol action is taken.
 *               SHUT_WR
 *                     Disables further send operations, and initiates
 *                     the SCTP shutdown sequence.
 *               SHUT_RDWR
 *                     Disables further send and receive operations
 *                     and initiates the SCTP shutdown sequence.
 */
static void sctp_shutdown(struct sock *sk, int how)
{
	struct net *net = sock_net(sk);
	struct sctp_endpoint *ep;

	if (!sctp_style(sk, TCP))
		return;

	ep = sctp_sk(sk)->ep;
	if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) {
		struct sctp_association *asoc;

		inet_sk_set_state(sk, SCTP_SS_CLOSING);
		asoc = list_entry(ep->asocs.next,
				  struct sctp_association, asocs);
		sctp_primitive_SHUTDOWN(net, asoc, NULL);
	}
}

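/* A user-space sketch (not part of this file): on a one-to-one (TCP-style)
 * SCTP socket, SHUT_WR is what triggers the SHUTDOWN primitive above, giving
 * a graceful association teardown while data can still be received.
 *
 *	char buf[256];
 *
 *	shutdown(fd, SHUT_WR);			// send SHUTDOWN, keep reading
 *	while (recv(fd, buf, sizeof(buf), 0) > 0)
 *		;				// drain until the peer closes
 *	close(fd);
 */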
int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
		       struct sctp_info *info)
{
	struct sctp_transport *prim;
	struct list_head *pos;
	int mask;

	memset(info, 0, sizeof(*info));
	if (!asoc) {
		struct sctp_sock *sp = sctp_sk(sk);

		info->sctpi_s_autoclose = sp->autoclose;
		info->sctpi_s_adaptation_ind = sp->adaptation_ind;
		info->sctpi_s_pd_point = sp->pd_point;
		info->sctpi_s_nodelay = sp->nodelay;
		info->sctpi_s_disable_fragments = sp->disable_fragments;
		info->sctpi_s_v4mapped = sp->v4mapped;
		info->sctpi_s_frag_interleave = sp->frag_interleave;
		info->sctpi_s_type = sp->type;

		return 0;
	}

	info->sctpi_tag = asoc->c.my_vtag;
	info->sctpi_state = asoc->state;
	info->sctpi_rwnd = asoc->a_rwnd;
	info->sctpi_unackdata = asoc->unack_data;
	info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
	info->sctpi_instrms = asoc->stream.incnt;
	info->sctpi_outstrms = asoc->stream.outcnt;
	list_for_each(pos, &asoc->base.inqueue.in_chunk_list)
		info->sctpi_inqueue++;
	list_for_each(pos, &asoc->outqueue.out_chunk_list)
		info->sctpi_outqueue++;
	info->sctpi_overall_error = asoc->overall_error_count;
	info->sctpi_max_burst = asoc->max_burst;
	info->sctpi_maxseg = asoc->frag_point;
	info->sctpi_peer_rwnd = asoc->peer.rwnd;
	info->sctpi_peer_tag = asoc->c.peer_vtag;

	mask = asoc->peer.ecn_capable << 1;
	mask = (mask | asoc->peer.ipv4_address) << 1;
	mask = (mask | asoc->peer.ipv6_address) << 1;
	mask = (mask | asoc->peer.hostname_address) << 1;
	mask = (mask | asoc->peer.asconf_capable) << 1;
	mask = (mask | asoc->peer.prsctp_capable) << 1;
	mask = (mask | asoc->peer.auth_capable);
	info->sctpi_peer_capable = mask;
	mask = asoc->peer.sack_needed << 1;
	mask = (mask | asoc->peer.sack_generation) << 1;
	mask = (mask | asoc->peer.zero_window_announced);
	info->sctpi_peer_sack = mask;

	info->sctpi_isacks = asoc->stats.isacks;
	info->sctpi_osacks = asoc->stats.osacks;
	info->sctpi_opackets = asoc->stats.opackets;
	info->sctpi_ipackets = asoc->stats.ipackets;
	info->sctpi_rtxchunks = asoc->stats.rtxchunks;
	info->sctpi_outofseqtsns = asoc->stats.outofseqtsns;
	info->sctpi_idupchunks = asoc->stats.idupchunks;
	info->sctpi_gapcnt = asoc->stats.gapcnt;
	info->sctpi_ouodchunks = asoc->stats.ouodchunks;
	info->sctpi_iuodchunks = asoc->stats.iuodchunks;
	info->sctpi_oodchunks = asoc->stats.oodchunks;
	info->sctpi_iodchunks = asoc->stats.iodchunks;
	info->sctpi_octrlchunks = asoc->stats.octrlchunks;
	info->sctpi_ictrlchunks = asoc->stats.ictrlchunks;

	prim = asoc->peer.primary_path;
	memcpy(&info->sctpi_p_address, &prim->ipaddr, sizeof(prim->ipaddr));
	info->sctpi_p_state = prim->state;
	info->sctpi_p_cwnd = prim->cwnd;
	info->sctpi_p_srtt = prim->srtt;
	info->sctpi_p_rto = jiffies_to_msecs(prim->rto);
	info->sctpi_p_hbinterval = prim->hbinterval;
	info->sctpi_p_pathmaxrxt = prim->pathmaxrxt;
	info->sctpi_p_sackdelay = jiffies_to_msecs(prim->sackdelay);
	info->sctpi_p_ssthresh = prim->ssthresh;
	info->sctpi_p_partial_bytes_acked = prim->partial_bytes_acked;
	info->sctpi_p_flight_size = prim->flight_size;
	info->sctpi_p_error = prim->error_count;

	return 0;
}
EXPORT_SYMBOL_GPL(sctp_get_sctp_info);

/* use callback to avoid exporting the core structure */
void sctp_transport_walk_start(struct rhashtable_iter *iter)
{
	rhltable_walk_enter(&sctp_transport_hashtable, iter);

	rhashtable_walk_start(iter);
}

void sctp_transport_walk_stop(struct rhashtable_iter *iter)
{
	rhashtable_walk_stop(iter);
	rhashtable_walk_exit(iter);
}

struct sctp_transport *sctp_transport_get_next(struct net *net,
					       struct rhashtable_iter *iter)
{
	struct sctp_transport *t;

	t = rhashtable_walk_next(iter);
	for (; t; t = rhashtable_walk_next(iter)) {
		if (IS_ERR(t)) {
			if (PTR_ERR(t) == -EAGAIN)
				continue;
			break;
		}

		if (!sctp_transport_hold(t))
			continue;

		if (net_eq(sock_net(t->asoc->base.sk), net) &&
		    t->asoc->peer.primary_path == t)
			break;

		sctp_transport_put(t);
	}

	return t;
}

struct sctp_transport *sctp_transport_get_idx(struct net *net,
					      struct rhashtable_iter *iter,
					      int pos)
{
	struct sctp_transport *t;

	if (!pos)
		return SEQ_START_TOKEN;

	while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
		if (!--pos)
			break;
		sctp_transport_put(t);
	}

	return t;
}

int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
			   void *p) {
	int err = 0;
	int hash = 0;
	struct sctp_ep_common *epb;
	struct sctp_hashbucket *head;

	for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize;
	     hash++, head++) {
		read_lock_bh(&head->lock);
		sctp_for_each_hentry(epb, &head->chain) {
			err = cb(sctp_ep(epb), p);
			if (err)
				break;
		}
		read_unlock_bh(&head->lock);
	}

	return err;
}
EXPORT_SYMBOL_GPL(sctp_for_each_endpoint);

int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
				  struct net *net,
				  const union sctp_addr *laddr,
				  const union sctp_addr *paddr, void *p)
{
	struct sctp_transport *transport;
	int err;

	rcu_read_lock();
	transport = sctp_addrs_lookup_transport(net, laddr, paddr);
	rcu_read_unlock();
	if (!transport)
		return -ENOENT;

	err = cb(transport, p);
	sctp_transport_put(transport);

	return err;
}
EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);

int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
			    int (*cb_done)(struct sctp_transport *, void *),
			    struct net *net, int *pos, void *p) {
	struct rhashtable_iter hti;
	struct sctp_transport *tsp;
	int ret;

again:
	ret = 0;
	sctp_transport_walk_start(&hti);

	tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
	for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
		ret = cb(tsp, p);
		if (ret)
			break;
		(*pos)++;
		sctp_transport_put(tsp);
	}
	sctp_transport_walk_stop(&hti);

	if (ret) {
		if (cb_done && !cb_done(tsp, p)) {
			(*pos)++;
			sctp_transport_put(tsp);
			goto again;
		}
		sctp_transport_put(tsp);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sctp_for_each_transport);

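/* A sketch (not from this file) of how an in-kernel user such as a diag
 * module might drive sctp_for_each_transport(): the callback runs once per
 * association's primary transport in the given struct net, and *pos lets the
 * walk be resumed later.  The counting callback below is illustrative only.
 *
 *	static int count_assoc(struct sctp_transport *tsp, void *p)
 *	{
 *		(*(unsigned int *)p)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr = 0;
 *	int pos = 0;
 *
 *	sctp_for_each_transport(count_assoc, NULL, net, &pos, &nr);
 */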
/* From commit "net: sctp: fix ABI mismatch through sctp_assoc_to_state
 * helper": since day one the user-visible enum sctp_sstat_state in
 * <netinet/sctp.h> (and later the uapi <linux/sctp.h>) has carried a dummy
 * SCTP_EMPTY = 0 entry, so its values are shifted by one against the
 * kernel-internal state enumeration:
 *
 *   User definition:                 Kernel definition:
 *   SCTP_EMPTY             = 0       <removed>
 *   SCTP_CLOSED            = 1       SCTP_STATE_CLOSED            = 0
 *   SCTP_COOKIE_WAIT       = 2       SCTP_STATE_COOKIE_WAIT       = 1
 *   SCTP_COOKIE_ECHOED     = 3       SCTP_STATE_COOKIE_ECHOED     = 2
 *   SCTP_ESTABLISHED       = 4       SCTP_STATE_ESTABLISHED       = 3
 *   SCTP_SHUTDOWN_PENDING  = 5       SCTP_STATE_SHUTDOWN_PENDING  = 4
 *   SCTP_SHUTDOWN_SENT     = 6       SCTP_STATE_SHUTDOWN_SENT     = 5
 *   SCTP_SHUTDOWN_RECEIVED = 7       SCTP_STATE_SHUTDOWN_RECEIVED = 6
 *   SCTP_SHUTDOWN_ACK_SENT = 8       SCTP_STATE_SHUTDOWN_ACK_SENT = 7
 *
 * When the dead SCTP_EMPTY entry was removed from the kernel enum,
 * SCTP_STATUS started reporting off-by-one states to applications.  The
 * kernel enum has to stay as it is (it indexes a function pointer table), so
 * sctp_assoc_to_state() encapsulates the off-by-one when filling in
 * sstat_state below.
 */

/* 7.2.1 Association Status (SCTP_STATUS)
 *
 * Applications can retrieve current status information about an
 * association, including association state, peer receiver window size,
 * number of unacked data chunks, and number of data chunks pending
 * receipt.  This information is read-only.
 */
static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
				       char __user *optval,
				       int __user *optlen)
{
	struct sctp_status status;
	struct sctp_association *asoc = NULL;
	struct sctp_transport *transport;
	sctp_assoc_t associd;
	int retval = 0;

	if (len < sizeof(status)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(status);
	if (copy_from_user(&status, optval, len)) {
		retval = -EFAULT;
		goto out;
	}

	associd = status.sstat_assoc_id;
	asoc = sctp_id2assoc(sk, associd);
	if (!asoc) {
		retval = -EINVAL;
		goto out;
	}

	transport = asoc->peer.primary_path;

	status.sstat_assoc_id = sctp_assoc2id(asoc);
	status.sstat_state = sctp_assoc_to_state(asoc);
	status.sstat_rwnd = asoc->peer.rwnd;
	status.sstat_unackdata = asoc->unack_data;

	status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
	status.sstat_instrms = asoc->stream.incnt;
	status.sstat_outstrms = asoc->stream.outcnt;
	status.sstat_fragmentation_point = asoc->frag_point;
	status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
	memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
	       transport->af_specific->sockaddr_len);
	/* Map ipv4 address into v4-mapped-on-v6 address. */
	sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
		(union sctp_addr *)&status.sstat_primary.spinfo_address);
	status.sstat_primary.spinfo_state = transport->state;
	status.sstat_primary.spinfo_cwnd = transport->cwnd;
	status.sstat_primary.spinfo_srtt = transport->srtt;
	status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto);
	status.sstat_primary.spinfo_mtu = transport->pathmtu;

	if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN)
		status.sstat_primary.spinfo_state = SCTP_ACTIVE;

	if (put_user(len, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n",
		 __func__, len, status.sstat_state, status.sstat_rwnd,
		 status.sstat_assoc_id);

	if (copy_to_user(optval, &status, len)) {
		retval = -EFAULT;
		goto out;
	}

out:
	return retval;
}

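/* A user-space sketch (assuming <netinet/sctp.h> from lksctp-tools; not part
 * of this file) of querying SCTP_STATUS.  On a one-to-one socket the
 * association id can be left as 0.
 *
 *	struct sctp_status st;
 *	socklen_t len = sizeof(st);
 *
 *	memset(&st, 0, sizeof(st));
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_STATUS, &st, &len) == 0)
 *		printf("state %d, peer rwnd %u, in/out streams %u/%u\n",
 *		       st.sstat_state, st.sstat_rwnd,
 *		       st.sstat_instrms, st.sstat_outstrms);
 */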
/* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO)
 *
 * Applications can retrieve information about a specific peer address
 * of an association, including its reachability state, congestion
 * window, and retransmission timer values.  This information is
 * read-only.
 */
static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
					  char __user *optval,
					  int __user *optlen)
{
	struct sctp_paddrinfo pinfo;
	struct sctp_transport *transport;
	int retval = 0;

	if (len < sizeof(pinfo)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(pinfo);
	if (copy_from_user(&pinfo, optval, len)) {
		retval = -EFAULT;
		goto out;
	}

	transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address,
					   pinfo.spinfo_assoc_id);
	if (!transport)
		return -EINVAL;

	pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
	pinfo.spinfo_state = transport->state;
	pinfo.spinfo_cwnd = transport->cwnd;
	pinfo.spinfo_srtt = transport->srtt;
	pinfo.spinfo_rto = jiffies_to_msecs(transport->rto);
	pinfo.spinfo_mtu = transport->pathmtu;

	if (pinfo.spinfo_state == SCTP_UNKNOWN)
		pinfo.spinfo_state = SCTP_ACTIVE;

	if (put_user(len, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	if (copy_to_user(optval, &pinfo, len)) {
		retval = -EFAULT;
		goto out;
	}

out:
	return retval;
}

/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
 *
 * This option is an on/off flag.  If enabled no SCTP message
 * fragmentation will be performed.  Instead if a message being sent
 * exceeds the current PMTU size, the message will NOT be sent and
 * instead an error will be indicated to the user.
 */
static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
					     char __user *optval,
					     int __user *optlen)
{
	int val;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	val = (sctp_sk(sk)->disable_fragments == 1);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

/* 7.1.15 Set notification and ancillary events (SCTP_EVENTS)
 *
 * This socket option is used to specify various notifications and
 * ancillary data the user wishes to receive.
 */
static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
				  int __user *optlen)
{
	struct sctp_event_subscribe subscribe;
	__u8 *sn_type = (__u8 *)&subscribe;
	int i;

	if (len == 0)
		return -EINVAL;
	if (len > sizeof(struct sctp_event_subscribe))
		len = sizeof(struct sctp_event_subscribe);
	if (put_user(len, optlen))
		return -EFAULT;

	for (i = 0; i < len; i++)
		sn_type[i] = sctp_ulpevent_type_enabled(sctp_sk(sk)->subscribe,
							SCTP_SN_TYPE_BASE + i);

	if (copy_to_user(optval, &subscribe, len))
		return -EFAULT;

	return 0;
}

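/* A user-space sketch (assuming <netinet/sctp.h>; not part of this file) of
 * the SCTP_EVENTS option this getter mirrors: the setter takes the same
 * struct sctp_event_subscribe, with one on/off byte per notification type.
 *
 *	struct sctp_event_subscribe ev;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.sctp_data_io_event     = 1;	// deliver SCTP_SNDRCV ancillary data
 *	ev.sctp_association_event = 1;	// deliver SCTP_ASSOC_CHANGE events
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
 */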
/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
 *
 * This socket option is applicable to the UDP-style socket only.  When
 * set it will cause associations that are idle for more than the
 * specified number of seconds to automatically close.  An association
 * being idle is defined as an association that has NOT sent or received
 * user data.  The special value of '0' indicates that no automatic
 * close of any associations should be performed.  The option expects an
 * integer defining the number of seconds of idle time before an
 * association is closed.
 */
static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
	/* Applicable to UDP-style socket only */
	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;
	if (len < sizeof(int))
		return -EINVAL;
	len = sizeof(int);
	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(sctp_sk(sk)->autoclose, (int __user *)optval))
		return -EFAULT;
	return 0;
}

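/* A user-space sketch (not part of this file): SCTP_AUTOCLOSE is symmetric,
 * so the same integer used here with setsockopt() is what the getter above
 * reports.  Only meaningful on one-to-many (SOCK_SEQPACKET) sockets; the
 * 30 second value is illustrative.
 *
 *	int secs = 30;	// auto-close idle associations after 30 seconds
 *
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, sizeof(secs));
 */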
/* From commit "sctp: do not peel off an assoc from one netns to another one":
 * transports are hashed with sk->net as part of the key.  If an association
 * were peeled off into a socket living in a different netns, its transports
 * would keep their old hash keys, never be removed from the hashtable when
 * the sock is closed and the transports freed, and a later lookup could then
 * dereference freed transports (found by syzkaller via unshare() between
 * bind and SCTP_SOCKOPT_PEELOFF).  Rehashing is not an option because the
 * tuple may already be in use in the target netns, and peeling off across
 * netns makes little sense anyway (addresses, interfaces etc. differ), so
 * the operation is simply rejected.
 */

/* Helper routine to branch off an association to a new socket.  */
int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
{
	struct sctp_association *asoc = sctp_id2assoc(sk, id);
	struct sctp_sock *sp = sctp_sk(sk);
	struct socket *sock;
	int err = 0;

	/* Do not peel off from one netns to another one. */
	if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
		return -EINVAL;

	if (!asoc)
		return -EINVAL;

	/* An association cannot be branched off from an already peeled-off
	 * socket, nor is this supported for tcp style sockets.
	 */
	if (!sctp_style(sk, UDP))
		return -EINVAL;

	/* Create a new socket.  */
	err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
	if (err < 0)
		return err;

	sctp_copy_sock(sock->sk, sk, asoc);

	/* Make peeled-off sockets more like 1-1 accepted sockets.
	 * Set the daddr and initialize id to something more random and also
	 * copy over any ip options.
	 */
	sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk);
	sp->pf->copy_ip_options(sk, sock->sk);

	/* Populate the fields of the newsk from the oldsk and migrate the
	 * asoc to the newsk.
	 */
	sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH);

	*sockp = sock;

	return err;
}
EXPORT_SYMBOL(sctp_do_peeloff);

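/* A user-space sketch (assuming the lksctp-tools wrapper in <netinet/sctp.h>;
 * not part of this file): sctp_peeloff() issues the SCTP_SOCKOPT_PEELOFF
 * getsockopt handled by the helpers that follow, branching one association
 * off a one-to-many socket into its own one-to-one style descriptor.
 *
 *	struct sctp_sndrcvinfo info;
 *	char buf[1024];
 *	int flags = 0;
 *
 *	sctp_recvmsg(fd, buf, sizeof(buf), NULL, NULL, &info, &flags);
 *	int afd = sctp_peeloff(fd, info.sinfo_assoc_id);
 *	if (afd >= 0)
 *		close(afd);	// afd now owns that single association
 */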
static int sctp_getsockopt_peeloff_common(struct sock *sk, sctp_peeloff_arg_t *peeloff,
					  struct file **newfile, unsigned flags)
{
	struct socket *newsock;
	int retval;

	retval = sctp_do_peeloff(sk, peeloff->associd, &newsock);
	if (retval < 0)
		goto out;

	/* Map the socket to an unused fd that can be returned to the user.  */
	retval = get_unused_fd_flags(flags & SOCK_CLOEXEC);
	if (retval < 0) {
		sock_release(newsock);
		goto out;
	}

	*newfile = sock_alloc_file(newsock, 0, NULL);
	if (IS_ERR(*newfile)) {
		put_unused_fd(retval);
		retval = PTR_ERR(*newfile);
		*newfile = NULL;
		return retval;
	}

	pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk,
		 retval);

	peeloff->sd = retval;

	if (flags & SOCK_NONBLOCK)
		(*newfile)->f_flags |= O_NONBLOCK;
out:
	return retval;
}

static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
	sctp_peeloff_arg_t peeloff;
	struct file *newfile = NULL;
	int retval = 0;

	if (len < sizeof(sctp_peeloff_arg_t))
		return -EINVAL;
	len = sizeof(sctp_peeloff_arg_t);
	if (copy_from_user(&peeloff, optval, len))
		return -EFAULT;

	retval = sctp_getsockopt_peeloff_common(sk, &peeloff, &newfile, 0);
	if (retval < 0)
		goto out;

	/* Return the fd mapped to the new socket.  */
	if (put_user(len, optlen)) {
		fput(newfile);
		put_unused_fd(retval);
		return -EFAULT;
	}

	if (copy_to_user(optval, &peeloff, len)) {
		fput(newfile);
		put_unused_fd(retval);
		return -EFAULT;
	}
	fd_install(retval, newfile);
out:
	return retval;
}

static int sctp_getsockopt_peeloff_flags(struct sock *sk, int len,
					 char __user *optval, int __user *optlen)
{
	sctp_peeloff_flags_arg_t peeloff;
	struct file *newfile = NULL;
	int retval = 0;

	if (len < sizeof(sctp_peeloff_flags_arg_t))
		return -EINVAL;
	len = sizeof(sctp_peeloff_flags_arg_t);
	if (copy_from_user(&peeloff, optval, len))
		return -EFAULT;

	retval = sctp_getsockopt_peeloff_common(sk, &peeloff.p_arg,
						&newfile, peeloff.flags);
	if (retval < 0)
		goto out;

	/* Return the fd mapped to the new socket.  */
	if (put_user(len, optlen)) {
		fput(newfile);
		put_unused_fd(retval);
		return -EFAULT;
	}

	if (copy_to_user(optval, &peeloff, len)) {
		fput(newfile);
		put_unused_fd(retval);
		return -EFAULT;
	}
	fd_install(retval, newfile);
out:
	return retval;
}

/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
 *
 * Applications can enable or disable heartbeats for any peer address of
 * an association, modify an address's heartbeat interval, force a
 * heartbeat to be sent immediately, and adjust the address's maximum
 * number of retransmissions sent before an address is considered
 * unreachable.  The following structure is used to access and modify an
 * address's parameters:
 *
 *  struct sctp_paddrparams {
 *     sctp_assoc_t            spp_assoc_id;
 *     struct sockaddr_storage spp_address;
 *     uint32_t                spp_hbinterval;
 *     uint16_t                spp_pathmaxrxt;
 *     uint32_t                spp_pathmtu;
 *     uint32_t                spp_sackdelay;
 *     uint32_t                spp_flags;
 *  };
 *
 *   spp_assoc_id    - (one-to-many style socket) This is filled in the
 *                     application, and identifies the association for
 *                     this query.
 *   spp_address     - This specifies which address is of interest.
 *   spp_hbinterval  - This contains the value of the heartbeat interval,
 *                     in milliseconds.  If a value of zero
 *                     is present in this field then no changes are to
 *                     be made to this parameter.
 *   spp_pathmaxrxt  - This contains the maximum number of
 *                     retransmissions before this address shall be
 *                     considered unreachable.  If a value of zero
 *                     is present in this field then no changes are to
 *                     be made to this parameter.
 *   spp_pathmtu     - When Path MTU discovery is disabled the value
 *                     specified here will be the "fixed" path mtu.
 *                     Note that if the spp_address field is empty
 *                     then all associations on this address will
 *                     have this fixed path mtu set upon them.
 *
 *   spp_sackdelay   - When delayed sack is enabled, this value specifies
 *                     the number of milliseconds that sacks will be delayed
 *                     for.  This value will apply to all addresses of an
 *                     association if the spp_address field is empty.  Note
 *                     also, that if delayed sack is enabled and this
 *                     value is set to 0, no change is made to the last
 *                     recorded delayed sack timer value.
 *
 *   spp_flags       - These flags are used to control various features
 *                     on an association.  The flag field may contain
 *                     zero or more of the following options.
 *
 *                     SPP_HB_ENABLE - Enable heartbeats on the
 *                     specified address.  Note that if the address
 *                     field is empty all addresses for the association
 *                     have heartbeats enabled upon them.
 *
 *                     SPP_HB_DISABLE - Disable heartbeats on the
 *                     specified address.  Note that if the address
 *                     field is empty all addresses for the association
 *                     will have their heartbeats disabled.  Note also
 *                     that SPP_HB_ENABLE and SPP_HB_DISABLE are
 *                     mutually exclusive, only one of these two should
 *                     be specified.  Enabling both fields will have
 *                     undetermined results.
 *
 *                     SPP_HB_DEMAND - Request a user initiated heartbeat
 *                     to be made immediately.
 *
 *                     SPP_PMTUD_ENABLE - This field will enable PMTU
 *                     discovery upon the specified address.  Note that
 *                     if the address field is empty then all addresses
 *                     on the association are affected.
 *
 *                     SPP_PMTUD_DISABLE - This field will disable PMTU
 *                     discovery upon the specified address.  Note that
 *                     if the address field is empty then all addresses
 *                     on the association are affected.  Note also that
 *                     SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
 *                     exclusive.  Enabling both will have undetermined
 *                     results.
 *
 *                     SPP_SACKDELAY_ENABLE - Setting this flag turns
 *                     on delayed sack.  The time specified in spp_sackdelay
 *                     is used to specify the sack delay for this address.  Note
 *                     that if spp_address is empty then all addresses will
 *                     enable delayed sack and take on the sack delay
 *                     value specified in spp_sackdelay.
 *                     SPP_SACKDELAY_DISABLE - Setting this flag turns
 *                     off delayed sack.  If the spp_address field is blank then
 *                     delayed sack is disabled for the entire association.  Note
 *                     also that this field is mutually exclusive to
 *                     SPP_SACKDELAY_ENABLE, setting both will have undefined
 *                     results.
 *
 *                     SPP_IPV6_FLOWLABEL: Setting this flag enables the
 *                     setting of the IPV6 flow label value.  The value is
 *                     contained in the spp_ipv6_flowlabel field.
 *                     Upon retrieval, this flag will be set to indicate that
 *                     the spp_ipv6_flowlabel field has a valid value returned.
 *                     If a specific destination address is set (in the
 *                     spp_address field), then the value returned is that of
 *                     the address.  If just an association is specified (and
 *                     no address), then the association's default flow label
 *                     is returned.  If neither an association nor a destination
 *                     is specified, then the socket's default flow label is
 *                     returned.  For non-IPv6 sockets, this flag will be left
 *                     cleared.
 *
 *                     SPP_DSCP: Setting this flag enables the setting of the
 *                     Differentiated Services Code Point (DSCP) value
 *                     associated with either the association or a specific
 *                     address.  The value is obtained in the spp_dscp field.
 *                     Upon retrieval, this flag will be set to indicate that
 *                     the spp_dscp field has a valid value returned.  If a
 *                     specific destination address is set when called (in the
 *                     spp_address field), then that specific destination
 *                     address's DSCP value is returned.  If just an association
 *                     is specified, then the association's default DSCP is
 *                     returned.  If neither an association nor a destination is
 *                     specified, then the socket's default DSCP is returned.
 *
 *   spp_ipv6_flowlabel
 *                   - This field is used in conjunction with the
 *                     SPP_IPV6_FLOWLABEL flag and contains the IPv6 flow label.
 *                     The 20 least significant bits are used for the flow
 *                     label.  This setting has precedence over any IPv6-layer
 *                     setting.
 *
 *   spp_dscp        - This field is used in conjunction with the SPP_DSCP flag
 *                     and contains the DSCP.  The 6 most significant bits are
 *                     used for the DSCP.  This setting has precedence over any
 *                     IPv4- or IPv6- layer setting.
 */

static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
|
2005-12-23 03:36:46 +08:00
|
|
|
char __user *optval, int __user *optlen)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2005-12-23 03:36:46 +08:00
|
|
|
struct sctp_paddrparams params;
|
|
|
|
struct sctp_transport *trans = NULL;
|
|
|
|
struct sctp_association *asoc = NULL;
|
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-07-02 18:21:13 +08:00
|
|
|
if (len >= sizeof(params))
|
|
|
|
len = sizeof(params);
|
|
|
|
else if (len >= ALIGN(offsetof(struct sctp_paddrparams,
|
|
|
|
spp_ipv6_flowlabel), 4))
|
|
|
|
len = ALIGN(offsetof(struct sctp_paddrparams,
|
|
|
|
spp_ipv6_flowlabel), 4);
|
|
|
|
else
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
2018-07-02 18:21:13 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (copy_from_user(¶ms, optval, len))
|
|
|
|
return -EFAULT;
|
|
|
|
|
2005-12-23 03:36:46 +08:00
|
|
|
/* If an address other than INADDR_ANY is specified, and
|
|
|
|
* no transport is found, then the request is invalid.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2013-12-23 12:16:50 +08:00
|
|
|
if (!sctp_is_any(sk, (union sctp_addr *)¶ms.spp_address)) {
|
2005-12-23 03:36:46 +08:00
|
|
|
trans = sctp_addr_id2transport(sk, ¶ms.spp_address,
|
|
|
|
params.spp_assoc_id);
|
|
|
|
if (!trans) {
|
net: sctp: rework debugging framework to use pr_debug and friends
We should get rid of all own SCTP debug printk macros and use the ones
that the kernel offers anyway instead. This makes the code more readable
and conform to the kernel code, and offers all the features of dynamic
debbuging that pr_debug() et al has, such as only turning on/off portions
of debug messages at runtime through debugfs. The runtime cost of having
CONFIG_DYNAMIC_DEBUG enabled, but none of the debug statements printing,
is negligible [1]. If kernel debugging is completly turned off, then these
statements will also compile into "empty" functions.
While we're at it, we also need to change the Kconfig option as it /now/
only refers to the ifdef'ed code portions in outqueue.c that enable further
debugging/tracing of SCTP transaction fields. Also, since SCTP_ASSERT code
was enabled with this Kconfig option and has now been removed, we
transform those code parts into WARNs resp. where appropriate BUG_ONs so
that those bugs can be more easily detected as probably not many people
have SCTP debugging permanently turned on.
To turn on all SCTP debugging, the following steps are needed:
# mount -t debugfs none /sys/kernel/debug
# echo -n 'module sctp +p' > /sys/kernel/debug/dynamic_debug/control
This can be done more fine-grained on a per file, per line basis and others
as described in [2].
[1] https://www.kernel.org/doc/ols/2009/ols2009-pages-39-46.pdf
[2] Documentation/dynamic-debug-howto.txt
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2013-06-29 01:49:40 +08:00
|
|
|
pr_debug("%s: failed no transport\n", __func__);
|
2005-12-23 03:36:46 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2019-01-28 15:08:24 +08:00
|
|
|
/* Get association, if assoc_id != SCTP_FUTURE_ASSOC and the
|
|
|
|
* socket is a one to many style socket, and an association
|
|
|
|
* was not found, then the id was invalid.
|
2005-12-23 03:36:46 +08:00
|
|
|
*/
|
|
|
|
asoc = sctp_id2assoc(sk, params.spp_assoc_id);
|
2019-01-28 15:08:24 +08:00
|
|
|
if (!asoc && params.spp_assoc_id != SCTP_FUTURE_ASSOC &&
|
|
|
|
sctp_style(sk, UDP)) {
|
2013-06-29 01:49:40 +08:00
|
|
|
pr_debug("%s: failed no association\n", __func__);
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
2005-12-23 03:36:46 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-12-23 03:36:46 +08:00
|
|
|
if (trans) {
|
|
|
|
/* Fetch transport values. */
|
|
|
|
params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval);
|
|
|
|
params.spp_pathmtu = trans->pathmtu;
|
|
|
|
params.spp_pathmaxrxt = trans->pathmaxrxt;
|
|
|
|
params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay);
|
|
|
|
|
|
|
|
/* draft-11 doesn't say what to return in spp_flags */
|
|
|
|
params.spp_flags = trans->param_flags;
|
2018-07-02 18:21:13 +08:00
|
|
|
if (trans->flowlabel & SCTP_FLOWLABEL_SET_MASK) {
|
|
|
|
params.spp_ipv6_flowlabel = trans->flowlabel &
|
|
|
|
SCTP_FLOWLABEL_VAL_MASK;
|
|
|
|
params.spp_flags |= SPP_IPV6_FLOWLABEL;
|
|
|
|
}
|
|
|
|
if (trans->dscp & SCTP_DSCP_SET_MASK) {
|
|
|
|
params.spp_dscp = trans->dscp & SCTP_DSCP_VAL_MASK;
|
|
|
|
params.spp_flags |= SPP_DSCP;
|
|
|
|
}
|
2005-12-23 03:36:46 +08:00
|
|
|
} else if (asoc) {
|
|
|
|
/* Fetch association values. */
|
|
|
|
params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval);
|
|
|
|
params.spp_pathmtu = asoc->pathmtu;
|
|
|
|
params.spp_pathmaxrxt = asoc->pathmaxrxt;
|
|
|
|
params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay);
|
|
|
|
|
|
|
|
/* draft-11 doesn't say what to return in spp_flags */
|
|
|
|
params.spp_flags = asoc->param_flags;
|
2018-07-02 18:21:13 +08:00
|
|
|
if (asoc->flowlabel & SCTP_FLOWLABEL_SET_MASK) {
|
|
|
|
params.spp_ipv6_flowlabel = asoc->flowlabel &
|
|
|
|
SCTP_FLOWLABEL_VAL_MASK;
|
|
|
|
params.spp_flags |= SPP_IPV6_FLOWLABEL;
|
|
|
|
}
|
|
|
|
if (asoc->dscp & SCTP_DSCP_SET_MASK) {
|
|
|
|
params.spp_dscp = asoc->dscp & SCTP_DSCP_VAL_MASK;
|
|
|
|
params.spp_flags |= SPP_DSCP;
|
|
|
|
}
|
2005-12-23 03:36:46 +08:00
|
|
|
} else {
|
|
|
|
/* Fetch socket values. */
|
|
|
|
params.spp_hbinterval = sp->hbinterval;
|
|
|
|
params.spp_pathmtu = sp->pathmtu;
|
|
|
|
params.spp_sackdelay = sp->sackdelay;
|
|
|
|
params.spp_pathmaxrxt = sp->pathmaxrxt;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-12-23 03:36:46 +08:00
|
|
|
/* draft-11 doesn't say what to return in spp_flags */
|
|
|
|
params.spp_flags = sp->param_flags;
|
2018-07-02 18:21:13 +08:00
|
|
|
if (sp->flowlabel & SCTP_FLOWLABEL_SET_MASK) {
|
|
|
|
params.spp_ipv6_flowlabel = sp->flowlabel &
|
|
|
|
SCTP_FLOWLABEL_VAL_MASK;
|
|
|
|
params.spp_flags |= SPP_IPV6_FLOWLABEL;
|
|
|
|
}
|
|
|
|
if (sp->dscp & SCTP_DSCP_SET_MASK) {
|
|
|
|
params.spp_dscp = sp->dscp & SCTP_DSCP_VAL_MASK;
|
|
|
|
params.spp_flags |= SPP_DSCP;
|
|
|
|
}
|
2005-12-23 03:36:46 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (copy_to_user(optval, ¶ms, len))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
if (put_user(len, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
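/* Example (userspace sketch, not part of this file, assuming the usual
 * <netinet/sctp.h> declarations): reading the parameters documented
 * above with getsockopt().  Leaving spp_address zeroed queries the
 * socket-wide defaults; "fd" is an assumed, already created SCTP
 * socket.
 *
 *	struct sctp_paddrparams p;
 *	socklen_t plen = sizeof(p);
 *
 *	memset(&p, 0, sizeof(p));
 *	if (!getsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *			&p, &plen))
 *		printf("hb %u ms, pmtu %u, maxrxt %u\n",
 *		       p.spp_hbinterval, p.spp_pathmtu, p.spp_pathmaxrxt);
 */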
|
|
|
|
|
2008-05-10 06:13:26 +08:00
|
|
|
/*
|
|
|
|
* 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
|
|
|
|
*
|
|
|
|
* This option will affect the way delayed acks are performed. This
|
|
|
|
* option allows you to get or set the delayed ack time, in
|
|
|
|
* milliseconds. It also allows changing the delayed ack frequency.
|
|
|
|
* Changing the frequency to 1 disables the delayed sack algorithm. If
|
|
|
|
* the assoc_id is 0, then this sets or gets the endpoint's default
|
|
|
|
* values. If the assoc_id field is non-zero, then the set or get
|
|
|
|
* affects the specified association for the one to many model (the
|
|
|
|
* assoc_id field is ignored by the one to one model). Note that if
|
|
|
|
* sack_delay or sack_freq are 0 when setting this option, then the
|
|
|
|
* current values will remain unchanged.
|
|
|
|
*
|
|
|
|
* struct sctp_sack_info {
|
|
|
|
* sctp_assoc_t sack_assoc_id;
|
|
|
|
* uint32_t sack_delay;
|
|
|
|
* uint32_t sack_freq;
|
|
|
|
* };
|
2005-12-23 03:37:30 +08:00
|
|
|
*
|
2008-05-10 06:13:26 +08:00
|
|
|
* sack_assoc_id - This parameter indicates which association the user
|
|
|
|
* is performing an action upon. Note that if this field's value is
|
|
|
|
* zero then the endpoint's default value is changed (affecting future
|
|
|
|
* associations only).
|
2005-12-23 03:37:30 +08:00
|
|
|
*
|
2008-05-10 06:13:26 +08:00
|
|
|
* sack_delay - This parameter contains the number of milliseconds that
|
|
|
|
* the user is requesting the delayed ACK timer be set to. Note that
|
|
|
|
* this value is defined in the standard to be between 200 and 500
|
|
|
|
* milliseconds.
|
2005-12-23 03:37:30 +08:00
|
|
|
*
|
2008-05-10 06:13:26 +08:00
|
|
|
* sack_freq - This parameter contains the number of packets that must
|
|
|
|
* be received before a sack is sent without waiting for the delay
|
|
|
|
* timer to expire. The default value for this is 2, setting this
|
|
|
|
* value to 1 will disable the delayed sack algorithm.
|
2005-12-23 03:37:30 +08:00
|
|
|
*/
|
2008-05-10 06:13:26 +08:00
|
|
|
static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
|
2005-12-23 03:37:30 +08:00
|
|
|
char __user *optval,
|
|
|
|
int __user *optlen)
|
|
|
|
{
|
2008-05-10 06:13:26 +08:00
|
|
|
struct sctp_sack_info params;
|
2005-12-23 03:37:30 +08:00
|
|
|
struct sctp_association *asoc = NULL;
|
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
|
|
|
|
2008-05-10 06:13:26 +08:00
|
|
|
if (len >= sizeof(struct sctp_sack_info)) {
|
|
|
|
len = sizeof(struct sctp_sack_info);
|
2005-12-23 03:37:30 +08:00
|
|
|
|
2008-05-10 06:13:26 +08:00
|
|
|
if (copy_from_user(¶ms, optval, len))
|
|
|
|
return -EFAULT;
|
|
|
|
} else if (len == sizeof(struct sctp_assoc_value)) {
|
2013-12-23 21:29:43 +08:00
|
|
|
pr_warn_ratelimited(DEPRECATED
|
2014-01-03 01:54:27 +08:00
|
|
|
"%s (pid %d) "
|
2013-12-23 21:29:43 +08:00
|
|
|
"Use of struct sctp_assoc_value in delayed_ack socket option.\n"
|
2014-01-03 01:54:27 +08:00
|
|
|
"Use struct sctp_sack_info instead\n",
|
|
|
|
current->comm, task_pid_nr(current));
|
2008-05-10 06:13:26 +08:00
|
|
|
if (copy_from_user(¶ms, optval, len))
|
|
|
|
return -EFAULT;
|
|
|
|
} else
|
2013-12-23 12:16:50 +08:00
|
|
|
return -EINVAL;
|
2005-12-23 03:37:30 +08:00
|
|
|
|
2019-01-28 15:08:34 +08:00
|
|
|
/* Get association, if sack_assoc_id != SCTP_FUTURE_ASSOC and the
|
|
|
|
* socket is a one to many style socket, and an association
|
|
|
|
* was not found, then the id was invalid.
|
2007-02-09 22:25:18 +08:00
|
|
|
*/
|
2008-05-10 06:13:26 +08:00
|
|
|
asoc = sctp_id2assoc(sk, params.sack_assoc_id);
|
2019-01-28 15:08:34 +08:00
|
|
|
if (!asoc && params.sack_assoc_id != SCTP_FUTURE_ASSOC &&
|
|
|
|
sctp_style(sk, UDP))
|
2005-12-23 03:37:30 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (asoc) {
|
|
|
|
/* Fetch association values. */
|
2008-05-10 06:13:26 +08:00
|
|
|
if (asoc->param_flags & SPP_SACKDELAY_ENABLE) {
|
2019-01-28 15:08:34 +08:00
|
|
|
params.sack_delay = jiffies_to_msecs(asoc->sackdelay);
|
2008-05-10 06:13:26 +08:00
|
|
|
params.sack_freq = asoc->sackfreq;
|
|
|
|
|
|
|
|
} else {
|
|
|
|
params.sack_delay = 0;
|
|
|
|
params.sack_freq = 1;
|
|
|
|
}
|
2005-12-23 03:37:30 +08:00
|
|
|
} else {
|
|
|
|
/* Fetch socket values. */
|
2008-05-10 06:13:26 +08:00
|
|
|
if (sp->param_flags & SPP_SACKDELAY_ENABLE) {
|
|
|
|
params.sack_delay = sp->sackdelay;
|
|
|
|
params.sack_freq = sp->sackfreq;
|
|
|
|
} else {
|
|
|
|
params.sack_delay = 0;
|
|
|
|
params.sack_freq = 1;
|
|
|
|
}
|
2005-12-23 03:37:30 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (copy_to_user(optval, ¶ms, len))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
if (put_user(len, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
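/* Example (userspace sketch, not part of this file): querying the
 * delayed SACK settings described above.  A sack_assoc_id of 0
 * (SCTP_FUTURE_ASSOC) returns the endpoint defaults on a one-to-many
 * socket; "fd" is an assumed SCTP socket.
 *
 *	struct sctp_sack_info si;
 *	socklen_t silen = sizeof(si);
 *
 *	memset(&si, 0, sizeof(si));
 *	if (!getsockopt(fd, IPPROTO_SCTP, SCTP_DELAYED_SACK,
 *			&si, &silen))
 *		printf("delay %u ms, freq %u\n",
 *		       si.sack_delay, si.sack_freq);
 */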
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* 7.1.3 Initialization Parameters (SCTP_INITMSG)
|
|
|
|
*
|
|
|
|
* Applications can specify protocol parameters for the default association
|
|
|
|
* initialization. The option name argument to setsockopt() and getsockopt()
|
|
|
|
* is SCTP_INITMSG.
|
|
|
|
*
|
|
|
|
* Setting initialization parameters is effective only on an unconnected
|
|
|
|
* socket (for UDP-style sockets only future associations are affected
|
|
|
|
* by the change). With TCP-style sockets, this option is inherited by
|
|
|
|
* sockets derived from a listener socket.
|
|
|
|
*/
|
|
|
|
static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
|
|
|
|
{
|
2007-06-17 02:03:45 +08:00
|
|
|
if (len < sizeof(struct sctp_initmsg))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
2007-06-17 02:03:45 +08:00
|
|
|
len = sizeof(struct sctp_initmsg);
|
|
|
|
if (put_user(len, optlen))
|
|
|
|
return -EFAULT;
|
2005-04-17 06:20:36 +08:00
|
|
|
if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
|
|
|
|
return -EFAULT;
|
|
|
|
return 0;
|
|
|
|
}
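/* Example (userspace sketch, not part of this file): requesting more
 * streams before associating, using the SCTP_INITMSG option handled
 * above.  The stream counts are illustrative assumptions.
 *
 *	struct sctp_initmsg im;
 *
 *	memset(&im, 0, sizeof(im));
 *	im.sinit_num_ostreams = 10;
 *	im.sinit_max_instreams = 10;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im));
 */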
|
|
|
|
|
2005-10-07 12:36:17 +08:00
|
|
|
|
|
|
|
static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
|
|
|
|
char __user *optval, int __user *optlen)
|
|
|
|
{
|
|
|
|
struct sctp_association *asoc;
|
|
|
|
int cnt = 0;
|
|
|
|
struct sctp_getaddrs getaddrs;
|
|
|
|
struct sctp_transport *from;
|
|
|
|
void __user *to;
|
|
|
|
union sctp_addr temp;
|
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
|
|
|
int addrlen;
|
|
|
|
size_t space_left;
|
|
|
|
int bytes_copied;
|
|
|
|
|
|
|
|
if (len < sizeof(struct sctp_getaddrs))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
/* For UDP-style sockets, id specifies the association to query. */
|
|
|
|
asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
|
|
|
|
if (!asoc)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2013-12-23 12:16:50 +08:00
|
|
|
to = optval + offsetof(struct sctp_getaddrs, addrs);
|
|
|
|
space_left = len - offsetof(struct sctp_getaddrs, addrs);
|
2005-10-07 12:36:17 +08:00
|
|
|
|
2008-04-13 09:54:24 +08:00
|
|
|
list_for_each_entry(from, &asoc->peer.transport_addr_list,
|
|
|
|
transports) {
|
2006-11-21 09:22:43 +08:00
|
|
|
memcpy(&temp, &from->ipaddr, sizeof(temp));
|
2014-07-31 02:40:53 +08:00
|
|
|
addrlen = sctp_get_pf_specific(sk->sk_family)
|
|
|
|
->addr_to_user(sp, &temp);
|
2007-04-29 12:09:04 +08:00
|
|
|
if (space_left < addrlen)
|
2005-10-07 12:36:17 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
if (copy_to_user(to, &temp, addrlen))
|
|
|
|
return -EFAULT;
|
|
|
|
to += addrlen;
|
|
|
|
cnt++;
|
|
|
|
space_left -= addrlen;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
|
|
|
|
return -EFAULT;
|
|
|
|
bytes_copied = ((char __user *)to) - optval;
|
|
|
|
if (put_user(bytes_copied, optlen))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
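/* Example (userspace sketch, not part of this file): applications
 * normally reach this option through the lksctp-tools helper
 * sctp_getpaddrs(), which sizes the buffer and issues the getsockopt()
 * serviced above.  "fd" and "assoc_id" are assumptions for
 * illustration.
 *
 *	struct sockaddr *addrs;
 *	int n = sctp_getpaddrs(fd, assoc_id, &addrs);
 *
 *	if (n > 0) {
 *		printf("%d peer addresses\n", n);
 *		sctp_freepaddrs(addrs);
 *	}
 */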
|
|
|
|
|
2007-04-29 12:09:04 +08:00
|
|
|
static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
|
|
|
|
size_t space_left, int *bytes_copied)
|
2005-10-07 12:36:17 +08:00
|
|
|
{
|
|
|
|
struct sctp_sockaddr_entry *addr;
|
|
|
|
union sctp_addr temp;
|
|
|
|
int cnt = 0;
|
|
|
|
int addrlen;
|
2012-08-06 16:42:04 +08:00
|
|
|
struct net *net = sock_net(sk);
|
2005-10-07 12:36:17 +08:00
|
|
|
|
2007-09-17 07:02:12 +08:00
|
|
|
rcu_read_lock();
|
2012-08-06 16:42:04 +08:00
|
|
|
list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
|
2007-09-17 07:02:12 +08:00
|
|
|
if (!addr->valid)
|
|
|
|
continue;
|
|
|
|
|
2007-02-09 22:25:18 +08:00
|
|
|
if ((PF_INET == sk->sk_family) &&
|
2006-11-21 09:21:44 +08:00
|
|
|
(AF_INET6 == addr->a.sa.sa_family))
|
2005-10-07 12:36:17 +08:00
|
|
|
continue;
|
2008-07-19 14:05:40 +08:00
|
|
|
if ((PF_INET6 == sk->sk_family) &&
|
|
|
|
inet_v6_ipv6only(sk) &&
|
|
|
|
(AF_INET == addr->a.sa.sa_family))
|
|
|
|
continue;
|
2006-11-21 09:21:44 +08:00
|
|
|
memcpy(&temp, &addr->a, sizeof(temp));
|
2008-01-29 03:25:36 +08:00
|
|
|
if (!temp.v4.sin_port)
|
|
|
|
temp.v4.sin_port = htons(port);
|
|
|
|
|
2014-07-31 02:40:53 +08:00
|
|
|
addrlen = sctp_get_pf_specific(sk->sk_family)
|
|
|
|
->addr_to_user(sctp_sk(sk), &temp);
|
|
|
|
|
2007-09-17 07:02:12 +08:00
|
|
|
if (space_left < addrlen) {
|
|
|
|
cnt = -ENOMEM;
|
|
|
|
break;
|
|
|
|
}
|
2007-04-29 12:09:04 +08:00
|
|
|
memcpy(to, &temp, addrlen);
|
2006-12-14 08:26:26 +08:00
|
|
|
|
2007-04-29 12:09:04 +08:00
|
|
|
to += addrlen;
|
2013-12-23 12:16:50 +08:00
|
|
|
cnt++;
|
2005-10-07 12:36:17 +08:00
|
|
|
space_left -= addrlen;
|
2007-07-04 00:43:12 +08:00
|
|
|
*bytes_copied += addrlen;
|
2005-10-07 12:36:17 +08:00
|
|
|
}
|
2007-09-17 07:02:12 +08:00
|
|
|
rcu_read_unlock();
|
2005-10-07 12:36:17 +08:00
|
|
|
|
|
|
|
return cnt;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-10-07 12:36:17 +08:00
|
|
|
static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
|
|
|
|
char __user *optval, int __user *optlen)
|
|
|
|
{
|
|
|
|
struct sctp_bind_addr *bp;
|
|
|
|
struct sctp_association *asoc;
|
|
|
|
int cnt = 0;
|
|
|
|
struct sctp_getaddrs getaddrs;
|
|
|
|
struct sctp_sockaddr_entry *addr;
|
|
|
|
void __user *to;
|
|
|
|
union sctp_addr temp;
|
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
|
|
|
int addrlen;
|
|
|
|
int err = 0;
|
|
|
|
size_t space_left;
|
2007-04-29 12:09:04 +08:00
|
|
|
int bytes_copied = 0;
|
|
|
|
void *addrs;
|
2007-05-10 04:51:31 +08:00
|
|
|
void *buf;
|
2005-10-07 12:36:17 +08:00
|
|
|
|
2007-06-17 02:03:45 +08:00
|
|
|
if (len < sizeof(struct sctp_getaddrs))
|
2005-10-07 12:36:17 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For UDP-style sockets, id specifies the association to query.
|
|
|
|
* If the id field is set to the value '0' then the locally bound
|
|
|
|
* addresses are returned without regard to any particular
|
|
|
|
* association.
|
|
|
|
*/
|
|
|
|
if (0 == getaddrs.assoc_id) {
|
|
|
|
bp = &sctp_sk(sk)->ep->base.bind_addr;
|
|
|
|
} else {
|
|
|
|
asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
|
|
|
|
if (!asoc)
|
|
|
|
return -EINVAL;
|
|
|
|
bp = &asoc->base.bind_addr;
|
|
|
|
}
|
|
|
|
|
2013-12-23 12:16:50 +08:00
|
|
|
to = optval + offsetof(struct sctp_getaddrs, addrs);
|
|
|
|
space_left = len - offsetof(struct sctp_getaddrs, addrs);
|
2007-06-19 07:59:16 +08:00
|
|
|
|
2015-12-01 00:32:54 +08:00
|
|
|
addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN);
|
2007-04-29 12:09:04 +08:00
|
|
|
if (!addrs)
|
|
|
|
return -ENOMEM;
|
2005-10-07 12:36:17 +08:00
|
|
|
|
|
|
|
/* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
|
|
|
|
* addresses from the global local address list.
|
|
|
|
*/
|
|
|
|
if (sctp_list_single_entry(&bp->address_list)) {
|
|
|
|
addr = list_entry(bp->address_list.next,
|
|
|
|
struct sctp_sockaddr_entry, list);
|
2008-08-18 22:34:34 +08:00
|
|
|
if (sctp_is_any(sk, &addr->a)) {
|
2007-04-29 12:09:04 +08:00
|
|
|
cnt = sctp_copy_laddrs(sk, bp->port, addrs,
|
|
|
|
space_left, &bytes_copied);
|
2005-10-07 12:36:17 +08:00
|
|
|
if (cnt < 0) {
|
|
|
|
err = cnt;
|
2007-09-17 07:03:28 +08:00
|
|
|
goto out;
|
2005-10-07 12:36:17 +08:00
|
|
|
}
|
2007-02-09 22:25:18 +08:00
|
|
|
goto copy_getaddrs;
|
2005-10-07 12:36:17 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-05-10 04:51:31 +08:00
|
|
|
buf = addrs;
|
2007-09-17 07:03:28 +08:00
|
|
|
/* Protection on the bound address list is not needed since
|
|
|
|
* in the socket option context we hold a socket lock and
|
|
|
|
* thus the bound address list can't change.
|
|
|
|
*/
|
|
|
|
list_for_each_entry(addr, &bp->address_list, list) {
|
2006-11-21 09:21:44 +08:00
|
|
|
memcpy(&temp, &addr->a, sizeof(temp));
|
2014-07-31 02:40:53 +08:00
|
|
|
addrlen = sctp_get_pf_specific(sk->sk_family)
|
|
|
|
->addr_to_user(sp, &temp);
|
2007-04-29 12:09:04 +08:00
|
|
|
if (space_left < addrlen) {
|
|
|
|
err = -ENOMEM; /*fixme: right error?*/
|
2007-09-17 07:03:28 +08:00
|
|
|
goto out;
|
2005-10-07 12:36:17 +08:00
|
|
|
}
|
2007-05-10 04:51:31 +08:00
|
|
|
memcpy(buf, &temp, addrlen);
|
|
|
|
buf += addrlen;
|
2007-04-29 12:09:04 +08:00
|
|
|
bytes_copied += addrlen;
|
2013-12-23 12:16:50 +08:00
|
|
|
cnt++;
|
2005-10-07 12:36:17 +08:00
|
|
|
space_left -= addrlen;
|
|
|
|
}
|
|
|
|
|
|
|
|
copy_getaddrs:
|
2007-04-29 12:09:04 +08:00
|
|
|
if (copy_to_user(to, addrs, bytes_copied)) {
|
|
|
|
err = -EFAULT;
|
2007-07-28 04:55:59 +08:00
|
|
|
goto out;
|
2007-04-29 12:09:04 +08:00
|
|
|
}
|
2007-05-23 23:11:37 +08:00
|
|
|
if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) {
|
|
|
|
err = -EFAULT;
|
2007-07-28 04:55:59 +08:00
|
|
|
goto out;
|
2007-05-23 23:11:37 +08:00
|
|
|
}
|
2018-01-09 05:02:29 +08:00
|
|
|
/* XXX: We should have accounted for sizeof(struct sctp_getaddrs) too,
|
|
|
|
* but we can't change it anymore.
|
|
|
|
*/
|
2005-10-07 12:36:17 +08:00
|
|
|
if (put_user(bytes_copied, optlen))
|
2007-05-23 23:11:37 +08:00
|
|
|
err = -EFAULT;
|
2007-07-28 04:55:59 +08:00
|
|
|
out:
|
2007-04-29 12:09:04 +08:00
|
|
|
kfree(addrs);
|
2005-10-07 12:36:17 +08:00
|
|
|
return err;
|
|
|
|
}
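/* Example (userspace sketch, not part of this file): the matching
 * lksctp-tools helper for locally bound addresses is sctp_getladdrs();
 * an assoc_id of 0 returns the bound addresses without regard to any
 * particular association, mirroring the handler above.
 *
 *	struct sockaddr *laddrs;
 *	int n = sctp_getladdrs(fd, 0, &laddrs);
 *
 *	if (n > 0) {
 *		printf("%d local addresses\n", n);
 *		sctp_freeladdrs(laddrs);
 *	}
 */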
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
|
|
|
|
*
|
|
|
|
* Requests that the local SCTP stack use the enclosed peer address as
|
|
|
|
* the association primary. The enclosed address must be one of the
|
|
|
|
* association peer's addresses.
|
|
|
|
*/
|
|
|
|
static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
|
|
|
|
char __user *optval, int __user *optlen)
|
|
|
|
{
|
|
|
|
struct sctp_prim prim;
|
|
|
|
struct sctp_association *asoc;
|
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
|
|
|
|
2007-06-17 02:03:45 +08:00
|
|
|
if (len < sizeof(struct sctp_prim))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2007-06-17 02:03:45 +08:00
|
|
|
len = sizeof(struct sctp_prim);
|
|
|
|
|
|
|
|
if (copy_from_user(&prim, optval, len))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
asoc = sctp_id2assoc(sk, prim.ssp_assoc_id);
|
|
|
|
if (!asoc)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!asoc->peer.primary_path)
|
|
|
|
return -ENOTCONN;
|
2007-02-09 22:25:18 +08:00
|
|
|
|
2006-11-21 09:23:01 +08:00
|
|
|
memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
|
|
|
|
asoc->peer.primary_path->af_specific->sockaddr_len);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2014-07-31 02:40:53 +08:00
|
|
|
sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp,
|
2005-04-17 06:20:36 +08:00
|
|
|
(union sctp_addr *)&prim.ssp_addr);
|
|
|
|
|
2007-06-17 02:03:45 +08:00
|
|
|
if (put_user(len, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
if (copy_to_user(optval, &prim, len))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
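/* Example (userspace sketch, not part of this file): fetching the
 * current primary path with SCTP_PRIMARY_ADDR as handled above.
 * "fd" and "assoc_id" are assumptions, and use_primary() is a
 * hypothetical consumer of the returned address.
 *
 *	struct sctp_prim prim;
 *	socklen_t primlen = sizeof(prim);
 *
 *	memset(&prim, 0, sizeof(prim));
 *	prim.ssp_assoc_id = assoc_id;
 *	if (!getsockopt(fd, IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
 *			&prim, &primlen))
 *		use_primary((struct sockaddr *)&prim.ssp_addr);
 */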
|
|
|
|
|
|
|
|
/*
|
2006-12-21 08:07:04 +08:00
|
|
|
* 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER)
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
2006-12-21 08:07:04 +08:00
|
|
|
* Requests that the local endpoint set the specified Adaptation Layer
|
2005-04-17 06:20:36 +08:00
|
|
|
* Indication parameter for all future INIT and INIT-ACK exchanges.
|
|
|
|
*/
|
2006-12-21 08:07:04 +08:00
|
|
|
static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len,
|
2005-04-17 06:20:36 +08:00
|
|
|
char __user *optval, int __user *optlen)
|
|
|
|
{
|
2006-12-21 08:07:04 +08:00
|
|
|
struct sctp_setadaptation adaptation;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-06-17 02:03:45 +08:00
|
|
|
if (len < sizeof(struct sctp_setadaptation))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2007-06-17 02:03:45 +08:00
|
|
|
len = sizeof(struct sctp_setadaptation);
|
|
|
|
|
2006-12-21 08:07:04 +08:00
|
|
|
adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;
|
2007-06-17 02:03:45 +08:00
|
|
|
|
|
|
|
if (put_user(len, optlen))
|
|
|
|
return -EFAULT;
|
2006-12-21 08:07:04 +08:00
|
|
|
if (copy_to_user(optval, &adaptation, len))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EFAULT;
|
2005-10-29 06:33:24 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
*
|
|
|
|
* 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
|
|
|
|
*
|
|
|
|
* Applications that wish to use the sendto() system call may wish to
|
|
|
|
* specify a default set of parameters that would normally be supplied
|
|
|
|
* through the inclusion of ancillary data. This socket option allows
|
|
|
|
* such an application to set the default sctp_sndrcvinfo structure.
|
|
|
|
|
|
|
|
|
|
|
|
* The application that wishes to use this socket option simply passes
|
|
|
|
* in to this call the sctp_sndrcvinfo structure defined in Section
|
|
|
|
* 5.2.2). The input parameters accepted by this call include
|
|
|
|
* sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
|
|
|
|
* sinfo_timetolive. The user must provide the sinfo_assoc_id field in
|
|
|
|
* to this call if the caller is using the UDP model.
|
|
|
|
*
|
|
|
|
* For getsockopt, it gets the default sctp_sndrcvinfo structure.
|
|
|
|
*/
|
|
|
|
static int sctp_getsockopt_default_send_param(struct sock *sk,
|
|
|
|
int len, char __user *optval,
|
|
|
|
int __user *optlen)
|
|
|
|
{
|
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
2014-07-13 02:30:39 +08:00
|
|
|
struct sctp_association *asoc;
|
|
|
|
struct sctp_sndrcvinfo info;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2014-07-13 02:30:39 +08:00
|
|
|
if (len < sizeof(info))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
2007-06-17 02:03:45 +08:00
|
|
|
|
2014-07-13 02:30:39 +08:00
|
|
|
len = sizeof(info);
|
2007-06-17 02:03:45 +08:00
|
|
|
|
|
|
|
if (copy_from_user(&info, optval, len))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
|
2019-01-28 15:08:35 +08:00
|
|
|
if (!asoc && info.sinfo_assoc_id != SCTP_FUTURE_ASSOC &&
|
|
|
|
sctp_style(sk, UDP))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
2019-01-28 15:08:35 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (asoc) {
|
|
|
|
info.sinfo_stream = asoc->default_stream;
|
|
|
|
info.sinfo_flags = asoc->default_flags;
|
|
|
|
info.sinfo_ppid = asoc->default_ppid;
|
|
|
|
info.sinfo_context = asoc->default_context;
|
|
|
|
info.sinfo_timetolive = asoc->default_timetolive;
|
|
|
|
} else {
|
|
|
|
info.sinfo_stream = sp->default_stream;
|
|
|
|
info.sinfo_flags = sp->default_flags;
|
|
|
|
info.sinfo_ppid = sp->default_ppid;
|
|
|
|
info.sinfo_context = sp->default_context;
|
|
|
|
info.sinfo_timetolive = sp->default_timetolive;
|
|
|
|
}
|
|
|
|
|
2007-06-17 02:03:45 +08:00
|
|
|
if (put_user(len, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
if (copy_to_user(optval, &info, len))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
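/* Example (userspace sketch, not part of this file): reading the
 * default send parameters with SCTP_DEFAULT_SEND_PARAM as handled
 * above.  A sinfo_assoc_id of 0 queries the socket defaults on a
 * one-to-many socket; "fd" is an assumed SCTP socket.
 *
 *	struct sctp_sndrcvinfo info;
 *	socklen_t ilen = sizeof(info);
 *
 *	memset(&info, 0, sizeof(info));
 *	if (!getsockopt(fd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *			&info, &ilen))
 *		printf("stream %u, ppid %u, ttl %u\n",
 *		       info.sinfo_stream, info.sinfo_ppid,
 *		       info.sinfo_timetolive);
 */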
|
|
|
|
|
2014-07-13 02:30:39 +08:00
|
|
|
/* RFC6458, Section 8.1.31. Set/get Default Send Parameters
|
|
|
|
* (SCTP_DEFAULT_SNDINFO)
|
|
|
|
*/
|
|
|
|
static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len,
|
|
|
|
char __user *optval,
|
|
|
|
int __user *optlen)
|
|
|
|
{
|
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
|
|
|
struct sctp_association *asoc;
|
|
|
|
struct sctp_sndinfo info;
|
|
|
|
|
|
|
|
if (len < sizeof(info))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
len = sizeof(info);
|
|
|
|
|
|
|
|
if (copy_from_user(&info, optval, len))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
asoc = sctp_id2assoc(sk, info.snd_assoc_id);
|
2019-01-28 15:08:36 +08:00
|
|
|
if (!asoc && info.snd_assoc_id != SCTP_FUTURE_ASSOC &&
|
|
|
|
sctp_style(sk, UDP))
|
2014-07-13 02:30:39 +08:00
|
|
|
return -EINVAL;
|
2019-01-28 15:08:36 +08:00
|
|
|
|
2014-07-13 02:30:39 +08:00
|
|
|
if (asoc) {
|
|
|
|
info.snd_sid = asoc->default_stream;
|
|
|
|
info.snd_flags = asoc->default_flags;
|
|
|
|
info.snd_ppid = asoc->default_ppid;
|
|
|
|
info.snd_context = asoc->default_context;
|
|
|
|
} else {
|
|
|
|
info.snd_sid = sp->default_stream;
|
|
|
|
info.snd_flags = sp->default_flags;
|
|
|
|
info.snd_ppid = sp->default_ppid;
|
|
|
|
info.snd_context = sp->default_context;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (put_user(len, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
if (copy_to_user(optval, &info, len))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
*
|
|
|
|
* 7.1.5 SCTP_NODELAY
|
|
|
|
*
|
|
|
|
* Turn on/off any Nagle-like algorithm. This means that packets are
|
|
|
|
* generally sent as soon as possible and no unnecessary delays are
|
|
|
|
* introduced, at the cost of more packets in the network. Expects an
|
|
|
|
* integer boolean flag.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int sctp_getsockopt_nodelay(struct sock *sk, int len,
|
|
|
|
char __user *optval, int __user *optlen)
|
|
|
|
{
|
|
|
|
int val;
|
|
|
|
|
|
|
|
if (len < sizeof(int))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
len = sizeof(int);
|
|
|
|
val = (sctp_sk(sk)->nodelay == 1);
|
|
|
|
if (put_user(len, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
if (copy_to_user(optval, &val, len))
|
|
|
|
return -EFAULT;
|
|
|
|
return 0;
|
|
|
|
}
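/* Example (userspace sketch, not part of this file): SCTP_NODELAY
 * takes a plain integer boolean, as the getter above shows.
 *
 *	int on = 1;
 *
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on));
 */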
|
|
|
|
|
|
|
|
/*
|
|
|
|
*
|
|
|
|
* 7.1.1 SCTP_RTOINFO
|
|
|
|
*
|
|
|
|
* The protocol parameters used to initialize and bound retransmission
|
|
|
|
* timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
|
|
|
|
* and modify these parameters.
|
|
|
|
* All parameters are time values, in milliseconds. A value of 0, when
|
|
|
|
* modifying the parameters, indicates that the current value should not
|
|
|
|
* be changed.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static int sctp_getsockopt_rtoinfo(struct sock *sk, int len,
|
|
|
|
char __user *optval,
|
|
|
|
int __user *optlen) {
|
|
|
|
struct sctp_rtoinfo rtoinfo;
|
|
|
|
struct sctp_association *asoc;
|
|
|
|
|
2007-06-17 02:03:45 +08:00
|
|
|
if (len < sizeof (struct sctp_rtoinfo))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2007-06-17 02:03:45 +08:00
|
|
|
len = sizeof(struct sctp_rtoinfo);
|
|
|
|
|
|
|
|
if (copy_from_user(&rtoinfo, optval, len))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
|
|
|
|
|
2019-01-28 15:08:25 +08:00
|
|
|
if (!asoc && rtoinfo.srto_assoc_id != SCTP_FUTURE_ASSOC &&
|
|
|
|
sctp_style(sk, UDP))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* Values corresponding to the specific association. */
|
|
|
|
if (asoc) {
|
|
|
|
rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial);
|
|
|
|
rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max);
|
|
|
|
rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min);
|
|
|
|
} else {
|
|
|
|
/* Values corresponding to the endpoint. */
|
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
|
|
|
|
|
|
|
rtoinfo.srto_initial = sp->rtoinfo.srto_initial;
|
|
|
|
rtoinfo.srto_max = sp->rtoinfo.srto_max;
|
|
|
|
rtoinfo.srto_min = sp->rtoinfo.srto_min;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (put_user(len, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
if (copy_to_user(optval, &rtoinfo, len))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
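/* Example (userspace sketch, not part of this file): bounding the
 * retransmission timeout via SCTP_RTOINFO as handled above.  A zero
 * field leaves the corresponding value unchanged; the millisecond
 * values below are illustrative assumptions.
 *
 *	struct sctp_rtoinfo rto;
 *
 *	memset(&rto, 0, sizeof(rto));
 *	rto.srto_assoc_id = SCTP_FUTURE_ASSOC;
 *	rto.srto_min = 100;
 *	rto.srto_max = 2000;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto));
 */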
|
|
|
|
|
|
|
|
/*
|
|
|
|
*
|
|
|
|
* 7.1.2 SCTP_ASSOCINFO
|
|
|
|
*
|
2007-05-09 14:57:56 +08:00
|
|
|
* This option is used to tune the maximum retransmission attempts
|
2005-04-17 06:20:36 +08:00
|
|
|
* of the association.
|
|
|
|
* Returns an error if the new association retransmission value is
|
|
|
|
* greater than the sum of the retransmission value of the peer.
|
|
|
|
* See [SCTP] for more information.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static int sctp_getsockopt_associnfo(struct sock *sk, int len,
|
|
|
|
char __user *optval,
|
|
|
|
int __user *optlen)
|
|
|
|
{
|
|
|
|
|
|
|
|
struct sctp_assocparams assocparams;
|
|
|
|
struct sctp_association *asoc;
|
|
|
|
struct list_head *pos;
|
|
|
|
int cnt = 0;
|
|
|
|
|
2007-06-17 02:03:45 +08:00
|
|
|
if (len < sizeof (struct sctp_assocparams))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2007-06-17 02:03:45 +08:00
|
|
|
len = sizeof(struct sctp_assocparams);
|
|
|
|
|
|
|
|
if (copy_from_user(&assocparams, optval, len))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
|
|
|
|
|
2019-01-28 15:08:26 +08:00
|
|
|
if (!asoc && assocparams.sasoc_assoc_id != SCTP_FUTURE_ASSOC &&
|
|
|
|
sctp_style(sk, UDP))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* Values corresponding to the specific association */
|
2005-04-29 02:57:54 +08:00
|
|
|
if (asoc) {
|
2005-04-17 06:20:36 +08:00
|
|
|
assocparams.sasoc_asocmaxrxt = asoc->max_retrans;
|
|
|
|
assocparams.sasoc_peer_rwnd = asoc->peer.rwnd;
|
|
|
|
assocparams.sasoc_local_rwnd = asoc->a_rwnd;
|
2013-06-26 00:17:27 +08:00
|
|
|
assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
list_for_each(pos, &asoc->peer.transport_addr_list) {
|
2013-12-23 12:16:50 +08:00
|
|
|
cnt++;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
assocparams.sasoc_number_peer_destinations = cnt;
|
|
|
|
} else {
|
|
|
|
/* Values corresponding to the endpoint */
|
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
|
|
|
|
|
|
|
assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt;
|
|
|
|
assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd;
|
|
|
|
assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd;
|
|
|
|
assocparams.sasoc_cookie_life =
|
|
|
|
sp->assocparams.sasoc_cookie_life;
|
|
|
|
assocparams.sasoc_number_peer_destinations =
|
|
|
|
sp->assocparams.
|
|
|
|
sasoc_number_peer_destinations;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (put_user(len, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
if (copy_to_user(optval, &assocparams, len))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
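/* Example (userspace sketch, not part of this file): reading the
 * association parameters computed above with SCTP_ASSOCINFO.
 * "fd" and "assoc_id" are assumptions for illustration.
 *
 *	struct sctp_assocparams ap;
 *	socklen_t aplen = sizeof(ap);
 *
 *	memset(&ap, 0, sizeof(ap));
 *	ap.sasoc_assoc_id = assoc_id;
 *	if (!getsockopt(fd, IPPROTO_SCTP, SCTP_ASSOCINFO, &ap, &aplen))
 *		printf("maxrxt %u, peers %u, peer rwnd %u\n",
 *		       ap.sasoc_asocmaxrxt,
 *		       ap.sasoc_number_peer_destinations,
 *		       ap.sasoc_peer_rwnd);
 */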
|
|
|
|
|
|
|
|
/*
|
|
|
|
* 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
|
|
|
|
*
|
|
|
|
* This socket option is a boolean flag which turns on or off mapped V4
|
|
|
|
* addresses. If this option is turned on and the socket is type
|
|
|
|
* PF_INET6, then IPv4 addresses will be mapped to V6 representation.
|
|
|
|
* If this option is turned off, then no mapping will be done of V4
|
|
|
|
* addresses and a user will receive both PF_INET6 and PF_INET type
|
|
|
|
* addresses on the socket.
|
|
|
|
*/
|
|
|
|
static int sctp_getsockopt_mappedv4(struct sock *sk, int len,
|
|
|
|
char __user *optval, int __user *optlen)
|
|
|
|
{
|
|
|
|
int val;
|
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
|
|
|
|
|
|
|
if (len < sizeof(int))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
len = sizeof(int);
|
|
|
|
val = sp->v4mapped;
|
|
|
|
if (put_user(len, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
if (copy_to_user(optval, &val, len))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2006-12-14 08:34:22 +08:00
|
|
|
/*
|
|
|
|
* 7.1.29. Set or Get the default context (SCTP_CONTEXT)
|
|
|
|
* (chapter and verse is quoted at sctp_setsockopt_context())
|
|
|
|
*/
|
|
|
|
static int sctp_getsockopt_context(struct sock *sk, int len,
|
|
|
|
char __user *optval, int __user *optlen)
|
|
|
|
{
|
|
|
|
struct sctp_assoc_value params;
|
|
|
|
struct sctp_association *asoc;
|
|
|
|
|
2007-06-17 02:03:45 +08:00
|
|
|
if (len < sizeof(struct sctp_assoc_value))
|
2006-12-14 08:34:22 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2007-06-17 02:03:45 +08:00
|
|
|
len = sizeof(struct sctp_assoc_value);
|
|
|
|
|
2006-12-14 08:34:22 +08:00
|
|
|
if (copy_from_user(¶ms, optval, len))
|
|
|
|
return -EFAULT;
|
|
|
|
|
2019-01-28 15:08:37 +08:00
|
|
|
asoc = sctp_id2assoc(sk, params.assoc_id);
|
|
|
|
if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
|
|
|
|
sctp_style(sk, UDP))
|
|
|
|
return -EINVAL;
|
2006-12-14 08:34:22 +08:00
|
|
|
|
2019-01-28 15:08:37 +08:00
|
|
|
params.assoc_value = asoc ? asoc->default_rcv_context
|
|
|
|
: sctp_sk(sk)->default_rcv_context;
|
2006-12-14 08:34:22 +08:00
|
|
|
|
|
|
|
if (put_user(len, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
if (copy_to_user(optval, ¶ms, len))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2008-12-26 08:54:58 +08:00
|
|
|
* 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
|
|
|
|
* This option will get or set the maximum size to put in any outgoing
|
|
|
|
* SCTP DATA chunk. If a message is larger than this size it will be
|
2005-04-17 06:20:36 +08:00
|
|
|
* fragmented by SCTP into the specified size. Note that the underlying
|
|
|
|
* SCTP implementation may fragment into smaller sized chunks when the
|
|
|
|
* PMTU of the underlying association is smaller than the value set by
|
2008-12-26 08:54:58 +08:00
|
|
|
* the user. The default value for this option is '0' which indicates
|
|
|
|
* the user is NOT limiting fragmentation and only the PMTU will affect
|
|
|
|
* SCTP's choice of DATA chunk size. Note also that values set larger
|
|
|
|
* than the maximum size of an IP datagram will effectively let SCTP
|
|
|
|
* control fragmentation (i.e. the same as setting this option to 0).
|
|
|
|
*
|
|
|
|
* The following structure is used to access and modify this parameter:
|
|
|
|
*
|
|
|
|
* struct sctp_assoc_value {
|
|
|
|
* sctp_assoc_t assoc_id;
|
|
|
|
* uint32_t assoc_value;
|
|
|
|
* };
|
|
|
|
*
|
|
|
|
* assoc_id: This parameter is ignored for one-to-one style sockets.
|
|
|
|
* For one-to-many style sockets this parameter indicates which
|
|
|
|
* association the user is performing an action upon. Note that if
|
|
|
|
* this field's value is zero then the endpoint's default value is
|
|
|
|
* changed (affecting future associations only).
|
|
|
|
* assoc_value: This parameter specifies the maximum size in bytes.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
|
|
|
static int sctp_getsockopt_maxseg(struct sock *sk, int len,
|
|
|
|
char __user *optval, int __user *optlen)
|
|
|
|
{
|
2008-12-26 08:54:58 +08:00
|
|
|
struct sctp_assoc_value params;
|
|
|
|
struct sctp_association *asoc;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-12-26 08:54:58 +08:00
|
|
|
if (len == sizeof(int)) {
|
2013-12-23 21:29:43 +08:00
|
|
|
pr_warn_ratelimited(DEPRECATED
|
2014-01-03 01:54:27 +08:00
|
|
|
"%s (pid %d) "
|
2013-12-23 21:29:43 +08:00
|
|
|
"Use of int in maxseg socket option.\n"
|
2014-01-03 01:54:27 +08:00
|
|
|
"Use struct sctp_assoc_value instead\n",
|
|
|
|
current->comm, task_pid_nr(current));
|
2019-01-28 15:08:27 +08:00
|
|
|
params.assoc_id = SCTP_FUTURE_ASSOC;
|
2008-12-26 08:54:58 +08:00
|
|
|
} else if (len >= sizeof(struct sctp_assoc_value)) {
|
|
|
|
len = sizeof(struct sctp_assoc_value);
|
2018-01-09 05:02:29 +08:00
|
|
|
if (copy_from_user(¶ms, optval, len))
|
2008-12-26 08:54:58 +08:00
|
|
|
return -EFAULT;
|
|
|
|
} else
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2008-12-26 08:54:58 +08:00
|
|
|
asoc = sctp_id2assoc(sk, params.assoc_id);
|
2019-01-28 15:08:27 +08:00
|
|
|
if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
|
|
|
|
sctp_style(sk, UDP))
|
2008-12-26 08:54:58 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (asoc)
|
|
|
|
params.assoc_value = asoc->frag_point;
|
|
|
|
else
|
|
|
|
params.assoc_value = sctp_sk(sk)->user_frag;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (put_user(len, optlen))
|
|
|
|
return -EFAULT;
|
2008-12-26 08:54:58 +08:00
|
|
|
if (len == sizeof(int)) {
|
|
|
|
if (copy_to_user(optval, ¶ms.assoc_value, len))
|
|
|
|
return -EFAULT;
|
|
|
|
} else {
|
|
|
|
if (copy_to_user(optval, ¶ms, len))
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
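/* Example (userspace sketch, not part of this file): the
 * struct sctp_assoc_value form described above is preferred over the
 * deprecated plain-int form of SCTP_MAXSEG.  The 1200-byte limit is an
 * illustrative assumption; 0 would leave fragmentation to PMTU alone.
 *
 *	struct sctp_assoc_value av;
 *
 *	memset(&av, 0, sizeof(av));
 *	av.assoc_id = SCTP_FUTURE_ASSOC;
 *	av.assoc_value = 1200;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));
 */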
|
|
|
|
|
2007-04-21 03:23:15 +08:00
|
|
|
/*
|
|
|
|
* 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
|
|
|
|
* (chapter and verse is quoted at sctp_setsockopt_fragment_interleave())
|
|
|
|
*/
|
|
|
|
static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len,
|
|
|
|
char __user *optval, int __user *optlen)
|
|
|
|
{
|
|
|
|
int val;
|
|
|
|
|
|
|
|
if (len < sizeof(int))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
len = sizeof(int);
|
|
|
|
|
|
|
|
val = sctp_sk(sk)->frag_interleave;
|
|
|
|
if (put_user(len, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
if (copy_to_user(optval, &val, len))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-03-24 02:32:00 +08:00
|
|
|
/*
|
|
|
|
* 7.1.25. Set or Get the sctp partial delivery point
|
|
|
|
* (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point())
|
|
|
|
*/
|
|
|
|
static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
|
|
|
|
char __user *optval,
|
|
|
|
int __user *optlen)
|
|
|
|
{
|
2007-07-19 09:44:50 +08:00
|
|
|
u32 val;
|
2007-03-24 02:32:00 +08:00
|
|
|
|
|
|
|
if (len < sizeof(u32))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
len = sizeof(u32);
|
|
|
|
|
|
|
|
val = sctp_sk(sk)->pd_point;
|
|
|
|
if (put_user(len, optlen))
|
|
|
|
return -EFAULT;
|
|
|
|
if (copy_to_user(optval, &val, len))
|
|
|
|
return -EFAULT;
|
|
|
|
|
2010-12-15 00:10:41 +08:00
|
|
|
return 0;
|
2007-03-24 02:32:00 +08:00
|
|
|
}
|
|
|
|
|
2007-03-24 02:34:36 +08:00
|
|
|
/*
|
|
|
|
* 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
|
|
|
|
* (chapter and verse is quoted at sctp_setsockopt_maxburst())
|
|
|
|
*/
|
|
|
|
static int sctp_getsockopt_maxburst(struct sock *sk, int len,
|
|
|
|
char __user *optval,
|
|
|
|
int __user *optlen)
|
|
|
|
{
|
2008-03-06 05:44:46 +08:00
|
|
|
struct sctp_assoc_value params;
|
|
|
|
struct sctp_association *asoc;
|
2007-03-24 02:34:36 +08:00
|
|
|
|
2008-03-06 05:44:46 +08:00
|
|
|
if (len == sizeof(int)) {
|
2013-12-23 21:29:43 +08:00
|
|
|
pr_warn_ratelimited(DEPRECATED
|
2014-01-03 01:54:27 +08:00
|
|
|
"%s (pid %d) "
|
2013-12-23 21:29:43 +08:00
|
|
|
"Use of int in max_burst socket option.\n"
|
2014-01-03 01:54:27 +08:00
|
|
|
"Use struct sctp_assoc_value instead\n",
|
|
|
|
current->comm, task_pid_nr(current));
|
2019-01-28 15:08:38 +08:00
|
|
|
params.assoc_id = SCTP_FUTURE_ASSOC;
|
2009-03-02 17:46:12 +08:00
|
|
|
} else if (len >= sizeof(struct sctp_assoc_value)) {
|
|
|
|
len = sizeof(struct sctp_assoc_value);
|
2008-03-06 05:44:46 +08:00
|
|
|
if (copy_from_user(¶ms, optval, len))
|
|
|
|
return -EFAULT;
|
|
|
|
} else
|
|
|
|
return -EINVAL;
|
2007-03-24 02:34:36 +08:00
|
|
|
|
2019-01-28 15:08:38 +08:00
|
|
|
asoc = sctp_id2assoc(sk, params.assoc_id);
|
|
|
|
if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
|
|
|
|
sctp_style(sk, UDP))
|
|
|
|
return -EINVAL;
|
2008-03-06 05:44:46 +08:00
|
|
|
|
2019-01-28 15:08:38 +08:00
|
|
|
params.assoc_value = asoc ? asoc->max_burst : sctp_sk(sk)->max_burst;
|
2008-03-06 05:44:46 +08:00
|
|
|
|
|
|
|
if (len == sizeof(int)) {
|
|
|
|
if (copy_to_user(optval, ¶ms.assoc_value, len))
|
|
|
|
return -EFAULT;
|
|
|
|
} else {
|
|
|
|
if (copy_to_user(optval, ¶ms, len))
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2007-03-24 02:34:36 +08:00
|
|
|
|
|
|
|
}
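/* Example (userspace sketch, not part of this file): SCTP_MAX_BURST
 * uses the same struct sctp_assoc_value shape as SCTP_MAXSEG, and the
 * handler above likewise still accepts the deprecated plain int.
 *
 *	struct sctp_assoc_value av;
 *	socklen_t avlen = sizeof(av);
 *
 *	memset(&av, 0, sizeof(av));
 *	av.assoc_id = SCTP_FUTURE_ASSOC;
 *	if (!getsockopt(fd, IPPROTO_SCTP, SCTP_MAX_BURST, &av, &avlen))
 *		printf("max burst: %u packets\n", av.assoc_value);
 */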
|
|
|
|
|
2007-09-17 10:34:00 +08:00
|
|
|
static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
|
|
|
|
char __user *optval, int __user *optlen)
|
|
|
|
{
|
net: sctp: cache auth_enable per endpoint
Currently, it is possible to create an SCTP socket, then switch
auth_enable via sysctl setting to 1 and crash the system on connect:
Oops[#1]:
CPU: 0 PID: 0 Comm: swapper Not tainted 3.14.1-mipsgit-20140415 #1
task: ffffffff8056ce80 ti: ffffffff8055c000 task.ti: ffffffff8055c000
[...]
Call Trace:
[<ffffffff8043c4e8>] sctp_auth_asoc_set_default_hmac+0x68/0x80
[<ffffffff8042b300>] sctp_process_init+0x5e0/0x8a4
[<ffffffff8042188c>] sctp_sf_do_5_1B_init+0x234/0x34c
[<ffffffff804228c8>] sctp_do_sm+0xb4/0x1e8
[<ffffffff80425a08>] sctp_endpoint_bh_rcv+0x1c4/0x214
[<ffffffff8043af68>] sctp_rcv+0x588/0x630
[<ffffffff8043e8e8>] sctp6_rcv+0x10/0x24
[<ffffffff803acb50>] ip6_input+0x2c0/0x440
[<ffffffff8030fc00>] __netif_receive_skb_core+0x4a8/0x564
[<ffffffff80310650>] process_backlog+0xb4/0x18c
[<ffffffff80313cbc>] net_rx_action+0x12c/0x210
[<ffffffff80034254>] __do_softirq+0x17c/0x2ac
[<ffffffff800345e0>] irq_exit+0x54/0xb0
[<ffffffff800075a4>] ret_from_irq+0x0/0x4
[<ffffffff800090ec>] rm7k_wait_irqoff+0x24/0x48
[<ffffffff8005e388>] cpu_startup_entry+0xc0/0x148
[<ffffffff805a88b0>] start_kernel+0x37c/0x398
Code: dd0900b8 000330f8 0126302d <dcc60000> 50c0fff1 0047182a a48306a0
03e00008 00000000
---[ end trace b530b0551467f2fd ]---
Kernel panic - not syncing: Fatal exception in interrupt
What happens while auth_enable=0 in that case is, that
ep->auth_hmacs is initialized to NULL in sctp_auth_init_hmacs()
when endpoint is being created.
After that point, if an admin switches over to auth_enable=1,
the machine can crash due to NULL pointer dereference during
reception of an INIT chunk. When we enter sctp_process_init()
via sctp_sf_do_5_1B_init() in order to respond to an INIT chunk,
the INIT verification succeeds and while we walk and process
all INIT params via sctp_process_param() we find that
net->sctp.auth_enable is set, therefore do not fall through,
but invoke sctp_auth_asoc_set_default_hmac() instead, and thus,
dereference what we have set to NULL during endpoint
initialization phase.
The fix is to make auth_enable immutable by caching its value
during endpoint initialization, so that its original value is
being carried along until destruction. The bug seems to originate
from the very first days.
Fix in joint work with Daniel Borkmann.
Reported-by: Joshua Kinard <kumba@gentoo.org>
Signed-off-by: Vlad Yasevich <vyasevic@redhat.com>
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Tested-by: Joshua Kinard <kumba@gentoo.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2014-04-17 23:26:50 +08:00
|
|
|
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
|
2008-08-21 18:34:25 +08:00
|
|
|
struct sctp_hmacalgo __user *p = (void __user *)optval;
|
2007-09-17 10:34:00 +08:00
|
|
|
struct sctp_hmac_algo_param *hmacs;
|
2008-08-21 18:34:25 +08:00
|
|
|
__u16 data_len = 0;
|
|
|
|
u32 num_idents;
|
2016-02-03 23:33:30 +08:00
|
|
|
int i;
|
2008-08-21 18:34:25 +08:00
|
|
|
|
2014-04-17 23:26:50 +08:00
|
|
|
if (!ep->auth_enable)
|
2008-08-21 18:34:25 +08:00
|
|
|
return -EACCES;
|
2007-09-17 10:34:00 +08:00
|
|
|
|
2014-04-17 23:26:50 +08:00
|
|
|
hmacs = ep->auth_hmacs_list;
|
2017-06-30 11:52:16 +08:00
|
|
|
data_len = ntohs(hmacs->param_hdr.length) -
|
|
|
|
sizeof(struct sctp_paramhdr);
|
2007-09-17 10:34:00 +08:00
|
|
|
|
2008-08-21 18:34:25 +08:00
|
|
|
if (len < sizeof(struct sctp_hmacalgo) + data_len)
|
2007-09-17 10:34:00 +08:00
|
|
|
return -EINVAL;
|
2008-08-21 18:34:25 +08:00
|
|
|
|
|
|
|
len = sizeof(struct sctp_hmacalgo) + data_len;
|
|
|
|
num_idents = data_len / sizeof(u16);
|
|
|
|
|
2007-09-17 10:34:00 +08:00
|
|
|
if (put_user(len, optlen))
|
|
|
|
return -EFAULT;
|
2008-08-21 18:34:25 +08:00
|
|
|
if (put_user(num_idents, &p->shmac_num_idents))
|
|
|
|
return -EFAULT;
|
2016-02-03 23:33:30 +08:00
|
|
|
for (i = 0; i < num_idents; i++) {
|
|
|
|
__u16 hmacid = ntohs(hmacs->hmac_ids[i]);
|
|
|
|
|
|
|
|
if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
2007-09-17 10:34:00 +08:00
|
|
|
return 0;
|
|
|
|
}
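/* Example (userspace sketch, not part of this file): SCTP_HMAC_IDENT
 * returns a variable-length list, so the caller must reserve room for
 * the identifiers behind struct sctp_hmacalgo; the two-entry buffer
 * below is an illustrative assumption.
 *
 *	char buf[sizeof(struct sctp_hmacalgo) + 2 * sizeof(uint16_t)];
 *	struct sctp_hmacalgo *algo = (struct sctp_hmacalgo *)buf;
 *	socklen_t blen = sizeof(buf);
 *
 *	if (!getsockopt(fd, IPPROTO_SCTP, SCTP_HMAC_IDENT, buf, &blen))
 *		printf("%u supported HMAC identifiers\n",
 *		       algo->shmac_num_idents);
 */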
|
|
|
|
|
|
|
|
static int sctp_getsockopt_active_key(struct sock *sk, int len,
|
|
|
|
char __user *optval, int __user *optlen)
|
|
|
|
{
|
2014-04-17 23:26:50 +08:00
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authkeyid val;
	struct sctp_association *asoc;

	if (!ep->auth_enable)
		return -EACCES;

	if (len < sizeof(struct sctp_authkeyid))
		return -EINVAL;

	len = sizeof(struct sctp_authkeyid);
	if (copy_from_user(&val, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, val.scact_assoc_id);
	if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc)
		val.scact_keynumber = asoc->active_key_id;
	else
		val.scact_keynumber = ep->active_key_id;

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
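
/* Example (userspace sketch, assumptions only): reading the currently
 * active key number handled above; "fd" is a placeholder socket and
 * scact_assoc_id of 0 asks for the endpoint default:
 *
 *	struct sctp_authkeyid key = { .scact_assoc_id = 0 };
 *	socklen_t optlen = sizeof(key);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY,
 *		       &key, &optlen) == 0)
 *		printf("active key number: %u\n", key.scact_keynumber);
 */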

static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
				    char __user *optval, int __user *optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authchunks __user *p = (void __user *)optval;
	struct sctp_authchunks val;
	struct sctp_association *asoc;
	struct sctp_chunks_param *ch;
	u32 num_chunks = 0;
	char __user *to;

	if (!ep->auth_enable)
		return -EACCES;

	if (len < sizeof(struct sctp_authchunks))
		return -EINVAL;

	if (copy_from_user(&val, optval, sizeof(val)))
		return -EFAULT;

	to = p->gauth_chunks;
	asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
	if (!asoc)
		return -EINVAL;

	ch = asoc->peer.peer_chunks;
	if (!ch)
		goto num;

	/* See if the user provided enough room for all the data */
	num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);
	if (len < num_chunks)
		return -EINVAL;

	if (copy_to_user(to, ch->chunks, num_chunks))
		return -EFAULT;
num:
	len = sizeof(struct sctp_authchunks) + num_chunks;
	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(num_chunks, &p->gauth_number_of_chunks))
		return -EFAULT;
	return 0;
}
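
/* Example (userspace sketch, assumptions only): fetching the peer's
 * authenticated chunk list filled in above; the oversized buffer and the
 * assoc_id variable are assumptions to leave room for the variable-length
 * gauth_chunks[] array:
 *
 *	char buf[sizeof(struct sctp_authchunks) + 256];
 *	struct sctp_authchunks *auth = (struct sctp_authchunks *)buf;
 *	socklen_t optlen = sizeof(buf);
 *	__u32 i;
 *
 *	auth->gauth_assoc_id = assoc_id;
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_PEER_AUTH_CHUNKS,
 *		       auth, &optlen) == 0)
 *		for (i = 0; i < auth->gauth_number_of_chunks; i++)
 *			printf("chunk type: %u\n", auth->gauth_chunks[i]);
 */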

static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
				    char __user *optval, int __user *optlen)
{
	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
	struct sctp_authchunks __user *p = (void __user *)optval;
	struct sctp_authchunks val;
	struct sctp_association *asoc;
	struct sctp_chunks_param *ch;
	u32 num_chunks = 0;
	char __user *to;

	if (!ep->auth_enable)
		return -EACCES;

	if (len < sizeof(struct sctp_authchunks))
		return -EINVAL;

	if (copy_from_user(&val, optval, sizeof(val)))
		return -EFAULT;

	to = p->gauth_chunks;
	asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
	if (!asoc && val.gauth_assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	ch = asoc ? (struct sctp_chunks_param *)asoc->c.auth_chunks
		  : ep->auth_chunk_list;
	if (!ch)
		goto num;

	num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);
	if (len < sizeof(struct sctp_authchunks) + num_chunks)
		return -EINVAL;

	if (copy_to_user(to, ch->chunks, num_chunks))
		return -EFAULT;
num:
	len = sizeof(struct sctp_authchunks) + num_chunks;
	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(num_chunks, &p->gauth_number_of_chunks))
		return -EFAULT;

	return 0;
}

/*
 * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER)
 *
 * This option gets the current number of associations that are attached
 * to a one-to-many style socket.  The option value is an uint32_t.
 */
static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
				    char __user *optval, int __user *optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	u32 val = 0;

	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;

	if (len < sizeof(u32))
		return -EINVAL;

	len = sizeof(u32);

	list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
		val++;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
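
/* Example (userspace sketch, assumptions only): counting associations on a
 * hypothetical one-to-many socket "fd" via the handler above:
 *
 *	uint32_t assoc_num = 0;
 *	socklen_t optlen = sizeof(assoc_num);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER,
 *		       &assoc_num, &optlen) == 0)
 *		printf("%u associations\n", assoc_num);
 */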

/*
 * 8.1.23 SCTP_AUTO_ASCONF
 * See the corresponding setsockopt entry as description
 */
static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
				   char __user *optval, int __user *optlen)
{
	int val = 0;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
		val = 1;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

/*
 * 8.2.6. Get the Current Identifiers of Associations
 *        (SCTP_GET_ASSOC_ID_LIST)
 *
 * This option gets the current list of SCTP association identifiers of
 * the SCTP associations handled by a one-to-many style socket.
 */
static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
				    char __user *optval, int __user *optlen)
{
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_association *asoc;
	struct sctp_assoc_ids *ids;
	u32 num = 0;

	if (sctp_style(sk, TCP))
		return -EOPNOTSUPP;

	if (len < sizeof(struct sctp_assoc_ids))
		return -EINVAL;

	list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
		num++;
	}

	if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
		return -EINVAL;

	len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;

	ids = kmalloc(len, GFP_USER | __GFP_NOWARN);
	if (unlikely(!ids))
		return -ENOMEM;

	ids->gaids_number_of_ids = num;
	num = 0;
	list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
		ids->gaids_assoc_id[num++] = asoc->assoc_id;
	}

	if (put_user(len, optlen) || copy_to_user(optval, ids, len)) {
		kfree(ids);
		return -EFAULT;
	}

	kfree(ids);
	return 0;
}
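
/* Example (userspace sketch, assumptions only): the buffer below is sized
 * for up to 64 associations as an arbitrary assumption; a buffer that is
 * too small makes the handler above fail with EINVAL rather than truncate:
 *
 *	char buf[sizeof(struct sctp_assoc_ids) + 64 * sizeof(sctp_assoc_t)];
 *	struct sctp_assoc_ids *ids = (struct sctp_assoc_ids *)buf;
 *	socklen_t optlen = sizeof(buf);
 *	uint32_t i;
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST,
 *		       ids, &optlen) == 0)
 *		for (i = 0; i < ids->gaids_number_of_ids; i++)
 *			printf("assoc id: %d\n", ids->gaids_assoc_id[i]);
 */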

/*
 * SCTP_PEER_ADDR_THLDS
 *
 * This option allows us to fetch the partially failed threshold for one or all
 * transports in an association.  See Section 6.1 of:
 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
 */
static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
					    char __user *optval,
					    int len,
					    int __user *optlen)
{
	struct sctp_paddrthlds val;
	struct sctp_transport *trans;
	struct sctp_association *asoc;

	if (len < sizeof(struct sctp_paddrthlds))
		return -EINVAL;
	len = sizeof(struct sctp_paddrthlds);
	if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len))
		return -EFAULT;

	if (!sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
		trans = sctp_addr_id2transport(sk, &val.spt_address,
					       val.spt_assoc_id);
		if (!trans)
			return -ENOENT;

		val.spt_pathmaxrxt = trans->pathmaxrxt;
		val.spt_pathpfthld = trans->pf_retrans;

		return 0;
	}

	asoc = sctp_id2assoc(sk, val.spt_assoc_id);
	if (!asoc && val.spt_assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP))
		return -EINVAL;

	if (asoc) {
		val.spt_pathpfthld = asoc->pf_retrans;
		val.spt_pathmaxrxt = asoc->pathmaxrxt;
	} else {
		struct sctp_sock *sp = sctp_sk(sk);

		val.spt_pathpfthld = sp->pf_retrans;
		val.spt_pathmaxrxt = sp->pathmaxrxt;
	}

	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
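
/* Example (userspace sketch, assumptions only): querying the socket-wide
 * thresholds through the handler above; spt_address is left zeroed so the
 * wildcard branch is taken, and SCTP_FUTURE_ASSOC selects the defaults:
 *
 *	struct sctp_paddrthlds thlds;
 *	socklen_t optlen = sizeof(thlds);
 *
 *	memset(&thlds, 0, sizeof(thlds));
 *	thlds.spt_assoc_id = SCTP_FUTURE_ASSOC;
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS,
 *		       &thlds, &optlen) == 0)
 *		printf("pathmaxrxt %u, pf threshold %u\n",
 *		       thlds.spt_pathmaxrxt, thlds.spt_pathpfthld);
 */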

/*
 * SCTP_GET_ASSOC_STATS
 *
 * This option retrieves local per endpoint statistics. It is modeled
 * after OpenSolaris' implementation
 */
static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
				       char __user *optval,
				       int __user *optlen)
{
	struct sctp_assoc_stats sas;
	struct sctp_association *asoc = NULL;

	/* User must provide at least the assoc id */
	if (len < sizeof(sctp_assoc_t))
		return -EINVAL;

	/* Allow the struct to grow and fill in as much as possible */
	len = min_t(size_t, len, sizeof(sas));

	if (copy_from_user(&sas, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, sas.sas_assoc_id);
	if (!asoc)
		return -EINVAL;

	sas.sas_rtxchunks = asoc->stats.rtxchunks;
	sas.sas_gapcnt = asoc->stats.gapcnt;
	sas.sas_outofseqtsns = asoc->stats.outofseqtsns;
	sas.sas_osacks = asoc->stats.osacks;
	sas.sas_isacks = asoc->stats.isacks;
	sas.sas_octrlchunks = asoc->stats.octrlchunks;
	sas.sas_ictrlchunks = asoc->stats.ictrlchunks;
	sas.sas_oodchunks = asoc->stats.oodchunks;
	sas.sas_iodchunks = asoc->stats.iodchunks;
	sas.sas_ouodchunks = asoc->stats.ouodchunks;
	sas.sas_iuodchunks = asoc->stats.iuodchunks;
	sas.sas_idupchunks = asoc->stats.idupchunks;
	sas.sas_opackets = asoc->stats.opackets;
	sas.sas_ipackets = asoc->stats.ipackets;

	/* New high max rto observed, will return 0 if not a single
	 * RTO update took place. obs_rto_ipaddr will be bogus
	 * in such a case
	 */
	sas.sas_maxrto = asoc->stats.max_obs_rto;
	memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr,
		sizeof(struct sockaddr_storage));

	/* Mark beginning of a new observation period */
	asoc->stats.max_obs_rto = asoc->rto_min;

	if (put_user(len, optlen))
		return -EFAULT;

	pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id);

	if (copy_to_user(optval, &sas, len))
		return -EFAULT;

	return 0;
}
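
/* Example (userspace sketch, assumptions only): pulling the per-association
 * statistics filled in above; "fd" and assoc_id are placeholders for an
 * existing socket and association:
 *
 *	struct sctp_assoc_stats stats;
 *	socklen_t optlen = sizeof(stats);
 *
 *	memset(&stats, 0, sizeof(stats));
 *	stats.sas_assoc_id = assoc_id;
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_STATS,
 *		       &stats, &optlen) == 0)
 *		printf("opackets %llu, ipackets %llu, max observed rto %llu\n",
 *		       (unsigned long long)stats.sas_opackets,
 *		       (unsigned long long)stats.sas_ipackets,
 *		       (unsigned long long)stats.sas_maxrto);
 */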

static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len,
				       char __user *optval,
				       int __user *optlen)
{
	int val = 0;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	if (sctp_sk(sk)->recvrcvinfo)
		val = 1;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len,
				       char __user *optval,
				       int __user *optlen)
{
	int val = 0;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	if (sctp_sk(sk)->recvnxtinfo)
		val = 1;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

static int sctp_getsockopt_pr_supported(struct sock *sk, int len,
					char __user *optval,
					int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_association *asoc;
	int retval = -EFAULT;

	if (len < sizeof(params)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(params);
	if (copy_from_user(&params, optval, len))
		goto out;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP)) {
		retval = -EINVAL;
		goto out;
	}

	params.assoc_value = asoc ? asoc->prsctp_enable
				  : sctp_sk(sk)->ep->prsctp_enable;

	if (put_user(len, optlen))
		goto out;

	if (copy_to_user(optval, &params, len))
		goto out;

	retval = 0;

out:
	return retval;
}
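
/* Example (userspace sketch, assumptions only): SCTP_FUTURE_ASSOC asks the
 * handler above for the endpoint-level PR-SCTP setting:
 *
 *	struct sctp_assoc_value av = { .assoc_id = SCTP_FUTURE_ASSOC };
 *	socklen_t optlen = sizeof(av);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_PR_SUPPORTED,
 *		       &av, &optlen) == 0)
 *		printf("PR-SCTP %s\n", av.assoc_value ? "enabled" : "disabled");
 */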

static int sctp_getsockopt_default_prinfo(struct sock *sk, int len,
					  char __user *optval,
					  int __user *optlen)
{
	struct sctp_default_prinfo info;
	struct sctp_association *asoc;
	int retval = -EFAULT;

	if (len < sizeof(info)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(info);
	if (copy_from_user(&info, optval, len))
		goto out;

	asoc = sctp_id2assoc(sk, info.pr_assoc_id);
	if (asoc) {
		info.pr_policy = SCTP_PR_POLICY(asoc->default_flags);
		info.pr_value = asoc->default_timetolive;
	} else if (!info.pr_assoc_id) {
		struct sctp_sock *sp = sctp_sk(sk);

		info.pr_policy = SCTP_PR_POLICY(sp->default_flags);
		info.pr_value = sp->default_timetolive;
	} else {
		retval = -EINVAL;
		goto out;
	}

	if (put_user(len, optlen))
		goto out;

	if (copy_to_user(optval, &info, len))
		goto out;

	retval = 0;

out:
	return retval;
}

static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len,
					  char __user *optval,
					  int __user *optlen)
{
	struct sctp_prstatus params;
	struct sctp_association *asoc;
	int policy;
	int retval = -EINVAL;

	if (len < sizeof(params))
		goto out;

	len = sizeof(params);
	if (copy_from_user(&params, optval, len)) {
		retval = -EFAULT;
		goto out;
	}

	policy = params.sprstat_policy;
	if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)) ||
	    ((policy & SCTP_PR_SCTP_ALL) && (policy & SCTP_PR_SCTP_MASK)))
		goto out;

	asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
	if (!asoc)
		goto out;

	if (policy == SCTP_PR_SCTP_ALL) {
		params.sprstat_abandoned_unsent = 0;
		params.sprstat_abandoned_sent = 0;
		for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
			params.sprstat_abandoned_unsent +=
				asoc->abandoned_unsent[policy];
			params.sprstat_abandoned_sent +=
				asoc->abandoned_sent[policy];
		}
	} else {
		params.sprstat_abandoned_unsent =
			asoc->abandoned_unsent[__SCTP_PR_INDEX(policy)];
		params.sprstat_abandoned_sent =
			asoc->abandoned_sent[__SCTP_PR_INDEX(policy)];
	}

	if (put_user(len, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	if (copy_to_user(optval, &params, len)) {
		retval = -EFAULT;
		goto out;
	}

	retval = 0;

out:
	return retval;
}

static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
					   char __user *optval,
					   int __user *optlen)
{
	struct sctp_stream_out_ext *streamoute;
	struct sctp_association *asoc;
	struct sctp_prstatus params;
	int retval = -EINVAL;
	int policy;

	if (len < sizeof(params))
		goto out;

	len = sizeof(params);
	if (copy_from_user(&params, optval, len)) {
		retval = -EFAULT;
		goto out;
	}

	policy = params.sprstat_policy;
	if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)) ||
	    ((policy & SCTP_PR_SCTP_ALL) && (policy & SCTP_PR_SCTP_MASK)))
		goto out;

	asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
	if (!asoc || params.sprstat_sid >= asoc->stream.outcnt)
		goto out;

	streamoute = SCTP_SO(&asoc->stream, params.sprstat_sid)->ext;
	if (!streamoute) {
		/* Not allocated yet, means all stats are 0 */
		params.sprstat_abandoned_unsent = 0;
		params.sprstat_abandoned_sent = 0;
		retval = 0;
		goto out;
	}

	if (policy == SCTP_PR_SCTP_ALL) {
		params.sprstat_abandoned_unsent = 0;
		params.sprstat_abandoned_sent = 0;
		for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
			params.sprstat_abandoned_unsent +=
				streamoute->abandoned_unsent[policy];
			params.sprstat_abandoned_sent +=
				streamoute->abandoned_sent[policy];
		}
	} else {
		params.sprstat_abandoned_unsent =
			streamoute->abandoned_unsent[__SCTP_PR_INDEX(policy)];
		params.sprstat_abandoned_sent =
			streamoute->abandoned_sent[__SCTP_PR_INDEX(policy)];
	}

	if (put_user(len, optlen) || copy_to_user(optval, &params, len)) {
		retval = -EFAULT;
		goto out;
	}

	retval = 0;

out:
	return retval;
}
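
/* Example (userspace sketch, assumptions only): reading the per-stream
 * PR-SCTP abandon counters summed above; assoc_id and the stream number
 * are placeholders:
 *
 *	struct sctp_prstatus pr;
 *	socklen_t optlen = sizeof(pr);
 *
 *	memset(&pr, 0, sizeof(pr));
 *	pr.sprstat_assoc_id = assoc_id;
 *	pr.sprstat_sid = 0;
 *	pr.sprstat_policy = SCTP_PR_SCTP_ALL;
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_PR_STREAM_STATUS,
 *		       &pr, &optlen) == 0)
 *		printf("abandoned: %llu unsent, %llu sent\n",
 *		       (unsigned long long)pr.sprstat_abandoned_unsent,
 *		       (unsigned long long)pr.sprstat_abandoned_sent);
 */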

static int sctp_getsockopt_reconfig_supported(struct sock *sk, int len,
					      char __user *optval,
					      int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_association *asoc;
	int retval = -EFAULT;

	if (len < sizeof(params)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(params);
	if (copy_from_user(&params, optval, len))
		goto out;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP)) {
		retval = -EINVAL;
		goto out;
	}

	params.assoc_value = asoc ? asoc->reconf_enable
				  : sctp_sk(sk)->ep->reconf_enable;

	if (put_user(len, optlen))
		goto out;

	if (copy_to_user(optval, &params, len))
		goto out;

	retval = 0;

out:
	return retval;
}

static int sctp_getsockopt_enable_strreset(struct sock *sk, int len,
					   char __user *optval,
					   int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_association *asoc;
	int retval = -EFAULT;

	if (len < sizeof(params)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(params);
	if (copy_from_user(&params, optval, len))
		goto out;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (asoc) {
		params.assoc_value = asoc->strreset_enable;
	} else if (!params.assoc_id) {
		struct sctp_sock *sp = sctp_sk(sk);

		params.assoc_value = sp->ep->strreset_enable;
	} else {
		retval = -EINVAL;
		goto out;
	}

	if (put_user(len, optlen))
		goto out;

	if (copy_to_user(optval, &params, len))
		goto out;

	retval = 0;

out:
	return retval;
}

static int sctp_getsockopt_scheduler(struct sock *sk, int len,
				     char __user *optval,
				     int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_association *asoc;
	int retval = -EFAULT;

	if (len < sizeof(params)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(params);
	if (copy_from_user(&params, optval, len))
		goto out;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc) {
		retval = -EINVAL;
		goto out;
	}

	params.assoc_value = sctp_sched_get_sched(asoc);

	if (put_user(len, optlen))
		goto out;

	if (copy_to_user(optval, &params, len))
		goto out;

	retval = 0;

out:
	return retval;
}

static int sctp_getsockopt_scheduler_value(struct sock *sk, int len,
					   char __user *optval,
					   int __user *optlen)
{
	struct sctp_stream_value params;
	struct sctp_association *asoc;
	int retval = -EFAULT;

	if (len < sizeof(params)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(params);
	if (copy_from_user(&params, optval, len))
		goto out;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc) {
		retval = -EINVAL;
		goto out;
	}

	retval = sctp_sched_get_value(asoc, params.stream_id,
				      &params.stream_value);
	if (retval)
		goto out;

	if (put_user(len, optlen)) {
		retval = -EFAULT;
		goto out;
	}

	if (copy_to_user(optval, &params, len)) {
		retval = -EFAULT;
		goto out;
	}

out:
	return retval;
}
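
/* Example (userspace sketch, assumptions only): reading one stream's
 * scheduler value (e.g. its priority under the SCTP_SS_PRIO scheduler)
 * through the handler above; assoc_id and the stream id are placeholders:
 *
 *	struct sctp_stream_value sv;
 *	socklen_t optlen = sizeof(sv);
 *
 *	memset(&sv, 0, sizeof(sv));
 *	sv.assoc_id = assoc_id;
 *	sv.stream_id = 1;
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER_VALUE,
 *		       &sv, &optlen) == 0)
 *		printf("stream 1 scheduler value: %u\n", sv.stream_value);
 */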

static int sctp_getsockopt_interleaving_supported(struct sock *sk, int len,
						  char __user *optval,
						  int __user *optlen)
{
	struct sctp_assoc_value params;
	struct sctp_association *asoc;
	int retval = -EFAULT;

	if (len < sizeof(params)) {
		retval = -EINVAL;
		goto out;
	}

	len = sizeof(params);
	if (copy_from_user(&params, optval, len))
		goto out;

	asoc = sctp_id2assoc(sk, params.assoc_id);
	if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
	    sctp_style(sk, UDP)) {
		retval = -EINVAL;
		goto out;
	}

	params.assoc_value = asoc ? asoc->intl_enable
				  : sctp_sk(sk)->strm_interleave;

	if (put_user(len, optlen))
		goto out;

	if (copy_to_user(optval, &params, len))
		goto out;

	retval = 0;

out:
	return retval;
}

static int sctp_getsockopt_reuse_port(struct sock *sk, int len,
				      char __user *optval,
				      int __user *optlen)
{
	int val;

	if (len < sizeof(int))
		return -EINVAL;

	len = sizeof(int);
	val = sctp_sk(sk)->reuse;
	if (put_user(len, optlen))
		return -EFAULT;

	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

static int sctp_getsockopt_event(struct sock *sk, int len, char __user *optval,
				 int __user *optlen)
{
	struct sctp_association *asoc;
	struct sctp_event param;
	__u16 subscribe;

	if (len < sizeof(param))
		return -EINVAL;

	len = sizeof(param);
	if (copy_from_user(&param, optval, len))
		return -EFAULT;

	if (param.se_type < SCTP_SN_TYPE_BASE ||
	    param.se_type > SCTP_SN_TYPE_MAX)
		return -EINVAL;

	asoc = sctp_id2assoc(sk, param.se_assoc_id);
	subscribe = asoc ? asoc->subscribe : sctp_sk(sk)->subscribe;
	param.se_on = sctp_ulpevent_type_enabled(subscribe, param.se_type);

	if (put_user(len, optlen))
		return -EFAULT;

	if (copy_to_user(optval, &param, len))
		return -EFAULT;

	return 0;
}
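
/* Example (userspace sketch, assumptions only): checking whether a single
 * event type is subscribed, mirroring the handler above; SCTP_FUTURE_ASSOC
 * selects the socket-level subscription set:
 *
 *	struct sctp_event ev;
 *	socklen_t optlen = sizeof(ev);
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.se_assoc_id = SCTP_FUTURE_ASSOC;
 *	ev.se_type = SCTP_SHUTDOWN_EVENT;
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_EVENT, &ev, &optlen) == 0)
 *		printf("shutdown events %s\n", ev.se_on ? "on" : "off");
 */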

static int sctp_getsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	int retval = 0;
	int len;

	pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);

	/* I can hardly begin to describe how wrong this is.  This is
	 * so broken as to be worse than useless.  The API draft
	 * REALLY is NOT helpful here...  I am not convinced that the
	 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP
	 * are at all well-founded.
	 */
	if (level != SOL_SCTP) {
		struct sctp_af *af = sctp_sk(sk)->pf->af;

		retval = af->getsockopt(sk, level, optname, optval, optlen);
		return retval;
	}

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	lock_sock(sk);

	switch (optname) {
	case SCTP_STATUS:
		retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen);
		break;
	case SCTP_DISABLE_FRAGMENTS:
		retval = sctp_getsockopt_disable_fragments(sk, len, optval,
							   optlen);
		break;
	case SCTP_EVENTS:
		retval = sctp_getsockopt_events(sk, len, optval, optlen);
		break;
	case SCTP_AUTOCLOSE:
		retval = sctp_getsockopt_autoclose(sk, len, optval, optlen);
		break;
	case SCTP_SOCKOPT_PEELOFF:
		retval = sctp_getsockopt_peeloff(sk, len, optval, optlen);
		break;
	case SCTP_SOCKOPT_PEELOFF_FLAGS:
		retval = sctp_getsockopt_peeloff_flags(sk, len, optval, optlen);
		break;
	case SCTP_PEER_ADDR_PARAMS:
		retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
							  optlen);
		break;
	case SCTP_DELAYED_SACK:
		retval = sctp_getsockopt_delayed_ack(sk, len, optval,
						     optlen);
		break;
	case SCTP_INITMSG:
		retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
		break;
	case SCTP_GET_PEER_ADDRS:
		retval = sctp_getsockopt_peer_addrs(sk, len, optval,
						    optlen);
		break;
	case SCTP_GET_LOCAL_ADDRS:
		retval = sctp_getsockopt_local_addrs(sk, len, optval,
						     optlen);
		break;
	case SCTP_SOCKOPT_CONNECTX3:
		retval = sctp_getsockopt_connectx3(sk, len, optval, optlen);
		break;
	case SCTP_DEFAULT_SEND_PARAM:
		retval = sctp_getsockopt_default_send_param(sk, len,
							    optval, optlen);
		break;
	case SCTP_DEFAULT_SNDINFO:
		retval = sctp_getsockopt_default_sndinfo(sk, len,
							 optval, optlen);
		break;
	case SCTP_PRIMARY_ADDR:
		retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen);
		break;
	case SCTP_NODELAY:
		retval = sctp_getsockopt_nodelay(sk, len, optval, optlen);
		break;
	case SCTP_RTOINFO:
		retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen);
		break;
	case SCTP_ASSOCINFO:
		retval = sctp_getsockopt_associnfo(sk, len, optval, optlen);
		break;
	case SCTP_I_WANT_MAPPED_V4_ADDR:
		retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen);
		break;
	case SCTP_MAXSEG:
		retval = sctp_getsockopt_maxseg(sk, len, optval, optlen);
		break;
	case SCTP_GET_PEER_ADDR_INFO:
		retval = sctp_getsockopt_peer_addr_info(sk, len, optval,
							optlen);
		break;
	case SCTP_ADAPTATION_LAYER:
		retval = sctp_getsockopt_adaptation_layer(sk, len, optval,
							  optlen);
		break;
	case SCTP_CONTEXT:
		retval = sctp_getsockopt_context(sk, len, optval, optlen);
		break;
	case SCTP_FRAGMENT_INTERLEAVE:
		retval = sctp_getsockopt_fragment_interleave(sk, len, optval,
							     optlen);
		break;
	case SCTP_PARTIAL_DELIVERY_POINT:
		retval = sctp_getsockopt_partial_delivery_point(sk, len, optval,
								optlen);
		break;
	case SCTP_MAX_BURST:
		retval = sctp_getsockopt_maxburst(sk, len, optval, optlen);
		break;
	case SCTP_AUTH_KEY:
	case SCTP_AUTH_CHUNK:
	case SCTP_AUTH_DELETE_KEY:
	case SCTP_AUTH_DEACTIVATE_KEY:
		retval = -EOPNOTSUPP;
		break;
	case SCTP_HMAC_IDENT:
		retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen);
		break;
	case SCTP_AUTH_ACTIVE_KEY:
		retval = sctp_getsockopt_active_key(sk, len, optval, optlen);
		break;
	case SCTP_PEER_AUTH_CHUNKS:
		retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval,
							  optlen);
		break;
	case SCTP_LOCAL_AUTH_CHUNKS:
		retval = sctp_getsockopt_local_auth_chunks(sk, len, optval,
							   optlen);
		break;
	case SCTP_GET_ASSOC_NUMBER:
		retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen);
		break;
	case SCTP_GET_ASSOC_ID_LIST:
		retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
		break;
	case SCTP_AUTO_ASCONF:
		retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
		break;
	case SCTP_PEER_ADDR_THLDS:
		retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen);
		break;
	case SCTP_GET_ASSOC_STATS:
		retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen);
		break;
	case SCTP_RECVRCVINFO:
		retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen);
		break;
	case SCTP_RECVNXTINFO:
		retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen);
		break;
	case SCTP_PR_SUPPORTED:
		retval = sctp_getsockopt_pr_supported(sk, len, optval, optlen);
		break;
	case SCTP_DEFAULT_PRINFO:
		retval = sctp_getsockopt_default_prinfo(sk, len, optval,
							optlen);
		break;
	case SCTP_PR_ASSOC_STATUS:
		retval = sctp_getsockopt_pr_assocstatus(sk, len, optval,
							optlen);
		break;
	case SCTP_PR_STREAM_STATUS:
		retval = sctp_getsockopt_pr_streamstatus(sk, len, optval,
							 optlen);
		break;
	case SCTP_RECONFIG_SUPPORTED:
		retval = sctp_getsockopt_reconfig_supported(sk, len, optval,
							    optlen);
		break;
	case SCTP_ENABLE_STREAM_RESET:
		retval = sctp_getsockopt_enable_strreset(sk, len, optval,
							 optlen);
		break;
	case SCTP_STREAM_SCHEDULER:
		retval = sctp_getsockopt_scheduler(sk, len, optval,
						   optlen);
		break;
	case SCTP_STREAM_SCHEDULER_VALUE:
		retval = sctp_getsockopt_scheduler_value(sk, len, optval,
							 optlen);
		break;
	case SCTP_INTERLEAVING_SUPPORTED:
		retval = sctp_getsockopt_interleaving_supported(sk, len, optval,
								optlen);
		break;
	case SCTP_REUSE_PORT:
		retval = sctp_getsockopt_reuse_port(sk, len, optval, optlen);
		break;
	case SCTP_EVENT:
		retval = sctp_getsockopt_event(sk, len, optval, optlen);
		break;
	default:
		retval = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return retval;
}

static int sctp_hash(struct sock *sk)
{
	/* STUB */
	return 0;
}

static void sctp_unhash(struct sock *sk)
{
	/* STUB */
}

/* Check if port is acceptable.  Possibly find first available port.
 *
 * The port hash table (contained in the 'global' SCTP protocol storage
 * returned by struct sctp_protocol *sctp_get_protocol()). The hash
 * table is an array of 4096 lists (sctp_bind_hashbucket). Each
 * list (the list number is the port number hashed out, so as you
 * would expect from a hash function, all the ports in a given list have
 * such a number that hashes out to the same list number; you were
 * expecting that, right?); so each list has a set of ports, with a
 * link to the socket (struct sock) that uses it, the port number and
 * a fastreuse flag (FIXME: NPI ipg).
 */
static struct sctp_bind_bucket *sctp_bucket_create(
	struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);

static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
{
	struct sctp_sock *sp = sctp_sk(sk);
	bool reuse = (sk->sk_reuse || sp->reuse);
	struct sctp_bind_hashbucket *head; /* hash list */
	kuid_t uid = sock_i_uid(sk);
|
|
|
struct sctp_bind_bucket *pp;
|
2005-04-17 06:20:36 +08:00
|
|
|
unsigned short snum;
|
|
|
|
int ret;
|
|
|
|
|
2006-11-21 09:02:01 +08:00
|
|
|
snum = ntohs(addr->v4.sin_port);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-06-29 01:49:40 +08:00
|
|
|
pr_debug("%s: begins, snum:%d\n", __func__, snum);
|
|
|
|
|
2014-01-21 15:44:07 +08:00
|
|
|
local_bh_disable();
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (snum == 0) {
|
2007-10-11 08:30:18 +08:00
|
|
|
/* Search for an available port. */
|
2007-10-11 08:30:46 +08:00
|
|
|
int low, high, remaining, index;
|
|
|
|
unsigned int rover;
|
2014-05-13 07:04:53 +08:00
|
|
|
struct net *net = sock_net(sk);
|
2007-10-11 08:30:46 +08:00
|
|
|
|
2014-05-13 07:04:53 +08:00
|
|
|
inet_get_local_port_range(net, &low, &high);
|
2007-10-11 08:30:46 +08:00
|
|
|
remaining = (high - low) + 1;
|
2014-01-11 20:15:59 +08:00
|
|
|
rover = prandom_u32() % remaining + low;
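/* Walk the local port range starting at a random offset, wrapping
 * at 'high', until a free port hash bucket is found or the whole
 * range has been tried.
 */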
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
do {
|
|
|
|
rover++;
|
|
|
|
if ((rover < low) || (rover > high))
|
|
|
|
rover = low;
|
2014-05-13 07:04:53 +08:00
|
|
|
if (inet_is_local_reserved_port(net, rover))
|
2010-05-05 08:27:06 +08:00
|
|
|
continue;
|
2012-08-06 16:39:38 +08:00
|
|
|
index = sctp_phashfn(sock_net(sk), rover);
|
2005-04-17 06:20:36 +08:00
|
|
|
head = &sctp_port_hashtable[index];
|
2014-01-21 15:44:08 +08:00
|
|
|
spin_lock(&head->lock);
|
2013-02-28 09:06:00 +08:00
|
|
|
sctp_for_each_hentry(pp, &head->chain)
|
2012-08-06 16:39:38 +08:00
|
|
|
if ((pp->port == rover) &&
|
|
|
|
net_eq(sock_net(sk), pp->net))
|
2005-04-17 06:20:36 +08:00
|
|
|
goto next;
|
|
|
|
break;
|
|
|
|
next:
|
2014-01-21 15:44:08 +08:00
|
|
|
spin_unlock(&head->lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
} while (--remaining > 0);
|
|
|
|
|
|
|
|
/* Exhausted local port range during search? */
|
|
|
|
ret = 1;
|
|
|
|
if (remaining <= 0)
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
/* OK, here is the one we will use. HEAD (the port
|
|
|
|
* hash table list entry) is non-NULL and we hold its
|
|
|
|
* lock.
|
|
|
|
*/
|
|
|
|
snum = rover;
|
|
|
|
} else {
|
|
|
|
/* We are given a specific port number; we verify
|
|
|
|
* that it is not being used. If it is used, we will
|
|
|
|
* exhaust the search in the hash list corresponding
|
|
|
|
* to the port number (snum) - we detect that with the
|
|
|
|
* port iterator, pp being NULL.
|
|
|
|
*/
|
2012-08-06 16:39:38 +08:00
|
|
|
head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
|
2014-01-21 15:44:08 +08:00
|
|
|
spin_lock(&head->lock);
|
2013-02-28 09:06:00 +08:00
|
|
|
sctp_for_each_hentry(pp, &head->chain) {
|
2012-08-06 16:39:38 +08:00
|
|
|
if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
|
2005-04-17 06:20:36 +08:00
|
|
|
goto pp_found;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
pp = NULL;
|
|
|
|
goto pp_not_found;
|
|
|
|
pp_found:
|
|
|
|
if (!hlist_empty(&pp->owner)) {
|
|
|
|
/* We had a port hash table hit - there is an
|
|
|
|
* available port (pp != NULL) and it is being
|
|
|
|
* used by other socket (pp->owner not empty); that other
|
|
|
|
* socket is going to be sk2.
|
|
|
|
*/
|
|
|
|
struct sock *sk2;
|
|
|
|
|
2013-06-29 01:49:40 +08:00
|
|
|
pr_debug("%s: found a possible match\n", __func__);
|
|
|
|
|
2018-11-12 18:27:17 +08:00
|
|
|
if ((pp->fastreuse && reuse &&
|
|
|
|
sk->sk_state != SCTP_SS_LISTENING) ||
|
|
|
|
(pp->fastreuseport && sk->sk_reuseport &&
|
|
|
|
uid_eq(pp->fastuid, uid)))
|
2005-04-17 06:20:36 +08:00
|
|
|
goto success;
|
|
|
|
|
|
|
|
/* Run through the list of sockets bound to the port
|
|
|
|
* (pp->port) [via the pointers bind_next and
|
|
|
|
* bind_pprev in the struct sock *sk2 (pp->sk)]. On each one,
|
|
|
|
* we get the endpoint they describe and run through
|
|
|
|
* the endpoint's list of IP (v4 or v6) addresses,
|
|
|
|
* comparing each of the addresses with the address of
|
|
|
|
* the socket sk. If we find a match, then that means
|
|
|
|
* that this port/socket (sk) combination is already
|
|
|
|
* in an endpoint.
|
|
|
|
*/
|
2013-02-28 09:06:00 +08:00
|
|
|
sk_for_each_bound(sk2, &pp->owner) {
|
2018-11-12 18:27:17 +08:00
|
|
|
struct sctp_sock *sp2 = sctp_sk(sk2);
|
|
|
|
struct sctp_endpoint *ep2 = sp2->ep;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-07-19 14:06:32 +08:00
|
|
|
if (sk == sk2 ||
|
2018-11-12 18:27:17 +08:00
|
|
|
(reuse && (sk2->sk_reuse || sp2->reuse) &&
|
|
|
|
sk2->sk_state != SCTP_SS_LISTENING) ||
|
|
|
|
(sk->sk_reuseport && sk2->sk_reuseport &&
|
|
|
|
uid_eq(uid, sock_i_uid(sk2))))
|
2005-04-17 06:20:36 +08:00
|
|
|
continue;
|
|
|
|
|
2018-11-12 18:27:17 +08:00
|
|
|
if (sctp_bind_addr_conflict(&ep2->base.bind_addr,
|
|
|
|
addr, sp2, sp)) {
|
2005-04-17 06:20:36 +08:00
|
|
|
ret = (long)sk2;
|
|
|
|
goto fail_unlock;
|
|
|
|
}
|
|
|
|
}
|
2013-06-29 01:49:40 +08:00
|
|
|
|
|
|
|
pr_debug("%s: found a match\n", __func__);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
pp_not_found:
|
|
|
|
/* If there was a hash table miss, create a new port. */
|
|
|
|
ret = 1;
|
2012-08-06 16:39:38 +08:00
|
|
|
if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
|
2005-04-17 06:20:36 +08:00
|
|
|
goto fail_unlock;
|
|
|
|
|
|
|
|
/* In either case (hit or miss), make sure fastreuse is 1 only
|
|
|
|
* if sk->sk_reuse is too (that is, if the caller requested
|
|
|
|
* SO_REUSEADDR on this socket -sk-).
|
|
|
|
*/
|
2007-05-05 04:34:49 +08:00
|
|
|
if (hlist_empty(&pp->owner)) {
|
2018-06-28 15:31:00 +08:00
|
|
|
if (reuse && sk->sk_state != SCTP_SS_LISTENING)
|
2007-05-05 04:34:49 +08:00
|
|
|
pp->fastreuse = 1;
|
|
|
|
else
|
|
|
|
pp->fastreuse = 0;
|
2018-11-12 18:27:17 +08:00
|
|
|
|
|
|
|
if (sk->sk_reuseport) {
|
|
|
|
pp->fastreuseport = 1;
|
|
|
|
pp->fastuid = uid;
|
|
|
|
} else {
|
|
|
|
pp->fastreuseport = 0;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (pp->fastreuse &&
|
|
|
|
(!reuse || sk->sk_state == SCTP_SS_LISTENING))
|
|
|
|
pp->fastreuse = 0;
|
|
|
|
|
|
|
|
if (pp->fastreuseport &&
|
|
|
|
(!sk->sk_reuseport || !uid_eq(pp->fastuid, uid)))
|
|
|
|
pp->fastreuseport = 0;
|
|
|
|
}
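/* pp->fastreuse/fastreuseport cache whether every socket currently
 * bound to this port allows sharing, so a later bind with matching
 * reuse settings can take the early 'goto success' path without
 * walking the owner list again.
 */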
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* We are set, so fill up all the data in the hash table
|
|
|
|
* entry, tie the socket list information with the rest of the
|
|
|
|
* sockets FIXME: Blurry, NPI (ipg).
|
|
|
|
*/
|
|
|
|
success:
|
2018-11-12 18:27:17 +08:00
|
|
|
if (!sp->bind_hash) {
|
2009-10-15 14:30:45 +08:00
|
|
|
inet_sk(sk)->inet_num = snum;
|
2005-04-17 06:20:36 +08:00
|
|
|
sk_add_bind_node(sk, &pp->owner);
|
2018-11-12 18:27:17 +08:00
|
|
|
sp->bind_hash = pp;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
fail_unlock:
|
2014-01-21 15:44:08 +08:00
|
|
|
spin_unlock(&head->lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
fail:
|
2014-01-21 15:44:07 +08:00
|
|
|
local_bh_enable();
|
2005-04-17 06:20:36 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Assign a 'snum' port to the socket. If snum == 0, an ephemeral
|
|
|
|
* port is requested.
|
|
|
|
*/
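/* Returns 0 if the port was successfully obtained and nonzero otherwise;
 * sctp_get_port_local()'s long result (0, 1 or a conflicting sock
 * pointer) is collapsed to 0/1 here.
 */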
|
|
|
|
static int sctp_get_port(struct sock *sk, unsigned short snum)
|
|
|
|
{
|
|
|
|
union sctp_addr addr;
|
|
|
|
struct sctp_af *af = sctp_sk(sk)->pf->af;
|
|
|
|
|
|
|
|
/* Set up a dummy address struct from the sk. */
|
|
|
|
af->from_sk(&addr, sk);
|
|
|
|
addr.v4.sin_port = htons(snum);
|
|
|
|
|
|
|
|
/* Note: sk->sk_num gets filled in if ephemeral port request. */
|
2013-06-26 00:17:30 +08:00
|
|
|
return !!sctp_get_port_local(sk, &addr);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2009-03-12 17:49:17 +08:00
|
|
|
* Move a socket to LISTENING state.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2013-06-17 17:40:05 +08:00
|
|
|
static int sctp_listen_start(struct sock *sk, int backlog)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
|
|
|
struct sctp_endpoint *ep = sp->ep;
|
2016-01-24 21:20:12 +08:00
|
|
|
struct crypto_shash *tfm = NULL;
|
2012-10-24 17:20:03 +08:00
|
|
|
char alg[32];
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-03-12 17:49:17 +08:00
|
|
|
/* Allocate HMAC for generating cookie. */
|
2012-10-24 17:20:03 +08:00
|
|
|
if (!sp->hmac && sp->sctp_hmac_alg) {
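/* sp->sctp_hmac_alg names the digest configured for this socket
 * (e.g. "sha1"); the resulting "hmac(<alg>)" transform is used to
 * sign the stateless cookie carried in INIT-ACK/COOKIE-ECHO.
 */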
|
|
|
|
sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
|
2016-01-24 21:20:12 +08:00
|
|
|
tfm = crypto_alloc_shash(alg, 0, 0);
|
2009-03-12 17:49:17 +08:00
|
|
|
if (IS_ERR(tfm)) {
|
2012-05-14 05:56:26 +08:00
|
|
|
net_info_ratelimited("failed to load transform for %s: %ld\n",
|
2012-10-24 17:20:03 +08:00
|
|
|
sp->sctp_hmac_alg, PTR_ERR(tfm));
|
2009-03-12 17:49:17 +08:00
|
|
|
return -ENOSYS;
|
|
|
|
}
|
|
|
|
sctp_sk(sk)->hmac = tfm;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If a bind() or sctp_bindx() is not called prior to a listen()
|
|
|
|
* call that allows new associations to be accepted, the system
|
|
|
|
* picks an ephemeral port and will choose an address set equivalent
|
|
|
|
* to binding with a wildcard address.
|
|
|
|
*
|
|
|
|
* This is not currently spelled out in the SCTP sockets
|
|
|
|
* extensions draft, but follows the practice as seen in TCP
|
|
|
|
* sockets.
|
2007-05-05 04:34:49 +08:00
|
|
|
*
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2017-12-20 11:12:54 +08:00
|
|
|
inet_sk_set_state(sk, SCTP_SS_LISTENING);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (!ep->base.bind_addr.port) {
|
|
|
|
if (sctp_autobind(sk))
|
|
|
|
return -EAGAIN;
|
2008-07-19 14:06:32 +08:00
|
|
|
} else {
|
2009-10-15 14:30:45 +08:00
|
|
|
if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
|
2017-12-20 11:12:54 +08:00
|
|
|
inet_sk_set_state(sk, SCTP_SS_CLOSED);
|
2008-07-19 14:06:32 +08:00
|
|
|
return -EADDRINUSE;
|
|
|
|
}
|
|
|
|
}
|
2007-05-05 04:34:49 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
sk->sk_max_ack_backlog = backlog;
|
2018-11-12 18:27:16 +08:00
|
|
|
return sctp_hash_endpoint(ep);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2009-03-12 17:49:17 +08:00
|
|
|
* 4.1.3 / 5.1.3 listen()
|
|
|
|
*
|
|
|
|
* By default, new associations are not accepted for UDP style sockets.
|
|
|
|
* An application uses listen() to mark a socket as being able to
|
|
|
|
* accept new associations.
|
|
|
|
*
|
|
|
|
* On TCP style sockets, applications use listen() to ready the SCTP
|
|
|
|
* endpoint for accepting inbound associations.
|
|
|
|
*
|
|
|
|
* On both types of endpoints a backlog of '0' disables listening.
|
|
|
|
*
|
2005-04-17 06:20:36 +08:00
|
|
|
* Move a socket to LISTENING state.
|
|
|
|
*/
|
|
|
|
int sctp_inet_listen(struct socket *sock, int backlog)
|
|
|
|
{
|
|
|
|
struct sock *sk = sock->sk;
|
2009-03-12 17:49:17 +08:00
|
|
|
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
|
2005-04-17 06:20:36 +08:00
|
|
|
int err = -EINVAL;
|
|
|
|
|
|
|
|
if (unlikely(backlog < 0))
|
2009-03-12 17:49:17 +08:00
|
|
|
return err;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2014-01-21 15:44:11 +08:00
|
|
|
lock_sock(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-03-12 17:49:17 +08:00
|
|
|
/* Peeled-off sockets are not allowed to listen(). */
|
|
|
|
if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
|
|
|
|
goto out;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (sock->state != SS_UNCONNECTED)
|
|
|
|
goto out;
|
|
|
|
|
2017-04-06 13:10:52 +08:00
|
|
|
if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
|
|
|
|
goto out;
|
|
|
|
|
2009-03-12 17:49:17 +08:00
|
|
|
/* If backlog is zero, disable listening. */
|
|
|
|
if (!backlog) {
|
|
|
|
if (sctp_sstate(sk, CLOSED))
|
2005-04-17 06:20:36 +08:00
|
|
|
goto out;
|
|
|
|
|
2009-03-12 17:49:17 +08:00
|
|
|
err = 0;
|
|
|
|
sctp_unhash_endpoint(ep);
|
|
|
|
sk->sk_state = SCTP_SS_CLOSED;
|
2018-06-28 15:31:00 +08:00
|
|
|
if (sk->sk_reuse || sctp_sk(sk)->reuse)
|
2009-03-12 17:49:17 +08:00
|
|
|
sctp_sk(sk)->bind_hash->fastreuse = 1;
|
|
|
|
goto out;
|
2007-04-21 08:09:22 +08:00
|
|
|
}
|
|
|
|
|
2009-03-12 17:49:17 +08:00
|
|
|
/* If we are already listening, just update the backlog */
|
|
|
|
if (sctp_sstate(sk, LISTENING))
|
|
|
|
sk->sk_max_ack_backlog = backlog;
|
|
|
|
else {
|
|
|
|
err = sctp_listen_start(sk, backlog);
|
|
|
|
if (err)
|
|
|
|
goto out;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-03-12 17:49:17 +08:00
|
|
|
err = 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
out:
|
2014-01-21 15:44:11 +08:00
|
|
|
release_sock(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This function is done by modeling the current datagram_poll() and the
|
|
|
|
* tcp_poll(). Note that, based on these implementations, we don't
|
|
|
|
* lock the socket in this function, even though it seems that,
|
|
|
|
* ideally, locking or some other mechanisms can be used to ensure
|
2005-12-20 06:24:40 +08:00
|
|
|
* the integrity of the counters (sndbuf and wmem_alloc) used
|
2005-04-17 06:20:36 +08:00
|
|
|
* in this place. We assume that we don't need locks either until proven
|
|
|
|
* otherwise.
|
|
|
|
*
|
|
|
|
* Another thing to note is that we include the Async I/O support
|
|
|
|
* here, again, by modeling the current TCP/UDP code. We don't have
|
|
|
|
* a good way to test with it yet.
|
|
|
|
*/
|
2018-06-29 00:43:44 +08:00
|
|
|
__poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct sock *sk = sock->sk;
|
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
2017-07-03 12:01:49 +08:00
|
|
|
__poll_t mask;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-06-29 00:43:44 +08:00
|
|
|
poll_wait(file, sk_sleep(sk), wait);
|
|
|
|
|
2016-04-13 05:11:31 +08:00
|
|
|
sock_rps_record_flow(sk);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* A TCP-style listening socket becomes readable when the accept queue
|
|
|
|
* is not empty.
|
|
|
|
*/
|
|
|
|
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
|
|
|
|
return (!list_empty(&sp->ep->asocs)) ?
|
2018-02-12 06:34:03 +08:00
|
|
|
(EPOLLIN | EPOLLRDNORM) : 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
mask = 0;
|
|
|
|
|
|
|
|
/* Are there any exceptional events? */
|
|
|
|
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
|
2018-02-12 06:34:03 +08:00
|
|
|
mask |= EPOLLERR |
|
|
|
|
(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
|
2006-03-25 19:07:39 +08:00
|
|
|
if (sk->sk_shutdown & RCV_SHUTDOWN)
|
2018-02-12 06:34:03 +08:00
|
|
|
mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
|
2005-04-17 06:20:36 +08:00
|
|
|
if (sk->sk_shutdown == SHUTDOWN_MASK)
|
2018-02-12 06:34:03 +08:00
|
|
|
mask |= EPOLLHUP;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Is it readable? Reconsider this code with TCP-style support. */
|
2010-09-06 19:13:50 +08:00
|
|
|
if (!skb_queue_empty(&sk->sk_receive_queue))
|
2018-02-12 06:34:03 +08:00
|
|
|
mask |= EPOLLIN | EPOLLRDNORM;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* The association is either gone or not ready. */
|
|
|
|
if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED))
|
|
|
|
return mask;
|
|
|
|
|
|
|
|
/* Is it writable? */
|
|
|
|
if (sctp_writeable(sk)) {
|
2018-02-12 06:34:03 +08:00
|
|
|
mask |= EPOLLOUT | EPOLLWRNORM;
|
2005-04-17 06:20:36 +08:00
|
|
|
} else {
|
2015-11-30 12:03:10 +08:00
|
|
|
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Since the socket is not locked, the buffer
|
|
|
|
* might be made available after the writeable check and
|
|
|
|
* before the bit is set. This could cause a lost I/O
|
|
|
|
* signal. tcp_poll() has a race breaker for this race
|
|
|
|
* condition. Based on their implementation, we put
|
|
|
|
* in the following code to cover it as well.
|
|
|
|
*/
|
|
|
|
if (sctp_writeable(sk))
|
2018-02-12 06:34:03 +08:00
|
|
|
mask |= EPOLLOUT | EPOLLWRNORM;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
return mask;
|
|
|
|
}
|
|
|
|
|
|
|
|
/********************************************************************
|
|
|
|
* 2nd Level Abstractions
|
|
|
|
********************************************************************/
|
|
|
|
|
|
|
|
static struct sctp_bind_bucket *sctp_bucket_create(
|
2012-08-06 16:39:38 +08:00
|
|
|
struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct sctp_bind_bucket *pp;
|
|
|
|
|
2006-12-07 12:33:16 +08:00
|
|
|
pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
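/* GFP_ATOMIC is required here: the caller (sctp_get_port_local)
 * holds the bucket spinlock with bottom halves disabled.
 */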
|
2005-04-17 06:20:36 +08:00
|
|
|
if (pp) {
|
2008-04-10 16:58:06 +08:00
|
|
|
SCTP_DBG_OBJCNT_INC(bind_bucket);
|
2005-04-17 06:20:36 +08:00
|
|
|
pp->port = snum;
|
|
|
|
pp->fastreuse = 0;
|
|
|
|
INIT_HLIST_HEAD(&pp->owner);
|
2012-08-06 16:39:38 +08:00
|
|
|
pp->net = net;
|
2007-11-10 00:43:40 +08:00
|
|
|
hlist_add_head(&pp->node, &head->chain);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
return pp;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Caller must hold hashbucket lock for this tb with local BH disabled */
|
|
|
|
static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
|
|
|
|
{
|
2006-07-22 05:45:47 +08:00
|
|
|
if (pp && hlist_empty(&pp->owner)) {
|
2007-11-10 00:43:40 +08:00
|
|
|
__hlist_del(&pp->node);
|
2005-04-17 06:20:36 +08:00
|
|
|
kmem_cache_free(sctp_bucket_cachep, pp);
|
|
|
|
SCTP_DBG_OBJCNT_DEC(bind_bucket);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Release this socket's reference to a local port. */
|
|
|
|
static inline void __sctp_put_port(struct sock *sk)
|
|
|
|
{
|
|
|
|
struct sctp_bind_hashbucket *head =
|
2012-08-06 16:39:38 +08:00
|
|
|
&sctp_port_hashtable[sctp_phashfn(sock_net(sk),
|
|
|
|
inet_sk(sk)->inet_num)];
|
2005-04-17 06:20:36 +08:00
|
|
|
struct sctp_bind_bucket *pp;
|
|
|
|
|
2014-01-21 15:44:08 +08:00
|
|
|
spin_lock(&head->lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
pp = sctp_sk(sk)->bind_hash;
|
|
|
|
__sk_del_bind_node(sk);
|
|
|
|
sctp_sk(sk)->bind_hash = NULL;
|
2009-10-15 14:30:45 +08:00
|
|
|
inet_sk(sk)->inet_num = 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
sctp_bucket_destroy(pp);
|
2014-01-21 15:44:08 +08:00
|
|
|
spin_unlock(&head->lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void sctp_put_port(struct sock *sk)
|
|
|
|
{
|
2014-01-21 15:44:07 +08:00
|
|
|
local_bh_disable();
|
2005-04-17 06:20:36 +08:00
|
|
|
__sctp_put_port(sk);
|
2014-01-21 15:44:07 +08:00
|
|
|
local_bh_enable();
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The system picks an ephemeral port and chooses an address set equivalent
|
|
|
|
* to binding with a wildcard address.
|
|
|
|
* One of those addresses will be the primary address for the association.
|
|
|
|
* This automatically enables the multihoming capability of SCTP.
|
|
|
|
*/
|
|
|
|
static int sctp_autobind(struct sock *sk)
|
|
|
|
{
|
|
|
|
union sctp_addr autoaddr;
|
|
|
|
struct sctp_af *af;
|
2006-11-21 09:24:53 +08:00
|
|
|
__be16 port;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Initialize a local sockaddr structure to INADDR_ANY. */
|
|
|
|
af = sctp_sk(sk)->pf->af;
|
|
|
|
|
2009-10-15 14:30:45 +08:00
|
|
|
port = htons(inet_sk(sk)->inet_num);
|
2005-04-17 06:20:36 +08:00
|
|
|
af->inaddr_any(&autoaddr, port);
|
|
|
|
|
|
|
|
return sctp_do_bind(sk, &autoaddr, af->sockaddr_len);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation.
|
|
|
|
*
|
|
|
|
* From RFC 2292
|
|
|
|
* 4.2 The cmsghdr Structure *
|
|
|
|
*
|
|
|
|
* When ancillary data is sent or received, any number of ancillary data
|
|
|
|
* objects can be specified by the msg_control and msg_controllen members of
|
|
|
|
* the msghdr structure, because each object is preceded by
|
|
|
|
* a cmsghdr structure defining the object's length (the cmsg_len member).
|
|
|
|
* Historically Berkeley-derived implementations have passed only one object
|
|
|
|
* at a time, but this API allows multiple objects to be
|
|
|
|
* passed in a single call to sendmsg() or recvmsg(). The following example
|
|
|
|
* shows two ancillary data objects in a control buffer.
|
|
|
|
*
|
|
|
|
* |<--------------------------- msg_controllen -------------------------->|
|
|
|
|
* | |
|
|
|
|
*
|
|
|
|
* |<----- ancillary data object ----->|<----- ancillary data object ----->|
|
|
|
|
*
|
|
|
|
* |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->|
|
|
|
|
* | | |
|
|
|
|
*
|
|
|
|
* |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| |
|
|
|
|
*
|
|
|
|
* |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| |
|
|
|
|
* | | | | |
|
|
|
|
*
|
|
|
|
* +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
|
|
|
|
* |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX|
|
|
|
|
*
|
|
|
|
* |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX|
|
|
|
|
*
|
|
|
|
* +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
|
|
|
|
* ^
|
|
|
|
* |
|
|
|
|
*
|
|
|
|
* msg_control
|
|
|
|
* points here
|
|
|
|
*/
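/* A minimal user-space sketch (illustrative values only) of attaching an
 * SCTP_SNDINFO ancillary data object to sendmsg(), matching the layout
 * that sctp_msghdr_parse() below expects:
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndinfo))] = { 0 };
 *	struct msghdr msg = { 0 };
 *	struct cmsghdr *cmsg;
 *	struct sctp_sndinfo *sinfo;
 *
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type = SCTP_SNDINFO;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndinfo));
 *	sinfo = (struct sctp_sndinfo *)CMSG_DATA(cmsg);
 *	sinfo->snd_sid = 1;		(any valid stream id)
 *	sinfo->snd_flags = SCTP_UNORDERED;
 *
 * The data to send still goes in msg_iov as usual; only the control
 * buffer is shown here.
 */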
|
2017-08-11 10:23:48 +08:00
|
|
|
static int sctp_msghdr_parse(const struct msghdr *msg, struct sctp_cmsgs *cmsgs)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2008-04-13 09:40:06 +08:00
|
|
|
struct msghdr *my_msg = (struct msghdr *)msg;
|
2017-08-11 10:23:48 +08:00
|
|
|
struct cmsghdr *cmsg;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2014-12-11 11:22:04 +08:00
|
|
|
for_each_cmsghdr(cmsg, my_msg) {
|
2008-04-13 09:40:06 +08:00
|
|
|
if (!CMSG_OK(my_msg, cmsg))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* Should we parse this header or ignore? */
|
|
|
|
if (cmsg->cmsg_level != IPPROTO_SCTP)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* Strictly check lengths following example in SCM code. */
|
|
|
|
switch (cmsg->cmsg_type) {
|
|
|
|
case SCTP_INIT:
|
|
|
|
/* SCTP Socket API Extension
|
2014-07-13 02:30:36 +08:00
|
|
|
* 5.3.1 SCTP Initiation Structure (SCTP_INIT)
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
|
|
|
* This cmsghdr structure provides information for
|
|
|
|
* initializing new SCTP associations with sendmsg().
|
|
|
|
* The SCTP_INITMSG socket option uses this same data
|
|
|
|
* structure. This structure is not used for
|
|
|
|
* recvmsg().
|
|
|
|
*
|
|
|
|
* cmsg_level cmsg_type cmsg_data[]
|
|
|
|
* ------------ ------------ ----------------------
|
|
|
|
* IPPROTO_SCTP SCTP_INIT struct sctp_initmsg
|
|
|
|
*/
|
2014-07-13 02:30:36 +08:00
|
|
|
if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg)))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
2014-07-13 02:30:36 +08:00
|
|
|
|
|
|
|
cmsgs->init = CMSG_DATA(cmsg);
|
2005-04-17 06:20:36 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
case SCTP_SNDRCV:
|
|
|
|
/* SCTP Socket API Extension
|
2014-07-13 02:30:36 +08:00
|
|
|
* 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV)
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
|
|
|
* This cmsghdr structure specifies SCTP options for
|
|
|
|
* sendmsg() and describes SCTP header information
|
|
|
|
* about a received message through recvmsg().
|
|
|
|
*
|
|
|
|
* cmsg_level cmsg_type cmsg_data[]
|
|
|
|
* ------------ ------------ ----------------------
|
|
|
|
* IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo
|
|
|
|
*/
|
2014-07-13 02:30:36 +08:00
|
|
|
if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo)))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2014-07-13 02:30:36 +08:00
|
|
|
cmsgs->srinfo = CMSG_DATA(cmsg);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2014-07-13 02:30:36 +08:00
|
|
|
if (cmsgs->srinfo->sinfo_flags &
|
2005-10-29 06:10:00 +08:00
|
|
|
~(SCTP_UNORDERED | SCTP_ADDR_OVER |
|
2018-03-05 20:44:20 +08:00
|
|
|
SCTP_SACK_IMMEDIATELY | SCTP_SENDALL |
|
|
|
|
SCTP_PR_SCTP_MASK | SCTP_ABORT | SCTP_EOF))
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
|
|
|
break;
|
|
|
|
|
2014-07-13 02:30:36 +08:00
|
|
|
case SCTP_SNDINFO:
|
|
|
|
/* SCTP Socket API Extension
|
|
|
|
* 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO)
|
|
|
|
*
|
|
|
|
* This cmsghdr structure specifies SCTP options for
|
|
|
|
* sendmsg(). This structure and SCTP_RCVINFO replace
|
|
|
|
* SCTP_SNDRCV which has been deprecated.
|
|
|
|
*
|
|
|
|
* cmsg_level cmsg_type cmsg_data[]
|
|
|
|
* ------------ ------------ ---------------------
|
|
|
|
* IPPROTO_SCTP SCTP_SNDINFO struct sctp_sndinfo
|
|
|
|
*/
|
|
|
|
if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo)))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
cmsgs->sinfo = CMSG_DATA(cmsg);
|
|
|
|
|
|
|
|
if (cmsgs->sinfo->snd_flags &
|
|
|
|
~(SCTP_UNORDERED | SCTP_ADDR_OVER |
|
2018-03-05 20:44:20 +08:00
|
|
|
SCTP_SACK_IMMEDIATELY | SCTP_SENDALL |
|
|
|
|
SCTP_PR_SCTP_MASK | SCTP_ABORT | SCTP_EOF))
|
2014-07-13 02:30:36 +08:00
|
|
|
return -EINVAL;
|
|
|
|
break;
|
2018-03-05 20:44:18 +08:00
|
|
|
case SCTP_PRINFO:
|
|
|
|
/* SCTP Socket API Extension
|
|
|
|
* 5.3.7 SCTP PR-SCTP Information Structure (SCTP_PRINFO)
|
|
|
|
*
|
|
|
|
* This cmsghdr structure specifies SCTP options for sendmsg().
|
|
|
|
*
|
|
|
|
* cmsg_level cmsg_type cmsg_data[]
|
|
|
|
* ------------ ------------ ---------------------
|
|
|
|
* IPPROTO_SCTP SCTP_PRINFO struct sctp_prinfo
|
|
|
|
*/
|
|
|
|
if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_prinfo)))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
cmsgs->prinfo = CMSG_DATA(cmsg);
|
|
|
|
if (cmsgs->prinfo->pr_policy & ~SCTP_PR_SCTP_MASK)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (cmsgs->prinfo->pr_policy == SCTP_PR_SCTP_NONE)
|
|
|
|
cmsgs->prinfo->pr_value = 0;
|
|
|
|
break;
|
2018-03-14 19:05:31 +08:00
|
|
|
case SCTP_AUTHINFO:
|
|
|
|
/* SCTP Socket API Extension
|
|
|
|
* 5.3.8 SCTP AUTH Information Structure (SCTP_AUTHINFO)
|
|
|
|
*
|
|
|
|
* This cmsghdr structure specifies SCTP options for sendmsg().
|
|
|
|
*
|
|
|
|
* cmsg_level cmsg_type cmsg_data[]
|
|
|
|
* ------------ ------------ ---------------------
|
|
|
|
* IPPROTO_SCTP SCTP_AUTHINFO struct sctp_authinfo
|
|
|
|
*/
|
|
|
|
if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_authinfo)))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
cmsgs->authinfo = CMSG_DATA(cmsg);
|
|
|
|
break;
|
2018-03-05 20:44:19 +08:00
|
|
|
case SCTP_DSTADDRV4:
|
|
|
|
case SCTP_DSTADDRV6:
|
|
|
|
/* SCTP Socket API Extension
|
|
|
|
* 5.3.9/10 SCTP Destination IPv4/6 Address Structure (SCTP_DSTADDRV4/6)
|
|
|
|
*
|
|
|
|
* This cmsghdr structure specifies SCTP options for sendmsg().
|
|
|
|
*
|
|
|
|
* cmsg_level cmsg_type cmsg_data[]
|
|
|
|
* ------------ ------------ ---------------------
|
|
|
|
* IPPROTO_SCTP SCTP_DSTADDRV4 struct in_addr
|
|
|
|
* ------------ ------------ ---------------------
|
|
|
|
* IPPROTO_SCTP SCTP_DSTADDRV6 struct in6_addr
|
|
|
|
*/
|
|
|
|
cmsgs->addrs_msg = my_msg;
|
|
|
|
break;
|
2005-04-17 06:20:36 +08:00
|
|
|
default:
|
|
|
|
return -EINVAL;
|
2007-04-21 08:09:22 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2014-07-13 02:30:36 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Wait for a packet..
|
|
|
|
* Note: This function is the same function as in core/datagram.c
|
|
|
|
* with a few modifications to make lksctp work.
|
|
|
|
*/
|
2013-12-23 12:16:51 +08:00
|
|
|
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
DEFINE_WAIT(wait);
|
|
|
|
|
2010-04-20 21:03:51 +08:00
|
|
|
prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Socket errors? */
|
|
|
|
error = sock_error(sk);
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (!skb_queue_empty(&sk->sk_receive_queue))
|
|
|
|
goto ready;
|
|
|
|
|
|
|
|
/* Socket shut down? */
|
|
|
|
if (sk->sk_shutdown & RCV_SHUTDOWN)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/* Sequenced packets can come disconnected. If so we report the
|
|
|
|
* problem.
|
|
|
|
*/
|
|
|
|
error = -ENOTCONN;
|
|
|
|
|
|
|
|
/* Is there a good reason to think that we may receive some data? */
|
|
|
|
if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/* Handle signals. */
|
|
|
|
if (signal_pending(current))
|
|
|
|
goto interrupted;
|
|
|
|
|
|
|
|
/* Let another process have a go. Since we are going to sleep
|
|
|
|
* anyway. Note: This may cause odd behaviors if the message
|
|
|
|
* does not fit in the user's buffer, but this seems to be the
|
|
|
|
* only way to honor MSG_DONTWAIT realistically.
|
|
|
|
*/
|
2014-01-21 15:44:11 +08:00
|
|
|
release_sock(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
*timeo_p = schedule_timeout(*timeo_p);
|
2014-01-21 15:44:11 +08:00
|
|
|
lock_sock(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
ready:
|
2010-04-20 21:03:51 +08:00
|
|
|
finish_wait(sk_sleep(sk), &wait);
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
interrupted:
|
|
|
|
error = sock_intr_errno(*timeo_p);
|
|
|
|
|
|
|
|
out:
|
2010-04-20 21:03:51 +08:00
|
|
|
finish_wait(sk_sleep(sk), &wait);
|
2005-04-17 06:20:36 +08:00
|
|
|
*err = error;
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Receive a datagram.
|
|
|
|
* Note: This is pretty much the same routine as in core/datagram.c
|
|
|
|
* with a few changes to make lksctp work.
|
|
|
|
*/
|
2014-07-13 02:30:38 +08:00
|
|
|
struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
|
|
|
|
int noblock, int *err)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
long timeo;
|
|
|
|
|
|
|
|
timeo = sock_rcvtimeo(sk, noblock);
|
|
|
|
|
2013-06-29 01:49:40 +08:00
|
|
|
pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo,
|
|
|
|
MAX_SCHEDULE_TIMEOUT);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
do {
|
|
|
|
/* Again only user level code calls this function,
|
|
|
|
* so nothing interrupt level
|
|
|
|
* will suddenly eat the receive_queue.
|
|
|
|
*
|
|
|
|
* Look at current nfs client by the way...
|
2010-12-02 17:01:55 +08:00
|
|
|
* However, this function was correct in any case. 8)
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
|
|
|
if (flags & MSG_PEEK) {
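/* Peek without dequeueing: take an extra reference so the skb
 * stays on sk_receive_queue for the next reader.
 */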
|
|
|
|
skb = skb_peek(&sk->sk_receive_queue);
|
|
|
|
if (skb)
|
2017-06-30 18:07:58 +08:00
|
|
|
refcount_inc(&skb->users);
|
2005-04-17 06:20:36 +08:00
|
|
|
} else {
|
2016-04-14 06:12:29 +08:00
|
|
|
skb = __skb_dequeue(&sk->sk_receive_queue);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (skb)
|
|
|
|
return skb;
|
|
|
|
|
2005-12-03 12:30:06 +08:00
|
|
|
/* Caller is allowed not to check sk->sk_err before calling. */
|
|
|
|
error = sock_error(sk);
|
|
|
|
if (error)
|
|
|
|
goto no_packet;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (sk->sk_shutdown & RCV_SHUTDOWN)
|
|
|
|
break;
|
|
|
|
|
2017-03-25 01:08:12 +08:00
|
|
|
if (sk_can_busy_loop(sk)) {
|
|
|
|
sk_busy_loop(sk, noblock);
|
|
|
|
|
|
|
|
if (!skb_queue_empty(&sk->sk_receive_queue))
|
|
|
|
continue;
|
|
|
|
}
|
2014-04-18 03:26:51 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* User doesn't want to wait. */
|
|
|
|
error = -EAGAIN;
|
|
|
|
if (!timeo)
|
|
|
|
goto no_packet;
|
|
|
|
} while (sctp_wait_for_packet(sk, err, &timeo) == 0);
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
no_packet:
|
|
|
|
*err = error;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If sndbuf has changed, wake up per association sndbuf waiters. */
|
|
|
|
static void __sctp_write_space(struct sctp_association *asoc)
|
|
|
|
{
|
|
|
|
struct sock *sk = asoc->base.sk;
|
|
|
|
|
2015-11-30 12:03:11 +08:00
|
|
|
if (sctp_wspace(asoc) <= 0)
|
|
|
|
return;
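/* Senders blocked in sctp_wait_for_sndbuf() sleep on asoc->wait
 * and re-check the association's send space once woken.
 */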
|
|
|
|
|
|
|
|
if (waitqueue_active(&asoc->wait))
|
|
|
|
wake_up_interruptible(&asoc->wait);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2015-11-30 12:03:11 +08:00
|
|
|
if (sctp_writeable(sk)) {
|
|
|
|
struct socket_wq *wq;
|
2011-02-18 11:26:36 +08:00
|
|
|
|
2015-11-30 12:03:11 +08:00
|
|
|
rcu_read_lock();
|
|
|
|
wq = rcu_dereference(sk->sk_wq);
|
|
|
|
if (wq) {
|
|
|
|
if (waitqueue_active(&wq->wait))
|
|
|
|
wake_up_interruptible(&wq->wait);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Note that we try to include the Async I/O support
|
|
|
|
* here by modeling from the current TCP/UDP code.
|
|
|
|
* We have not tested with it yet.
|
|
|
|
*/
|
2011-02-18 11:26:36 +08:00
|
|
|
if (!(sk->sk_shutdown & SEND_SHUTDOWN))
|
2015-11-30 12:03:11 +08:00
|
|
|
sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2015-11-30 12:03:11 +08:00
|
|
|
rcu_read_unlock();
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-04-08 23:26:13 +08:00
|
|
|
static void sctp_wake_up_waiters(struct sock *sk,
|
|
|
|
struct sctp_association *asoc)
|
|
|
|
{
|
|
|
|
struct sctp_association *tmp = asoc;
|
|
|
|
|
|
|
|
/* We do accounting for the sndbuf space per association,
|
|
|
|
* so we only need to wake our own association.
|
|
|
|
*/
|
|
|
|
if (asoc->ep->sndbuf_policy)
|
|
|
|
return __sctp_write_space(asoc);
|
|
|
|
|
2014-04-09 22:10:20 +08:00
|
|
|
/* If association goes down and is just flushing its
|
|
|
|
* outq, then just normally notify others.
|
|
|
|
*/
|
|
|
|
if (asoc->base.dead)
|
|
|
|
return sctp_write_space(sk);
|
|
|
|
|
net: sctp: wake up all assocs if sndbuf policy is per socket
SCTP charges chunks for wmem accounting via skb->truesize in
sctp_set_owner_w(), and sctp_wfree() respectively as the
reverse operation. If a sender runs out of wmem, it needs to
wait via sctp_wait_for_sndbuf(), and gets woken up by a call
to __sctp_write_space() mostly via sctp_wfree().
__sctp_write_space() is being called per association. Although
we assign sctp_write_space() to sk->sk_write_space(), which then
operates per socket, it is only used when send space is increased
via the SO_SNDBUF socket option, since SOCK_USE_WRITE_QUEUE is set
and it is therefore not invoked from sock_wfree().
Commit 4c3a5bdae293 ("sctp: Don't charge for data in sndbuf
again when transmitting packet") fixed an issue where in case
sctp_packet_transmit() manages to queue up more than sndbuf
bytes, sctp_wait_for_sndbuf() will never be woken up again
unless it is interrupted by a signal. However, a still
remaining issue is that if net.sctp.sndbuf_policy=0, that is
accounting per socket, and one-to-many sockets are in use,
the reclaimed write space from sctp_wfree() is 'unfairly'
handed back on the server to the association that is the lucky
one to be woken up again via __sctp_write_space(), while
the remaining associations are never woken up again
(unless by a signal).
The effect disappears with net.sctp.sndbuf_policy=1, that
is wmem accounting per association, as it guarantees a fair
share of wmem among associations.
Therefore, if we have reclaimed memory in case of per socket
accounting, wake all related associations to a socket in a
fair manner, that is, traverse the socket association list
starting from the current neighbour of the association and
issue a __sctp_write_space() to everyone until we end up
waking ourselves. This guarantees that no association is
preferred over another and even if more associations are
taken into the one-to-many session, all receivers will get
messages from the server and are not stalled forever on
high load. This setting still leaves the advantage of per
socket accounting intact, as an association can still use
up global limits if unused by others.
Fixes: 4eb701dfc618 ("[SCTP] Fix SCTP sendbuffer accouting.")
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Cc: Thomas Graf <tgraf@suug.ch>
Cc: Neil Horman <nhorman@tuxdriver.com>
Cc: Vlad Yasevich <vyasevic@redhat.com>
Acked-by: Vlad Yasevich <vyasevic@redhat.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2014-04-08 23:26:13 +08:00
|
|
|
/* Accounting for the sndbuf space is per socket, so we
|
|
|
|
* need to wake up others, try to be fair and in case of
|
|
|
|
* other associations, let them have a go first instead
|
|
|
|
* of just doing a sctp_write_space() call.
|
|
|
|
*
|
|
|
|
* Note that we reach sctp_wake_up_waiters() only when
|
|
|
|
* associations free up queued chunks, thus we are under
|
|
|
|
* lock and the list of associations on a socket is
|
|
|
|
* guaranteed not to change.
|
|
|
|
*/
|
|
|
|
for (tmp = list_next_entry(tmp, asocs); 1;
|
|
|
|
tmp = list_next_entry(tmp, asocs)) {
|
|
|
|
/* Manually skip the head element. */
|
|
|
|
if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs))
|
|
|
|
continue;
|
|
|
|
/* Wake up association. */
|
|
|
|
__sctp_write_space(tmp);
|
|
|
|
/* We've reached the end. */
|
|
|
|
if (tmp == asoc)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
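For context, a hedged userspace sketch of the one-to-many setup the commit message above is about, assuming lksctp-tools' <netinet/sctp.h> and that net.sctp.sndbuf_policy=0 selects per-socket accounting as described; the peer address and port are made up:

/* Sketch: one UDP-style (SOCK_SEQPACKET) SCTP socket carries several
 * associations, one per peer; with sndbuf_policy=0 they all share the
 * socket's sndbuf, which is when the fair wake-up above matters.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/sctp.h>	/* sctp_sendmsg(), link with -lsctp */

int main(void)
{
	int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
	struct sockaddr_in peer = {
		.sin_family = AF_INET,
		.sin_port   = htons(5000),	/* example port */
	};
	char buf[] = "hello";

	if (fd < 0)
		return 1;
	inet_pton(AF_INET, "192.0.2.10", &peer.sin_addr);	/* example */

	/* Sending to a new peer implicitly sets up a new association on
	 * the same fd; every such association competes for sk_sndbuf.
	 */
	if (sctp_sendmsg(fd, buf, sizeof(buf), (struct sockaddr *)&peer,
			 sizeof(peer), 0, 0, 0, 0, 0) < 0)
		perror("sctp_sendmsg");
	return 0;
}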
2005-04-17 06:20:36 +08:00
|
|
|
/* Do accounting for the sndbuf space.
|
|
|
|
* Decrement the used sndbuf space of the corresponding association by the
|
|
|
|
* data size which was just transmitted (freed).
|
|
|
|
*/
|
|
|
|
static void sctp_wfree(struct sk_buff *skb)
|
|
|
|
{
|
net: sctp: keep owned chunk in destructor_arg instead of skb->cb
It's just silly to hold the skb destructor argument around inside
skb->cb[] as we currently do in SCTP.
Nowadays, we're sort of cheating on data accounting in the sense
that due to commit 4c3a5bdae293 ("sctp: Don't charge for data in
sndbuf again when transmitting packet"), we orphan the skb already
in the SCTP output path, i.e. giving back charged data memory, and
use a different destructor only to make sure the sk doesn't vanish
on skb destruction time. Thus, cb[] is still valid here as we
operate within the SCTP layer. (It's generally a big
candidate for future rework, imho.)
However, storing the destructor argument in cb[] can easily cause issues
should a non-sctp_packet_set_owner_w()'ed skb ever escape the SCTP
layer, since cb[] may get overwritten by lower layers and thus can
corrupt the chunk pointer. There are no such issues at present,
but let's keep the chunk in destructor_arg, as this is the actual
purpose for it.
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Acked-by: Vlad Yasevich <vyasevich@gmail.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2014-11-20 08:54:48 +08:00
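As a hedged, kernel-style sketch (an illustrative fragment, not a standalone program) of the ownership pattern the message describes, with the chunk riding in skb_shinfo(skb)->destructor_arg so the destructor can recover it without touching skb->cb[]:

#include <linux/skbuff.h>

struct my_chunk { struct sk_buff *skb; /* ... */ };	/* illustrative stand-in */

static void my_wfree(struct sk_buff *skb)
{
	struct my_chunk *chunk = skb_shinfo(skb)->destructor_arg;

	/* release whatever accounting was charged against chunk's owner */
	(void)chunk;
}

static void my_set_owner(struct my_chunk *chunk)
{
	struct sk_buff *skb = chunk->skb;

	skb_shinfo(skb)->destructor_arg = chunk;	/* not clobbered like cb[] */
	skb->destructor = my_wfree;			/* runs on last kfree_skb() */
}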
|
|
|
struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;
|
|
|
|
struct sctp_association *asoc = chunk->asoc;
|
|
|
|
struct sock *sk = asoc->base.sk;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-12-31 16:11:19 +08:00
|
|
|
sk_mem_uncharge(sk, skb->truesize);
|
2018-10-17 03:07:50 +08:00
|
|
|
sk->sk_wmem_queued -= skb->truesize + sizeof(struct sctp_chunk);
|
|
|
|
asoc->sndbuf_used -= skb->truesize + sizeof(struct sctp_chunk);
|
|
|
|
WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk),
|
|
|
|
&sk->sk_wmem_alloc));
|
2007-08-16 07:07:44 +08:00
|
|
|
|
2018-03-14 19:05:33 +08:00
|
|
|
if (chunk->shkey) {
|
|
|
|
struct sctp_shared_key *shkey = chunk->shkey;
|
|
|
|
|
|
|
|
/* refcnt == 2 and !list_empty mean after this release, it's
|
|
|
|
* not being used anywhere, and it's time to notify userland
|
|
|
|
* that this shkey can be freed if it's been deactivated.
|
|
|
|
*/
|
|
|
|
if (shkey->deactivated && !list_empty(&shkey->key_list) &&
|
|
|
|
refcount_read(&shkey->refcnt) == 2) {
|
|
|
|
struct sctp_ulpevent *ev;
|
|
|
|
|
|
|
|
ev = sctp_ulpevent_make_authkey(asoc, shkey->key_id,
|
|
|
|
SCTP_AUTH_FREE_KEY,
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (ev)
|
|
|
|
asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
|
|
|
|
}
|
2018-03-14 19:05:30 +08:00
|
|
|
sctp_auth_shkey_release(chunk->shkey);
|
2018-03-14 19:05:33 +08:00
|
|
|
}
|
2018-03-14 19:05:30 +08:00
|
|
|
|
2005-04-29 03:02:04 +08:00
|
|
|
sock_wfree(skb);
|
net: sctp: wake up all assocs if sndbuf policy is per socket
SCTP charges chunks for wmem accounting via skb->truesize in
sctp_set_owner_w(), and sctp_wfree() respectively as the
reverse operation. If a sender runs out of wmem, it needs to
wait via sctp_wait_for_sndbuf(), and gets woken up by a call
to __sctp_write_space() mostly via sctp_wfree().
__sctp_write_space() is being called per association. Although
we assign sctp_write_space() to sk->sk_write_space(), which then
operates per socket, it is only used when send space is increased
via the SO_SNDBUF socket option, since SOCK_USE_WRITE_QUEUE is set
and it is therefore not invoked from sock_wfree().
Commit 4c3a5bdae293 ("sctp: Don't charge for data in sndbuf
again when transmitting packet") fixed an issue where in case
sctp_packet_transmit() manages to queue up more than sndbuf
bytes, sctp_wait_for_sndbuf() will never be woken up again
unless it is interrupted by a signal. However, a still
remaining issue is that if net.sctp.sndbuf_policy=0, that is
accounting per socket, and one-to-many sockets are in use,
the reclaimed write space from sctp_wfree() is 'unfairly'
handed back on the server to the association that is the lucky
one to be woken up again via __sctp_write_space(), while
the remaining associations are never woken up again
(unless by a signal).
The effect disappears with net.sctp.sndbuf_policy=1, that
is wmem accounting per association, as it guarantees a fair
share of wmem among associations.
Therefore, if we have reclaimed memory in case of per socket
accounting, wake all related associations to a socket in a
fair manner, that is, traverse the socket association list
starting from the current neighbour of the association and
issue a __sctp_write_space() to everyone until we end up
waking ourselves. This guarantees that no association is
preferred over another and even if more associations are
taken into the one-to-many session, all receivers will get
messages from the server and are not stalled forever on
high load. This setting still leaves the advantage of per
socket accounting intact, as an association can still use
up global limits if unused by others.
Fixes: 4eb701dfc618 ("[SCTP] Fix SCTP sendbuffer accouting.")
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Cc: Thomas Graf <tgraf@suug.ch>
Cc: Neil Horman <nhorman@tuxdriver.com>
Cc: Vlad Yasevich <vyasevic@redhat.com>
Acked-by: Vlad Yasevich <vyasevic@redhat.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2014-04-08 23:26:13 +08:00
|
|
|
sctp_wake_up_waiters(sk, asoc);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
sctp_association_put(asoc);
|
|
|
|
}
|
|
|
|
|
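The shkey branch above pushes an SCTP_AUTH_FREE_KEY notification to userland once a deactivated shared key is truly unused. A hedged userspace sketch of the consuming side, assuming the RFC 6458 SCTP_AUTH_DEACTIVATE_KEY socket option and the sctp_authkey_event layout from <netinet/sctp.h>; key number 1 is made up:

/* Sketch: deactivate shared key #1, then watch authentication events for
 * SCTP_AUTH_FREE_KEY, which says the kernel holds no more references.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int deactivate_key(int fd, sctp_assoc_t assoc_id)
{
	struct sctp_authkeyid key = {
		.scact_assoc_id  = assoc_id,
		.scact_keynumber = 1,		/* example key number */
	};

	return setsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_DEACTIVATE_KEY,
			  &key, sizeof(key));
}

static void handle_auth_event(const struct sctp_authkey_event *ev)
{
	if (ev->auth_indication == SCTP_AUTH_FREE_KEY)
		printf("key %u can be deleted now\n",
		       (unsigned int)ev->auth_keynumber);
}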
2006-10-10 12:34:04 +08:00
|
|
|
/* Do accounting for the receive space on the socket.
|
|
|
|
* Accounting for the association is done in ulpevent.c
|
|
|
|
* We set this as a destructor for the cloned data skbs so that
|
|
|
|
* accounting is done at the correct time.
|
|
|
|
*/
|
|
|
|
void sctp_sock_rfree(struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
struct sock *sk = skb->sk;
|
|
|
|
struct sctp_ulpevent *event = sctp_skb2event(skb);
|
|
|
|
|
|
|
|
atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
|
2007-08-16 07:07:44 +08:00
|
|
|
|
|
|
|
/*
|
2007-12-31 16:11:19 +08:00
|
|
|
* Mimic the behavior of sock_rfree
|
2007-08-16 07:07:44 +08:00
|
|
|
*/
|
2007-12-31 16:11:19 +08:00
|
|
|
sk_mem_uncharge(sk, event->rmem_len);
|
2006-10-10 12:34:04 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Helper function to wait for space in the sndbuf. */
|
|
|
|
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
|
2018-01-15 17:01:36 +08:00
|
|
|
size_t msg_len)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct sock *sk = asoc->base.sk;
|
|
|
|
long current_timeo = *timeo_p;
|
|
|
|
DEFINE_WAIT(wait);
|
2018-01-15 17:01:36 +08:00
|
|
|
int err = 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
net: sctp: rework debugging framework to use pr_debug and friends
We should get rid of all of SCTP's own debug printk macros and use the ones
that the kernel offers anyway instead. This makes the code more readable,
conforms to the rest of the kernel code, and offers all the features of dynamic
debugging that pr_debug() et al. have, such as turning portions of debug
messages on/off at runtime through debugfs. The runtime cost of having
CONFIG_DYNAMIC_DEBUG enabled, but none of the debug statements printing,
is negligible [1]. If kernel debugging is completely turned off, then these
statements will also compile into "empty" functions.
While we're at it, we also need to change the Kconfig option as it /now/
only refers to the ifdef'ed code portions in outqueue.c that enable further
debugging/tracing of SCTP transaction fields. Also, since the SCTP_ASSERT code
was enabled with this Kconfig option and has now been removed, we
transform those code parts into WARNs or, where appropriate, BUG_ONs, so
that such bugs can be more easily detected, as probably not many people
have SCTP debugging permanently turned on.
To turn on all SCTP debugging, the following steps are needed:
# mount -t debugfs none /sys/kernel/debug
# echo -n 'module sctp +p' > /sys/kernel/debug/dynamic_debug/control
This can be done more fine-grained on a per file, per line basis and others
as described in [2].
[1] https://www.kernel.org/doc/ols/2009/ols2009-pages-39-46.pdf
[2] Documentation/dynamic-debug-howto.txt
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2013-06-29 01:49:40 +08:00
|
|
|
pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
|
|
|
|
*timeo_p, msg_len);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Increment the association's refcnt. */
|
|
|
|
sctp_association_hold(asoc);
|
|
|
|
|
|
|
|
/* Wait on the association specific sndbuf space. */
|
|
|
|
for (;;) {
|
|
|
|
prepare_to_wait_exclusive(&asoc->wait, &wait,
|
|
|
|
TASK_INTERRUPTIBLE);
|
2017-11-15 16:55:54 +08:00
|
|
|
if (asoc->base.dead)
|
|
|
|
goto do_dead;
|
2005-04-17 06:20:36 +08:00
|
|
|
if (!*timeo_p)
|
|
|
|
goto do_nonblock;
|
2017-11-15 16:55:54 +08:00
|
|
|
if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
|
2005-04-17 06:20:36 +08:00
|
|
|
goto do_error;
|
|
|
|
if (signal_pending(current))
|
|
|
|
goto do_interrupted;
|
2018-10-17 03:07:51 +08:00
|
|
|
if ((int)msg_len <= sctp_wspace(asoc))
|
2005-04-17 06:20:36 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* Let another process have a go, since we are going
|
|
|
|
* to sleep anyway.
|
|
|
|
*/
|
2014-01-21 15:44:11 +08:00
|
|
|
release_sock(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
current_timeo = schedule_timeout(current_timeo);
|
2014-01-21 15:44:11 +08:00
|
|
|
lock_sock(sk);
|
2018-01-15 17:01:36 +08:00
|
|
|
if (sk != asoc->base.sk)
|
|
|
|
goto do_error;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
*timeo_p = current_timeo;
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
finish_wait(&asoc->wait, &wait);
|
|
|
|
|
|
|
|
/* Release the association's refcnt. */
|
|
|
|
sctp_association_put(asoc);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
|
2017-11-15 16:55:54 +08:00
|
|
|
do_dead:
|
|
|
|
err = -ESRCH;
|
|
|
|
goto out;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
do_error:
|
|
|
|
err = -EPIPE;
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
do_interrupted:
|
|
|
|
err = sock_intr_errno(*timeo_p);
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
do_nonblock:
|
|
|
|
err = -EAGAIN;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
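sctp_wait_for_sndbuf() is only entered for blocking sends; a hedged userspace counterpart is a non-blocking sender that treats EAGAIN as "sndbuf full" and polls for POLLOUT (socket setup omitted, fd assumed connected and O_NONBLOCK):

/* Sketch: the userspace view of a full send buffer. With O_NONBLOCK the
 * kernel returns EAGAIN instead of sleeping in sctp_wait_for_sndbuf(),
 * and poll() wakes us once freed chunks report new write space.
 */
#include <errno.h>
#include <poll.h>
#include <sys/socket.h>

static ssize_t send_when_writable(int fd, const void *buf, size_t len)
{
	for (;;) {
		ssize_t n = send(fd, buf, len, 0);

		if (n >= 0 || errno != EAGAIN)
			return n;

		struct pollfd pfd = { .fd = fd, .events = POLLOUT };

		if (poll(&pfd, 1, -1) < 0)	/* wait for write space */
			return -1;
	}
}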
2014-04-12 04:15:36 +08:00
|
|
|
void sctp_data_ready(struct sock *sk)
|
2010-04-28 16:47:18 +08:00
|
|
|
{
|
2010-05-03 12:43:40 +08:00
|
|
|
struct socket_wq *wq;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
wq = rcu_dereference(sk->sk_wq);
|
2015-11-26 13:55:39 +08:00
|
|
|
if (skwq_has_sleeper(wq))
|
2018-02-12 06:34:03 +08:00
|
|
|
wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
|
|
|
|
EPOLLRDNORM | EPOLLRDBAND);
|
2010-04-28 16:47:18 +08:00
|
|
|
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
|
2010-05-03 12:43:40 +08:00
|
|
|
rcu_read_unlock();
|
2010-04-28 16:47:18 +08:00
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* If socket sndbuf has changed, wake up all per association waiters. */
|
|
|
|
void sctp_write_space(struct sock *sk)
|
|
|
|
{
|
|
|
|
struct sctp_association *asoc;
|
|
|
|
|
|
|
|
/* Wake up the tasks in each wait queue. */
|
2008-04-13 09:54:24 +08:00
|
|
|
list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) {
|
2005-04-17 06:20:36 +08:00
|
|
|
__sctp_write_space(asoc);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Is there any sndbuf space available on the socket?
|
|
|
|
*
|
2005-12-20 06:24:40 +08:00
|
|
|
* Note that sk_wmem_alloc is the sum of the send buffers on all of the
|
2005-04-17 06:20:36 +08:00
|
|
|
* associations on the same socket. For a UDP-style socket with
|
|
|
|
* multiple associations, it is possible for it to be "unwriteable"
|
|
|
|
* prematurely. I assume that this is acceptable because
|
|
|
|
* a premature "unwriteable" is better than an accidental "writeable" which
|
|
|
|
* would cause an unwanted block under certain circumstances. For the 1-1
|
|
|
|
* UDP-style sockets or TCP-style sockets, this code should work.
|
|
|
|
* - Daisy
|
|
|
|
*/
|
2018-10-17 03:07:51 +08:00
|
|
|
static bool sctp_writeable(struct sock *sk)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2018-10-17 03:07:51 +08:00
|
|
|
return sk->sk_sndbuf > sk->sk_wmem_queued;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Wait for an association to go into ESTABLISHED state. If timeout is 0,
|
|
|
|
* returns immediately with EINPROGRESS.
|
|
|
|
*/
|
|
|
|
static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
|
|
|
|
{
|
|
|
|
struct sock *sk = asoc->base.sk;
|
|
|
|
int err = 0;
|
|
|
|
long current_timeo = *timeo_p;
|
|
|
|
DEFINE_WAIT(wait);
|
|
|
|
|
net: sctp: rework debugging framework to use pr_debug and friends
We should get rid of all of SCTP's own debug printk macros and use the ones
that the kernel offers anyway instead. This makes the code more readable,
conforms to the rest of the kernel code, and offers all the features of dynamic
debugging that pr_debug() et al. have, such as turning portions of debug
messages on/off at runtime through debugfs. The runtime cost of having
CONFIG_DYNAMIC_DEBUG enabled, but none of the debug statements printing,
is negligible [1]. If kernel debugging is completely turned off, then these
statements will also compile into "empty" functions.
While we're at it, we also need to change the Kconfig option as it /now/
only refers to the ifdef'ed code portions in outqueue.c that enable further
debugging/tracing of SCTP transaction fields. Also, since the SCTP_ASSERT code
was enabled with this Kconfig option and has now been removed, we
transform those code parts into WARNs or, where appropriate, BUG_ONs, so
that such bugs can be more easily detected, as probably not many people
have SCTP debugging permanently turned on.
To turn on all SCTP debugging, the following steps are needed:
# mount -t debugfs none /sys/kernel/debug
# echo -n 'module sctp +p' > /sys/kernel/debug/dynamic_debug/control
This can be done more fine-grained on a per file, per line basis and others
as described in [2].
[1] https://www.kernel.org/doc/ols/2009/ols2009-pages-39-46.pdf
[2] Documentation/dynamic-debug-howto.txt
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2013-06-29 01:49:40 +08:00
|
|
|
pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Increment the association's refcnt. */
|
|
|
|
sctp_association_hold(asoc);
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
prepare_to_wait_exclusive(&asoc->wait, &wait,
|
|
|
|
TASK_INTERRUPTIBLE);
|
|
|
|
if (!*timeo_p)
|
|
|
|
goto do_nonblock;
|
|
|
|
if (sk->sk_shutdown & RCV_SHUTDOWN)
|
|
|
|
break;
|
|
|
|
if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
|
|
|
|
asoc->base.dead)
|
|
|
|
goto do_error;
|
|
|
|
if (signal_pending(current))
|
|
|
|
goto do_interrupted;
|
|
|
|
|
|
|
|
if (sctp_state(asoc, ESTABLISHED))
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* Let another process have a go, since we are going
|
|
|
|
* to sleep anyway.
|
|
|
|
*/
|
2014-01-21 15:44:11 +08:00
|
|
|
release_sock(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
current_timeo = schedule_timeout(current_timeo);
|
2014-01-21 15:44:11 +08:00
|
|
|
lock_sock(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
*timeo_p = current_timeo;
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
finish_wait(&asoc->wait, &wait);
|
|
|
|
|
|
|
|
/* Release the association's refcnt. */
|
|
|
|
sctp_association_put(asoc);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
do_error:
|
2006-01-31 07:59:54 +08:00
|
|
|
if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
|
2005-04-17 06:20:36 +08:00
|
|
|
err = -ETIMEDOUT;
|
|
|
|
else
|
|
|
|
err = -ECONNREFUSED;
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
do_interrupted:
|
|
|
|
err = sock_intr_errno(*timeo_p);
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
do_nonblock:
|
|
|
|
err = -EINPROGRESS;
|
|
|
|
goto out;
|
|
|
|
}
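The EINPROGRESS path above is what a non-blocking connect() sees; a hedged sketch of the usual userspace follow-up, polling for writability and reading SO_ERROR to learn whether the association reached ESTABLISHED:

#include <errno.h>
#include <poll.h>
#include <sys/socket.h>

/* Sketch: after connect() on a non-blocking socket fails with EINPROGRESS,
 * wait for POLLOUT and check SO_ERROR (0 means the association came up).
 */
static int wait_for_connect_done(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
	int err = 0;
	socklen_t len = sizeof(err);

	if (poll(&pfd, 1, timeout_ms) <= 0)
		return -1;			/* timeout or poll error */
	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0)
		return -1;

	errno = err;
	return err ? -1 : 0;
}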
|
|
|
|
|
|
|
|
static int sctp_wait_for_accept(struct sock *sk, long timeo)
|
|
|
|
{
|
|
|
|
struct sctp_endpoint *ep;
|
|
|
|
int err = 0;
|
|
|
|
DEFINE_WAIT(wait);
|
|
|
|
|
|
|
|
ep = sctp_sk(sk)->ep;
|
|
|
|
|
|
|
|
|
|
|
|
for (;;) {
|
2010-04-20 21:03:51 +08:00
|
|
|
prepare_to_wait_exclusive(sk_sleep(sk), &wait,
|
2005-04-17 06:20:36 +08:00
|
|
|
TASK_INTERRUPTIBLE);
|
|
|
|
|
|
|
|
if (list_empty(&ep->asocs)) {
|
2014-01-21 15:44:11 +08:00
|
|
|
release_sock(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
timeo = schedule_timeout(timeo);
|
2014-01-21 15:44:11 +08:00
|
|
|
lock_sock(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
err = -EINVAL;
|
|
|
|
if (!sctp_sstate(sk, LISTENING))
|
|
|
|
break;
|
|
|
|
|
|
|
|
err = 0;
|
|
|
|
if (!list_empty(&ep->asocs))
|
|
|
|
break;
|
|
|
|
|
|
|
|
err = sock_intr_errno(timeo);
|
|
|
|
if (signal_pending(current))
|
|
|
|
break;
|
|
|
|
|
|
|
|
err = -EAGAIN;
|
|
|
|
if (!timeo)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2010-04-20 21:03:51 +08:00
|
|
|
finish_wait(sk_sleep(sk), &wait);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2007-07-27 05:21:31 +08:00
|
|
|
static void sctp_wait_for_close(struct sock *sk, long timeout)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
DEFINE_WAIT(wait);
|
|
|
|
|
|
|
|
do {
|
2010-04-20 21:03:51 +08:00
|
|
|
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (list_empty(&sctp_sk(sk)->ep->asocs))
|
|
|
|
break;
|
2014-01-21 15:44:11 +08:00
|
|
|
release_sock(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
timeout = schedule_timeout(timeout);
|
2014-01-21 15:44:11 +08:00
|
|
|
lock_sock(sk);
|
2005-04-17 06:20:36 +08:00
|
|
|
} while (!signal_pending(current) && timeout);
|
|
|
|
|
2010-04-20 21:03:51 +08:00
|
|
|
finish_wait(sk_sleep(sk), &wait);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2007-04-18 03:49:53 +08:00
|
|
|
static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
|
|
|
|
{
|
|
|
|
struct sk_buff *frag;
|
|
|
|
|
|
|
|
if (!skb->data_len)
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
/* Don't forget the fragments. */
|
2009-06-09 15:22:35 +08:00
|
|
|
skb_walk_frags(skb, frag)
|
2007-04-18 03:49:53 +08:00
|
|
|
sctp_skb_set_owner_r_frag(frag, sk);
|
|
|
|
|
|
|
|
done:
|
|
|
|
sctp_skb_set_owner_r(skb, sk);
|
|
|
|
}
|
|
|
|
|
2009-02-13 16:33:44 +08:00
|
|
|
void sctp_copy_sock(struct sock *newsk, struct sock *sk,
|
|
|
|
struct sctp_association *asoc)
|
|
|
|
{
|
|
|
|
struct inet_sock *inet = inet_sk(sk);
|
2010-01-21 18:43:20 +08:00
|
|
|
struct inet_sock *newinet;
|
2018-02-14 04:56:24 +08:00
|
|
|
struct sctp_sock *sp = sctp_sk(sk);
|
|
|
|
struct sctp_endpoint *ep = sp->ep;
|
2009-02-13 16:33:44 +08:00
|
|
|
|
|
|
|
newsk->sk_type = sk->sk_type;
|
|
|
|
newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
|
|
|
|
newsk->sk_flags = sk->sk_flags;
|
2015-12-05 01:14:05 +08:00
|
|
|
newsk->sk_tsflags = sk->sk_tsflags;
|
2014-05-23 23:47:19 +08:00
|
|
|
newsk->sk_no_check_tx = sk->sk_no_check_tx;
|
|
|
|
newsk->sk_no_check_rx = sk->sk_no_check_rx;
|
2009-02-13 16:33:44 +08:00
|
|
|
newsk->sk_reuse = sk->sk_reuse;
|
2018-06-28 15:31:00 +08:00
|
|
|
sctp_sk(newsk)->reuse = sp->reuse;
|
2009-02-13 16:33:44 +08:00
|
|
|
|
|
|
|
newsk->sk_shutdown = sk->sk_shutdown;
|
2013-06-26 00:17:29 +08:00
|
|
|
newsk->sk_destruct = sctp_destruct_sock;
|
2009-02-13 16:33:44 +08:00
|
|
|
newsk->sk_family = sk->sk_family;
|
|
|
|
newsk->sk_protocol = IPPROTO_SCTP;
|
|
|
|
newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
|
|
|
|
newsk->sk_sndbuf = sk->sk_sndbuf;
|
|
|
|
newsk->sk_rcvbuf = sk->sk_rcvbuf;
|
|
|
|
newsk->sk_lingertime = sk->sk_lingertime;
|
|
|
|
newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
|
|
|
|
newsk->sk_sndtimeo = sk->sk_sndtimeo;
|
2016-04-13 05:11:31 +08:00
|
|
|
newsk->sk_rxhash = sk->sk_rxhash;
|
2009-02-13 16:33:44 +08:00
|
|
|
|
|
|
|
newinet = inet_sk(newsk);
|
|
|
|
|
|
|
|
/* Initialize sk's sport, dport, rcv_saddr and daddr for
|
|
|
|
* getsockname() and getpeername()
|
|
|
|
*/
|
2009-10-15 14:30:45 +08:00
|
|
|
newinet->inet_sport = inet->inet_sport;
|
|
|
|
newinet->inet_saddr = inet->inet_saddr;
|
|
|
|
newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
|
|
|
|
newinet->inet_dport = htons(asoc->peer.port);
|
2009-02-13 16:33:44 +08:00
|
|
|
newinet->pmtudisc = inet->pmtudisc;
|
2009-10-15 14:30:45 +08:00
|
|
|
newinet->inet_id = asoc->next_tsn ^ jiffies;
|
2009-02-13 16:33:44 +08:00
|
|
|
|
|
|
|
newinet->uc_ttl = inet->uc_ttl;
|
|
|
|
newinet->mc_loop = 1;
|
|
|
|
newinet->mc_ttl = 1;
|
|
|
|
newinet->mc_index = 0;
|
|
|
|
newinet->mc_list = NULL;
|
2015-12-05 01:14:04 +08:00
|
|
|
|
|
|
|
if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
|
|
|
|
net_enable_timestamp();
|
2015-12-24 02:44:09 +08:00
|
|
|
|
2018-02-14 04:56:24 +08:00
|
|
|
/* Set newsk security attributes from original sk and connection
|
|
|
|
* security attribute from ep.
|
|
|
|
*/
|
|
|
|
security_sctp_sk_clone(ep, sk, newsk);
|
2009-02-13 16:33:44 +08:00
|
|
|
}
|
|
|
|
|
2015-06-12 21:16:41 +08:00
|
|
|
static inline void sctp_copy_descendant(struct sock *sk_to,
|
|
|
|
const struct sock *sk_from)
|
|
|
|
{
|
|
|
|
int ancestor_size = sizeof(struct inet_sock) +
|
|
|
|
sizeof(struct sctp_sock) -
|
|
|
|
offsetof(struct sctp_sock, auto_asconf_list);
|
|
|
|
|
|
|
|
if (sk_from->sk_family == PF_INET6)
|
|
|
|
ancestor_size += sizeof(struct ipv6_pinfo);
|
|
|
|
|
|
|
|
__inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Populate the fields of the newsk from the oldsk and migrate the assoc
|
|
|
|
* and its messages to the newsk.
|
|
|
|
*/
|
|
|
|
static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
|
|
|
|
struct sctp_association *assoc,
|
2017-08-11 10:23:50 +08:00
|
|
|
enum sctp_socket_type type)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct sctp_sock *oldsp = sctp_sk(oldsk);
|
|
|
|
struct sctp_sock *newsp = sctp_sk(newsk);
|
|
|
|
struct sctp_bind_bucket *pp; /* hash list port iterator */
|
|
|
|
struct sctp_endpoint *newep = newsp->ep;
|
|
|
|
struct sk_buff *skb, *tmp;
|
|
|
|
struct sctp_ulpevent *event;
|
2007-12-07 14:50:27 +08:00
|
|
|
struct sctp_bind_hashbucket *head;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Migrate socket buffer sizes and all the socket level options to the
|
|
|
|
* new socket.
|
|
|
|
*/
|
|
|
|
newsk->sk_sndbuf = oldsk->sk_sndbuf;
|
|
|
|
newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
|
|
|
|
/* Brute force copy old sctp opt. */
|
2015-06-12 21:16:41 +08:00
|
|
|
sctp_copy_descendant(newsk, oldsk);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Restore the ep value that was overwritten with the above structure
|
|
|
|
* copy.
|
|
|
|
*/
|
|
|
|
newsp->ep = newep;
|
|
|
|
newsp->hmac = NULL;
|
|
|
|
|
|
|
|
/* Hook this new socket in to the bind_hash list. */
|
2012-08-06 16:39:38 +08:00
|
|
|
head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
|
|
|
|
inet_sk(oldsk)->inet_num)];
|
2016-03-13 18:48:24 +08:00
|
|
|
spin_lock_bh(&head->lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
pp = sctp_sk(oldsk)->bind_hash;
|
|
|
|
sk_add_bind_node(newsk, &pp->owner);
|
|
|
|
sctp_sk(newsk)->bind_hash = pp;
|
2009-10-15 14:30:45 +08:00
|
|
|
inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
|
2016-03-13 18:48:24 +08:00
|
|
|
spin_unlock_bh(&head->lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-06-14 06:10:49 +08:00
|
|
|
/* Copy the bind_addr list from the original endpoint to the new
|
|
|
|
* endpoint so that we can handle restarts properly
|
|
|
|
*/
|
2007-12-07 14:50:54 +08:00
|
|
|
sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
|
|
|
|
&oldsp->ep->base.bind_addr, GFP_KERNEL);
|
2005-06-14 06:10:49 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Move any messages in the old socket's receive queue that are for the
|
|
|
|
* peeled off association to the new socket's receive queue.
|
|
|
|
*/
|
|
|
|
sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
|
|
|
|
event = sctp_skb2event(skb);
|
|
|
|
if (event->asoc == assoc) {
|
2005-08-10 10:25:21 +08:00
|
|
|
__skb_unlink(skb, &oldsk->sk_receive_queue);
|
2005-04-17 06:20:36 +08:00
|
|
|
__skb_queue_tail(&newsk->sk_receive_queue, skb);
|
2007-04-18 03:49:53 +08:00
|
|
|
sctp_skb_set_owner_r_frag(skb, newsk);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Clean up any messages pending delivery due to partial
|
|
|
|
* delivery. Three cases:
|
|
|
|
* 1) No partial deliver; no work.
|
|
|
|
* 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
|
|
|
|
* 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
|
|
|
|
*/
|
|
|
|
skb_queue_head_init(&newsp->pd_lobby);
|
2007-04-21 03:23:15 +08:00
|
|
|
atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-04-21 03:23:15 +08:00
|
|
|
if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
|
2005-04-17 06:20:36 +08:00
|
|
|
struct sk_buff_head *queue;
|
|
|
|
|
|
|
|
/* Decide which queue to move pd_lobby skbs to. */
|
|
|
|
if (assoc->ulpq.pd_mode) {
|
|
|
|
queue = &newsp->pd_lobby;
|
|
|
|
} else
|
|
|
|
queue = &newsk->sk_receive_queue;
|
|
|
|
|
|
|
|
/* Walk through the pd_lobby, looking for skbs that
|
|
|
|
* need to be moved to the new socket.
|
|
|
|
*/
|
|
|
|
sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
|
|
|
|
event = sctp_skb2event(skb);
|
|
|
|
if (event->asoc == assoc) {
|
2005-08-10 10:25:21 +08:00
|
|
|
__skb_unlink(skb, &oldsp->pd_lobby);
|
2005-04-17 06:20:36 +08:00
|
|
|
__skb_queue_tail(queue, skb);
|
2007-04-18 03:49:53 +08:00
|
|
|
sctp_skb_set_owner_r_frag(skb, newsk);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Clear up any skbs waiting for the partial
|
|
|
|
* delivery to finish.
|
|
|
|
*/
|
|
|
|
if (assoc->ulpq.pd_mode)
|
2007-04-21 03:23:15 +08:00
|
|
|
sctp_clear_pd(oldsk, NULL);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
}
|
|
|
|
|
2017-12-08 21:04:09 +08:00
|
|
|
sctp_for_each_rx_skb(assoc, newsk, sctp_skb_set_owner_r_frag);
|
2007-04-18 03:49:53 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Set the type of socket to indicate that it is peeled off from the
|
|
|
|
* original UDP-style socket or created with the accept() call on a
|
|
|
|
* TCP-style socket.
|
|
|
|
*/
|
|
|
|
newsp->type = type;
|
|
|
|
|
2006-05-20 02:01:18 +08:00
|
|
|
/* Mark the new socket "in-use" by the user so that any packets
|
|
|
|
* that may arrive on the association after we've moved it are
|
|
|
|
* queued to the backlog. This prevents a potential race between
|
|
|
|
* backlog processing on the old socket and new-packet processing
|
|
|
|
* on the new socket.
|
2007-06-23 06:14:46 +08:00
|
|
|
*
|
|
|
|
* The caller has just allocated newsk so we can guarantee that other
|
|
|
|
* paths won't try to lock it and then oldsk.
|
2006-05-20 02:01:18 +08:00
|
|
|
*/
|
2007-06-23 06:14:46 +08:00
|
|
|
lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
|
2017-10-28 02:13:29 +08:00
|
|
|
sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
|
2005-04-17 06:20:36 +08:00
|
|
|
sctp_assoc_migrate(assoc, newsk);
|
2017-10-28 02:13:29 +08:00
|
|
|
sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* If the association on the newsk is already closed before accept()
|
|
|
|
* is called, set RCV_SHUTDOWN flag.
|
|
|
|
*/
|
2016-06-09 22:48:18 +08:00
|
|
|
if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) {
|
2017-12-20 11:12:54 +08:00
|
|
|
inet_sk_set_state(newsk, SCTP_SS_CLOSED);
|
2005-04-17 06:20:36 +08:00
|
|
|
newsk->sk_shutdown |= RCV_SHUTDOWN;
|
2016-06-09 22:48:18 +08:00
|
|
|
} else {
|
2017-12-20 11:12:54 +08:00
|
|
|
inet_sk_set_state(newsk, SCTP_SS_ESTABLISHED);
|
2016-06-09 22:48:18 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2014-01-21 15:44:11 +08:00
|
|
|
release_sock(newsk);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
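sctp_sock_migrate() above is the kernel half of both accept() on TCP-style sockets and sctp_peeloff() on UDP-style ones. A hedged sketch of the peel-off side, assuming lksctp-tools' sctp_peeloff() and an association id taken from an SCTP_ASSOC_CHANGE notification:

/* Sketch: branch one association off a one-to-many socket into its own
 * one-to-one style fd; underneath, the kernel runs sctp_sock_migrate()
 * to move queued events and the bind hash entry to the new socket.
 */
#include <stdio.h>
#include <netinet/sctp.h>	/* sctp_peeloff(), link with -lsctp */

static int peel_off_assoc(int seqpacket_fd, sctp_assoc_t assoc_id)
{
	int fd = sctp_peeloff(seqpacket_fd, assoc_id);

	if (fd < 0)
		perror("sctp_peeloff");
	return fd;		/* usable like a TCP-style SCTP socket */
}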
|
|
|
|
|
2007-08-16 07:07:44 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* This proto struct describes the ULP interface for SCTP. */
|
|
|
|
struct proto sctp_prot = {
|
|
|
|
.name = "SCTP",
|
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.close = sctp_close,
|
|
|
|
.disconnect = sctp_disconnect,
|
|
|
|
.accept = sctp_accept,
|
|
|
|
.ioctl = sctp_ioctl,
|
|
|
|
.init = sctp_init_sock,
|
|
|
|
.destroy = sctp_destroy_sock,
|
|
|
|
.shutdown = sctp_shutdown,
|
|
|
|
.setsockopt = sctp_setsockopt,
|
|
|
|
.getsockopt = sctp_getsockopt,
|
|
|
|
.sendmsg = sctp_sendmsg,
|
|
|
|
.recvmsg = sctp_recvmsg,
|
|
|
|
.bind = sctp_bind,
|
|
|
|
.backlog_rcv = sctp_backlog_rcv,
|
|
|
|
.hash = sctp_hash,
|
|
|
|
.unhash = sctp_unhash,
|
|
|
|
.get_port = sctp_get_port,
|
|
|
|
.obj_size = sizeof(struct sctp_sock),
|
2017-08-25 07:57:57 +08:00
|
|
|
.useroffset = offsetof(struct sctp_sock, subscribe),
|
|
|
|
.usersize = offsetof(struct sctp_sock, initmsg) -
|
|
|
|
offsetof(struct sctp_sock, subscribe) +
|
|
|
|
sizeof_field(struct sctp_sock, initmsg),
|
2007-08-16 07:07:44 +08:00
|
|
|
.sysctl_mem = sysctl_sctp_mem,
|
|
|
|
.sysctl_rmem = sysctl_sctp_rmem,
|
|
|
|
.sysctl_wmem = sysctl_sctp_wmem,
|
|
|
|
.memory_pressure = &sctp_memory_pressure,
|
|
|
|
.enter_memory_pressure = sctp_enter_memory_pressure,
|
|
|
|
.memory_allocated = &sctp_memory_allocated,
|
2008-02-20 16:23:01 +08:00
|
|
|
.sockets_allocated = &sctp_sockets_allocated,
|
2005-04-17 06:20:36 +08:00
|
|
|
};
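The struct proto above is what the socket layer dispatches into; as a hedged, kernel-style fragment (illustration only, the real calls live in net/sctp/protocol.c), registration looks roughly like:

#include <linux/init.h>
#include <net/sock.h>

extern struct proto sctp_prot;

static int __init example_register(void)
{
	/* allocates the per-protocol slab cache before the protosw entry
	 * for IPPROTO_SCTP is wired up via inet_register_protosw()
	 */
	return proto_register(&sctp_prot, 1);
}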
|
|
|
|
|
2011-12-10 17:48:31 +08:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
2007-11-06 15:40:28 +08:00
|
|
|
|
2015-12-01 23:20:07 +08:00
|
|
|
#include <net/transp_v6.h>
|
|
|
|
static void sctp_v6_destroy_sock(struct sock *sk)
|
|
|
|
{
|
|
|
|
sctp_destroy_sock(sk);
|
|
|
|
inet6_destroy_sock(sk);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
struct proto sctpv6_prot = {
|
|
|
|
.name = "SCTPv6",
|
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.close = sctp_close,
|
|
|
|
.disconnect = sctp_disconnect,
|
|
|
|
.accept = sctp_accept,
|
|
|
|
.ioctl = sctp_ioctl,
|
|
|
|
.init = sctp_init_sock,
|
2015-12-01 23:20:07 +08:00
|
|
|
.destroy = sctp_v6_destroy_sock,
|
2005-04-17 06:20:36 +08:00
|
|
|
.shutdown = sctp_shutdown,
|
|
|
|
.setsockopt = sctp_setsockopt,
|
|
|
|
.getsockopt = sctp_getsockopt,
|
|
|
|
.sendmsg = sctp_sendmsg,
|
|
|
|
.recvmsg = sctp_recvmsg,
|
|
|
|
.bind = sctp_bind,
|
|
|
|
.backlog_rcv = sctp_backlog_rcv,
|
|
|
|
.hash = sctp_hash,
|
|
|
|
.unhash = sctp_unhash,
|
|
|
|
.get_port = sctp_get_port,
|
|
|
|
.obj_size = sizeof(struct sctp6_sock),
|
2017-08-25 07:57:57 +08:00
|
|
|
.useroffset = offsetof(struct sctp6_sock, sctp.subscribe),
|
|
|
|
.usersize = offsetof(struct sctp6_sock, sctp.initmsg) -
|
|
|
|
offsetof(struct sctp6_sock, sctp.subscribe) +
|
|
|
|
sizeof_field(struct sctp6_sock, sctp.initmsg),
|
2007-08-16 07:07:44 +08:00
|
|
|
.sysctl_mem = sysctl_sctp_mem,
|
|
|
|
.sysctl_rmem = sysctl_sctp_rmem,
|
|
|
|
.sysctl_wmem = sysctl_sctp_wmem,
|
|
|
|
.memory_pressure = &sctp_memory_pressure,
|
|
|
|
.enter_memory_pressure = sctp_enter_memory_pressure,
|
|
|
|
.memory_allocated = &sctp_memory_allocated,
|
2008-02-20 16:23:01 +08:00
|
|
|
.sockets_allocated = &sctp_sockets_allocated,
|
2005-04-17 06:20:36 +08:00
|
|
|
};
|
2011-12-10 17:48:31 +08:00
|
|
|
#endif /* IS_ENABLED(CONFIG_IPV6) */
|