
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [TCP]: DSACK signals data receival, be conservative
  [TCP]: Also handle snd_una changes in tcp_cwnd_down
  [TIPC]: Fix two minor sparse warnings.
  [TIPC]: Make function tipc_nameseq_subscribe static.
  [PF_KEY]: Fix ipsec not working in 2.6.23-rc1-git10
  [TCP]: Invoke tcp_sendmsg() directly, do not use inet_sendmsg().
  [IPV4] route.c: mostly kmalloc + memset conversion to k[cz]alloc
  [IPV4] raw.c: kmalloc + memset conversion to kzalloc
  [NETFILTER] nf_conntrack_l3proto_ipv4_compat.c: kmalloc + memset conversion to kzalloc
  [NETFILTER] nf_conntrack_expect.c: kmalloc + memset conversion to kzalloc
  [NET]: Removal of duplicated include net/wanrouter/wanmain.c
  SCTP: remove useless code in function sctp_init_cause
  SCTP: drop SACK if ctsn is not less than the next tsn of assoc
  SCTP: IPv4 mapped addr not returned in SCTPv6 accept()
  SCTP: IPv4 mapped addr not returned in SCTPv6 accept()
  sctp: fix shadow symbol in net/sctp/tsnmap.c
  sctp: try to fix readlock
  sctp: remove shadowed symbols
  sctp: move global declaration to header file.
  sctp: make locally used function static
Linus Torvalds 2007-08-03 14:57:41 -07:00
commit e1d7e7fcf8
23 changed files with 162 additions and 96 deletions

View File

@ -189,6 +189,16 @@ int sctp_assocs_proc_init(void);
void sctp_assocs_proc_exit(void);
/*
* Module global variables
*/
/*
* sctp/protocol.c
*/
extern struct kmem_cache *sctp_chunk_cachep __read_mostly;
extern struct kmem_cache *sctp_bucket_cachep __read_mostly;
/*
* Section: Macros, externs, and inlines
*/

View File

@ -281,7 +281,7 @@ extern int tcp_v4_remember_stamp(struct sock *sk);
extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk,
extern int tcp_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size);
extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);

View File

@ -831,7 +831,7 @@ const struct proto_ops inet_stream_ops = {
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.sendmsg = tcp_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
.sendpage = tcp_sendpage,

View File

@ -294,15 +294,14 @@ static int exp_open(struct inode *inode, struct file *file)
struct ct_expect_iter_state *st;
int ret;
st = kmalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL);
if (st == NULL)
st = kzalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL);
if (!st)
return -ENOMEM;
ret = seq_open(file, &exp_seq_ops);
if (ret)
goto out_free;
seq = file->private_data;
seq->private = st;
memset(st, 0, sizeof(struct ct_expect_iter_state));
return ret;
out_free:
kfree(st);
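
The hunk above is the standard kmalloc()+memset() to kzalloc() cleanup that several commits in this merge apply. A minimal sketch of the pattern, as an illustrative fragment using the iterator state from this file:

	/* Before: allocate, then zero later with a separate memset(). */
	st = kmalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;
	/* ... */
	memset(st, 0, sizeof(struct ct_expect_iter_state));

	/* After: kzalloc() returns already-zeroed memory, so the
	 * trailing memset() is simply deleted. */
	st = kzalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL);
	if (!st)
		return -ENOMEM;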

View File

@ -900,8 +900,9 @@ static int raw_seq_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int rc = -ENOMEM;
struct raw_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
struct raw_iter_state *s;
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
goto out;
rc = seq_open(file, &raw_seq_ops);
@ -910,7 +911,6 @@ static int raw_seq_open(struct inode *inode, struct file *file)
seq = file->private_data;
seq->private = s;
memset(s, 0, sizeof(*s));
out:
return rc;
out_kfree:

View File

@ -374,8 +374,9 @@ static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int rc = -ENOMEM;
struct rt_cache_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
struct rt_cache_iter_state *s;
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
goto out;
rc = seq_open(file, &rt_cache_seq_ops);
@ -383,7 +384,6 @@ static int rt_cache_seq_open(struct inode *inode, struct file *file)
goto out_kfree;
seq = file->private_data;
seq->private = s;
memset(s, 0, sizeof(*s));
out:
return rc;
out_kfree:

View File

@ -658,9 +658,10 @@ static inline int select_size(struct sock *sk)
return tmp;
}
int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size)
{
struct sock *sk = sock->sk;
struct iovec *iov;
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
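
This prototype change goes hand in hand with the af_inet.c, tcp_ipv4.c, af_inet6.c and tcp_ipv6.c hunks: inet_stream_ops/inet6_stream_ops now point .sendmsg directly at tcp_sendmsg(), so tcp_sendmsg() must accept the struct socket * that a proto_ops->sendmsg handler receives and fetch the sock itself. The indirection being bypassed is roughly the following (a from-memory sketch of the 2.6.23-era inet_sendmsg(), not a verbatim quote):

	int inet_sendmsg(struct kiocb *iocb, struct socket *sock,
			 struct msghdr *msg, size_t size)
	{
		struct sock *sk = sock->sk;

		/* We may need to bind the socket. */
		if (!inet_sk(sk)->num && inet_autobind(sk))
			return -EAGAIN;

		/* One indirect call per sendmsg() on every TCP socket. */
		return sk->sk_prot->sendmsg(iocb, sk, msg, size);
	}

Calling tcp_sendmsg() directly avoids that extra indirect call; a TCP socket must already be connected (and therefore bound) before sendmsg() can transmit, so the autobind step is not needed on this path.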

View File

@ -102,11 +102,14 @@ int sysctl_tcp_abc __read_mostly;
#define FLAG_DATA_LOST 0x80 /* SACK detected data lossage. */
#define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/
#define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */
#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained DSACK info */
#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
#define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE)
#define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED)
#define FLAG_ANY_PROGRESS (FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED)
#define IsReno(tp) ((tp)->rx_opt.sack_ok == 0)
#define IsFack(tp) ((tp)->rx_opt.sack_ok & 2)
@ -964,12 +967,14 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
/* Check for D-SACK. */
if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) {
flag |= FLAG_DSACKING_ACK;
found_dup_sack = 1;
tp->rx_opt.sack_ok |= 4;
NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
} else if (num_sacks > 1 &&
!after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) &&
!before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) {
flag |= FLAG_DSACKING_ACK;
found_dup_sack = 1;
tp->rx_opt.sack_ok |= 4;
NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
@ -1856,7 +1861,7 @@ static void tcp_cwnd_down(struct sock *sk, int flag)
struct tcp_sock *tp = tcp_sk(sk);
int decr = tp->snd_cwnd_cnt + 1;
if ((flag&FLAG_FORWARD_PROGRESS) ||
if ((flag&(FLAG_ANY_PROGRESS|FLAG_DSACKING_ACK)) ||
(IsReno(tp) && !(flag&FLAG_NOT_DUP))) {
tp->snd_cwnd_cnt = decr&1;
decr >>= 1;
@ -2107,15 +2112,13 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
* tcp_xmit_retransmit_queue().
*/
static void
tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
int prior_packets, int flag)
tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int is_dupack = (tp->snd_una == prior_snd_una &&
(!(flag&FLAG_NOT_DUP) ||
((flag&FLAG_DATA_SACKED) &&
(tp->fackets_out > tp->reordering))));
int is_dupack = !(flag&(FLAG_SND_UNA_ADVANCED|FLAG_NOT_DUP));
int do_lost = is_dupack || ((flag&FLAG_DATA_SACKED) &&
(tp->fackets_out > tp->reordering));
/* Some technical things:
* 1. Reno does not count dupacks (sacked_out) automatically. */
@ -2192,14 +2195,14 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
/* F. Process state. */
switch (icsk->icsk_ca_state) {
case TCP_CA_Recovery:
if (prior_snd_una == tp->snd_una) {
if (!(flag & FLAG_SND_UNA_ADVANCED)) {
if (IsReno(tp) && is_dupack)
tcp_add_reno_sack(sk);
} else {
int acked = prior_packets - tp->packets_out;
if (IsReno(tp))
tcp_remove_reno_sacks(sk, acked);
is_dupack = tcp_try_undo_partial(sk, acked);
do_lost = tcp_try_undo_partial(sk, acked);
}
break;
case TCP_CA_Loss:
@ -2215,7 +2218,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
/* Loss is undone; fall through to processing in Open state. */
default:
if (IsReno(tp)) {
if (tp->snd_una != prior_snd_una)
if (flag & FLAG_SND_UNA_ADVANCED)
tcp_reset_reno_sack(tp);
if (is_dupack)
tcp_add_reno_sack(sk);
@ -2264,7 +2267,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
tcp_set_ca_state(sk, TCP_CA_Recovery);
}
if (is_dupack || tcp_head_timedout(sk))
if (do_lost || tcp_head_timedout(sk))
tcp_update_scoreboard(sk);
tcp_cwnd_down(sk, flag);
tcp_xmit_retransmit_queue(sk);
@ -2684,7 +2687,7 @@ static void tcp_undo_spur_to_response(struct sock *sk, int flag)
* to prove that the RTO is indeed spurious. It transfers the control
* from F-RTO to the conventional RTO recovery
*/
static int tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag)
static int tcp_process_frto(struct sock *sk, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
@ -2704,8 +2707,7 @@ static int tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag)
* ACK isn't duplicate nor advances window, e.g., opposite dir
* data, winupdate
*/
if ((tp->snd_una == prior_snd_una) && (flag&FLAG_NOT_DUP) &&
!(flag&FLAG_FORWARD_PROGRESS))
if (!(flag&FLAG_ANY_PROGRESS) && (flag&FLAG_NOT_DUP))
return 1;
if (!(flag&FLAG_DATA_ACKED)) {
@ -2785,6 +2787,9 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
if (before(ack, prior_snd_una))
goto old_ack;
if (after(ack, prior_snd_una))
flag |= FLAG_SND_UNA_ADVANCED;
if (sysctl_tcp_abc) {
if (icsk->icsk_ca_state < TCP_CA_CWR)
tp->bytes_acked += ack - prior_snd_una;
@ -2837,14 +2842,14 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
flag |= tcp_clean_rtx_queue(sk, &seq_rtt);
if (tp->frto_counter)
frto_cwnd = tcp_process_frto(sk, prior_snd_una, flag);
frto_cwnd = tcp_process_frto(sk, flag);
if (tcp_ack_is_dubious(sk, flag)) {
/* Advance CWND, if state allows this. */
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
tcp_may_raise_cwnd(sk, flag))
tcp_cong_avoid(sk, ack, prior_in_flight, 0);
tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
tcp_fastretrans_alert(sk, prior_packets, flag);
} else {
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
tcp_cong_avoid(sk, ack, prior_in_flight, 1);
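
Taken together, these tcp_input.c hunks replace the explicit prior_snd_una bookkeeping with flag bits computed once in tcp_ack() and then consulted by the helpers, which is what lets tcp_fastretrans_alert() and tcp_process_frto() drop their prior_snd_una argument. A condensed restatement of the new logic, using only names visible in the diff:

	/* tcp_ack(): remember whether this ACK advanced snd_una at all. */
	if (after(ack, prior_snd_una))
		flag |= FLAG_SND_UNA_ADVANCED;

	/* tcp_fastretrans_alert(): a dupack is now simply an ACK that neither
	 * advanced snd_una nor carried the data/window-update/ACKed bits. */
	is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));

	/* tcp_cwnd_down() and tcp_process_frto() now key off
	 * FLAG_ANY_PROGRESS (= FLAG_FORWARD_PROGRESS | FLAG_SND_UNA_ADVANCED)
	 * and, for cwnd reduction, FLAG_DSACKING_ACK, instead of comparing
	 * snd_una against prior_snd_una themselves. */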

View File

@ -2425,7 +2425,6 @@ struct proto tcp_prot = {
.shutdown = tcp_shutdown,
.setsockopt = tcp_setsockopt,
.getsockopt = tcp_getsockopt,
.sendmsg = tcp_sendmsg,
.recvmsg = tcp_recvmsg,
.backlog_rcv = tcp_v4_do_rcv,
.hash = tcp_v4_hash,

View File

@ -484,7 +484,7 @@ const struct proto_ops inet6_stream_ops = {
.shutdown = inet_shutdown, /* ok */
.setsockopt = sock_common_setsockopt, /* ok */
.getsockopt = sock_common_getsockopt, /* ok */
.sendmsg = inet_sendmsg, /* ok */
.sendmsg = tcp_sendmsg, /* ok */
.recvmsg = sock_common_recvmsg, /* ok */
.mmap = sock_no_mmap,
.sendpage = tcp_sendpage,

View File

@ -2115,7 +2115,6 @@ struct proto tcpv6_prot = {
.shutdown = tcp_shutdown,
.setsockopt = tcp_setsockopt,
.getsockopt = tcp_getsockopt,
.sendmsg = tcp_sendmsg,
.recvmsg = tcp_recvmsg,
.backlog_rcv = tcp_v6_do_rcv,
.hash = tcp_v6_hash,

View File

@ -1206,6 +1206,9 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr,
x->sel.prefixlen_s = addr->sadb_address_prefixlen;
}
if (!x->sel.family)
x->sel.family = x->props.family;
if (ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]) {
struct sadb_x_nat_t_type* n_type;
struct xfrm_encap_tmpl *natt;

View File

@ -477,15 +477,14 @@ static int exp_open(struct inode *inode, struct file *file)
struct ct_expect_iter_state *st;
int ret;
st = kmalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL);
if (st == NULL)
st = kzalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL);
if (!st)
return -ENOMEM;
ret = seq_open(file, &exp_seq_ops);
if (ret)
goto out_free;
seq = file->private_data;
seq->private = st;
memset(st, 0, sizeof(struct ct_expect_iter_state));
return ret;
out_free:
kfree(st);

View File

@ -590,7 +590,7 @@ out_unlock:
* Return 0 - If further processing is needed.
* Return 1 - If the packet can be discarded right away.
*/
int sctp_rcv_ootb(struct sk_buff *skb)
static int sctp_rcv_ootb(struct sk_buff *skb)
{
sctp_chunkhdr_t *ch;
__u8 *ch_end;

View File

@ -641,6 +641,8 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
newsctp6sk = (struct sctp6_sock *)newsk;
inet_sk(newsk)->pinet6 = &newsctp6sk->inet6;
sctp_sk(newsk)->v4mapped = sctp_sk(sk)->v4mapped;
newinet = inet_sk(newsk);
newnp = inet6_sk(newsk);

View File

@ -65,8 +65,6 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
extern struct kmem_cache *sctp_chunk_cachep;
SCTP_STATIC
struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
__u8 type, __u8 flags, int paylen);
@ -115,15 +113,12 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
const void *payload, size_t paylen)
{
sctp_errhdr_t err;
int padlen;
__u16 len;
/* Cause code constants are now defined in network order. */
err.cause = cause_code;
len = sizeof(sctp_errhdr_t) + paylen;
padlen = len % 4;
err.length = htons(len);
len += padlen;
chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
sctp_addto_chunk(chunk, paylen, payload);
}
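
For reference, the lines removed here were dead code: the padding amount was computed and added to len only after the length field had already been stored, and len is not used again afterwards, so the adjustment had no effect (comments added for illustration):

	len = sizeof(sctp_errhdr_t) + paylen;
	padlen = len % 4;
	err.length  = htons(len);	/* length recorded here ... */
	len += padlen;			/* ... so this adjustment was never observed */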
@ -1454,7 +1449,6 @@ no_hmac:
do_gettimeofday(&tv);
if (!asoc && tv_lt(bear_cookie->expiration, tv)) {
__u16 len;
/*
* Section 3.3.10.3 Stale Cookie Error (3)
*

View File

@ -97,6 +97,13 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
const struct sctp_association *asoc,
struct sctp_transport *transport);
static sctp_disposition_t sctp_sf_abort_violation(
const struct sctp_association *asoc,
void *arg,
sctp_cmd_seq_t *commands,
const __u8 *payload,
const size_t paylen);
static sctp_disposition_t sctp_sf_violation_chunklen(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
@ -104,6 +111,13 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
void *arg,
sctp_cmd_seq_t *commands);
static sctp_disposition_t sctp_sf_violation_ctsn(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
/* Small helper function that checks if the chunk length
* is of the appropriate length. The 'required_length' argument
* is set to be the size of a specific chunk we are testing.
@ -2880,6 +2894,13 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
return SCTP_DISPOSITION_DISCARD;
}
/* If the Cumulative TSN Ack is beyond the max TSN currently
* sent, terminate the association and respond to the
* sender with an ABORT.
*/

if (!TSN_lt(ctsn, asoc->next_tsn))
return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
/* Return this SACK for further processing. */
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_SACKH(sackh));
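
The new guard relies on SCTP's serial-number comparison of 32-bit TSNs. As a rough illustration, a TSN_lt()-style "less than" can be written as below (the in-tree macro is equivalent in intent; its exact text may differ):

	/* "a < b" in 32-bit serial-number arithmetic: true when a precedes b,
	 * even across wrap-around of the TSN space. */
	static inline int tsn_lt(__u32 a, __u32 b)
	{
		return (__s32)(a - b) < 0;
	}

If the peer's Cumulative TSN Ack is not strictly below asoc->next_tsn, it acknowledges data that was never sent, so the SACK is rejected via sctp_sf_violation_ctsn(), added later in this file.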
@ -3691,40 +3712,21 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
return SCTP_DISPOSITION_VIOLATION;
}
/*
* Handle a protocol violation when the chunk length is invalid.
* "Invalid" length is identified as smaller then the minimal length a
* given chunk can be. For example, a SACK chunk has invalid length
* if it's length is set to be smaller then the size of sctp_sack_chunk_t.
*
* We inform the other end by sending an ABORT with a Protocol Violation
* error code.
*
* Section: Not specified
* Verification Tag: Nothing to do
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (reply_msg, msg_up, counters)
*
* Generate an ABORT chunk and terminate the association.
* Common function to handle a protocol violation.
*/
static sctp_disposition_t sctp_sf_violation_chunklen(
const struct sctp_endpoint *ep,
static sctp_disposition_t sctp_sf_abort_violation(
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
sctp_cmd_seq_t *commands,
const __u8 *payload,
const size_t paylen)
{
struct sctp_chunk *chunk = arg;
struct sctp_chunk *abort = NULL;
char err_str[]="The following chunk had invalid length:";
/* Make the abort chunk. */
abort = sctp_make_abort_violation(asoc, chunk, err_str,
sizeof(err_str));
abort = sctp_make_abort_violation(asoc, chunk, payload, paylen);
if (!abort)
goto nomem;
@ -3756,6 +3758,57 @@ nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Handle a protocol violation when the chunk length is invalid.
* "Invalid" length is identified as smaller then the minimal length a
* given chunk can be. For example, a SACK chunk has invalid length
* if it's length is set to be smaller then the size of sctp_sack_chunk_t.
*
* We inform the other end by sending an ABORT with a Protocol Violation
* error code.
*
* Section: Not specified
* Verification Tag: Nothing to do
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (reply_msg, msg_up, counters)
*
* Generate an ABORT chunk and terminate the association.
*/
static sctp_disposition_t sctp_sf_violation_chunklen(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
char err_str[]="The following chunk had invalid length:";
return sctp_sf_abort_violation(asoc, arg, commands, err_str,
sizeof(err_str));
}
/* Handle a protocol violation when the peer is trying to advance the
* cumulative tsn ack to a point beyond the max tsn currently sent.
*
* We inform the other end by sending an ABORT with a Protocol Violation
* error code.
*/
static sctp_disposition_t sctp_sf_violation_ctsn(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:";
return sctp_sf_abort_violation(asoc, arg, commands, err_str,
sizeof(err_str));
}
/***************************************************************************
* These are the state functions for handling primitive (Section 10) events.
***************************************************************************/

View File

@ -107,8 +107,6 @@ static void sctp_sock_migrate(struct sock *, struct sock *,
struct sctp_association *, sctp_socket_type_t);
static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
extern struct kmem_cache *sctp_bucket_cachep;
/* Get the sndbuf space available at the time on the association. */
static inline int sctp_wspace(struct sctp_association *asoc)
{
@ -433,7 +431,7 @@ out:
*
* Only sctp_setsockopt_bindx() is supposed to call this function.
*/
int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
int cnt;
int retval = 0;
@ -602,7 +600,7 @@ out:
*
* Only sctp_setsockopt_bindx() is supposed to call this function.
*/
int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_endpoint *ep = sp->ep;
@ -977,7 +975,7 @@ static int __sctp_connect(struct sock* sk,
int err = 0;
int addrcnt = 0;
int walk_size = 0;
union sctp_addr *sa_addr;
union sctp_addr *sa_addr = NULL;
void *addr_buf;
unsigned short port;
unsigned int f_flags = 0;
@ -1011,7 +1009,10 @@ static int __sctp_connect(struct sock* sk,
goto out_free;
}
err = sctp_verify_addr(sk, sa_addr, af->sockaddr_len);
/* Save current address so we can work with it */
memcpy(&to, sa_addr, af->sockaddr_len);
err = sctp_verify_addr(sk, &to, af->sockaddr_len);
if (err)
goto out_free;
@ -1021,12 +1022,11 @@ static int __sctp_connect(struct sock* sk,
if (asoc && asoc->peer.port && asoc->peer.port != port)
goto out_free;
memcpy(&to, sa_addr, af->sockaddr_len);
/* Check if there already is a matching association on the
* endpoint (other than the one created here).
*/
asoc2 = sctp_endpoint_lookup_assoc(ep, sa_addr, &transport);
asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
if (asoc2 && asoc2 != asoc) {
if (asoc2->state >= SCTP_STATE_ESTABLISHED)
err = -EISCONN;
@ -1039,7 +1039,7 @@ static int __sctp_connect(struct sock* sk,
* make sure that there is no peeled-off association matching
* the peer address even on another socket.
*/
if (sctp_endpoint_is_peeled_off(ep, sa_addr)) {
if (sctp_endpoint_is_peeled_off(ep, &to)) {
err = -EADDRNOTAVAIL;
goto out_free;
}
@ -1070,7 +1070,7 @@ static int __sctp_connect(struct sock* sk,
}
}
scope = sctp_scope(sa_addr);
scope = sctp_scope(&to);
asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
if (!asoc) {
err = -ENOMEM;
@ -1079,7 +1079,7 @@ static int __sctp_connect(struct sock* sk,
}
/* Prime the peer's transport structures. */
transport = sctp_assoc_add_peer(asoc, sa_addr, GFP_KERNEL,
transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
SCTP_UNKNOWN);
if (!transport) {
err = -ENOMEM;
@ -1103,8 +1103,8 @@ static int __sctp_connect(struct sock* sk,
/* Initialize sk's dport and daddr for getpeername() */
inet_sk(sk)->dport = htons(asoc->peer.port);
af = sctp_get_af_specific(to.sa.sa_family);
af->to_sk_daddr(&to, sk);
af = sctp_get_af_specific(sa_addr->sa.sa_family);
af->to_sk_daddr(sa_addr, sk);
sk->sk_err = 0;
/* in-kernel sockets don't generally have a file allocated to them
@ -1531,7 +1531,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
goto out_unlock;
}
if (sinfo_flags & SCTP_ABORT) {
struct sctp_chunk *chunk;
chunk = sctp_make_abort_user(asoc, msg, msg_len);
if (!chunk) {
@ -4353,7 +4352,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
space_left, &bytes_copied);
if (cnt < 0) {
err = cnt;
goto error;
goto error_lock;
}
goto copy_getaddrs;
}
@ -4367,7 +4366,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
if (space_left < addrlen) {
err = -ENOMEM; /*fixme: right error?*/
goto error;
goto error_lock;
}
memcpy(buf, &temp, addrlen);
buf += addrlen;
@ -4381,15 +4380,21 @@ copy_getaddrs:
if (copy_to_user(to, addrs, bytes_copied)) {
err = -EFAULT;
goto error;
goto out;
}
if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) {
err = -EFAULT;
goto error;
goto out;
}
if (put_user(bytes_copied, optlen))
err = -EFAULT;
error:
goto out;
error_lock:
sctp_read_unlock(addr_lock);
out:
kfree(addrs);
return err;
}
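
The relabelled gotos are the substance of the "sctp: try to fix readlock" change: error paths taken while the address read lock is still held now go through error_lock, while failures after the unlock jump straight to out. A heavily condensed sketch of the resulting shape of sctp_getsockopt_local_addrs() (assumption: simplified, not the full function):

	sctp_read_lock(addr_lock);
	/* ... gather the bound addresses into 'addrs'; failures in this
	 * region still hold the lock ... */
	if (cnt < 0) {
		err = cnt;
		goto error_lock;
	}
	sctp_read_unlock(addr_lock);

	/* copy_to_user()/put_user() failures happen after the unlock. */
	if (copy_to_user(to, addrs, bytes_copied)) {
		err = -EFAULT;
		goto out;
	}
	goto out;

error_lock:
	sctp_read_unlock(addr_lock);
out:
	kfree(addrs);
	return err;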
@ -5964,7 +5969,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
return err;
}
void sctp_wait_for_close(struct sock *sk, long timeout)
static void sctp_wait_for_close(struct sock *sk, long timeout)
{
DEFINE_WAIT(wait);

View File

@ -161,7 +161,7 @@ SCTP_STATIC int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map,
__u16 *start, __u16 *end)
{
int started, ended;
__u16 _start, _end, offset;
__u16 start_, end_, offset;
/* We haven't found a gap yet. */
started = ended = 0;
@ -175,7 +175,7 @@ SCTP_STATIC int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map,
offset = iter->start - map->base_tsn;
sctp_tsnmap_find_gap_ack(map->tsn_map, offset, map->len, 0,
&started, &_start, &ended, &_end);
&started, &start_, &ended, &end_);
}
/* Do we need to check the overflow map? */
@ -193,8 +193,8 @@ SCTP_STATIC int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map,
offset,
map->len,
map->len,
&started, &_start,
&ended, &_end);
&started, &start_,
&ended, &end_);
}
/* The Gap Ack Block happens to end at the end of the
@ -202,7 +202,7 @@ SCTP_STATIC int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map,
*/
if (started && !ended) {
ended++;
_end = map->len + map->len - 1;
end_ = map->len + map->len - 1;
}
/* If we found a Gap Ack Block, return the start and end and
@ -215,8 +215,8 @@ SCTP_STATIC int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map,
int gap = map->cumulative_tsn_ack_point -
map->base_tsn;
*start = _start - gap;
*end = _end - gap;
*start = start_ - gap;
*end = end_ - gap;
/* Move the iterator forward. */
iter->start = map->cumulative_tsn_ack_point + *end + 1;

View File

@ -2383,10 +2383,10 @@ void tipc_link_changeover(struct link *l_ptr)
struct tipc_msg *msg = buf_msg(crs);
if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
u32 msgcount = msg_msgcnt(msg);
struct tipc_msg *m = msg_get_wrapped(msg);
unchar* pos = (unchar*)m;
msgcount = msg_msgcnt(msg);
while (msgcount--) {
msg_set_seqno(m,msg_seqno(msg));
tipc_link_tunnel(l_ptr, &tunnel_hdr, m,

View File

@ -501,7 +501,7 @@ end_node:
* sequence overlapping with the requested sequence
*/
void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
static void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
{
struct sub_seq *sseq = nseq->sseqs;

View File

@ -241,8 +241,6 @@ struct node *tipc_node_attach_link(struct link *l_ptr)
char addr_string[16];
if (n_ptr->link_cnt >= 2) {
char addr_string[16];
err("Attempt to create third link to %s\n",
addr_string_fill(addr_string, n_ptr->addr));
return NULL;

View File

@ -46,7 +46,6 @@
#include <linux/capability.h>
#include <linux/errno.h> /* return codes */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h> /* support for loadable modules */
#include <linux/slab.h> /* kmalloc(), kfree() */
#include <linux/mm.h>