Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
commit 508862e493
@@ -715,6 +715,7 @@ enum {
         NET_SCTP_PRSCTP_ENABLE = 14,
         NET_SCTP_SNDBUF_POLICY = 15,
         NET_SCTP_SACK_TIMEOUT = 16,
+        NET_SCTP_RCVBUF_POLICY = 17,
 };
 
 /* /proc/sys/net/bridge */
@@ -120,6 +120,7 @@ typedef union {
         int error;
         sctp_state_t state;
         sctp_event_timeout_t to;
+        unsigned long zero;
         void *ptr;
         struct sctp_chunk *chunk;
         struct sctp_association *asoc;
@@ -148,17 +149,17 @@ static inline sctp_arg_t SCTP_NULL(void)
 }
 static inline sctp_arg_t SCTP_NOFORCE(void)
 {
-        sctp_arg_t retval; retval.i32 = 0; return retval;
+        sctp_arg_t retval = {.zero = 0UL}; retval.i32 = 0; return retval;
 }
 static inline sctp_arg_t SCTP_FORCE(void)
 {
-        sctp_arg_t retval; retval.i32 = 1; return retval;
+        sctp_arg_t retval = {.zero = 0UL}; retval.i32 = 1; return retval;
 }
 
 #define SCTP_ARG_CONSTRUCTOR(name, type, elt) \
 static inline sctp_arg_t \
 SCTP_## name (type arg) \
-{ sctp_arg_t retval; retval.elt = arg; return retval; }
+{ sctp_arg_t retval = {.zero = 0UL}; retval.elt = arg; return retval; }
 
 SCTP_ARG_CONSTRUCTOR(I32, __s32, i32)
 SCTP_ARG_CONSTRUCTOR(U32, __u32, u32)
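The {.zero = 0UL} change above clears the whole sctp_arg_t before a smaller member is set, so no stale bytes ride along when the union is passed by value. A minimal userspace sketch of the same pattern; the union and constructor below are stand-ins, not the kernel definitions:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for sctp_arg_t: small members share storage with one member
 * ("zero") that is as wide as the union on common Linux ABIs.
 */
typedef union {
        int32_t i32;
        uint32_t u32;
        void *ptr;
        unsigned long zero;
} arg_t;

/* Constructor in the style of SCTP_ARG_CONSTRUCTOR: zero everything
 * first, then set the requested member.
 */
static inline arg_t make_i32(int32_t v)
{
        arg_t a = { .zero = 0UL };
        a.i32 = v;
        return a;
}

int main(void)
{
        arg_t a = make_i32(42);
        printf("i32=%d; the bytes outside i32 were cleared first\n", a.i32);
        return 0;
}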
@@ -161,6 +161,13 @@ extern struct sctp_globals {
          */
         int sndbuf_policy;
 
+        /*
+         * Policy for preforming sctp/socket accounting
+         * 0 - do socket level accounting, all assocs share sk_rcvbuf
+         * 1 - do sctp accounting, each asoc may use sk_rcvbuf bytes
+         */
+        int rcvbuf_policy;
+
         /* Delayed SACK timeout 200ms default*/
         int sack_timeout;
 
@@ -218,6 +225,7 @@ extern struct sctp_globals {
 #define sctp_cookie_preserve_enable (sctp_globals.cookie_preserve_enable)
 #define sctp_max_retrans_association (sctp_globals.max_retrans_association)
 #define sctp_sndbuf_policy (sctp_globals.sndbuf_policy)
+#define sctp_rcvbuf_policy (sctp_globals.rcvbuf_policy)
 #define sctp_max_retrans_path (sctp_globals.max_retrans_path)
 #define sctp_max_retrans_init (sctp_globals.max_retrans_init)
 #define sctp_sack_timeout (sctp_globals.sack_timeout)
@@ -1222,11 +1230,11 @@ struct sctp_endpoint {
         int last_key;
         int key_changed_at;
 
-        /* Default timeouts. */
-        int timeouts[SCTP_NUM_TIMEOUT_TYPES];
-
         /* sendbuf acct. policy. */
         __u32 sndbuf_policy;
+
+        /* rcvbuf acct. policy. */
+        __u32 rcvbuf_policy;
 };
 
 /* Recover the outter endpoint structure. */
@@ -1553,6 +1561,11 @@ struct sctp_association {
          */
         int sndbuf_used;
 
+        /* This is the amount of memory that this association has allocated
+         * in the receive path at any given time.
+         */
+        atomic_t rmem_alloc;
+
         /* This is the wait queue head for send requests waiting on
         * the association sndbuf space.
         */
@@ -699,12 +699,14 @@ static int __init inet6_init(void)
         /* Register the family here so that the init calls below will
          * be able to create sockets. (?? is this dangerous ??)
          */
-        (void) sock_register(&inet6_family_ops);
+        err = sock_register(&inet6_family_ops);
+        if (err)
+                goto out_unregister_raw_proto;
 
         /* Initialise ipv6 mibs */
         err = init_ipv6_mibs();
         if (err)
-                goto out_unregister_raw_proto;
+                goto out_unregister_sock;
 
         /*
          * ipngwg API draft makes clear that the correct semantics
@@ -796,6 +798,8 @@ icmp_fail:
         ipv6_sysctl_unregister();
 #endif
         cleanup_ipv6_mibs();
+out_unregister_sock:
+        sock_unregister(PF_INET6);
 out_unregister_raw_proto:
         proto_unregister(&rawv6_prot);
 out_unregister_udp_proto:
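The two hunks above stop ignoring the return value of sock_register() and extend the existing unwind chain with an out_unregister_sock label. A standalone sketch of that "register, then undo in reverse order on failure" shape; the step names here are invented placeholders, not the real inet6 init functions:

#include <stdio.h>

/* Hypothetical init steps; each returns 0 on success, negative on error. */
static int register_proto(void)  { return 0; }
static int register_family(void) { return 0; }
static int init_mibs(void)       { return -1; /* force the unwind path */ }

static void unregister_family(void) { puts("unregister family"); }
static void unregister_proto(void)  { puts("unregister proto"); }

static int demo_init(void)
{
        int err;

        err = register_proto();
        if (err)
                goto out;

        err = register_family();
        if (err)
                goto out_unregister_proto;

        err = init_mibs();
        if (err)
                goto out_unregister_family;  /* later failures also undo the family */

        return 0;

        /* Unwind in the reverse order of setup. */
out_unregister_family:
        unregister_family();
out_unregister_proto:
        unregister_proto();
out:
        return err;
}

int main(void)
{
        printf("demo_init() = %d\n", demo_init());
        return 0;
}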
@@ -128,9 +128,29 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
          */
         asoc->max_burst = sctp_max_burst;
 
-        /* Copy things from the endpoint. */
+        /* initialize association timers */
+        asoc->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
+        asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
+        asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
+        asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
+        asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
+        asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0;
+
+        /* sctpimpguide Section 2.12.2
+         * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
+         * recommended value of 5 times 'RTO.Max'.
+         */
+        asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
+                = 5 * asoc->rto_max;
+
+        asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
+        asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
+                SCTP_DEFAULT_TIMEOUT_SACK;
+        asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
+                sp->autoclose * HZ;
+
+        /* Initilizes the timers */
         for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
-                asoc->timeouts[i] = ep->timeouts[i];
                 init_timer(&asoc->timers[i]);
                 asoc->timers[i].function = sctp_timer_events[i];
                 asoc->timers[i].data = (unsigned long) asoc;
@@ -157,10 +177,10 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
          * RFC 6 - A SCTP receiver MUST be able to receive a minimum of
          * 1500 bytes in one SCTP packet.
          */
-        if (sk->sk_rcvbuf < SCTP_DEFAULT_MINWINDOW)
+        if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
                 asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
         else
-                asoc->rwnd = sk->sk_rcvbuf;
+                asoc->rwnd = sk->sk_rcvbuf/2;
 
         asoc->a_rwnd = asoc->rwnd;
 
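After this hunk the association advertises only half of sk_rcvbuf as its initial window (the other half is left for protocol and skb overhead), still floored at SCTP_DEFAULT_MINWINDOW. A tiny userspace restatement of that calculation, assuming the kernel constant is 1500 as the comment above suggests:

#include <stdio.h>

#define SCTP_DEFAULT_MINWINDOW 1500 /* assumed value of the kernel constant */

/* Initial rwnd as computed above: max(sk_rcvbuf / 2, minimum window). */
static unsigned int initial_rwnd(unsigned int sk_rcvbuf)
{
        if (sk_rcvbuf / 2 < SCTP_DEFAULT_MINWINDOW)
                return SCTP_DEFAULT_MINWINDOW;
        return sk_rcvbuf / 2;
}

int main(void)
{
        printf("rwnd(65536) = %u\n", initial_rwnd(65536)); /* half the buffer */
        printf("rwnd(2000)  = %u\n", initial_rwnd(2000));  /* clamped to 1500 */
        return 0;
}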
@@ -172,6 +192,9 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
         /* Set the sndbuf size for transmit. */
         asoc->sndbuf_used = 0;
 
+        /* Initialize the receive memory counter */
+        atomic_set(&asoc->rmem_alloc, 0);
+
         init_waitqueue_head(&asoc->wait);
 
         asoc->c.my_vtag = sctp_generate_tag(ep);
@@ -380,6 +403,8 @@ static void sctp_association_destroy(struct sctp_association *asoc)
                 spin_unlock_bh(&sctp_assocs_id_lock);
         }
 
+        BUG_TRAP(!atomic_read(&asoc->rmem_alloc));
+
         if (asoc->base.malloced) {
                 kfree(asoc);
                 SCTP_DBG_OBJCNT_DEC(assoc);
@@ -70,7 +70,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
                                                 struct sock *sk,
                                                 gfp_t gfp)
 {
-        struct sctp_sock *sp = sctp_sk(sk);
         memset(ep, 0, sizeof(struct sctp_endpoint));
 
         /* Initialize the base structure. */
@@ -100,33 +99,14 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
         /* Create the lists of associations. */
         INIT_LIST_HEAD(&ep->asocs);
 
-        /* Set up the base timeout information. */
-        ep->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
-        ep->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
-                msecs_to_jiffies(sp->rtoinfo.srto_initial);
-        ep->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
-                msecs_to_jiffies(sp->rtoinfo.srto_initial);
-        ep->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] =
-                msecs_to_jiffies(sp->rtoinfo.srto_initial);
-        ep->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
-        ep->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0;
-
-        /* sctpimpguide-05 Section 2.12.2
-         * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
-         * recommended value of 5 times 'RTO.Max'.
-         */
-        ep->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
-                = 5 * msecs_to_jiffies(sp->rtoinfo.srto_max);
-
-        ep->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
-        ep->timeouts[SCTP_EVENT_TIMEOUT_SACK] = sctp_sack_timeout;
-        ep->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
-
         /* Use SCTP specific send buffer space queues. */
         ep->sndbuf_policy = sctp_sndbuf_policy;
         sk->sk_write_space = sctp_write_space;
         sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 
+        /* Get the receive buffer policy for this endpoint */
+        ep->rcvbuf_policy = sctp_rcvbuf_policy;
+
         /* Initialize the secret key used with cookie. */
         get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE);
         ep->last_key = ep->current_key = 0;
@@ -100,21 +100,6 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb)
         return 0;
 }
 
-/* The free routine for skbuffs that sctp receives */
-static void sctp_rfree(struct sk_buff *skb)
-{
-        atomic_sub(sizeof(struct sctp_chunk),&skb->sk->sk_rmem_alloc);
-        sock_rfree(skb);
-}
-
-/* The ownership wrapper routine to do receive buffer accounting */
-static void sctp_rcv_set_owner_r(struct sk_buff *skb, struct sock *sk)
-{
-        skb_set_owner_r(skb,sk);
-        skb->destructor = sctp_rfree;
-        atomic_add(sizeof(struct sctp_chunk),&sk->sk_rmem_alloc);
-}
-
 struct sctp_input_cb {
         union {
                 struct inet_skb_parm h4;
@@ -217,9 +202,6 @@ int sctp_rcv(struct sk_buff *skb)
                 rcvr = &ep->base;
         }
 
-        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
-                goto discard_release;
-
         /*
          * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
          * An SCTP packet is called an "out of the blue" (OOTB)
@@ -256,8 +238,6 @@ int sctp_rcv(struct sk_buff *skb)
         }
         SCTP_INPUT_CB(skb)->chunk = chunk;
 
-        sctp_rcv_set_owner_r(skb,sk);
-
         /* Remember what endpoint is to handle this packet. */
         chunk->rcvr = rcvr;
 
@@ -530,6 +530,9 @@ static void sctp_v4_get_saddr(struct sctp_association *asoc,
 {
         struct rtable *rt = (struct rtable *)dst;
 
+        if (!asoc)
+                return;
+
         if (rt) {
                 saddr->v4.sin_family = AF_INET;
                 saddr->v4.sin_port = asoc->base.bind_addr.port;
@@ -1047,6 +1050,9 @@ SCTP_STATIC __init int sctp_init(void)
         /* Sendbuffer growth - do per-socket accounting */
         sctp_sndbuf_policy = 0;
 
+        /* Rcvbuffer growth - do per-socket accounting */
+        sctp_rcvbuf_policy = 0;
+
         /* HB.interval - 30 seconds */
         sctp_hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
 
@@ -385,7 +385,7 @@ sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
         NULL,
         sctp_generate_t4_rto_event,
         sctp_generate_t5_shutdown_guard_event,
-        sctp_generate_heartbeat_event,
+        NULL,
         sctp_generate_sack_event,
         sctp_generate_autoclose_event,
 };
@@ -689,9 +689,9 @@ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,
                  * increased due to timer expirations.
                  */
                 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
-                        asoc->ep->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT];
+                        asoc->rto_initial;
                 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
-                        asoc->ep->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE];
+                        asoc->rto_initial;
         }
 
         if (sctp_state(asoc, ESTABLISHED) ||
@@ -5160,6 +5160,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
         sctp_verb_t deliver;
         int tmp;
         __u32 tsn;
+        int account_value;
+        struct sock *sk = asoc->base.sk;
 
         data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
         skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
@@ -5169,6 +5171,26 @@
 
         /* ASSERT: Now skb->data is really the user data. */
 
+        /*
+         * if we are established, and we have used up our receive
+         * buffer memory, drop the frame
+         */
+        if (asoc->state == SCTP_STATE_ESTABLISHED) {
+                /*
+                 * If the receive buffer policy is 1, then each
+                 * association can allocate up to sk_rcvbuf bytes
+                 * otherwise, all the associations in aggregate
+                 * may allocate up to sk_rcvbuf bytes
+                 */
+                if (asoc->ep->rcvbuf_policy)
+                        account_value = atomic_read(&asoc->rmem_alloc);
+                else
+                        account_value = atomic_read(&sk->sk_rmem_alloc);
+
+                if (account_value > sk->sk_rcvbuf)
+                        return SCTP_IERROR_IGNORE_TSN;
+        }
+
         /* Process ECN based congestion.
          *
          * Since the chunk structure is reused for all chunks within
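The block added above is the heart of the new accounting: with rcvbuf_policy set, the incoming DATA chunk is charged against the association's own rmem_alloc; otherwise it is charged against the socket-wide sk_rmem_alloc, and the TSN is ignored once the chosen counter exceeds sk_rcvbuf. A standalone restatement of that decision (plain ints instead of kernel atomics, names invented for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the check in sctp_eat_data(): pick the counter according to the
 * endpoint's rcvbuf_policy and drop (ignore the TSN) when it is over budget.
 */
static bool should_ignore_tsn(int rcvbuf_policy, int asoc_rmem_alloc,
                              int sk_rmem_alloc, int sk_rcvbuf)
{
        int account_value;

        if (rcvbuf_policy)
                account_value = asoc_rmem_alloc;   /* per-association accounting */
        else
                account_value = sk_rmem_alloc;     /* per-socket accounting */

        return account_value > sk_rcvbuf;
}

int main(void)
{
        /* One association has used 80 KiB against a 64 KiB socket budget. */
        printf("policy=1: drop=%d\n",
               should_ignore_tsn(1, 80 * 1024, 100 * 1024, 64 * 1024));
        /* Policy 0 charges the aggregate socket counter instead. */
        printf("policy=0: drop=%d\n",
               should_ignore_tsn(0, 80 * 1024, 40 * 1024, 64 * 1024));
        return 0;
}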
|
@ -1932,7 +1932,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
|
||||
if (copy_from_user(&sp->autoclose, optval, optlen))
|
||||
return -EFAULT;
|
||||
|
||||
sp->ep->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -5115,8 +5114,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
         sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
                 event = sctp_skb2event(skb);
                 if (event->asoc == assoc) {
+                        sock_rfree(skb);
                         __skb_unlink(skb, &oldsk->sk_receive_queue);
                         __skb_queue_tail(&newsk->sk_receive_queue, skb);
+                        skb_set_owner_r(skb, newsk);
                 }
         }
 
@@ -5144,8 +5145,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
         sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
                 event = sctp_skb2event(skb);
                 if (event->asoc == assoc) {
+                        sock_rfree(skb);
                         __skb_unlink(skb, &oldsp->pd_lobby);
                         __skb_queue_tail(queue, skb);
+                        skb_set_owner_r(skb, newsk);
                 }
         }
 
|
@ -120,6 +120,14 @@ static ctl_table sctp_table[] = {
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec
|
||||
},
|
||||
{
|
||||
.ctl_name = NET_SCTP_RCVBUF_POLICY,
|
||||
.procname = "rcvbuf_policy",
|
||||
.data = &sctp_rcvbuf_policy,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec
|
||||
},
|
||||
{
|
||||
.ctl_name = NET_SCTP_PATH_MAX_RETRANS,
|
||||
.procname = "path_max_retrans",
|
||||
|
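With the table entry above, the new knob is exposed as a sysctl; it should appear as net.sctp.rcvbuf_policy, i.e. /proc/sys/net/sctp/rcvbuf_policy (path assumed from the standard SCTP sysctl directory). A small userspace reader as a usage sketch:

#include <stdio.h>

int main(void)
{
        /* Assumed procfs path for the sysctl registered above. */
        const char *path = "/proc/sys/net/sctp/rcvbuf_policy";
        FILE *f = fopen(path, "r");
        int policy;

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fscanf(f, "%d", &policy) == 1)
                printf("rcvbuf_policy = %d (0 = per-socket, 1 = per-association)\n",
                       policy);
        fclose(f);
        return 0;
}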
@@ -52,19 +52,6 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
                                        struct sctp_association *asoc);
 static void sctp_ulpevent_release_data(struct sctp_ulpevent *event);
 
-/* Stub skb destructor. */
-static void sctp_stub_rfree(struct sk_buff *skb)
-{
-/* WARNING: This function is just a warning not to use the
- * skb destructor. If the skb is shared, we may get the destructor
- * callback on some processor that does not own the sock_lock. This
- * was occuring with PACKET socket applications that were monitoring
- * our skbs. We can't take the sock_lock, because we can't risk
- * recursing if we do really own the sock lock. Instead, do all
- * of our rwnd manipulation while we own the sock_lock outright.
- */
-}
-
 /* Initialize an ULP event from an given skb. */
 SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event, int msg_flags)
 {
@@ -111,15 +98,19 @@ static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event,
          */
         sctp_association_hold((struct sctp_association *)asoc);
         skb = sctp_event2skb(event);
-        skb->sk = asoc->base.sk;
         event->asoc = (struct sctp_association *)asoc;
-        skb->destructor = sctp_stub_rfree;
+        atomic_add(skb->truesize, &event->asoc->rmem_alloc);
+        skb_set_owner_r(skb, asoc->base.sk);
 }
 
 /* A simple destructor to give up the reference to the association. */
 static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event)
 {
-        sctp_association_put(event->asoc);
+        struct sctp_association *asoc = event->asoc;
+        struct sk_buff *skb = sctp_event2skb(event);
+
+        atomic_sub(skb->truesize, &asoc->rmem_alloc);
+        sctp_association_put(asoc);
 }
 
 /* Create and initialize an SCTP_ASSOC_CHANGE event.
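The set_owner/release_owner pair above now charges each event skb's truesize to the association's rmem_alloc and uncharges it on release, alongside the normal skb_set_owner_r()/sock_rfree() socket accounting. A userspace sketch of that charge/uncharge pairing using a plain atomic counter (the names below are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for asoc->rmem_alloc. */
static atomic_int rmem_alloc;

/* Charge an event's buffer size when it is attached to the association
 * (the analogue of sctp_ulpevent_set_owner()).
 */
static void event_set_owner(int truesize)
{
        atomic_fetch_add(&rmem_alloc, truesize);
}

/* Uncharge it when the event is released
 * (the analogue of sctp_ulpevent_release_owner()).
 */
static void event_release_owner(int truesize)
{
        atomic_fetch_sub(&rmem_alloc, truesize);
}

int main(void)
{
        event_set_owner(2048);
        event_set_owner(1024);
        event_release_owner(2048);
        printf("outstanding receive memory: %d bytes\n",
               atomic_load(&rmem_alloc)); /* 1024 */
        return 0;
}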
@@ -922,7 +913,6 @@ done:
 /* Free a ulpevent that has an owner. It includes releasing the reference
  * to the owner, updating the rwnd in case of a DATA event and freeing the
  * skb.
- * See comments in sctp_stub_rfree().
  */
 void sctp_ulpevent_free(struct sctp_ulpevent *event)
 {