netem: add limitation to reordered packets
Fix two netem bugs:

1) When a frame was dropped by tfifo_enqueue(), the drop counter
   was incremented twice.

2) When reordering is triggered, we enqueue a packet without
   checking the queue limit. This can OOM pretty fast when repeated
   enough, since the skbs are orphaned and no socket limit can help
   in this situation.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Mark Gordon <msg@google.com>
Cc: Andreas Terzis <aterzis@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Hagen Paul Pfeifer <hagen@jauu.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b94e52f626
commit 960fb66e52
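Bug 1) in the message is a plain double count: when the queue was full, tfifo_enqueue() returned the result of qdisc_reshape_fail(), which already increments sch->qstats.drops, and netem_enqueue() then incremented the counter again based on the return value. A minimal user-space C sketch of that pattern (hypothetical names and types, no kernel APIs) shows the effect:

#include <stdio.h>

/* Hypothetical stand-ins for the qdisc statistics and return codes. */
struct stats { unsigned int drops; };

enum { XMIT_SUCCESS = 0, XMIT_DROP = 1 };

/* Models the reject helper: it already accounts the drop itself. */
static int reject_frame(struct stats *st)
{
	st->drops++;
	return XMIT_DROP;
}

static int enqueue_old(struct stats *st, int queue_full)
{
	int ret = queue_full ? reject_frame(st) : XMIT_SUCCESS;

	if (ret != XMIT_SUCCESS)
		st->drops++;	/* bug: the same frame is counted twice */
	return ret;
}

static int enqueue_new(struct stats *st, int queue_full)
{
	/* fix: the reject helper is the only place that counts the drop */
	return queue_full ? reject_frame(st) : XMIT_SUCCESS;
}

int main(void)
{
	struct stats a = { 0 }, b = { 0 };

	enqueue_old(&a, 1);
	enqueue_new(&b, 1);
	printf("old: %u drops for one frame, new: %u drop\n", a.drops, b.drops);
	return 0;
}

With the fix, tfifo_enqueue() returns void and the only drop accounting happens in qdisc_reshape_fail(), now called directly from netem_enqueue().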
@@ -331,29 +331,22 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
 	return PSCHED_NS2TICKS(ticks);
 }
 
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
 	struct sk_buff_head *list = &sch->q;
 	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
-	struct sk_buff *skb;
+	struct sk_buff *skb = skb_peek_tail(list);
 
-	if (likely(skb_queue_len(list) < sch->limit)) {
-		skb = skb_peek_tail(list);
-		/* Optimize for add at tail */
-		if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
-			return qdisc_enqueue_tail(nskb, sch);
+	/* Optimize for add at tail */
+	if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
+		return __skb_queue_tail(list, nskb);
 
-		skb_queue_reverse_walk(list, skb) {
-			if (tnext >= netem_skb_cb(skb)->time_to_send)
-				break;
-		}
+	skb_queue_reverse_walk(list, skb) {
+		if (tnext >= netem_skb_cb(skb)->time_to_send)
+			break;
+	}
 
-		__skb_queue_after(list, skb, nskb);
-		sch->qstats.backlog += qdisc_pkt_len(nskb);
-		return NET_XMIT_SUCCESS;
-	}
-
-	return qdisc_reshape_fail(nskb, sch);
+	__skb_queue_after(list, skb, nskb);
 }
 
 /*
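After this hunk, tfifo_enqueue() is a pure insert: it assumes the caller has already enforced the queue limit and done the accounting, and only keeps the FIFO ordered by time_to_send, with a fast path for the common append-at-tail case and a reverse walk otherwise. A rough user-space model of that insertion order (hypothetical struct pkt and struct pkt_list standing in for sk_buff and sk_buff_head):

#include <stdio.h>

/* Hypothetical stand-in for an skb carrying netem's time_to_send. */
struct pkt {
	unsigned long long time_to_send;
	struct pkt *prev, *next;
};

struct pkt_list { struct pkt *head, *tail; };

/* Mirrors the shape of the new tfifo_enqueue(): try the tail first,
 * otherwise walk backwards until an earlier-or-equal timestamp is found. */
static void ordered_enqueue(struct pkt_list *list, struct pkt *nskb)
{
	struct pkt *skb = list->tail;

	/* Optimize for add at tail */
	if (!skb || nskb->time_to_send >= skb->time_to_send) {
		nskb->prev = skb;
		nskb->next = NULL;
		if (skb)
			skb->next = nskb;
		else
			list->head = nskb;
		list->tail = nskb;
		return;
	}

	/* Reverse walk: find the last packet not later than the new one. */
	while (skb && nskb->time_to_send < skb->time_to_send)
		skb = skb->prev;

	/* Insert after 'skb', or at the head if every packet was later. */
	nskb->prev = skb;
	nskb->next = skb ? skb->next : list->head;
	if (nskb->next)
		nskb->next->prev = nskb;
	else
		list->tail = nskb;
	if (skb)
		skb->next = nskb;
	else
		list->head = nskb;
}

int main(void)
{
	struct pkt_list q = { NULL, NULL };
	struct pkt p[4] = { { 30 }, { 10 }, { 40 }, { 20 } };
	struct pkt *it;
	int i;

	for (i = 0; i < 4; i++)
		ordered_enqueue(&q, &p[i]);
	for (it = q.head; it; it = it->next)
		printf("%llu ", it->time_to_send);	/* prints: 10 20 30 40 */
	printf("\n");
	return 0;
}

The tail check keeps the no-reorder case O(1), which is why the kernel code peeks the tail before walking the list.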
@@ -368,7 +361,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	/* We don't fill cb now as skb_unshare() may invalidate it */
 	struct netem_skb_cb *cb;
 	struct sk_buff *skb2;
-	int ret;
 	int count = 1;
 
 	/* Random duplication */
@@ -419,6 +411,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
 	}
 
+	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+		return qdisc_reshape_fail(skb, sch);
+
+	sch->qstats.backlog += qdisc_pkt_len(skb);
+
 	cb = netem_skb_cb(skb);
 	if (q->gap == 0 ||		/* not doing reordering */
 	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
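This is the core of the fix for bug 2): the limit check and the backlog accounting now sit in netem_enqueue(), before the code decides between an in-order insert and the head insertion used for reordering, so neither path can grow the queue past sch->limit. A hedged user-space sketch of that shape (hypothetical names, with a constant standing in for sch->limit):

#include <stdbool.h>
#include <stdio.h>

#define QUEUE_LIMIT 1000	/* plays the role of sch->limit */

/* Hypothetical queue state: only the counters matter for this sketch. */
struct queue { unsigned int len; unsigned int backlog; unsigned int drops; };

static bool enqueue(struct queue *q, unsigned int pkt_len, bool reorder)
{
	/* The guard runs before either insertion path, so reordered
	 * packets (queued at the head) can no longer bypass the limit. */
	if (q->len >= QUEUE_LIMIT) {
		q->drops++;
		return false;
	}

	q->backlog += pkt_len;	/* backlog accounted exactly once */
	q->len++;
	(void)reorder;		/* head insert vs. ordered insert elided here */
	return true;
}

int main(void)
{
	struct queue q = { 0 };
	unsigned int i;

	/* Before the fix, a flood of reordered packets bypassed the limit. */
	for (i = 0; i < 10 * QUEUE_LIMIT; i++)
		enqueue(&q, 1500, true);
	printf("queued %u, dropped %u\n", q.len, q.drops);
	return 0;
}

Previously only tfifo_enqueue() checked the limit, so the reorder branch, which calls __skb_queue_head() directly, could be driven until memory was exhausted; that is the OOM scenario described in the commit message.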
@@ -450,7 +447,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 		cb->time_to_send = now + delay;
 		++q->counter;
-		ret = tfifo_enqueue(skb, sch);
+		tfifo_enqueue(skb, sch);
 	} else {
 		/*
 		 * Do re-ordering by putting one out of N packets at the front
@@ -460,16 +457,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		q->counter = 0;
 
 		__skb_queue_head(&sch->q, skb);
-		sch->qstats.backlog += qdisc_pkt_len(skb);
 		sch->qstats.requeues++;
-		ret = NET_XMIT_SUCCESS;
-	}
-
-	if (ret != NET_XMIT_SUCCESS) {
-		if (net_xmit_drop_count(ret)) {
-			sch->qstats.drops++;
-			return ret;
-		}
 	}
 
 	return NET_XMIT_SUCCESS;