net/packet: make tp_drops atomic
Under DDOS, we want to be able to increment tp_drops without touching the spinlock. This will help readers to drain the receive queue slightly faster :/

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent 0338a14523
commit 8e8e2951e3
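The pattern in this patch is simple: the per-socket drop counter moves out of the spinlock-protected stats block into a standalone atomic_t, so the drop path only does an atomic increment and the PACKET_STATISTICS reader drains it with atomic_xchg(). The following is a minimal userspace sketch of that same pattern, not the kernel code itself: it assumes C11 stdatomic and pthreads in place of the kernel APIs, and the names drop_counter, record_drop and drain_drops are invented for illustration.

/* Userspace sketch of the pattern used by this commit: the hot (drop) path
 * does only a lock-free atomic increment, while the stats reader drains the
 * counter with an atomic exchange, mirroring atomic_inc()/atomic_xchg().
 * All names here are illustrative, not taken from the kernel. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint drop_counter;                    /* analogue of po->tp_drops */
static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER; /* analogue of sk_receive_queue.lock */

/* Hot path under load: no lock taken, like atomic_inc(&po->tp_drops). */
static void record_drop(void)
{
	atomic_fetch_add_explicit(&drop_counter, 1, memory_order_relaxed);
}

/* Stats reader: copy/reset the lock-protected counters, then drain the
 * atomic counter, like the PACKET_STATISTICS path doing
 * atomic_xchg(&po->tp_drops, 0). */
static unsigned int drain_drops(void)
{
	pthread_mutex_lock(&stats_lock);
	/* ... copy and reset the other, lock-protected counters here ... */
	pthread_mutex_unlock(&stats_lock);

	return atomic_exchange_explicit(&drop_counter, 0, memory_order_relaxed);
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		record_drop();
	printf("drops since last read: %u\n", drain_drops());
	printf("drops since last read: %u\n", drain_drops()); /* now 0 */
	return 0;
}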
net/packet/af_packet.c
@@ -758,7 +758,7 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1,
 	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 	struct sock *sk = &po->sk;
 
-	if (po->stats.stats3.tp_drops)
+	if (atomic_read(&po->tp_drops))
 		status |= TP_STATUS_LOSING;
 
 	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
@@ -2128,10 +2128,8 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 
 drop_n_acct:
 	is_drop_n_account = true;
-	spin_lock(&sk->sk_receive_queue.lock);
-	po->stats.stats1.tp_drops++;
+	atomic_inc(&po->tp_drops);
 	atomic_inc(&sk->sk_drops);
-	spin_unlock(&sk->sk_receive_queue.lock);
 
 drop_n_restore:
 	if (skb_head != skb->data && skb_shared(skb)) {
@@ -2265,7 +2263,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 	 * Anyways, moving it for V1/V2 only as V3 doesn't need this
 	 * at packet level.
 	 */
-		if (po->stats.stats1.tp_drops)
+		if (atomic_read(&po->tp_drops))
 			status |= TP_STATUS_LOSING;
 	}
 
@@ -2381,9 +2379,9 @@ drop:
 	return 0;
 
 drop_n_account:
-	is_drop_n_account = true;
-	po->stats.stats1.tp_drops++;
 	spin_unlock(&sk->sk_receive_queue.lock);
+	atomic_inc(&po->tp_drops);
+	is_drop_n_account = true;
 
 	sk->sk_data_ready(sk);
 	kfree_skb(copy_skb);
@@ -3879,6 +3877,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 	void *data = &val;
 	union tpacket_stats_u st;
 	struct tpacket_rollover_stats rstats;
+	int drops;
 
 	if (level != SOL_PACKET)
 		return -ENOPROTOOPT;
@@ -3895,14 +3894,17 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 		memcpy(&st, &po->stats, sizeof(st));
 		memset(&po->stats, 0, sizeof(po->stats));
 		spin_unlock_bh(&sk->sk_receive_queue.lock);
+		drops = atomic_xchg(&po->tp_drops, 0);
 
 		if (po->tp_version == TPACKET_V3) {
 			lv = sizeof(struct tpacket_stats_v3);
-			st.stats3.tp_packets += st.stats3.tp_drops;
+			st.stats3.tp_drops = drops;
+			st.stats3.tp_packets += drops;
 			data = &st.stats3;
 		} else {
 			lv = sizeof(struct tpacket_stats);
-			st.stats1.tp_packets += st.stats1.tp_drops;
+			st.stats1.tp_drops = drops;
+			st.stats1.tp_packets += drops;
 			data = &st.stats1;
 		}
 
net/packet/internal.h
@@ -131,6 +131,7 @@ struct packet_sock {
 	struct net_device __rcu	*cached_dev;
 	int			(*xmit)(struct sk_buff *skb);
 	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
+	atomic_t		tp_drops ____cacheline_aligned_in_smp;
 };
 
 static struct packet_sock *pkt_sk(struct sock *sk)