2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-20 03:04:01 +08:00

net: convert gro_count to bitmask

gro_hash is 192 bytes in size and spans 3 cache lines. If there are only a
few flows, gro_hash may not be fully used, so iterating over all of
gro_hash in napi_gro_flush() touches cache lines unnecessarily.

Convert gro_count to a bitmask and rename it gro_bitmask; each bit
represents an element of gro_hash. Only flush a gro_hash element if the
related bit is set, to speed up napi_gro_flush().

Also, update gro_bitmask only when its value actually changes, to reduce
cache-line updates.

Suggested-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Li RongQing <lirongqing@baidu.com>
Cc: Stefano Brivio <sbrivio@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Li RongQing 2018-07-13 14:41:36 +08:00 committed by David S. Miller
parent 48559af345
commit d9f37d01e2
2 changed files with 31 additions and 14 deletions

View File

@ -308,9 +308,14 @@ struct gro_list {
}; };
/* /*
* Structure for NAPI scheduling similar to tasklet but with weighting * size of gro hash buckets, must less than bit number of
* napi_struct::gro_bitmask
*/ */
#define GRO_HASH_BUCKETS 8 #define GRO_HASH_BUCKETS 8
/*
* Structure for NAPI scheduling similar to tasklet but with weighting
*/
struct napi_struct { struct napi_struct {
/* The poll_list must only be managed by the entity which /* The poll_list must only be managed by the entity which
* changes the state of the NAPI_STATE_SCHED bit. This means * changes the state of the NAPI_STATE_SCHED bit. This means
@ -322,7 +327,7 @@ struct napi_struct {
unsigned long state; unsigned long state;
int weight; int weight;
unsigned int gro_count; unsigned long gro_bitmask;
int (*poll)(struct napi_struct *, int); int (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL #ifdef CONFIG_NETPOLL
int poll_owner; int poll_owner;

View File

@ -5282,9 +5282,11 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
list_del(&skb->list); list_del(&skb->list);
skb->next = NULL; skb->next = NULL;
napi_gro_complete(skb); napi_gro_complete(skb);
napi->gro_count--;
napi->gro_hash[index].count--; napi->gro_hash[index].count--;
} }
if (!napi->gro_hash[index].count)
__clear_bit(index, &napi->gro_bitmask);
} }
/* napi->gro_hash[].list contains packets ordered by age. /* napi->gro_hash[].list contains packets ordered by age.
@ -5295,8 +5297,10 @@ void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{ {
u32 i; u32 i;
for (i = 0; i < GRO_HASH_BUCKETS; i++) for (i = 0; i < GRO_HASH_BUCKETS; i++) {
__napi_gro_flush_chain(napi, i, flush_old); if (test_bit(i, &napi->gro_bitmask))
__napi_gro_flush_chain(napi, i, flush_old);
}
} }
EXPORT_SYMBOL(napi_gro_flush); EXPORT_SYMBOL(napi_gro_flush);
@ -5388,8 +5392,8 @@ static void gro_flush_oldest(struct list_head *head)
if (WARN_ON_ONCE(!oldest)) if (WARN_ON_ONCE(!oldest))
return; return;
/* Do not adjust napi->gro_count, caller is adding a new SKB to /* Do not adjust napi->gro_hash[].count, caller is adding a new
* the chain. * SKB to the chain.
*/ */
list_del(&oldest->list); list_del(&oldest->list);
napi_gro_complete(oldest); napi_gro_complete(oldest);
@ -5464,7 +5468,6 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
list_del(&pp->list); list_del(&pp->list);
pp->next = NULL; pp->next = NULL;
napi_gro_complete(pp); napi_gro_complete(pp);
napi->gro_count--;
napi->gro_hash[hash].count--; napi->gro_hash[hash].count--;
} }
@ -5477,7 +5480,6 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) { if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
gro_flush_oldest(gro_head); gro_flush_oldest(gro_head);
} else { } else {
napi->gro_count++;
napi->gro_hash[hash].count++; napi->gro_hash[hash].count++;
} }
NAPI_GRO_CB(skb)->count = 1; NAPI_GRO_CB(skb)->count = 1;
@ -5492,6 +5494,13 @@ pull:
if (grow > 0) if (grow > 0)
gro_pull_from_frag0(skb, grow); gro_pull_from_frag0(skb, grow);
ok: ok:
if (napi->gro_hash[hash].count) {
if (!test_bit(hash, &napi->gro_bitmask))
__set_bit(hash, &napi->gro_bitmask);
} else if (test_bit(hash, &napi->gro_bitmask)) {
__clear_bit(hash, &napi->gro_bitmask);
}
return ret; return ret;
normal: normal:
@ -5890,7 +5899,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
NAPIF_STATE_IN_BUSY_POLL))) NAPIF_STATE_IN_BUSY_POLL)))
return false; return false;
if (n->gro_count) { if (n->gro_bitmask) {
unsigned long timeout = 0; unsigned long timeout = 0;
if (work_done) if (work_done)
@ -6099,7 +6108,7 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
/* Note : we use a relaxed variant of napi_schedule_prep() not setting /* Note : we use a relaxed variant of napi_schedule_prep() not setting
* NAPI_STATE_MISSED, since we do not react to a device IRQ. * NAPI_STATE_MISSED, since we do not react to a device IRQ.
*/ */
if (napi->gro_count && !napi_disable_pending(napi) && if (napi->gro_bitmask && !napi_disable_pending(napi) &&
!test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
__napi_schedule_irqoff(napi); __napi_schedule_irqoff(napi);
@ -6114,7 +6123,7 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
INIT_LIST_HEAD(&napi->poll_list); INIT_LIST_HEAD(&napi->poll_list);
hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
napi->timer.function = napi_watchdog; napi->timer.function = napi_watchdog;
napi->gro_count = 0; napi->gro_bitmask = 0;
for (i = 0; i < GRO_HASH_BUCKETS; i++) { for (i = 0; i < GRO_HASH_BUCKETS; i++) {
INIT_LIST_HEAD(&napi->gro_hash[i].list); INIT_LIST_HEAD(&napi->gro_hash[i].list);
napi->gro_hash[i].count = 0; napi->gro_hash[i].count = 0;
@ -6174,7 +6183,7 @@ void netif_napi_del(struct napi_struct *napi)
napi_free_frags(napi); napi_free_frags(napi);
flush_gro_hash(napi); flush_gro_hash(napi);
napi->gro_count = 0; napi->gro_bitmask = 0;
} }
EXPORT_SYMBOL(netif_napi_del); EXPORT_SYMBOL(netif_napi_del);
@ -6216,7 +6225,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
goto out_unlock; goto out_unlock;
} }
if (n->gro_count) { if (n->gro_bitmask) {
/* flush too old packets /* flush too old packets
* If HZ < 1000, flush all packets. * If HZ < 1000, flush all packets.
*/ */
@ -9272,6 +9281,9 @@ static struct hlist_head * __net_init netdev_create_hash(void)
/* Initialize per network namespace state */ /* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net) static int __net_init netdev_init(struct net *net)
{ {
BUILD_BUG_ON(GRO_HASH_BUCKETS >
FIELD_SIZEOF(struct napi_struct, gro_bitmask));
if (net != &init_net) if (net != &init_net)
INIT_LIST_HEAD(&net->dev_base_head); INIT_LIST_HEAD(&net->dev_base_head);