net: Allow GRO to use and set levels of checksum unnecessary
Allow the GRO path to "consume" checksums provided in CHECKSUM_UNNECESSARY and to report newly verified checksums for use in the fallback to the normal path.

Change the GRO checksum path to track csum_level using a csum_cnt field in NAPI_GRO_CB. On GRO initialization, if ip_summed is CHECKSUM_UNNECESSARY, set NAPI_GRO_CB(skb)->csum_cnt to skb->csum_level + 1. For each checksum verified, decrement NAPI_GRO_CB(skb)->csum_cnt while it is greater than zero. If a checksum is verified and NAPI_GRO_CB(skb)->csum_cnt == 0, we have verified a deeper checksum than originally indicated in the skbuff, so increment csum_level (or initialize to CHECKSUM_UNNECESSARY if ip_summed is CHECKSUM_NONE or CHECKSUM_COMPLETE).

Signed-off-by: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 662880f442
parent 77cffe23c1
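To make the accounting described in the commit message concrete, here is a minimal user-space sketch of the scheme. It is illustrative only: model_skb, model_gro_cb and the helper names below are hypothetical stand-ins for sk_buff, NAPI_GRO_CB and the inline helpers changed by this patch, and CHECKSUM_PARTIAL handling as well as the 3-bit width of csum_cnt are ignored.

/*
 * Minimal user-space model of the csum_cnt accounting described above.
 * All names are illustrative; none of them are kernel structures or APIs.
 */
#include <assert.h>

enum ip_summed { CSUM_NONE, CSUM_UNNECESSARY, CSUM_COMPLETE, CSUM_PARTIAL };

struct model_skb {
	enum ip_summed ip_summed;
	unsigned int csum_level;	/* meaningful for CSUM_UNNECESSARY */
};

struct model_gro_cb {
	unsigned int csum_cnt;	/* driver-verified checksums not yet consumed */
};

/* GRO init: seed csum_cnt from what the driver already verified. */
static void model_gro_init(struct model_gro_cb *cb, const struct model_skb *skb)
{
	cb->csum_cnt = (skb->ip_summed == CSUM_UNNECESSARY) ?
		       skb->csum_level + 1 : 0;
}

/* Called each time a checksum is (or would have to be) verified during GRO. */
static void model_checksum_verified(struct model_gro_cb *cb, struct model_skb *skb)
{
	if (cb->csum_cnt > 0) {
		/* Consume one checksum already covered by CHECKSUM_UNNECESSARY. */
		cb->csum_cnt--;
	} else if (skb->ip_summed == CSUM_UNNECESSARY) {
		/* A deeper checksum than reported was verified: bump the level. */
		skb->csum_level++;
	} else {
		/* CHECKSUM_NONE or CHECKSUM_COMPLETE: start counting at level 0. */
		skb->ip_summed = CSUM_UNNECESSARY;
		skb->csum_level = 0;
	}
}

int main(void)
{
	/* NIC validated the outer and one inner checksum: csum_level == 1. */
	struct model_skb skb = { .ip_summed = CSUM_UNNECESSARY, .csum_level = 1 };
	struct model_gro_cb cb;

	model_gro_init(&cb, &skb);
	assert(cb.csum_cnt == 2);

	model_checksum_verified(&cb, &skb);	/* outer checksum: consumed */
	model_checksum_verified(&cb, &skb);	/* inner checksum: consumed */
	assert(cb.csum_cnt == 0 && skb.csum_level == 1);

	model_checksum_verified(&cb, &skb);	/* deeper checksum: level bumped */
	assert(skb.csum_level == 2);
	return 0;
}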
include/linux/netdevice.h
@@ -1883,8 +1883,8 @@ struct napi_gro_cb {
 	/* GRO checksum is valid */
 	u8	csum_valid:1;
 
-	/* Number encapsulation layers crossed */
-	u8	encapsulation;
+	/* Number of checksums via CHECKSUM_UNNECESSARY */
+	u8	csum_cnt:3;
 
 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 	__wsum	csum;
@@ -2179,8 +2179,7 @@ static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
 						      __sum16 check)
 {
 	return (skb->ip_summed != CHECKSUM_PARTIAL &&
-		(skb->ip_summed != CHECKSUM_UNNECESSARY ||
-		 (NAPI_GRO_CB(skb)->encapsulation > skb->encapsulation)) &&
+		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
 		(!zero_okay || check));
 }
 
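For reference, the simplified predicate can be restated as a stand-alone function with a few sample evaluations. needs_validation() is a hypothetical helper used only to illustrate the condition in the hunk above; zero_okay and check mirror the parameters of __skb_gro_checksum_validate_needed().

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

enum ip_summed { CSUM_NONE, CSUM_UNNECESSARY, CSUM_COMPLETE, CSUM_PARTIAL };

/*
 * Illustrative restatement of the new condition: validation is needed only
 * when ip_summed is not CHECKSUM_PARTIAL, no driver-verified checksums remain
 * to be consumed (csum_cnt == 0), and either a zero checksum is not
 * acceptable or the checksum field is non-zero.
 */
static bool needs_validation(enum ip_summed ip_summed, unsigned int csum_cnt,
			     bool zero_okay, uint16_t check)
{
	return ip_summed != CSUM_PARTIAL &&
	       csum_cnt == 0 &&
	       (!zero_okay || check);
}

int main(void)
{
	/* Driver already verified this checksum (csum_cnt > 0): skip it. */
	assert(!needs_validation(CSUM_UNNECESSARY, 1, false, 0xbeef));
	/* Nothing verified yet: validation is required. */
	assert(needs_validation(CSUM_NONE, 0, false, 0xbeef));
	/* Zero checksum allowed (zero_okay) and it is zero: nothing to check. */
	assert(!needs_validation(CSUM_NONE, 0, true, 0));
	return 0;
}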
@@ -2196,18 +2195,17 @@ static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
 	return __skb_gro_checksum_complete(skb);
 }
 
-/* Update skb for CHECKSUM_UNNECESSARY when we verified a top level
- * checksum or an encapsulated one during GRO. This saves work
- * if we fallback to normal path with the packet.
- */
 static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
 {
-	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
-		if (NAPI_GRO_CB(skb)->encapsulation)
-			skb->encapsulation = 1;
-	} else if (skb->ip_summed != CHECKSUM_PARTIAL) {
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		skb->encapsulation = 0;
+	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
+		/* Consume a checksum from CHECKSUM_UNNECESSARY */
+		NAPI_GRO_CB(skb)->csum_cnt--;
+	} else {
+		/* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
+		 * verified a new top level checksum or an encapsulated one
+		 * during GRO. This saves work if we fallback to normal path.
+		 */
+		__skb_incr_checksum_unnecessary(skb);
 	}
 }
 
net/core/dev.c
@@ -3962,13 +3962,6 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 
 	gro_list_prepare(napi, skb);
 
-	if (skb->ip_summed == CHECKSUM_COMPLETE) {
-		NAPI_GRO_CB(skb)->csum = skb->csum;
-		NAPI_GRO_CB(skb)->csum_valid = 1;
-	} else {
-		NAPI_GRO_CB(skb)->csum_valid = 0;
-	}
-
 	rcu_read_lock();
 	list_for_each_entry_rcu(ptype, head, list) {
 		if (ptype->type != type || !ptype->callbacks.gro_receive)
@@ -3980,7 +3973,22 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 		NAPI_GRO_CB(skb)->flush = 0;
 		NAPI_GRO_CB(skb)->free = 0;
 		NAPI_GRO_CB(skb)->udp_mark = 0;
-		NAPI_GRO_CB(skb)->encapsulation = 0;
+
+		/* Setup for GRO checksum validation */
+		switch (skb->ip_summed) {
+		case CHECKSUM_COMPLETE:
+			NAPI_GRO_CB(skb)->csum = skb->csum;
+			NAPI_GRO_CB(skb)->csum_valid = 1;
+			NAPI_GRO_CB(skb)->csum_cnt = 0;
+			break;
+		case CHECKSUM_UNNECESSARY:
+			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
+			NAPI_GRO_CB(skb)->csum_valid = 0;
+			break;
+		default:
+			NAPI_GRO_CB(skb)->csum_cnt = 0;
+			NAPI_GRO_CB(skb)->csum_valid = 0;
+		}
 
 		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
 		break;
net/ipv4/gre_offload.c
@@ -172,12 +172,9 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 	}
 
 	/* Don't bother verifying checksum if we're going to flush anyway. */
-	if (greh->flags & GRE_CSUM) {
-		if (!NAPI_GRO_CB(skb)->flush &&
-		    skb_gro_checksum_simple_validate(skb))
-			goto out_unlock;
-		NAPI_GRO_CB(skb)->encapsulation++;
-	}
+	if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush &&
+	    skb_gro_checksum_simple_validate(skb))
+		goto out_unlock;
 
 	flush = 0;
 
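The flattened condition keeps the same short-circuit behavior as the old nested form: GRO for this skb is aborted only when a GRE checksum is present, the packet is not already marked for flushing, and validation reports a failure (when csum_cnt > 0, the validation helpers shown earlier treat the checksum as already verified, so no failure can be reported). A small sketch; gre_gro_must_bail() and the MODEL_GRE_CSUM value are purely illustrative.

#include <assert.h>
#include <stdbool.h>

/* GRE header flag bit for "checksum present"; the value here is illustrative. */
#define MODEL_GRE_CSUM 0x8000

/*
 * Illustrative model of the flattened GRE check: abort GRO for this skb only
 * when a GRE checksum is present, the packet is not already being flushed,
 * and checksum validation fails. csum_bad stands in for a non-zero return
 * from skb_gro_checksum_simple_validate().
 */
static bool gre_gro_must_bail(unsigned int gre_flags, bool flush, bool csum_bad)
{
	return (gre_flags & MODEL_GRE_CSUM) && !flush && csum_bad;
}

int main(void)
{
	/* No GRE checksum present: nothing to verify, keep aggregating. */
	assert(!gre_gro_must_bail(0, false, true));
	/* GRE checksum present and validation fails: stop GRO for this skb. */
	assert(gre_gro_must_bail(MODEL_GRE_CSUM, false, true));
	/* Checksum already covered (e.g. csum_cnt > 0): validation passes. */
	assert(!gre_gro_must_bail(MODEL_GRE_CSUM, false, false));
	return 0;
}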
net/ipv4/udp_offload.c
@@ -238,12 +238,13 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
 	int flush = 1;
 
 	if (NAPI_GRO_CB(skb)->udp_mark ||
-	    (!skb->encapsulation && !NAPI_GRO_CB(skb)->csum_valid))
+	    (skb->ip_summed != CHECKSUM_PARTIAL &&
+	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+	     !NAPI_GRO_CB(skb)->csum_valid))
 		goto out;
 
 	/* mark that this skb passed once through the udp gro layer */
 	NAPI_GRO_CB(skb)->udp_mark = 1;
-	NAPI_GRO_CB(skb)->encapsulation++;
 
 	rcu_read_lock();
 	uo_priv = rcu_dereference(udp_offload_base);
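Similarly, the reworked bail-out test in udp_gro_receive() can be restated for illustration; udp_gro_bail() below is a hypothetical helper, not kernel code.

#include <assert.h>
#include <stdbool.h>

enum ip_summed { CSUM_NONE, CSUM_UNNECESSARY, CSUM_COMPLETE, CSUM_PARTIAL };

/*
 * Illustrative restatement of the bail-out test above: give up on UDP GRO
 * when the skb already passed through the UDP GRO layer (udp_mark), or when
 * there is no usable checksum information at all -- not CHECKSUM_PARTIAL, no
 * driver-verified checksums left to consume (csum_cnt == 0), and no
 * CHECKSUM_COMPLETE value available (csum_valid == 0).
 */
static bool udp_gro_bail(bool udp_mark, enum ip_summed ip_summed,
			 unsigned int csum_cnt, bool csum_valid)
{
	return udp_mark ||
	       (ip_summed != CSUM_PARTIAL && csum_cnt == 0 && !csum_valid);
}

int main(void)
{
	/* Outer UDP checksum verified by the NIC: csum_cnt == 1, proceed. */
	assert(!udp_gro_bail(false, CSUM_UNNECESSARY, 1, false));
	/* CHECKSUM_COMPLETE provides a full checksum to validate against. */
	assert(!udp_gro_bail(false, CSUM_COMPLETE, 0, true));
	/* No checksum information at all: skip UDP GRO. */
	assert(udp_gro_bail(false, CSUM_NONE, 0, false));
	/* Already went through UDP GRO once: never recurse. */
	assert(udp_gro_bail(true, CSUM_UNNECESSARY, 1, false));
	return 0;
}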