mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-11-18 07:35:12 +08:00
cdbaa0bb26
This change makes it so that the GRE and VXLAN tunnels can make use of Tx checksum offload support provided by some drivers via the hw_enc_features. Without this fix enabling GSO means sacrificing Tx checksum offload and this actually leads to a performance regression as shown below: Utilization Send Throughput local GSO 10^6bits/s % S state 6276.51 8.39 enabled 7123.52 8.42 disabled To resolve this it was necessary to address two items. First netif_skb_features needed to be updated so that it would correctly handle the Trans Ether Bridging protocol without impacting the need to check for Q-in-Q tagging. To do this it was necessary to update harmonize_features so that it used skb_network_protocol instead of just using the outer protocol. Second it was necessary to update the GRE and UDP tunnel segmentation offloads so that they would reset the encapsulation bit and inner header offsets after the offload was complete. As a result of this change I have seen the following results on a interface with Tx checksum enabled for encapsulated frames: Utilization Send Throughput local GSO 10^6bits/s % S state 7123.52 8.42 disabled 8321.75 5.43 enabled v2: Instead of replacing reference to skb->protocol with skb_network_protocol just replace the protocol reference in harmonize_features to allow for double VLAN tag checks. Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
131 lines
2.8 KiB
C
131 lines
2.8 KiB
C
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	GRE GSO support
 */
|
|
|
|
#include <linux/skbuff.h>
|
|
#include <net/protocol.h>
|
|
#include <net/gre.h>
|
|
|
|
static int gre_gso_send_check(struct sk_buff *skb)
|
|
{
|
|
if (!skb->encapsulation)
|
|
return -EINVAL;
|
|
return 0;
|
|
}
|
|
|
|
/* gre_gso_segment - software GSO for GRE-encapsulated packets.
 *
 * Strips the outer GRE header, hands the inner packet to the generic
 * MAC-layer GSO path, then rebuilds the tunnel headers (and, when the
 * GRE checksum flag is set, recomputes the GRE checksum) on every
 * resulting segment.
 *
 * @skb:      the oversized GRE-encapsulated packet to segment
 * @features: device feature flags for the outgoing path
 *
 * Returns the list of segments on success, or an ERR_PTR on failure
 * (-EINVAL for malformed/unsupported input).  Consumes @skb's headers
 * in place; on the error paths before segmentation the caller's skb is
 * left pulled/modified, matching the usual gso_segment contract.
 */
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	netdev_features_t enc_features;
	/* ghl accumulates the total GRE header length: base section plus
	 * one section each for the optional csum/key/seq fields.
	 */
	int ghl = GRE_HEADER_SECTION;
	struct gre_base_hdr *greh;
	/* Save the outer link-layer length and protocol so they can be
	 * restored on each segment after inner segmentation rewrites them.
	 */
	int mac_len = skb->mac_len;
	__be16 protocol = skb->protocol;
	int tnl_hlen;
	bool csum;

	/* Reject any gso_type this handler does not know how to segment. */
	if (unlikely(skb_shinfo(skb)->gso_type &
		     ~(SKB_GSO_TCPV4 |
		       SKB_GSO_TCPV6 |
		       SKB_GSO_UDP |
		       SKB_GSO_DODGY |
		       SKB_GSO_TCP_ECN |
		       SKB_GSO_GRE)))
		goto out;

	/* Need at least the fixed GRE base header in the linear area. */
	if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
		goto out;

	greh = (struct gre_base_hdr *)skb_transport_header(skb);

	/* Each optional field present extends the header by one section. */
	if (greh->flags & GRE_KEY)
		ghl += GRE_HEADER_SECTION;
	if (greh->flags & GRE_SEQ)
		ghl += GRE_HEADER_SECTION;
	if (greh->flags & GRE_CSUM) {
		ghl += GRE_HEADER_SECTION;
		csum = true;
	} else
		csum = false;

	/* setup inner skb. */
	skb->protocol = greh->protocol;
	skb->encapsulation = 0;

	/* Make sure the full (variable-length) GRE header is linear
	 * before pulling it off.
	 */
	if (unlikely(!pskb_may_pull(skb, ghl)))
		goto out;

	/* Strip the GRE header and repoint the mac/network offsets at the
	 * inner frame so the generic GSO code sees a plain packet.
	 */
	__skb_pull(skb, ghl);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);

	/* segment inner packet. */
	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
	segs = skb_mac_gso_segment(skb, enc_features);
	if (!segs || IS_ERR(segs))
		goto out;

	skb = segs;
	/* Total outer tunnel header length to push back on each segment. */
	tnl_hlen = skb_tnl_header_len(skb);
	do {
		/* Restore the GRE header in front of this segment. */
		__skb_push(skb, ghl);
		if (csum) {
			__be32 *pcsum;

			/* skb_checksum() cannot safely walk pages that may
			 * be written concurrently; linearize first.
			 */
			if (skb_has_shared_frag(skb)) {
				int err;

				err = __skb_linearize(skb);
				if (err) {
					kfree_skb_list(segs);
					segs = ERR_PTR(err);
					goto out;
				}
			}

			/* The checksum field sits immediately after the
			 * base header; zero it, then fold the checksum of
			 * the whole GRE header + payload into it.
			 */
			greh = (struct gre_base_hdr *)(skb->data);
			pcsum = (__be32 *)(greh + 1);
			*pcsum = 0;
			*(__sum16 *)pcsum = csum_fold(skb_checksum(skb, 0, skb->len, 0));
		}
		/* Push the remaining outer headers (outer L2/L3) back on. */
		__skb_push(skb, tnl_hlen - ghl);

		/* Record inner header offsets and re-mark the segment as
		 * encapsulated so Tx checksum offload of the inner headers
		 * still works (see commit message: resets after offload).
		 */
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;

		/* Restore the outer mac/network view saved on entry. */
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb->mac_len = mac_len;
		skb->protocol = protocol;
	} while ((skb = skb->next));
out:
	return segs;
}
|
|
|
|
/* GSO callbacks registered for IPPROTO_GRE; no GRO handlers in this
 * version.
 */
static const struct net_offload gre_offload = {
	.callbacks = {
		.gso_send_check = gre_gso_send_check,
		.gso_segment = gre_gso_segment,
	},
};
|
|
|
|
/* Register the GRE offload handlers with the inet offload table.
 * Returns 0 on success or a negative errno from inet_add_offload().
 */
int __init gre_offload_init(void)
{
	return inet_add_offload(&gre_offload, IPPROTO_GRE);
}
|
|
|
|
/* Unregister the GRE offload handlers on module teardown. */
void __exit gre_offload_exit(void)
{
	inet_del_offload(&gre_offload, IPPROTO_GRE);
}
|