Merge branch 'eth_get_headlen'
Alexander Duyck says:

====================
net: Drop get_headlen functions in favor of generic function

This series replaces the igb_get_headlen and ixgbe_get_headlen functions
with a generic function named eth_get_headlen.

I have done some performance testing on ixgbe with 258 byte frames since
the calls are only used on frames larger than 256 bytes and have seen no
significant difference in CPU utilization.

v2: renamed __skb_get_poff to skb_get_poff
    renamed ___skb_get_poff to __skb_get_poff
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 2ba38943ba
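The driver-side effect of the series is easiest to see outside the diff: a driver stops open-coding its own header parser and instead asks eth_get_headlen() how many bytes to pull into the skb's linear area. The sketch below is illustrative only — my_pull_tail(), MY_RX_HDR_LEN and the surrounding fragment bookkeeping are hypothetical stand-ins modelled on igb/ixgbe_pull_tail(), while eth_get_headlen(), skb_copy_to_linear_data() and ALIGN() are the kernel interfaces the patches actually use.

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#define MY_RX_HDR_LEN 256	/* hypothetical per-driver pull limit */

/* hypothetical rx "pull tail" helper, modelled on igb/ixgbe_pull_tail() */
static void my_pull_tail(struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va = skb_frag_address(frag);
	unsigned int pull_len;

	/* the generic helper walks L2/L3/L4 and reports the header length,
	 * capped at the supplied limit
	 */
	pull_len = eth_get_headlen(va, MY_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* headers now live in the linear area; shrink the page fragment */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}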
@@ -6768,113 +6768,6 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring,
 	return true;
 }
 
-/**
- * igb_get_headlen - determine size of header for LRO/GRO
- * @data: pointer to the start of the headers
- * @max_len: total length of section to find headers in
- *
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO, and GRO offloads. The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
- **/
-static unsigned int igb_get_headlen(unsigned char *data,
-				    unsigned int max_len)
-{
-	union {
-		unsigned char *network;
-		/* l2 headers */
-		struct ethhdr *eth;
-		struct vlan_hdr *vlan;
-		/* l3 headers */
-		struct iphdr *ipv4;
-		struct ipv6hdr *ipv6;
-	} hdr;
-	__be16 protocol;
-	u8 nexthdr = 0;	/* default to not TCP */
-	u8 hlen;
-
-	/* this should never happen, but better safe than sorry */
-	if (max_len < ETH_HLEN)
-		return max_len;
-
-	/* initialize network frame pointer */
-	hdr.network = data;
-
-	/* set first protocol and move network header forward */
-	protocol = hdr.eth->h_proto;
-	hdr.network += ETH_HLEN;
-
-	/* handle any vlan tag if present */
-	if (protocol == htons(ETH_P_8021Q)) {
-		if ((hdr.network - data) > (max_len - VLAN_HLEN))
-			return max_len;
-
-		protocol = hdr.vlan->h_vlan_encapsulated_proto;
-		hdr.network += VLAN_HLEN;
-	}
-
-	/* handle L3 protocols */
-	if (protocol == htons(ETH_P_IP)) {
-		if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
-			return max_len;
-
-		/* access ihl as a u8 to avoid unaligned access on ia64 */
-		hlen = (hdr.network[0] & 0x0F) << 2;
-
-		/* verify hlen meets minimum size requirements */
-		if (hlen < sizeof(struct iphdr))
-			return hdr.network - data;
-
-		/* record next protocol if header is present */
-		if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
-			nexthdr = hdr.ipv4->protocol;
-	} else if (protocol == htons(ETH_P_IPV6)) {
-		if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
-			return max_len;
-
-		/* record next protocol */
-		nexthdr = hdr.ipv6->nexthdr;
-		hlen = sizeof(struct ipv6hdr);
-	} else {
-		return hdr.network - data;
-	}
-
-	/* relocate pointer to start of L4 header */
-	hdr.network += hlen;
-
-	/* finally sort out TCP */
-	if (nexthdr == IPPROTO_TCP) {
-		if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
-			return max_len;
-
-		/* access doff as a u8 to avoid unaligned access on ia64 */
-		hlen = (hdr.network[12] & 0xF0) >> 2;
-
-		/* verify hlen meets minimum size requirements */
-		if (hlen < sizeof(struct tcphdr))
-			return hdr.network - data;
-
-		hdr.network += hlen;
-	} else if (nexthdr == IPPROTO_UDP) {
-		if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
-			return max_len;
-
-		hdr.network += sizeof(struct udphdr);
-	}
-
-	/* If everything has gone correctly hdr.network should be the
-	 * data section of the packet and will be the end of the header.
-	 * If not then it probably represents the end of the last recognized
-	 * header.
-	 */
-	if ((hdr.network - data) < max_len)
-		return hdr.network - data;
-	else
-		return max_len;
-}
-
 /**
  * igb_pull_tail - igb specific version of skb_pull_tail
  * @rx_ring: rx descriptor ring packet is being transacted on
@@ -6919,7 +6812,7 @@ static void igb_pull_tail(struct igb_ring *rx_ring,
 	/* we need the header to contain the greater of either ETH_HLEN or
 	 * 60 bytes if the skb->len is less than 60 for skb_pad.
 	 */
-	pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
+	pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
 
 	/* align pull length to size of long to optimize memcpy performance */
 	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
@@ -1521,120 +1521,6 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 	ixgbe_release_rx_desc(rx_ring, i);
 }
 
-/**
- * ixgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
- * @data: pointer to the start of the headers
- * @max_len: total length of section to find headers in
- *
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO, GRO, and RSC offloads. The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
- **/
-static unsigned int ixgbe_get_headlen(unsigned char *data,
-				      unsigned int max_len)
-{
-	union {
-		unsigned char *network;
-		/* l2 headers */
-		struct ethhdr *eth;
-		struct vlan_hdr *vlan;
-		/* l3 headers */
-		struct iphdr *ipv4;
-		struct ipv6hdr *ipv6;
-	} hdr;
-	__be16 protocol;
-	u8 nexthdr = 0;	/* default to not TCP */
-	u8 hlen;
-
-	/* this should never happen, but better safe than sorry */
-	if (max_len < ETH_HLEN)
-		return max_len;
-
-	/* initialize network frame pointer */
-	hdr.network = data;
-
-	/* set first protocol and move network header forward */
-	protocol = hdr.eth->h_proto;
-	hdr.network += ETH_HLEN;
-
-	/* handle any vlan tag if present */
-	if (protocol == htons(ETH_P_8021Q)) {
-		if ((hdr.network - data) > (max_len - VLAN_HLEN))
-			return max_len;
-
-		protocol = hdr.vlan->h_vlan_encapsulated_proto;
-		hdr.network += VLAN_HLEN;
-	}
-
-	/* handle L3 protocols */
-	if (protocol == htons(ETH_P_IP)) {
-		if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
-			return max_len;
-
-		/* access ihl as a u8 to avoid unaligned access on ia64 */
-		hlen = (hdr.network[0] & 0x0F) << 2;
-
-		/* verify hlen meets minimum size requirements */
-		if (hlen < sizeof(struct iphdr))
-			return hdr.network - data;
-
-		/* record next protocol if header is present */
-		if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
-			nexthdr = hdr.ipv4->protocol;
-	} else if (protocol == htons(ETH_P_IPV6)) {
-		if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
-			return max_len;
-
-		/* record next protocol */
-		nexthdr = hdr.ipv6->nexthdr;
-		hlen = sizeof(struct ipv6hdr);
-#ifdef IXGBE_FCOE
-	} else if (protocol == htons(ETH_P_FCOE)) {
-		if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
-			return max_len;
-		hlen = FCOE_HEADER_LEN;
-#endif
-	} else {
-		return hdr.network - data;
-	}
-
-	/* relocate pointer to start of L4 header */
-	hdr.network += hlen;
-
-	/* finally sort out TCP/UDP */
-	if (nexthdr == IPPROTO_TCP) {
-		if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
-			return max_len;
-
-		/* access doff as a u8 to avoid unaligned access on ia64 */
-		hlen = (hdr.network[12] & 0xF0) >> 2;
-
-		/* verify hlen meets minimum size requirements */
-		if (hlen < sizeof(struct tcphdr))
-			return hdr.network - data;
-
-		hdr.network += hlen;
-	} else if (nexthdr == IPPROTO_UDP) {
-		if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
-			return max_len;
-
-		hdr.network += sizeof(struct udphdr);
-	}
-
-	/*
-	 * If everything has gone correctly hdr.network should be the
-	 * data section of the packet and will be the end of the header.
-	 * If not then it probably represents the end of the last recognized
-	 * header.
-	 */
-	if ((hdr.network - data) < max_len)
-		return hdr.network - data;
-	else
-		return max_len;
-}
-
 static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
 				   struct sk_buff *skb)
 {
@@ -1793,7 +1679,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
 	 * we need the header to contain the greater of either ETH_HLEN or
 	 * 60 bytes if the skb->len is less than 60 for skb_pad.
 	 */
-	pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
+	pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);
 
 	/* align pull length to size of long to optimize memcpy performance */
 	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
@@ -29,6 +29,7 @@
 #include <asm/bitsperlong.h>
 
 #ifdef __KERNEL__
+u32 eth_get_headlen(void *data, unsigned int max_len);
 __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 extern const struct header_ops eth_header_ops;
 
@@ -3218,7 +3218,9 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
 
 int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
 
-u32 __skb_get_poff(const struct sk_buff *skb);
+u32 skb_get_poff(const struct sk_buff *skb);
+u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+		   const struct flow_keys *keys, int hlen);
 
 /**
  * skb_head_is_locked - Determine if the skb->head is locked down
@@ -40,4 +40,6 @@ static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8
 	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
 }
 u32 flow_hash_from_keys(struct flow_keys *keys);
+unsigned int flow_get_hlen(const unsigned char *data, unsigned int max_len,
+			   __be16 protocol);
 #endif
@@ -113,7 +113,7 @@ static unsigned int pkt_type_offset(void)
 
 static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 {
-	return __skb_get_poff((struct sk_buff *)(unsigned long) ctx);
+	return skb_get_poff((struct sk_buff *)(unsigned long) ctx);
 }
 
 static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
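__skb_get_pay_offset() is the classic-BPF ancillary handler behind SKF_AD_PAY_OFFSET, so after this rename a socket filter that loads the payload offset ends up in skb_get_poff(). A minimal user-space sketch of that idiom follows; the header-only-capture filter is a well-known pattern and my own illustration, not something added by this series. It keeps only the header bytes of each packet by returning the payload offset as the accept length.

#include <linux/filter.h>
#include <sys/socket.h>

/* Load the payload offset (resolved in-kernel via skb_get_poff()) into the
 * accumulator, then return the accumulator as the number of bytes to keep.
 */
static struct sock_filter headers_only[] = {
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_PAY_OFFSET),
	BPF_STMT(BPF_RET | BPF_A, 0),
};

static struct sock_fprog headers_only_prog = {
	.len	= 2,
	.filter	= headers_only,
};

/* attach with: setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
 *                         &headers_only_prog, sizeof(headers_only_prog));
 */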
@@ -13,6 +13,7 @@
 #include <linux/if_pppox.h>
 #include <linux/ppp_defs.h>
 #include <net/flow_keys.h>
+#include <scsi/fc/fc_fcoe.h>
 
 /* copy saddr & daddr, possibly using 64bit load/store
  * Equivalent to : flow->src = iph->saddr;
@@ -117,6 +118,13 @@ ipv6:
 		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
 		nhoff += sizeof(struct ipv6hdr);
 
+		/* skip the flow label processing if skb is NULL. The
+		 * assumption here is that if there is no skb we are not
+		 * looking for flow info as much as we are length.
+		 */
+		if (!skb)
+			break;
+
 		flow_label = ip6_flowlabel(iph);
 		if (flow_label) {
 			/* Awesome, IPv6 packet has a flow label so we can
@@ -165,6 +173,9 @@ ipv6:
 			return false;
 		}
 	}
+	case htons(ETH_P_FCOE):
+		flow->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
+		/* fall through */
 	default:
 		return false;
 	}
@@ -316,26 +327,18 @@ u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(__skb_tx_hash);
 
-/* __skb_get_poff() returns the offset to the payload as far as it could
- * be dissected. The main user is currently BPF, so that we can dynamically
- * truncate packets without needing to push actual payload to the user
- * space and can analyze headers only, instead.
- */
-u32 __skb_get_poff(const struct sk_buff *skb)
+u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+		   const struct flow_keys *keys, int hlen)
 {
-	struct flow_keys keys;
-	u32 poff = 0;
+	u32 poff = keys->thoff;
 
-	if (!skb_flow_dissect(skb, &keys))
-		return 0;
-
-	poff += keys.thoff;
-	switch (keys.ip_proto) {
+	switch (keys->ip_proto) {
 	case IPPROTO_TCP: {
 		const struct tcphdr *tcph;
 		struct tcphdr _tcph;
 
-		tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph);
+		tcph = __skb_header_pointer(skb, poff, sizeof(_tcph),
+					    data, hlen, &_tcph);
 		if (!tcph)
 			return poff;
 
@@ -369,6 +372,21 @@ u32 __skb_get_poff(const struct sk_buff *skb)
 	return poff;
 }
 
+/* skb_get_poff() returns the offset to the payload as far as it could
+ * be dissected. The main user is currently BPF, so that we can dynamically
+ * truncate packets without needing to push actual payload to the user
+ * space and can analyze headers only, instead.
+ */
+u32 skb_get_poff(const struct sk_buff *skb)
+{
+	struct flow_keys keys;
+
+	if (!skb_flow_dissect(skb, &keys))
+		return 0;
+
+	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
+}
+
 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_XPS
@@ -145,6 +145,33 @@ int eth_rebuild_header(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(eth_rebuild_header);
 
+/**
+ * eth_get_headlen - determine the length of header for an ethernet frame
+ * @data: pointer to start of frame
+ * @len: total length of frame
+ *
+ * Make a best effort attempt to pull the length for all of the headers for
+ * a given frame in a linear buffer.
+ */
+u32 eth_get_headlen(void *data, unsigned int len)
+{
+	const struct ethhdr *eth = (const struct ethhdr *)data;
+	struct flow_keys keys;
+
+	/* this should never happen, but better safe than sorry */
+	if (len < sizeof(*eth))
+		return len;
+
+	/* parse any remaining L2/L3 headers, check for L4 */
+	if (!__skb_flow_dissect(NULL, &keys, data,
+				eth->h_proto, sizeof(*eth), len))
+		return max_t(u32, keys.thoff, sizeof(*eth));
+
+	/* parse for any L4 headers */
+	return min_t(u32, __skb_get_poff(NULL, data, &keys, len), len);
+}
+EXPORT_SYMBOL(eth_get_headlen);
+
 /**
  * eth_type_trans - determine the packet's protocol ID.
  * @skb: received socket data
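To make the new helper's behaviour concrete, here is a rough worked example (my own illustration, not part of the patch set). For an untagged IPv4/TCP frame with no IP or TCP options, the flow dissector reports the transport header offset and __skb_get_poff() then adds the TCP header length:

	ETH_HLEN                     = 14 bytes
	struct iphdr  (no options)   = 20 bytes  ->  keys.thoff = 14 + 20 = 34
	struct tcphdr (no options)   = 20 bytes  ->  payload offset = 34 + 20 = 54

	eth_get_headlen(data, len)   = min(54, len)

A VLAN tag or IP/TCP options simply grow the corresponding term, and anything the dissector cannot parse falls back to the offset reached so far.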