amd-xgbe: Rework the Rx path SKB allocation
Rework the SKB allocation so that all of the buffers of the first descriptor are handled in the SKB allocation routine. After copying the data in the header buffer (which can be just the header if split header processing succeeded, or the header plus data if split header processing did not succeed) into the SKB, check for remaining data in the receive buffer. If there is data remaining in the receive buffer, add that as a frag to the SKB.

Once an SKB has been allocated, all other descriptors are added as frags to the SKB.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent 34bf65dfa3
commit 7d9ca345b5
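To make the reworked first-descriptor handling concrete, here is a standalone C sketch of the decision xgbe_create_skb() now makes: copy the header buffer into the SKB linear area, then leave any remainder for a page frag. The rx_model struct, the first_desc_split() helper, and the example lengths are invented for illustration; only the copy_len arithmetic mirrors the patch below.

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

/* Invented stand-in for the rdata->rx fields the patch touches */
struct rx_model {
	unsigned int hdr_len;     /* split-header length from HW; 0 if split failed */
	unsigned int hdr_dma_len; /* size of the header buffer */
};

/* Mirrors the patched arithmetic: how much of the first descriptor goes
 * into the SKB linear area, and how much is left for a page frag.
 */
static void first_desc_split(const struct rx_model *rx, unsigned int len)
{
	unsigned int copy_len, frag_len;

	/* Header buffer holds just the header if split-header processing
	 * succeeded, or header plus data if it did not.
	 */
	copy_len = rx->hdr_len ? rx->hdr_len : len;
	copy_len = min(rx->hdr_dma_len, copy_len);
	frag_len = len - copy_len;

	printf("len=%u -> linear copy=%u, frag=%u\n", len, copy_len, frag_len);
}

int main(void)
{
	struct rx_model split_ok = { .hdr_len = 54, .hdr_dma_len = 256 };
	struct rx_model split_failed = { .hdr_len = 0, .hdr_dma_len = 256 };

	first_desc_split(&split_ok, 1460);    /* header to linear, payload as frag */
	first_desc_split(&split_failed, 128); /* whole small packet fits linear */
	return 0;
}

Note that when split header processing fails, rx.hdr_len is zero, so the header buffer is treated as an ordinary data buffer capped at its DMA length, and anything beyond it still ends up as a frag.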
@@ -481,8 +481,6 @@ static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
 
 	if (rdata->state_saved) {
 		rdata->state_saved = 0;
-		rdata->state.incomplete = 0;
-		rdata->state.context_next = 0;
 		rdata->state.skb = NULL;
 		rdata->state.len = 0;
 		rdata->state.error = 0;
@@ -1822,9 +1822,10 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
 				  lower_32_bits(rdata->rdesc_dma));
 }
 
-static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
+static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+				       struct napi_struct *napi,
 				       struct xgbe_ring_data *rdata,
-				       unsigned int *len)
+				       unsigned int len)
 {
 	struct sk_buff *skb;
 	u8 *packet;
@@ -1834,14 +1835,31 @@ static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
 	if (!skb)
 		return NULL;
 
+	/* Start with the header buffer which may contain just the header
+	 * or the header plus data
+	 */
+	dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma,
+				rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
+
 	packet = page_address(rdata->rx.hdr.pa.pages) +
 		 rdata->rx.hdr.pa.pages_offset;
-	copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len;
+	copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
 	copy_len = min(rdata->rx.hdr.dma_len, copy_len);
 	skb_copy_to_linear_data(skb, packet, copy_len);
 	skb_put(skb, copy_len);
 
-	*len -= copy_len;
+	len -= copy_len;
+	if (len) {
+		/* Add the remaining data as a frag */
+		dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma,
+					rdata->rx.buf.dma_len, DMA_FROM_DEVICE);
+
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+				rdata->rx.buf.pa.pages,
+				rdata->rx.buf.pa.pages_offset,
+				len, rdata->rx.buf.dma_len);
+		rdata->rx.buf.pa.pages = NULL;
+	}
 
 	return skb;
 }
@@ -1923,7 +1941,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 	struct sk_buff *skb;
 	struct skb_shared_hwtstamps *hwtstamps;
 	unsigned int incomplete, error, context_next, context;
-	unsigned int len, put_len, max_len;
+	unsigned int len, rdesc_len, max_len;
 	unsigned int received = 0;
 	int packet_count = 0;
 
@@ -1933,6 +1951,9 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 	if (!ring)
 		return 0;
 
+	incomplete = 0;
+	context_next = 0;
+
 	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
 
 	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
@@ -1942,15 +1963,11 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 
 		/* First time in loop see if we need to restore state */
 		if (!received && rdata->state_saved) {
-			incomplete = rdata->state.incomplete;
-			context_next = rdata->state.context_next;
 			skb = rdata->state.skb;
 			error = rdata->state.error;
 			len = rdata->state.len;
 		} else {
 			memset(packet, 0, sizeof(*packet));
-			incomplete = 0;
-			context_next = 0;
 			skb = NULL;
 			error = 0;
 			len = 0;
@@ -1991,23 +2008,16 @@ read_again:
 		}
 
 		if (!context) {
-			put_len = rdata->rx.len - len;
-			len += put_len;
-
-			if (!skb) {
-				dma_sync_single_for_cpu(pdata->dev,
-							rdata->rx.hdr.dma,
-							rdata->rx.hdr.dma_len,
-							DMA_FROM_DEVICE);
-
-				skb = xgbe_create_skb(napi, rdata, &put_len);
-				if (!skb) {
+			/* Length is cumulative, get this descriptor's length */
+			rdesc_len = rdata->rx.len - len;
+			len += rdesc_len;
+
+			if (rdesc_len && !skb) {
+				skb = xgbe_create_skb(pdata, napi, rdata,
+						      rdesc_len);
+				if (!skb)
 					error = 1;
-					goto skip_data;
-				}
-			}
-
-			if (put_len) {
+			} else if (rdesc_len) {
 				dma_sync_single_for_cpu(pdata->dev,
 							rdata->rx.buf.dma,
 							rdata->rx.buf.dma_len,
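A note on the arithmetic above: the hardware reports rdata->rx.len as a cumulative total for the packet rather than a per-descriptor count, so each pass recovers this descriptor's contribution by subtraction before accumulating. A minimal standalone model of that accounting, with made-up cumulative values:

#include <stdio.h>

int main(void)
{
	/* rx.len as reported by each descriptor: cumulative, not per-buffer */
	unsigned int desc_cumulative[] = { 2048, 4096, 5000 };
	unsigned int len = 0;

	for (unsigned int i = 0; i < 3; i++) {
		/* Length is cumulative, get this descriptor's length */
		unsigned int rdesc_len = desc_cumulative[i] - len;

		len += rdesc_len;
		printf("desc %u: this buffer holds %u bytes (total %u)\n",
		       i, rdesc_len, len);
	}
	return 0;
}

The rdesc_len guard also means a descriptor that carries no data triggers neither SKB allocation nor a frag append, which, together with the simplified error path, lets the old skip_data label go away in the next hunk.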
@@ -2016,12 +2026,12 @@ read_again:
 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 					rdata->rx.buf.pa.pages,
 					rdata->rx.buf.pa.pages_offset,
-					put_len, rdata->rx.buf.dma_len);
+					rdesc_len,
+					rdata->rx.buf.dma_len);
 			rdata->rx.buf.pa.pages = NULL;
 		}
 	}
 
-skip_data:
 	if (incomplete || context_next)
 		goto read_again;
 
@@ -2084,8 +2094,6 @@ next_packet:
 	if (received && (incomplete || context_next)) {
 		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 		rdata->state_saved = 1;
-		rdata->state.incomplete = incomplete;
-		rdata->state.context_next = context_next;
 		rdata->state.skb = skb;
 		rdata->state.len = len;
 		rdata->state.error = error;
@@ -334,8 +334,6 @@ struct xgbe_ring_data {
 	 */
 	unsigned int state_saved;
 	struct {
-		unsigned int incomplete;
-		unsigned int context_next;
 		struct sk_buff *skb;
 		unsigned int len;
 		unsigned int error;
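Finally, the trimmed state block: with incomplete and context_next removed, only the partial SKB, its accumulated length, and the error flag need to survive a poll boundary; the two flags are now reset before the descriptor loop and re-derived from each descriptor (see the incomplete = 0 / context_next = 0 lines added to xgbe_rx_poll above). The sketch below models that save/restore contract in standalone C; ring_entry and both helper names are hypothetical, and clearing state_saved inside restore_state() is a simplification of what the driver actually does in xgbe_unmap_rdata().

#include <stdio.h>

struct sk_buff; /* opaque stand-in for the kernel's struct sk_buff */

/* Hypothetical stand-in for the trimmed state block in xgbe_ring_data */
struct ring_entry {
	unsigned int state_saved;
	struct {
		struct sk_buff *skb;
		unsigned int len;
		unsigned int error;
	} state;
};

/* Budget ran out mid-packet: park the partial SKB on the ring entry */
static void save_state(struct ring_entry *e, struct sk_buff *skb,
		       unsigned int len, unsigned int error)
{
	e->state_saved = 1;
	e->state.skb = skb;
	e->state.len = len;
	e->state.error = error;
}

/* Next poll: pick the partial SKB back up.  incomplete and context_next
 * are deliberately absent; after this patch they are re-read from the
 * current descriptor on every loop iteration instead of being saved.
 */
static struct sk_buff *restore_state(struct ring_entry *e,
				     unsigned int *len, unsigned int *error)
{
	if (!e->state_saved)
		return NULL;

	e->state_saved = 0;
	*len = e->state.len;
	*error = e->state.error;
	return e->state.skb;
}

int main(void)
{
	struct ring_entry entry = { 0 };
	unsigned int len, error;
	int placeholder; /* stands in for a real sk_buff allocation */

	save_state(&entry, (struct sk_buff *)&placeholder, 1024, 0);
	if (restore_state(&entry, &len, &error))
		printf("restored partial packet: len=%u error=%u\n", len, error);
	return 0;
}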