dpaa2-eth: add rx copybreak support
DMA unmapping, allocating a new buffer and DMA mapping it back on the refill path is not very efficient. Proper buffer recycling (page pool, or flipping the page and using the other half) cannot be done for DPAA2 since it is not a ring based controller; instead it deals with multiple queues which all get their buffers from the same buffer pool on Rx.

To circumvent these limitations, add support for Rx copybreak. For small packets, instead of building an skb around the buffer in which the frame was received, allocate a new skb altogether, copy the frame contents into it and release the initial page back into the buffer pool.

Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 50f826999a
parent 28d137cc8c
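Before the diff, a minimal userspace sketch of the copybreak idea described in the commit message: frames at or below a 512-byte threshold (mirroring DPAA2_ETH_DEFAULT_COPYBREAK) are copied into a freshly allocated packet buffer and the original Rx buffer is recycled straight back into the pool, avoiding the DMA unmap/remap on the refill path. The names here (struct rx_buf, buf_pool_recycle(), copybreak_rx(), COPYBREAK_THRESHOLD, RX_BUF_SIZE) are hypothetical stand-ins for illustration only; the real helper, dpaa2_eth_copybreak(), is in the diff below.

/* Illustrative sketch only -- not dpaa2-eth driver code. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define COPYBREAK_THRESHOLD 512   /* mirrors DPAA2_ETH_DEFAULT_COPYBREAK */
#define RX_BUF_SIZE        2048   /* hypothetical hardware Rx buffer size */

/* A hardware Rx buffer handed to us by the NIC (hypothetical). */
struct rx_buf {
        unsigned char data[RX_BUF_SIZE];
};

/* Stand-in for releasing a buffer back into the Rx buffer pool. */
static void buf_pool_recycle(struct rx_buf *buf)
{
        /* In the driver this keeps the page mapped and re-arms it for Rx. */
        (void)buf;
}

/*
 * Copybreak path: for short frames, copy the payload into a freshly
 * allocated packet buffer and immediately recycle the hardware buffer.
 * Returns NULL for frames above the threshold (or on allocation failure),
 * in which case the caller falls back to building the packet around the
 * original buffer.
 */
static unsigned char *copybreak_rx(struct rx_buf *buf, size_t len)
{
        unsigned char *pkt;

        if (len > COPYBREAK_THRESHOLD)
                return NULL;

        pkt = malloc(len);
        if (!pkt)
                return NULL;

        memcpy(pkt, buf->data, len);
        buf_pool_recycle(buf);  /* original buffer goes straight back to the pool */
        return pkt;
}

int main(void)
{
        struct rx_buf buf;
        unsigned char *pkt;

        memcpy(buf.data, "hello", 5);

        pkt = copybreak_rx(&buf, 5);        /* 5 <= 512: copy path taken */
        if (pkt) {
                printf("copied %.5s; original Rx buffer recycled\n", (char *)pkt);
                free(pkt);
        }

        if (!copybreak_rx(&buf, 1500))      /* 1500 > 512: caller must fall back */
                printf("large frame: fall back to building around the buffer\n");

        return 0;
}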
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c

@@ -418,6 +418,34 @@ out:
 	return xdp_act;
 }
 
+static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
+					   const struct dpaa2_fd *fd,
+					   void *fd_vaddr)
+{
+	u16 fd_offset = dpaa2_fd_get_offset(fd);
+	u32 fd_length = dpaa2_fd_get_len(fd);
+	struct sk_buff *skb = NULL;
+	unsigned int skb_len;
+
+	if (fd_length > DPAA2_ETH_DEFAULT_COPYBREAK)
+		return NULL;
+
+	skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);
+
+	skb = napi_alloc_skb(&ch->napi, skb_len);
+	if (!skb)
+		return NULL;
+
+	skb_reserve(skb, dpaa2_eth_needed_headroom(NULL));
+	skb_put(skb, fd_length);
+
+	memcpy(skb->data, fd_vaddr + fd_offset, fd_length);
+
+	dpaa2_eth_recycle_buf(ch->priv, ch, dpaa2_fd_get_addr(fd));
+
+	return skb;
+}
+
 /* Main Rx frame processing routine */
 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 			 struct dpaa2_eth_channel *ch,
@@ -459,9 +487,12 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 			return;
 		}
 
-		dma_unmap_page(dev, addr, priv->rx_buf_size,
-			       DMA_BIDIRECTIONAL);
-		skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
+		skb = dpaa2_eth_copybreak(ch, fd, vaddr);
+		if (!skb) {
+			dma_unmap_page(dev, addr, priv->rx_buf_size,
+				       DMA_BIDIRECTIONAL);
+			skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
+		}
 	} else if (fd_format == dpaa2_fd_sg) {
 		WARN_ON(priv->xdp_prog);
 
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h

@@ -489,6 +489,8 @@ struct dpaa2_eth_trap_data {
 	struct dpaa2_eth_priv *priv;
 };
 
+#define DPAA2_ETH_DEFAULT_COPYBREAK	512
+
 /* Driver private data */
 struct dpaa2_eth_priv {
 	struct net_device *net_dev;