i40e: Use the xsk batched rx allocation interface
Use the new xsk batched rx allocation interface for the zero-copy data path. As the array of struct xdp_buff pointers kept by the driver is really a ring that wraps, the allocation routine is modified to detect a wrap and in that case call the allocation function twice. The allocation function cannot deal with wrapped rings, only arrays. As we now know exactly how many buffers we get and that there is no wrapping, the allocation function can be simplified even more as all if-statements in the allocation loop can be removed, improving performance.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20210922075613.12186-6-magnus.karlsson@gmail.com
commit 6aab0bb0c5
parent db804cfc21
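For orientation between the commit message above and the diff below, here is a minimal sketch of the batched allocation pattern the message describes; it is not the committed driver code. The helper name refill_linear_slice is made up for illustration, while the identifiers it uses (I40E_RX_DESC, i40e_rx_bi, xsk_buff_alloc_batch, xsk_buff_xdp_get_dma) all appear in the hunk that follows. The point is to cap the request at the end of the ring so the batch API only ever sees a linear array, then program one descriptor per returned buffer with no per-iteration checks.

/* Sketch only, not the committed code: the batched-allocation pattern.
 * xsk_buff_alloc_batch() fills a plain array of xdp_buff pointers, so the
 * caller never lets a request cross the end of the ring; the slice after
 * the wrap is picked up by a later refill starting at index 0.
 */
static u32 refill_linear_slice(struct i40e_ring *rx_ring, u16 ntu, u16 count)
{
	union i40e_rx_desc *rx_desc = I40E_RX_DESC(rx_ring, ntu);
	struct xdp_buff **xdp = i40e_rx_bi(rx_ring, ntu);
	u32 nb_buffs, i;

	/* never ask for more buffers than fit before the ring wraps */
	nb_buffs = min_t(u16, count, rx_ring->count - ntu);

	/* one call replaces the old per-buffer xsk_buff_alloc() loop */
	nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);

	/* no wrap and no allocation failure can happen in this loop,
	 * so its body needs no if-statements at all
	 */
	for (i = 0; i < nb_buffs; i++) {
		rx_desc[i].read.pkt_addr = cpu_to_le64(xsk_buff_xdp_get_dma(xdp[i]));
		rx_desc[i].read.hdr_addr = 0;
	}

	return nb_buffs;	/* may be less than count; a later refill covers the rest */
}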
@@ -193,42 +193,40 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
 {
 	u16 ntu = rx_ring->next_to_use;
 	union i40e_rx_desc *rx_desc;
-	struct xdp_buff **bi, *xdp;
+	struct xdp_buff **xdp;
+	u32 nb_buffs, i;
 	dma_addr_t dma;
-	bool ok = true;
 
 	rx_desc = I40E_RX_DESC(rx_ring, ntu);
-	bi = i40e_rx_bi(rx_ring, ntu);
-	do {
-		xdp = xsk_buff_alloc(rx_ring->xsk_pool);
-		if (!xdp) {
-			ok = false;
-			goto no_buffers;
-		}
-		*bi = xdp;
-		dma = xsk_buff_xdp_get_dma(xdp);
+	xdp = i40e_rx_bi(rx_ring, ntu);
+
+	nb_buffs = min_t(u16, count, rx_ring->count - ntu);
+	nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
+	if (!nb_buffs)
+		return false;
+
+	i = nb_buffs;
+	while (i--) {
+		dma = xsk_buff_xdp_get_dma(*xdp);
 		rx_desc->read.pkt_addr = cpu_to_le64(dma);
 		rx_desc->read.hdr_addr = 0;
 
 		rx_desc++;
-		bi++;
-		ntu++;
-
-		if (unlikely(ntu == rx_ring->count)) {
-			rx_desc = I40E_RX_DESC(rx_ring, 0);
-			bi = i40e_rx_bi(rx_ring, 0);
-			ntu = 0;
-		}
-	} while (--count);
-
-no_buffers:
-	if (rx_ring->next_to_use != ntu) {
-		/* clear the status bits for the next_to_use descriptor */
-		rx_desc->wb.qword1.status_error_len = 0;
-		i40e_release_rx_desc(rx_ring, ntu);
+		xdp++;
 	}
 
-	return ok;
+	ntu += nb_buffs;
+	if (ntu == rx_ring->count) {
+		rx_desc = I40E_RX_DESC(rx_ring, 0);
+		xdp = i40e_rx_bi(rx_ring, 0);
+		ntu = 0;
+	}
+
+	/* clear the status bits for the next_to_use descriptor */
+	rx_desc->wb.qword1.status_error_len = 0;
+	i40e_release_rx_desc(rx_ring, ntu);
+
+	return count == nb_buffs ? true : false;
 }
 
 /**
@@ -365,7 +363,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 			break;
 
 		bi = *i40e_rx_bi(rx_ring, next_to_clean);
-		bi->data_end = bi->data + size;
+		xsk_buff_set_size(bi, size);
 		xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
 
 		xdp_res = i40e_run_xdp_zc(rx_ring, bi);