Mirror of https://github.com/qemu/qemu.git
net: netmap: improve netmap_receive_iov()
Changes:

- Save CPU cycles by computing the return value while scanning the input
  iovec, rather than calling iov_size() at the end.
- Remove the check for s->tx != NULL, because it cannot happen.
- Cache ring->tail in a local variable and use it to check for space in the
  TX ring. The use of nm_ring_empty() was invalid, because nobody updates
  ring->cur and ring->head at that point.
- In case we run out of netmap slots in the middle of a packet, move the
  wake-up point by advancing ring->cur, but do not expose the incomplete
  packet (i.e., do not also update ring->head).

Signed-off-by: Vincenzo Maffione <v.maffione@gmail.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Commit 4875bf1406 (parent c7cbb6b48f)

net/netmap.c: 29 changed lines
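For context, here is a minimal sketch of the netmap TX ring model that the changes above rely on. It is illustrative only and not part of the commit: the authoritative helpers live in netmap's <net/netmap_user.h> and their exact definitions vary across netmap versions, and the names example_ring_space() and example_ring_next() are invented for this sketch.

#include <stdint.h>
#include <net/netmap_user.h>   /* struct netmap_ring: head, cur, tail, num_slots */

/*
 * Netmap TX ring cursors, simplified:
 *   head - first slot owned by userspace; advancing it past filled slots
 *          publishes those slots to the kernel for transmission,
 *   cur  - wake-up point: roughly, a blocked poll()/select() is woken once
 *          the kernel makes slots available past cur,
 *   tail - first slot owned by the kernel.
 */

/* Illustrative count of slots currently usable by userspace. */
static inline uint32_t example_ring_space(const struct netmap_ring *ring)
{
    int ret = (int)ring->tail - (int)ring->cur;

    if (ret < 0) {
        ret += ring->num_slots;    /* slot indices wrap around the ring */
    }
    return (uint32_t)ret;
}

/* Illustrative successor of slot index i, wrapping at the end of the ring. */
static inline uint32_t example_ring_next(const struct netmap_ring *ring, uint32_t i)
{
    return (i + 1 == ring->num_slots) ? 0 : i + 1;
}

With these semantics, the error path introduced by the patch (ring->cur = tail with ring->head untouched) asks the kernel for a notification once more slots free up while never publishing a partially built packet, whereas the normal completion path publishes everything at once with ring->head = ring->cur = i.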
--- a/net/netmap.c
+++ b/net/netmap.c
@@ -159,21 +159,22 @@ static ssize_t netmap_receive_iov(NetClientState *nc,
 {
     NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
     struct netmap_ring *ring = s->tx;
+    unsigned int tail = ring->tail;
+    ssize_t totlen = 0;
     uint32_t last;
     uint32_t idx;
     uint8_t *dst;
     int j;
     uint32_t i;
 
-    if (unlikely(!ring)) {
-        /* Drop the packet. */
-        return iov_size(iov, iovcnt);
-    }
-
-    last = i = ring->cur;
+    last = i = ring->head;
 
     if (nm_ring_space(ring) < iovcnt) {
-        /* Not enough netmap slots. */
+        /* Not enough netmap slots. Tell the kernel that we have seen the new
+         * available slots (so that it notifies us again when it has more
+         * ones), but without publishing any new slots to be processed
+         * (e.g., we don't advance ring->head). */
+        ring->cur = tail;
         netmap_write_poll(s, true);
         return 0;
     }
@@ -183,14 +184,17 @@ static ssize_t netmap_receive_iov(NetClientState *nc,
         int offset = 0;
         int nm_frag_size;
 
+        totlen += iov_frag_size;
+
         /* Split each iovec fragment over more netmap slots, if
            necessary. */
         while (iov_frag_size) {
             nm_frag_size = MIN(iov_frag_size, ring->nr_buf_size);
 
-            if (unlikely(nm_ring_empty(ring))) {
-                /* We run out of netmap slots while splitting the
+            if (unlikely(i == tail)) {
+                /* We ran out of netmap slots while splitting the
                    iovec fragments. */
+                ring->cur = tail;
                 netmap_write_poll(s, true);
                 return 0;
             }
@@ -212,12 +216,13 @@ static ssize_t netmap_receive_iov(NetClientState *nc,
     /* The last slot must not have NS_MOREFRAG set. */
     ring->slot[last].flags &= ~NS_MOREFRAG;
 
-    /* Now update ring->cur and ring->head. */
-    ring->cur = ring->head = i;
+    /* Now update ring->head and ring->cur to publish the new slots and
+     * the new wakeup point. */
+    ring->head = ring->cur = i;
 
     ioctl(s->nmd->fd, NIOCTXSYNC, NULL);
 
-    return iov_size(iov, iovcnt);
+    return totlen;
 }
 
 static ssize_t netmap_receive(NetClientState *nc,