virtio-net: correctly update XDP_TX counters

Commit 5b8f3c8d30 ("virtio_net: Add XDP related stats") tries to
count TX XDP stats in virtnet_receive(). This causes several
issues:

- virtnet_xdp_sq() was called without checking whether an XDP program
  is attached. This can lead to an out-of-bounds access when there are
  not enough txqs reserved for XDP (see the sketch below).
- Stats were updated even when there was no XDP program or no XDP_TX
  action.

Fix this by reusing virtnet_xdp_xmit() for XDP_TX, which counts the TX
XDP stats itself, and remove the now-unnecessary tx stats embedded in
the rx stats.
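
To make the first issue concrete, here is a minimal user-space sketch of
the indexing that virtnet_xdp_sq() performs (the struct, field names and
fixed queue count below are hypothetical simplifications; the real driver
computes curr_queue_pairs - xdp_queue_pairs + smp_processor_id() and uses
it to index vi->sq). With no XDP program attached, xdp_queue_pairs is 0
and the result points past the last allocated txq:

#include <stdio.h>

/* Hypothetical, simplified device state; the real driver uses struct virtnet_info. */
struct vi_model {
	unsigned int curr_queue_pairs;	/* txq/rxq pairs currently in use */
	unsigned int xdp_queue_pairs;	/* extra txqs reserved for XDP, 0 when no XDP */
};

/* Mirrors the index computation in virtnet_xdp_sq(). */
static unsigned int xdp_sq_index(const struct vi_model *vi, unsigned int cpu)
{
	return vi->curr_queue_pairs - vi->xdp_queue_pairs + cpu;
}

int main(void)
{
	struct vi_model no_xdp = { .curr_queue_pairs = 4, .xdp_queue_pairs = 0 };

	/* Only sq[0]..sq[3] exist, yet the XDP tx queue lookup yields 4 + cpu. */
	printf("cpu 1 -> sq[%u]\n", xdp_sq_index(&no_xdp, 1));
	return 0;
}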

Reported-by: syzbot+604f8271211546f5b3c7@syzkaller.appspotmail.com
Fixes: 5b8f3c8d30 ("virtio_net: Add XDP related stats")
Cc: Toshiaki Makita <makita.toshiaki@lab.ntt.co.jp>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Jason Wang 2018-07-31 17:43:38 +08:00 committed by David S. Miller
parent 6293e4d674
commit ca9e83b4a5

drivers/net/virtio_net.c

@@ -105,10 +105,6 @@ struct virtnet_rq_stats {
 
 struct virtnet_rx_stats {
 	struct virtnet_rq_stat_items rx;
-	struct {
-		unsigned int xdp_tx;
-		unsigned int xdp_tx_drops;
-	} tx;
 };
 
 #define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
@@ -485,22 +481,6 @@ static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
 	return &vi->sq[qp];
 }
 
-static int __virtnet_xdp_tx_xmit(struct virtnet_info *vi,
-				 struct xdp_frame *xdpf)
-{
-	struct xdp_frame *xdpf_sent;
-	struct send_queue *sq;
-	unsigned int len;
-
-	sq = virtnet_xdp_sq(vi);
-
-	/* Free up any pending old buffers before queueing new ones. */
-	while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
-		xdp_return_frame(xdpf_sent);
-
-	return __virtnet_xdp_xmit_one(vi, sq, xdpf);
-}
-
 static int virtnet_xdp_xmit(struct net_device *dev,
 			    int n, struct xdp_frame **frames, u32 flags)
 {
@@ -707,10 +687,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
 		xdpf = convert_to_xdp_frame(&xdp);
 		if (unlikely(!xdpf))
 			goto err_xdp;
-		stats->tx.xdp_tx++;
-		err = __virtnet_xdp_tx_xmit(vi, xdpf);
-		if (unlikely(err)) {
-			stats->tx.xdp_tx_drops++;
+		err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
+		if (unlikely(err < 0)) {
 			trace_xdp_exception(vi->dev, xdp_prog, act);
 			goto err_xdp;
 		}
@@ -879,10 +857,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			xdpf = convert_to_xdp_frame(&xdp);
 			if (unlikely(!xdpf))
 				goto err_xdp;
-			stats->tx.xdp_tx++;
-			err = __virtnet_xdp_tx_xmit(vi, xdpf);
-			if (unlikely(err)) {
-				stats->tx.xdp_tx_drops++;
+			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
+			if (unlikely(err < 0)) {
 				trace_xdp_exception(vi->dev, xdp_prog, act);
 				if (unlikely(xdp_page != page))
 					put_page(xdp_page);
@@ -1315,7 +1291,6 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 {
 	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct virtnet_rx_stats stats = {};
-	struct send_queue *sq;
 	unsigned int len;
 	void *buf;
 	int i;
@@ -1351,12 +1326,6 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 	}
 
 	u64_stats_update_end(&rq->stats.syncp);
 
-	sq = virtnet_xdp_sq(vi);
-	u64_stats_update_begin(&sq->stats.syncp);
-	sq->stats.xdp_tx += stats.tx.xdp_tx;
-	sq->stats.xdp_tx_drops += stats.tx.xdp_tx_drops;
-	u64_stats_update_end(&sq->stats.syncp);
-
 	return stats.rx.packets;
 }
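
After this patch the XDP_TX counters are bumped in exactly one place, at
the end of virtnet_xdp_xmit(). As a rough sketch of that accounting
(assuming the counter layout introduced by 5b8f3c8d30; n and drops stand
for the frames submitted and the frames that could not be queued), it
mirrors the block removed from virtnet_receive() above:

	/* Sketch, not the verbatim function: tail of virtnet_xdp_xmit(). */
	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.xdp_tx += n;			/* XDP_TX/ndo_xdp_xmit frames seen */
	sq->stats.xdp_tx_drops += drops;	/* frames that failed to queue */
	u64_stats_update_end(&sq->stats.syncp);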