Merge branch 'virtio-ctrl-buffer-fixes'

Michael S. Tsirkin says:

====================
virtio: ctrl buffer fixes

Here are a couple of fixes related to the virtio control buffer.
Lightly tested on x86 only.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2018-04-19 16:33:21 -04:00
commit 0df8bb01cb


@@ -147,6 +147,17 @@ struct receive_queue {
 	struct xdp_rxq_info xdp_rxq;
 };
 
+/* Control VQ buffers: protected by the rtnl lock */
+struct control_buf {
+	struct virtio_net_ctrl_hdr hdr;
+	virtio_net_ctrl_ack status;
+	struct virtio_net_ctrl_mq mq;
+	u8 promisc;
+	u8 allmulti;
+	__virtio16 vid;
+	__virtio64 offloads;
+};
+
 struct virtnet_info {
 	struct virtio_device *vdev;
 	struct virtqueue *cvq;
@@ -192,14 +203,7 @@ struct virtnet_info {
 	struct hlist_node node;
 	struct hlist_node node_dead;
 
-	/* Control VQ buffers: protected by the rtnl lock */
-	struct virtio_net_ctrl_hdr ctrl_hdr;
-	virtio_net_ctrl_ack ctrl_status;
-	struct virtio_net_ctrl_mq ctrl_mq;
-	u8 ctrl_promisc;
-	u8 ctrl_allmulti;
-	u16 ctrl_vid;
-	u64 ctrl_offloads;
+	struct control_buf *ctrl;
 
 	/* Ethtool settings */
 	u8 duplex;
@@ -1461,25 +1465,25 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
 	/* Caller should know better */
 	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
 
-	vi->ctrl_status = ~0;
-	vi->ctrl_hdr.class = class;
-	vi->ctrl_hdr.cmd = cmd;
+	vi->ctrl->status = ~0;
+	vi->ctrl->hdr.class = class;
+	vi->ctrl->hdr.cmd = cmd;
 	/* Add header */
-	sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
+	sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
 	sgs[out_num++] = &hdr;
 
 	if (out)
 		sgs[out_num++] = out;
 
 	/* Add return status. */
-	sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
+	sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
 	sgs[out_num] = &stat;
 
 	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
 	virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
 
 	if (unlikely(!virtqueue_kick(vi->cvq)))
-		return vi->ctrl_status == VIRTIO_NET_OK;
+		return vi->ctrl->status == VIRTIO_NET_OK;
 
 	/* Spin for a response, the kick causes an ioport write, trapping
 	 * into the hypervisor, so the request should be handled immediately.
@@ -1488,7 +1492,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
 	       !virtqueue_is_broken(vi->cvq))
 		cpu_relax();
 
-	return vi->ctrl_status == VIRTIO_NET_OK;
+	return vi->ctrl->status == VIRTIO_NET_OK;
 }
 
 static int virtnet_set_mac_address(struct net_device *dev, void *p)
@@ -1600,8 +1604,8 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
 		return 0;
 
-	vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
-	sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq));
+	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
+	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
 				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
@@ -1660,22 +1664,22 @@ static void virtnet_set_rx_mode(struct net_device *dev)
 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
 		return;
 
-	vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0);
-	vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
+	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
+	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
 
-	sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));
+	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
 		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
-			 vi->ctrl_promisc ? "en" : "dis");
+			 vi->ctrl->promisc ? "en" : "dis");
 
-	sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti));
+	sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
 				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
 		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
-			 vi->ctrl_allmulti ? "en" : "dis");
+			 vi->ctrl->allmulti ? "en" : "dis");
 
 	uc_count = netdev_uc_count(dev);
 	mc_count = netdev_mc_count(dev);
@@ -1721,8 +1725,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
 	struct virtnet_info *vi = netdev_priv(dev);
 	struct scatterlist sg;
 
-	vi->ctrl_vid = vid;
-	sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
+	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
+	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
 				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
@@ -1736,8 +1740,8 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
 	struct virtnet_info *vi = netdev_priv(dev);
 	struct scatterlist sg;
 
-	vi->ctrl_vid = vid;
-	sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
+	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
+	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
 				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
@@ -2133,9 +2137,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
 {
 	struct scatterlist sg;
-	vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads);
+	vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
 
-	sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads));
+	sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
 				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
@@ -2358,6 +2362,7 @@ static void virtnet_free_queues(struct virtnet_info *vi)
 
 	kfree(vi->rq);
 	kfree(vi->sq);
+	kfree(vi->ctrl);
 }
 
 static void _free_receive_bufs(struct virtnet_info *vi)
@@ -2550,6 +2555,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 {
 	int i;
 
+	vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
+	if (!vi->ctrl)
+		goto err_ctrl;
 	vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
 	if (!vi->sq)
 		goto err_sq;
@@ -2578,6 +2586,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 err_rq:
 	kfree(vi->sq);
 err_sq:
+	kfree(vi->ctrl);
+err_ctrl:
 	return -ENOMEM;
 }
 
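
Taken together, the hunks above move the control-VQ buffers out of struct virtnet_info and behind a single, separately allocated struct control_buf. For orientation only, here is a condensed sketch of the resulting lifecycle (allocate, use for one control command, free), assembled from the hunks above. The function name ctrl_buf_lifecycle_sketch() is purely illustrative, error handling is trimmed, and the code assumes the driver-local struct virtnet_info and virtnet_send_command() shown in the diff, so it is not a drop-in addition to the patch.

/* Illustrative sketch only: condensed from the hunks above. */
static int ctrl_buf_lifecycle_sketch(struct virtnet_info *vi, u16 queue_pairs)
{
	struct scatterlist sg;
	bool ok;

	/* Allocation, as in virtnet_alloc_queues(): one buffer per device. */
	vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
	if (!vi->ctrl)
		return -ENOMEM;

	/* Use, as in _virtnet_set_queues(): fill one field, wrap it in a
	 * scatterlist, and let virtnet_send_command() chain vi->ctrl->hdr
	 * and vi->ctrl->status around it on the control virtqueue.
	 */
	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
	ok = virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg);

	/* Teardown, as in virtnet_free_queues(): freed along with the queues. */
	kfree(vi->ctrl);
	vi->ctrl = NULL;
	return ok ? 0 : -EINVAL;
}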