vdpa: mlx5: prevent cvq work from hogging CPU

A userspace-triggerable infinite loop could occur in
mlx5_cvq_kick_handler() if userspace keeps sending a large number of
cvq requests.

Fix this by introducing a quota and re-queueing the work if we run out
of budget (currently the implicit budget is one). While at it, use a
per-device work struct to avoid on-demand memory allocation for the
cvq.
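
The handler change below amounts to a generic bounded-work pattern:
process a limited number of requests per work invocation, then
re-queue the work item so other items on the workqueue get a turn. A
minimal sketch of that pattern, with invented names (my_dev,
have_pending_request() and handle_one_request() are illustrative, not
functions from this driver):

	#include <linux/workqueue.h>

	struct my_dev {
		struct workqueue_struct *wq;
		struct work_struct work;	/* embedded: allocated once with the device */
	};

	static bool have_pending_request(struct my_dev *dev);	/* hypothetical */
	static void handle_one_request(struct my_dev *dev);	/* hypothetical */

	static void bounded_handler(struct work_struct *work)
	{
		struct my_dev *dev = container_of(work, struct my_dev, work);
		int budget = 1;		/* mirrors the implicit budget of one */

		while (have_pending_request(dev)) {
			handle_one_request(dev);
			if (--budget <= 0) {
				/* Out of budget: re-queue ourselves and
				 * yield instead of looping indefinitely.
				 */
				queue_work(dev->wq, &dev->work);
				break;
			}
		}
	}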

Fixes: 5262912ef3 ("vdpa/mlx5: Add support for control VQ and MAC setting")
Signed-off-by: Jason Wang <jasowang@redhat.com>
Link: https://lore.kernel.org/r/20220329042109.4029-1-jasowang@redhat.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Eli Cohen <elic@nvidia.com>
---

diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -163,6 +163,7 @@ struct mlx5_vdpa_net {
 	u32 cur_num_vqs;
 	struct notifier_block nb;
 	struct vdpa_callback config_cb;
+	struct mlx5_vdpa_wq_ent cvq_ent;
 };
 
 static void free_resources(struct mlx5_vdpa_net *ndev);
@@ -1659,10 +1660,10 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
 	ndev = to_mlx5_vdpa_ndev(mvdev);
 	cvq = &mvdev->cvq;
 	if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
-		goto out;
+		return;
 
 	if (!cvq->ready)
-		goto out;
+		return;
 
 	while (true) {
 		err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov, &cvq->head,
@@ -1696,9 +1697,10 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
 		if (vringh_need_notify_iotlb(&cvq->vring))
 			vringh_notify(&cvq->vring);
 
+		queue_work(mvdev->wq, &wqent->work);
+		break;
 	}
-out:
-	kfree(wqent);
 }
 
 static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
@@ -1706,7 +1708,6 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 	struct mlx5_vdpa_virtqueue *mvq;
-	struct mlx5_vdpa_wq_ent *wqent;
 
 	if (!is_index_valid(mvdev, idx))
 		return;
@@ -1715,13 +1716,7 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
 		if (!mvdev->wq || !mvdev->cvq.ready)
 			return;
 
-		wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
-		if (!wqent)
-			return;
-
-		wqent->mvdev = mvdev;
-		INIT_WORK(&wqent->work, mlx5_cvq_kick_handler);
-		queue_work(mvdev->wq, &wqent->work);
+		queue_work(mvdev->wq, &ndev->cvq_ent.work);
 		return;
 	}
 
@@ -2740,6 +2735,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
 	if (err)
 		goto err_mr;
 
+	ndev->cvq_ent.mvdev = mvdev;
+	INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler);
 	mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
 	if (!mvdev->wq) {
 		err = -ENOMEM;
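
For context, the work entry embedded above is the driver's existing
wrapper type (defined elsewhere in the mlx5 vdpa code, not part of
this diff):

	struct mlx5_vdpa_wq_ent {
		struct work_struct work;
		struct mlx5_vdpa_dev *mvdev;
	};

Embedding one cvq_ent per device means the work struct is set up once
at device-add time: the kick path loses a GFP_ATOMIC allocation whose
failure silently dropped the kick, and the handler no longer frees its
own work item. As a side effect, queue_work() on an already-pending
work item is a no-op, so back-to-back kicks coalesce into a single
handler run.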