mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-25 21:24:08 +08:00
vdpa: mlx5: synchronize driver status with CVQ
Currently, CVQ doesn't have any synchronization with the driver
status. The CVQ emulation code can then run in the middle of:
1) device reset
2) device status changed
3) map updating
This will lead to several unexpected issues, such as trying to execute a CVQ
command after the driver has been torn down.
Fix this by using reslock to synchronize CVQ emulation code with
the driver status changing:
- protect the whole device reset, status changing and set_map()
updating with reslock
- protect the CVQ handler with the reslock and check
VIRTIO_CONFIG_S_DRIVER_OK in the CVQ handler
This will guarantee that:
1) CVQ handler won't work if VIRTIO_CONFIG_S_DRIVER_OK is not set
2) CVQ handler will see a consistent state of the driver instead of
the partial one when it is running in the middle of the
teardown_driver() or setup_driver().
Cc: 5262912ef3
("vdpa/mlx5: Add support for control VQ and MAC setting")
Signed-off-by: Jason Wang <jasowang@redhat.com>
Link: https://lore.kernel.org/r/20220329042109.4029-2-jasowang@redhat.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Eli Cohen <elic@nvidia.com>
This commit is contained in:
parent
55ebf0d60e
commit
1c80cf031e
@ -1659,11 +1659,17 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
|
|||||||
mvdev = wqent->mvdev;
|
mvdev = wqent->mvdev;
|
||||||
ndev = to_mlx5_vdpa_ndev(mvdev);
|
ndev = to_mlx5_vdpa_ndev(mvdev);
|
||||||
cvq = &mvdev->cvq;
|
cvq = &mvdev->cvq;
|
||||||
|
|
||||||
|
mutex_lock(&ndev->reslock);
|
||||||
|
|
||||||
|
if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
|
||||||
|
goto out;
|
||||||
|
|
||||||
if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
|
if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
|
||||||
return;
|
goto out;
|
||||||
|
|
||||||
if (!cvq->ready)
|
if (!cvq->ready)
|
||||||
return;
|
goto out;
|
||||||
|
|
||||||
while (true) {
|
while (true) {
|
||||||
err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov, &cvq->head,
|
err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov, &cvq->head,
|
||||||
@ -1701,6 +1707,9 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
|
|||||||
queue_work(mvdev->wq, &wqent->work);
|
queue_work(mvdev->wq, &wqent->work);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
out:
|
||||||
|
mutex_unlock(&ndev->reslock);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
|
static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
|
||||||
@ -2175,7 +2184,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
|
|||||||
goto err_mr;
|
goto err_mr;
|
||||||
|
|
||||||
if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
|
if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
|
||||||
return 0;
|
goto err_mr;
|
||||||
|
|
||||||
restore_channels_info(ndev);
|
restore_channels_info(ndev);
|
||||||
err = setup_driver(mvdev);
|
err = setup_driver(mvdev);
|
||||||
@ -2190,12 +2199,14 @@ err_mr:
|
|||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* reslock must be held for this function */
|
||||||
static int setup_driver(struct mlx5_vdpa_dev *mvdev)
|
static int setup_driver(struct mlx5_vdpa_dev *mvdev)
|
||||||
{
|
{
|
||||||
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
|
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
mutex_lock(&ndev->reslock);
|
WARN_ON(!mutex_is_locked(&ndev->reslock));
|
||||||
|
|
||||||
if (ndev->setup) {
|
if (ndev->setup) {
|
||||||
mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n");
|
mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n");
|
||||||
err = 0;
|
err = 0;
|
||||||
@ -2225,7 +2236,6 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
|
|||||||
goto err_fwd;
|
goto err_fwd;
|
||||||
}
|
}
|
||||||
ndev->setup = true;
|
ndev->setup = true;
|
||||||
mutex_unlock(&ndev->reslock);
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
@ -2236,23 +2246,23 @@ err_tir:
|
|||||||
err_rqt:
|
err_rqt:
|
||||||
teardown_virtqueues(ndev);
|
teardown_virtqueues(ndev);
|
||||||
out:
|
out:
|
||||||
mutex_unlock(&ndev->reslock);
|
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* reslock must be held for this function */
|
||||||
static void teardown_driver(struct mlx5_vdpa_net *ndev)
|
static void teardown_driver(struct mlx5_vdpa_net *ndev)
|
||||||
{
|
{
|
||||||
mutex_lock(&ndev->reslock);
|
|
||||||
|
WARN_ON(!mutex_is_locked(&ndev->reslock));
|
||||||
|
|
||||||
if (!ndev->setup)
|
if (!ndev->setup)
|
||||||
goto out;
|
return;
|
||||||
|
|
||||||
remove_fwd_to_tir(ndev);
|
remove_fwd_to_tir(ndev);
|
||||||
destroy_tir(ndev);
|
destroy_tir(ndev);
|
||||||
destroy_rqt(ndev);
|
destroy_rqt(ndev);
|
||||||
teardown_virtqueues(ndev);
|
teardown_virtqueues(ndev);
|
||||||
ndev->setup = false;
|
ndev->setup = false;
|
||||||
out:
|
|
||||||
mutex_unlock(&ndev->reslock);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
|
static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
|
||||||
@ -2273,6 +2283,8 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
|
|||||||
|
|
||||||
print_status(mvdev, status, true);
|
print_status(mvdev, status, true);
|
||||||
|
|
||||||
|
mutex_lock(&ndev->reslock);
|
||||||
|
|
||||||
if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
|
if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
|
||||||
if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
|
if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
|
||||||
err = setup_driver(mvdev);
|
err = setup_driver(mvdev);
|
||||||
@ -2282,16 +2294,19 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
|
|||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n");
|
mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n");
|
||||||
return;
|
goto err_clear;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ndev->mvdev.status = status;
|
ndev->mvdev.status = status;
|
||||||
|
mutex_unlock(&ndev->reslock);
|
||||||
return;
|
return;
|
||||||
|
|
||||||
err_setup:
|
err_setup:
|
||||||
mlx5_vdpa_destroy_mr(&ndev->mvdev);
|
mlx5_vdpa_destroy_mr(&ndev->mvdev);
|
||||||
ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
|
ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
|
||||||
|
err_clear:
|
||||||
|
mutex_unlock(&ndev->reslock);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int mlx5_vdpa_reset(struct vdpa_device *vdev)
|
static int mlx5_vdpa_reset(struct vdpa_device *vdev)
|
||||||
@ -2301,6 +2316,8 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
|
|||||||
|
|
||||||
print_status(mvdev, 0, true);
|
print_status(mvdev, 0, true);
|
||||||
mlx5_vdpa_info(mvdev, "performing device reset\n");
|
mlx5_vdpa_info(mvdev, "performing device reset\n");
|
||||||
|
|
||||||
|
mutex_lock(&ndev->reslock);
|
||||||
teardown_driver(ndev);
|
teardown_driver(ndev);
|
||||||
clear_vqs_ready(ndev);
|
clear_vqs_ready(ndev);
|
||||||
mlx5_vdpa_destroy_mr(&ndev->mvdev);
|
mlx5_vdpa_destroy_mr(&ndev->mvdev);
|
||||||
@ -2313,6 +2330,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
|
|||||||
if (mlx5_vdpa_create_mr(mvdev, NULL))
|
if (mlx5_vdpa_create_mr(mvdev, NULL))
|
||||||
mlx5_vdpa_warn(mvdev, "create MR failed\n");
|
mlx5_vdpa_warn(mvdev, "create MR failed\n");
|
||||||
}
|
}
|
||||||
|
mutex_unlock(&ndev->reslock);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -2348,19 +2366,24 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
|
|||||||
static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
|
static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
|
||||||
{
|
{
|
||||||
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
|
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
|
||||||
|
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
|
||||||
bool change_map;
|
bool change_map;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
|
mutex_lock(&ndev->reslock);
|
||||||
|
|
||||||
err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
|
err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
|
||||||
if (err) {
|
if (err) {
|
||||||
mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
|
mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
|
||||||
return err;
|
goto err;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (change_map)
|
if (change_map)
|
||||||
return mlx5_vdpa_change_map(mvdev, iotlb);
|
err = mlx5_vdpa_change_map(mvdev, iotlb);
|
||||||
|
|
||||||
return 0;
|
err:
|
||||||
|
mutex_unlock(&ndev->reslock);
|
||||||
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void mlx5_vdpa_free(struct vdpa_device *vdev)
|
static void mlx5_vdpa_free(struct vdpa_device *vdev)
|
||||||
|
Loading…
Reference in New Issue
Block a user