firmware: arm_scmi: Fix deferred_tx_wq release on error paths

Use devres to allocate the dedicated deferred_tx_wq polling workqueue so
that the proper resource release is triggered automatically on error paths.

Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Fixes: 5a3b7185c4 ("firmware: arm_scmi: Add atomic mode support to virtio transport")
Signed-off-by: Cristian Marussi <cristian.marussi@arm.com>
Link: https://lore.kernel.org/r/20221028140833.280091-6-cristian.marussi@arm.com
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
--- a/drivers/firmware/arm_scmi/virtio.c
+++ b/drivers/firmware/arm_scmi/virtio.c
@@ -148,7 +148,6 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
 {
 	unsigned long flags;
 	DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done);
-	void *deferred_wq = NULL;
 
 	/*
	 * Prepare to wait for the last release if not already released
@@ -162,16 +161,11 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
 
 		vioch->shutdown_done = &vioch_shutdown_done;
 		virtio_break_device(vioch->vqueue->vdev);
-		if (!vioch->is_rx && vioch->deferred_tx_wq) {
-			deferred_wq = vioch->deferred_tx_wq;
+		if (!vioch->is_rx && vioch->deferred_tx_wq)
 			/* Cannot be kicked anymore after this...*/
 			vioch->deferred_tx_wq = NULL;
-		}
 		spin_unlock_irqrestore(&vioch->lock, flags);
 
-		if (deferred_wq)
-			destroy_workqueue(deferred_wq);
-
 		scmi_vio_channel_release(vioch);
 
 		/* Let any possibly concurrent RX path release the channel */
@@ -416,6 +410,11 @@ static bool virtio_chan_available(struct device *dev, int idx)
 	return vioch && !vioch->cinfo;
 }
 
+static void scmi_destroy_tx_workqueue(void *deferred_tx_wq)
+{
+	destroy_workqueue(deferred_tx_wq);
+}
+
 static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
 			     bool tx)
 {
@@ -430,6 +429,8 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
 
 	/* Setup a deferred worker for polling. */
 	if (tx && !vioch->deferred_tx_wq) {
+		int ret;
+
 		vioch->deferred_tx_wq =
 			alloc_workqueue(dev_name(&scmi_vdev->dev),
 					WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
@@ -437,6 +438,11 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
 		if (!vioch->deferred_tx_wq)
 			return -ENOMEM;
 
+		ret = devm_add_action_or_reset(dev, scmi_destroy_tx_workqueue,
+					       vioch->deferred_tx_wq);
+		if (ret)
+			return ret;
+
 		INIT_WORK(&vioch->deferred_tx_work,
 			  scmi_vio_deferred_tx_worker);
 	}
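
For reference, a minimal sketch of the devres action pattern this fix relies on,
using hypothetical example_probe()/example_destroy_wq() names for illustration:
once devm_add_action_or_reset() succeeds, the registered action runs automatically
on probe failure or device unbind, so later error paths need no explicit
destroy_workqueue() call.

#include <linux/device.h>
#include <linux/workqueue.h>

/* Devres action: invoked automatically when the owning device goes away. */
static void example_destroy_wq(void *wq)
{
	destroy_workqueue(wq);
}

static int example_probe(struct device *dev)
{
	struct workqueue_struct *wq;
	int ret;

	wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	/*
	 * If registering the action fails, devm_add_action_or_reset()
	 * runs it immediately, so the workqueue is never leaked.
	 */
	ret = devm_add_action_or_reset(dev, example_destroy_wq, wq);
	if (ret)
		return ret;

	/* Any later error return needs no explicit destroy_workqueue(). */
	return 0;
}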