accel/ivpu: Stop job_done_thread on suspend

Stop the job_done thread when going to suspend. Use kthread_park() instead
of kthread_stop() to avoid the memory allocation, and possible failure, of
re-creating the thread on resume.
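
Below is a minimal sketch (not part of the patch) of the park/unpark pattern
this relies on; the demo_* names and the msleep() work loop are illustrative
only:

/* Illustrative worker kthread that parks instead of exiting. */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>

static struct task_struct *demo_task;

static int demo_thread_fn(void *arg)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();	/* sleeps here until unparked */
		/* ... handle one unit of work ... */
		msleep(20);
	}
	return 0;
}

static int demo_start(void)	/* probe/init: may allocate and fail */
{
	demo_task = kthread_run(demo_thread_fn, NULL, "demo_worker");
	return PTR_ERR_OR_ZERO(demo_task);
}

/* suspend:  kthread_park(demo_task);   no allocation on this path   */
/* resume:   kthread_unpark(demo_task); no allocation on this path   */
/* teardown: kthread_unpark(demo_task); kthread_stop(demo_task);     */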

Use a separate function as the thread wake-up condition. Take the spin lock
there so that rx_msg_list is properly protected against concurrent access.
This avoids a race condition when the rx_msg_list list is modified and read
in ivpu_ipc_receive() at the same time.
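
A minimal sketch of such a consolidated wake-up condition (not taken from the
driver; demo_* names are illustrative and the waiter is assumed to be a
kthread):

/* Evaluate the whole wait condition in one helper so the list is only
 * ever inspected under the same lock the writers hold. */
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct demo_consumer {
	wait_queue_head_t rx_msg_wq;
	spinlock_t rx_msg_lock;
	struct list_head rx_msg_list;
};

static bool demo_rx_need_wakeup(struct demo_consumer *cons)
{
	bool ret;

	if (kthread_should_stop() || kthread_should_park())
		return true;

	spin_lock_irq(&cons->rx_msg_lock);
	ret = !list_empty(&cons->rx_msg_list);
	spin_unlock_irq(&cons->rx_msg_lock);

	return ret;
}

static long demo_wait_for_msg(struct demo_consumer *cons, unsigned long timeout_ms)
{
	/* 0 on timeout, -ERESTARTSYS on signal, > 0 when the condition is met */
	return wait_event_interruptible_timeout(cons->rx_msg_wq,
						demo_rx_need_wakeup(cons),
						msecs_to_jiffies(timeout_ms));
}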

Reviewed-by: Karol Wachowski <karol.wachowski@linux.intel.com>
Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Signed-off-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231028155936.1183342-4-stanislaw.gruszka@linux.intel.com
Author: Stanislaw Gruszka, 2023-10-28 17:59:31 +02:00
commit 57c7e3e480 (parent a06eb9be49)
4 changed files with 35 additions and 6 deletions

drivers/accel/ivpu/ivpu_drv.c

@@ -378,6 +378,7 @@ int ivpu_boot(struct ivpu_device *vdev)
 	enable_irq(vdev->irq);
 	ivpu_hw_irq_enable(vdev);
 	ivpu_ipc_enable(vdev);
+	ivpu_job_done_thread_enable(vdev);
 	return 0;
 }
@@ -389,6 +390,7 @@ int ivpu_shutdown(struct ivpu_device *vdev)
 	disable_irq(vdev->irq);
 	ivpu_ipc_disable(vdev);
 	ivpu_mmu_disable(vdev);
+	ivpu_job_done_thread_disable(vdev);
 
 	ret = ivpu_hw_power_down(vdev);
 	if (ret)

drivers/accel/ivpu/ivpu_ipc.c

@@ -202,6 +202,20 @@ unlock:
 	return ret;
 }
 
+static int ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons)
+{
+	int ret = 0;
+
+	if (IS_KTHREAD())
+		ret |= (kthread_should_stop() || kthread_should_park());
+
+	spin_lock_irq(&cons->rx_msg_lock);
+	ret |= !list_empty(&cons->rx_msg_list);
+	spin_unlock_irq(&cons->rx_msg_lock);
+
+	return ret;
+}
+
 int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
 		     struct ivpu_ipc_hdr *ipc_buf,
 		     struct vpu_jsm_msg *ipc_payload, unsigned long timeout_ms)
@@ -211,8 +225,7 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
 	int wait_ret, ret = 0;
 
 	wait_ret = wait_event_interruptible_timeout(cons->rx_msg_wq,
-						    (IS_KTHREAD() && kthread_should_stop()) ||
-						    !list_empty(&cons->rx_msg_list),
+						    ivpu_ipc_rx_need_wakeup(cons),
 						    msecs_to_jiffies(timeout_ms));
 
 	if (IS_KTHREAD() && kthread_should_stop())

drivers/accel/ivpu/ivpu_job.c

@@ -590,6 +590,11 @@ static int ivpu_job_done_thread(void *arg)
 				ivpu_pm_schedule_recovery(vdev);
 			}
 		}
+		if (kthread_should_park()) {
+			ivpu_dbg(vdev, JOB, "Parked %s\n", __func__);
+			kthread_parkme();
+			ivpu_dbg(vdev, JOB, "Unparked %s\n", __func__);
+		}
 	}
 
 	ivpu_ipc_consumer_del(vdev, &cons);
ivpu_ipc_consumer_del(vdev, &cons);
@@ -610,9 +615,6 @@ int ivpu_job_done_thread_init(struct ivpu_device *vdev)
 		return -EIO;
 	}
 
 	get_task_struct(thread);
-	wake_up_process(thread);
-
 	vdev->job_done_thread = thread;
 
 	return 0;
@@ -620,6 +622,16 @@ int ivpu_job_done_thread_init(struct ivpu_device *vdev)
 
 void ivpu_job_done_thread_fini(struct ivpu_device *vdev)
 {
+	kthread_unpark(vdev->job_done_thread);
 	kthread_stop(vdev->job_done_thread);
 	put_task_struct(vdev->job_done_thread);
 }
+
+void ivpu_job_done_thread_disable(struct ivpu_device *vdev)
+{
+	kthread_park(vdev->job_done_thread);
+}
+
+void ivpu_job_done_thread_enable(struct ivpu_device *vdev)
+{
+	kthread_unpark(vdev->job_done_thread);
+}

drivers/accel/ivpu/ivpu_job.h

@@ -61,6 +61,8 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
 int ivpu_job_done_thread_init(struct ivpu_device *vdev);
 void ivpu_job_done_thread_fini(struct ivpu_device *vdev);
+void ivpu_job_done_thread_disable(struct ivpu_device *vdev);
+void ivpu_job_done_thread_enable(struct ivpu_device *vdev);
 void ivpu_jobs_abort_all(struct ivpu_device *vdev);