accel/ivpu: Use threaded IRQ to handle JOB done messages
Remove the job_done thread and replace it with a generic callback-based mechanism.

Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231113170252.758137-6-jacek.lawrynowicz@linux.intel.com
parent 58cde80f45
commit 3b434a3445
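The mechanism adopted here is the kernel's split hard/threaded IRQ handling: the hard handler runs in interrupt context, does only the minimum work (acknowledge the hardware, queue messages) and returns IRQ_WAKE_THREAD when deferred work is pending; the core then invokes the threaded handler in process context, where sleeping and allocation are allowed. Below is a minimal, self-contained sketch of that pattern for orientation. It is not code from this patch; struct my_dev, struct my_msg and the handler names are invented for the example.

/*
 * Minimal sketch of the hard/threaded IRQ split (illustrative only;
 * struct my_dev, struct my_msg and all names here are invented).
 */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_msg {
        struct list_head link;
        void (*callback)(void *ctx);    /* per-message completion callback */
        void *ctx;
};

struct my_dev {
        int irq;
        spinlock_t lock;                /* protects pending */
        struct list_head pending;       /* filled by the hard handler */
};

/*
 * Hard handler: interrupt context, must not sleep. It only acks the
 * hardware and queues messages, then asks for the thread when needed.
 */
static irqreturn_t my_hard_handler(int irq, void *arg)
{
        struct my_dev *dev = arg;
        bool wake_thread;

        spin_lock(&dev->lock);
        /* ...read/clear hardware status, list_add_tail() new messages... */
        wake_thread = !list_empty(&dev->pending);
        spin_unlock(&dev->lock);

        return wake_thread ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

/*
 * Threaded handler: process context, may sleep. Splice the list out
 * under the lock, then run the callbacks with interrupts enabled.
 */
static irqreturn_t my_thread_handler(int irq, void *arg)
{
        struct my_dev *dev = arg;
        struct my_msg *msg, *tmp;
        LIST_HEAD(todo);

        spin_lock_irq(&dev->lock);
        list_splice_tail_init(&dev->pending, &todo);
        spin_unlock_irq(&dev->lock);

        list_for_each_entry_safe(msg, tmp, &todo, link) {
                list_del(&msg->link);
                msg->callback(msg->ctx);
                kfree(msg);
        }

        return IRQ_HANDLED;
}

static int my_irq_init(struct my_dev *dev, struct device *parent)
{
        /* Both handlers are registered at once; the core wakes the
         * thread only when the hard handler returns IRQ_WAKE_THREAD. */
        return devm_request_threaded_irq(parent, dev->irq, my_hard_handler,
                                         my_thread_handler, 0, "my_dev", dev);
}

The diff below applies the same split: vdev->hw->ops->irq_handler stays the hard handler, the new ivpu_irq_thread_handler() calls ivpu_ipc_irq_thread_handler() to drain ipc->cb_msg_list, and job completion is registered as a callback consumer on VPU_IPC_CHAN_JOB_RET instead of running in a dedicated kthread.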
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -318,13 +318,11 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
 	if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST)
 		return 0;
 
-	ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG);
+	ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG, NULL);
 	timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
 	while (1) {
-		ret = ivpu_ipc_irq_handler(vdev);
-		if (ret)
-			break;
+		ivpu_ipc_irq_handler(vdev, NULL);
 		ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
 		if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
 			break;
@@ -378,7 +376,6 @@ int ivpu_boot(struct ivpu_device *vdev)
 	enable_irq(vdev->irq);
 	ivpu_hw_irq_enable(vdev);
 	ivpu_ipc_enable(vdev);
-	ivpu_job_done_thread_enable(vdev);
 	return 0;
 }
 
@@ -388,7 +385,6 @@ void ivpu_prepare_for_reset(struct ivpu_device *vdev)
 	disable_irq(vdev->irq);
 	ivpu_ipc_disable(vdev);
 	ivpu_mmu_disable(vdev);
-	ivpu_job_done_thread_disable(vdev);
 }
 
 int ivpu_shutdown(struct ivpu_device *vdev)
@@ -429,6 +425,13 @@ static const struct drm_driver driver = {
 	.minor = DRM_IVPU_DRIVER_MINOR,
 };
 
+static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
+{
+	struct ivpu_device *vdev = arg;
+
+	return ivpu_ipc_irq_thread_handler(vdev);
+}
+
 static int ivpu_irq_init(struct ivpu_device *vdev)
 {
 	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
@@ -442,8 +445,8 @@ static int ivpu_irq_init(struct ivpu_device *vdev)
 
 	vdev->irq = pci_irq_vector(pdev, 0);
 
-	ret = devm_request_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
-			       IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
+	ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
+					ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
 	if (ret)
 		ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);
 
@@ -581,20 +584,15 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
 
 	ivpu_pm_init(vdev);
 
-	ret = ivpu_job_done_thread_init(vdev);
+	ret = ivpu_boot(vdev);
 	if (ret)
 		goto err_ipc_fini;
 
-	ret = ivpu_boot(vdev);
-	if (ret)
-		goto err_job_done_thread_fini;
-
+	ivpu_job_done_consumer_init(vdev);
 	ivpu_pm_enable(vdev);
 
 	return 0;
 
-err_job_done_thread_fini:
-	ivpu_job_done_thread_fini(vdev);
 err_ipc_fini:
 	ivpu_ipc_fini(vdev);
 err_fw_fini:
@@ -619,7 +617,7 @@ static void ivpu_dev_fini(struct ivpu_device *vdev)
 	ivpu_shutdown(vdev);
 	if (IVPU_WA(d3hot_after_power_off))
 		pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
-	ivpu_job_done_thread_fini(vdev);
+	ivpu_job_done_consumer_fini(vdev);
 	ivpu_pm_cancel_recovery(vdev);
 
 	ivpu_ipc_fini(vdev);
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -17,6 +17,7 @@
 #include <uapi/drm/ivpu_accel.h>
 
 #include "ivpu_mmu_context.h"
+#include "ivpu_ipc.h"
 
 #define DRIVER_NAME "intel_vpu"
 #define DRIVER_DESC "Driver for Intel NPU (Neural Processing Unit)"
@@ -120,7 +121,7 @@ struct ivpu_device {
 	struct list_head bo_list;
 
 	struct xarray submitted_jobs_xa;
-	struct task_struct *job_done_thread;
+	struct ivpu_ipc_consumer job_done_consumer;
 
 	atomic64_t unique_id_counter;
 
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -891,17 +891,20 @@ static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
 }
 
 /* Handler for IRQs from VPU core (irqV) */
-static u32 ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq)
+static bool ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq, bool *wake_thread)
 {
 	u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
 
+	if (!status)
+		return false;
+
 	REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);
 
 	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
 		ivpu_mmu_irq_evtq_handler(vdev);
 
 	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
-		ivpu_ipc_irq_handler(vdev);
+		ivpu_ipc_irq_handler(vdev, wake_thread);
 
 	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
 		ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
@@ -918,17 +921,17 @@ static u32 ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq)
 	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
 		ivpu_hw_37xx_irq_noc_firewall_handler(vdev);
 
-	return status;
+	return true;
 }
 
 /* Handler for IRQs from Buttress core (irqB) */
-static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
+static bool ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
 {
 	u32 status = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
 	bool schedule_recovery = false;
 
-	if (status == 0)
-		return 0;
+	if (!status)
+		return false;
 
 	if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
 		ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
@@ -964,23 +967,27 @@ static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
 	if (schedule_recovery)
 		ivpu_pm_schedule_recovery(vdev);
 
-	return status;
+	return true;
 }
 
 static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr)
 {
 	struct ivpu_device *vdev = ptr;
-	u32 ret_irqv, ret_irqb;
+	bool irqv_handled, irqb_handled, wake_thread = false;
 
 	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
 
-	ret_irqv = ivpu_hw_37xx_irqv_handler(vdev, irq);
-	ret_irqb = ivpu_hw_37xx_irqb_handler(vdev, irq);
+	irqv_handled = ivpu_hw_37xx_irqv_handler(vdev, irq, &wake_thread);
+	irqb_handled = ivpu_hw_37xx_irqb_handler(vdev, irq);
 
 	/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
 	REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
 
-	return IRQ_RETVAL(ret_irqb | ret_irqv);
+	if (wake_thread)
+		return IRQ_WAKE_THREAD;
+	if (irqv_handled || irqb_handled)
+		return IRQ_HANDLED;
+	return IRQ_NONE;
 }
 
 static void ivpu_hw_37xx_diagnose_failure(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -1047,13 +1047,12 @@ static void ivpu_hw_40xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
 }
 
 /* Handler for IRQs from VPU core (irqV) */
-static irqreturn_t ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq)
+static bool ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq, bool *wake_thread)
 {
 	u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
-	irqreturn_t ret = IRQ_NONE;
 
 	if (!status)
-		return IRQ_NONE;
+		return false;
 
 	REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status);
 
@@ -1061,7 +1060,7 @@ static irqreturn_t ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq)
 		ivpu_mmu_irq_evtq_handler(vdev);
 
 	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
-		ret |= ivpu_ipc_irq_handler(vdev);
+		ivpu_ipc_irq_handler(vdev, wake_thread);
 
 	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
 		ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
@@ -1078,17 +1077,17 @@ static irqreturn_t ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq)
 	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
 		ivpu_hw_40xx_irq_noc_firewall_handler(vdev);
 
-	return ret;
+	return true;
 }
 
 /* Handler for IRQs from Buttress core (irqB) */
-static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
+static bool ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
 {
 	bool schedule_recovery = false;
 	u32 status = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
 
-	if (status == 0)
-		return IRQ_NONE;
+	if (!status)
+		return false;
 
 	if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
 		ivpu_dbg(vdev, IRQ, "FREQ_CHANGE");
@@ -1140,26 +1139,27 @@ static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
 	if (schedule_recovery)
 		ivpu_pm_schedule_recovery(vdev);
 
-	return IRQ_HANDLED;
+	return true;
 }
 
 static irqreturn_t ivpu_hw_40xx_irq_handler(int irq, void *ptr)
 {
+	bool irqv_handled, irqb_handled, wake_thread = false;
 	struct ivpu_device *vdev = ptr;
-	irqreturn_t ret = IRQ_NONE;
 
 	REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
 
-	ret |= ivpu_hw_40xx_irqv_handler(vdev, irq);
-	ret |= ivpu_hw_40xx_irqb_handler(vdev, irq);
+	irqv_handled = ivpu_hw_40xx_irqv_handler(vdev, irq, &wake_thread);
+	irqb_handled = ivpu_hw_40xx_irqb_handler(vdev, irq);
 
 	/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
 	REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
 
-	if (ret & IRQ_WAKE_THREAD)
+	if (wake_thread)
 		return IRQ_WAKE_THREAD;
-
-	return ret;
+	if (irqv_handled || irqb_handled)
+		return IRQ_HANDLED;
+	return IRQ_NONE;
 }
 
 static void ivpu_hw_40xx_diagnose_failure(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -5,7 +5,6 @@
 
 #include <linux/genalloc.h>
 #include <linux/highmem.h>
-#include <linux/kthread.h>
 #include <linux/pm_runtime.h>
 #include <linux/wait.h>
 
@@ -18,19 +17,12 @@
 #include "ivpu_pm.h"
 
 #define IPC_MAX_RX_MSG	128
-#define IS_KTHREAD()	(get_current()->flags & PF_KTHREAD)
 
 struct ivpu_ipc_tx_buf {
 	struct ivpu_ipc_hdr ipc;
 	struct vpu_jsm_msg jsm;
 };
 
-struct ivpu_ipc_rx_msg {
-	struct list_head link;
-	struct ivpu_ipc_hdr *ipc_hdr;
-	struct vpu_jsm_msg *jsm_msg;
-};
-
 static void ivpu_ipc_msg_dump(struct ivpu_device *vdev, char *c,
 			      struct ivpu_ipc_hdr *ipc_hdr, u32 vpu_addr)
 {
@@ -140,8 +132,49 @@ static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr)
 	ivpu_hw_reg_ipc_tx_set(vdev, vpu_addr);
 }
 
-void
-ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, u32 channel)
+static void
+ivpu_ipc_rx_msg_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+		    struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
+{
+	struct ivpu_ipc_info *ipc = vdev->ipc;
+	struct ivpu_ipc_rx_msg *rx_msg;
+
+	lockdep_assert_held(&ipc->cons_lock);
+	lockdep_assert_irqs_disabled();
+
+	rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
+	if (!rx_msg) {
+		ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
+		return;
+	}
+
+	atomic_inc(&ipc->rx_msg_count);
+
+	rx_msg->ipc_hdr = ipc_hdr;
+	rx_msg->jsm_msg = jsm_msg;
+	rx_msg->callback = cons->rx_callback;
+
+	if (rx_msg->callback) {
+		list_add_tail(&rx_msg->link, &ipc->cb_msg_list);
+	} else {
+		spin_lock(&cons->rx_lock);
+		list_add_tail(&rx_msg->link, &cons->rx_msg_list);
+		spin_unlock(&cons->rx_lock);
+		wake_up(&cons->rx_msg_wq);
+	}
+}
+
+static void
+ivpu_ipc_rx_msg_del(struct ivpu_device *vdev, struct ivpu_ipc_rx_msg *rx_msg)
+{
+	list_del(&rx_msg->link);
+	ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
+	atomic_dec(&vdev->ipc->rx_msg_count);
+	kfree(rx_msg);
+}
+
+void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+			   u32 channel, ivpu_ipc_rx_callback_t rx_callback)
 {
 	struct ivpu_ipc_info *ipc = vdev->ipc;
 
@@ -150,13 +183,14 @@ ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
 	cons->tx_vpu_addr = 0;
 	cons->request_id = 0;
 	cons->aborted = false;
+	cons->rx_callback = rx_callback;
 	spin_lock_init(&cons->rx_lock);
 	INIT_LIST_HEAD(&cons->rx_msg_list);
 	init_waitqueue_head(&cons->rx_msg_wq);
 
-	spin_lock_irq(&ipc->cons_list_lock);
+	spin_lock_irq(&ipc->cons_lock);
 	list_add_tail(&cons->link, &ipc->cons_list);
-	spin_unlock_irq(&ipc->cons_list_lock);
+	spin_unlock_irq(&ipc->cons_lock);
 }
 
 void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons)
@@ -164,18 +198,13 @@ void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *c
 	struct ivpu_ipc_info *ipc = vdev->ipc;
 	struct ivpu_ipc_rx_msg *rx_msg, *r;
 
-	spin_lock_irq(&ipc->cons_list_lock);
+	spin_lock_irq(&ipc->cons_lock);
 	list_del(&cons->link);
-	spin_unlock_irq(&ipc->cons_list_lock);
+	spin_unlock_irq(&ipc->cons_lock);
 
 	spin_lock_irq(&cons->rx_lock);
-	list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link) {
-		list_del(&rx_msg->link);
-		if (!cons->aborted)
-			ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
-		atomic_dec(&ipc->rx_msg_count);
-		kfree(rx_msg);
-	}
+	list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link)
+		ivpu_ipc_rx_msg_del(vdev, rx_msg);
 	spin_unlock_irq(&cons->rx_lock);
 
 	ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr);
@@ -205,15 +234,12 @@ unlock:
 	return ret;
 }
 
-static int ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons)
+static bool ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons)
 {
-	int ret = 0;
-
-	if (IS_KTHREAD())
-		ret |= (kthread_should_stop() || kthread_should_park());
+	bool ret;
 
 	spin_lock_irq(&cons->rx_lock);
-	ret |= !list_empty(&cons->rx_msg_list) || cons->aborted;
+	ret = !list_empty(&cons->rx_msg_list) || cons->aborted;
 	spin_unlock_irq(&cons->rx_lock);
 
 	return ret;
@@ -221,19 +247,18 @@ static int ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons)
 
 int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
 		     struct ivpu_ipc_hdr *ipc_buf,
-		     struct vpu_jsm_msg *ipc_payload, unsigned long timeout_ms)
+		     struct vpu_jsm_msg *jsm_msg, unsigned long timeout_ms)
 {
-	struct ivpu_ipc_info *ipc = vdev->ipc;
 	struct ivpu_ipc_rx_msg *rx_msg;
 	int wait_ret, ret = 0;
 
+	if (drm_WARN_ONCE(&vdev->drm, cons->rx_callback, "Consumer works only in async mode\n"))
+		return -EINVAL;
+
 	wait_ret = wait_event_timeout(cons->rx_msg_wq,
 				      ivpu_ipc_rx_need_wakeup(cons),
 				      msecs_to_jiffies(timeout_ms));
 
-	if (IS_KTHREAD() && kthread_should_stop())
-		return -EINTR;
-
 	if (wait_ret == 0)
 		return -ETIMEDOUT;
 
@@ -247,27 +272,23 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
 		spin_unlock_irq(&cons->rx_lock);
 		return -EAGAIN;
 	}
-	list_del(&rx_msg->link);
-	spin_unlock_irq(&cons->rx_lock);
 
 	if (ipc_buf)
 		memcpy(ipc_buf, rx_msg->ipc_hdr, sizeof(*ipc_buf));
 	if (rx_msg->jsm_msg) {
-		u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*ipc_payload));
+		u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*jsm_msg));
 
 		if (rx_msg->jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
 			ivpu_dbg(vdev, IPC, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
 			ret = -EBADMSG;
 		}
 
-		if (ipc_payload)
-			memcpy(ipc_payload, rx_msg->jsm_msg, size);
+		if (jsm_msg)
+			memcpy(jsm_msg, rx_msg->jsm_msg, size);
 	}
 
-	ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
-	atomic_dec(&ipc->rx_msg_count);
-	kfree(rx_msg);
-
+	ivpu_ipc_rx_msg_del(vdev, rx_msg);
+	spin_unlock_irq(&cons->rx_lock);
 	return ret;
 }
 
@@ -280,7 +301,7 @@ ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req
 	struct ivpu_ipc_consumer cons;
 	int ret;
 
-	ivpu_ipc_consumer_add(vdev, &cons, channel);
+	ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);
 
 	ret = ivpu_ipc_send(vdev, &cons, req);
 	if (ret) {
@@ -359,35 +380,7 @@ ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons
 	return false;
 }
 
-static void
-ivpu_ipc_dispatch(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
-		  struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
-{
-	struct ivpu_ipc_info *ipc = vdev->ipc;
-	struct ivpu_ipc_rx_msg *rx_msg;
-
-	lockdep_assert_held(&ipc->cons_list_lock);
-	lockdep_assert_irqs_disabled();
-
-	rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
-	if (!rx_msg) {
-		ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
-		return;
-	}
-
-	atomic_inc(&ipc->rx_msg_count);
-
-	rx_msg->ipc_hdr = ipc_hdr;
-	rx_msg->jsm_msg = jsm_msg;
-
-	spin_lock(&cons->rx_lock);
-	list_add_tail(&rx_msg->link, &cons->rx_msg_list);
-	spin_unlock(&cons->rx_lock);
-
-	wake_up(&cons->rx_msg_wq);
-}
-
-int ivpu_ipc_irq_handler(struct ivpu_device *vdev)
+void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread)
 {
 	struct ivpu_ipc_info *ipc = vdev->ipc;
 	struct ivpu_ipc_consumer *cons;
@@ -405,7 +398,7 @@ int ivpu_ipc_irq_handler(struct ivpu_device *vdev)
 		vpu_addr = ivpu_hw_reg_ipc_rx_addr_get(vdev);
 		if (vpu_addr == REG_IO_ERROR) {
 			ivpu_err_ratelimited(vdev, "Failed to read IPC rx addr register\n");
-			return -EIO;
+			return;
 		}
 
 		ipc_hdr = ivpu_to_cpu_addr(ipc->mem_rx, vpu_addr);
@@ -435,15 +428,15 @@ int ivpu_ipc_irq_handler(struct ivpu_device *vdev)
 		}
 
 		dispatched = false;
-		spin_lock_irqsave(&ipc->cons_list_lock, flags);
+		spin_lock_irqsave(&ipc->cons_lock, flags);
 		list_for_each_entry(cons, &ipc->cons_list, link) {
 			if (ivpu_ipc_match_consumer(vdev, cons, ipc_hdr, jsm_msg)) {
-				ivpu_ipc_dispatch(vdev, cons, ipc_hdr, jsm_msg);
+				ivpu_ipc_rx_msg_add(vdev, cons, ipc_hdr, jsm_msg);
 				dispatched = true;
 				break;
 			}
 		}
-		spin_unlock_irqrestore(&ipc->cons_list_lock, flags);
+		spin_unlock_irqrestore(&ipc->cons_lock, flags);
 
 		if (!dispatched) {
 			ivpu_dbg(vdev, IPC, "IPC RX msg 0x%x dropped (no consumer)\n", vpu_addr);
@@ -451,7 +444,28 @@ int ivpu_ipc_irq_handler(struct ivpu_device *vdev)
 		}
 	}
 
-	return 0;
+	if (wake_thread)
+		*wake_thread = !list_empty(&ipc->cb_msg_list);
+}
+
+irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
+{
+	struct ivpu_ipc_info *ipc = vdev->ipc;
+	struct ivpu_ipc_rx_msg *rx_msg, *r;
+	struct list_head cb_msg_list;
+
+	INIT_LIST_HEAD(&cb_msg_list);
+
+	spin_lock_irq(&ipc->cons_lock);
+	list_splice_tail_init(&ipc->cb_msg_list, &cb_msg_list);
+	spin_unlock_irq(&ipc->cons_lock);
+
+	list_for_each_entry_safe(rx_msg, r, &cb_msg_list, link) {
+		rx_msg->callback(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
+		ivpu_ipc_rx_msg_del(vdev, rx_msg);
+	}
+
+	return IRQ_HANDLED;
 }
 
 int ivpu_ipc_init(struct ivpu_device *vdev)
@@ -486,10 +500,10 @@ int ivpu_ipc_init(struct ivpu_device *vdev)
 		goto err_free_rx;
 	}
 
+	spin_lock_init(&ipc->cons_lock);
 	INIT_LIST_HEAD(&ipc->cons_list);
-	spin_lock_init(&ipc->cons_list_lock);
+	INIT_LIST_HEAD(&ipc->cb_msg_list);
 	drmm_mutex_init(&vdev->drm, &ipc->lock);
 
 	ivpu_ipc_reset(vdev);
 	return 0;
 
@@ -502,6 +516,13 @@ err_free_tx:
 
 void ivpu_ipc_fini(struct ivpu_device *vdev)
 {
+	struct ivpu_ipc_info *ipc = vdev->ipc;
+
+	drm_WARN_ON(&vdev->drm, ipc->on);
+	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cons_list));
+	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));
+	drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);
+
 	ivpu_ipc_mem_fini(vdev);
 }
 
@@ -518,22 +539,27 @@ void ivpu_ipc_disable(struct ivpu_device *vdev)
 {
 	struct ivpu_ipc_info *ipc = vdev->ipc;
 	struct ivpu_ipc_consumer *cons, *c;
-	unsigned long flags;
+	struct ivpu_ipc_rx_msg *rx_msg, *r;
+
+	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));
 
 	mutex_lock(&ipc->lock);
 	ipc->on = false;
 	mutex_unlock(&ipc->lock);
 
-	spin_lock_irqsave(&ipc->cons_list_lock, flags);
+	spin_lock_irq(&ipc->cons_lock);
 	list_for_each_entry_safe(cons, c, &ipc->cons_list, link) {
-		if (cons->channel != VPU_IPC_CHAN_JOB_RET) {
-			spin_lock(&cons->rx_lock);
+		spin_lock(&cons->rx_lock);
+		if (!cons->rx_callback)
 			cons->aborted = true;
-			spin_unlock(&cons->rx_lock);
-		}
+		list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link)
+			ivpu_ipc_rx_msg_del(vdev, rx_msg);
+		spin_unlock(&cons->rx_lock);
 		wake_up(&cons->rx_msg_wq);
 	}
-	spin_unlock_irqrestore(&ipc->cons_list_lock, flags);
+	spin_unlock_irq(&ipc->cons_lock);
+
+	drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);
 }
 
 void ivpu_ipc_reset(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h
--- a/drivers/accel/ivpu/ivpu_ipc.h
+++ b/drivers/accel/ivpu/ivpu_ipc.h
@@ -42,12 +42,24 @@ struct ivpu_ipc_hdr {
 	u8 status;
 } __packed __aligned(IVPU_IPC_ALIGNMENT);
 
+typedef void (*ivpu_ipc_rx_callback_t)(struct ivpu_device *vdev,
+				       struct ivpu_ipc_hdr *ipc_hdr,
+				       struct vpu_jsm_msg *jsm_msg);
+
+struct ivpu_ipc_rx_msg {
+	struct list_head link;
+	struct ivpu_ipc_hdr *ipc_hdr;
+	struct vpu_jsm_msg *jsm_msg;
+	ivpu_ipc_rx_callback_t callback;
+};
+
 struct ivpu_ipc_consumer {
 	struct list_head link;
 	u32 channel;
 	u32 tx_vpu_addr;
 	u32 request_id;
 	bool aborted;
+	ivpu_ipc_rx_callback_t rx_callback;
 
 	spinlock_t rx_lock; /* Protects rx_msg_list and aborted */
 	struct list_head rx_msg_list;
@@ -61,8 +73,9 @@ struct ivpu_ipc_info {
 
 	atomic_t rx_msg_count;
 
-	spinlock_t cons_list_lock; /* Protects cons_list */
+	spinlock_t cons_lock; /* Protects cons_list and cb_msg_list */
 	struct list_head cons_list;
+	struct list_head cb_msg_list;
 
 	atomic_t request_id;
 	struct mutex lock; /* Lock on status */
@@ -76,14 +89,15 @@ void ivpu_ipc_enable(struct ivpu_device *vdev);
 void ivpu_ipc_disable(struct ivpu_device *vdev);
 void ivpu_ipc_reset(struct ivpu_device *vdev);
 
-int ivpu_ipc_irq_handler(struct ivpu_device *vdev);
+void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread);
+irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev);
 
 void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
-			   u32 channel);
+			   u32 channel, ivpu_ipc_rx_callback_t callback);
 void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons);
 
 int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
-		     struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *ipc_payload,
+		     struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *jsm_msg,
 		     unsigned long timeout_ms);
 
 int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -7,7 +7,6 @@
 
 #include <linux/bitfield.h>
 #include <linux/highmem.h>
-#include <linux/kthread.h>
 #include <linux/pci.h>
 #include <linux/module.h>
 #include <uapi/drm/ivpu_accel.h>
@@ -344,22 +343,6 @@ static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
 	return 0;
 }
 
-static void ivpu_job_done_message(struct ivpu_device *vdev, void *msg)
-{
-	struct vpu_ipc_msg_payload_job_done *payload;
-	struct vpu_jsm_msg *job_ret_msg = msg;
-	int ret;
-
-	payload = (struct vpu_ipc_msg_payload_job_done *)&job_ret_msg->payload;
-
-	ret = ivpu_job_done(vdev, payload->job_id, payload->job_status);
-	if (ret)
-		ivpu_err(vdev, "Failed to finish job %d: %d\n", payload->job_id, ret);
-
-	if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
-		ivpu_start_job_timeout_detection(vdev);
-}
-
 void ivpu_jobs_abort_all(struct ivpu_device *vdev)
 {
 	struct ivpu_job *job;
@@ -567,65 +550,36 @@ free_handles:
 	return ret;
 }
 
-static int ivpu_job_done_thread(void *arg)
+static void
+ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
+		       struct vpu_jsm_msg *jsm_msg)
 {
-	struct ivpu_device *vdev = (struct ivpu_device *)arg;
-	struct ivpu_ipc_consumer cons;
-	struct vpu_jsm_msg jsm_msg;
-	unsigned int timeout;
+	struct vpu_ipc_msg_payload_job_done *payload;
 	int ret;
 
-	ivpu_dbg(vdev, JOB, "Started %s\n", __func__);
-	ivpu_ipc_consumer_add(vdev, &cons, VPU_IPC_CHAN_JOB_RET);
-
-	while (!kthread_should_stop()) {
-		ret = ivpu_ipc_receive(vdev, &cons, NULL, &jsm_msg, timeout);
-		if (!ret)
-			ivpu_job_done_message(vdev, &jsm_msg);
-
-		if (kthread_should_park()) {
-			ivpu_dbg(vdev, JOB, "Parked %s\n", __func__);
-			kthread_parkme();
-			ivpu_dbg(vdev, JOB, "Unparked %s\n", __func__);
-		}
+	if (!jsm_msg) {
+		ivpu_err(vdev, "IPC message has no JSM payload\n");
+		return;
 	}
 
-	ivpu_ipc_consumer_del(vdev, &cons);
-	ivpu_jobs_abort_all(vdev);
-
-	ivpu_dbg(vdev, JOB, "Stopped %s\n", __func__);
-	return 0;
-}
-
-int ivpu_job_done_thread_init(struct ivpu_device *vdev)
-{
-	struct task_struct *thread;
-
-	thread = kthread_run(&ivpu_job_done_thread, (void *)vdev, "ivpu_job_done_thread");
-	if (IS_ERR(thread)) {
-		ivpu_err(vdev, "Failed to start job completion thread\n");
-		return -EIO;
+	if (jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
+		ivpu_err(vdev, "Invalid JSM message result: %d\n", jsm_msg->result);
+		return;
 	}
 
-	vdev->job_done_thread = thread;
-
-	return 0;
+	payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
+	ret = ivpu_job_done(vdev, payload->job_id, payload->job_status);
+	if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
+		ivpu_start_job_timeout_detection(vdev);
 }
 
-void ivpu_job_done_thread_fini(struct ivpu_device *vdev)
+void ivpu_job_done_consumer_init(struct ivpu_device *vdev)
 {
-	kthread_unpark(vdev->job_done_thread);
-	kthread_stop(vdev->job_done_thread);
+	ivpu_ipc_consumer_add(vdev, &vdev->job_done_consumer,
+			      VPU_IPC_CHAN_JOB_RET, ivpu_job_done_callback);
 }
 
-void ivpu_job_done_thread_disable(struct ivpu_device *vdev)
+void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
 {
-	kthread_park(vdev->job_done_thread);
-}
-
-void ivpu_job_done_thread_enable(struct ivpu_device *vdev)
-{
-	kthread_unpark(vdev->job_done_thread);
+	ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
 }
diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
--- a/drivers/accel/ivpu/ivpu_job.h
+++ b/drivers/accel/ivpu/ivpu_job.h
@@ -59,10 +59,8 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv);
 void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
 
-int ivpu_job_done_thread_init(struct ivpu_device *vdev);
-void ivpu_job_done_thread_fini(struct ivpu_device *vdev);
-void ivpu_job_done_thread_disable(struct ivpu_device *vdev);
-void ivpu_job_done_thread_enable(struct ivpu_device *vdev);
+void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
+void ivpu_job_done_consumer_fini(struct ivpu_device *vdev);
 
 void ivpu_jobs_abort_all(struct ivpu_device *vdev);