mailbox: mediatek: remove implementation related to atomic_exec

Now that flush is implemented, a client can flush the executing
command buffer, or abort a command buffer that is still waiting
for its event, so the controller no longer needs to implement
the atomic_exec feature. Remove it.

Signed-off-by: Bibby Hsieh <bibby.hsieh@mediatek.com>
Reviewed-by: CK Hu <ck.hu@mediatek.com>
Signed-off-by: Jassi Brar <jaswinder.singh@linaro.org>
Author:    Bibby Hsieh <bibby.hsieh@mediatek.com>
Date:      2020-02-17 17:05:32 +08:00
Committer: Jassi Brar
Parent:    b0524f7c96
Commit:    c9ea564f3d
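
Background note (not part of the patch): the flush support referenced
above is the generic mailbox client API from <linux/mailbox_client.h>.
A minimal sketch of the pattern a client can use instead of relying on
atomic_exec, assuming a channel already obtained via
mbox_request_channel(); the helper name cmdq_client_submit_sync() is
illustrative, not from this series:

#include <linux/mailbox_client.h>

/* Illustrative helper: queue a packet, then drain the channel. */
static int cmdq_client_submit_sync(struct mbox_chan *chan, void *pkt)
{
	int ret;

	ret = mbox_send_message(chan, pkt);
	if (ret < 0)
		return ret;

	/*
	 * mbox_flush() invokes the controller's flush callback so the
	 * caller can wait for (or abort) queued command buffers; the
	 * timeout's unit is defined by the controller driver.
	 */
	return mbox_flush(chan, 2000);
}

With this available, a client no longer needs the controller to
guarantee atomic execution of back-to-back command buffers.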

@@ -56,7 +56,6 @@ struct cmdq_thread {
 	void __iomem		*base;
 	struct list_head	task_busy_list;
 	u32			priority;
-	bool			atomic_exec;
 };
 
 struct cmdq_task {
@@ -162,48 +161,11 @@ static void cmdq_task_insert_into_thread(struct cmdq_task *task)
 	cmdq_thread_invalidate_fetched_data(thread);
 }
 
-static bool cmdq_command_is_wfe(u64 cmd)
-{
-	u64 wfe_option = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;
-	u64 wfe_op = (u64)(CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT) << 32;
-	u64 wfe_mask = (u64)CMDQ_OP_CODE_MASK << 32 | 0xffffffff;
-
-	return ((cmd & wfe_mask) == (wfe_op | wfe_option));
-}
-
-/* we assume tasks in the same display GCE thread are waiting the same event. */
-static void cmdq_task_remove_wfe(struct cmdq_task *task)
-{
-	struct device *dev = task->cmdq->mbox.dev;
-	u64 *base = task->pkt->va_base;
-	int i;
-
-	dma_sync_single_for_cpu(dev, task->pa_base, task->pkt->cmd_buf_size,
-				DMA_TO_DEVICE);
-	for (i = 0; i < CMDQ_NUM_CMD(task->pkt); i++)
-		if (cmdq_command_is_wfe(base[i]))
-			base[i] = (u64)CMDQ_JUMP_BY_OFFSET << 32 |
-				  CMDQ_JUMP_PASS;
-	dma_sync_single_for_device(dev, task->pa_base, task->pkt->cmd_buf_size,
-				   DMA_TO_DEVICE);
-}
-
-static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
-{
-	return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
-}
-
-static void cmdq_thread_wait_end(struct cmdq_thread *thread,
-				 unsigned long end_pa)
-{
-	struct device *dev = thread->chan->mbox->dev;
-	unsigned long curr_pa;
-
-	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_ADDR,
-			curr_pa, curr_pa == end_pa, 1, 20))
-		dev_err(dev, "GCE thread cannot run to end.\n");
-}
-
 static void cmdq_task_exec_done(struct cmdq_task *task, enum cmdq_cb_status sta)
 {
 	struct cmdq_task_cb *cb = &task->pkt->async_cb;
@@ -383,36 +345,15 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
 		WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
 		curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR);
 		end_pa = readl(thread->base + CMDQ_THR_END_ADDR);
-
-		/*
-		 * Atomic execution should remove the following wfe, i.e. only
-		 * wait event at first task, and prevent to pause when running.
-		 */
-		if (thread->atomic_exec) {
-			/* GCE is executing if command is not WFE */
-			if (!cmdq_thread_is_in_wfe(thread)) {
-				cmdq_thread_resume(thread);
-				cmdq_thread_wait_end(thread, end_pa);
-				WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
-				/* set to this task directly */
-				writel(task->pa_base,
-				       thread->base + CMDQ_THR_CURR_ADDR);
-			} else {
-				cmdq_task_insert_into_thread(task);
-				cmdq_task_remove_wfe(task);
-				smp_mb(); /* modify jump before enable thread */
-			}
+		/* check boundary */
+		if (curr_pa == end_pa - CMDQ_INST_SIZE ||
+		    curr_pa == end_pa) {
+			/* set to this task directly */
+			writel(task->pa_base,
+			       thread->base + CMDQ_THR_CURR_ADDR);
 		} else {
-			/* check boundary */
-			if (curr_pa == end_pa - CMDQ_INST_SIZE ||
-			    curr_pa == end_pa) {
-				/* set to this task directly */
-				writel(task->pa_base,
-				       thread->base + CMDQ_THR_CURR_ADDR);
-			} else {
-				cmdq_task_insert_into_thread(task);
-				smp_mb(); /* modify jump before enable thread */
-			}
+			cmdq_task_insert_into_thread(task);
+			smp_mb(); /* modify jump before enable thread */
 		}
 		writel(task->pa_base + pkt->cmd_buf_size,
 		       thread->base + CMDQ_THR_END_ADDR);
@@ -501,7 +442,6 @@ static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
 
 	thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
 	thread->priority = sp->args[1];
-	thread->atomic_exec = (sp->args[2] != 0);
 	thread->chan = &mbox->chans[ind];
 
 	return &mbox->chans[ind];
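
For reference, the controller-side half of this argument lives in the
parent patch, which added a flush callback to the driver's channel ops
so that the framework's mbox_flush() reaches mtk-cmdq-mailbox. A sketch
of that wiring (shape only; see the parent commit for the
cmdq_mbox_flush() body):

/* Channel ops with the flush hook in place (per the parent patch). */
static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
	.send_data = cmdq_mbox_send_data,
	.startup = cmdq_mbox_startup,
	.shutdown = cmdq_mbox_shutdown,
	.flush = cmdq_mbox_flush,
};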