mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-17 01:34:00 +08:00
soc: qcom: rpmh: Remove serialization of TCS commands
Requests sent to RPMH can be sent as fire-n-forget or response required, with the latter ensuring that the command has been completed by the hardware accelerator. Commands in a request with tcs_cmd::wait set would ensure that those selected commands are sent as response required, even though the actual TCS request may be fire-n-forget. Commands with the .wait flag were also guaranteed to be complete before the following command in the TCS was sent. This means that the next command of the same request was blocked until the current request completed. This could mean waiting for a voltage to settle or a series of NOCs to be configured before the next command is sent. But drivers using this feature have never cared about the serialization aspect. By not enforcing the serialization we can allow the hardware to run in parallel, improving the performance. Let's clarify the usage of this member in the tcs_cmd structure to mean only completion and not serialization. This should also improve the performance of bus requests where changes could happen in parallel. Also, CPU resume from deep idle may see benefits from certain wake requests. Reviewed-by: Douglas Anderson <dianders@chromium.org> Signed-off-by: Lina Iyer <ilina@codeaurora.org> Signed-off-by: Maulik Shah <mkshah@codeaurora.org> Link: https://lore.kernel.org/r/1610008770-13891-1-git-send-email-mkshah@codeaurora.org Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
This commit is contained in:
parent
1b3df36891
commit
fef419c463
@ -231,10 +231,9 @@ static void tcs_invalidate(struct rsc_drv *drv, int type)
|
||||
if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS))
|
||||
return;
|
||||
|
||||
for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
|
||||
for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++)
|
||||
write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
|
||||
write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0);
|
||||
}
|
||||
|
||||
bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
|
||||
}
|
||||
|
||||
@ -443,7 +442,6 @@ static irqreturn_t tcs_tx_done(int irq, void *p)
|
||||
skip:
|
||||
/* Reclaim the TCS */
|
||||
write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
|
||||
write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, i, 0);
|
||||
writel_relaxed(BIT(i), drv->tcs_base + RSC_DRV_IRQ_CLEAR);
|
||||
spin_lock(&drv->lock);
|
||||
clear_bit(i, drv->tcs_in_use);
|
||||
@ -476,23 +474,23 @@ skip:
|
||||
static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
|
||||
const struct tcs_request *msg)
|
||||
{
|
||||
u32 msgid, cmd_msgid;
|
||||
u32 msgid;
|
||||
u32 cmd_msgid = CMD_MSGID_LEN | CMD_MSGID_WRITE;
|
||||
u32 cmd_enable = 0;
|
||||
u32 cmd_complete;
|
||||
struct tcs_cmd *cmd;
|
||||
int i, j;
|
||||
|
||||
cmd_msgid = CMD_MSGID_LEN;
|
||||
/* Convert all commands to RR when the request has wait_for_compl set */
|
||||
cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
|
||||
cmd_msgid |= CMD_MSGID_WRITE;
|
||||
|
||||
cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id);
|
||||
|
||||
for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
|
||||
cmd = &msg->cmds[i];
|
||||
cmd_enable |= BIT(j);
|
||||
cmd_complete |= cmd->wait << j;
|
||||
msgid = cmd_msgid;
|
||||
/*
|
||||
* Additionally, if the cmd->wait is set, make the command
|
||||
* response reqd even if the overall request was fire-n-forget.
|
||||
*/
|
||||
msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;
|
||||
|
||||
write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
|
||||
@ -501,7 +499,6 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
|
||||
trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
|
||||
}
|
||||
|
||||
write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
|
||||
cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);
|
||||
write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
|
||||
}
|
||||
@ -652,7 +649,6 @@ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
|
||||
* cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
|
||||
*/
|
||||
write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
|
||||
write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
|
||||
enable_tcs_irq(drv, tcs_id, true);
|
||||
}
|
||||
spin_unlock_irqrestore(&drv->lock, flags);
|
||||
|
@ -30,7 +30,13 @@ enum rpmh_state {
|
||||
*
|
||||
* @addr: the address of the resource slv_id:18:16 | offset:0:15
|
||||
* @data: the resource state request
|
||||
* @wait: wait for this request to be complete before sending the next
|
||||
* @wait: ensure that this command is complete before returning.
|
||||
* Setting "wait" here only makes sense during rpmh_write_batch() for
|
||||
* active-only transfers, this is because:
|
||||
* rpmh_write() - Always waits.
|
||||
* (DEFINE_RPMH_MSG_ONSTACK will set .wait_for_compl)
|
||||
* rpmh_write_async() - Never waits.
|
||||
* (There's no request completion callback)
|
||||
*/
|
||||
struct tcs_cmd {
|
||||
u32 addr;
|
||||
@ -43,6 +49,7 @@ struct tcs_cmd {
|
||||
*
|
||||
* @state: state for the request.
|
||||
* @wait_for_compl: wait until we get a response from the h/w accelerator
|
||||
* (same as setting cmd->wait for all commands in the request)
|
||||
* @num_cmds: the number of @cmds in this request
|
||||
* @cmds: an array of tcs_cmds
|
||||
*/
|
||||
|
Loading…
Reference in New Issue
Block a user