f6ea568f0d
We shouldn't need to hold this spinlock here around the entire SCM call
into the firmware and back. Instead, we should be able to query the
firmware, potentially in parallel with other CPUs making the same
convention detection firmware call, and then grab the lock to update the
calling convention detected. The convention doesn't change at runtime,
so calling into the firmware more than once is possibly wasteful but simpler.
Besides, this is the slow path, not the fast path where we've already
detected the convention used.
More importantly, this allows us to add more logic here to work around
the case where the firmware call to check for availability isn't
implemented in the firmware at all. In that case we can check the
firmware node compatible string and force a calling convention.
Note that we remove the 'has_queried' logic that is repeated twice. That
could lead to the calling convention being printed multiple times to the
kernel logs if the bool is true but __query_convention() is running on
multiple CPUs. We also shorten the time where the lock is held, but we
keep the lock held around the printk because it doesn't seem hugely
important to drop it for that.
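
To make the shape of the change concrete, here is a minimal sketch of the
pattern described above (not the actual diff; __query_convention() is the
probe named above, while __get_convention(), scm_query_lock,
qcom_scm_convention and qcom_scm_convention_names are assumed names used
only for illustration):

static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	enum qcom_scm_convention probed;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/* Firmware call issued without holding the lock */
	probed = __query_convention();

	/* Take the lock only to record the result and print it once */
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed != qcom_scm_convention) {
		qcom_scm_convention = probed;
		pr_info("qcom_scm: convention: %s\n",
			qcom_scm_convention_names[probed]);
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return probed;
}
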
Cc: Elliot Berman <eberman@codeaurora.org>
Cc: Brian Masney <masneyb@onstation.org>
Cc: Stephan Gerhold <stephan@gerhold.net>
Cc: Jeffrey Hugo <jhugo@codeaurora.org>
Cc: Douglas Anderson <dianders@chromium.org>
Fixes: 9a434cee77 ("firmware: qcom_scm: Dynamically support SMCCC and legacy conventions")
Signed-off-by: Stephen Boyd <swboyd@chromium.org>
Link: https://lore.kernel.org/r/20210223214539.1336155-3-swboyd@chromium.org
Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved.
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/arm-smccc.h>
#include <linux/dma-mapping.h>

#include "qcom_scm.h"

/**
 * struct arm_smccc_args
 * @args: The array of values used in registers in smc instruction
 */
struct arm_smccc_args {
	unsigned long args[8];
};

static DEFINE_MUTEX(qcom_scm_lock);

#define QCOM_SCM_EBUSY_WAIT_MS 30
#define QCOM_SCM_EBUSY_MAX_RETRY 20

#define SCM_SMC_N_REG_ARGS 4
#define SCM_SMC_FIRST_EXT_IDX (SCM_SMC_N_REG_ARGS - 1)
#define SCM_SMC_N_EXT_ARGS (MAX_QCOM_SCM_ARGS - SCM_SMC_N_REG_ARGS + 1)
#define SCM_SMC_FIRST_REG_IDX 2
#define SCM_SMC_LAST_REG_IDX (SCM_SMC_FIRST_REG_IDX + SCM_SMC_N_REG_ARGS - 1)

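/*
 * Issue the SMC, feeding quirk.state.a6 back in on each attempt, and
 * retry for as long as the firmware returns QCOM_SCM_INTERRUPTED. The
 * ARM_SMCCC_QUIRK_QCOM_A6 quirk captures the a6 value the firmware
 * leaves behind so the interrupted call can be resumed.
 */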
static void __scm_smc_do_quirk(const struct arm_smccc_args *smc,
			       struct arm_smccc_res *res)
{
	unsigned long a0 = smc->args[0];
	struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };

	quirk.state.a6 = 0;

	do {
		arm_smccc_smc_quirk(a0, smc->args[1], smc->args[2],
				    smc->args[3], smc->args[4], smc->args[5],
				    quirk.state.a6, smc->args[7], res, &quirk);

		if (res->a0 == QCOM_SCM_INTERRUPTED)
			a0 = res->a0;

	} while (res->a0 == QCOM_SCM_INTERRUPTED);
}

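/*
 * Atomic callers issue the SMC directly. Sleeping callers serialize on
 * qcom_scm_lock and, when the firmware reports QCOM_SCM_V2_EBUSY, back
 * off for QCOM_SCM_EBUSY_WAIT_MS before retrying, giving up after
 * QCOM_SCM_EBUSY_MAX_RETRY attempts.
 */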
static void __scm_smc_do(const struct arm_smccc_args *smc,
			 struct arm_smccc_res *res, bool atomic)
{
	int retry_count = 0;

	if (atomic) {
		__scm_smc_do_quirk(smc, res);
		return;
	}

	do {
		mutex_lock(&qcom_scm_lock);

		__scm_smc_do_quirk(smc, res);

		mutex_unlock(&qcom_scm_lock);

		if (res->a0 == QCOM_SCM_V2_EBUSY) {
			if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
				break;
			msleep(QCOM_SCM_EBUSY_WAIT_MS);
		}
	} while (res->a0 == QCOM_SCM_V2_EBUSY);
}

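/*
 * Marshal a qcom_scm_desc into an SMCCC call: the function ID and the
 * argument descriptor occupy the first two argument registers and the
 * leading arguments follow in registers. If there are more than
 * SCM_SMC_N_REG_ARGS arguments, the remainder (from SCM_SMC_FIRST_EXT_IDX
 * onwards) are copied into a DMA-mapped buffer whose address takes the
 * place of the last register argument.
 */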
int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
		   enum qcom_scm_convention qcom_convention,
		   struct qcom_scm_res *res, bool atomic)
{
	int arglen = desc->arginfo & 0xf;
	int i;
	dma_addr_t args_phys = 0;
	void *args_virt = NULL;
	size_t alloc_len;
	gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
	u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
	u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ?
				    ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
	struct arm_smccc_res smc_res;
	struct arm_smccc_args smc = {0};

	smc.args[0] = ARM_SMCCC_CALL_VAL(
		smccc_call_type,
		qcom_smccc_convention,
		desc->owner,
		SCM_SMC_FNID(desc->svc, desc->cmd));
	smc.args[1] = desc->arginfo;
	for (i = 0; i < SCM_SMC_N_REG_ARGS; i++)
		smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];

	if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
		alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64);
		args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);

		if (!args_virt)
			return -ENOMEM;

		if (qcom_smccc_convention == ARM_SMCCC_SMC_32) {
			__le32 *args = args_virt;

			for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
				args[i] = cpu_to_le32(desc->args[i +
						      SCM_SMC_FIRST_EXT_IDX]);
		} else {
			__le64 *args = args_virt;

			for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
				args[i] = cpu_to_le64(desc->args[i +
						      SCM_SMC_FIRST_EXT_IDX]);
		}

		args_phys = dma_map_single(dev, args_virt, alloc_len,
					   DMA_TO_DEVICE);

		if (dma_mapping_error(dev, args_phys)) {
			kfree(args_virt);
			return -ENOMEM;
		}

		smc.args[SCM_SMC_LAST_REG_IDX] = args_phys;
	}

	__scm_smc_do(&smc, &smc_res, atomic);

	if (args_virt) {
		dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
		kfree(args_virt);
	}

	if (res) {
		res->result[0] = smc_res.a1;
		res->result[1] = smc_res.a2;
		res->result[2] = smc_res.a3;
	}

	return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
}