coresight: etm4x: add CPU hotplug support for probing
etm4x devices cannot be successfully probed when their CPU is offline. For
example, when booting with maxcpus=n, ETM probing will fail on CPUs >n, and
the probing won't be reattempted once the CPUs come online. This will leave
those CPUs unable to make use of ETM.

This change adds a mechanism to delay the probing if the corresponding CPU is
offline, and to try it again when the CPU comes online.

Signed-off-by: Tamas Zsoldos <tamas.zsoldos@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Link: https://lore.kernel.org/r/20220705145935.24679-1-tamas.zsoldos@arm.com
commit 3c728e079d
parent 30a0b95b13
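Before the diff, here is a minimal, self-contained userspace sketch of the pattern
the patch implements: probe arguments are parked in a per-CPU slot when the CPU is
offline, and the CPU-online callback finishes the probe later. This is not kernel
code; all names here (fake_probe, fake_online_cpu, struct init_arg, NR_CPUS,
cpu_online[]) are made up for illustration and only loosely mirror the driver's
etm4_probe()/etm4_probe_cpu() split shown in the diff.

/*
 * Standalone sketch (not driver code) of deferring a probe until the
 * target CPU comes online.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct init_arg {
	int cpu;
	const char *name;	/* stands in for the real probe context */
};

/* One deferred probe per CPU, mirroring the per-CPU delayed_probe slot. */
static struct init_arg *delayed_probe[NR_CPUS];
static bool cpu_online[NR_CPUS] = { true, true, false, false };

static void add_coresight_dev(const struct init_arg *arg)
{
	printf("cpu%d: %s registered\n", arg->cpu, arg->name);
}

static int fake_probe(int cpu, const char *name)
{
	if (!cpu_online[cpu]) {
		/* CPU offline: remember the arguments and finish later. */
		struct init_arg *delayed = malloc(sizeof(*delayed));

		if (!delayed)
			return -1;
		delayed->cpu = cpu;
		delayed->name = name;
		delayed_probe[cpu] = delayed;
		printf("cpu%d: offline, probe of %s deferred\n", cpu, name);
		return 0;
	}

	add_coresight_dev(&(struct init_arg){ .cpu = cpu, .name = name });
	return 0;
}

/* Analogue of the CPU hotplug "online" callback: consume any deferred probe. */
static void fake_online_cpu(int cpu)
{
	struct init_arg *arg = delayed_probe[cpu];

	cpu_online[cpu] = true;
	if (!arg)
		return;
	delayed_probe[cpu] = NULL;
	add_coresight_dev(arg);
	free(arg);
}

int main(void)
{
	fake_probe(0, "etm0");	/* CPU online: probes immediately */
	fake_probe(2, "etm2");	/* CPU offline: probe is deferred */
	fake_online_cpu(2);	/* deferred probe completes here */
	return 0;
}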
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -66,10 +66,13 @@ static enum cpuhp_state hp_online;
 
 struct etm4_init_arg {
 	unsigned int		pid;
-	struct etmv4_drvdata	*drvdata;
+	struct device		*dev;
 	struct csdev_access	*csa;
 };
 
+static DEFINE_PER_CPU(struct etm4_init_arg *, delayed_probe);
+static int etm4_probe_cpu(unsigned int cpu);
+
 /*
  * Check if TRCSSPCICRn(i) is implemented for a given instance.
  *
@@ -1085,7 +1088,7 @@ static void etm4_init_arch_data(void *info)
 	struct csdev_access *csa;
 	int i;
 
-	drvdata = init_arg->drvdata;
+	drvdata = dev_get_drvdata(init_arg->dev);
 	csa = init_arg->csa;
 
 	/*
@@ -1528,7 +1531,7 @@ void etm4_config_trace_mode(struct etmv4_config *config)
 static int etm4_online_cpu(unsigned int cpu)
 {
 	if (!etmdrvdata[cpu])
-		return 0;
+		return etm4_probe_cpu(cpu);
 
 	if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
 		coresight_enable(etmdrvdata[cpu]->csdev);
@@ -1904,48 +1907,20 @@ static void etm4_pm_clear(void)
 	}
 }
 
-static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
+static int etm4_add_coresight_dev(struct etm4_init_arg *init_arg)
 {
 	int ret;
 	struct coresight_platform_data *pdata = NULL;
-	struct etmv4_drvdata *drvdata;
+	struct device *dev = init_arg->dev;
+	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
 	struct coresight_desc desc = { 0 };
-	struct etm4_init_arg init_arg = { 0 };
 	u8 major, minor;
 	char *type_name;
 
-	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
 	if (!drvdata)
-		return -ENOMEM;
-
-	dev_set_drvdata(dev, drvdata);
-
-	if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE)
-		pm_save_enable = coresight_loses_context_with_cpu(dev) ?
-			       PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER;
-
-	if (pm_save_enable != PARAM_PM_SAVE_NEVER) {
-		drvdata->save_state = devm_kmalloc(dev,
-				sizeof(struct etmv4_save_state), GFP_KERNEL);
-		if (!drvdata->save_state)
-			return -ENOMEM;
-	}
-
-	drvdata->base = base;
-
-	spin_lock_init(&drvdata->spinlock);
-
-	drvdata->cpu = coresight_get_cpu(dev);
-	if (drvdata->cpu < 0)
-		return drvdata->cpu;
-
-	init_arg.drvdata = drvdata;
-	init_arg.csa = &desc.access;
-	init_arg.pid = etm_pid;
-
-	if (smp_call_function_single(drvdata->cpu,
-				etm4_init_arch_data, &init_arg, 1))
-		dev_err(dev, "ETM arch init failed\n");
+		return -EINVAL;
+
+	desc.access = *init_arg->csa;
 
 	if (!drvdata->arch)
 		return -EINVAL;
@@ -2016,6 +1991,68 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
 	return 0;
 }
 
+static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
+{
+	struct etmv4_drvdata *drvdata;
+	struct csdev_access access = { 0 };
+	struct etm4_init_arg init_arg = { 0 };
+	struct etm4_init_arg *delayed;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
+	dev_set_drvdata(dev, drvdata);
+
+	if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE)
+		pm_save_enable = coresight_loses_context_with_cpu(dev) ?
+			       PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER;
+
+	if (pm_save_enable != PARAM_PM_SAVE_NEVER) {
+		drvdata->save_state = devm_kmalloc(dev,
+				sizeof(struct etmv4_save_state), GFP_KERNEL);
+		if (!drvdata->save_state)
+			return -ENOMEM;
+	}
+
+	drvdata->base = base;
+
+	spin_lock_init(&drvdata->spinlock);
+
+	drvdata->cpu = coresight_get_cpu(dev);
+	if (drvdata->cpu < 0)
+		return drvdata->cpu;
+
+	init_arg.dev = dev;
+	init_arg.csa = &access;
+	init_arg.pid = etm_pid;
+
+	/*
+	 * Serialize against CPUHP callbacks to avoid race condition
+	 * between the smp call and saving the delayed probe.
+	 */
+	cpus_read_lock();
+	if (smp_call_function_single(drvdata->cpu,
+				etm4_init_arch_data, &init_arg, 1)) {
+		/* The CPU was offline, try again once it comes online. */
+		delayed = devm_kmalloc(dev, sizeof(*delayed), GFP_KERNEL);
+		if (!delayed) {
+			cpus_read_unlock();
+			return -ENOMEM;
+		}
+
+		*delayed = init_arg;
+
+		per_cpu(delayed_probe, drvdata->cpu) = delayed;
+
+		cpus_read_unlock();
+		return 0;
+	}
+	cpus_read_unlock();
+
+	return etm4_add_coresight_dev(&init_arg);
+}
+
 static int etm4_probe_amba(struct amba_device *adev, const struct amba_id *id)
 {
 	void __iomem *base;
@@ -2054,6 +2091,35 @@ static int etm4_probe_platform_dev(struct platform_device *pdev)
 	return ret;
 }
 
+static int etm4_probe_cpu(unsigned int cpu)
+{
+	int ret;
+	struct etm4_init_arg init_arg;
+	struct csdev_access access = { 0 };
+	struct etm4_init_arg *iap = *this_cpu_ptr(&delayed_probe);
+
+	if (!iap)
+		return 0;
+
+	init_arg = *iap;
+	devm_kfree(init_arg.dev, iap);
+	*this_cpu_ptr(&delayed_probe) = NULL;
+
+	ret = pm_runtime_resume_and_get(init_arg.dev);
+	if (ret < 0) {
+		dev_err(init_arg.dev, "Failed to get PM runtime!\n");
+		return 0;
+	}
+
+	init_arg.csa = &access;
+	etm4_init_arch_data(&init_arg);
+
+	etm4_add_coresight_dev(&init_arg);
+
+	pm_runtime_put(init_arg.dev);
+	return 0;
+}
+
 static struct amba_cs_uci_id uci_id_etm4[] = {
 	{
 		/* ETMv4 UCI data */
@@ -2068,16 +2134,20 @@ static void clear_etmdrvdata(void *info)
 	int cpu = *(int *)info;
 
 	etmdrvdata[cpu] = NULL;
+	per_cpu(delayed_probe, cpu) = NULL;
 }
 
 static int __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
 {
-	etm_perf_symlink(drvdata->csdev, false);
+	bool had_delayed_probe;
 	/*
 	 * Taking hotplug lock here to avoid racing between etm4_remove_dev()
 	 * and CPU hotplug call backs.
 	 */
 	cpus_read_lock();
+
+	had_delayed_probe = per_cpu(delayed_probe, drvdata->cpu);
+
 	/*
 	 * The readers for etmdrvdata[] are CPU hotplug call backs
 	 * and PM notification call backs. Change etmdrvdata[i] on
@@ -2085,12 +2155,15 @@ static int __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
 	 * inside one call back function.
 	 */
 	if (smp_call_function_single(drvdata->cpu, clear_etmdrvdata, &drvdata->cpu, 1))
-		etmdrvdata[drvdata->cpu] = NULL;
+		clear_etmdrvdata(&drvdata->cpu);
 
 	cpus_read_unlock();
 
-	cscfg_unregister_csdev(drvdata->csdev);
-	coresight_unregister(drvdata->csdev);
+	if (!had_delayed_probe) {
+		etm_perf_symlink(drvdata->csdev, false);
+		cscfg_unregister_csdev(drvdata->csdev);
+		coresight_unregister(drvdata->csdev);
+	}
 
 	return 0;
 }