// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARM64 CPU idle arch support
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 */
2016-07-20 01:52:58 +08:00
|
|
|
#include <linux/acpi.h>
|
|
|
|
#include <linux/cpuidle.h>
|
|
|
|
#include <linux/cpu_pm.h>
|
2014-07-17 17:30:07 +08:00
|
|
|
#include <linux/of.h>
|
|
|
|
#include <linux/of_device.h>
|
2019-08-09 19:03:11 +08:00
|
|
|
#include <linux/psci.h>
|
2014-07-17 17:30:07 +08:00
|
|
|
|
|
|
|
#include <asm/cpuidle.h>
|
|
|
|
#include <asm/cpu_ops.h>
|
|
|
|
|
2016-07-20 01:52:55 +08:00
|
|
|
int arm_cpuidle_init(unsigned int cpu)
|
2014-07-17 17:30:07 +08:00
|
|
|
{
|
|
|
|
int ret = -EOPNOTSUPP;
|
|
|
|
|
arm64: cpuidle: make arm_cpuidle_suspend() a bit more efficient
Currently, we check two pointers: cpu_ops and cpu_suspend on every idle
state entry. These pointers check can be avoided:
If cpu_ops has not been registered, arm_cpuidle_init() will return
-EOPNOTSUPP, so arm_cpuidle_suspend() will never have chance to
run. In other word, the cpu_ops check can be avoid.
Similarly, the cpu_suspend check could be avoided in this hot path by
moving it into arm_cpuidle_init().
I measured the 4096 * time from arm_cpuidle_suspend entry point to the
cpu_psci_cpu_suspend entry point. HW platform is Marvell BG4CT STB
board.
1. only one shell, no other process, hot-unplug secondary cpus, execute
the following cmd
while true
do
sleep 0.2
done
before the patch: 1581220ns
after the patch: 1579630ns
reduced by 0.1%
2. only one shell, no other process, hot-unplug secondary cpus, execute
the following cmd
while true
do
md5sum /tmp/testfile
sleep 0.2
done
NOTE: the testfile size should be larger than L1+L2 cache size
before the patch: 1961960ns
after the patch: 1912500ns
reduced by 2.5%
So the more complex the system load, the bigger the improvement.
Signed-off-by: Jisheng Zhang <jszhang@marvell.com>
Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
2016-03-25 11:08:55 +08:00
|
|
|
if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_suspend &&
|
|
|
|
cpu_ops[cpu]->cpu_init_idle)
|
2015-05-13 21:12:46 +08:00
|
|
|
ret = cpu_ops[cpu]->cpu_init_idle(cpu);
|
2014-07-17 17:30:07 +08:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2015-01-27 02:33:44 +08:00
|
|
|
|
|
|
|
/**
|
2017-02-17 22:25:08 +08:00
|
|
|
* arm_cpuidle_suspend() - function to enter a low-power idle state
|
2015-01-27 02:33:44 +08:00
|
|
|
* @arg: argument to pass to CPU suspend operations
|
|
|
|
*
|
|
|
|
* Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
|
|
|
|
* operations back-end error code otherwise.
|
|
|
|
*/
|
2015-06-18 22:41:32 +08:00
|
|
|
int arm_cpuidle_suspend(int index)
|
2015-01-27 02:33:44 +08:00
|
|
|
{
|
|
|
|
int cpu = smp_processor_id();
|
|
|
|
|
2015-06-18 22:41:32 +08:00
|
|
|
return cpu_ops[cpu]->cpu_suspend(index);
|
2015-01-27 02:33:44 +08:00
|
|
|
}
|
2016-07-20 01:52:58 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_ACPI

#include <acpi/processor.h>

#define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags))

/*
 * Validate the ACPI LPI states of @cpu against the PSCI back-end.
 *
 * Returns 0 when every LPI entry (lpi_states[0] excluded) carries a valid
 * PSCI power_state, -EOPNOTSUPP when PSCI suspend is unavailable, -EINVAL
 * for a missing processor object or invalid state, -ENODEV when no LPI
 * states beyond entry 0 exist.
 */
static int psci_acpi_cpu_init_idle(unsigned int cpu)
{
	struct acpi_processor *pr = per_cpu(processors, cpu);
	int idx, nr_states;

	/*
	 * If the PSCI cpu_suspend function hook has not been initialized
	 * idle states must not be enabled, so bail out
	 */
	if (!psci_ops.cpu_suspend)
		return -EOPNOTSUPP;

	if (unlikely(!pr || !pr->flags.has_lpi))
		return -EINVAL;

	/* lpi_states[0] is skipped by the validation loop below. */
	nr_states = pr->power.count - 1;
	if (nr_states <= 0)
		return -ENODEV;

	for (idx = 0; idx < nr_states; idx++) {
		struct acpi_lpi_state *lpi = &pr->power.lpi_states[idx + 1];
		/*
		 * Only bits[31:0] represent a PSCI power_state while
		 * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification
		 */
		u32 state = lpi->address;

		if (!psci_power_state_is_valid(state)) {
			pr_warn("Invalid PSCI power state %#x\n", state);
			return -EINVAL;
		}
	}

	return 0;
}
|
|
|
|
|
2016-07-20 01:52:58 +08:00
|
|
|
/* ACPI FFH probe hook: defer LPI validation for @cpu to the PSCI back-end. */
int acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
	return psci_acpi_cpu_init_idle(cpu);
}
|
|
|
|
|
|
|
|
int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
|
|
|
|
{
|
2019-08-09 19:03:12 +08:00
|
|
|
u32 state = lpi->address;
|
|
|
|
|
2017-11-16 01:11:50 +08:00
|
|
|
if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags))
|
2019-08-09 19:03:12 +08:00
|
|
|
return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(psci_cpu_suspend_enter,
|
|
|
|
lpi->index, state);
|
2017-11-16 01:11:50 +08:00
|
|
|
else
|
2019-08-09 19:03:12 +08:00
|
|
|
return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
|
|
|
|
lpi->index, state);
|
2016-07-20 01:52:58 +08:00
|
|
|
}
|
|
|
|
#endif
|