Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-12-23 04:34:11 +08:00
a94e502c22
The enter_freeze() callback is expected to do at least the same as enter(), but it must guarantee that interrupts are never enabled at any point in its execution, as the tick is frozen. CPUs execute ->enter_freeze with the local tick or entire timekeeping suspended, so it must not re-enable interrupts at any point (even temporarily) or attempt to change the states of clock event devices. It is called when the system goes into suspend-to-idle and reduces power usage because CPUs are not woken up for unnecessary IRQs (i.e. they are woken only by IRQs from "wakeup sources").

We can reuse the same code for both the enter() and enter_freeze() callbacks as long as they do not re-enable interrupts. Only the "coupled" cpuidle mechanism enables interrupts, and doing that with timekeeping suspended is generally not safe. Since this generic DT-based idle driver does not support "coupled" states, it is safe to assume that interrupts are not re-enabled.

This patch assigns enter_freeze to the same function as the enter callback, which helps save power without intermittent spurious wakeups from suspend-to-idle.

Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
Tested-by: Andy Gross <andy.gross@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
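The key requirement is an enter callback that never re-enables interrupts, so the same function can serve as both ->enter and ->enter_freeze. A minimal sketch of such a callback (the driver name is hypothetical; cpu_do_idle() is the arch-provided WFI helper on ARM, and real platform drivers typically call into firmware instead):

#include <linux/cpuidle.h>

static int foo_enter_idle(struct cpuidle_device *dev,
                          struct cpuidle_driver *drv, int index)
{
        /*
         * Interrupts stay disabled for the whole call: the WFI simply
         * returns when an interrupt becomes pending, which makes this
         * callback safe to reuse as enter_freeze() with the tick or
         * timekeeping suspended.
         */
        cpu_do_idle();
        return index;
}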
228 lines
6.5 KiB
C
/*
 * DT idle states parsing code.
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) "DT idle-states: " fmt

#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include "dt_idle_states.h"

static int init_state_node(struct cpuidle_state *idle_state,
                           const struct of_device_id *matches,
                           struct device_node *state_node)
{
        int err;
        const struct of_device_id *match_id;
        const char *desc;

        match_id = of_match_node(matches, state_node);
        if (!match_id)
                return -ENODEV;
        /*
         * CPUidle drivers are expected to initialize the const void *data
         * pointer of the passed in struct of_device_id array to the idle
         * state enter function.
         */
        idle_state->enter = match_id->data;
        /*
         * Since this is not a "coupled" state, it's safe to assume interrupts
         * won't be enabled when it exits allowing the tick to be frozen
         * safely. So enter() can be also enter_freeze() callback.
         */
        idle_state->enter_freeze = match_id->data;

        err = of_property_read_u32(state_node, "wakeup-latency-us",
                                   &idle_state->exit_latency);
        if (err) {
                u32 entry_latency, exit_latency;

                err = of_property_read_u32(state_node, "entry-latency-us",
                                           &entry_latency);
                if (err) {
                        pr_debug(" * %s missing entry-latency-us property\n",
                                 state_node->full_name);
                        return -EINVAL;
                }

                err = of_property_read_u32(state_node, "exit-latency-us",
                                           &exit_latency);
                if (err) {
                        pr_debug(" * %s missing exit-latency-us property\n",
                                 state_node->full_name);
                        return -EINVAL;
                }
                /*
                 * If wakeup-latency-us is missing, default to entry+exit
                 * latencies as defined in idle states bindings
                 */
                idle_state->exit_latency = entry_latency + exit_latency;
        }

        err = of_property_read_u32(state_node, "min-residency-us",
                                   &idle_state->target_residency);
        if (err) {
                pr_debug(" * %s missing min-residency-us property\n",
                         state_node->full_name);
                return -EINVAL;
        }

        err = of_property_read_string(state_node, "idle-state-name", &desc);
        if (err)
                desc = state_node->name;

        idle_state->flags = 0;
        if (of_property_read_bool(state_node, "local-timer-stop"))
                idle_state->flags |= CPUIDLE_FLAG_TIMER_STOP;
        /*
         * TODO:
         *      replace with kstrdup and pointer assignment when name
         *      and desc become string pointers
         */
        strncpy(idle_state->name, state_node->name, CPUIDLE_NAME_LEN - 1);
        strncpy(idle_state->desc, desc, CPUIDLE_DESC_LEN - 1);
        return 0;
}

/*
 * Check that the idle state is uniform across all CPUs in the CPUidle driver
 * cpumask
 */
static bool idle_state_valid(struct device_node *state_node, unsigned int idx,
                             const cpumask_t *cpumask)
{
        int cpu;
        struct device_node *cpu_node, *curr_state_node;
        bool valid = true;

        /*
         * Compare idle state phandles for index idx on all CPUs in the
         * CPUidle driver cpumask. Start from next logical cpu following
         * cpumask_first(cpumask) since that's the CPU state_node was
         * retrieved from. If a mismatch is found bail out straight
         * away since we certainly hit a firmware misconfiguration.
         */
        for (cpu = cpumask_next(cpumask_first(cpumask), cpumask);
             cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) {
                cpu_node = of_cpu_device_node_get(cpu);
                curr_state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
                                                   idx);
                if (state_node != curr_state_node)
                        valid = false;

                of_node_put(curr_state_node);
                of_node_put(cpu_node);
                if (!valid)
                        break;
        }

        return valid;
}

/**
 * dt_init_idle_driver() - Parse the DT idle states and initialize the
 *                         idle driver states array
 * @drv:        Pointer to CPU idle driver to be initialized
 * @matches:    Array of of_device_id match structures to search in for
 *              compatible idle state nodes. The data pointer for each valid
 *              struct of_device_id entry in the matches array must point to
 *              a function with the following signature, that corresponds to
 *              the CPUidle state enter function signature:
 *
 *              int (*)(struct cpuidle_device *dev,
 *                      struct cpuidle_driver *drv,
 *                      int index);
 *
 * @start_idx:  First idle state index to be initialized
 *
 * If DT idle states are detected and are valid the state count and states
 * array entries in the cpuidle driver are initialized accordingly starting
 * from index start_idx.
 *
 * Return: number of valid DT idle states parsed, <0 on failure
 */
int dt_init_idle_driver(struct cpuidle_driver *drv,
                        const struct of_device_id *matches,
                        unsigned int start_idx)
{
        struct cpuidle_state *idle_state;
        struct device_node *state_node, *cpu_node;
        int i, err = 0;
        const cpumask_t *cpumask;
        unsigned int state_idx = start_idx;

        if (state_idx >= CPUIDLE_STATE_MAX)
                return -EINVAL;
        /*
         * We get the idle states for the first logical cpu in the
         * driver mask (or cpu_possible_mask if the driver cpumask is not set)
         * and we check through idle_state_valid() if they are uniform
         * across CPUs, otherwise we hit a firmware misconfiguration.
         */
        cpumask = drv->cpumask ? : cpu_possible_mask;
        cpu_node = of_cpu_device_node_get(cpumask_first(cpumask));

        for (i = 0; ; i++) {
                state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
                if (!state_node)
                        break;

                if (!of_device_is_available(state_node))
                        continue;

                if (!idle_state_valid(state_node, i, cpumask)) {
                        pr_warn("%s idle state not valid, bailing out\n",
                                state_node->full_name);
                        err = -EINVAL;
                        break;
                }

                if (state_idx == CPUIDLE_STATE_MAX) {
                        pr_warn("State index reached static CPU idle driver states array size\n");
                        break;
                }

                idle_state = &drv->states[state_idx++];
                err = init_state_node(idle_state, matches, state_node);
                if (err) {
                        pr_err("Parsing idle state node %s failed with err %d\n",
                               state_node->full_name, err);
                        err = -EINVAL;
                        break;
                }
                of_node_put(state_node);
        }

        of_node_put(state_node);
        of_node_put(cpu_node);
        if (err)
                return err;
        /*
         * Update the driver state count only if some valid DT idle states
         * were detected
         */
        if (i)
                drv->state_count = state_idx;

        /*
         * Return the number of present and valid DT idle states, which can
         * also be 0 on platforms with missing DT idle states or legacy DT
         * configuration predating the DT idle states bindings.
         */
        return i;
}
EXPORT_SYMBOL_GPL(dt_init_idle_driver);
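As the kerneldoc above describes, a platform cpuidle driver supplies a match table whose ->data pointers are enter functions and lets dt_init_idle_driver() fill in the DT-defined states. A hypothetical sketch of that usage (the compatible string, driver name, and foo_enter_idle() are illustrative only, loosely following how the generic ARM cpuidle driver uses this helper; state 0 is assumed to be a driver-provided WFI state, hence start_idx = 1):

static const struct of_device_id foo_idle_state_match[] = {
        { .compatible = "foo,idle-state",
          .data = foo_enter_idle },    /* ->data is the state enter callback */
        { },
};

static struct cpuidle_driver foo_idle_driver = {
        .name = "foo_idle",
        .owner = THIS_MODULE,
        /* states[0] would be filled in by the driver, e.g. a plain WFI state. */
};

static int __init foo_idle_init(void)
{
        int ret;

        /* Parse DT idle states into drv->states[] starting at index 1. */
        ret = dt_init_idle_driver(&foo_idle_driver, foo_idle_state_match, 1);
        if (ret <= 0)
                return ret ? : -ENODEV;

        return cpuidle_register(&foo_idle_driver, NULL);
}
device_initcall(foo_idle_init);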