Merge branches 'pm-cpufreq' and 'pm-sleep'

* pm-cpufreq:
  cpufreq: cpufreq-dt: Restore default cpumask_setall(policy->cpus)
  cpufreq: cpufreq-dt: disable unsupported OPPs

* pm-sleep:
  PM / Sleep: fix recovery during resuming from hibernation
  PM / Sleep: fix async suspend_late/freeze_late error handling
Rafael J. Wysocki 2014-10-31 22:25:07 +01:00
commit 7d627280b3
3 changed files with 51 additions and 27 deletions

drivers/base/power/main.c

@@ -1266,6 +1266,8 @@ int dpm_suspend_late(pm_message_t state)
 	}
 	mutex_unlock(&dpm_list_mtx);
 	async_synchronize_full();
+	if (!error)
+		error = async_error;
 	if (error) {
 		suspend_stats.failed_suspend_late++;
 		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
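
The pm-sleep fix above folds errors recorded by async suspend_late/freeze_late callbacks into the synchronous return value; before it, a failure reported only through async_error was silently dropped. A rough userspace analog of the pattern, with pthreads standing in for the kernel's async machinery (every name below is illustrative, not kernel API):

/*
 * Worker threads record the first failure in a shared async_error;
 * after the join (the analog of async_synchronize_full()), the caller
 * must fold async_error into its own error, or async failures are
 * silently lost. That is exactly what the two added lines do.
 */
#include <pthread.h>
#include <stdio.h>

static int async_error;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void *suspend_late_worker(void *arg)
{
	int err = (int)(long)arg;	/* pretend this device failed */

	if (err) {
		pthread_mutex_lock(&lock);
		if (!async_error)	/* keep only the first error */
			async_error = err;
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t[3];
	int error = 0;
	long i;

	for (i = 0; i < 3; i++)
		pthread_create(&t[i], NULL, suspend_late_worker,
			       (void *)(i == 1 ? -5L : 0L));
	for (i = 0; i < 3; i++)
		pthread_join(t[i], NULL);	/* ~ async_synchronize_full() */

	if (!error)				/* the added check */
		error = async_error;

	printf("error = %d\n", error);		/* prints -5, not 0 */
	return 0;
}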

drivers/cpufreq/cpufreq-dt.c

@@ -187,6 +187,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	struct device *cpu_dev;
 	struct regulator *cpu_reg;
 	struct clk *cpu_clk;
+	unsigned long min_uV = ~0, max_uV = 0;
 	unsigned int transition_latency;
 	int ret;
 
@@ -206,16 +207,10 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	/* OPPs might be populated at runtime, don't check for error here */
 	of_init_opp_table(cpu_dev);
 
-	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
-	if (ret) {
-		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
-		goto out_put_node;
-	}
-
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv) {
 		ret = -ENOMEM;
-		goto out_free_table;
+		goto out_put_node;
 	}
 
 	of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
@@ -224,30 +219,51 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	transition_latency = CPUFREQ_ETERNAL;
 
 	if (!IS_ERR(cpu_reg)) {
-		struct dev_pm_opp *opp;
-		unsigned long min_uV, max_uV;
-		int i;
+		unsigned long opp_freq = 0;
 
 		/*
-		 * OPP is maintained in order of increasing frequency, and
-		 * freq_table initialised from OPP is therefore sorted in the
-		 * same order.
+		 * Disable any OPPs where the connected regulator isn't able to
+		 * provide the specified voltage and record minimum and maximum
+		 * voltage levels.
 		 */
-		for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
-			;
-		rcu_read_lock();
-		opp = dev_pm_opp_find_freq_exact(cpu_dev,
-				freq_table[0].frequency * 1000, true);
-		min_uV = dev_pm_opp_get_voltage(opp);
-		opp = dev_pm_opp_find_freq_exact(cpu_dev,
-				freq_table[i-1].frequency * 1000, true);
-		max_uV = dev_pm_opp_get_voltage(opp);
-		rcu_read_unlock();
+		while (1) {
+			struct dev_pm_opp *opp;
+			unsigned long opp_uV, tol_uV;
+
+			rcu_read_lock();
+			opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq);
+			if (IS_ERR(opp)) {
+				rcu_read_unlock();
+				break;
+			}
+			opp_uV = dev_pm_opp_get_voltage(opp);
+			rcu_read_unlock();
+
+			tol_uV = opp_uV * priv->voltage_tolerance / 100;
+			if (regulator_is_supported_voltage(cpu_reg, opp_uV,
+							   opp_uV + tol_uV)) {
+				if (opp_uV < min_uV)
+					min_uV = opp_uV;
+				if (opp_uV > max_uV)
+					max_uV = opp_uV;
+			} else {
+				dev_pm_opp_disable(cpu_dev, opp_freq);
+			}
+
+			opp_freq++;
+		}
+
 		ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
 		if (ret > 0)
 			transition_latency += ret * 1000;
 	}
 
+	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+	if (ret) {
+		pr_err("failed to init cpufreq table: %d\n", ret);
+		goto out_free_priv;
+	}
+
 	/*
 	 * For now, just loading the cooling device;
 	 * thermal DT code takes care of matching them.
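
The new loop walks every OPP in ascending frequency order via dev_pm_opp_find_freq_ceil(), disables entries the regulator cannot supply, and tracks the minimum and maximum voltage across those that remain; only then is the cpufreq table built, so disabled OPPs never reach it. A minimal userspace sketch of that walk, assuming a sorted table and a hypothetical find_ceil() helper in place of the kernel API:

/*
 * find_ceil() mimics dev_pm_opp_find_freq_ceil(): it returns the first
 * enabled entry at or above *freq and writes that entry's frequency
 * back, so bumping the frequency by one each pass visits every entry
 * in ascending order. All names here are illustrative.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct opp { unsigned long freq_khz, volt_uV; bool enabled; };

static struct opp table[] = {
	{  500000,  900000, true },
	{ 1000000, 1100000, true },
	{ 1500000, 1350000, true },	/* too high for this regulator */
};

static struct opp *find_ceil(unsigned long *freq)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].enabled && table[i].freq_khz >= *freq) {
			*freq = table[i].freq_khz;
			return &table[i];
		}
	return NULL;
}

/* stand-in for regulator_is_supported_voltage(): range 0.8 V - 1.2 V */
static bool reg_supports(unsigned long min_uV, unsigned long max_uV)
{
	return min_uV <= 1200000 && max_uV >= 800000;
}

int main(void)
{
	unsigned long min_uV = ~0UL, max_uV = 0, freq = 0, tolerance = 5;
	struct opp *opp;

	while ((opp = find_ceil(&freq))) {
		unsigned long tol_uV = opp->volt_uV * tolerance / 100;

		if (reg_supports(opp->volt_uV, opp->volt_uV + tol_uV)) {
			if (opp->volt_uV < min_uV)
				min_uV = opp->volt_uV;
			if (opp->volt_uV > max_uV)
				max_uV = opp->volt_uV;
		} else {
			opp->enabled = false;	/* ~ dev_pm_opp_disable() */
		}
		freq++;
	}
	/* prints: min 900000 uV, max 1100000 uV */
	printf("min %lu uV, max %lu uV\n", min_uV, max_uV);
	return 0;
}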
@@ -277,7 +293,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.transition_latency = transition_latency;
 
 	pd = cpufreq_get_driver_data();
-	if (pd && !pd->independent_clocks)
+	if (!pd || !pd->independent_clocks)
 		cpumask_setall(policy->cpus);
 
 	of_node_put(np);
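
This one-line change flips the default: with no platform data registered (pd == NULL), the old test skipped cpumask_setall(), so platforms where all CPUs share one clock were wrongly treated as having per-CPU clocks. A tiny standalone demo of the two predicates (the struct name comes from the driver; the rest is hypothetical):

#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

struct cpufreq_dt_platform_data { bool independent_clocks; };

int main(void)
{
	/* the common case: no driver data supplied at all */
	struct cpufreq_dt_platform_data *pd = NULL;

	printf("old: %d\n", pd && !pd->independent_clocks);	/* 0: bug  */
	printf("new: %d\n", !pd || !pd->independent_clocks);	/* 1: fix  */
	return 0;
}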
@@ -286,9 +302,9 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 
 out_cooling_unregister:
 	cpufreq_cooling_unregister(priv->cdev);
-	kfree(priv);
-out_free_table:
 	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_free_priv:
+	kfree(priv);
 out_put_node:
 	of_node_put(np);
 out_put_reg_clk:
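
The error labels are reordered to match the new acquisition order: freq_table is now created after priv, so it must be torn down before priv on the unwind path. A generic sketch of this goto-unwind idiom, with illustrative names:

#include <stdlib.h>

static int init(void)
{
	char *priv, *table;

	priv = malloc(16);		/* acquired first */
	if (!priv)
		goto out;

	table = malloc(16);		/* acquired second, like freq_table */
	if (!table)
		goto out_free_priv;	/* only priv exists at this point */

	/* ... a later failure frees table first, then priv ... */

	free(table);
	free(priv);
	return 0;

out_free_priv:
	free(priv);
out:
	return -1;
}

int main(void)
{
	return init() ? EXIT_FAILURE : EXIT_SUCCESS;
}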

kernel/power/hibernate.c

@@ -502,8 +502,14 @@ int hibernation_restore(int platform_mode)
 	error = dpm_suspend_start(PMSG_QUIESCE);
 	if (!error) {
 		error = resume_target_kernel(platform_mode);
-		dpm_resume_end(PMSG_RECOVER);
+		/*
+		 * The above should either succeed and jump to the new kernel,
+		 * or return with an error. Otherwise things are just
+		 * undefined, so let's be paranoid.
+		 */
+		BUG_ON(!error);
 	}
+	dpm_resume_end(PMSG_RECOVER);
 	pm_restore_gfp_mask();
 	resume_console();
 	pm_restore_console();
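
The hibernation fix moves dpm_resume_end() out of the if (!error) block: if dpm_suspend_start() itself fails, devices still need to be resumed, and if resume_target_kernel() returns at all it must have failed, hence the BUG_ON(!error). A minimal control-flow sketch of that shape, where setup(), try_jump() and cleanup() are hypothetical stand-ins for the three PM calls:

#include <stdio.h>

static int setup(void)    { return -1; }	/* simulate a failure */
static int try_jump(void) { return -2; }	/* returning at all means failure */
static void cleanup(void) { puts("devices resumed"); }

int main(void)
{
	int error = setup();

	if (!error)
		error = try_jump();
	cleanup();	/* previously skipped whenever setup() failed */
	return error ? 1 : 0;
}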