// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/thermal/cpufreq_cooling.c
 *
 * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
 *
 * Copyright (C) 2012-2018 Linaro Limited.
 *
 * Authors: Amit Daniel <amit.kachhap@linaro.org>
 *          Viresh Kumar <viresh.kumar@linaro.org>
 *
 */
|
2020-05-11 20:24:57 +08:00
|
|
|
#include <linux/cpu.h>
|
2012-08-16 19:41:40 +08:00
|
|
|
#include <linux/cpufreq.h>
|
2020-05-11 20:24:57 +08:00
|
|
|
#include <linux/cpu_cooling.h>
|
2021-03-14 19:13:30 +08:00
|
|
|
#include <linux/device.h>
|
2020-05-11 20:24:57 +08:00
|
|
|
#include <linux/energy_model.h>
|
2012-08-16 19:41:40 +08:00
|
|
|
#include <linux/err.h>
|
2020-05-11 20:24:58 +08:00
|
|
|
#include <linux/export.h>
|
2015-02-27 03:00:29 +08:00
|
|
|
#include <linux/pm_opp.h>
|
2019-07-23 14:14:02 +08:00
|
|
|
#include <linux/pm_qos.h>
|
2012-08-16 19:41:40 +08:00
|
|
|
#include <linux/slab.h>
|
2020-05-11 20:24:57 +08:00
|
|
|
#include <linux/thermal.h>
|
2022-07-07 15:15:52 +08:00
|
|
|
#include <linux/units.h>
|
2012-08-16 19:41:40 +08:00
|
|
|
|
2015-03-03 01:17:20 +08:00
|
|
|
#include <trace/events/thermal.h>
|
|
|
|
|
2014-12-04 12:11:49 +08:00
|
|
|
/*
 * Cooling state <-> CPUFreq frequency
 *
 * Cooling states are translated to frequencies throughout this driver and this
 * is the relation between them.
 *
 * Highest cooling state corresponds to lowest possible frequency.
 *
 * i.e.
 *	level 0 --> 1st Max Freq
 *	level 1 --> 2nd Max Freq
 *	...
 */
|
|
|
|
|
2017-04-25 18:27:20 +08:00
|
|
|
/**
 * struct time_in_idle - Idle time stats
 * @time: previous reading of the absolute time that this cpu was idle
 * @timestamp: wall time of the last invocation of get_cpu_idle_time_us()
 *
 * Per-CPU bookkeeping used by the !CONFIG_SMP implementation of get_load()
 * to compute the busy percentage between two successive samples.
 */
struct time_in_idle {
	u64 time;
	u64 timestamp;
};
|
|
|
|
|
2012-08-16 19:41:40 +08:00
|
|
|
/**
 * struct cpufreq_cooling_device - data for cooling device with cpufreq
 * @last_load: load measured by the latest call to cpufreq_get_requested_power()
 * @cpufreq_state: integer value representing the current state of cpufreq
 *	cooling devices.
 * @max_level: maximum cooling level. One less than total number of valid
 *	cpufreq frequencies.
 * @em: Reference on the Energy Model of the device
 * @policy: cpufreq policy.
 * @cooling_ops: cpufreq callbacks to thermal cooling device ops
 * @idle_time: idle time stats
 * @qos_req: PM QoS constraint to apply
 *
 * This structure is required for keeping information of each registered
 * cpufreq_cooling_device.
 */
struct cpufreq_cooling_device {
	u32 last_load;
	unsigned int cpufreq_state;
	unsigned int max_level;
	struct em_perf_domain *em;
	struct cpufreq_policy *policy;
	struct thermal_cooling_device_ops cooling_ops;
#ifndef CONFIG_SMP
	/* Only needed when load is derived from idle time (see get_load()). */
	struct time_in_idle *idle_time;
#endif
	struct freq_qos_request qos_req;
};
|
|
|
|
|
2019-10-30 23:14:50 +08:00
|
|
|
#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
|
2012-08-16 19:41:40 +08:00
|
|
|
/**
|
2014-12-04 12:12:07 +08:00
|
|
|
* get_level: Find the level for a particular frequency
|
2017-04-25 18:27:10 +08:00
|
|
|
* @cpufreq_cdev: cpufreq_cdev for which the property is required
|
2014-12-04 12:12:07 +08:00
|
|
|
* @freq: Frequency
|
2013-04-18 01:12:00 +08:00
|
|
|
*
|
2017-04-25 18:27:21 +08:00
|
|
|
* Return: level corresponding to the frequency.
|
2012-08-16 19:41:40 +08:00
|
|
|
*/
|
2017-04-25 18:27:10 +08:00
|
|
|
static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
|
2014-12-04 12:12:07 +08:00
|
|
|
unsigned int freq)
|
2012-08-16 19:41:40 +08:00
|
|
|
{
|
2019-10-30 23:14:51 +08:00
|
|
|
int i;
|
2014-01-02 11:57:48 +08:00
|
|
|
|
2019-10-30 23:14:51 +08:00
|
|
|
for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
|
|
|
|
if (freq > cpufreq_cdev->em->table[i].frequency)
|
2014-12-04 12:12:07 +08:00
|
|
|
break;
|
2015-02-27 03:00:29 +08:00
|
|
|
}
|
2012-08-16 19:41:40 +08:00
|
|
|
|
2019-10-30 23:14:51 +08:00
|
|
|
return cpufreq_cdev->max_level - i - 1;
|
2015-02-27 03:00:29 +08:00
|
|
|
}
|
|
|
|
|
2017-04-25 18:27:10 +08:00
|
|
|
/*
 * cpu_freq_to_power() - full-load power at the EM entry matching @freq.
 * @cpufreq_cdev: cooling device
 * @freq: frequency to look up
 *
 * Return: the Energy Model power for @freq, converted to milli-Watts.
 */
static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev,
			     u32 freq)
{
	unsigned long power_mw;
	int idx = cpufreq_cdev->max_level - 1;

	/* Find the first EM entry strictly below @freq, scanning downwards. */
	while (idx >= 0 && freq <= cpufreq_cdev->em->table[idx].frequency)
		idx--;

	/* EM power is stored in micro-Watts; callers expect milli-Watts. */
	power_mw = cpufreq_cdev->em->table[idx + 1].power;
	power_mw /= MICROWATT_PER_MILLIWATT;

	return power_mw;
}
|
|
|
|
|
2017-04-25 18:27:10 +08:00
|
|
|
/*
 * cpu_power_to_freq() - highest EM frequency consuming at most @power.
 * @cpufreq_cdev: cooling device
 * @power: power budget in milli-Watts
 *
 * Return: the largest EM frequency whose power does not exceed @power,
 * falling back to the lowest entry when none fits.
 */
static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
			     u32 power)
{
	int idx;

	for (idx = cpufreq_cdev->max_level; idx > 0; idx--) {
		/* Convert EM power to milli-Watts to make safe comparison */
		unsigned long em_power_mw =
			cpufreq_cdev->em->table[idx].power /
			MICROWATT_PER_MILLIWATT;

		if (power >= em_power_mw)
			break;
	}

	return cpufreq_cdev->em->table[idx].frequency;
}
|
|
|
|
|
|
|
|
/**
 * get_load() - get load for a cpu
 * @cpufreq_cdev: struct cpufreq_cooling_device for the cpu
 * @cpu: cpu number
 * @cpu_idx: index of the cpu in time_in_idle array
 *
 * Return: The average load of cpu @cpu in percentage since this
 * function was last called.
 */
#ifdef CONFIG_SMP
static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
		    int cpu_idx)
{
	/*
	 * On SMP the scheduler's effective CPU utilization is available and
	 * tracks load better than idle-time accounting; @cpu_idx is unused
	 * in this variant.
	 */
	unsigned long util = sched_cpu_util(cpu);

	/* Scale utilization (0..capacity) to a percentage. */
	return (util * 100) / arch_scale_cpu_capacity(cpu);
}
#else /* !CONFIG_SMP */
static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
		    int cpu_idx)
{
	u32 load;
	u64 now, now_idle, delta_time, delta_idle;
	struct time_in_idle *idle_time = &cpufreq_cdev->idle_time[cpu_idx];

	/* Sample idle time and wall time since the previous call. */
	now_idle = get_cpu_idle_time(cpu, &now, 0);
	delta_idle = now_idle - idle_time->time;
	delta_time = now - idle_time->timestamp;

	/* Load is the busy fraction of the elapsed window, in percent. */
	if (delta_time <= delta_idle)
		load = 0;
	else
		load = div64_u64(100 * (delta_time - delta_idle), delta_time);

	/* Remember this sample as the start of the next window. */
	idle_time->time = now_idle;
	idle_time->timestamp = now;

	return load;
}
#endif /* CONFIG_SMP */
|
2015-02-27 03:00:29 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* get_dynamic_power() - calculate the dynamic power
|
2017-04-25 18:27:10 +08:00
|
|
|
* @cpufreq_cdev: &cpufreq_cooling_device for this cdev
|
2015-02-27 03:00:29 +08:00
|
|
|
* @freq: current frequency
|
|
|
|
*
|
|
|
|
* Return: the dynamic power consumed by the cpus described by
|
2017-04-25 18:27:10 +08:00
|
|
|
* @cpufreq_cdev.
|
2015-02-27 03:00:29 +08:00
|
|
|
*/
|
2017-04-25 18:27:10 +08:00
|
|
|
static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_cdev,
|
2015-02-27 03:00:29 +08:00
|
|
|
unsigned long freq)
|
|
|
|
{
|
|
|
|
u32 raw_cpu_power;
|
|
|
|
|
2017-04-25 18:27:10 +08:00
|
|
|
raw_cpu_power = cpu_freq_to_power(cpufreq_cdev, freq);
|
|
|
|
return (raw_cpu_power * cpufreq_cdev->last_load) / 100;
|
2012-08-16 19:41:40 +08:00
|
|
|
}
|
|
|
|
|
2015-02-27 03:00:29 +08:00
|
|
|
/**
|
|
|
|
* cpufreq_get_requested_power() - get the current power
|
|
|
|
* @cdev: &thermal_cooling_device pointer
|
|
|
|
* @power: pointer in which to store the resulting power
|
|
|
|
*
|
|
|
|
* Calculate the current power consumption of the cpus in milliwatts
|
|
|
|
* and store it in @power. This function should actually calculate
|
|
|
|
* the requested power, but it's hard to get the frequency that
|
|
|
|
* cpufreq would have assigned if there were no thermal limits.
|
|
|
|
* Instead, we calculate the current power on the assumption that the
|
|
|
|
* immediate future will look like the immediate past.
|
|
|
|
*
|
|
|
|
* We use the current frequency and the average load since this
|
|
|
|
* function was last called. In reality, there could have been
|
|
|
|
* multiple opps since this function was last called and that affects
|
|
|
|
* the load calculation. While it's not perfectly accurate, this
|
|
|
|
* simplification is good enough and works. REVISIT this, as more
|
|
|
|
* complex code may be needed if experiments show that it's not
|
|
|
|
* accurate enough.
|
|
|
|
*
|
2022-06-13 20:43:26 +08:00
|
|
|
* Return: 0 on success, this function doesn't fail.
|
2015-02-27 03:00:29 +08:00
|
|
|
*/
|
|
|
|
static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
|
|
|
|
u32 *power)
|
|
|
|
{
|
|
|
|
unsigned long freq;
|
2017-12-05 13:32:46 +08:00
|
|
|
int i = 0, cpu;
|
|
|
|
u32 total_load = 0;
|
2017-04-25 18:27:10 +08:00
|
|
|
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
|
2017-04-25 18:27:18 +08:00
|
|
|
struct cpufreq_policy *policy = cpufreq_cdev->policy;
|
2015-02-27 03:00:29 +08:00
|
|
|
|
2017-04-25 18:27:18 +08:00
|
|
|
freq = cpufreq_quick_get(policy->cpu);
|
2015-02-27 03:00:29 +08:00
|
|
|
|
2017-04-25 18:27:18 +08:00
|
|
|
for_each_cpu(cpu, policy->related_cpus) {
|
2015-02-27 03:00:29 +08:00
|
|
|
u32 load;
|
|
|
|
|
|
|
|
if (cpu_online(cpu))
|
2017-04-25 18:27:10 +08:00
|
|
|
load = get_load(cpufreq_cdev, cpu, i);
|
2015-02-27 03:00:29 +08:00
|
|
|
else
|
|
|
|
load = 0;
|
|
|
|
|
|
|
|
total_load += load;
|
|
|
|
}
|
|
|
|
|
2017-04-25 18:27:10 +08:00
|
|
|
cpufreq_cdev->last_load = total_load;
|
2015-02-27 03:00:29 +08:00
|
|
|
|
2017-12-05 13:32:46 +08:00
|
|
|
*power = get_dynamic_power(cpufreq_cdev, freq);
|
2015-03-03 01:17:20 +08:00
|
|
|
|
2022-06-13 20:43:25 +08:00
|
|
|
trace_thermal_power_cpu_get_power_simple(policy->cpu, *power);
|
2015-02-27 03:00:29 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* cpufreq_state2power() - convert a cpu cdev state to power consumed
|
|
|
|
* @cdev: &thermal_cooling_device pointer
|
|
|
|
* @state: cooling device state to be converted
|
|
|
|
* @power: pointer in which to store the resulting power
|
|
|
|
*
|
|
|
|
* Convert cooling device state @state into power consumption in
|
|
|
|
* milliwatts assuming 100% load. Store the calculated power in
|
|
|
|
* @power.
|
|
|
|
*
|
2022-06-13 20:43:26 +08:00
|
|
|
* Return: 0 on success, -EINVAL if the cooling device state is bigger
|
|
|
|
* than maximum allowed.
|
2015-02-27 03:00:29 +08:00
|
|
|
*/
|
|
|
|
static int cpufreq_state2power(struct thermal_cooling_device *cdev,
|
|
|
|
unsigned long state, u32 *power)
|
|
|
|
{
|
2019-10-30 23:14:51 +08:00
|
|
|
unsigned int freq, num_cpus, idx;
|
2017-04-25 18:27:10 +08:00
|
|
|
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
|
2015-02-27 03:00:29 +08:00
|
|
|
|
2017-04-25 18:27:23 +08:00
|
|
|
/* Request state should be less than max_level */
|
2020-03-22 03:31:07 +08:00
|
|
|
if (state > cpufreq_cdev->max_level)
|
2017-04-25 18:27:23 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2017-04-25 18:27:18 +08:00
|
|
|
num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);
|
2015-02-27 03:00:29 +08:00
|
|
|
|
2019-10-30 23:14:51 +08:00
|
|
|
idx = cpufreq_cdev->max_level - state;
|
|
|
|
freq = cpufreq_cdev->em->table[idx].frequency;
|
2017-12-05 13:32:46 +08:00
|
|
|
*power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
|
2015-02-27 03:00:29 +08:00
|
|
|
|
2017-12-05 13:32:46 +08:00
|
|
|
return 0;
|
2015-02-27 03:00:29 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* cpufreq_power2state() - convert power to a cooling device state
|
|
|
|
* @cdev: &thermal_cooling_device pointer
|
|
|
|
* @power: power in milliwatts to be converted
|
|
|
|
* @state: pointer in which to store the resulting state
|
|
|
|
*
|
|
|
|
* Calculate a cooling device state for the cpus described by @cdev
|
|
|
|
* that would allow them to consume at most @power mW and store it in
|
|
|
|
* @state. Note that this calculation depends on external factors
|
2022-06-13 20:43:26 +08:00
|
|
|
* such as the CPUs load. Calling this function with the same power
|
|
|
|
* as input can yield different cooling device states depending on those
|
|
|
|
* external factors.
|
|
|
|
*
|
|
|
|
* Return: 0 on success, this function doesn't fail.
|
2015-02-27 03:00:29 +08:00
|
|
|
*/
|
|
|
|
static int cpufreq_power2state(struct thermal_cooling_device *cdev,
|
2020-09-14 15:11:01 +08:00
|
|
|
u32 power, unsigned long *state)
|
2015-02-27 03:00:29 +08:00
|
|
|
{
|
2019-02-18 14:22:30 +08:00
|
|
|
unsigned int target_freq;
|
2017-12-05 13:32:46 +08:00
|
|
|
u32 last_load, normalised_power;
|
2017-04-25 18:27:10 +08:00
|
|
|
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
|
2017-04-25 18:27:18 +08:00
|
|
|
struct cpufreq_policy *policy = cpufreq_cdev->policy;
|
2015-02-27 03:00:29 +08:00
|
|
|
|
2017-04-25 18:27:10 +08:00
|
|
|
last_load = cpufreq_cdev->last_load ?: 1;
|
2017-12-05 13:32:46 +08:00
|
|
|
normalised_power = (power * 100) / last_load;
|
2017-04-25 18:27:10 +08:00
|
|
|
target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);
|
2015-02-27 03:00:29 +08:00
|
|
|
|
2017-04-25 18:27:12 +08:00
|
|
|
*state = get_level(cpufreq_cdev, target_freq);
|
2017-04-25 18:27:18 +08:00
|
|
|
trace_thermal_power_cpu_limit(policy->related_cpus, target_freq, *state,
|
|
|
|
power);
|
2015-02-27 03:00:29 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2019-10-30 23:14:51 +08:00
|
|
|
|
|
|
|
static inline bool em_is_sane(struct cpufreq_cooling_device *cpufreq_cdev,
|
|
|
|
struct em_perf_domain *em) {
|
|
|
|
struct cpufreq_policy *policy;
|
|
|
|
unsigned int nr_levels;
|
|
|
|
|
2022-03-21 17:57:28 +08:00
|
|
|
if (!em || em_is_artificial(em))
|
2019-10-30 23:14:51 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
policy = cpufreq_cdev->policy;
|
2020-05-27 17:58:47 +08:00
|
|
|
if (!cpumask_equal(policy->related_cpus, em_span_cpus(em))) {
|
2019-10-30 23:14:51 +08:00
|
|
|
pr_err("The span of pd %*pbl is misaligned with cpufreq policy %*pbl\n",
|
2020-05-27 17:58:47 +08:00
|
|
|
cpumask_pr_args(em_span_cpus(em)),
|
2019-10-30 23:14:51 +08:00
|
|
|
cpumask_pr_args(policy->related_cpus));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
nr_levels = cpufreq_cdev->max_level + 1;
|
2020-05-27 17:58:47 +08:00
|
|
|
if (em_pd_nr_perf_states(em) != nr_levels) {
|
|
|
|
pr_err("The number of performance states in pd %*pbl (%u) doesn't match the number of cooling levels (%u)\n",
|
|
|
|
cpumask_pr_args(em_span_cpus(em)),
|
|
|
|
em_pd_nr_perf_states(em), nr_levels);
|
2019-10-30 23:14:51 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
2019-10-30 23:14:50 +08:00
|
|
|
#endif /* CONFIG_THERMAL_GOV_POWER_ALLOCATOR */
|
|
|
|
|
thermal: cpufreq_cooling: Reuse sched_cpu_util() for SMP platforms
Several parts of the kernel are already using the effective CPU
utilization (as seen by the scheduler) to get the current load on the
CPU, do the same here instead of depending on the idle time of the CPU,
which isn't that accurate comparatively.
This is also the right thing to do as it makes the cpufreq governor
(schedutil) align better with the cpufreq_cooling driver, as the power
requested by cpufreq_cooling governor will exactly match the next
frequency requested by the schedutil governor since they are both using
the same metric to calculate load.
This was tested on ARM Hikey6220 platform with hackbench, sysbench and
schbench. None of them showed any regression or significant
improvements. Schbench is the most important ones out of these as it
creates the scenario where the utilization numbers provide a better
estimate of the future.
Scenario 1: The CPUs were mostly idle in the previous polling window of
the IPA governor as the tasks were sleeping and here are the details
from traces (load is in %):
Old: thermal_power_cpu_get_power: cpus=00000000,000000ff freq=1200000 total_load=203 load={{0x35,0x1,0x0,0x31,0x0,0x0,0x64,0x0}} dynamic_power=1339
New: thermal_power_cpu_get_power: cpus=00000000,000000ff freq=1200000 total_load=600 load={{0x60,0x46,0x45,0x45,0x48,0x3b,0x61,0x44}} dynamic_power=3960
Here, the "Old" line gives the load and requested_power (dynamic_power
here) numbers calculated using the idle time based implementation, while
"New" is based on the CPU utilization from scheduler.
As can be clearly seen, the load and requested_power numbers are simply
incorrect in the idle time based approach and the numbers collected from
CPU's utilization are much closer to the reality.
Scenario 2: The CPUs were busy in the previous polling window of the IPA
governor:
Old: thermal_power_cpu_get_power: cpus=00000000,000000ff freq=1200000 total_load=800 load={{0x64,0x64,0x64,0x64,0x64,0x64,0x64,0x64}} dynamic_power=5280
New: thermal_power_cpu_get_power: cpus=00000000,000000ff freq=1200000 total_load=708 load={{0x4d,0x5c,0x5c,0x5b,0x5c,0x5c,0x51,0x5b}} dynamic_power=4672
As can be seen, the idle time based load is 100% for all the CPUs as it
took only the last window into account, but in reality the CPUs aren't
that loaded as shown by the utilization numbers.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Lukasz Luba <lukasz.luba@arm.com>
Link: https://lkml.kernel.org/r/9c255c83d78d58451abc06848001faef94c87a12.1607400596.git.viresh.kumar@linaro.org
2020-12-08 12:16:57 +08:00
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
static inline int allocate_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void free_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
static int allocate_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
|
|
|
|
{
|
|
|
|
unsigned int num_cpus = cpumask_weight(cpufreq_cdev->policy->related_cpus);
|
|
|
|
|
|
|
|
cpufreq_cdev->idle_time = kcalloc(num_cpus,
|
|
|
|
sizeof(*cpufreq_cdev->idle_time),
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!cpufreq_cdev->idle_time)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void free_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
|
|
|
|
{
|
|
|
|
kfree(cpufreq_cdev->idle_time);
|
|
|
|
cpufreq_cdev->idle_time = NULL;
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_SMP */
|
|
|
|
|
2019-10-30 23:14:51 +08:00
|
|
|
static unsigned int get_state_freq(struct cpufreq_cooling_device *cpufreq_cdev,
|
|
|
|
unsigned long state)
|
|
|
|
{
|
|
|
|
struct cpufreq_policy *policy;
|
|
|
|
unsigned long idx;
|
|
|
|
|
|
|
|
#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
|
|
|
|
/* Use the Energy Model table if available */
|
|
|
|
if (cpufreq_cdev->em) {
|
|
|
|
idx = cpufreq_cdev->max_level - state;
|
|
|
|
return cpufreq_cdev->em->table[idx].frequency;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Otherwise, fallback on the CPUFreq table */
|
|
|
|
policy = cpufreq_cdev->policy;
|
|
|
|
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
|
|
|
|
idx = cpufreq_cdev->max_level - state;
|
|
|
|
else
|
|
|
|
idx = state;
|
|
|
|
|
|
|
|
return policy->freq_table[idx].frequency;
|
|
|
|
}
|
|
|
|
|
2019-10-30 23:14:50 +08:00
|
|
|
/* cpufreq cooling device callback functions are defined below */
|
|
|
|
|
|
|
|
/**
|
|
|
|
* cpufreq_get_max_state - callback function to get the max cooling state.
|
|
|
|
* @cdev: thermal cooling device pointer.
|
|
|
|
* @state: fill this variable with the max cooling state.
|
|
|
|
*
|
|
|
|
* Callback for the thermal cooling device to return the cpufreq
|
|
|
|
* max cooling state.
|
|
|
|
*
|
2022-06-13 20:43:26 +08:00
|
|
|
* Return: 0 on success, this function doesn't fail.
|
2019-10-30 23:14:50 +08:00
|
|
|
*/
|
|
|
|
static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
|
|
|
|
unsigned long *state)
|
|
|
|
{
|
|
|
|
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
|
|
|
|
|
|
|
|
*state = cpufreq_cdev->max_level;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* cpufreq_get_cur_state - callback function to get the current cooling state.
|
|
|
|
* @cdev: thermal cooling device pointer.
|
|
|
|
* @state: fill this variable with the current cooling state.
|
|
|
|
*
|
|
|
|
* Callback for the thermal cooling device to return the cpufreq
|
|
|
|
* current cooling state.
|
|
|
|
*
|
2022-06-13 20:43:26 +08:00
|
|
|
* Return: 0 on success, this function doesn't fail.
|
2019-10-30 23:14:50 +08:00
|
|
|
*/
|
|
|
|
static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
|
|
|
|
unsigned long *state)
|
|
|
|
{
|
|
|
|
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
|
|
|
|
|
|
|
|
*state = cpufreq_cdev->cpufreq_state;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* cpufreq_set_cur_state - callback function to set the current cooling state.
|
|
|
|
* @cdev: thermal cooling device pointer.
|
|
|
|
* @state: set this variable to the current cooling state.
|
|
|
|
*
|
|
|
|
* Callback for the thermal cooling device to change the cpufreq
|
|
|
|
* current cooling state.
|
|
|
|
*
|
|
|
|
* Return: 0 on success, an error code otherwise.
|
|
|
|
*/
|
|
|
|
static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
|
|
|
|
unsigned long state)
|
|
|
|
{
|
|
|
|
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
|
2020-02-22 08:52:12 +08:00
|
|
|
struct cpumask *cpus;
|
|
|
|
unsigned int frequency;
|
|
|
|
int ret;
|
2019-10-30 23:14:50 +08:00
|
|
|
|
|
|
|
/* Request state should be less than max_level */
|
2020-03-22 03:31:07 +08:00
|
|
|
if (state > cpufreq_cdev->max_level)
|
2019-10-30 23:14:50 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* Check if the old cooling action is same as new cooling action */
|
|
|
|
if (cpufreq_cdev->cpufreq_state == state)
|
|
|
|
return 0;
|
|
|
|
|
2020-02-22 08:52:12 +08:00
|
|
|
frequency = get_state_freq(cpufreq_cdev, state);
|
|
|
|
|
|
|
|
ret = freq_qos_update_request(&cpufreq_cdev->qos_req, frequency);
|
2021-02-17 13:48:58 +08:00
|
|
|
if (ret >= 0) {
|
2020-11-06 17:22:43 +08:00
|
|
|
cpufreq_cdev->cpufreq_state = state;
|
2021-06-15 03:10:30 +08:00
|
|
|
cpus = cpufreq_cdev->policy->related_cpus;
|
2021-11-10 03:57:11 +08:00
|
|
|
arch_update_thermal_pressure(cpus, frequency);
|
2020-04-08 11:00:16 +08:00
|
|
|
ret = 0;
|
2020-02-22 08:52:12 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
2019-10-30 23:14:50 +08:00
|
|
|
}
|
2015-02-27 03:00:29 +08:00
|
|
|
|
2012-08-16 19:41:40 +08:00
|
|
|
/**
|
2013-09-13 07:26:45 +08:00
|
|
|
* __cpufreq_cooling_register - helper function to create cpufreq cooling device
|
|
|
|
* @np: a valid struct device_node to the cooling device device tree node
|
2017-04-25 18:27:14 +08:00
|
|
|
* @policy: cpufreq policy
|
2014-12-04 12:11:55 +08:00
|
|
|
* Normally this should be same as cpufreq policy->related_cpus.
|
2019-10-30 23:14:51 +08:00
|
|
|
* @em: Energy Model of the cpufreq policy
|
2013-04-18 01:12:15 +08:00
|
|
|
*
|
|
|
|
* This interface function registers the cpufreq cooling device with the name
|
2022-06-13 20:43:26 +08:00
|
|
|
* "cpufreq-%s". This API can support multiple instances of cpufreq
|
2013-09-13 07:26:45 +08:00
|
|
|
* cooling devices. It also gives the opportunity to link the cooling device
|
|
|
|
* with a device tree node, in order to bind it via the thermal DT code.
|
2013-04-18 01:12:15 +08:00
|
|
|
*
|
|
|
|
* Return: a valid struct thermal_cooling_device pointer on success,
|
|
|
|
* on failure, it returns a corresponding ERR_PTR().
|
2012-08-16 19:41:40 +08:00
|
|
|
*/
|
2013-09-13 07:26:45 +08:00
|
|
|
static struct thermal_cooling_device *
|
|
|
|
__cpufreq_cooling_register(struct device_node *np,
|
2019-10-30 23:14:51 +08:00
|
|
|
struct cpufreq_policy *policy,
|
|
|
|
struct em_perf_domain *em)
|
2012-08-16 19:41:40 +08:00
|
|
|
{
|
2017-04-25 18:27:11 +08:00
|
|
|
struct thermal_cooling_device *cdev;
|
2017-04-25 18:27:10 +08:00
|
|
|
struct cpufreq_cooling_device *cpufreq_cdev;
|
thermal: cpufreq_cooling: Reuse sched_cpu_util() for SMP platforms
Several parts of the kernel are already using the effective CPU
utilization (as seen by the scheduler) to get the current load on the
CPU, do the same here instead of depending on the idle time of the CPU,
which isn't that accurate comparatively.
This is also the right thing to do as it makes the cpufreq governor
(schedutil) align better with the cpufreq_cooling driver, as the power
requested by cpufreq_cooling governor will exactly match the next
frequency requested by the schedutil governor since they are both using
the same metric to calculate load.
This was tested on ARM Hikey6220 platform with hackbench, sysbench and
schbench. None of them showed any regression or significant
improvements. Schbench is the most important ones out of these as it
creates the scenario where the utilization numbers provide a better
estimate of the future.
Scenario 1: The CPUs were mostly idle in the previous polling window of
the IPA governor as the tasks were sleeping and here are the details
from traces (load is in %):
Old: thermal_power_cpu_get_power: cpus=00000000,000000ff freq=1200000 total_load=203 load={{0x35,0x1,0x0,0x31,0x0,0x0,0x64,0x0}} dynamic_power=1339
New: thermal_power_cpu_get_power: cpus=00000000,000000ff freq=1200000 total_load=600 load={{0x60,0x46,0x45,0x45,0x48,0x3b,0x61,0x44}} dynamic_power=3960
Here, the "Old" line gives the load and requested_power (dynamic_power
here) numbers calculated using the idle time based implementation, while
"New" is based on the CPU utilization from scheduler.
As can be clearly seen, the load and requested_power numbers are simply
incorrect in the idle time based approach and the numbers collected from
CPU's utilization are much closer to the reality.
Scenario 2: The CPUs were busy in the previous polling window of the IPA
governor:
Old: thermal_power_cpu_get_power: cpus=00000000,000000ff freq=1200000 total_load=800 load={{0x64,0x64,0x64,0x64,0x64,0x64,0x64,0x64}} dynamic_power=5280
New: thermal_power_cpu_get_power: cpus=00000000,000000ff freq=1200000 total_load=708 load={{0x4d,0x5c,0x5c,0x5b,0x5c,0x5c,0x51,0x5b}} dynamic_power=4672
As can be seen, the idle time based load is 100% for all the CPUs as it
took only the last window into account, but in reality the CPUs aren't
that loaded as shown by the utilization numbers.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Lukasz Luba <lukasz.luba@arm.com>
Link: https://lkml.kernel.org/r/9c255c83d78d58451abc06848001faef94c87a12.1607400596.git.viresh.kumar@linaro.org
2020-12-08 12:16:57 +08:00
|
|
|
unsigned int i;
|
2019-07-23 14:14:02 +08:00
|
|
|
struct device *dev;
|
2014-12-04 12:11:55 +08:00
|
|
|
int ret;
|
2016-08-17 23:14:59 +08:00
|
|
|
struct thermal_cooling_device_ops *cooling_ops;
|
2021-03-14 19:13:30 +08:00
|
|
|
char *name;
|
2019-07-23 14:14:02 +08:00
|
|
|
|
|
|
|
dev = get_cpu_device(policy->cpu);
|
|
|
|
if (unlikely(!dev)) {
|
|
|
|
pr_warn("No cpu device for cpu %d\n", policy->cpu);
|
|
|
|
return ERR_PTR(-ENODEV);
|
|
|
|
}
|
|
|
|
|
2017-04-25 18:27:14 +08:00
|
|
|
if (IS_ERR_OR_NULL(policy)) {
|
2017-10-24 15:50:39 +08:00
|
|
|
pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy);
|
2017-04-25 18:27:14 +08:00
|
|
|
return ERR_PTR(-EINVAL);
|
2016-06-03 13:28:47 +08:00
|
|
|
}
|
|
|
|
|
2017-04-25 18:27:15 +08:00
|
|
|
i = cpufreq_table_count_valid_entries(policy);
|
|
|
|
if (!i) {
|
|
|
|
pr_debug("%s: CPUFreq table not found or has no valid entries\n",
|
|
|
|
__func__);
|
2017-04-25 18:27:14 +08:00
|
|
|
return ERR_PTR(-ENODEV);
|
2012-08-16 19:41:40 +08:00
|
|
|
}
|
2014-12-04 12:11:43 +08:00
|
|
|
|
2017-04-25 18:27:10 +08:00
|
|
|
cpufreq_cdev = kzalloc(sizeof(*cpufreq_cdev), GFP_KERNEL);
|
2017-04-25 18:27:14 +08:00
|
|
|
if (!cpufreq_cdev)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
2012-08-16 19:41:40 +08:00
|
|
|
|
2017-04-25 18:27:16 +08:00
|
|
|
cpufreq_cdev->policy = policy;
|
thermal: cpufreq_cooling: Reuse sched_cpu_util() for SMP platforms
Several parts of the kernel are already using the effective CPU
utilization (as seen by the scheduler) to get the current load on the
CPU, do the same here instead of depending on the idle time of the CPU,
which isn't that accurate comparatively.
This is also the right thing to do as it makes the cpufreq governor
(schedutil) align better with the cpufreq_cooling driver, as the power
requested by cpufreq_cooling governor will exactly match the next
frequency requested by the schedutil governor since they are both using
the same metric to calculate load.
This was tested on ARM Hikey6220 platform with hackbench, sysbench and
schbench. None of them showed any regression or significant
improvements. Schbench is the most important ones out of these as it
creates the scenario where the utilization numbers provide a better
estimate of the future.
Scenario 1: The CPUs were mostly idle in the previous polling window of
the IPA governor as the tasks were sleeping and here are the details
from traces (load is in %):
Old: thermal_power_cpu_get_power: cpus=00000000,000000ff freq=1200000 total_load=203 load={{0x35,0x1,0x0,0x31,0x0,0x0,0x64,0x0}} dynamic_power=1339
New: thermal_power_cpu_get_power: cpus=00000000,000000ff freq=1200000 total_load=600 load={{0x60,0x46,0x45,0x45,0x48,0x3b,0x61,0x44}} dynamic_power=3960
Here, the "Old" line gives the load and requested_power (dynamic_power
here) numbers calculated using the idle time based implementation, while
"New" is based on the CPU utilization from scheduler.
As can be clearly seen, the load and requested_power numbers are simply
incorrect in the idle time based approach and the numbers collected from
CPU's utilization are much closer to the reality.
Scenario 2: The CPUs were busy in the previous polling window of the IPA
governor:
Old: thermal_power_cpu_get_power: cpus=00000000,000000ff freq=1200000 total_load=800 load={{0x64,0x64,0x64,0x64,0x64,0x64,0x64,0x64}} dynamic_power=5280
New: thermal_power_cpu_get_power: cpus=00000000,000000ff freq=1200000 total_load=708 load={{0x4d,0x5c,0x5c,0x5b,0x5c,0x5c,0x51,0x5b}} dynamic_power=4672
As can be seen, the idle time based load is 100% for all the CPUs as it
took only the last window into account, but in reality the CPUs aren't
that loaded as shown by the utilization numbers.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Lukasz Luba <lukasz.luba@arm.com>
Link: https://lkml.kernel.org/r/9c255c83d78d58451abc06848001faef94c87a12.1607400596.git.viresh.kumar@linaro.org
2020-12-08 12:16:57 +08:00
|
|
|
|
|
|
|
ret = allocate_idle_time(cpufreq_cdev);
|
|
|
|
if (ret) {
|
|
|
|
cdev = ERR_PTR(ret);
|
2015-02-27 03:00:29 +08:00
|
|
|
goto free_cdev;
|
|
|
|
}
|
|
|
|
|
2017-04-25 18:27:15 +08:00
|
|
|
/* max_level is an index, not a counter */
|
|
|
|
cpufreq_cdev->max_level = i - 1;
|
2014-12-04 12:12:02 +08:00
|
|
|
|
2022-06-13 20:43:24 +08:00
|
|
|
cooling_ops = &cpufreq_cdev->cooling_ops;
|
|
|
|
cooling_ops->get_max_state = cpufreq_get_max_state;
|
|
|
|
cooling_ops->get_cur_state = cpufreq_get_cur_state;
|
|
|
|
cooling_ops->set_cur_state = cpufreq_set_cur_state;
|
2019-10-30 23:14:50 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
|
2019-10-30 23:14:51 +08:00
|
|
|
if (em_is_sane(cpufreq_cdev, em)) {
|
|
|
|
cpufreq_cdev->em = em;
|
2019-10-30 23:14:50 +08:00
|
|
|
cooling_ops->get_requested_power = cpufreq_get_requested_power;
|
|
|
|
cooling_ops->state2power = cpufreq_state2power;
|
|
|
|
cooling_ops->power2state = cpufreq_power2state;
|
2019-10-30 23:14:51 +08:00
|
|
|
} else
|
2019-10-30 23:14:50 +08:00
|
|
|
#endif
|
2019-10-30 23:14:51 +08:00
|
|
|
if (policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED) {
|
|
|
|
pr_err("%s: unsorted frequency tables are not supported\n",
|
|
|
|
__func__);
|
|
|
|
cdev = ERR_PTR(-EINVAL);
|
2021-03-14 19:13:30 +08:00
|
|
|
goto free_idle_time;
|
2019-10-30 23:14:51 +08:00
|
|
|
}
|
2016-05-31 18:32:02 +08:00
|
|
|
|
cpufreq: Use per-policy frequency QoS
Replace the CPU device PM QoS used for the management of min and max
frequency constraints in cpufreq (and its users) with per-policy
frequency QoS to avoid problems with cpufreq policies covering
more then one CPU.
Namely, a cpufreq driver is registered with the subsys interface
which calls cpufreq_add_dev() for each CPU, starting from CPU0, so
currently the PM QoS notifiers are added to the first CPU in the
policy (i.e. CPU0 in the majority of cases).
In turn, when the cpufreq driver is unregistered, the subsys interface
doing that calls cpufreq_remove_dev() for each CPU, starting from CPU0,
and the PM QoS notifiers are only removed when cpufreq_remove_dev() is
called for the last CPU in the policy, say CPUx, which as a rule is
not CPU0 if the policy covers more than one CPU. Then, the PM QoS
notifiers cannot be removed, because CPUx does not have them, and
they are still there in the device PM QoS notifiers list of CPU0,
which prevents new PM QoS notifiers from being registered for CPU0
on the next attempt to register the cpufreq driver.
The same issue occurs when the first CPU in the policy goes offline
before unregistering the driver.
After this change it does not matter which CPU is the policy CPU at
the driver registration time and whether or not it is online all the
time, because the frequency QoS is per policy and not per CPU.
Fixes: 67d874c3b2c6 ("cpufreq: Register notifiers with the PM QoS framework")
Reported-by: Dmitry Osipenko <digetx@gmail.com>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Reported-by: Sudeep Holla <sudeep.holla@arm.com>
Tested-by: Sudeep Holla <sudeep.holla@arm.com>
Diagnosed-by: Viresh Kumar <viresh.kumar@linaro.org>
Link: https://lore.kernel.org/linux-pm/5ad2624194baa2f53acc1f1e627eb7684c577a19.1562210705.git.viresh.kumar@linaro.org/T/#md2d89e95906b8c91c15f582146173dce2e86e99f
Link: https://lore.kernel.org/linux-pm/20191017094612.6tbkwoq4harsjcqv@vireshk-i7/T/#m30d48cc23b9a80467fbaa16e30f90b3828a5a29b
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
2019-10-16 18:47:06 +08:00
|
|
|
ret = freq_qos_add_request(&policy->constraints,
|
|
|
|
&cpufreq_cdev->qos_req, FREQ_QOS_MAX,
|
2019-10-30 23:14:51 +08:00
|
|
|
get_state_freq(cpufreq_cdev, 0));
|
2019-07-23 14:14:02 +08:00
|
|
|
if (ret < 0) {
|
|
|
|
pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
|
|
|
|
ret);
|
|
|
|
cdev = ERR_PTR(ret);
|
2021-03-14 19:13:30 +08:00
|
|
|
goto free_idle_time;
|
2019-07-23 14:14:02 +08:00
|
|
|
}
|
|
|
|
|
2021-03-14 19:13:30 +08:00
|
|
|
cdev = ERR_PTR(-ENOMEM);
|
|
|
|
name = kasprintf(GFP_KERNEL, "cpufreq-%s", dev_name(dev));
|
|
|
|
if (!name)
|
|
|
|
goto remove_qos_req;
|
|
|
|
|
|
|
|
cdev = thermal_of_cooling_device_register(np, name, cpufreq_cdev,
|
2017-04-25 18:27:11 +08:00
|
|
|
cooling_ops);
|
2021-03-14 19:13:30 +08:00
|
|
|
kfree(name);
|
|
|
|
|
2017-04-25 18:27:11 +08:00
|
|
|
if (IS_ERR(cdev))
|
2019-07-23 14:14:02 +08:00
|
|
|
goto remove_qos_req;
|
2014-12-04 12:11:51 +08:00
|
|
|
|
2017-04-25 18:27:14 +08:00
|
|
|
return cdev;
|
2014-12-04 12:11:58 +08:00
|
|
|
|
2019-07-23 14:14:02 +08:00
|
|
|
remove_qos_req:
|
cpufreq: Use per-policy frequency QoS
Replace the CPU device PM QoS used for the management of min and max
frequency constraints in cpufreq (and its users) with per-policy
frequency QoS to avoid problems with cpufreq policies covering
more then one CPU.
Namely, a cpufreq driver is registered with the subsys interface
which calls cpufreq_add_dev() for each CPU, starting from CPU0, so
currently the PM QoS notifiers are added to the first CPU in the
policy (i.e. CPU0 in the majority of cases).
In turn, when the cpufreq driver is unregistered, the subsys interface
doing that calls cpufreq_remove_dev() for each CPU, starting from CPU0,
and the PM QoS notifiers are only removed when cpufreq_remove_dev() is
called for the last CPU in the policy, say CPUx, which as a rule is
not CPU0 if the policy covers more than one CPU. Then, the PM QoS
notifiers cannot be removed, because CPUx does not have them, and
they are still there in the device PM QoS notifiers list of CPU0,
which prevents new PM QoS notifiers from being registered for CPU0
on the next attempt to register the cpufreq driver.
The same issue occurs when the first CPU in the policy goes offline
before unregistering the driver.
After this change it does not matter which CPU is the policy CPU at
the driver registration time and whether or not it is online all the
time, because the frequency QoS is per policy and not per CPU.
Fixes: 67d874c3b2c6 ("cpufreq: Register notifiers with the PM QoS framework")
Reported-by: Dmitry Osipenko <digetx@gmail.com>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Reported-by: Sudeep Holla <sudeep.holla@arm.com>
Tested-by: Sudeep Holla <sudeep.holla@arm.com>
Diagnosed-by: Viresh Kumar <viresh.kumar@linaro.org>
Link: https://lore.kernel.org/linux-pm/5ad2624194baa2f53acc1f1e627eb7684c577a19.1562210705.git.viresh.kumar@linaro.org/T/#md2d89e95906b8c91c15f582146173dce2e86e99f
Link: https://lore.kernel.org/linux-pm/20191017094612.6tbkwoq4harsjcqv@vireshk-i7/T/#m30d48cc23b9a80467fbaa16e30f90b3828a5a29b
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
2019-10-16 18:47:06 +08:00
|
|
|
freq_qos_remove_request(&cpufreq_cdev->qos_req);
|
2017-04-25 18:27:20 +08:00
|
|
|
free_idle_time:
|
thermal: cpufreq_cooling: Reuse sched_cpu_util() for SMP platforms
Several parts of the kernel are already using the effective CPU
utilization (as seen by the scheduler) to get the current load on the
CPU, do the same here instead of depending on the idle time of the CPU,
which isn't that accurate comparatively.
This is also the right thing to do as it makes the cpufreq governor
(schedutil) align better with the cpufreq_cooling driver, as the power
requested by cpufreq_cooling governor will exactly match the next
frequency requested by the schedutil governor since they are both using
the same metric to calculate load.
This was tested on ARM Hikey6220 platform with hackbench, sysbench and
schbench. None of them showed any regression or significant
improvements. Schbench is the most important ones out of these as it
creates the scenario where the utilization numbers provide a better
estimate of the future.
Scenario 1: The CPUs were mostly idle in the previous polling window of
the IPA governor as the tasks were sleeping and here are the details
from traces (load is in %):
Old: thermal_power_cpu_get_power: cpus=00000000,000000ff freq=1200000 total_load=203 load={{0x35,0x1,0x0,0x31,0x0,0x0,0x64,0x0}} dynamic_power=1339
New: thermal_power_cpu_get_power: cpus=00000000,000000ff freq=1200000 total_load=600 load={{0x60,0x46,0x45,0x45,0x48,0x3b,0x61,0x44}} dynamic_power=3960
Here, the "Old" line gives the load and requested_power (dynamic_power
here) numbers calculated using the idle time based implementation, while
"New" is based on the CPU utilization from scheduler.
As can be clearly seen, the load and requested_power numbers are simply
incorrect in the idle time based approach and the numbers collected from
CPU's utilization are much closer to the reality.
Scenario 2: The CPUs were busy in the previous polling window of the IPA
governor:
Old: thermal_power_cpu_get_power: cpus=00000000,000000ff freq=1200000 total_load=800 load={{0x64,0x64,0x64,0x64,0x64,0x64,0x64,0x64}} dynamic_power=5280
New: thermal_power_cpu_get_power: cpus=00000000,000000ff freq=1200000 total_load=708 load={{0x4d,0x5c,0x5c,0x5b,0x5c,0x5c,0x51,0x5b}} dynamic_power=4672
As can be seen, the idle time based load is 100% for all the CPUs as it
took only the last window into account, but in reality the CPUs aren't
that loaded as shown by the utilization numbers.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Lukasz Luba <lukasz.luba@arm.com>
Link: https://lkml.kernel.org/r/9c255c83d78d58451abc06848001faef94c87a12.1607400596.git.viresh.kumar@linaro.org
2020-12-08 12:16:57 +08:00
|
|
|
free_idle_time(cpufreq_cdev);
|
2014-12-04 12:11:58 +08:00
|
|
|
free_cdev:
|
2017-04-25 18:27:10 +08:00
|
|
|
kfree(cpufreq_cdev);
|
2017-04-25 18:27:11 +08:00
|
|
|
return cdev;
|
2012-08-16 19:41:40 +08:00
|
|
|
}
/**
 * cpufreq_cooling_register - function to create cpufreq cooling device.
 * @policy: cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "cpufreq-%s". This API can support multiple instances of cpufreq cooling
 * devices.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
	/* No DT node and no Energy Model: plain table-based cooling only. */
	return __cpufreq_cooling_register(NULL, policy, NULL);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
2012-08-16 19:41:40 +08:00
|
|
|
|
2013-09-13 07:26:45 +08:00
|
|
|
/**
|
|
|
|
* of_cpufreq_cooling_register - function to create cpufreq cooling device.
|
2017-04-25 18:27:14 +08:00
|
|
|
* @policy: cpufreq policy
|
2013-09-13 07:26:45 +08:00
|
|
|
*
|
|
|
|
* This interface function registers the cpufreq cooling device with the name
|
2022-06-13 20:43:26 +08:00
|
|
|
* "cpufreq-%s". This API can support multiple instances of cpufreq cooling
|
|
|
|
* devices. Using this API, the cpufreq cooling device will be linked to the
|
|
|
|
* device tree node provided.
|
2013-09-13 07:26:45 +08:00
|
|
|
*
|
2015-02-27 03:00:29 +08:00
|
|
|
* Using this function, the cooling device will implement the power
|
2022-06-13 20:43:26 +08:00
|
|
|
* extensions by using the Energy Model (if present). The cpus must have
|
2015-02-27 03:00:29 +08:00
|
|
|
* registered their OPPs using the OPP library.
|
|
|
|
*
|
|
|
|
* Return: a valid struct thermal_cooling_device pointer on success,
|
2017-12-05 13:32:43 +08:00
|
|
|
* and NULL on failure.
|
2015-02-27 03:00:29 +08:00
|
|
|
*/
|
|
|
|
struct thermal_cooling_device *
|
2017-12-05 13:32:45 +08:00
|
|
|
of_cpufreq_cooling_register(struct cpufreq_policy *policy)
|
2015-02-27 03:00:29 +08:00
|
|
|
{
|
2017-12-05 13:32:43 +08:00
|
|
|
struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
|
|
|
|
struct thermal_cooling_device *cdev = NULL;
|
|
|
|
|
|
|
|
if (!np) {
|
2019-12-20 06:53:17 +08:00
|
|
|
pr_err("cpufreq_cooling: OF node not available for cpu%d\n",
|
2017-12-05 13:32:43 +08:00
|
|
|
policy->cpu);
|
|
|
|
return NULL;
|
|
|
|
}
|
2015-02-27 03:00:29 +08:00
|
|
|
|
2017-12-05 13:32:43 +08:00
|
|
|
if (of_find_property(np, "#cooling-cells", NULL)) {
|
2019-10-30 23:14:51 +08:00
|
|
|
struct em_perf_domain *em = em_cpu_get(policy->cpu);
|
2017-12-05 13:32:43 +08:00
|
|
|
|
2019-10-30 23:14:51 +08:00
|
|
|
cdev = __cpufreq_cooling_register(np, policy, em);
|
2017-12-05 13:32:43 +08:00
|
|
|
if (IS_ERR(cdev)) {
|
2019-12-20 06:53:17 +08:00
|
|
|
pr_err("cpufreq_cooling: cpu%d failed to register as cooling device: %ld\n",
|
2017-12-05 13:32:43 +08:00
|
|
|
policy->cpu, PTR_ERR(cdev));
|
|
|
|
cdev = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
of_node_put(np);
|
|
|
|
return cdev;
|
2015-02-27 03:00:29 +08:00
|
|
|
}
|
2017-12-05 13:32:45 +08:00
|
|
|
EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
|
2015-02-27 03:00:29 +08:00
|
|
|
|
2012-08-16 19:41:40 +08:00
|
|
|
/**
|
|
|
|
* cpufreq_cooling_unregister - function to remove cpufreq cooling device.
|
|
|
|
* @cdev: thermal cooling device pointer.
|
2013-04-18 01:12:16 +08:00
|
|
|
*
|
2022-06-13 20:43:26 +08:00
|
|
|
* This interface function unregisters the "cpufreq-%x" cooling device.
|
2012-08-16 19:41:40 +08:00
|
|
|
*/
|
|
|
|
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
|
|
|
|
{
|
2017-04-25 18:27:10 +08:00
|
|
|
struct cpufreq_cooling_device *cpufreq_cdev;
|
2012-08-16 19:41:40 +08:00
|
|
|
|
2013-08-15 22:54:46 +08:00
|
|
|
if (!cdev)
|
|
|
|
return;
|
|
|
|
|
2017-04-25 18:27:10 +08:00
|
|
|
cpufreq_cdev = cdev->devdata;
|
2012-08-16 19:41:40 +08:00
|
|
|
|
2019-04-28 17:51:05 +08:00
|
|
|
thermal_cooling_device_unregister(cdev);
|
cpufreq: Use per-policy frequency QoS
Replace the CPU device PM QoS used for the management of min and max
frequency constraints in cpufreq (and its users) with per-policy
frequency QoS to avoid problems with cpufreq policies covering
more then one CPU.
Namely, a cpufreq driver is registered with the subsys interface
which calls cpufreq_add_dev() for each CPU, starting from CPU0, so
currently the PM QoS notifiers are added to the first CPU in the
policy (i.e. CPU0 in the majority of cases).
In turn, when the cpufreq driver is unregistered, the subsys interface
doing that calls cpufreq_remove_dev() for each CPU, starting from CPU0,
and the PM QoS notifiers are only removed when cpufreq_remove_dev() is
called for the last CPU in the policy, say CPUx, which as a rule is
not CPU0 if the policy covers more than one CPU. Then, the PM QoS
notifiers cannot be removed, because CPUx does not have them, and
they are still there in the device PM QoS notifiers list of CPU0,
which prevents new PM QoS notifiers from being registered for CPU0
on the next attempt to register the cpufreq driver.
The same issue occurs when the first CPU in the policy goes offline
before unregistering the driver.
After this change it does not matter which CPU is the policy CPU at
the driver registration time and whether or not it is online all the
time, because the frequency QoS is per policy and not per CPU.
Fixes: 67d874c3b2c6 ("cpufreq: Register notifiers with the PM QoS framework")
Reported-by: Dmitry Osipenko <digetx@gmail.com>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Reported-by: Sudeep Holla <sudeep.holla@arm.com>
Tested-by: Sudeep Holla <sudeep.holla@arm.com>
Diagnosed-by: Viresh Kumar <viresh.kumar@linaro.org>
Link: https://lore.kernel.org/linux-pm/5ad2624194baa2f53acc1f1e627eb7684c577a19.1562210705.git.viresh.kumar@linaro.org/T/#md2d89e95906b8c91c15f582146173dce2e86e99f
Link: https://lore.kernel.org/linux-pm/20191017094612.6tbkwoq4harsjcqv@vireshk-i7/T/#m30d48cc23b9a80467fbaa16e30f90b3828a5a29b
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
2019-10-16 18:47:06 +08:00
|
|
|
freq_qos_remove_request(&cpufreq_cdev->qos_req);
|
thermal: cpufreq_cooling: Reuse sched_cpu_util() for SMP platforms
Several parts of the kernel are already using the effective CPU
utilization (as seen by the scheduler) to get the current load on the
CPU, do the same here instead of depending on the idle time of the CPU,
which isn't that accurate comparatively.
This is also the right thing to do as it makes the cpufreq governor
(schedutil) align better with the cpufreq_cooling driver, as the power
requested by cpufreq_cooling governor will exactly match the next
frequency requested by the schedutil governor since they are both using
the same metric to calculate load.
This was tested on ARM Hikey6220 platform with hackbench, sysbench and
schbench. None of them showed any regression or significant
improvements. Schbench is the most important ones out of these as it
creates the scenario where the utilization numbers provide a better
estimate of the future.
Scenario 1: The CPUs were mostly idle in the previous polling window of
the IPA governor as the tasks were sleeping and here are the details
from traces (load is in %):
Old: thermal_power_cpu_get_power: cpus=00000000,000000ff freq=1200000 total_load=203 load={{0x35,0x1,0x0,0x31,0x0,0x0,0x64,0x0}} dynamic_power=1339
New: thermal_power_cpu_get_power: cpus=00000000,000000ff freq=1200000 total_load=600 load={{0x60,0x46,0x45,0x45,0x48,0x3b,0x61,0x44}} dynamic_power=3960
Here, the "Old" line gives the load and requested_power (dynamic_power
here) numbers calculated using the idle time based implementation, while
"New" is based on the CPU utilization from scheduler.
As can be clearly seen, the load and requested_power numbers are simply
incorrect in the idle time based approach and the numbers collected from
CPU's utilization are much closer to the reality.
Scenario 2: The CPUs were busy in the previous polling window of the IPA
governor:
Old: thermal_power_cpu_get_power: cpus=00000000,000000ff freq=1200000 total_load=800 load={{0x64,0x64,0x64,0x64,0x64,0x64,0x64,0x64}} dynamic_power=5280
New: thermal_power_cpu_get_power: cpus=00000000,000000ff freq=1200000 total_load=708 load={{0x4d,0x5c,0x5c,0x5b,0x5c,0x5c,0x51,0x5b}} dynamic_power=4672
As can be seen, the idle time based load is 100% for all the CPUs as it
took only the last window into account, but in reality the CPUs aren't
that loaded as shown by the utilization numbers.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Lukasz Luba <lukasz.luba@arm.com>
Link: https://lkml.kernel.org/r/9c255c83d78d58451abc06848001faef94c87a12.1607400596.git.viresh.kumar@linaro.org
2020-12-08 12:16:57 +08:00
|
|
|
free_idle_time(cpufreq_cdev);
|
2017-04-25 18:27:10 +08:00
|
|
|
kfree(cpufreq_cdev);
|
2012-08-16 19:41:40 +08:00
|
|
|
}
|
2013-04-18 01:11:57 +08:00
|
|
|
EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
|