Merge branch 'pm-cpufreq'
* pm-cpufreq: (21 commits)
  cpufreq: ondemand: update sampling rate only on right CPUs
  cpufreq: SPEAr: Add CPUFreq driver
  cpufreq: governors: Fix jiffies/cputime mixup (revisited)
  cpufreq: ondemand: fix wrong delay sampling rate
  cpufreq: exynos: Use static for functions used in only this file
  cpufreq: exynos: Broadcast frequency change notifications for all cores
  cpufreq: remove use of __devexit
  cpufreq: remove use of __devinit
  cpufreq: remove use of __devexit_p
  cpufreq: Remove unnecessary initialization of a local variable
  cpufreq: Make sure target freq is within limits
  cpufreq: Avoid calling cpufreq driver's target() routine if target_freq == policy->cur
  cpufreq: Fix sparse warning by making local function static
  cpufreq: Fix sparse warnings by updating cputime64_t to u64
  cpufreq: governors: remove redundant code
  cpufreq: return early from __cpufreq_driver_getavg()
  cpufreq: fix jiffies/cputime mixup in conservative/ondemand governors
  cpufreq: Improve debug prints
  cpufreq: Move common part from governors to separate file, v2
  cpufreq / core: Fix printing of governor and driver name
  ...
commit aa84950674
42	Documentation/devicetree/bindings/cpufreq/cpufreq-spear.txt (new file)
@@ -0,0 +1,42 @@
SPEAr cpufreq driver
-------------------

SPEAr SoC cpufreq driver for CPU frequency scaling.
It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) systems
which share a clock across all CPUs.

Required properties:
- cpufreq_tbl: Table of frequencies the CPU can be transitioned to, listed in
  increasing order.

Optional properties:
- clock-latency: The maximum possible transition latency of the clock, in
  nanoseconds.

Both the required and optional properties listed above must be defined under
the /cpus/cpu@0 node.

Examples:
--------
cpus {

	<...>

	cpu@0 {
		compatible = "arm,cortex-a9";
		reg = <0>;

		<...>

		cpufreq_tbl = < 166000
				200000
				250000
				300000
				400000
				500000
				600000 >;
	};

	<...>

};
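For illustration only, here is a minimal sketch of how a platform driver might read the cpufreq_tbl property described above. The helper name and error handling are assumptions made for this example; only the property name and cell layout come from the binding, and this is not the spear-cpufreq code added by this merge.

	#include <linux/of.h>
	#include <linux/slab.h>

	/* Hypothetical helper: read the "cpufreq_tbl" cells (kHz, increasing
	 * order) from a CPU node such as /cpus/cpu@0. */
	static int example_read_cpufreq_tbl(struct device_node *np,
					    u32 **freqs, int *nr)
	{
		struct property *prop;
		int cnt, ret;
		u32 *tbl;

		prop = of_find_property(np, "cpufreq_tbl", NULL);
		if (!prop || !prop->value)
			return -ENODEV;

		cnt = prop->length / sizeof(u32);	/* number of entries */
		tbl = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!tbl)
			return -ENOMEM;

		ret = of_property_read_u32_array(np, "cpufreq_tbl", tbl, cnt);
		if (ret) {
			kfree(tbl);
			return ret;
		}

		*freqs = tbl;	/* caller owns the table and kfree()s it */
		*nr = cnt;
		return 0;
	}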
@@ -904,6 +904,7 @@ config ARCH_NOMADIK
 
 config PLAT_SPEAR
 	bool "ST SPEAr"
+	select ARCH_HAS_CPUFREQ
 	select ARCH_REQUIRE_GPIOLIB
 	select ARM_AMBA
 	select CLKDEV_LOOKUP
@@ -76,3 +76,10 @@ config ARM_EXYNOS5250_CPUFREQ
 	help
 	  This adds the CPUFreq driver for Samsung EXYNOS5250
 	  SoC.
+
+config ARM_SPEAR_CPUFREQ
+	bool "SPEAr CPUFreq support"
+	depends on PLAT_SPEAR
+	default y
+	help
+	  This adds the CPUFreq driver support for SPEAr SOCs.
@@ -7,8 +7,8 @@ obj-$(CONFIG_CPU_FREQ_STAT)		+= cpufreq_stats.o
 obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE)	+= cpufreq_performance.o
 obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE)	+= cpufreq_powersave.o
 obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE)	+= cpufreq_userspace.o
-obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND)	+= cpufreq_ondemand.o
-obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)	+= cpufreq_conservative.o
+obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND)	+= cpufreq_ondemand.o cpufreq_governor.o
+obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)	+= cpufreq_conservative.o cpufreq_governor.o
 
 # CPUfreq cross-arch helpers
 obj-$(CONFIG_CPU_FREQ_TABLE)		+= freq_table.o
@@ -50,6 +50,7 @@ obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ)	+= exynos4210-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ)	+= exynos4x12-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ)	+= exynos5250-cpufreq.o
 obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ)	+= omap-cpufreq.o
+obj-$(CONFIG_ARM_SPEAR_CPUFREQ)		+= spear-cpufreq.o
 
 ##################################################################################
 # PowerPC platform drivers
@@ -174,7 +174,7 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
 	.attr = cpu0_cpufreq_attr,
 };
 
-static int __devinit cpu0_cpufreq_driver_init(void)
+static int cpu0_cpufreq_driver_init(void)
 {
 	struct device_node *np;
 	int ret;
@@ -15,6 +15,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -127,7 +129,7 @@ static int __init init_cpufreq_transition_notifier_list(void)
 pure_initcall(init_cpufreq_transition_notifier_list);
 
 static int off __read_mostly;
-int cpufreq_disabled(void)
+static int cpufreq_disabled(void)
 {
 	return off;
 }
@@ -402,7 +404,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
 static ssize_t store_##file_name					\
 (struct cpufreq_policy *policy, const char *buf, size_t count)		\
 {									\
-	unsigned int ret = -EINVAL;					\
+	unsigned int ret;						\
 	struct cpufreq_policy new_policy;				\
 									\
 	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
@@ -445,7 +447,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
 	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
 		return sprintf(buf, "performance\n");
 	else if (policy->governor)
-		return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
+		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
 				policy->governor->name);
 	return -EINVAL;
 }
@@ -457,7 +459,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
 					const char *buf, size_t count)
 {
-	unsigned int ret = -EINVAL;
+	unsigned int ret;
 	char str_governor[16];
 	struct cpufreq_policy new_policy;
 
@@ -491,7 +493,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
  */
 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
 {
-	return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
+	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
 }
 
 /**
@@ -512,7 +514,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
 		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
 		    - (CPUFREQ_NAME_LEN + 2)))
 			goto out;
-		i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
+		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
 	}
 out:
 	i += sprintf(&buf[i], "\n");
@@ -581,7 +583,7 @@ static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
 }
 
 /**
- * show_scaling_driver - show the current cpufreq HW/BIOS limitation
+ * show_bios_limit - show the current cpufreq HW/BIOS limitation
  */
 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
 {
@@ -1468,12 +1470,23 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 			    unsigned int relation)
 {
 	int retval = -EINVAL;
+	unsigned int old_target_freq = target_freq;
 
 	if (cpufreq_disabled())
 		return -ENODEV;
 
-	pr_debug("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
-		target_freq, relation);
+	/* Make sure that target_freq is within supported range */
+	if (target_freq > policy->max)
+		target_freq = policy->max;
+	if (target_freq < policy->min)
+		target_freq = policy->min;
+
+	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
+		 policy->cpu, target_freq, relation, old_target_freq);
+
+	if (target_freq == policy->cur)
+		return 0;
+
 	if (cpu_online(policy->cpu) && cpufreq_driver->target)
 		retval = cpufreq_driver->target(policy, target_freq, relation);
 
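Taken together, the hunk above folds in two fixes from this series: "cpufreq: Make sure target freq is within limits" and "cpufreq: Avoid calling cpufreq driver's target() routine if target_freq == policy->cur". A hypothetical caller-side view of the new behaviour (the numbers below are made up for illustration, not taken from any platform):

	/* Assume policy->min = 200000, policy->max = 600000, policy->cur = 600000. */
	ret = __cpufreq_driver_target(policy, 800000, CPUFREQ_RELATION_H);
	/* The request is clamped to 600000 kHz; since that equals policy->cur,
	 * the call returns 0 without ever invoking cpufreq_driver->target(). */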
@@ -1509,12 +1522,14 @@ int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
 {
 	int ret = 0;
 
+	if (!(cpu_online(cpu) && cpufreq_driver->getavg))
+		return 0;
+
 	policy = cpufreq_cpu_get(policy->cpu);
 	if (!policy)
 		return -EINVAL;
 
-	if (cpu_online(cpu) && cpufreq_driver->getavg)
-		ret = cpufreq_driver->getavg(policy, cpu);
+	ret = cpufreq_driver->getavg(policy, cpu);
 
 	cpufreq_cpu_put(policy);
 	return ret;
@ -11,83 +11,30 @@
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/hrtimer.h>
|
||||
#include <linux/tick.h>
|
||||
#include <linux/ktime.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/percpu-defs.h>
|
||||
#include <linux/sysfs.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
/*
|
||||
* dbs is used in this file as a shortform for demand based switching
|
||||
* It helps to keep variable names smaller, simpler
|
||||
*/
|
||||
#include "cpufreq_governor.h"
|
||||
|
||||
/* Conservative governor macros */
|
||||
#define DEF_FREQUENCY_UP_THRESHOLD (80)
|
||||
#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
|
||||
|
||||
/*
|
||||
* The polling frequency of this governor depends on the capability of
|
||||
* the processor. Default polling frequency is 1000 times the transition
|
||||
* latency of the processor. The governor will work on any processor with
|
||||
* transition latency <= 10mS, using appropriate sampling
|
||||
* rate.
|
||||
* For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
|
||||
* this governor will not work.
|
||||
* All times here are in uS.
|
||||
*/
|
||||
#define MIN_SAMPLING_RATE_RATIO (2)
|
||||
|
||||
static unsigned int min_sampling_rate;
|
||||
|
||||
#define LATENCY_MULTIPLIER (1000)
|
||||
#define MIN_LATENCY_MULTIPLIER (100)
|
||||
#define DEF_SAMPLING_DOWN_FACTOR (1)
|
||||
#define MAX_SAMPLING_DOWN_FACTOR (10)
|
||||
#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
|
||||
|
||||
static void do_dbs_timer(struct work_struct *work);
|
||||
static struct dbs_data cs_dbs_data;
|
||||
static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
|
||||
|
||||
struct cpu_dbs_info_s {
|
||||
cputime64_t prev_cpu_idle;
|
||||
cputime64_t prev_cpu_wall;
|
||||
cputime64_t prev_cpu_nice;
|
||||
struct cpufreq_policy *cur_policy;
|
||||
struct delayed_work work;
|
||||
unsigned int down_skip;
|
||||
unsigned int requested_freq;
|
||||
int cpu;
|
||||
unsigned int enable:1;
|
||||
/*
|
||||
* percpu mutex that serializes governor limit change with
|
||||
* do_dbs_timer invocation. We do not want do_dbs_timer to run
|
||||
* when user is changing the governor or limits.
|
||||
*/
|
||||
struct mutex timer_mutex;
|
||||
};
|
||||
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
|
||||
|
||||
static unsigned int dbs_enable; /* number of CPUs using this policy */
|
||||
|
||||
/*
|
||||
* dbs_mutex protects dbs_enable in governor start/stop.
|
||||
*/
|
||||
static DEFINE_MUTEX(dbs_mutex);
|
||||
|
||||
static struct dbs_tuners {
|
||||
unsigned int sampling_rate;
|
||||
unsigned int sampling_down_factor;
|
||||
unsigned int up_threshold;
|
||||
unsigned int down_threshold;
|
||||
unsigned int ignore_nice;
|
||||
unsigned int freq_step;
|
||||
} dbs_tuners_ins = {
|
||||
static struct cs_dbs_tuners cs_tuners = {
|
||||
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
|
||||
.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
|
||||
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
|
||||
@ -95,95 +42,121 @@ static struct dbs_tuners {
|
||||
.freq_step = 5,
|
||||
};
|
||||
|
||||
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
|
||||
/*
|
||||
* Every sampling_rate, we check, if current idle time is less than 20%
|
||||
* (default), then we try to increase frequency. Every sampling_rate *
|
||||
* sampling_down_factor, we check, if current idle time is more than 80%, then
|
||||
* we try to decrease frequency
|
||||
*
|
||||
* Any frequency increase takes it to the maximum frequency. Frequency reduction
|
||||
* happens at minimum steps of 5% (default) of maximum frequency
|
||||
*/
|
||||
static void cs_check_cpu(int cpu, unsigned int load)
|
||||
{
|
||||
u64 idle_time;
|
||||
u64 cur_wall_time;
|
||||
u64 busy_time;
|
||||
|
||||
cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
|
||||
|
||||
busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
|
||||
busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
|
||||
busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
|
||||
busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
|
||||
busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
|
||||
busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
|
||||
|
||||
idle_time = cur_wall_time - busy_time;
|
||||
if (wall)
|
||||
*wall = jiffies_to_usecs(cur_wall_time);
|
||||
|
||||
return jiffies_to_usecs(idle_time);
|
||||
}
|
||||
|
||||
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
|
||||
{
|
||||
u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
|
||||
|
||||
if (idle_time == -1ULL)
|
||||
return get_cpu_idle_time_jiffy(cpu, wall);
|
||||
else
|
||||
idle_time += get_cpu_iowait_time_us(cpu, wall);
|
||||
|
||||
return idle_time;
|
||||
}
|
||||
|
||||
/* keep track of frequency transitions */
|
||||
static int
|
||||
dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
|
||||
void *data)
|
||||
{
|
||||
struct cpufreq_freqs *freq = data;
|
||||
struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
|
||||
freq->cpu);
|
||||
|
||||
struct cpufreq_policy *policy;
|
||||
|
||||
if (!this_dbs_info->enable)
|
||||
return 0;
|
||||
|
||||
policy = this_dbs_info->cur_policy;
|
||||
struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
|
||||
struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
|
||||
unsigned int freq_target;
|
||||
|
||||
/*
|
||||
* we only care if our internally tracked freq moves outside
|
||||
* the 'valid' ranges of frequency available to us otherwise
|
||||
* we do not change it
|
||||
* break out if we 'cannot' reduce the speed as the user might
|
||||
* want freq_step to be zero
|
||||
*/
|
||||
if (cs_tuners.freq_step == 0)
|
||||
return;
|
||||
|
||||
/* Check for frequency increase */
|
||||
if (load > cs_tuners.up_threshold) {
|
||||
dbs_info->down_skip = 0;
|
||||
|
||||
/* if we are already at full speed then break out early */
|
||||
if (dbs_info->requested_freq == policy->max)
|
||||
return;
|
||||
|
||||
freq_target = (cs_tuners.freq_step * policy->max) / 100;
|
||||
|
||||
/* max freq cannot be less than 100. But who knows.... */
|
||||
if (unlikely(freq_target == 0))
|
||||
freq_target = 5;
|
||||
|
||||
dbs_info->requested_freq += freq_target;
|
||||
if (dbs_info->requested_freq > policy->max)
|
||||
dbs_info->requested_freq = policy->max;
|
||||
|
||||
__cpufreq_driver_target(policy, dbs_info->requested_freq,
|
||||
CPUFREQ_RELATION_H);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* The optimal frequency is the frequency that is the lowest that can
|
||||
* support the current CPU usage without triggering the up policy. To be
|
||||
* safe, we focus 10 points under the threshold.
|
||||
*/
|
||||
if (load < (cs_tuners.down_threshold - 10)) {
|
||||
freq_target = (cs_tuners.freq_step * policy->max) / 100;
|
||||
|
||||
dbs_info->requested_freq -= freq_target;
|
||||
if (dbs_info->requested_freq < policy->min)
|
||||
dbs_info->requested_freq = policy->min;
|
||||
|
||||
/*
|
||||
* if we cannot reduce the frequency anymore, break out early
|
||||
*/
|
||||
if (policy->cur == policy->min)
|
||||
return;
|
||||
|
||||
__cpufreq_driver_target(policy, dbs_info->requested_freq,
|
||||
CPUFREQ_RELATION_H);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
static void cs_dbs_timer(struct work_struct *work)
|
||||
{
|
||||
struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
|
||||
struct cs_cpu_dbs_info_s, cdbs.work.work);
|
||||
unsigned int cpu = dbs_info->cdbs.cpu;
|
||||
int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);
|
||||
|
||||
mutex_lock(&dbs_info->cdbs.timer_mutex);
|
||||
|
||||
dbs_check_cpu(&cs_dbs_data, cpu);
|
||||
|
||||
schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
|
||||
mutex_unlock(&dbs_info->cdbs.timer_mutex);
|
||||
}
|
||||
|
||||
static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
|
||||
void *data)
|
||||
{
|
||||
struct cpufreq_freqs *freq = data;
|
||||
struct cs_cpu_dbs_info_s *dbs_info =
|
||||
&per_cpu(cs_cpu_dbs_info, freq->cpu);
|
||||
struct cpufreq_policy *policy;
|
||||
|
||||
if (!dbs_info->enable)
|
||||
return 0;
|
||||
|
||||
policy = dbs_info->cdbs.cur_policy;
|
||||
|
||||
/*
|
||||
* we only care if our internally tracked freq moves outside the 'valid'
|
||||
* ranges of frequency available to us, otherwise we do not change it
|
||||
*/
|
||||
if (this_dbs_info->requested_freq > policy->max
|
||||
|| this_dbs_info->requested_freq < policy->min)
|
||||
this_dbs_info->requested_freq = freq->new;
|
||||
if (dbs_info->requested_freq > policy->max
|
||||
|| dbs_info->requested_freq < policy->min)
|
||||
dbs_info->requested_freq = freq->new;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct notifier_block dbs_cpufreq_notifier_block = {
|
||||
.notifier_call = dbs_cpufreq_notifier
|
||||
};
|
||||
|
||||
/************************** sysfs interface ************************/
|
||||
static ssize_t show_sampling_rate_min(struct kobject *kobj,
|
||||
struct attribute *attr, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%u\n", min_sampling_rate);
|
||||
return sprintf(buf, "%u\n", cs_dbs_data.min_sampling_rate);
|
||||
}
|
||||
|
||||
define_one_global_ro(sampling_rate_min);
|
||||
|
||||
/* cpufreq_conservative Governor Tunables */
|
||||
#define show_one(file_name, object) \
|
||||
static ssize_t show_##file_name \
|
||||
(struct kobject *kobj, struct attribute *attr, char *buf) \
|
||||
{ \
|
||||
return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
|
||||
}
|
||||
show_one(sampling_rate, sampling_rate);
|
||||
show_one(sampling_down_factor, sampling_down_factor);
|
||||
show_one(up_threshold, up_threshold);
|
||||
show_one(down_threshold, down_threshold);
|
||||
show_one(ignore_nice_load, ignore_nice);
|
||||
show_one(freq_step, freq_step);
|
||||
|
||||
static ssize_t store_sampling_down_factor(struct kobject *a,
|
||||
struct attribute *b,
|
||||
const char *buf, size_t count)
|
||||
@ -195,7 +168,7 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
|
||||
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
|
||||
return -EINVAL;
|
||||
|
||||
dbs_tuners_ins.sampling_down_factor = input;
|
||||
cs_tuners.sampling_down_factor = input;
|
||||
return count;
|
||||
}
|
||||
|
||||
@ -209,7 +182,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
|
||||
if (ret != 1)
|
||||
return -EINVAL;
|
||||
|
||||
dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
|
||||
cs_tuners.sampling_rate = max(input, cs_dbs_data.min_sampling_rate);
|
||||
return count;
|
||||
}
|
||||
|
||||
@ -220,11 +193,10 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
|
||||
int ret;
|
||||
ret = sscanf(buf, "%u", &input);
|
||||
|
||||
if (ret != 1 || input > 100 ||
|
||||
input <= dbs_tuners_ins.down_threshold)
|
||||
if (ret != 1 || input > 100 || input <= cs_tuners.down_threshold)
|
||||
return -EINVAL;
|
||||
|
||||
dbs_tuners_ins.up_threshold = input;
|
||||
cs_tuners.up_threshold = input;
|
||||
return count;
|
||||
}
|
||||
|
||||
@ -237,21 +209,19 @@ static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
|
||||
|
||||
/* cannot be lower than 11 otherwise freq will not fall */
|
||||
if (ret != 1 || input < 11 || input > 100 ||
|
||||
input >= dbs_tuners_ins.up_threshold)
|
||||
input >= cs_tuners.up_threshold)
|
||||
return -EINVAL;
|
||||
|
||||
dbs_tuners_ins.down_threshold = input;
|
||||
cs_tuners.down_threshold = input;
|
||||
return count;
|
||||
}
|
||||
|
||||
static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
unsigned int input;
|
||||
unsigned int input, j;
|
||||
int ret;
|
||||
|
||||
unsigned int j;
|
||||
|
||||
ret = sscanf(buf, "%u", &input);
|
||||
if (ret != 1)
|
||||
return -EINVAL;
|
||||
@ -259,19 +229,20 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
|
||||
if (input > 1)
|
||||
input = 1;
|
||||
|
||||
if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
|
||||
if (input == cs_tuners.ignore_nice) /* nothing to do */
|
||||
return count;
|
||||
|
||||
dbs_tuners_ins.ignore_nice = input;
|
||||
cs_tuners.ignore_nice = input;
|
||||
|
||||
/* we need to re-evaluate prev_cpu_idle */
|
||||
for_each_online_cpu(j) {
|
||||
struct cpu_dbs_info_s *dbs_info;
|
||||
struct cs_cpu_dbs_info_s *dbs_info;
|
||||
dbs_info = &per_cpu(cs_cpu_dbs_info, j);
|
||||
dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
|
||||
&dbs_info->prev_cpu_wall);
|
||||
if (dbs_tuners_ins.ignore_nice)
|
||||
dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
|
||||
dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
|
||||
&dbs_info->cdbs.prev_cpu_wall);
|
||||
if (cs_tuners.ignore_nice)
|
||||
dbs_info->cdbs.prev_cpu_nice =
|
||||
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
|
||||
}
|
||||
return count;
|
||||
}
|
||||
@ -289,18 +260,28 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
|
||||
if (input > 100)
|
||||
input = 100;
|
||||
|
||||
/* no need to test here if freq_step is zero as the user might actually
|
||||
* want this, they would be crazy though :) */
|
||||
dbs_tuners_ins.freq_step = input;
|
||||
/*
|
||||
* no need to test here if freq_step is zero as the user might actually
|
||||
* want this, they would be crazy though :)
|
||||
*/
|
||||
cs_tuners.freq_step = input;
|
||||
return count;
|
||||
}
|
||||
|
||||
show_one(cs, sampling_rate, sampling_rate);
|
||||
show_one(cs, sampling_down_factor, sampling_down_factor);
|
||||
show_one(cs, up_threshold, up_threshold);
|
||||
show_one(cs, down_threshold, down_threshold);
|
||||
show_one(cs, ignore_nice_load, ignore_nice);
|
||||
show_one(cs, freq_step, freq_step);
|
||||
|
||||
define_one_global_rw(sampling_rate);
|
||||
define_one_global_rw(sampling_down_factor);
|
||||
define_one_global_rw(up_threshold);
|
||||
define_one_global_rw(down_threshold);
|
||||
define_one_global_rw(ignore_nice_load);
|
||||
define_one_global_rw(freq_step);
|
||||
define_one_global_ro(sampling_rate_min);
|
||||
|
||||
static struct attribute *dbs_attributes[] = {
|
||||
&sampling_rate_min.attr,
|
||||
@ -313,283 +294,38 @@ static struct attribute *dbs_attributes[] = {
|
||||
NULL
|
||||
};
|
||||
|
||||
static struct attribute_group dbs_attr_group = {
|
||||
static struct attribute_group cs_attr_group = {
|
||||
.attrs = dbs_attributes,
|
||||
.name = "conservative",
|
||||
};
|
||||
|
||||
/************************** sysfs end ************************/
|
||||
|
||||
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
|
||||
{
|
||||
unsigned int load = 0;
|
||||
unsigned int max_load = 0;
|
||||
unsigned int freq_target;
|
||||
define_get_cpu_dbs_routines(cs_cpu_dbs_info);
|
||||
|
||||
struct cpufreq_policy *policy;
|
||||
unsigned int j;
|
||||
static struct notifier_block cs_cpufreq_notifier_block = {
|
||||
.notifier_call = dbs_cpufreq_notifier,
|
||||
};
|
||||
|
||||
policy = this_dbs_info->cur_policy;
|
||||
static struct cs_ops cs_ops = {
|
||||
.notifier_block = &cs_cpufreq_notifier_block,
|
||||
};
|
||||
|
||||
/*
|
||||
* Every sampling_rate, we check, if current idle time is less
|
||||
* than 20% (default), then we try to increase frequency
|
||||
* Every sampling_rate*sampling_down_factor, we check, if current
|
||||
* idle time is more than 80%, then we try to decrease frequency
|
||||
*
|
||||
* Any frequency increase takes it to the maximum frequency.
|
||||
* Frequency reduction happens at minimum steps of
|
||||
* 5% (default) of maximum frequency
|
||||
*/
|
||||
static struct dbs_data cs_dbs_data = {
|
||||
.governor = GOV_CONSERVATIVE,
|
||||
.attr_group = &cs_attr_group,
|
||||
.tuners = &cs_tuners,
|
||||
.get_cpu_cdbs = get_cpu_cdbs,
|
||||
.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
|
||||
.gov_dbs_timer = cs_dbs_timer,
|
||||
.gov_check_cpu = cs_check_cpu,
|
||||
.gov_ops = &cs_ops,
|
||||
};
|
||||
|
||||
/* Get Absolute Load */
|
||||
for_each_cpu(j, policy->cpus) {
|
||||
struct cpu_dbs_info_s *j_dbs_info;
|
||||
cputime64_t cur_wall_time, cur_idle_time;
|
||||
unsigned int idle_time, wall_time;
|
||||
|
||||
j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
|
||||
|
||||
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
|
||||
|
||||
wall_time = (unsigned int)
|
||||
(cur_wall_time - j_dbs_info->prev_cpu_wall);
|
||||
j_dbs_info->prev_cpu_wall = cur_wall_time;
|
||||
|
||||
idle_time = (unsigned int)
|
||||
(cur_idle_time - j_dbs_info->prev_cpu_idle);
|
||||
j_dbs_info->prev_cpu_idle = cur_idle_time;
|
||||
|
||||
if (dbs_tuners_ins.ignore_nice) {
|
||||
u64 cur_nice;
|
||||
unsigned long cur_nice_jiffies;
|
||||
|
||||
cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
|
||||
j_dbs_info->prev_cpu_nice;
|
||||
/*
|
||||
* Assumption: nice time between sampling periods will
|
||||
* be less than 2^32 jiffies for 32 bit sys
|
||||
*/
|
||||
cur_nice_jiffies = (unsigned long)
|
||||
cputime64_to_jiffies64(cur_nice);
|
||||
|
||||
j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
|
||||
idle_time += jiffies_to_usecs(cur_nice_jiffies);
|
||||
}
|
||||
|
||||
if (unlikely(!wall_time || wall_time < idle_time))
|
||||
continue;
|
||||
|
||||
load = 100 * (wall_time - idle_time) / wall_time;
|
||||
|
||||
if (load > max_load)
|
||||
max_load = load;
|
||||
}
|
||||
|
||||
/*
|
||||
* break out if we 'cannot' reduce the speed as the user might
|
||||
* want freq_step to be zero
|
||||
*/
|
||||
if (dbs_tuners_ins.freq_step == 0)
|
||||
return;
|
||||
|
||||
/* Check for frequency increase */
|
||||
if (max_load > dbs_tuners_ins.up_threshold) {
|
||||
this_dbs_info->down_skip = 0;
|
||||
|
||||
/* if we are already at full speed then break out early */
|
||||
if (this_dbs_info->requested_freq == policy->max)
|
||||
return;
|
||||
|
||||
freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
|
||||
|
||||
/* max freq cannot be less than 100. But who knows.... */
|
||||
if (unlikely(freq_target == 0))
|
||||
freq_target = 5;
|
||||
|
||||
this_dbs_info->requested_freq += freq_target;
|
||||
if (this_dbs_info->requested_freq > policy->max)
|
||||
this_dbs_info->requested_freq = policy->max;
|
||||
|
||||
__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
|
||||
CPUFREQ_RELATION_H);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* The optimal frequency is the frequency that is the lowest that
|
||||
* can support the current CPU usage without triggering the up
|
||||
* policy. To be safe, we focus 10 points under the threshold.
|
||||
*/
|
||||
if (max_load < (dbs_tuners_ins.down_threshold - 10)) {
|
||||
freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
|
||||
|
||||
this_dbs_info->requested_freq -= freq_target;
|
||||
if (this_dbs_info->requested_freq < policy->min)
|
||||
this_dbs_info->requested_freq = policy->min;
|
||||
|
||||
/*
|
||||
* if we cannot reduce the frequency anymore, break out early
|
||||
*/
|
||||
if (policy->cur == policy->min)
|
||||
return;
|
||||
|
||||
__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
|
||||
CPUFREQ_RELATION_H);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
static void do_dbs_timer(struct work_struct *work)
|
||||
{
|
||||
struct cpu_dbs_info_s *dbs_info =
|
||||
container_of(work, struct cpu_dbs_info_s, work.work);
|
||||
unsigned int cpu = dbs_info->cpu;
|
||||
|
||||
/* We want all CPUs to do sampling nearly on same jiffy */
|
||||
int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
|
||||
|
||||
delay -= jiffies % delay;
|
||||
|
||||
mutex_lock(&dbs_info->timer_mutex);
|
||||
|
||||
dbs_check_cpu(dbs_info);
|
||||
|
||||
schedule_delayed_work_on(cpu, &dbs_info->work, delay);
|
||||
mutex_unlock(&dbs_info->timer_mutex);
|
||||
}
|
||||
|
||||
static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
|
||||
{
|
||||
/* We want all CPUs to do sampling nearly on same jiffy */
|
||||
int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
|
||||
delay -= jiffies % delay;
|
||||
|
||||
dbs_info->enable = 1;
|
||||
INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
|
||||
schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
|
||||
}
|
||||
|
||||
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
|
||||
{
|
||||
dbs_info->enable = 0;
|
||||
cancel_delayed_work_sync(&dbs_info->work);
|
||||
}
|
||||
|
||||
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
|
||||
static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
|
||||
unsigned int event)
|
||||
{
|
||||
unsigned int cpu = policy->cpu;
|
||||
struct cpu_dbs_info_s *this_dbs_info;
|
||||
unsigned int j;
|
||||
int rc;
|
||||
|
||||
this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
|
||||
|
||||
switch (event) {
|
||||
case CPUFREQ_GOV_START:
|
||||
if ((!cpu_online(cpu)) || (!policy->cur))
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&dbs_mutex);
|
||||
|
||||
for_each_cpu(j, policy->cpus) {
|
||||
struct cpu_dbs_info_s *j_dbs_info;
|
||||
j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
|
||||
j_dbs_info->cur_policy = policy;
|
||||
|
||||
j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
|
||||
&j_dbs_info->prev_cpu_wall);
|
||||
if (dbs_tuners_ins.ignore_nice)
|
||||
j_dbs_info->prev_cpu_nice =
|
||||
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
|
||||
}
|
||||
this_dbs_info->cpu = cpu;
|
||||
this_dbs_info->down_skip = 0;
|
||||
this_dbs_info->requested_freq = policy->cur;
|
||||
|
||||
mutex_init(&this_dbs_info->timer_mutex);
|
||||
dbs_enable++;
|
||||
/*
|
||||
* Start the timerschedule work, when this governor
|
||||
* is used for first time
|
||||
*/
|
||||
if (dbs_enable == 1) {
|
||||
unsigned int latency;
|
||||
/* policy latency is in nS. Convert it to uS first */
|
||||
latency = policy->cpuinfo.transition_latency / 1000;
|
||||
if (latency == 0)
|
||||
latency = 1;
|
||||
|
||||
rc = sysfs_create_group(cpufreq_global_kobject,
|
||||
&dbs_attr_group);
|
||||
if (rc) {
|
||||
mutex_unlock(&dbs_mutex);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
|
||||
* conservative does not implement micro like ondemand
|
||||
* governor, thus we are bound to jiffies/HZ
|
||||
*/
|
||||
min_sampling_rate =
|
||||
MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
|
||||
/* Bring kernel and HW constraints together */
|
||||
min_sampling_rate = max(min_sampling_rate,
|
||||
MIN_LATENCY_MULTIPLIER * latency);
|
||||
dbs_tuners_ins.sampling_rate =
|
||||
max(min_sampling_rate,
|
||||
latency * LATENCY_MULTIPLIER);
|
||||
|
||||
cpufreq_register_notifier(
|
||||
&dbs_cpufreq_notifier_block,
|
||||
CPUFREQ_TRANSITION_NOTIFIER);
|
||||
}
|
||||
mutex_unlock(&dbs_mutex);
|
||||
|
||||
dbs_timer_init(this_dbs_info);
|
||||
|
||||
break;
|
||||
|
||||
case CPUFREQ_GOV_STOP:
|
||||
dbs_timer_exit(this_dbs_info);
|
||||
|
||||
mutex_lock(&dbs_mutex);
|
||||
dbs_enable--;
|
||||
mutex_destroy(&this_dbs_info->timer_mutex);
|
||||
|
||||
/*
|
||||
* Stop the timerschedule work, when this governor
|
||||
* is used for first time
|
||||
*/
|
||||
if (dbs_enable == 0)
|
||||
cpufreq_unregister_notifier(
|
||||
&dbs_cpufreq_notifier_block,
|
||||
CPUFREQ_TRANSITION_NOTIFIER);
|
||||
|
||||
mutex_unlock(&dbs_mutex);
|
||||
if (!dbs_enable)
|
||||
sysfs_remove_group(cpufreq_global_kobject,
|
||||
&dbs_attr_group);
|
||||
|
||||
break;
|
||||
|
||||
case CPUFREQ_GOV_LIMITS:
|
||||
mutex_lock(&this_dbs_info->timer_mutex);
|
||||
if (policy->max < this_dbs_info->cur_policy->cur)
|
||||
__cpufreq_driver_target(
|
||||
this_dbs_info->cur_policy,
|
||||
policy->max, CPUFREQ_RELATION_H);
|
||||
else if (policy->min > this_dbs_info->cur_policy->cur)
|
||||
__cpufreq_driver_target(
|
||||
this_dbs_info->cur_policy,
|
||||
policy->min, CPUFREQ_RELATION_L);
|
||||
dbs_check_cpu(this_dbs_info);
|
||||
mutex_unlock(&this_dbs_info->timer_mutex);
|
||||
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
return cpufreq_governor_dbs(&cs_dbs_data, policy, event);
|
||||
}
|
||||
|
||||
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
|
||||
@ -597,13 +333,14 @@ static
|
||||
#endif
|
||||
struct cpufreq_governor cpufreq_gov_conservative = {
|
||||
.name = "conservative",
|
||||
.governor = cpufreq_governor_dbs,
|
||||
.governor = cs_cpufreq_governor_dbs,
|
||||
.max_transition_latency = TRANSITION_LATENCY_LIMIT,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init cpufreq_gov_dbs_init(void)
|
||||
{
|
||||
mutex_init(&cs_dbs_data.mutex);
|
||||
return cpufreq_register_governor(&cpufreq_gov_conservative);
|
||||
}
|
||||
|
||||
@ -612,7 +349,6 @@ static void __exit cpufreq_gov_dbs_exit(void)
|
||||
cpufreq_unregister_governor(&cpufreq_gov_conservative);
|
||||
}
|
||||
|
||||
|
||||
MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
|
||||
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
|
||||
"Low Latency Frequency Transition capable processors "
|
||||
|
318	drivers/cpufreq/cpufreq_governor.c (new file)
@@ -0,0 +1,318 @@
|
||||
/*
|
||||
* drivers/cpufreq/cpufreq_governor.c
|
||||
*
|
||||
* CPUFREQ governors common code
|
||||
*
|
||||
* Copyright (C) 2001 Russell King
|
||||
* (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
|
||||
* (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
|
||||
* (C) 2009 Alexander Clouter <alex@digriz.org.uk>
|
||||
* (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <asm/cputime.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/tick.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
#include "cpufreq_governor.h"
|
||||
|
||||
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
|
||||
{
|
||||
u64 idle_time;
|
||||
u64 cur_wall_time;
|
||||
u64 busy_time;
|
||||
|
||||
cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
|
||||
|
||||
busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
|
||||
busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
|
||||
busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
|
||||
busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
|
||||
busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
|
||||
busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
|
||||
|
||||
idle_time = cur_wall_time - busy_time;
|
||||
if (wall)
|
||||
*wall = cputime_to_usecs(cur_wall_time);
|
||||
|
||||
return cputime_to_usecs(idle_time);
|
||||
}
|
||||
|
||||
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall)
|
||||
{
|
||||
u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
|
||||
|
||||
if (idle_time == -1ULL)
|
||||
return get_cpu_idle_time_jiffy(cpu, wall);
|
||||
else
|
||||
idle_time += get_cpu_iowait_time_us(cpu, wall);
|
||||
|
||||
return idle_time;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
|
||||
|
||||
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
|
||||
{
|
||||
struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
|
||||
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
|
||||
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
|
||||
struct cpufreq_policy *policy;
|
||||
unsigned int max_load = 0;
|
||||
unsigned int ignore_nice;
|
||||
unsigned int j;
|
||||
|
||||
if (dbs_data->governor == GOV_ONDEMAND)
|
||||
ignore_nice = od_tuners->ignore_nice;
|
||||
else
|
||||
ignore_nice = cs_tuners->ignore_nice;
|
||||
|
||||
policy = cdbs->cur_policy;
|
||||
|
||||
/* Get Absolute Load (in terms of freq for ondemand gov) */
|
||||
for_each_cpu(j, policy->cpus) {
|
||||
struct cpu_dbs_common_info *j_cdbs;
|
||||
u64 cur_wall_time, cur_idle_time, cur_iowait_time;
|
||||
unsigned int idle_time, wall_time, iowait_time;
|
||||
unsigned int load;
|
||||
|
||||
j_cdbs = dbs_data->get_cpu_cdbs(j);
|
||||
|
||||
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
|
||||
|
||||
wall_time = (unsigned int)
|
||||
(cur_wall_time - j_cdbs->prev_cpu_wall);
|
||||
j_cdbs->prev_cpu_wall = cur_wall_time;
|
||||
|
||||
idle_time = (unsigned int)
|
||||
(cur_idle_time - j_cdbs->prev_cpu_idle);
|
||||
j_cdbs->prev_cpu_idle = cur_idle_time;
|
||||
|
||||
if (ignore_nice) {
|
||||
u64 cur_nice;
|
||||
unsigned long cur_nice_jiffies;
|
||||
|
||||
cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
|
||||
cdbs->prev_cpu_nice;
|
||||
/*
|
||||
* Assumption: nice time between sampling periods will
|
||||
* be less than 2^32 jiffies for 32 bit sys
|
||||
*/
|
||||
cur_nice_jiffies = (unsigned long)
|
||||
cputime64_to_jiffies64(cur_nice);
|
||||
|
||||
cdbs->prev_cpu_nice =
|
||||
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
|
||||
idle_time += jiffies_to_usecs(cur_nice_jiffies);
|
||||
}
|
||||
|
||||
if (dbs_data->governor == GOV_ONDEMAND) {
|
||||
struct od_cpu_dbs_info_s *od_j_dbs_info =
|
||||
dbs_data->get_cpu_dbs_info_s(cpu);
|
||||
|
||||
cur_iowait_time = get_cpu_iowait_time_us(j,
|
||||
&cur_wall_time);
|
||||
if (cur_iowait_time == -1ULL)
|
||||
cur_iowait_time = 0;
|
||||
|
||||
iowait_time = (unsigned int) (cur_iowait_time -
|
||||
od_j_dbs_info->prev_cpu_iowait);
|
||||
od_j_dbs_info->prev_cpu_iowait = cur_iowait_time;
|
||||
|
||||
/*
|
||||
* For the purpose of ondemand, waiting for disk IO is
|
||||
* an indication that you're performance critical, and
|
||||
* not that the system is actually idle. So subtract the
|
||||
* iowait time from the cpu idle time.
|
||||
*/
|
||||
if (od_tuners->io_is_busy && idle_time >= iowait_time)
|
||||
idle_time -= iowait_time;
|
||||
}
|
||||
|
||||
if (unlikely(!wall_time || wall_time < idle_time))
|
||||
continue;
|
||||
|
||||
load = 100 * (wall_time - idle_time) / wall_time;
|
||||
|
||||
if (dbs_data->governor == GOV_ONDEMAND) {
|
||||
int freq_avg = __cpufreq_driver_getavg(policy, j);
|
||||
if (freq_avg <= 0)
|
||||
freq_avg = policy->cur;
|
||||
|
||||
load *= freq_avg;
|
||||
}
|
||||
|
||||
if (load > max_load)
|
||||
max_load = load;
|
||||
}
|
||||
|
||||
dbs_data->gov_check_cpu(cpu, max_load);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dbs_check_cpu);
|
||||
|
||||
static inline void dbs_timer_init(struct dbs_data *dbs_data,
|
||||
struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate)
|
||||
{
|
||||
int delay = delay_for_sampling_rate(sampling_rate);
|
||||
|
||||
INIT_DEFERRABLE_WORK(&cdbs->work, dbs_data->gov_dbs_timer);
|
||||
schedule_delayed_work_on(cdbs->cpu, &cdbs->work, delay);
|
||||
}
|
||||
|
||||
static inline void dbs_timer_exit(struct cpu_dbs_common_info *cdbs)
|
||||
{
|
||||
cancel_delayed_work_sync(&cdbs->work);
|
||||
}
|
||||
|
||||
int cpufreq_governor_dbs(struct dbs_data *dbs_data,
|
||||
struct cpufreq_policy *policy, unsigned int event)
|
||||
{
|
||||
struct od_cpu_dbs_info_s *od_dbs_info = NULL;
|
||||
struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
|
||||
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
|
||||
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
|
||||
struct cpu_dbs_common_info *cpu_cdbs;
|
||||
unsigned int *sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
|
||||
int rc;
|
||||
|
||||
cpu_cdbs = dbs_data->get_cpu_cdbs(cpu);
|
||||
|
||||
if (dbs_data->governor == GOV_CONSERVATIVE) {
|
||||
cs_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
|
||||
sampling_rate = &cs_tuners->sampling_rate;
|
||||
ignore_nice = cs_tuners->ignore_nice;
|
||||
} else {
|
||||
od_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
|
||||
sampling_rate = &od_tuners->sampling_rate;
|
||||
ignore_nice = od_tuners->ignore_nice;
|
||||
}
|
||||
|
||||
switch (event) {
|
||||
case CPUFREQ_GOV_START:
|
||||
if ((!cpu_online(cpu)) || (!policy->cur))
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&dbs_data->mutex);
|
||||
|
||||
dbs_data->enable++;
|
||||
cpu_cdbs->cpu = cpu;
|
||||
for_each_cpu(j, policy->cpus) {
|
||||
struct cpu_dbs_common_info *j_cdbs;
|
||||
j_cdbs = dbs_data->get_cpu_cdbs(j);
|
||||
|
||||
j_cdbs->cur_policy = policy;
|
||||
j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
|
||||
&j_cdbs->prev_cpu_wall);
|
||||
if (ignore_nice)
|
||||
j_cdbs->prev_cpu_nice =
|
||||
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
|
||||
}
|
||||
|
||||
/*
|
||||
* Start the timerschedule work, when this governor is used for
|
||||
* first time
|
||||
*/
|
||||
if (dbs_data->enable != 1)
|
||||
goto second_time;
|
||||
|
||||
rc = sysfs_create_group(cpufreq_global_kobject,
|
||||
dbs_data->attr_group);
|
||||
if (rc) {
|
||||
mutex_unlock(&dbs_data->mutex);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* policy latency is in nS. Convert it to uS first */
|
||||
latency = policy->cpuinfo.transition_latency / 1000;
|
||||
if (latency == 0)
|
||||
latency = 1;
|
||||
|
||||
/*
|
||||
* conservative does not implement micro like ondemand
|
||||
* governor, thus we are bound to jiffies/HZ
|
||||
*/
|
||||
if (dbs_data->governor == GOV_CONSERVATIVE) {
|
||||
struct cs_ops *ops = dbs_data->gov_ops;
|
||||
|
||||
cpufreq_register_notifier(ops->notifier_block,
|
||||
CPUFREQ_TRANSITION_NOTIFIER);
|
||||
|
||||
dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
|
||||
jiffies_to_usecs(10);
|
||||
} else {
|
||||
struct od_ops *ops = dbs_data->gov_ops;
|
||||
|
||||
od_tuners->io_is_busy = ops->io_busy();
|
||||
}
|
||||
|
||||
/* Bring kernel and HW constraints together */
|
||||
dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
|
||||
MIN_LATENCY_MULTIPLIER * latency);
|
||||
*sampling_rate = max(dbs_data->min_sampling_rate, latency *
|
||||
LATENCY_MULTIPLIER);
|
||||
|
||||
second_time:
|
||||
if (dbs_data->governor == GOV_CONSERVATIVE) {
|
||||
cs_dbs_info->down_skip = 0;
|
||||
cs_dbs_info->enable = 1;
|
||||
cs_dbs_info->requested_freq = policy->cur;
|
||||
} else {
|
||||
struct od_ops *ops = dbs_data->gov_ops;
|
||||
od_dbs_info->rate_mult = 1;
|
||||
od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
|
||||
ops->powersave_bias_init_cpu(cpu);
|
||||
}
|
||||
mutex_unlock(&dbs_data->mutex);
|
||||
|
||||
mutex_init(&cpu_cdbs->timer_mutex);
|
||||
dbs_timer_init(dbs_data, cpu_cdbs, *sampling_rate);
|
||||
break;
|
||||
|
||||
case CPUFREQ_GOV_STOP:
|
||||
if (dbs_data->governor == GOV_CONSERVATIVE)
|
||||
cs_dbs_info->enable = 0;
|
||||
|
||||
dbs_timer_exit(cpu_cdbs);
|
||||
|
||||
mutex_lock(&dbs_data->mutex);
|
||||
mutex_destroy(&cpu_cdbs->timer_mutex);
|
||||
dbs_data->enable--;
|
||||
if (!dbs_data->enable) {
|
||||
struct cs_ops *ops = dbs_data->gov_ops;
|
||||
|
||||
sysfs_remove_group(cpufreq_global_kobject,
|
||||
dbs_data->attr_group);
|
||||
if (dbs_data->governor == GOV_CONSERVATIVE)
|
||||
cpufreq_unregister_notifier(ops->notifier_block,
|
||||
CPUFREQ_TRANSITION_NOTIFIER);
|
||||
}
|
||||
mutex_unlock(&dbs_data->mutex);
|
||||
|
||||
break;
|
||||
|
||||
case CPUFREQ_GOV_LIMITS:
|
||||
mutex_lock(&cpu_cdbs->timer_mutex);
|
||||
if (policy->max < cpu_cdbs->cur_policy->cur)
|
||||
__cpufreq_driver_target(cpu_cdbs->cur_policy,
|
||||
policy->max, CPUFREQ_RELATION_H);
|
||||
else if (policy->min > cpu_cdbs->cur_policy->cur)
|
||||
__cpufreq_driver_target(cpu_cdbs->cur_policy,
|
||||
policy->min, CPUFREQ_RELATION_L);
|
||||
dbs_check_cpu(dbs_data, cpu);
|
||||
mutex_unlock(&cpu_cdbs->timer_mutex);
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
|
176	drivers/cpufreq/cpufreq_governor.h (new file)
@@ -0,0 +1,176 @@
|
||||
/*
|
||||
* drivers/cpufreq/cpufreq_governor.h
|
||||
*
|
||||
* Header file for CPUFreq governors common code
|
||||
*
|
||||
* Copyright (C) 2001 Russell King
|
||||
* (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
|
||||
* (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
|
||||
* (C) 2009 Alexander Clouter <alex@digriz.org.uk>
|
||||
* (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#ifndef _CPUFREQ_GOVERNER_H
|
||||
#define _CPUFREQ_GOVERNER_H
|
||||
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/sysfs.h>
|
||||
|
||||
/*
|
||||
* The polling frequency depends on the capability of the processor. Default
|
||||
* polling frequency is 1000 times the transition latency of the processor. The
|
||||
* governor will work on any processor with transition latency <= 10mS, using
|
||||
* appropriate sampling rate.
|
||||
*
|
||||
* For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
|
||||
* this governor will not work. All times here are in uS.
|
||||
*/
|
||||
#define MIN_SAMPLING_RATE_RATIO (2)
|
||||
#define LATENCY_MULTIPLIER (1000)
|
||||
#define MIN_LATENCY_MULTIPLIER (100)
|
||||
#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
|
||||
|
||||
/* Ondemand Sampling types */
|
||||
enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
|
||||
|
||||
/* Macro creating sysfs show routines */
|
||||
#define show_one(_gov, file_name, object) \
|
||||
static ssize_t show_##file_name \
|
||||
(struct kobject *kobj, struct attribute *attr, char *buf) \
|
||||
{ \
|
||||
return sprintf(buf, "%u\n", _gov##_tuners.object); \
|
||||
}
|
||||
|
||||
#define define_get_cpu_dbs_routines(_dbs_info) \
|
||||
static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu) \
|
||||
{ \
|
||||
return &per_cpu(_dbs_info, cpu).cdbs; \
|
||||
} \
|
||||
\
|
||||
static void *get_cpu_dbs_info_s(int cpu) \
|
||||
{ \
|
||||
return &per_cpu(_dbs_info, cpu); \
|
||||
}
|
||||
|
||||
/*
|
||||
* Abbreviations:
|
||||
* dbs: used as a shortform for demand based switching. It helps to keep variable
|
||||
* names smaller, simpler
|
||||
* cdbs: common dbs
|
||||
* on_*: On-demand governor
|
||||
* cs_*: Conservative governor
|
||||
*/
|
||||
|
||||
/* Per cpu structures */
|
||||
struct cpu_dbs_common_info {
|
||||
int cpu;
|
||||
u64 prev_cpu_idle;
|
||||
u64 prev_cpu_wall;
|
||||
u64 prev_cpu_nice;
|
||||
struct cpufreq_policy *cur_policy;
|
||||
struct delayed_work work;
|
||||
/*
|
||||
* percpu mutex that serializes governor limit change with gov_dbs_timer
|
||||
* invocation. We do not want gov_dbs_timer to run when user is changing
|
||||
* the governor or limits.
|
||||
*/
|
||||
struct mutex timer_mutex;
|
||||
};
|
||||
|
||||
struct od_cpu_dbs_info_s {
|
||||
struct cpu_dbs_common_info cdbs;
|
||||
u64 prev_cpu_iowait;
|
||||
struct cpufreq_frequency_table *freq_table;
|
||||
unsigned int freq_lo;
|
||||
unsigned int freq_lo_jiffies;
|
||||
unsigned int freq_hi_jiffies;
|
||||
unsigned int rate_mult;
|
||||
unsigned int sample_type:1;
|
||||
};
|
||||
|
||||
struct cs_cpu_dbs_info_s {
|
||||
struct cpu_dbs_common_info cdbs;
|
||||
unsigned int down_skip;
|
||||
unsigned int requested_freq;
|
||||
unsigned int enable:1;
|
||||
};
|
||||
|
||||
/* Governor sysfs tunables */
|
||||
struct od_dbs_tuners {
|
||||
unsigned int ignore_nice;
|
||||
unsigned int sampling_rate;
|
||||
unsigned int sampling_down_factor;
|
||||
unsigned int up_threshold;
|
||||
unsigned int down_differential;
|
||||
unsigned int powersave_bias;
|
||||
unsigned int io_is_busy;
|
||||
};
|
||||
|
||||
struct cs_dbs_tuners {
|
||||
unsigned int ignore_nice;
|
||||
unsigned int sampling_rate;
|
||||
unsigned int sampling_down_factor;
|
||||
unsigned int up_threshold;
|
||||
unsigned int down_threshold;
|
||||
unsigned int freq_step;
|
||||
};
|
||||
|
||||
/* Per governor data */
|
||||
struct dbs_data {
|
||||
/* Common across governors */
|
||||
#define GOV_ONDEMAND 0
|
||||
#define GOV_CONSERVATIVE 1
|
||||
int governor;
|
||||
unsigned int min_sampling_rate;
|
||||
unsigned int enable; /* number of CPUs using this policy */
|
||||
struct attribute_group *attr_group;
|
||||
void *tuners;
|
||||
|
||||
/* dbs_mutex protects dbs_enable in governor start/stop */
|
||||
struct mutex mutex;
|
||||
|
||||
struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
|
||||
void *(*get_cpu_dbs_info_s)(int cpu);
|
||||
void (*gov_dbs_timer)(struct work_struct *work);
|
||||
void (*gov_check_cpu)(int cpu, unsigned int load);
|
||||
|
||||
/* Governor specific ops, see below */
|
||||
void *gov_ops;
|
||||
};
|
||||
|
||||
/* Governor specific ops, will be passed to dbs_data->gov_ops */
|
||||
struct od_ops {
|
||||
int (*io_busy)(void);
|
||||
void (*powersave_bias_init_cpu)(int cpu);
|
||||
unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
|
||||
unsigned int freq_next, unsigned int relation);
|
||||
void (*freq_increase)(struct cpufreq_policy *p, unsigned int freq);
|
||||
};
|
||||
|
||||
struct cs_ops {
|
||||
struct notifier_block *notifier_block;
|
||||
};
|
||||
|
||||
static inline int delay_for_sampling_rate(unsigned int sampling_rate)
|
||||
{
|
||||
int delay = usecs_to_jiffies(sampling_rate);
|
||||
|
||||
/* We want all CPUs to do sampling nearly on same jiffy */
|
||||
if (num_online_cpus() > 1)
|
||||
delay -= jiffies % delay;
|
||||
|
||||
return delay;
|
||||
}
|
||||
|
||||
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall);
|
||||
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
|
||||
int cpufreq_governor_dbs(struct dbs_data *dbs_data,
|
||||
struct cpufreq_policy *policy, unsigned int event);
|
||||
#endif /* _CPUFREQ_GOVERNER_H */
|
@ -10,24 +10,23 @@
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/hrtimer.h>
|
||||
#include <linux/percpu-defs.h>
|
||||
#include <linux/sysfs.h>
|
||||
#include <linux/tick.h>
|
||||
#include <linux/ktime.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
/*
|
||||
* dbs is used in this file as a shortform for demand based switching
|
||||
* It helps to keep variable names smaller, simpler
|
||||
*/
|
||||
#include "cpufreq_governor.h"
|
||||
|
||||
/* On-demand governor macros */
|
||||
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
|
||||
#define DEF_FREQUENCY_UP_THRESHOLD (80)
|
||||
#define DEF_SAMPLING_DOWN_FACTOR (1)
|
||||
@ -38,80 +37,14 @@
|
||||
#define MIN_FREQUENCY_UP_THRESHOLD (11)
|
||||
#define MAX_FREQUENCY_UP_THRESHOLD (100)
|
||||
|
||||
/*
|
||||
* The polling frequency of this governor depends on the capability of
|
||||
* the processor. Default polling frequency is 1000 times the transition
|
||||
* latency of the processor. The governor will work on any processor with
|
||||
* transition latency <= 10mS, using appropriate sampling
|
||||
* rate.
|
||||
* For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
|
||||
* this governor will not work.
|
||||
* All times here are in uS.
|
||||
*/
|
||||
#define MIN_SAMPLING_RATE_RATIO (2)
|
||||
|
||||
static unsigned int min_sampling_rate;
|
||||
|
||||
#define LATENCY_MULTIPLIER (1000)
|
||||
#define MIN_LATENCY_MULTIPLIER (100)
|
||||
#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
|
||||
|
||||
static void do_dbs_timer(struct work_struct *work);
|
||||
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
|
||||
unsigned int event);
|
||||
static struct dbs_data od_dbs_data;
|
||||
static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);
|
||||
|
||||
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
|
||||
static
|
||||
static struct cpufreq_governor cpufreq_gov_ondemand;
|
||||
#endif
|
||||
struct cpufreq_governor cpufreq_gov_ondemand = {
|
||||
.name = "ondemand",
|
||||
.governor = cpufreq_governor_dbs,
|
||||
.max_transition_latency = TRANSITION_LATENCY_LIMIT,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
/* Sampling types */
|
||||
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
|
||||
|
||||
struct cpu_dbs_info_s {
|
||||
cputime64_t prev_cpu_idle;
|
||||
cputime64_t prev_cpu_iowait;
|
||||
cputime64_t prev_cpu_wall;
|
||||
cputime64_t prev_cpu_nice;
|
||||
struct cpufreq_policy *cur_policy;
|
||||
struct delayed_work work;
|
||||
struct cpufreq_frequency_table *freq_table;
|
||||
unsigned int freq_lo;
|
||||
unsigned int freq_lo_jiffies;
|
||||
unsigned int freq_hi_jiffies;
|
||||
unsigned int rate_mult;
|
||||
int cpu;
|
||||
unsigned int sample_type:1;
|
||||
/*
|
||||
* percpu mutex that serializes governor limit change with
|
||||
* do_dbs_timer invocation. We do not want do_dbs_timer to run
|
||||
* when user is changing the governor or limits.
|
||||
*/
|
||||
struct mutex timer_mutex;
|
||||
};
|
||||
static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
|
||||
|
||||
static unsigned int dbs_enable; /* number of CPUs using this policy */
|
||||
|
||||
/*
|
||||
* dbs_mutex protects dbs_enable in governor start/stop.
|
||||
*/
|
||||
static DEFINE_MUTEX(dbs_mutex);
|
||||
|
||||
static struct dbs_tuners {
|
||||
unsigned int sampling_rate;
|
||||
unsigned int up_threshold;
|
||||
unsigned int down_differential;
|
||||
unsigned int ignore_nice;
|
||||
unsigned int sampling_down_factor;
|
||||
unsigned int powersave_bias;
|
||||
unsigned int io_is_busy;
|
||||
} dbs_tuners_ins = {
|
||||
static struct od_dbs_tuners od_tuners = {
|
||||
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
|
||||
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
|
||||
.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
|
||||
@ -119,48 +52,35 @@ static struct dbs_tuners {
|
||||
.powersave_bias = 0,
|
||||
};
|
||||
|
||||
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
|
||||
static void ondemand_powersave_bias_init_cpu(int cpu)
|
||||
{
|
||||
u64 idle_time;
|
||||
u64 cur_wall_time;
|
||||
u64 busy_time;
|
||||
struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
|
||||
|
||||
cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
|
||||
|
||||
busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
|
||||
busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
|
||||
busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
|
||||
busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
|
||||
busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
|
||||
busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
|
||||
|
||||
idle_time = cur_wall_time - busy_time;
|
||||
if (wall)
|
||||
*wall = jiffies_to_usecs(cur_wall_time);
|
||||
|
||||
return jiffies_to_usecs(idle_time);
|
||||
dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
|
||||
dbs_info->freq_lo = 0;
|
||||
}
|
||||
|
||||
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
|
||||
/*
|
||||
* Not all CPUs want IO time to be accounted as busy; this depends on how
|
||||
* efficient idling at a higher frequency/voltage is.
|
||||
* Pavel Machek says this is not so for various generations of AMD and old
|
||||
* Intel systems.
|
||||
* Mike Chan (androidlcom) calis this is also not true for ARM.
|
||||
* Because of this, whitelist specific known (series) of CPUs by default, and
|
||||
* leave all others up to the user.
|
||||
*/
|
||||
static int should_io_be_busy(void)
|
||||
{
|
||||
u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
|
||||
|
||||
if (idle_time == -1ULL)
|
||||
return get_cpu_idle_time_jiffy(cpu, wall);
|
||||
else
|
||||
idle_time += get_cpu_iowait_time_us(cpu, wall);
|
||||
|
||||
return idle_time;
|
||||
}
|
||||
|
||||
static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
|
||||
{
|
||||
u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);
|
||||
|
||||
if (iowait_time == -1ULL)
|
||||
return 0;
|
||||
|
||||
return iowait_time;
|
||||
#if defined(CONFIG_X86)
|
||||
/*
* For Intel, Core 2 (model 15) and later have an efficient idle.
*/
|
||||
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
|
||||
boot_cpu_data.x86 == 6 &&
|
||||
boot_cpu_data.x86_model >= 15)
|
||||
return 1;
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -169,14 +89,13 @@ static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wal
|
||||
* freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
|
||||
*/
|
||||
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
|
||||
unsigned int freq_next,
|
||||
unsigned int relation)
|
||||
unsigned int freq_next, unsigned int relation)
|
||||
{
|
||||
unsigned int freq_req, freq_reduc, freq_avg;
|
||||
unsigned int freq_hi, freq_lo;
|
||||
unsigned int index = 0;
|
||||
unsigned int jiffies_total, jiffies_hi, jiffies_lo;
|
||||
struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
|
||||
struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
|
||||
policy->cpu);
|
||||
|
||||
if (!dbs_info->freq_table) {
|
||||
@ -188,7 +107,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
|
||||
cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
|
||||
relation, &index);
|
||||
freq_req = dbs_info->freq_table[index].frequency;
|
||||
freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
|
||||
freq_reduc = freq_req * od_tuners.powersave_bias / 1000;
|
||||
freq_avg = freq_req - freq_reduc;
|
||||
|
||||
/* Find freq bounds for freq_avg in freq_table */
|
||||
@ -207,7 +126,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
|
||||
dbs_info->freq_lo_jiffies = 0;
|
||||
return freq_lo;
|
||||
}
|
||||
jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
|
||||
jiffies_total = usecs_to_jiffies(od_tuners.sampling_rate);
|
||||
jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
|
||||
jiffies_hi += ((freq_hi - freq_lo) / 2);
|
||||
jiffies_hi /= (freq_hi - freq_lo);
|
||||
@ -218,13 +137,6 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
|
||||
return freq_hi;
|
||||
}
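A rough worked example of the bias split above (illustrative numbers, not taken from the patch): with od_tuners.powersave_bias = 100 (i.e. 10%) and a requested freq_req of 1000000 kHz, freq_reduc = 1000000 * 100 / 1000 = 100000, so freq_avg = 900000 kHz; the routine then picks the table entries bracketing 900000 as freq_lo and freq_hi and splits the sampling interval between them in proportion, so the time-averaged frequency approximates freq_avg.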
|
||||
|
||||
static void ondemand_powersave_bias_init_cpu(int cpu)
|
||||
{
|
||||
struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
|
||||
dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
|
||||
dbs_info->freq_lo = 0;
|
||||
}
|
||||
|
||||
static void ondemand_powersave_bias_init(void)
|
||||
{
|
||||
int i;
|
||||
@ -233,83 +145,173 @@ static void ondemand_powersave_bias_init(void)
|
||||
}
|
||||
}
|
||||
|
||||
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
|
||||
{
|
||||
if (od_tuners.powersave_bias)
|
||||
freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
|
||||
else if (p->cur == p->max)
|
||||
return;
|
||||
|
||||
__cpufreq_driver_target(p, freq, od_tuners.powersave_bias ?
|
||||
CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
|
||||
}
|
||||
|
||||
/*
* Every sampling_rate, we check if the current idle time is less than 20%
* (default); if it is, we try to increase the frequency. Every sampling_rate,
* we also look for the lowest frequency which can sustain the load while
* keeping idle time over 30%. If such a frequency exists, we try to decrease
* to this frequency.
*
* Any frequency increase takes it to the maximum frequency. Frequency reduction
* happens at minimum steps of 5% (default) of the current frequency.
*/
|
||||
static void od_check_cpu(int cpu, unsigned int load_freq)
|
||||
{
|
||||
struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
|
||||
struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
|
||||
|
||||
dbs_info->freq_lo = 0;
|
||||
|
||||
/* Check for frequency increase */
|
||||
if (load_freq > od_tuners.up_threshold * policy->cur) {
|
||||
/* If switching to max speed, apply sampling_down_factor */
|
||||
if (policy->cur < policy->max)
|
||||
dbs_info->rate_mult =
|
||||
od_tuners.sampling_down_factor;
|
||||
dbs_freq_increase(policy, policy->max);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Check for frequency decrease */
|
||||
/* if we cannot reduce the frequency anymore, break out early */
|
||||
if (policy->cur == policy->min)
|
||||
return;
|
||||
|
||||
/*
* The optimal frequency is the lowest frequency that can support the
* current CPU usage without triggering the up policy. To be safe, we
* focus 10 points under the threshold.
*/
|
||||
if (load_freq < (od_tuners.up_threshold - od_tuners.down_differential) *
|
||||
policy->cur) {
|
||||
unsigned int freq_next;
|
||||
freq_next = load_freq / (od_tuners.up_threshold -
|
||||
od_tuners.down_differential);
|
||||
|
||||
/* No longer fully busy, reset rate_mult */
|
||||
dbs_info->rate_mult = 1;
|
||||
|
||||
if (freq_next < policy->min)
|
||||
freq_next = policy->min;
|
||||
|
||||
if (!od_tuners.powersave_bias) {
|
||||
__cpufreq_driver_target(policy, freq_next,
|
||||
CPUFREQ_RELATION_L);
|
||||
} else {
|
||||
int freq = powersave_bias_target(policy, freq_next,
|
||||
CPUFREQ_RELATION_L);
|
||||
__cpufreq_driver_target(policy, freq,
|
||||
CPUFREQ_RELATION_L);
|
||||
}
|
||||
}
|
||||
}
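Illustrative arithmetic for the two thresholds above (example values; 80 and 10 are the usual ondemand defaults, the exact constants live in DEF_FREQUENCY_UP_THRESHOLD and DEF_FREQUENCY_DOWN_DIFFERENTIAL): with up_threshold = 80, down_differential = 10 and policy->cur = 1000000 kHz, a load_freq of 90 * 1000000 exceeds 80 * 1000000 and the governor jumps straight to policy->max, while a load_freq of 30 * 1000000 is below (80 - 10) * 1000000, giving freq_next = 30000000 / 70, roughly 428571 kHz, which is then clamped to policy->min if necessary.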
|
||||
|
||||
static void od_dbs_timer(struct work_struct *work)
|
||||
{
|
||||
struct od_cpu_dbs_info_s *dbs_info =
|
||||
container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
|
||||
unsigned int cpu = dbs_info->cdbs.cpu;
|
||||
int delay, sample_type = dbs_info->sample_type;
|
||||
|
||||
mutex_lock(&dbs_info->cdbs.timer_mutex);
|
||||
|
||||
/* Common NORMAL_SAMPLE setup */
|
||||
dbs_info->sample_type = OD_NORMAL_SAMPLE;
|
||||
if (sample_type == OD_SUB_SAMPLE) {
|
||||
delay = dbs_info->freq_lo_jiffies;
|
||||
__cpufreq_driver_target(dbs_info->cdbs.cur_policy,
|
||||
dbs_info->freq_lo, CPUFREQ_RELATION_H);
|
||||
} else {
|
||||
dbs_check_cpu(&od_dbs_data, cpu);
|
||||
if (dbs_info->freq_lo) {
|
||||
/* Setup timer for SUB_SAMPLE */
|
||||
dbs_info->sample_type = OD_SUB_SAMPLE;
|
||||
delay = dbs_info->freq_hi_jiffies;
|
||||
} else {
|
||||
delay = delay_for_sampling_rate(od_tuners.sampling_rate
|
||||
* dbs_info->rate_mult);
|
||||
}
|
||||
}
|
||||
|
||||
schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
|
||||
mutex_unlock(&dbs_info->cdbs.timer_mutex);
|
||||
}
|
||||
|
||||
/************************** sysfs interface ************************/
|
||||
|
||||
static ssize_t show_sampling_rate_min(struct kobject *kobj,
|
||||
struct attribute *attr, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%u\n", min_sampling_rate);
|
||||
return sprintf(buf, "%u\n", od_dbs_data.min_sampling_rate);
|
||||
}
|
||||
|
||||
define_one_global_ro(sampling_rate_min);
|
||||
|
||||
/* cpufreq_ondemand Governor Tunables */
|
||||
#define show_one(file_name, object) \
|
||||
static ssize_t show_##file_name \
|
||||
(struct kobject *kobj, struct attribute *attr, char *buf) \
|
||||
{ \
|
||||
return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
|
||||
}
|
||||
show_one(sampling_rate, sampling_rate);
|
||||
show_one(io_is_busy, io_is_busy);
|
||||
show_one(up_threshold, up_threshold);
|
||||
show_one(sampling_down_factor, sampling_down_factor);
|
||||
show_one(ignore_nice_load, ignore_nice);
|
||||
show_one(powersave_bias, powersave_bias);
|
||||
|
||||
/**
|
||||
* update_sampling_rate - update sampling rate effective immediately if needed.
|
||||
* @new_rate: new sampling rate
|
||||
*
|
||||
* If the new rate is smaller than the old one, simply updating
* dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
* original sampling_rate was 1 second and the requested new sampling rate is 10
* ms because the user needs an immediate reaction from the ondemand governor,
* but is not sure whether a higher frequency will be required or not, then the
* governor may change the sampling rate too late; up to 1 second later. Thus,
* if we are reducing the sampling rate, we need to make the new value effective
* immediately.
*/
|
||||
static void update_sampling_rate(unsigned int new_rate)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
dbs_tuners_ins.sampling_rate = new_rate
|
||||
= max(new_rate, min_sampling_rate);
|
||||
od_tuners.sampling_rate = new_rate = max(new_rate,
|
||||
od_dbs_data.min_sampling_rate);
|
||||
|
||||
for_each_online_cpu(cpu) {
|
||||
struct cpufreq_policy *policy;
|
||||
struct cpu_dbs_info_s *dbs_info;
|
||||
struct od_cpu_dbs_info_s *dbs_info;
|
||||
unsigned long next_sampling, appointed_at;
|
||||
|
||||
policy = cpufreq_cpu_get(cpu);
|
||||
if (!policy)
|
||||
continue;
|
||||
if (policy->governor != &cpufreq_gov_ondemand) {
|
||||
cpufreq_cpu_put(policy);
|
||||
continue;
|
||||
}
|
||||
dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
|
||||
cpufreq_cpu_put(policy);
|
||||
|
||||
mutex_lock(&dbs_info->timer_mutex);
|
||||
mutex_lock(&dbs_info->cdbs.timer_mutex);
|
||||
|
||||
if (!delayed_work_pending(&dbs_info->work)) {
|
||||
mutex_unlock(&dbs_info->timer_mutex);
|
||||
if (!delayed_work_pending(&dbs_info->cdbs.work)) {
|
||||
mutex_unlock(&dbs_info->cdbs.timer_mutex);
|
||||
continue;
|
||||
}
|
||||
|
||||
next_sampling = jiffies + usecs_to_jiffies(new_rate);
|
||||
appointed_at = dbs_info->work.timer.expires;
|
||||
|
||||
next_sampling = jiffies + usecs_to_jiffies(new_rate);
|
||||
appointed_at = dbs_info->cdbs.work.timer.expires;
|
||||
|
||||
if (time_before(next_sampling, appointed_at)) {
|
||||
|
||||
mutex_unlock(&dbs_info->timer_mutex);
|
||||
cancel_delayed_work_sync(&dbs_info->work);
|
||||
mutex_lock(&dbs_info->timer_mutex);
|
||||
mutex_unlock(&dbs_info->cdbs.timer_mutex);
|
||||
cancel_delayed_work_sync(&dbs_info->cdbs.work);
|
||||
mutex_lock(&dbs_info->cdbs.timer_mutex);
|
||||
|
||||
schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work,
|
||||
usecs_to_jiffies(new_rate));
|
||||
schedule_delayed_work_on(dbs_info->cdbs.cpu,
|
||||
&dbs_info->cdbs.work,
|
||||
usecs_to_jiffies(new_rate));
|
||||
|
||||
}
|
||||
mutex_unlock(&dbs_info->timer_mutex);
|
||||
mutex_unlock(&dbs_info->cdbs.timer_mutex);
|
||||
}
|
||||
}
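As a usage sketch (assuming the global ondemand sysfs group created elsewhere in this file is mounted at its usual location), lowering the rate from userspace is simply:

echo 10000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate

i.e. 10 ms expressed in microseconds; with the logic above the shorter interval takes effect immediately rather than only after the previously scheduled sample fires.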
|
||||
|
||||
@ -334,7 +336,7 @@ static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
|
||||
ret = sscanf(buf, "%u", &input);
|
||||
if (ret != 1)
|
||||
return -EINVAL;
|
||||
dbs_tuners_ins.io_is_busy = !!input;
|
||||
od_tuners.io_is_busy = !!input;
|
||||
return count;
|
||||
}
|
||||
|
||||
@ -349,7 +351,7 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
|
||||
input < MIN_FREQUENCY_UP_THRESHOLD) {
|
||||
return -EINVAL;
|
||||
}
|
||||
dbs_tuners_ins.up_threshold = input;
|
||||
od_tuners.up_threshold = input;
|
||||
return count;
|
||||
}
|
||||
|
||||
@ -362,12 +364,12 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
|
||||
|
||||
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
|
||||
return -EINVAL;
|
||||
dbs_tuners_ins.sampling_down_factor = input;
|
||||
od_tuners.sampling_down_factor = input;
|
||||
|
||||
/* Reset down sampling multiplier in case it was active */
|
||||
for_each_online_cpu(j) {
|
||||
struct cpu_dbs_info_s *dbs_info;
|
||||
dbs_info = &per_cpu(od_cpu_dbs_info, j);
|
||||
struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
|
||||
j);
|
||||
dbs_info->rate_mult = 1;
|
||||
}
|
||||
return count;
|
||||
@ -388,19 +390,20 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
|
||||
if (input > 1)
|
||||
input = 1;
|
||||
|
||||
if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
|
||||
if (input == od_tuners.ignore_nice) { /* nothing to do */
|
||||
return count;
|
||||
}
|
||||
dbs_tuners_ins.ignore_nice = input;
|
||||
od_tuners.ignore_nice = input;
|
||||
|
||||
/* we need to re-evaluate prev_cpu_idle */
|
||||
for_each_online_cpu(j) {
|
||||
struct cpu_dbs_info_s *dbs_info;
|
||||
struct od_cpu_dbs_info_s *dbs_info;
|
||||
dbs_info = &per_cpu(od_cpu_dbs_info, j);
|
||||
dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
|
||||
&dbs_info->prev_cpu_wall);
|
||||
if (dbs_tuners_ins.ignore_nice)
|
||||
dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
|
||||
dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
|
||||
&dbs_info->cdbs.prev_cpu_wall);
|
||||
if (od_tuners.ignore_nice)
|
||||
dbs_info->cdbs.prev_cpu_nice =
|
||||
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
|
||||
|
||||
}
|
||||
return count;
|
||||
@ -419,17 +422,25 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
|
||||
if (input > 1000)
|
||||
input = 1000;
|
||||
|
||||
dbs_tuners_ins.powersave_bias = input;
|
||||
od_tuners.powersave_bias = input;
|
||||
ondemand_powersave_bias_init();
|
||||
return count;
|
||||
}
|
||||
|
||||
show_one(od, sampling_rate, sampling_rate);
|
||||
show_one(od, io_is_busy, io_is_busy);
|
||||
show_one(od, up_threshold, up_threshold);
|
||||
show_one(od, sampling_down_factor, sampling_down_factor);
|
||||
show_one(od, ignore_nice_load, ignore_nice);
|
||||
show_one(od, powersave_bias, powersave_bias);
|
||||
|
||||
define_one_global_rw(sampling_rate);
|
||||
define_one_global_rw(io_is_busy);
|
||||
define_one_global_rw(up_threshold);
|
||||
define_one_global_rw(sampling_down_factor);
|
||||
define_one_global_rw(ignore_nice_load);
|
||||
define_one_global_rw(powersave_bias);
|
||||
define_one_global_ro(sampling_rate_min);
|
||||
|
||||
static struct attribute *dbs_attributes[] = {
|
||||
&sampling_rate_min.attr,
|
||||
@ -442,354 +453,71 @@ static struct attribute *dbs_attributes[] = {
|
||||
NULL
|
||||
};
|
||||
|
||||
static struct attribute_group dbs_attr_group = {
|
||||
static struct attribute_group od_attr_group = {
|
||||
.attrs = dbs_attributes,
|
||||
.name = "ondemand",
|
||||
};
|
||||
|
||||
/************************** sysfs end ************************/
|
||||
|
||||
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
|
||||
{
|
||||
if (dbs_tuners_ins.powersave_bias)
|
||||
freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
|
||||
else if (p->cur == p->max)
|
||||
return;
|
||||
define_get_cpu_dbs_routines(od_cpu_dbs_info);
|
||||
|
||||
__cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
|
||||
CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
|
||||
static struct od_ops od_ops = {
|
||||
.io_busy = should_io_be_busy,
|
||||
.powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
|
||||
.powersave_bias_target = powersave_bias_target,
|
||||
.freq_increase = dbs_freq_increase,
|
||||
};
|
||||
|
||||
static struct dbs_data od_dbs_data = {
|
||||
.governor = GOV_ONDEMAND,
|
||||
.attr_group = &od_attr_group,
|
||||
.tuners = &od_tuners,
|
||||
.get_cpu_cdbs = get_cpu_cdbs,
|
||||
.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
|
||||
.gov_dbs_timer = od_dbs_timer,
|
||||
.gov_check_cpu = od_check_cpu,
|
||||
.gov_ops = &od_ops,
|
||||
};
|
||||
|
||||
static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
|
||||
unsigned int event)
|
||||
{
|
||||
return cpufreq_governor_dbs(&od_dbs_data, policy, event);
|
||||
}
|
||||
|
||||
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
|
||||
{
|
||||
unsigned int max_load_freq;
|
||||
|
||||
struct cpufreq_policy *policy;
|
||||
unsigned int j;
|
||||
|
||||
this_dbs_info->freq_lo = 0;
|
||||
policy = this_dbs_info->cur_policy;
|
||||
|
||||
/*
* Every sampling_rate, we check if the current idle time is less
* than 20% (default); if it is, we try to increase the frequency.
* Every sampling_rate, we also look for the lowest
* frequency which can sustain the load while keeping idle time over
* 30%. If such a frequency exists, we try to decrease to this frequency.
*
* Any frequency increase takes it to the maximum frequency.
* Frequency reduction happens at minimum steps of
* 5% (default) of the current frequency
*/
|
||||
|
||||
/* Get Absolute Load - in terms of freq */
|
||||
max_load_freq = 0;
|
||||
|
||||
for_each_cpu(j, policy->cpus) {
|
||||
struct cpu_dbs_info_s *j_dbs_info;
|
||||
cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
|
||||
unsigned int idle_time, wall_time, iowait_time;
|
||||
unsigned int load, load_freq;
|
||||
int freq_avg;
|
||||
|
||||
j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
|
||||
|
||||
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
|
||||
cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
|
||||
|
||||
wall_time = (unsigned int)
|
||||
(cur_wall_time - j_dbs_info->prev_cpu_wall);
|
||||
j_dbs_info->prev_cpu_wall = cur_wall_time;
|
||||
|
||||
idle_time = (unsigned int)
|
||||
(cur_idle_time - j_dbs_info->prev_cpu_idle);
|
||||
j_dbs_info->prev_cpu_idle = cur_idle_time;
|
||||
|
||||
iowait_time = (unsigned int)
|
||||
(cur_iowait_time - j_dbs_info->prev_cpu_iowait);
|
||||
j_dbs_info->prev_cpu_iowait = cur_iowait_time;
|
||||
|
||||
if (dbs_tuners_ins.ignore_nice) {
|
||||
u64 cur_nice;
|
||||
unsigned long cur_nice_jiffies;
|
||||
|
||||
cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
|
||||
j_dbs_info->prev_cpu_nice;
|
||||
/*
* Assumption: nice time between sampling periods will
* be less than 2^32 jiffies for a 32-bit system
*/
|
||||
cur_nice_jiffies = (unsigned long)
|
||||
cputime64_to_jiffies64(cur_nice);
|
||||
|
||||
j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
|
||||
idle_time += jiffies_to_usecs(cur_nice_jiffies);
|
||||
}
|
||||
|
||||
/*
|
||||
* For the purpose of ondemand, waiting for disk IO is an
|
||||
* indication that you're performance critical, and not that
|
||||
* the system is actually idle. So subtract the iowait time
|
||||
* from the cpu idle time.
|
||||
*/
|
||||
|
||||
if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
|
||||
idle_time -= iowait_time;
|
||||
|
||||
if (unlikely(!wall_time || wall_time < idle_time))
|
||||
continue;
|
||||
|
||||
load = 100 * (wall_time - idle_time) / wall_time;
|
||||
|
||||
freq_avg = __cpufreq_driver_getavg(policy, j);
|
||||
if (freq_avg <= 0)
|
||||
freq_avg = policy->cur;
|
||||
|
||||
load_freq = load * freq_avg;
|
||||
if (load_freq > max_load_freq)
|
||||
max_load_freq = load_freq;
|
||||
}
|
||||
|
||||
/* Check for frequency increase */
|
||||
if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
|
||||
/* If switching to max speed, apply sampling_down_factor */
|
||||
if (policy->cur < policy->max)
|
||||
this_dbs_info->rate_mult =
|
||||
dbs_tuners_ins.sampling_down_factor;
|
||||
dbs_freq_increase(policy, policy->max);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Check for frequency decrease */
|
||||
/* if we cannot reduce the frequency anymore, break out early */
|
||||
if (policy->cur == policy->min)
|
||||
return;
|
||||
|
||||
/*
* The optimal frequency is the lowest frequency that
* can support the current CPU usage without triggering the up
* policy. To be safe, we focus 10 points under the threshold.
*/
|
||||
if (max_load_freq <
|
||||
(dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
|
||||
policy->cur) {
|
||||
unsigned int freq_next;
|
||||
freq_next = max_load_freq /
|
||||
(dbs_tuners_ins.up_threshold -
|
||||
dbs_tuners_ins.down_differential);
|
||||
|
||||
/* No longer fully busy, reset rate_mult */
|
||||
this_dbs_info->rate_mult = 1;
|
||||
|
||||
if (freq_next < policy->min)
|
||||
freq_next = policy->min;
|
||||
|
||||
if (!dbs_tuners_ins.powersave_bias) {
|
||||
__cpufreq_driver_target(policy, freq_next,
|
||||
CPUFREQ_RELATION_L);
|
||||
} else {
|
||||
int freq = powersave_bias_target(policy, freq_next,
|
||||
CPUFREQ_RELATION_L);
|
||||
__cpufreq_driver_target(policy, freq,
|
||||
CPUFREQ_RELATION_L);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void do_dbs_timer(struct work_struct *work)
|
||||
{
|
||||
struct cpu_dbs_info_s *dbs_info =
|
||||
container_of(work, struct cpu_dbs_info_s, work.work);
|
||||
unsigned int cpu = dbs_info->cpu;
|
||||
int sample_type = dbs_info->sample_type;
|
||||
|
||||
int delay;
|
||||
|
||||
mutex_lock(&dbs_info->timer_mutex);
|
||||
|
||||
/* Common NORMAL_SAMPLE setup */
|
||||
dbs_info->sample_type = DBS_NORMAL_SAMPLE;
|
||||
if (!dbs_tuners_ins.powersave_bias ||
|
||||
sample_type == DBS_NORMAL_SAMPLE) {
|
||||
dbs_check_cpu(dbs_info);
|
||||
if (dbs_info->freq_lo) {
|
||||
/* Setup timer for SUB_SAMPLE */
|
||||
dbs_info->sample_type = DBS_SUB_SAMPLE;
|
||||
delay = dbs_info->freq_hi_jiffies;
|
||||
} else {
|
||||
/* We want all CPUs to do sampling nearly on the
* same jiffy
*/
|
||||
delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
|
||||
* dbs_info->rate_mult);
|
||||
|
||||
if (num_online_cpus() > 1)
|
||||
delay -= jiffies % delay;
|
||||
}
|
||||
} else {
|
||||
__cpufreq_driver_target(dbs_info->cur_policy,
|
||||
dbs_info->freq_lo, CPUFREQ_RELATION_H);
|
||||
delay = dbs_info->freq_lo_jiffies;
|
||||
}
|
||||
schedule_delayed_work_on(cpu, &dbs_info->work, delay);
|
||||
mutex_unlock(&dbs_info->timer_mutex);
|
||||
}
|
||||
|
||||
static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
|
||||
{
|
||||
/* We want all CPUs to do sampling nearly on the same jiffy */
|
||||
int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
|
||||
|
||||
if (num_online_cpus() > 1)
|
||||
delay -= jiffies % delay;
|
||||
|
||||
dbs_info->sample_type = DBS_NORMAL_SAMPLE;
|
||||
INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
|
||||
schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
|
||||
}
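A quick illustration of the jiffy-alignment trick above (made-up numbers): if delay works out to 10 jiffies and jiffies % 10 == 3 at init time, the first sample is scheduled only 7 jiffies out, so every CPU's deferred work lands on a jiffy that is a multiple of the sampling interval and the per-CPU samples stay roughly in phase.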
|
||||
|
||||
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
|
||||
{
|
||||
cancel_delayed_work_sync(&dbs_info->work);
|
||||
}
|
||||
|
||||
/*
* Not all CPUs want IO time to be accounted as busy; this depends on how
* efficient idling at a higher frequency/voltage is.
* Pavel Machek says this is not so for various generations of AMD and old
* Intel systems.
* Mike Chan (android.com) claims this is also not true for ARM.
* Because of this, whitelist specific known (series) of CPUs by default, and
* leave all others up to the user.
*/
|
||||
static int should_io_be_busy(void)
|
||||
{
|
||||
#if defined(CONFIG_X86)
|
||||
/*
* For Intel, Core 2 (model 15) and later have an efficient idle.
*/
|
||||
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
|
||||
boot_cpu_data.x86 == 6 &&
|
||||
boot_cpu_data.x86_model >= 15)
|
||||
return 1;
|
||||
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
|
||||
static
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
|
||||
unsigned int event)
|
||||
{
|
||||
unsigned int cpu = policy->cpu;
|
||||
struct cpu_dbs_info_s *this_dbs_info;
|
||||
unsigned int j;
|
||||
int rc;
|
||||
|
||||
this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
|
||||
|
||||
switch (event) {
|
||||
case CPUFREQ_GOV_START:
|
||||
if ((!cpu_online(cpu)) || (!policy->cur))
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&dbs_mutex);
|
||||
|
||||
dbs_enable++;
|
||||
for_each_cpu(j, policy->cpus) {
|
||||
struct cpu_dbs_info_s *j_dbs_info;
|
||||
j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
|
||||
j_dbs_info->cur_policy = policy;
|
||||
|
||||
j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
|
||||
&j_dbs_info->prev_cpu_wall);
|
||||
if (dbs_tuners_ins.ignore_nice)
|
||||
j_dbs_info->prev_cpu_nice =
|
||||
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
|
||||
}
|
||||
this_dbs_info->cpu = cpu;
|
||||
this_dbs_info->rate_mult = 1;
|
||||
ondemand_powersave_bias_init_cpu(cpu);
|
||||
/*
* Start the timer schedule work when this governor
* is used for the first time
*/
|
||||
if (dbs_enable == 1) {
|
||||
unsigned int latency;
|
||||
|
||||
rc = sysfs_create_group(cpufreq_global_kobject,
|
||||
&dbs_attr_group);
|
||||
if (rc) {
|
||||
mutex_unlock(&dbs_mutex);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* policy latency is in nS. Convert it to uS first */
|
||||
latency = policy->cpuinfo.transition_latency / 1000;
|
||||
if (latency == 0)
|
||||
latency = 1;
|
||||
/* Bring kernel and HW constraints together */
|
||||
min_sampling_rate = max(min_sampling_rate,
|
||||
MIN_LATENCY_MULTIPLIER * latency);
|
||||
dbs_tuners_ins.sampling_rate =
|
||||
max(min_sampling_rate,
|
||||
latency * LATENCY_MULTIPLIER);
|
||||
dbs_tuners_ins.io_is_busy = should_io_be_busy();
|
||||
}
|
||||
mutex_unlock(&dbs_mutex);
|
||||
|
||||
mutex_init(&this_dbs_info->timer_mutex);
|
||||
dbs_timer_init(this_dbs_info);
|
||||
break;
|
||||
|
||||
case CPUFREQ_GOV_STOP:
|
||||
dbs_timer_exit(this_dbs_info);
|
||||
|
||||
mutex_lock(&dbs_mutex);
|
||||
mutex_destroy(&this_dbs_info->timer_mutex);
|
||||
dbs_enable--;
|
||||
mutex_unlock(&dbs_mutex);
|
||||
if (!dbs_enable)
|
||||
sysfs_remove_group(cpufreq_global_kobject,
|
||||
&dbs_attr_group);
|
||||
|
||||
break;
|
||||
|
||||
case CPUFREQ_GOV_LIMITS:
|
||||
mutex_lock(&this_dbs_info->timer_mutex);
|
||||
if (policy->max < this_dbs_info->cur_policy->cur)
|
||||
__cpufreq_driver_target(this_dbs_info->cur_policy,
|
||||
policy->max, CPUFREQ_RELATION_H);
|
||||
else if (policy->min > this_dbs_info->cur_policy->cur)
|
||||
__cpufreq_driver_target(this_dbs_info->cur_policy,
|
||||
policy->min, CPUFREQ_RELATION_L);
|
||||
dbs_check_cpu(this_dbs_info);
|
||||
mutex_unlock(&this_dbs_info->timer_mutex);
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
struct cpufreq_governor cpufreq_gov_ondemand = {
|
||||
.name = "ondemand",
|
||||
.governor = od_cpufreq_governor_dbs,
|
||||
.max_transition_latency = TRANSITION_LATENCY_LIMIT,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static int __init cpufreq_gov_dbs_init(void)
|
||||
{
|
||||
u64 idle_time;
|
||||
int cpu = get_cpu();
|
||||
|
||||
mutex_init(&od_dbs_data.mutex);
|
||||
idle_time = get_cpu_idle_time_us(cpu, NULL);
|
||||
put_cpu();
|
||||
if (idle_time != -1ULL) {
|
||||
/* Idle micro accounting is supported. Use finer thresholds */
|
||||
dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
|
||||
dbs_tuners_ins.down_differential =
|
||||
MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
|
||||
od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
|
||||
od_tuners.down_differential = MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
|
||||
/*
* In the nohz/micro accounting case we set the minimum frequency
* not depending on HZ, but fixed (very low). The deferred
* timer might skip some samples if idle/sleeping as needed.
*/
|
||||
min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
|
||||
od_dbs_data.min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
|
||||
} else {
|
||||
/* For correct statistics, we need 10 ticks for each measure */
|
||||
min_sampling_rate =
|
||||
MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
|
||||
od_dbs_data.min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
|
||||
jiffies_to_usecs(10);
|
||||
}
|
||||
|
||||
return cpufreq_register_governor(&cpufreq_gov_ondemand);
|
||||
@ -800,7 +528,6 @@ static void __exit cpufreq_gov_dbs_exit(void)
|
||||
cpufreq_unregister_governor(&cpufreq_gov_ondemand);
|
||||
}
|
||||
|
||||
|
||||
MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
|
||||
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
|
||||
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
|
||||
|
@ -10,6 +10,8 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/cpufreq.h>
|
||||
|
@ -10,6 +10,8 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/cpufreq.h>
|
||||
|
@ -37,7 +37,7 @@ struct cpufreq_stats {
|
||||
unsigned int max_state;
|
||||
unsigned int state_num;
|
||||
unsigned int last_index;
|
||||
cputime64_t *time_in_state;
|
||||
u64 *time_in_state;
|
||||
unsigned int *freq_table;
|
||||
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
|
||||
unsigned int *trans_table;
|
||||
@ -223,7 +223,7 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
|
||||
count++;
|
||||
}
|
||||
|
||||
alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
|
||||
alloc_size = count * sizeof(int) + count * sizeof(u64);
|
||||
|
||||
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
|
||||
alloc_size += count * count * sizeof(int);
|
||||
|
@ -11,6 +11,8 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/smp.h>
|
||||
|
@ -31,13 +31,13 @@ static unsigned int locking_frequency;
|
||||
static bool frequency_locked;
|
||||
static DEFINE_MUTEX(cpufreq_lock);
|
||||
|
||||
int exynos_verify_speed(struct cpufreq_policy *policy)
|
||||
static int exynos_verify_speed(struct cpufreq_policy *policy)
|
||||
{
|
||||
return cpufreq_frequency_table_verify(policy,
|
||||
exynos_info->freq_table);
|
||||
}
|
||||
|
||||
unsigned int exynos_getspeed(unsigned int cpu)
|
||||
static unsigned int exynos_getspeed(unsigned int cpu)
|
||||
{
|
||||
return clk_get_rate(exynos_info->cpu_clk) / 1000;
|
||||
}
|
||||
@ -100,7 +100,8 @@ static int exynos_target(struct cpufreq_policy *policy,
|
||||
}
|
||||
arm_volt = volt_table[index];
|
||||
|
||||
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
|
||||
for_each_cpu(freqs.cpu, policy->cpus)
|
||||
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
|
||||
|
||||
/* When the new frequency is higher than current frequency */
|
||||
if ((freqs.new > freqs.old) && !safe_arm_volt) {
|
||||
@ -115,7 +116,8 @@ static int exynos_target(struct cpufreq_policy *policy,
|
||||
if (freqs.new != freqs.old)
|
||||
exynos_info->set_freq(old_index, index);
|
||||
|
||||
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
|
||||
for_each_cpu(freqs.cpu, policy->cpus)
|
||||
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
|
||||
|
||||
/* When the new frequency is lower than current frequency */
|
||||
if ((freqs.new < freqs.old) ||
|
||||
@ -235,6 +237,7 @@ static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
||||
cpumask_copy(policy->related_cpus, cpu_possible_mask);
|
||||
cpumask_copy(policy->cpus, cpu_online_mask);
|
||||
} else {
|
||||
policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
|
||||
cpumask_setall(policy->cpus);
|
||||
}
|
||||
|
||||
|
@ -9,6 +9,8 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
|
@ -930,7 +930,7 @@ static int __cpuinit longhaul_cpu_init(struct cpufreq_policy *policy)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy)
|
||||
static int longhaul_cpu_exit(struct cpufreq_policy *policy)
|
||||
{
|
||||
cpufreq_frequency_table_put_attr(policy->cpu);
|
||||
return 0;
|
||||
@ -946,7 +946,7 @@ static struct cpufreq_driver longhaul_driver = {
|
||||
.target = longhaul_target,
|
||||
.get = longhaul_get,
|
||||
.init = longhaul_cpu_init,
|
||||
.exit = __devexit_p(longhaul_cpu_exit),
|
||||
.exit = longhaul_cpu_exit,
|
||||
.name = "longhaul",
|
||||
.owner = THIS_MODULE,
|
||||
.attr = longhaul_attr,
|
||||
|
@ -1186,7 +1186,7 @@ err_out:
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
|
||||
static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
|
||||
{
|
||||
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
|
||||
|
||||
@ -1242,7 +1242,7 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
|
||||
.target = powernowk8_target,
|
||||
.bios_limit = acpi_processor_get_bios_limit,
|
||||
.init = powernowk8_cpu_init,
|
||||
.exit = __devexit_p(powernowk8_cpu_exit),
|
||||
.exit = powernowk8_cpu_exit,
|
||||
.get = powernowk8_get,
|
||||
.name = "powernow-k8",
|
||||
.owner = THIS_MODULE,
|
||||
|
291
drivers/cpufreq/spear-cpufreq.c
Normal file
@ -0,0 +1,291 @@
|
||||
/*
|
||||
* drivers/cpufreq/spear-cpufreq.c
|
||||
*
|
||||
* CPU Frequency Scaling for SPEAr platform
|
||||
*
|
||||
* Copyright (C) 2012 ST Microelectronics
|
||||
* Deepak Sikri <deepak.sikri@st.com>
|
||||
*
|
||||
* This file is licensed under the terms of the GNU General Public
|
||||
* License version 2. This program is licensed "as is" without any
|
||||
* warranty of any kind, whether express or implied.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/clk.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
/* SPEAr CPUFreq driver data structure */
|
||||
static struct {
|
||||
struct clk *clk;
|
||||
unsigned int transition_latency;
|
||||
struct cpufreq_frequency_table *freq_tbl;
|
||||
u32 cnt;
|
||||
} spear_cpufreq;
|
||||
|
||||
int spear_cpufreq_verify(struct cpufreq_policy *policy)
|
||||
{
|
||||
return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl);
|
||||
}
|
||||
|
||||
static unsigned int spear_cpufreq_get(unsigned int cpu)
|
||||
{
|
||||
return clk_get_rate(spear_cpufreq.clk) / 1000;
|
||||
}
|
||||
|
||||
static struct clk *spear1340_cpu_get_possible_parent(unsigned long newfreq)
|
||||
{
|
||||
struct clk *sys_pclk;
|
||||
int pclk;
|
||||
/*
* In SPEAr1340, cpu clk's parent sys clk can take input from the
* following sources
*/
|
||||
const char *sys_clk_src[] = {
|
||||
"sys_syn_clk",
|
||||
"pll1_clk",
|
||||
"pll2_clk",
|
||||
"pll3_clk",
|
||||
};
|
||||
|
||||
/*
* As sys clk can have multiple sources, each with its own range
* limitation, we choose the possible sources accordingly
*/
|
||||
if (newfreq <= 300000000)
|
||||
pclk = 0; /* src is sys_syn_clk */
|
||||
else if (newfreq > 300000000 && newfreq <= 500000000)
|
||||
pclk = 3; /* src is pll3_clk */
|
||||
else if (newfreq == 600000000)
|
||||
pclk = 1; /* src is pll1_clk */
|
||||
else
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
/* Get parent to sys clock */
|
||||
sys_pclk = clk_get(NULL, sys_clk_src[pclk]);
|
||||
if (IS_ERR(sys_pclk))
|
||||
pr_err("Failed to get %s clock\n", sys_clk_src[pclk]);
|
||||
|
||||
return sys_pclk;
|
||||
}
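To make the selection above concrete (my reading of this driver, not a statement about other SPEAr variants): a target cpu rate of 600000000 Hz picks pll1_clk, 400000000 Hz falls in the 300-500 MHz window and picks pll3_clk, and anything at or below 300000000 Hz uses sys_syn_clk; any other rate is rejected with -EINVAL. spear_cpufreq_target() below then programs the chosen source at twice the cpu rate before reparenting, since on SPEAr1340 the source clock runs at 2x the intended cpu clock.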
|
||||
|
||||
/*
* In SPEAr1340, we cannot use newfreq directly because we need to actually
* access a source clock (clk) which might not be an ancestor of the cpu clock
* at present. Hence in SPEAr1340 we operate on the source clock directly
* before switching the cpu clock over to it.
*/
|
||||
static int spear1340_set_cpu_rate(struct clk *sys_pclk, unsigned long newfreq)
|
||||
{
|
||||
struct clk *sys_clk;
|
||||
int ret = 0;
|
||||
|
||||
sys_clk = clk_get_parent(spear_cpufreq.clk);
|
||||
if (IS_ERR(sys_clk)) {
|
||||
pr_err("failed to get cpu's parent (sys) clock\n");
|
||||
return PTR_ERR(sys_clk);
|
||||
}
|
||||
|
||||
/* Set the rate of the source clock before changing the parent */
|
||||
ret = clk_set_rate(sys_pclk, newfreq);
|
||||
if (ret) {
|
||||
pr_err("Failed to set sys clk rate to %lu\n", newfreq);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = clk_set_parent(sys_clk, sys_pclk);
|
||||
if (ret) {
|
||||
pr_err("Failed to set sys clk parent\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int spear_cpufreq_target(struct cpufreq_policy *policy,
|
||||
unsigned int target_freq, unsigned int relation)
|
||||
{
|
||||
struct cpufreq_freqs freqs;
|
||||
unsigned long newfreq;
|
||||
struct clk *srcclk;
|
||||
int index, ret, mult = 1;
|
||||
|
||||
if (cpufreq_frequency_table_target(policy, spear_cpufreq.freq_tbl,
|
||||
target_freq, relation, &index))
|
||||
return -EINVAL;
|
||||
|
||||
freqs.cpu = policy->cpu;
|
||||
freqs.old = spear_cpufreq_get(0);
|
||||
|
||||
newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000;
|
||||
if (of_machine_is_compatible("st,spear1340")) {
|
||||
/*
* SPEAr1340 is special: because of the possibility of multiple clock
* sources for cpu clk's parent, a different clock source may be used
* for different cpu clk frequencies. Hence we need to choose one from
* amongst these possible clock sources.
*/
|
||||
srcclk = spear1340_cpu_get_possible_parent(newfreq);
|
||||
if (IS_ERR(srcclk)) {
|
||||
pr_err("Failed to get src clk\n");
|
||||
return PTR_ERR(srcclk);
|
||||
}
|
||||
|
||||
/* SPEAr1340: src clk is always 2 * intended cpu clk */
|
||||
mult = 2;
|
||||
} else {
|
||||
/*
|
||||
* src clock to be altered is ancestor of cpu clock. Hence we
|
||||
* can directly work on cpu clk
|
||||
*/
|
||||
srcclk = spear_cpufreq.clk;
|
||||
}
|
||||
|
||||
newfreq = clk_round_rate(srcclk, newfreq * mult);
|
||||
if (newfreq < 0) {
|
||||
pr_err("clk_round_rate failed for cpu src clock\n");
|
||||
return newfreq;
|
||||
}
|
||||
|
||||
freqs.new = newfreq / 1000;
|
||||
freqs.new /= mult;
|
||||
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
|
||||
|
||||
if (mult == 2)
|
||||
ret = spear1340_set_cpu_rate(srcclk, newfreq);
|
||||
else
|
||||
ret = clk_set_rate(spear_cpufreq.clk, newfreq);
|
||||
|
||||
/* Get current rate after clk_set_rate, in case of failure */
|
||||
if (ret) {
|
||||
pr_err("CPU Freq: cpu clk_set_rate failed: %d\n", ret);
|
||||
freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000;
|
||||
}
|
||||
|
||||
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int spear_cpufreq_init(struct cpufreq_policy *policy)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = cpufreq_frequency_table_cpuinfo(policy, spear_cpufreq.freq_tbl);
|
||||
if (ret) {
|
||||
pr_err("cpufreq_frequency_table_cpuinfo() failed");
|
||||
return ret;
|
||||
}
|
||||
|
||||
cpufreq_frequency_table_get_attr(spear_cpufreq.freq_tbl, policy->cpu);
|
||||
policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency;
|
||||
policy->cur = spear_cpufreq_get(0);
|
||||
|
||||
cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
|
||||
cpumask_copy(policy->related_cpus, policy->cpus);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int spear_cpufreq_exit(struct cpufreq_policy *policy)
|
||||
{
|
||||
cpufreq_frequency_table_put_attr(policy->cpu);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct freq_attr *spear_cpufreq_attr[] = {
|
||||
&cpufreq_freq_attr_scaling_available_freqs,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct cpufreq_driver spear_cpufreq_driver = {
|
||||
.name = "cpufreq-spear",
|
||||
.flags = CPUFREQ_STICKY,
|
||||
.verify = spear_cpufreq_verify,
|
||||
.target = spear_cpufreq_target,
|
||||
.get = spear_cpufreq_get,
|
||||
.init = spear_cpufreq_init,
|
||||
.exit = spear_cpufreq_exit,
|
||||
.attr = spear_cpufreq_attr,
|
||||
};
|
||||
|
||||
static int spear_cpufreq_driver_init(void)
|
||||
{
|
||||
struct device_node *np;
|
||||
const struct property *prop;
|
||||
struct cpufreq_frequency_table *freq_tbl;
|
||||
const __be32 *val;
|
||||
int cnt, i, ret;
|
||||
|
||||
np = of_find_node_by_path("/cpus/cpu@0");
|
||||
if (!np) {
|
||||
pr_err("No cpu node found");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (of_property_read_u32(np, "clock-latency",
|
||||
&spear_cpufreq.transition_latency))
|
||||
spear_cpufreq.transition_latency = CPUFREQ_ETERNAL;
|
||||
|
||||
prop = of_find_property(np, "cpufreq_tbl", NULL);
|
||||
if (!prop || !prop->value) {
|
||||
pr_err("Invalid cpufreq_tbl");
|
||||
ret = -ENODEV;
|
||||
goto out_put_node;
|
||||
}
|
||||
|
||||
cnt = prop->length / sizeof(u32);
|
||||
val = prop->value;
|
||||
|
||||
freq_tbl = kmalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL);
|
||||
if (!freq_tbl) {
|
||||
ret = -ENOMEM;
|
||||
goto out_put_node;
|
||||
}
|
||||
|
||||
for (i = 0; i < cnt; i++) {
|
||||
freq_tbl[i].index = i;
|
||||
freq_tbl[i].frequency = be32_to_cpup(val++);
|
||||
}
|
||||
|
||||
freq_tbl[i].index = i;
|
||||
freq_tbl[i].frequency = CPUFREQ_TABLE_END;
|
||||
|
||||
spear_cpufreq.freq_tbl = freq_tbl;
|
||||
|
||||
of_node_put(np);
|
||||
|
||||
spear_cpufreq.clk = clk_get(NULL, "cpu_clk");
|
||||
if (IS_ERR(spear_cpufreq.clk)) {
|
||||
pr_err("Unable to get CPU clock\n");
|
||||
ret = PTR_ERR(spear_cpufreq.clk);
|
||||
goto out_put_mem;
|
||||
}
|
||||
|
||||
ret = cpufreq_register_driver(&spear_cpufreq_driver);
|
||||
if (!ret)
|
||||
return 0;
|
||||
|
||||
pr_err("failed register driver: %d\n", ret);
|
||||
clk_put(spear_cpufreq.clk);
|
||||
|
||||
out_put_mem:
|
||||
kfree(freq_tbl);
|
||||
return ret;
|
||||
|
||||
out_put_node:
|
||||
of_node_put(np);
|
||||
return ret;
|
||||
}
|
||||
late_initcall(spear_cpufreq_driver_init);
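As a sketch of what the table-building loop above produces (hypothetical property contents, not from the binding): a cpufreq_tbl of < 200000 400000 > yields freq_tbl[0] = { .index = 0, .frequency = 200000 }, freq_tbl[1] = { .index = 1, .frequency = 400000 } and a terminator freq_tbl[2] = { .index = 2, .frequency = CPUFREQ_TABLE_END }, with frequencies in kHz as the cpufreq core expects.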
|
||||
|
||||
MODULE_AUTHOR("Deepak Sikri <deepak.sikri@st.com>");
|
||||
MODULE_DESCRIPTION("SPEAr CPUFreq driver");
|
||||
MODULE_LICENSE("GPL");
|
@ -11,6 +11,7 @@
|
||||
#ifndef _LINUX_CPUFREQ_H
|
||||
#define _LINUX_CPUFREQ_H
|
||||
|
||||
#include <asm/cputime.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/threads.h>
|
||||
@ -22,6 +23,8 @@
|
||||
#include <asm/div64.h>
|
||||
|
||||
#define CPUFREQ_NAME_LEN 16
|
||||
/* Print length for names. Extra 1 space for accommodating '\n' in prints */
|
||||
#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
|
||||
|
||||
|
||||
/*********************************************************************
|
||||
@ -404,6 +407,4 @@ void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
|
||||
unsigned int cpu);
|
||||
|
||||
void cpufreq_frequency_table_put_attr(unsigned int cpu);
|
||||
|
||||
|
||||
#endif /* _LINUX_CPUFREQ_H */