Merge back earlier cpufreq changes for v4.8.
commit 56b7808572
@@ -96,7 +96,7 @@ new - new frequency
 For details about OPP, see Documentation/power/opp.txt
 
 dev_pm_opp_init_cpufreq_table - cpufreq framework typically is initialized with
-	cpufreq_frequency_table_cpuinfo which is provided with the list of
+	cpufreq_table_validate_and_show() which is provided with the list of
 	frequencies that are available for operation. This function provides
 	a ready to use conversion routine to translate the OPP layer's internal
 	information about the available frequencies into a format readily
@@ -110,7 +110,7 @@ dev_pm_opp_init_cpufreq_table - cpufreq framework typically is initialized with
 		/* Do things */
 		r = dev_pm_opp_init_cpufreq_table(dev, &freq_table);
 		if (!r)
-			cpufreq_frequency_table_cpuinfo(policy, freq_table);
+			cpufreq_table_validate_and_show(policy, freq_table);
 		/* Do other things */
 	}
 
@@ -231,7 +231,7 @@ if you want to skip one entry in the table, set the frequency to
 CPUFREQ_ENTRY_INVALID. The entries don't need to be in ascending
 order.
 
-By calling cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
+By calling cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
 					struct cpufreq_frequency_table *table);
 the cpuinfo.min_freq and cpuinfo.max_freq values are detected, and
 policy->min and policy->max are set to the same values. This is
@@ -244,14 +244,12 @@ policy->max, and all other criteria are met. This is helpful for the
 ->verify call.
 
 int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
-				   struct cpufreq_frequency_table *table,
 				   unsigned int target_freq,
-				   unsigned int relation,
-				   unsigned int *index);
+				   unsigned int relation);
 
 is the corresponding frequency table helper for the ->target
-stage. Just pass the values to this function, and the unsigned int
-index returns the number of the frequency table entry which contains
+stage. Just pass the values to this function, and this function
+returns the number of the frequency table entry which contains
 the frequency the CPU shall be set to.
 
 The following macros can be used as iterators over cpufreq_frequency_table:
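With the updated prototype above, the helper hands the chosen table index back as its return value instead of filling an output parameter. As a rough sketch only (the driver name, foo_set_frequency() and the omitted error handling are hypothetical, not taken from this patch set), a driver's ->target callback built on the new helper could look like this:

static int foo_cpufreq_target(struct cpufreq_policy *policy,
			      unsigned int target_freq,
			      unsigned int relation)
{
	int index;

	/* Map the requested frequency onto an entry of policy->freq_table. */
	index = cpufreq_frequency_table_target(policy, target_freq, relation);

	/* Program the hardware with the frequency of the selected entry. */
	return foo_set_frequency(policy, policy->freq_table[index].frequency);
}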
@@ -85,61 +85,57 @@ static void spu_gov_cancel_work(struct spu_gov_info_struct *info)
 	cancel_delayed_work_sync(&info->work);
 }
 
-static int spu_gov_govern(struct cpufreq_policy *policy, unsigned int event)
+static int spu_gov_start(struct cpufreq_policy *policy)
 {
 	unsigned int cpu = policy->cpu;
-	struct spu_gov_info_struct *info, *affected_info;
+	struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
+	struct spu_gov_info_struct *affected_info;
 	int i;
-	int ret = 0;
 
-	info = &per_cpu(spu_gov_info, cpu);
-
-	switch (event) {
-	case CPUFREQ_GOV_START:
-		if (!cpu_online(cpu)) {
-			printk(KERN_ERR "cpu %d is not online\n", cpu);
-			ret = -EINVAL;
-			break;
-		}
-
-		if (!policy->cur) {
-			printk(KERN_ERR "no cpu specified in policy\n");
-			ret = -EINVAL;
-			break;
-		}
-
-		/* initialize spu_gov_info for all affected cpus */
-		for_each_cpu(i, policy->cpus) {
-			affected_info = &per_cpu(spu_gov_info, i);
-			affected_info->policy = policy;
-		}
-
-		info->poll_int = POLL_TIME;
-
-		/* setup timer */
-		spu_gov_init_work(info);
-
-		break;
-
-	case CPUFREQ_GOV_STOP:
-		/* cancel timer */
-		spu_gov_cancel_work(info);
-
-		/* clean spu_gov_info for all affected cpus */
-		for_each_cpu (i, policy->cpus) {
-			info = &per_cpu(spu_gov_info, i);
-			info->policy = NULL;
-		}
-
-		break;
+	if (!cpu_online(cpu)) {
+		printk(KERN_ERR "cpu %d is not online\n", cpu);
+		return -EINVAL;
 	}
 
-	return ret;
+	if (!policy->cur) {
+		printk(KERN_ERR "no cpu specified in policy\n");
+		return -EINVAL;
+	}
+
+	/* initialize spu_gov_info for all affected cpus */
+	for_each_cpu(i, policy->cpus) {
+		affected_info = &per_cpu(spu_gov_info, i);
+		affected_info->policy = policy;
+	}
+
+	info->poll_int = POLL_TIME;
+
+	/* setup timer */
+	spu_gov_init_work(info);
+
+	return 0;
+}
+
+static void spu_gov_stop(struct cpufreq_policy *policy)
+{
+	unsigned int cpu = policy->cpu;
+	struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
+	int i;
+
+	/* cancel timer */
+	spu_gov_cancel_work(info);
+
+	/* clean spu_gov_info for all affected cpus */
+	for_each_cpu (i, policy->cpus) {
+		info = &per_cpu(spu_gov_info, i);
+		info->policy = NULL;
+	}
 }
 
 static struct cpufreq_governor spu_governor = {
 	.name = "spudemand",
-	.governor = spu_gov_govern,
+	.start = spu_gov_start,
+	.stop = spu_gov_stop,
 	.owner = THIS_MODULE,
 };
 
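The spudemand conversion above illustrates the central interface change in this series: the single ->governor(policy, event) callback driven by CPUFREQ_GOV_* events is replaced by dedicated per-operation callbacks in struct cpufreq_governor. As a minimal sketch written against the new hooks (the "foo" name and the callback bodies are illustrative only; ->init/->exit hooks are also available for governors that need per-policy setup, as the dbs changes later in this diff show):

static int foo_gov_start(struct cpufreq_policy *policy)
{
	/* Set up per-policy state and start sampling here. */
	return 0;
}

static void foo_gov_stop(struct cpufreq_policy *policy)
{
	/* Stop sampling and release per-policy state here. */
}

static void foo_gov_limits(struct cpufreq_policy *policy)
{
	/* Called when policy->min/policy->max change; re-evaluate the target. */
	__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}

static struct cpufreq_governor foo_governor = {
	.name	= "foo",
	.start	= foo_gov_start,
	.stop	= foo_gov_stop,
	.limits	= foo_gov_limits,
	.owner	= THIS_MODULE,
};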
@@ -25,16 +25,6 @@
 #ifndef _ASM_X86_TOPOLOGY_H
 #define _ASM_X86_TOPOLOGY_H
 
-#ifdef CONFIG_X86_32
-# ifdef CONFIG_SMP
-#  define ENABLE_TOPO_DEFINES
-# endif
-#else
-# ifdef CONFIG_SMP
-#  define ENABLE_TOPO_DEFINES
-# endif
-#endif
-
 /*
  * to preserve the visibility of NUMA_NO_NODE definition,
  * moved to there from here.  May be used independent of
@@ -123,7 +113,7 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
 #define topology_physical_package_id(cpu)	(cpu_data(cpu).phys_proc_id)
 #define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
 
-#ifdef ENABLE_TOPO_DEFINES
+#ifdef CONFIG_SMP
 #define topology_core_cpumask(cpu)		(per_cpu(cpu_core_map, cpu))
 #define topology_sibling_cpumask(cpu)		(per_cpu(cpu_sibling_map, cpu))
 
@@ -300,15 +300,14 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
 	}
 
 	/*
-	 * P4 Xeon errata 037 workaround.
+	 * P4 Xeon erratum 037 workaround.
 	 * Hardware prefetcher may cause stale data to be loaded into the cache.
 	 */
 	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
 		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
-				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
-		    > 0) {
+				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
 			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
-			pr_info("CPU: Disabling hardware prefetching (Errata 037)\n");
+			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
 		}
 	}
 
@@ -23,6 +23,7 @@
 #include <linux/seq_file.h>
 #include <linux/io.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/iosf_mbi.h>
 
 /* Power gate status reg */
@@ -143,8 +144,8 @@ static void punit_dbgfs_unregister(void)
 	  (kernel_ulong_t)&drv_data }
 
 static const struct x86_cpu_id intel_punit_cpu_ids[] = {
-	ICPU(55, punit_device_byt), /* Valleyview, Bay Trail */
-	ICPU(76, punit_device_cht), /* Braswell, Cherry Trail */
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT1, punit_device_byt),
+	ICPU(INTEL_FAM6_ATOM_AIRMONT, punit_device_cht),
 	{}
 };
 
@@ -29,6 +29,7 @@ ACPI_MODULE_NAME("acpi_lpss");
 #ifdef CONFIG_X86_INTEL_LPSS
 
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/iosf_mbi.h>
 #include <asm/pmc_atom.h>
 
@@ -229,8 +230,8 @@ static const struct lpss_device_desc bsw_spi_dev_desc = {
 #define ICPU(model)	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
 
 static const struct x86_cpu_id lpss_cpu_ids[] = {
-	ICPU(0x37),	/* Valleyview, Bay Trail */
-	ICPU(0x4c),	/* Braswell, Cherry Trail */
+	ICPU(INTEL_FAM6_ATOM_SILVERMONT1),	/* Valleyview, Bay Trail */
+	ICPU(INTEL_FAM6_ATOM_AIRMONT),		/* Braswell, Cherry Trail */
 	{}
 };
 
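Both ID tables above are keyed by CPU model, so replacing the raw model numbers (55/0x37 and 76/0x4c) with the INTEL_FAM6_* constants from <asm/intel-family.h> changes readability, not behaviour. For context, such tables are consulted with x86_match_cpu(); a hedged sketch only, the probe function and its error handling are illustrative and not part of this patch:

static int __init foo_probe(void)
{
	const struct x86_cpu_id *id;

	id = x86_match_cpu(lpss_cpu_ids);	/* or intel_punit_cpu_ids */
	if (!id)
		return -ENODEV;

	/* For the punit table, id->driver_data points at the per-model data. */
	return 0;
}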
@@ -31,23 +31,18 @@ config CPU_FREQ_BOOST_SW
 	depends on THERMAL
 
 config CPU_FREQ_STAT
-	tristate "CPU frequency translation statistics"
+	bool "CPU frequency transition statistics"
 	default y
 	help
-	  This driver exports CPU frequency statistics information through sysfs
-	  file system.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called cpufreq_stats.
+	  Export CPU frequency statistics information through sysfs.
 
 	  If in doubt, say N.
 
 config CPU_FREQ_STAT_DETAILS
-	bool "CPU frequency translation statistics details"
+	bool "CPU frequency transition statistics details"
 	depends on CPU_FREQ_STAT
 	help
-	  This will show detail CPU frequency translation table in sysfs file
-	  system.
+	  Show detailed CPU frequency transition table in sysfs.
 
 	  If in doubt, say N.
 
@@ -48,9 +48,8 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
 	struct policy_dbs_info *policy_dbs = policy->governor_data;
 	struct dbs_data *od_data = policy_dbs->dbs_data;
 	struct od_dbs_tuners *od_tuners = od_data->tuners;
-	struct od_policy_dbs_info *od_info = to_dbs_info(policy_dbs);
 
-	if (!od_info->freq_table)
+	if (!policy->freq_table)
 		return freq_next;
 
 	rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_ACTUAL,
@@ -92,10 +91,9 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
 	else {
 		unsigned int index;
 
-		cpufreq_frequency_table_target(policy,
-				od_info->freq_table, policy->cur - 1,
-				CPUFREQ_RELATION_H, &index);
-		freq_next = od_info->freq_table[index].frequency;
+		index = cpufreq_frequency_table_target(policy,
+				policy->cur - 1, CPUFREQ_RELATION_H);
+		freq_next = policy->freq_table[index].frequency;
 	}
 
 	data->freq_prev = freq_next;
@ -74,19 +74,12 @@ static inline bool has_target(void)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* internal prototypes */
|
/* internal prototypes */
|
||||||
static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
|
|
||||||
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
|
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
|
||||||
|
static int cpufreq_init_governor(struct cpufreq_policy *policy);
|
||||||
|
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
|
||||||
static int cpufreq_start_governor(struct cpufreq_policy *policy);
|
static int cpufreq_start_governor(struct cpufreq_policy *policy);
|
||||||
|
static void cpufreq_stop_governor(struct cpufreq_policy *policy);
|
||||||
static inline void cpufreq_exit_governor(struct cpufreq_policy *policy)
|
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
|
||||||
{
|
|
||||||
(void)cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void cpufreq_stop_governor(struct cpufreq_policy *policy)
|
|
||||||
{
|
|
||||||
(void)cpufreq_governor(policy, CPUFREQ_GOV_STOP);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Two notifier lists: the "policy" list is involved in the
|
* Two notifier lists: the "policy" list is involved in the
|
||||||
@ -133,15 +126,6 @@ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
|
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
|
||||||
|
|
||||||
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
|
|
||||||
{
|
|
||||||
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
|
|
||||||
|
|
||||||
return policy && !policy_is_inactive(policy) ?
|
|
||||||
policy->freq_table : NULL;
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
|
|
||||||
|
|
||||||
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
|
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
|
||||||
{
|
{
|
||||||
u64 idle_time;
|
u64 idle_time;
|
||||||
@ -354,6 +338,7 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
|
|||||||
pr_debug("FREQ: %lu - CPU: %lu\n",
|
pr_debug("FREQ: %lu - CPU: %lu\n",
|
||||||
(unsigned long)freqs->new, (unsigned long)freqs->cpu);
|
(unsigned long)freqs->new, (unsigned long)freqs->cpu);
|
||||||
trace_cpu_frequency(freqs->new, freqs->cpu);
|
trace_cpu_frequency(freqs->new, freqs->cpu);
|
||||||
|
cpufreq_stats_record_transition(policy, freqs->new);
|
||||||
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
|
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
|
||||||
CPUFREQ_POSTCHANGE, freqs);
|
CPUFREQ_POSTCHANGE, freqs);
|
||||||
if (likely(policy) && likely(policy->cpu == freqs->cpu))
|
if (likely(policy) && likely(policy->cpu == freqs->cpu))
|
||||||
@ -1115,6 +1100,7 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
|
|||||||
CPUFREQ_REMOVE_POLICY, policy);
|
CPUFREQ_REMOVE_POLICY, policy);
|
||||||
|
|
||||||
down_write(&policy->rwsem);
|
down_write(&policy->rwsem);
|
||||||
|
cpufreq_stats_free_table(policy);
|
||||||
cpufreq_remove_dev_symlink(policy);
|
cpufreq_remove_dev_symlink(policy);
|
||||||
kobj = &policy->kobj;
|
kobj = &policy->kobj;
|
||||||
cmp = &policy->kobj_unregister;
|
cmp = &policy->kobj_unregister;
|
||||||
@ -1265,13 +1251,12 @@ static int cpufreq_online(unsigned int cpu)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
|
|
||||||
CPUFREQ_START, policy);
|
|
||||||
|
|
||||||
if (new_policy) {
|
if (new_policy) {
|
||||||
ret = cpufreq_add_dev_interface(policy);
|
ret = cpufreq_add_dev_interface(policy);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto out_exit_policy;
|
goto out_exit_policy;
|
||||||
|
|
||||||
|
cpufreq_stats_create_table(policy);
|
||||||
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
|
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
|
||||||
CPUFREQ_CREATE_POLICY, policy);
|
CPUFREQ_CREATE_POLICY, policy);
|
||||||
|
|
||||||
@ -1280,6 +1265,9 @@ static int cpufreq_online(unsigned int cpu)
|
|||||||
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
|
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
|
||||||
|
CPUFREQ_START, policy);
|
||||||
|
|
||||||
ret = cpufreq_init_policy(policy);
|
ret = cpufreq_init_policy(policy);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
|
pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
|
||||||
@ -1864,14 +1852,17 @@ static int __target_intermediate(struct cpufreq_policy *policy,
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int __target_index(struct cpufreq_policy *policy,
|
static int __target_index(struct cpufreq_policy *policy, int index)
|
||||||
struct cpufreq_frequency_table *freq_table, int index)
|
|
||||||
{
|
{
|
||||||
struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
|
struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
|
||||||
unsigned int intermediate_freq = 0;
|
unsigned int intermediate_freq = 0;
|
||||||
|
unsigned int newfreq = policy->freq_table[index].frequency;
|
||||||
int retval = -EINVAL;
|
int retval = -EINVAL;
|
||||||
bool notify;
|
bool notify;
|
||||||
|
|
||||||
|
if (newfreq == policy->cur)
|
||||||
|
return 0;
|
||||||
|
|
||||||
notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
|
notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
|
||||||
if (notify) {
|
if (notify) {
|
||||||
/* Handle switching to intermediate frequency */
|
/* Handle switching to intermediate frequency */
|
||||||
@ -1886,7 +1877,7 @@ static int __target_index(struct cpufreq_policy *policy,
|
|||||||
freqs.old = freqs.new;
|
freqs.old = freqs.new;
|
||||||
}
|
}
|
||||||
|
|
||||||
freqs.new = freq_table[index].frequency;
|
freqs.new = newfreq;
|
||||||
pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
|
pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
|
||||||
__func__, policy->cpu, freqs.old, freqs.new);
|
__func__, policy->cpu, freqs.old, freqs.new);
|
||||||
|
|
||||||
@ -1923,17 +1914,13 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
|
|||||||
unsigned int relation)
|
unsigned int relation)
|
||||||
{
|
{
|
||||||
unsigned int old_target_freq = target_freq;
|
unsigned int old_target_freq = target_freq;
|
||||||
struct cpufreq_frequency_table *freq_table;
|
int index;
|
||||||
int index, retval;
|
|
||||||
|
|
||||||
if (cpufreq_disabled())
|
if (cpufreq_disabled())
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
|
|
||||||
/* Make sure that target_freq is within supported range */
|
/* Make sure that target_freq is within supported range */
|
||||||
if (target_freq > policy->max)
|
target_freq = clamp_val(target_freq, policy->min, policy->max);
|
||||||
target_freq = policy->max;
|
|
||||||
if (target_freq < policy->min)
|
|
||||||
target_freq = policy->min;
|
|
||||||
|
|
||||||
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
|
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
|
||||||
policy->cpu, target_freq, relation, old_target_freq);
|
policy->cpu, target_freq, relation, old_target_freq);
|
||||||
@ -1956,23 +1943,9 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
|
|||||||
if (!cpufreq_driver->target_index)
|
if (!cpufreq_driver->target_index)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
freq_table = cpufreq_frequency_get_table(policy->cpu);
|
index = cpufreq_frequency_table_target(policy, target_freq, relation);
|
||||||
if (unlikely(!freq_table)) {
|
|
||||||
pr_err("%s: Unable to find freq_table\n", __func__);
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
|
|
||||||
retval = cpufreq_frequency_table_target(policy, freq_table, target_freq,
|
return __target_index(policy, index);
|
||||||
relation, &index);
|
|
||||||
if (unlikely(retval)) {
|
|
||||||
pr_err("%s: Unable to find matching freq\n", __func__);
|
|
||||||
return retval;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (freq_table[index].frequency == policy->cur)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
return __target_index(policy, freq_table, index);
|
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
|
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
|
||||||
|
|
||||||
@ -1997,7 +1970,7 @@ __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
|
|||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
|
static int cpufreq_init_governor(struct cpufreq_policy *policy)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
@ -2025,36 +1998,82 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (event == CPUFREQ_GOV_POLICY_INIT)
|
if (!try_module_get(policy->governor->owner))
|
||||||
if (!try_module_get(policy->governor->owner))
|
return -EINVAL;
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
|
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
|
||||||
|
|
||||||
ret = policy->governor->governor(policy, event);
|
if (policy->governor->init) {
|
||||||
|
ret = policy->governor->init(policy);
|
||||||
if (event == CPUFREQ_GOV_POLICY_INIT) {
|
if (ret) {
|
||||||
if (ret)
|
|
||||||
module_put(policy->governor->owner);
|
module_put(policy->governor->owner);
|
||||||
else
|
return ret;
|
||||||
policy->governor->initialized++;
|
}
|
||||||
} else if (event == CPUFREQ_GOV_POLICY_EXIT) {
|
|
||||||
policy->governor->initialized--;
|
|
||||||
module_put(policy->governor->owner);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return ret;
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void cpufreq_exit_governor(struct cpufreq_policy *policy)
|
||||||
|
{
|
||||||
|
if (cpufreq_suspended || !policy->governor)
|
||||||
|
return;
|
||||||
|
|
||||||
|
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
|
||||||
|
|
||||||
|
if (policy->governor->exit)
|
||||||
|
policy->governor->exit(policy);
|
||||||
|
|
||||||
|
module_put(policy->governor->owner);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int cpufreq_start_governor(struct cpufreq_policy *policy)
|
static int cpufreq_start_governor(struct cpufreq_policy *policy)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
if (cpufreq_suspended)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
if (!policy->governor)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
|
||||||
|
|
||||||
if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
|
if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
|
||||||
cpufreq_update_current_freq(policy);
|
cpufreq_update_current_freq(policy);
|
||||||
|
|
||||||
ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
|
if (policy->governor->start) {
|
||||||
return ret ? ret : cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
|
ret = policy->governor->start(policy);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (policy->governor->limits)
|
||||||
|
policy->governor->limits(policy);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void cpufreq_stop_governor(struct cpufreq_policy *policy)
|
||||||
|
{
|
||||||
|
if (cpufreq_suspended || !policy->governor)
|
||||||
|
return;
|
||||||
|
|
||||||
|
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
|
||||||
|
|
||||||
|
if (policy->governor->stop)
|
||||||
|
policy->governor->stop(policy);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void cpufreq_governor_limits(struct cpufreq_policy *policy)
|
||||||
|
{
|
||||||
|
if (cpufreq_suspended || !policy->governor)
|
||||||
|
return;
|
||||||
|
|
||||||
|
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
|
||||||
|
|
||||||
|
if (policy->governor->limits)
|
||||||
|
policy->governor->limits(policy);
|
||||||
}
|
}
|
||||||
|
|
||||||
int cpufreq_register_governor(struct cpufreq_governor *governor)
|
int cpufreq_register_governor(struct cpufreq_governor *governor)
|
||||||
@ -2069,7 +2088,6 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
|
|||||||
|
|
||||||
mutex_lock(&cpufreq_governor_mutex);
|
mutex_lock(&cpufreq_governor_mutex);
|
||||||
|
|
||||||
governor->initialized = 0;
|
|
||||||
err = -EBUSY;
|
err = -EBUSY;
|
||||||
if (!find_governor(governor->name)) {
|
if (!find_governor(governor->name)) {
|
||||||
err = 0;
|
err = 0;
|
||||||
@ -2195,7 +2213,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
|
|||||||
|
|
||||||
if (new_policy->governor == policy->governor) {
|
if (new_policy->governor == policy->governor) {
|
||||||
pr_debug("cpufreq: governor limits update\n");
|
pr_debug("cpufreq: governor limits update\n");
|
||||||
return cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
|
cpufreq_governor_limits(policy);
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
pr_debug("governor switch\n");
|
pr_debug("governor switch\n");
|
||||||
@ -2210,7 +2229,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
|
|||||||
|
|
||||||
/* start new governor */
|
/* start new governor */
|
||||||
policy->governor = new_policy->governor;
|
policy->governor = new_policy->governor;
|
||||||
ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
|
ret = cpufreq_init_governor(policy);
|
||||||
if (!ret) {
|
if (!ret) {
|
||||||
ret = cpufreq_start_governor(policy);
|
ret = cpufreq_start_governor(policy);
|
||||||
if (!ret) {
|
if (!ret) {
|
||||||
@ -2224,7 +2243,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
|
|||||||
pr_debug("starting governor %s failed\n", policy->governor->name);
|
pr_debug("starting governor %s failed\n", policy->governor->name);
|
||||||
if (old_gov) {
|
if (old_gov) {
|
||||||
policy->governor = old_gov;
|
policy->governor = old_gov;
|
||||||
if (cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
|
if (cpufreq_init_governor(policy))
|
||||||
policy->governor = NULL;
|
policy->governor = NULL;
|
||||||
else
|
else
|
||||||
cpufreq_start_governor(policy);
|
cpufreq_start_governor(policy);
|
||||||
@ -2305,26 +2324,25 @@ static struct notifier_block __refdata cpufreq_cpu_notifier = {
|
|||||||
*********************************************************************/
|
*********************************************************************/
|
||||||
static int cpufreq_boost_set_sw(int state)
|
static int cpufreq_boost_set_sw(int state)
|
||||||
{
|
{
|
||||||
struct cpufreq_frequency_table *freq_table;
|
|
||||||
struct cpufreq_policy *policy;
|
struct cpufreq_policy *policy;
|
||||||
int ret = -EINVAL;
|
int ret = -EINVAL;
|
||||||
|
|
||||||
for_each_active_policy(policy) {
|
for_each_active_policy(policy) {
|
||||||
freq_table = cpufreq_frequency_get_table(policy->cpu);
|
if (!policy->freq_table)
|
||||||
if (freq_table) {
|
continue;
|
||||||
ret = cpufreq_frequency_table_cpuinfo(policy,
|
|
||||||
freq_table);
|
|
||||||
if (ret) {
|
|
||||||
pr_err("%s: Policy frequency update failed\n",
|
|
||||||
__func__);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
down_write(&policy->rwsem);
|
ret = cpufreq_frequency_table_cpuinfo(policy,
|
||||||
policy->user_policy.max = policy->max;
|
policy->freq_table);
|
||||||
cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
|
if (ret) {
|
||||||
up_write(&policy->rwsem);
|
pr_err("%s: Policy frequency update failed\n",
|
||||||
|
__func__);
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
down_write(&policy->rwsem);
|
||||||
|
policy->user_policy.max = policy->max;
|
||||||
|
cpufreq_governor_limits(policy);
|
||||||
|
up_write(&policy->rwsem);
|
||||||
}
|
}
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -17,7 +17,6 @@
|
|||||||
struct cs_policy_dbs_info {
|
struct cs_policy_dbs_info {
|
||||||
struct policy_dbs_info policy_dbs;
|
struct policy_dbs_info policy_dbs;
|
||||||
unsigned int down_skip;
|
unsigned int down_skip;
|
||||||
unsigned int requested_freq;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
|
static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
|
||||||
@ -75,19 +74,17 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
|
|||||||
|
|
||||||
/* Check for frequency increase */
|
/* Check for frequency increase */
|
||||||
if (load > dbs_data->up_threshold) {
|
if (load > dbs_data->up_threshold) {
|
||||||
|
unsigned int requested_freq = policy->cur;
|
||||||
|
|
||||||
dbs_info->down_skip = 0;
|
dbs_info->down_skip = 0;
|
||||||
|
|
||||||
/* if we are already at full speed then break out early */
|
/* if we are already at full speed then break out early */
|
||||||
if (dbs_info->requested_freq == policy->max)
|
if (requested_freq == policy->max)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
|
requested_freq += get_freq_target(cs_tuners, policy);
|
||||||
|
|
||||||
if (dbs_info->requested_freq > policy->max)
|
__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H);
|
||||||
dbs_info->requested_freq = policy->max;
|
|
||||||
|
|
||||||
__cpufreq_driver_target(policy, dbs_info->requested_freq,
|
|
||||||
CPUFREQ_RELATION_H);
|
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -98,36 +95,27 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
|
|||||||
|
|
||||||
/* Check for frequency decrease */
|
/* Check for frequency decrease */
|
||||||
if (load < cs_tuners->down_threshold) {
|
if (load < cs_tuners->down_threshold) {
|
||||||
unsigned int freq_target;
|
unsigned int freq_target, requested_freq = policy->cur;
|
||||||
/*
|
/*
|
||||||
* if we cannot reduce the frequency anymore, break out early
|
* if we cannot reduce the frequency anymore, break out early
|
||||||
*/
|
*/
|
||||||
if (policy->cur == policy->min)
|
if (requested_freq == policy->min)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
freq_target = get_freq_target(cs_tuners, policy);
|
freq_target = get_freq_target(cs_tuners, policy);
|
||||||
if (dbs_info->requested_freq > freq_target)
|
if (requested_freq > freq_target)
|
||||||
dbs_info->requested_freq -= freq_target;
|
requested_freq -= freq_target;
|
||||||
else
|
else
|
||||||
dbs_info->requested_freq = policy->min;
|
requested_freq = policy->min;
|
||||||
|
|
||||||
__cpufreq_driver_target(policy, dbs_info->requested_freq,
|
__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L);
|
||||||
CPUFREQ_RELATION_L);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
out:
|
out:
|
||||||
return dbs_data->sampling_rate;
|
return dbs_data->sampling_rate;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
|
|
||||||
void *data);
|
|
||||||
|
|
||||||
static struct notifier_block cs_cpufreq_notifier_block = {
|
|
||||||
.notifier_call = dbs_cpufreq_notifier,
|
|
||||||
};
|
|
||||||
|
|
||||||
/************************** sysfs interface ************************/
|
/************************** sysfs interface ************************/
|
||||||
static struct dbs_governor cs_dbs_gov;
|
|
||||||
|
|
||||||
static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
|
static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
|
||||||
const char *buf, size_t count)
|
const char *buf, size_t count)
|
||||||
@ -268,15 +256,13 @@ static void cs_free(struct policy_dbs_info *policy_dbs)
|
|||||||
kfree(to_dbs_info(policy_dbs));
|
kfree(to_dbs_info(policy_dbs));
|
||||||
}
|
}
|
||||||
|
|
||||||
static int cs_init(struct dbs_data *dbs_data, bool notify)
|
static int cs_init(struct dbs_data *dbs_data)
|
||||||
{
|
{
|
||||||
struct cs_dbs_tuners *tuners;
|
struct cs_dbs_tuners *tuners;
|
||||||
|
|
||||||
tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
|
tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
|
||||||
if (!tuners) {
|
if (!tuners)
|
||||||
pr_err("%s: kzalloc failed\n", __func__);
|
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
|
||||||
|
|
||||||
tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
|
tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
|
||||||
tuners->freq_step = DEF_FREQUENCY_STEP;
|
tuners->freq_step = DEF_FREQUENCY_STEP;
|
||||||
@ -288,19 +274,11 @@ static int cs_init(struct dbs_data *dbs_data, bool notify)
|
|||||||
dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
|
dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
|
||||||
jiffies_to_usecs(10);
|
jiffies_to_usecs(10);
|
||||||
|
|
||||||
if (notify)
|
|
||||||
cpufreq_register_notifier(&cs_cpufreq_notifier_block,
|
|
||||||
CPUFREQ_TRANSITION_NOTIFIER);
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void cs_exit(struct dbs_data *dbs_data, bool notify)
|
static void cs_exit(struct dbs_data *dbs_data)
|
||||||
{
|
{
|
||||||
if (notify)
|
|
||||||
cpufreq_unregister_notifier(&cs_cpufreq_notifier_block,
|
|
||||||
CPUFREQ_TRANSITION_NOTIFIER);
|
|
||||||
|
|
||||||
kfree(dbs_data->tuners);
|
kfree(dbs_data->tuners);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -309,16 +287,10 @@ static void cs_start(struct cpufreq_policy *policy)
|
|||||||
struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
|
struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
|
||||||
|
|
||||||
dbs_info->down_skip = 0;
|
dbs_info->down_skip = 0;
|
||||||
dbs_info->requested_freq = policy->cur;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct dbs_governor cs_dbs_gov = {
|
static struct dbs_governor cs_governor = {
|
||||||
.gov = {
|
.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"),
|
||||||
.name = "conservative",
|
|
||||||
.governor = cpufreq_governor_dbs,
|
|
||||||
.max_transition_latency = TRANSITION_LATENCY_LIMIT,
|
|
||||||
.owner = THIS_MODULE,
|
|
||||||
},
|
|
||||||
.kobj_type = { .default_attrs = cs_attributes },
|
.kobj_type = { .default_attrs = cs_attributes },
|
||||||
.gov_dbs_timer = cs_dbs_timer,
|
.gov_dbs_timer = cs_dbs_timer,
|
||||||
.alloc = cs_alloc,
|
.alloc = cs_alloc,
|
||||||
@ -328,33 +300,7 @@ static struct dbs_governor cs_dbs_gov = {
|
|||||||
.start = cs_start,
|
.start = cs_start,
|
||||||
};
|
};
|
||||||
|
|
||||||
#define CPU_FREQ_GOV_CONSERVATIVE (&cs_dbs_gov.gov)
|
#define CPU_FREQ_GOV_CONSERVATIVE (&cs_governor.gov)
|
||||||
|
|
||||||
static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
|
|
||||||
void *data)
|
|
||||||
{
|
|
||||||
struct cpufreq_freqs *freq = data;
|
|
||||||
struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);
|
|
||||||
struct cs_policy_dbs_info *dbs_info;
|
|
||||||
|
|
||||||
if (!policy)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
/* policy isn't governed by conservative governor */
|
|
||||||
if (policy->governor != CPU_FREQ_GOV_CONSERVATIVE)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
dbs_info = to_dbs_info(policy->governor_data);
|
|
||||||
/*
|
|
||||||
* we only care if our internally tracked freq moves outside the 'valid'
|
|
||||||
* ranges of frequency available to us otherwise we do not change it
|
|
||||||
*/
|
|
||||||
if (dbs_info->requested_freq > policy->max
|
|
||||||
|| dbs_info->requested_freq < policy->min)
|
|
||||||
dbs_info->requested_freq = freq->new;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int __init cpufreq_gov_dbs_init(void)
|
static int __init cpufreq_gov_dbs_init(void)
|
||||||
{
|
{
|
||||||
|
@ -336,17 +336,6 @@ static inline void gov_clear_update_util(struct cpufreq_policy *policy)
|
|||||||
synchronize_sched();
|
synchronize_sched();
|
||||||
}
|
}
|
||||||
|
|
||||||
static void gov_cancel_work(struct cpufreq_policy *policy)
|
|
||||||
{
|
|
||||||
struct policy_dbs_info *policy_dbs = policy->governor_data;
|
|
||||||
|
|
||||||
gov_clear_update_util(policy_dbs->policy);
|
|
||||||
irq_work_sync(&policy_dbs->irq_work);
|
|
||||||
cancel_work_sync(&policy_dbs->work);
|
|
||||||
atomic_set(&policy_dbs->work_count, 0);
|
|
||||||
policy_dbs->work_in_progress = false;
|
|
||||||
}
|
|
||||||
|
|
||||||
static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
|
static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
|
||||||
struct dbs_governor *gov)
|
struct dbs_governor *gov)
|
||||||
{
|
{
|
||||||
@ -389,7 +378,7 @@ static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
|
|||||||
gov->free(policy_dbs);
|
gov->free(policy_dbs);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int cpufreq_governor_init(struct cpufreq_policy *policy)
|
int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
|
||||||
{
|
{
|
||||||
struct dbs_governor *gov = dbs_governor_of(policy);
|
struct dbs_governor *gov = dbs_governor_of(policy);
|
||||||
struct dbs_data *dbs_data;
|
struct dbs_data *dbs_data;
|
||||||
@ -429,7 +418,7 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
|
|||||||
|
|
||||||
gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);
|
gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);
|
||||||
|
|
||||||
ret = gov->init(dbs_data, !policy->governor->initialized);
|
ret = gov->init(dbs_data);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto free_policy_dbs_info;
|
goto free_policy_dbs_info;
|
||||||
|
|
||||||
@ -458,13 +447,13 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
|
|||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
/* Failure, so roll back. */
|
/* Failure, so roll back. */
|
||||||
pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret);
|
pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);
|
||||||
|
|
||||||
policy->governor_data = NULL;
|
policy->governor_data = NULL;
|
||||||
|
|
||||||
if (!have_governor_per_policy())
|
if (!have_governor_per_policy())
|
||||||
gov->gdbs_data = NULL;
|
gov->gdbs_data = NULL;
|
||||||
gov->exit(dbs_data, !policy->governor->initialized);
|
gov->exit(dbs_data);
|
||||||
kfree(dbs_data);
|
kfree(dbs_data);
|
||||||
|
|
||||||
free_policy_dbs_info:
|
free_policy_dbs_info:
|
||||||
@ -474,8 +463,9 @@ out:
|
|||||||
mutex_unlock(&gov_dbs_data_mutex);
|
mutex_unlock(&gov_dbs_data_mutex);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);
|
||||||
|
|
||||||
static int cpufreq_governor_exit(struct cpufreq_policy *policy)
|
void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
|
||||||
{
|
{
|
||||||
struct dbs_governor *gov = dbs_governor_of(policy);
|
struct dbs_governor *gov = dbs_governor_of(policy);
|
||||||
struct policy_dbs_info *policy_dbs = policy->governor_data;
|
struct policy_dbs_info *policy_dbs = policy->governor_data;
|
||||||
@ -493,17 +483,17 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy)
|
|||||||
if (!have_governor_per_policy())
|
if (!have_governor_per_policy())
|
||||||
gov->gdbs_data = NULL;
|
gov->gdbs_data = NULL;
|
||||||
|
|
||||||
gov->exit(dbs_data, policy->governor->initialized == 1);
|
gov->exit(dbs_data);
|
||||||
kfree(dbs_data);
|
kfree(dbs_data);
|
||||||
}
|
}
|
||||||
|
|
||||||
free_policy_dbs_info(policy_dbs, gov);
|
free_policy_dbs_info(policy_dbs, gov);
|
||||||
|
|
||||||
mutex_unlock(&gov_dbs_data_mutex);
|
mutex_unlock(&gov_dbs_data_mutex);
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);
|
||||||
|
|
||||||
static int cpufreq_governor_start(struct cpufreq_policy *policy)
|
int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
|
||||||
{
|
{
|
||||||
struct dbs_governor *gov = dbs_governor_of(policy);
|
struct dbs_governor *gov = dbs_governor_of(policy);
|
||||||
struct policy_dbs_info *policy_dbs = policy->governor_data;
|
struct policy_dbs_info *policy_dbs = policy->governor_data;
|
||||||
@ -539,47 +529,28 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy)
|
|||||||
gov_set_update_util(policy_dbs, sampling_rate);
|
gov_set_update_util(policy_dbs, sampling_rate);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);
|
||||||
|
|
||||||
static int cpufreq_governor_stop(struct cpufreq_policy *policy)
|
void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
|
||||||
{
|
{
|
||||||
gov_cancel_work(policy);
|
struct policy_dbs_info *policy_dbs = policy->governor_data;
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int cpufreq_governor_limits(struct cpufreq_policy *policy)
|
gov_clear_update_util(policy_dbs->policy);
|
||||||
|
irq_work_sync(&policy_dbs->irq_work);
|
||||||
|
cancel_work_sync(&policy_dbs->work);
|
||||||
|
atomic_set(&policy_dbs->work_count, 0);
|
||||||
|
policy_dbs->work_in_progress = false;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
|
||||||
|
|
||||||
|
void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
|
||||||
{
|
{
|
||||||
struct policy_dbs_info *policy_dbs = policy->governor_data;
|
struct policy_dbs_info *policy_dbs = policy->governor_data;
|
||||||
|
|
||||||
mutex_lock(&policy_dbs->timer_mutex);
|
mutex_lock(&policy_dbs->timer_mutex);
|
||||||
|
cpufreq_policy_apply_limits(policy);
|
||||||
if (policy->max < policy->cur)
|
|
||||||
__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
|
|
||||||
else if (policy->min > policy->cur)
|
|
||||||
__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
|
|
||||||
|
|
||||||
gov_update_sample_delay(policy_dbs, 0);
|
gov_update_sample_delay(policy_dbs, 0);
|
||||||
|
|
||||||
mutex_unlock(&policy_dbs->timer_mutex);
|
mutex_unlock(&policy_dbs->timer_mutex);
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
|
||||||
int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
|
|
||||||
{
|
|
||||||
if (event == CPUFREQ_GOV_POLICY_INIT) {
|
|
||||||
return cpufreq_governor_init(policy);
|
|
||||||
} else if (policy->governor_data) {
|
|
||||||
switch (event) {
|
|
||||||
case CPUFREQ_GOV_POLICY_EXIT:
|
|
||||||
return cpufreq_governor_exit(policy);
|
|
||||||
case CPUFREQ_GOV_START:
|
|
||||||
return cpufreq_governor_start(policy);
|
|
||||||
case CPUFREQ_GOV_STOP:
|
|
||||||
return cpufreq_governor_stop(policy);
|
|
||||||
case CPUFREQ_GOV_LIMITS:
|
|
||||||
return cpufreq_governor_limits(policy);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
|
|
||||||
|
@ -138,8 +138,8 @@ struct dbs_governor {
|
|||||||
unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy);
|
unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy);
|
||||||
struct policy_dbs_info *(*alloc)(void);
|
struct policy_dbs_info *(*alloc)(void);
|
||||||
void (*free)(struct policy_dbs_info *policy_dbs);
|
void (*free)(struct policy_dbs_info *policy_dbs);
|
||||||
int (*init)(struct dbs_data *dbs_data, bool notify);
|
int (*init)(struct dbs_data *dbs_data);
|
||||||
void (*exit)(struct dbs_data *dbs_data, bool notify);
|
void (*exit)(struct dbs_data *dbs_data);
|
||||||
void (*start)(struct cpufreq_policy *policy);
|
void (*start)(struct cpufreq_policy *policy);
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -148,6 +148,25 @@ static inline struct dbs_governor *dbs_governor_of(struct cpufreq_policy *policy
|
|||||||
return container_of(policy->governor, struct dbs_governor, gov);
|
return container_of(policy->governor, struct dbs_governor, gov);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Governor callback routines */
|
||||||
|
int cpufreq_dbs_governor_init(struct cpufreq_policy *policy);
|
||||||
|
void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy);
|
||||||
|
int cpufreq_dbs_governor_start(struct cpufreq_policy *policy);
|
||||||
|
void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy);
|
||||||
|
void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy);
|
||||||
|
|
||||||
|
#define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_) \
|
||||||
|
{ \
|
||||||
|
.name = _name_, \
|
||||||
|
.max_transition_latency = TRANSITION_LATENCY_LIMIT, \
|
||||||
|
.owner = THIS_MODULE, \
|
||||||
|
.init = cpufreq_dbs_governor_init, \
|
||||||
|
.exit = cpufreq_dbs_governor_exit, \
|
||||||
|
.start = cpufreq_dbs_governor_start, \
|
||||||
|
.stop = cpufreq_dbs_governor_stop, \
|
||||||
|
.limits = cpufreq_dbs_governor_limits, \
|
||||||
|
}
|
||||||
|
|
||||||
/* Governor specific operations */
|
/* Governor specific operations */
|
||||||
struct od_ops {
|
struct od_ops {
|
||||||
unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
|
unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
|
||||||
@ -155,7 +174,6 @@ struct od_ops {
|
|||||||
};
|
};
|
||||||
|
|
||||||
unsigned int dbs_update(struct cpufreq_policy *policy);
|
unsigned int dbs_update(struct cpufreq_policy *policy);
|
||||||
int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event);
|
|
||||||
void od_register_powersave_bias_handler(unsigned int (*f)
|
void od_register_powersave_bias_handler(unsigned int (*f)
|
||||||
(struct cpufreq_policy *, unsigned int, unsigned int),
|
(struct cpufreq_policy *, unsigned int, unsigned int),
|
||||||
unsigned int powersave_bias);
|
unsigned int powersave_bias);
|
||||||
|
@ -65,34 +65,32 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
|
|||||||
{
|
{
|
||||||
unsigned int freq_req, freq_reduc, freq_avg;
|
unsigned int freq_req, freq_reduc, freq_avg;
|
||||||
unsigned int freq_hi, freq_lo;
|
unsigned int freq_hi, freq_lo;
|
||||||
unsigned int index = 0;
|
unsigned int index;
|
||||||
unsigned int delay_hi_us;
|
unsigned int delay_hi_us;
|
||||||
struct policy_dbs_info *policy_dbs = policy->governor_data;
|
struct policy_dbs_info *policy_dbs = policy->governor_data;
|
||||||
struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
|
struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
|
||||||
struct dbs_data *dbs_data = policy_dbs->dbs_data;
|
struct dbs_data *dbs_data = policy_dbs->dbs_data;
|
||||||
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
|
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
|
||||||
|
struct cpufreq_frequency_table *freq_table = policy->freq_table;
|
||||||
|
|
||||||
if (!dbs_info->freq_table) {
|
if (!freq_table) {
|
||||||
dbs_info->freq_lo = 0;
|
dbs_info->freq_lo = 0;
|
||||||
dbs_info->freq_lo_delay_us = 0;
|
dbs_info->freq_lo_delay_us = 0;
|
||||||
return freq_next;
|
return freq_next;
|
||||||
}
|
}
|
||||||
|
|
||||||
cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
|
index = cpufreq_frequency_table_target(policy, freq_next, relation);
|
||||||
relation, &index);
|
freq_req = freq_table[index].frequency;
|
||||||
freq_req = dbs_info->freq_table[index].frequency;
|
|
||||||
freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
|
freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
|
||||||
freq_avg = freq_req - freq_reduc;
|
freq_avg = freq_req - freq_reduc;
|
||||||
|
|
||||||
/* Find freq bounds for freq_avg in freq_table */
|
/* Find freq bounds for freq_avg in freq_table */
|
||||||
index = 0;
|
index = cpufreq_frequency_table_target(policy, freq_avg,
|
||||||
cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
|
CPUFREQ_RELATION_H);
|
||||||
CPUFREQ_RELATION_H, &index);
|
freq_lo = freq_table[index].frequency;
|
||||||
freq_lo = dbs_info->freq_table[index].frequency;
|
index = cpufreq_frequency_table_target(policy, freq_avg,
|
||||||
index = 0;
|
CPUFREQ_RELATION_L);
|
||||||
cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
|
freq_hi = freq_table[index].frequency;
|
||||||
CPUFREQ_RELATION_L, &index);
|
|
||||||
freq_hi = dbs_info->freq_table[index].frequency;
|
|
||||||
|
|
||||||
/* Find out how long we have to be in hi and lo freqs */
|
/* Find out how long we have to be in hi and lo freqs */
|
||||||
if (freq_hi == freq_lo) {
|
if (freq_hi == freq_lo) {
|
||||||
@ -113,7 +111,6 @@ static void ondemand_powersave_bias_init(struct cpufreq_policy *policy)
|
|||||||
{
|
{
|
||||||
struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
|
struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
|
||||||
|
|
||||||
dbs_info->freq_table = cpufreq_frequency_get_table(policy->cpu);
|
|
||||||
dbs_info->freq_lo = 0;
|
dbs_info->freq_lo = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -361,17 +358,15 @@ static void od_free(struct policy_dbs_info *policy_dbs)
|
|||||||
kfree(to_dbs_info(policy_dbs));
|
kfree(to_dbs_info(policy_dbs));
|
||||||
}
|
}
|
||||||
|
|
||||||
static int od_init(struct dbs_data *dbs_data, bool notify)
|
static int od_init(struct dbs_data *dbs_data)
|
||||||
{
|
{
|
||||||
struct od_dbs_tuners *tuners;
|
struct od_dbs_tuners *tuners;
|
||||||
u64 idle_time;
|
u64 idle_time;
|
||||||
int cpu;
|
int cpu;
|
||||||
|
|
||||||
tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
|
tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
|
||||||
if (!tuners) {
|
if (!tuners)
|
||||||
pr_err("%s: kzalloc failed\n", __func__);
|
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
|
||||||
|
|
||||||
cpu = get_cpu();
|
cpu = get_cpu();
|
||||||
idle_time = get_cpu_idle_time_us(cpu, NULL);
|
idle_time = get_cpu_idle_time_us(cpu, NULL);
|
||||||
@ -402,7 +397,7 @@ static int od_init(struct dbs_data *dbs_data, bool notify)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void od_exit(struct dbs_data *dbs_data, bool notify)
|
static void od_exit(struct dbs_data *dbs_data)
|
||||||
{
|
{
|
||||||
kfree(dbs_data->tuners);
|
kfree(dbs_data->tuners);
|
||||||
}
|
}
|
||||||
@ -420,12 +415,7 @@ static struct od_ops od_ops = {
|
|||||||
};
|
};
|
||||||
|
|
||||||
static struct dbs_governor od_dbs_gov = {
|
static struct dbs_governor od_dbs_gov = {
|
||||||
.gov = {
|
.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
|
||||||
.name = "ondemand",
|
|
||||||
.governor = cpufreq_governor_dbs,
|
|
||||||
.max_transition_latency = TRANSITION_LATENCY_LIMIT,
|
|
||||||
.owner = THIS_MODULE,
|
|
||||||
},
|
|
||||||
.kobj_type = { .default_attrs = od_attributes },
|
.kobj_type = { .default_attrs = od_attributes },
|
||||||
.gov_dbs_timer = od_dbs_timer,
|
.gov_dbs_timer = od_dbs_timer,
|
||||||
.alloc = od_alloc,
|
.alloc = od_alloc,
|
||||||
|
@ -13,7 +13,6 @@
|
|||||||
|
|
||||||
struct od_policy_dbs_info {
|
struct od_policy_dbs_info {
|
||||||
struct policy_dbs_info policy_dbs;
|
struct policy_dbs_info policy_dbs;
|
||||||
struct cpufreq_frequency_table *freq_table;
|
|
||||||
unsigned int freq_lo;
|
unsigned int freq_lo;
|
||||||
unsigned int freq_lo_delay_us;
|
unsigned int freq_lo_delay_us;
|
||||||
unsigned int freq_hi_delay_us;
|
unsigned int freq_hi_delay_us;
|
||||||
|
@ -16,27 +16,16 @@
|
|||||||
#include <linux/init.h>
|
#include <linux/init.h>
|
||||||
#include <linux/module.h>
|
#include <linux/module.h>
|
||||||
|
|
||||||
static int cpufreq_governor_performance(struct cpufreq_policy *policy,
|
static void cpufreq_gov_performance_limits(struct cpufreq_policy *policy)
|
||||||
unsigned int event)
|
|
||||||
{
|
{
|
||||||
switch (event) {
|
pr_debug("setting to %u kHz\n", policy->max);
|
||||||
case CPUFREQ_GOV_START:
|
__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
|
||||||
case CPUFREQ_GOV_LIMITS:
|
|
||||||
pr_debug("setting to %u kHz because of event %u\n",
|
|
||||||
policy->max, event);
|
|
||||||
__cpufreq_driver_target(policy, policy->max,
|
|
||||||
CPUFREQ_RELATION_H);
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct cpufreq_governor cpufreq_gov_performance = {
|
static struct cpufreq_governor cpufreq_gov_performance = {
|
||||||
.name = "performance",
|
.name = "performance",
|
||||||
.governor = cpufreq_governor_performance,
|
|
||||||
.owner = THIS_MODULE,
|
.owner = THIS_MODULE,
|
||||||
|
.limits = cpufreq_gov_performance_limits,
|
||||||
};
|
};
|
||||||
|
|
||||||
static int __init cpufreq_gov_performance_init(void)
|
static int __init cpufreq_gov_performance_init(void)
|
||||||
|
@ -16,26 +16,15 @@
|
|||||||
#include <linux/init.h>
|
#include <linux/init.h>
|
||||||
#include <linux/module.h>
|
#include <linux/module.h>
|
||||||
|
|
||||||
static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
|
static void cpufreq_gov_powersave_limits(struct cpufreq_policy *policy)
|
||||||
unsigned int event)
|
|
||||||
{
|
{
|
||||||
switch (event) {
|
pr_debug("setting to %u kHz\n", policy->min);
|
||||||
case CPUFREQ_GOV_START:
|
__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
|
||||||
case CPUFREQ_GOV_LIMITS:
|
|
||||||
pr_debug("setting to %u kHz because of event %u\n",
|
|
||||||
policy->min, event);
|
|
||||||
__cpufreq_driver_target(policy, policy->min,
|
|
||||||
CPUFREQ_RELATION_L);
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct cpufreq_governor cpufreq_gov_powersave = {
|
static struct cpufreq_governor cpufreq_gov_powersave = {
|
||||||
.name = "powersave",
|
.name = "powersave",
|
||||||
.governor = cpufreq_governor_powersave,
|
.limits = cpufreq_gov_powersave_limits,
|
||||||
.owner = THIS_MODULE,
|
.owner = THIS_MODULE,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@@ -15,7 +15,7 @@
 #include <linux/slab.h>
 #include <linux/cputime.h>

-static spinlock_t cpufreq_stats_lock;
+static DEFINE_SPINLOCK(cpufreq_stats_lock);

 struct cpufreq_stats {
     unsigned int total_trans;
@@ -52,6 +52,9 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
     ssize_t len = 0;
     int i;

+    if (policy->fast_switch_enabled)
+        return 0;
+
     cpufreq_stats_update(stats);
     for (i = 0; i < stats->state_num; i++) {
         len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
@@ -68,6 +71,9 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
     ssize_t len = 0;
     int i, j;

+    if (policy->fast_switch_enabled)
+        return 0;
+
     len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
     len += snprintf(buf + len, PAGE_SIZE - len, " : ");
     for (i = 0; i < stats->state_num; i++) {
@@ -130,7 +136,7 @@ static int freq_table_get_index(struct cpufreq_stats *stats, unsigned int freq)
     return -1;
 }

-static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
+void cpufreq_stats_free_table(struct cpufreq_policy *policy)
 {
     struct cpufreq_stats *stats = policy->stats;

@@ -146,39 +152,25 @@ static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
     policy->stats = NULL;
 }

-static void cpufreq_stats_free_table(unsigned int cpu)
-{
-    struct cpufreq_policy *policy;
-
-    policy = cpufreq_cpu_get(cpu);
-    if (!policy)
-        return;
-
-    __cpufreq_stats_free_table(policy);
-
-    cpufreq_cpu_put(policy);
-}
-
-static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
+void cpufreq_stats_create_table(struct cpufreq_policy *policy)
 {
     unsigned int i = 0, count = 0, ret = -ENOMEM;
     struct cpufreq_stats *stats;
     unsigned int alloc_size;
-    unsigned int cpu = policy->cpu;
     struct cpufreq_frequency_table *pos, *table;

     /* We need cpufreq table for creating stats table */
-    table = cpufreq_frequency_get_table(cpu);
+    table = policy->freq_table;
     if (unlikely(!table))
-        return 0;
+        return;

     /* stats already initialized */
     if (policy->stats)
-        return -EEXIST;
+        return;

     stats = kzalloc(sizeof(*stats), GFP_KERNEL);
     if (!stats)
-        return -ENOMEM;
+        return;

     /* Find total allocation size */
     cpufreq_for_each_valid_entry(pos, table)
@@ -215,80 +207,32 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
     policy->stats = stats;
     ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
     if (!ret)
-        return 0;
+        return;

     /* We failed, release resources */
     policy->stats = NULL;
     kfree(stats->time_in_state);
 free_stat:
     kfree(stats);
-
-    return ret;
 }

-static void cpufreq_stats_create_table(unsigned int cpu)
-{
-    struct cpufreq_policy *policy;
-
-    /*
-     * "likely(!policy)" because normally cpufreq_stats will be registered
-     * before cpufreq driver
-     */
-    policy = cpufreq_cpu_get(cpu);
-    if (likely(!policy))
-        return;
-
-    __cpufreq_stats_create_table(policy);
-
-    cpufreq_cpu_put(policy);
-}
-
-static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
-                    unsigned long val, void *data)
-{
-    int ret = 0;
-    struct cpufreq_policy *policy = data;
-
-    if (val == CPUFREQ_CREATE_POLICY)
-        ret = __cpufreq_stats_create_table(policy);
-    else if (val == CPUFREQ_REMOVE_POLICY)
-        __cpufreq_stats_free_table(policy);
-
-    return ret;
-}
-
-static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
-                    unsigned long val, void *data)
+void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
+                    unsigned int new_freq)
 {
-    struct cpufreq_freqs *freq = data;
-    struct cpufreq_policy *policy = cpufreq_cpu_get(freq->cpu);
-    struct cpufreq_stats *stats;
+    struct cpufreq_stats *stats = policy->stats;
     int old_index, new_index;

-    if (!policy) {
-        pr_err("%s: No policy found\n", __func__);
-        return 0;
-    }
-
-    if (val != CPUFREQ_POSTCHANGE)
-        goto put_policy;
-
-    if (!policy->stats) {
+    if (!stats) {
         pr_debug("%s: No stats found\n", __func__);
-        goto put_policy;
+        return;
     }

-    stats = policy->stats;
-
     old_index = stats->last_index;
-    new_index = freq_table_get_index(stats, freq->new);
+    new_index = freq_table_get_index(stats, new_freq);

     /* We can't do stats->time_in_state[-1]= .. */
-    if (old_index == -1 || new_index == -1)
-        goto put_policy;
-
-    if (old_index == new_index)
-        goto put_policy;
+    if (old_index == -1 || new_index == -1 || old_index == new_index)
+        return;

     cpufreq_stats_update(stats);

@@ -297,61 +241,4 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
     stats->trans_table[old_index * stats->max_state + new_index]++;
 #endif
     stats->total_trans++;
-
-put_policy:
-    cpufreq_cpu_put(policy);
-    return 0;
 }
-
-static struct notifier_block notifier_policy_block = {
-    .notifier_call = cpufreq_stat_notifier_policy
-};
-
-static struct notifier_block notifier_trans_block = {
-    .notifier_call = cpufreq_stat_notifier_trans
-};
-
-static int __init cpufreq_stats_init(void)
-{
-    int ret;
-    unsigned int cpu;
-
-    spin_lock_init(&cpufreq_stats_lock);
-    ret = cpufreq_register_notifier(&notifier_policy_block,
-                CPUFREQ_POLICY_NOTIFIER);
-    if (ret)
-        return ret;
-
-    for_each_online_cpu(cpu)
-        cpufreq_stats_create_table(cpu);
-
-    ret = cpufreq_register_notifier(&notifier_trans_block,
-                CPUFREQ_TRANSITION_NOTIFIER);
-    if (ret) {
-        cpufreq_unregister_notifier(&notifier_policy_block,
-                CPUFREQ_POLICY_NOTIFIER);
-        for_each_online_cpu(cpu)
-            cpufreq_stats_free_table(cpu);
-        return ret;
-    }
-
-    return 0;
-}
-
-static void __exit cpufreq_stats_exit(void)
-{
-    unsigned int cpu;
-
-    cpufreq_unregister_notifier(&notifier_policy_block,
-            CPUFREQ_POLICY_NOTIFIER);
-    cpufreq_unregister_notifier(&notifier_trans_block,
-            CPUFREQ_TRANSITION_NOTIFIER);
-    for_each_online_cpu(cpu)
-        cpufreq_stats_free_table(cpu);
-}
-
-MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
-MODULE_DESCRIPTION("Export cpufreq stats via sysfs");
-MODULE_LICENSE("GPL");
-
-module_init(cpufreq_stats_init);
-module_exit(cpufreq_stats_exit);
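With the notifiers gone, the stats table follows the policy life cycle and transitions are recorded with the new frequency passed in directly. A hedged sketch of the intended call sites (the wrapper names below are made up for illustration; the real callers live in the cpufreq core, which is not part of this excerpt):

void example_policy_created(struct cpufreq_policy *policy)
{
    /* allocate and expose the per-policy stats table */
    cpufreq_stats_create_table(policy);
}

void example_frequency_changed(struct cpufreq_policy *policy, unsigned int new_freq)
{
    /* account the switch directly, no transition notifier involved */
    cpufreq_stats_record_transition(policy, new_freq);
}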
@@ -65,66 +65,66 @@ static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
     return 0;
 }

-static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
-                    unsigned int event)
-{
-    unsigned int *setspeed = policy->governor_data;
-    unsigned int cpu = policy->cpu;
-    int rc = 0;
-
-    if (event == CPUFREQ_GOV_POLICY_INIT)
-        return cpufreq_userspace_policy_init(policy);
-
-    if (!setspeed)
-        return -EINVAL;
-
-    switch (event) {
-    case CPUFREQ_GOV_POLICY_EXIT:
-        mutex_lock(&userspace_mutex);
-        policy->governor_data = NULL;
-        kfree(setspeed);
-        mutex_unlock(&userspace_mutex);
-        break;
-    case CPUFREQ_GOV_START:
-        BUG_ON(!policy->cur);
-        pr_debug("started managing cpu %u\n", cpu);
-
-        mutex_lock(&userspace_mutex);
-        per_cpu(cpu_is_managed, cpu) = 1;
-        *setspeed = policy->cur;
-        mutex_unlock(&userspace_mutex);
-        break;
-    case CPUFREQ_GOV_STOP:
-        pr_debug("managing cpu %u stopped\n", cpu);
-
-        mutex_lock(&userspace_mutex);
-        per_cpu(cpu_is_managed, cpu) = 0;
-        *setspeed = 0;
-        mutex_unlock(&userspace_mutex);
-        break;
-    case CPUFREQ_GOV_LIMITS:
-        mutex_lock(&userspace_mutex);
-        pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
-            cpu, policy->min, policy->max, policy->cur, *setspeed);
-
-        if (policy->max < *setspeed)
-            __cpufreq_driver_target(policy, policy->max,
-                        CPUFREQ_RELATION_H);
-        else if (policy->min > *setspeed)
-            __cpufreq_driver_target(policy, policy->min,
-                        CPUFREQ_RELATION_L);
-        else
-            __cpufreq_driver_target(policy, *setspeed,
-                        CPUFREQ_RELATION_L);
-        mutex_unlock(&userspace_mutex);
-        break;
-    }
-    return rc;
+static void cpufreq_userspace_policy_exit(struct cpufreq_policy *policy)
+{
+    mutex_lock(&userspace_mutex);
+    kfree(policy->governor_data);
+    policy->governor_data = NULL;
+    mutex_unlock(&userspace_mutex);
+}
+
+static int cpufreq_userspace_policy_start(struct cpufreq_policy *policy)
+{
+    unsigned int *setspeed = policy->governor_data;
+
+    BUG_ON(!policy->cur);
+    pr_debug("started managing cpu %u\n", policy->cpu);
+
+    mutex_lock(&userspace_mutex);
+    per_cpu(cpu_is_managed, policy->cpu) = 1;
+    *setspeed = policy->cur;
+    mutex_unlock(&userspace_mutex);
+    return 0;
+}
+
+static void cpufreq_userspace_policy_stop(struct cpufreq_policy *policy)
+{
+    unsigned int *setspeed = policy->governor_data;
+
+    pr_debug("managing cpu %u stopped\n", policy->cpu);
+
+    mutex_lock(&userspace_mutex);
+    per_cpu(cpu_is_managed, policy->cpu) = 0;
+    *setspeed = 0;
+    mutex_unlock(&userspace_mutex);
+}
+
+static void cpufreq_userspace_policy_limits(struct cpufreq_policy *policy)
+{
+    unsigned int *setspeed = policy->governor_data;
+
+    mutex_lock(&userspace_mutex);
+
+    pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
+        policy->cpu, policy->min, policy->max, policy->cur, *setspeed);
+
+    if (policy->max < *setspeed)
+        __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+    else if (policy->min > *setspeed)
+        __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+    else
+        __cpufreq_driver_target(policy, *setspeed, CPUFREQ_RELATION_L);
+
+    mutex_unlock(&userspace_mutex);
 }

 static struct cpufreq_governor cpufreq_gov_userspace = {
     .name = "userspace",
-    .governor = cpufreq_governor_userspace,
+    .init = cpufreq_userspace_policy_init,
+    .exit = cpufreq_userspace_policy_exit,
+    .start = cpufreq_userspace_policy_start,
+    .stop = cpufreq_userspace_policy_stop,
+    .limits = cpufreq_userspace_policy_limits,
     .store_setspeed = cpufreq_set,
     .show_setspeed = show_speed,
     .owner = THIS_MODULE,
@@ -38,26 +38,6 @@ struct davinci_cpufreq {
 };
 static struct davinci_cpufreq cpufreq;

-static int davinci_verify_speed(struct cpufreq_policy *policy)
-{
-    struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
-    struct cpufreq_frequency_table *freq_table = pdata->freq_table;
-    struct clk *armclk = cpufreq.armclk;
-
-    if (freq_table)
-        return cpufreq_frequency_table_verify(policy, freq_table);
-
-    if (policy->cpu)
-        return -EINVAL;
-
-    cpufreq_verify_within_cpu_limits(policy);
-    policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
-    policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
-    cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-                     policy->cpuinfo.max_freq);
-    return 0;
-}
-
 static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
 {
     struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
@@ -121,7 +101,7 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)

 static struct cpufreq_driver davinci_driver = {
     .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
-    .verify = davinci_verify_speed,
+    .verify = cpufreq_generic_frequency_table_verify,
     .target_index = davinci_target,
     .get = cpufreq_generic_get,
     .init = davinci_cpu_init,
@@ -63,8 +63,6 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
     else
         return 0;
 }
-EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo);
-

 int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
                    struct cpufreq_frequency_table *table)
@@ -108,20 +106,16 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
  */
 int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
 {
-    struct cpufreq_frequency_table *table =
-        cpufreq_frequency_get_table(policy->cpu);
-    if (!table)
+    if (!policy->freq_table)
         return -ENODEV;

-    return cpufreq_frequency_table_verify(policy, table);
+    return cpufreq_frequency_table_verify(policy, policy->freq_table);
 }
 EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);

 int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
-                   struct cpufreq_frequency_table *table,
                    unsigned int target_freq,
-                   unsigned int relation,
-                   unsigned int *index)
+                   unsigned int relation)
 {
     struct cpufreq_frequency_table optimal = {
         .driver_data = ~0,
@@ -132,7 +126,9 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
         .frequency = 0,
     };
     struct cpufreq_frequency_table *pos;
+    struct cpufreq_frequency_table *table = policy->freq_table;
     unsigned int freq, diff, i = 0;
+    int index;

     pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
                     target_freq, relation, policy->cpu);
@@ -196,25 +192,26 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
         }
     }
     if (optimal.driver_data > i) {
-        if (suboptimal.driver_data > i)
-            return -EINVAL;
-        *index = suboptimal.driver_data;
-    } else
-        *index = optimal.driver_data;
+        if (suboptimal.driver_data > i) {
+            WARN(1, "Invalid frequency table: %d\n", policy->cpu);
+            return 0;
+        }

-    pr_debug("target index is %u, freq is:%u kHz\n", *index,
-         table[*index].frequency);
+        index = suboptimal.driver_data;
+    } else
+        index = optimal.driver_data;

-    return 0;
+    pr_debug("target index is %u, freq is:%u kHz\n", index,
+         table[index].frequency);
+
+    return index;
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);

 int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
                       unsigned int freq)
 {
-    struct cpufreq_frequency_table *pos, *table;
-
-    table = cpufreq_frequency_get_table(policy->cpu);
+    struct cpufreq_frequency_table *pos, *table = policy->freq_table;

     if (unlikely(!table)) {
         pr_debug("%s: Unable to find frequency table\n", __func__);
         return -ENOENT;
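cpufreq_frequency_table_target() now returns the chosen table index instead of writing through an index pointer. A minimal sketch of the new calling convention (the driver function and the example_set_rate() helper are illustrative, not part of this series):

static int example_set_rate(unsigned int freq_khz);  /* hypothetical rate-setting helper */

static int example_target(struct cpufreq_policy *policy,
                          unsigned int target_freq, unsigned int relation)
{
    int index = cpufreq_frequency_table_target(policy, target_freq, relation);

    return example_set_rate(policy->freq_table[index].frequency);
}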
@@ -35,6 +35,7 @@
 #include <asm/msr.h>
 #include <asm/cpu_device_id.h>
 #include <asm/cpufeature.h>
+#include <asm/intel-family.h>

 #define ATOM_RATIOS 0x66a
 #define ATOM_VIDS 0x66b
@@ -1091,6 +1092,26 @@ static struct cpu_defaults knl_params = {
     },
 };

+static struct cpu_defaults bxt_params = {
+    .pid_policy = {
+        .sample_rate_ms = 10,
+        .deadband = 0,
+        .setpoint = 60,
+        .p_gain_pct = 14,
+        .d_gain_pct = 0,
+        .i_gain_pct = 4,
+    },
+    .funcs = {
+        .get_max = core_get_max_pstate,
+        .get_max_physical = core_get_max_pstate_physical,
+        .get_min = core_get_min_pstate,
+        .get_turbo = core_get_turbo_pstate,
+        .get_scaling = core_get_scaling,
+        .get_val = core_get_val,
+        .get_target_pstate = get_target_pstate_use_cpu_load,
+    },
+};
+
 static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 {
     int max_perf = cpu->pstate.turbo_pstate;
@@ -1334,29 +1355,30 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
         (unsigned long)&policy }

 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
-    ICPU(0x2a, core_params),
-    ICPU(0x2d, core_params),
-    ICPU(0x37, silvermont_params),
-    ICPU(0x3a, core_params),
-    ICPU(0x3c, core_params),
-    ICPU(0x3d, core_params),
-    ICPU(0x3e, core_params),
-    ICPU(0x3f, core_params),
-    ICPU(0x45, core_params),
-    ICPU(0x46, core_params),
-    ICPU(0x47, core_params),
-    ICPU(0x4c, airmont_params),
-    ICPU(0x4e, core_params),
-    ICPU(0x4f, core_params),
-    ICPU(0x5e, core_params),
-    ICPU(0x56, core_params),
-    ICPU(0x57, knl_params),
+    ICPU(INTEL_FAM6_SANDYBRIDGE, core_params),
+    ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_params),
+    ICPU(INTEL_FAM6_ATOM_SILVERMONT1, silvermont_params),
+    ICPU(INTEL_FAM6_IVYBRIDGE, core_params),
+    ICPU(INTEL_FAM6_HASWELL_CORE, core_params),
+    ICPU(INTEL_FAM6_BROADWELL_CORE, core_params),
+    ICPU(INTEL_FAM6_IVYBRIDGE_X, core_params),
+    ICPU(INTEL_FAM6_HASWELL_X, core_params),
+    ICPU(INTEL_FAM6_HASWELL_ULT, core_params),
+    ICPU(INTEL_FAM6_HASWELL_GT3E, core_params),
+    ICPU(INTEL_FAM6_BROADWELL_GT3E, core_params),
+    ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_params),
+    ICPU(INTEL_FAM6_SKYLAKE_MOBILE, core_params),
+    ICPU(INTEL_FAM6_BROADWELL_X, core_params),
+    ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_params),
+    ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
+    ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_params),
+    ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_params),
     {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
-    ICPU(0x56, core_params),
+    ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
     {}
 };
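Throughout this series the raw model-number literals are replaced by the symbolic INTEL_FAM6_* macros from <asm/intel-family.h>. A small sketch of how such a match table is typically consumed, using only the generic x86_match_cpu() helper; the table contents and function name below are illustrative:

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

static const struct x86_cpu_id example_ids[] = {
    { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_DESKTOP },  /* illustrative entry */
    {}
};

static bool example_cpu_supported(void)
{
    /* returns the matching entry, or NULL if this CPU is not listed */
    return x86_match_cpu(example_ids) != NULL;
}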
@@ -70,7 +70,7 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
             continue;
         }

-        clk = clk_get(cpu_dev, 0);
+        clk = clk_get(cpu_dev, NULL);
         if (IS_ERR(clk)) {
             pr_err("Cannot get clock for CPU %d\n", cpu);
             return PTR_ERR(clk);
@@ -760,9 +760,8 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
         struct cpufreq_policy policy;

         cpufreq_get_policy(&policy, cpu);
-        cpufreq_frequency_table_target(&policy, policy.freq_table,
-                           policy.cur,
-                           CPUFREQ_RELATION_C, &index);
+        index = cpufreq_frequency_table_target(&policy, policy.cur,
+                               CPUFREQ_RELATION_C);
         powernv_cpufreq_target_index(&policy, index);
         cpumask_andnot(&mask, &mask, policy.cpus);
     }
@@ -94,7 +94,7 @@ static int pmi_notifier(struct notifier_block *nb,
             unsigned long event, void *data)
 {
     struct cpufreq_policy *policy = data;
-    struct cpufreq_frequency_table *cbe_freqs;
+    struct cpufreq_frequency_table *cbe_freqs = policy->freq_table;
     u8 node;

     /* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY
@@ -103,7 +103,6 @@ static int pmi_notifier(struct notifier_block *nb,
     if (event == CPUFREQ_START)
         return 0;

-    cbe_freqs = cpufreq_frequency_get_table(policy->cpu);
     node = cbe_cpu_to_node(policy->cpu);

     pr_debug("got notified, event=%lu, node=%u\n", event, node);
@@ -293,12 +293,8 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
          __func__, policy, target_freq, relation);

     if (ftab) {
-        if (cpufreq_frequency_table_target(policy, ftab,
-                           target_freq, relation,
-                           &index)) {
-            s3c_freq_dbg("%s: table failed\n", __func__);
-            return -EINVAL;
-        }
+        index = cpufreq_frequency_table_target(policy, target_freq,
+                               relation);

         s3c_freq_dbg("%s: adjust %d to entry %d (%u)\n", __func__,
                  target_freq, index, ftab[index].frequency);
@@ -315,7 +311,6 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
         pll = NULL;
     } else {
         struct cpufreq_policy tmp_policy;
-        int ret;

         /* we keep the cpu pll table in Hz, to ensure we get an
          * accurate value for the PLL output. */
@@ -323,20 +318,14 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
         tmp_policy.min = policy->min * 1000;
         tmp_policy.max = policy->max * 1000;
         tmp_policy.cpu = policy->cpu;
+        tmp_policy.freq_table = pll_reg;

-        /* cpufreq_frequency_table_target uses a pointer to 'index'
-         * which is the number of the table entry, not the value of
+        /* cpufreq_frequency_table_target returns the index
+         * of the table entry, not the value of
          * the table entry's index field. */

-        ret = cpufreq_frequency_table_target(&tmp_policy, pll_reg,
-                             target_freq, relation,
-                             &index);
-
-        if (ret < 0) {
-            pr_err("%s: no PLL available\n", __func__);
-            goto err_notpossible;
-        }
+        index = cpufreq_frequency_table_target(&tmp_policy, target_freq,
+                               relation);

         pll = pll_reg + index;

         s3c_freq_dbg("%s: target %u => %u\n",
@@ -346,10 +335,6 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
     }

     return s3c_cpufreq_settarget(policy, target_freq, pll);
-
-err_notpossible:
-    pr_err("no compatible settings for %d\n", target_freq);
-    return -EINVAL;
 }

 struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
@@ -571,11 +556,7 @@ static int s3c_cpufreq_build_freq(void)
 {
     int size, ret;

-    if (!cpu_cur.info->calc_freqtable)
-        return -EINVAL;
-
     kfree(ftab);
-    ftab = NULL;

     size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
     size++;
@@ -246,12 +246,8 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
     new_freq = s5pv210_freq_table[index].frequency;

     /* Finding current running level index */
-    if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
-                       old_freq, CPUFREQ_RELATION_H,
-                       &priv_index)) {
-        ret = -EINVAL;
-        goto exit;
-    }
+    priv_index = cpufreq_frequency_table_target(policy, old_freq,
+                            CPUFREQ_RELATION_H);

     arm_volt = dvs_conf[index].arm_volt;
     int_volt = dvs_conf[index].int_volt;
@@ -62,6 +62,7 @@
 #include <linux/cpu.h>
 #include <linux/module.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/mwait.h>
 #include <asm/msr.h>

@@ -1020,38 +1021,38 @@ static const struct idle_cpu idle_cpu_bxt = {
     { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }

 static const struct x86_cpu_id intel_idle_ids[] __initconst = {
-    ICPU(0x1a, idle_cpu_nehalem),
-    ICPU(0x1e, idle_cpu_nehalem),
-    ICPU(0x1f, idle_cpu_nehalem),
-    ICPU(0x25, idle_cpu_nehalem),
-    ICPU(0x2c, idle_cpu_nehalem),
-    ICPU(0x2e, idle_cpu_nehalem),
-    ICPU(0x1c, idle_cpu_atom),
-    ICPU(0x26, idle_cpu_lincroft),
-    ICPU(0x2f, idle_cpu_nehalem),
-    ICPU(0x2a, idle_cpu_snb),
-    ICPU(0x2d, idle_cpu_snb),
-    ICPU(0x36, idle_cpu_atom),
-    ICPU(0x37, idle_cpu_byt),
-    ICPU(0x4c, idle_cpu_cht),
-    ICPU(0x3a, idle_cpu_ivb),
-    ICPU(0x3e, idle_cpu_ivt),
-    ICPU(0x3c, idle_cpu_hsw),
-    ICPU(0x3f, idle_cpu_hsw),
-    ICPU(0x45, idle_cpu_hsw),
-    ICPU(0x46, idle_cpu_hsw),
-    ICPU(0x4d, idle_cpu_avn),
-    ICPU(0x3d, idle_cpu_bdw),
-    ICPU(0x47, idle_cpu_bdw),
-    ICPU(0x4f, idle_cpu_bdw),
-    ICPU(0x56, idle_cpu_bdw),
-    ICPU(0x4e, idle_cpu_skl),
-    ICPU(0x5e, idle_cpu_skl),
-    ICPU(0x8e, idle_cpu_skl),
-    ICPU(0x9e, idle_cpu_skl),
-    ICPU(0x55, idle_cpu_skx),
-    ICPU(0x57, idle_cpu_knl),
-    ICPU(0x5c, idle_cpu_bxt),
+    ICPU(INTEL_FAM6_NEHALEM_EP, idle_cpu_nehalem),
+    ICPU(INTEL_FAM6_NEHALEM, idle_cpu_nehalem),
+    ICPU(INTEL_FAM6_WESTMERE2, idle_cpu_nehalem),
+    ICPU(INTEL_FAM6_WESTMERE, idle_cpu_nehalem),
+    ICPU(INTEL_FAM6_WESTMERE_EP, idle_cpu_nehalem),
+    ICPU(INTEL_FAM6_NEHALEM_EX, idle_cpu_nehalem),
+    ICPU(INTEL_FAM6_ATOM_PINEVIEW, idle_cpu_atom),
+    ICPU(INTEL_FAM6_ATOM_LINCROFT, idle_cpu_lincroft),
+    ICPU(INTEL_FAM6_WESTMERE_EX, idle_cpu_nehalem),
+    ICPU(INTEL_FAM6_SANDYBRIDGE, idle_cpu_snb),
+    ICPU(INTEL_FAM6_SANDYBRIDGE_X, idle_cpu_snb),
+    ICPU(INTEL_FAM6_ATOM_CEDARVIEW, idle_cpu_atom),
+    ICPU(INTEL_FAM6_ATOM_SILVERMONT1, idle_cpu_byt),
+    ICPU(INTEL_FAM6_ATOM_AIRMONT, idle_cpu_cht),
+    ICPU(INTEL_FAM6_IVYBRIDGE, idle_cpu_ivb),
+    ICPU(INTEL_FAM6_IVYBRIDGE_X, idle_cpu_ivt),
+    ICPU(INTEL_FAM6_HASWELL_CORE, idle_cpu_hsw),
+    ICPU(INTEL_FAM6_HASWELL_X, idle_cpu_hsw),
+    ICPU(INTEL_FAM6_HASWELL_ULT, idle_cpu_hsw),
+    ICPU(INTEL_FAM6_HASWELL_GT3E, idle_cpu_hsw),
+    ICPU(INTEL_FAM6_ATOM_SILVERMONT2, idle_cpu_avn),
+    ICPU(INTEL_FAM6_BROADWELL_CORE, idle_cpu_bdw),
+    ICPU(INTEL_FAM6_BROADWELL_GT3E, idle_cpu_bdw),
+    ICPU(INTEL_FAM6_BROADWELL_X, idle_cpu_bdw),
+    ICPU(INTEL_FAM6_BROADWELL_XEON_D, idle_cpu_bdw),
+    ICPU(INTEL_FAM6_SKYLAKE_MOBILE, idle_cpu_skl),
+    ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, idle_cpu_skl),
+    ICPU(INTEL_FAM6_KABYLAKE_MOBILE, idle_cpu_skl),
+    ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, idle_cpu_skl),
+    ICPU(INTEL_FAM6_SKYLAKE_X, idle_cpu_skx),
+    ICPU(INTEL_FAM6_XEON_PHI_KNL, idle_cpu_knl),
+    ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt),
     {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -1261,13 +1262,13 @@ static void intel_idle_state_table_update(void)
 {
     switch (boot_cpu_data.x86_model) {

-    case 0x3e: /* IVT */
+    case INTEL_FAM6_IVYBRIDGE_X:
         ivt_idle_state_table_update();
         break;
-    case 0x5c: /* BXT */
+    case INTEL_FAM6_ATOM_GOLDMONT:
         bxt_idle_state_table_update();
         break;
-    case 0x5e: /* SKL-H */
+    case INTEL_FAM6_SKYLAKE_DESKTOP:
         sklh_idle_state_table_update();
         break;
     }
@@ -43,6 +43,7 @@

 #ifdef CONFIG_X86
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/iosf_mbi.h>
 #endif

@@ -126,7 +127,7 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
 static bool sdhci_acpi_byt(void)
 {
     static const struct x86_cpu_id byt[] = {
-        { X86_VENDOR_INTEL, 6, 0x37 },
+        { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
         {}
     };
@@ -32,6 +32,7 @@
 #include <linux/suspend.h>

 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/intel_pmc_ipc.h>
 #include <asm/intel_punit_ipc.h>
 #include <asm/intel_telemetry.h>
@@ -331,7 +332,7 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = {
 };

 static const struct x86_cpu_id telemetry_debugfs_cpu_ids[] = {
-    TELEM_DEBUGFS_CPU(0x5c, telem_apl_debugfs_conf),
+    TELEM_DEBUGFS_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_debugfs_conf),
     {}
 };
@@ -28,6 +28,7 @@
 #include <linux/platform_device.h>

 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/intel_pmc_ipc.h>
 #include <asm/intel_punit_ipc.h>
 #include <asm/intel_telemetry.h>
@@ -163,7 +164,7 @@ static struct telemetry_plt_config telem_apl_config = {
 };

 static const struct x86_cpu_id telemetry_cpu_ids[] = {
-    TELEM_CPU(0x5c, telem_apl_config),
+    TELEM_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_config),
     {}
 };
@@ -33,6 +33,7 @@

 #include <asm/processor.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>

 /* Local defines */
 #define MSR_PLATFORM_POWER_LIMIT 0x0000065C
@@ -1096,27 +1097,34 @@ static const struct rapl_defaults rapl_defaults_cht = {
 }

 static const struct x86_cpu_id rapl_ids[] __initconst = {
-    RAPL_CPU(0x2a, rapl_defaults_core),/* Sandy Bridge */
-    RAPL_CPU(0x2d, rapl_defaults_core),/* Sandy Bridge EP */
-    RAPL_CPU(0x37, rapl_defaults_byt),/* Valleyview */
-    RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */
-    RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */
-    RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */
-    RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
-    RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
-    RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
-    RAPL_CPU(0x46, rapl_defaults_core),/* Haswell */
-    RAPL_CPU(0x47, rapl_defaults_core),/* Broadwell-H */
-    RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */
-    RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */
-    RAPL_CPU(0x4A, rapl_defaults_tng),/* Tangier */
-    RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
-    RAPL_CPU(0x5A, rapl_defaults_ann),/* Annidale */
-    RAPL_CPU(0X5C, rapl_defaults_core),/* Broxton */
-    RAPL_CPU(0x5E, rapl_defaults_core),/* Skylake-H/S */
-    RAPL_CPU(0x57, rapl_defaults_hsw_server),/* Knights Landing */
-    RAPL_CPU(0x8E, rapl_defaults_core),/* Kabylake */
-    RAPL_CPU(0x9E, rapl_defaults_core),/* Kabylake */
+    RAPL_CPU(INTEL_FAM6_SANDYBRIDGE, rapl_defaults_core),
+    RAPL_CPU(INTEL_FAM6_SANDYBRIDGE_X, rapl_defaults_core),
+    RAPL_CPU(INTEL_FAM6_IVYBRIDGE, rapl_defaults_core),
+    RAPL_CPU(INTEL_FAM6_HASWELL_CORE, rapl_defaults_core),
+    RAPL_CPU(INTEL_FAM6_HASWELL_ULT, rapl_defaults_core),
+    RAPL_CPU(INTEL_FAM6_HASWELL_GT3E, rapl_defaults_core),
+    RAPL_CPU(INTEL_FAM6_HASWELL_X, rapl_defaults_hsw_server),
+    RAPL_CPU(INTEL_FAM6_BROADWELL_CORE, rapl_defaults_core),
+    RAPL_CPU(INTEL_FAM6_BROADWELL_GT3E, rapl_defaults_core),
+    RAPL_CPU(INTEL_FAM6_BROADWELL_XEON_D, rapl_defaults_core),
+    RAPL_CPU(INTEL_FAM6_BROADWELL_X, rapl_defaults_hsw_server),
+    RAPL_CPU(INTEL_FAM6_SKYLAKE_DESKTOP, rapl_defaults_core),
+    RAPL_CPU(INTEL_FAM6_SKYLAKE_MOBILE, rapl_defaults_core),
+    RAPL_CPU(INTEL_FAM6_SKYLAKE_X, rapl_defaults_hsw_server),
+    RAPL_CPU(INTEL_FAM6_KABYLAKE_MOBILE, rapl_defaults_core),
+    RAPL_CPU(INTEL_FAM6_KABYLAKE_DESKTOP, rapl_defaults_core),
+    RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT1, rapl_defaults_byt),
+    RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT, rapl_defaults_cht),
+    RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD1, rapl_defaults_tng),
+    RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD2, rapl_defaults_ann),
+    RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT, rapl_defaults_core),
+
+    RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL, rapl_defaults_hsw_server),
     {}
 };
 MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
@@ -787,22 +787,34 @@ __cpufreq_cooling_register(struct device_node *np,
             const struct cpumask *clip_cpus, u32 capacitance,
             get_static_t plat_static_func)
 {
+    struct cpufreq_policy *policy;
     struct thermal_cooling_device *cool_dev;
     struct cpufreq_cooling_device *cpufreq_dev;
     char dev_name[THERMAL_NAME_LENGTH];
     struct cpufreq_frequency_table *pos, *table;
+    struct cpumask temp_mask;
     unsigned int freq, i, num_cpus;
     int ret;

-    table = cpufreq_frequency_get_table(cpumask_first(clip_cpus));
-    if (!table) {
-        pr_debug("%s: CPUFreq table not found\n", __func__);
+    cpumask_and(&temp_mask, clip_cpus, cpu_online_mask);
+    policy = cpufreq_cpu_get(cpumask_first(&temp_mask));
+    if (!policy) {
+        pr_debug("%s: CPUFreq policy not found\n", __func__);
         return ERR_PTR(-EPROBE_DEFER);
     }

+    table = policy->freq_table;
+    if (!table) {
+        pr_debug("%s: CPUFreq table not found\n", __func__);
+        cool_dev = ERR_PTR(-ENODEV);
+        goto put_policy;
+    }
+
     cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL);
-    if (!cpufreq_dev)
-        return ERR_PTR(-ENOMEM);
+    if (!cpufreq_dev) {
+        cool_dev = ERR_PTR(-ENOMEM);
+        goto put_policy;
+    }

     num_cpus = cpumask_weight(clip_cpus);
     cpufreq_dev->time_in_idle = kcalloc(num_cpus,
@@ -892,7 +904,7 @@ __cpufreq_cooling_register(struct device_node *np,
                       CPUFREQ_POLICY_NOTIFIER);
     mutex_unlock(&cooling_cpufreq_lock);

-    return cool_dev;
+    goto put_policy;

 remove_idr:
     release_idr(&cpufreq_idr, cpufreq_dev->id);
@@ -906,6 +918,8 @@ free_time_in_idle:
     kfree(cpufreq_dev->time_in_idle);
 free_cdev:
     kfree(cpufreq_dev);
+put_policy:
+    cpufreq_cpu_put(policy);

     return cool_dev;
 }
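The cooling registration now pins the policy with cpufreq_cpu_get() and releases it on every exit path. A compact sketch of that get/put pattern (the function name below is illustrative):

static int example_first_table_freq(unsigned int cpu, unsigned int *freq)
{
    struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);  /* takes a reference */

    if (!policy)
        return -EPROBE_DEFER;

    *freq = policy->freq_table ? policy->freq_table[0].frequency : 0;

    cpufreq_cpu_put(policy);  /* drop the reference on every path */
    return 0;
}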
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include "intel_soc_dts_iosf.h"

 #define CRITICAL_OFFSET_FROM_TJ_MAX 5000
@@ -42,7 +43,8 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
 }

 static const struct x86_cpu_id soc_thermal_ids[] = {
-    { X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x37, 0, BYT_SOC_DTS_APIC_IRQ},
+    { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1, 0,
+        BYT_SOC_DTS_APIC_IRQ},
     {}
 };
 MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids);
|
@ -185,6 +185,18 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
|
|||||||
static inline void disable_cpufreq(void) { }
|
static inline void disable_cpufreq(void) { }
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_CPU_FREQ_STAT
|
||||||
|
void cpufreq_stats_create_table(struct cpufreq_policy *policy);
|
||||||
|
void cpufreq_stats_free_table(struct cpufreq_policy *policy);
|
||||||
|
void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
|
||||||
|
unsigned int new_freq);
|
||||||
|
#else
|
||||||
|
static inline void cpufreq_stats_create_table(struct cpufreq_policy *policy) { }
|
||||||
|
static inline void cpufreq_stats_free_table(struct cpufreq_policy *policy) { }
|
||||||
|
static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
|
||||||
|
unsigned int new_freq) { }
|
||||||
|
#endif /* CONFIG_CPU_FREQ_STAT */
|
||||||
|
|
||||||
/*********************************************************************
|
/*********************************************************************
|
||||||
* CPUFREQ DRIVER INTERFACE *
|
* CPUFREQ DRIVER INTERFACE *
|
||||||
*********************************************************************/
|
*********************************************************************/
|
||||||
@ -455,18 +467,13 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
|
|||||||
#define MIN_LATENCY_MULTIPLIER (20)
|
#define MIN_LATENCY_MULTIPLIER (20)
|
||||||
#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
|
#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
|
||||||
|
|
||||||
/* Governor Events */
|
|
||||||
#define CPUFREQ_GOV_START 1
|
|
||||||
#define CPUFREQ_GOV_STOP 2
|
|
||||||
#define CPUFREQ_GOV_LIMITS 3
|
|
||||||
#define CPUFREQ_GOV_POLICY_INIT 4
|
|
||||||
#define CPUFREQ_GOV_POLICY_EXIT 5
|
|
||||||
|
|
||||||
struct cpufreq_governor {
|
struct cpufreq_governor {
|
||||||
char name[CPUFREQ_NAME_LEN];
|
char name[CPUFREQ_NAME_LEN];
|
||||||
int initialized;
|
int (*init)(struct cpufreq_policy *policy);
|
||||||
int (*governor) (struct cpufreq_policy *policy,
|
void (*exit)(struct cpufreq_policy *policy);
|
||||||
unsigned int event);
|
int (*start)(struct cpufreq_policy *policy);
|
||||||
|
void (*stop)(struct cpufreq_policy *policy);
|
||||||
|
void (*limits)(struct cpufreq_policy *policy);
|
||||||
ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
|
ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
|
||||||
char *buf);
|
char *buf);
|
||||||
int (*store_setspeed) (struct cpufreq_policy *policy,
|
int (*store_setspeed) (struct cpufreq_policy *policy,
|
||||||
@ -493,6 +500,14 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor);
|
|||||||
struct cpufreq_governor *cpufreq_default_governor(void);
|
struct cpufreq_governor *cpufreq_default_governor(void);
|
||||||
struct cpufreq_governor *cpufreq_fallback_governor(void);
|
struct cpufreq_governor *cpufreq_fallback_governor(void);
|
||||||
|
|
||||||
|
static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
|
||||||
|
{
|
||||||
|
if (policy->max < policy->cur)
|
||||||
|
__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
|
||||||
|
else if (policy->min > policy->cur)
|
||||||
|
__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
|
||||||
|
}
|
||||||
|
|
||||||
/* Governor attribute set */
|
/* Governor attribute set */
|
||||||
struct gov_attr_set {
|
struct gov_attr_set {
|
||||||
struct kobject kobj;
|
struct kobject kobj;
|
||||||
@ -583,10 +598,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
|
|||||||
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);
|
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);
|
||||||
|
|
||||||
int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
|
int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
|
||||||
struct cpufreq_frequency_table *table,
|
|
||||||
unsigned int target_freq,
|
unsigned int target_freq,
|
||||||
unsigned int relation,
|
unsigned int relation);
|
||||||
unsigned int *index);
|
|
||||||
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
|
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
|
||||||
unsigned int freq);
|
unsigned int freq);
|
||||||
|
|
||||||
@ -617,8 +630,6 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
/* the following funtion is for cpufreq core use only */
|
|
||||||
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
|
|
||||||
|
|
||||||
/* the following are really really optional */
|
/* the following are really really optional */
|
||||||
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
|
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
|
||||||
|
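cpufreq_policy_apply_limits() centralizes the min/max clamping that governors used to open-code, so a ->limits() callback can be as small as this hedged sketch (the function name is illustrative; the schedutil hunk below shows the real conversion):

static void example_limits(struct cpufreq_policy *policy)
{
    /* clamp policy->cur into [policy->min, policy->max] */
    cpufreq_policy_apply_limits(policy);
}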
@@ -394,7 +394,7 @@ static int sugov_init(struct cpufreq_policy *policy)
     return ret;
 }

-static int sugov_exit(struct cpufreq_policy *policy)
+static void sugov_exit(struct cpufreq_policy *policy)
 {
     struct sugov_policy *sg_policy = policy->governor_data;
     struct sugov_tunables *tunables = sg_policy->tunables;
@@ -412,7 +412,6 @@ static int sugov_exit(struct cpufreq_policy *policy)
     mutex_unlock(&global_tunables_lock);

     sugov_policy_free(sg_policy);
-    return 0;
 }

 static int sugov_start(struct cpufreq_policy *policy)
@@ -444,7 +443,7 @@ static int sugov_start(struct cpufreq_policy *policy)
     return 0;
 }

-static int sugov_stop(struct cpufreq_policy *policy)
+static void sugov_stop(struct cpufreq_policy *policy)
 {
     struct sugov_policy *sg_policy = policy->governor_data;
     unsigned int cpu;
@@ -456,53 +455,29 @@ static int sugov_stop(struct cpufreq_policy *policy)

     irq_work_sync(&sg_policy->irq_work);
     cancel_work_sync(&sg_policy->work);
-    return 0;
 }

-static int sugov_limits(struct cpufreq_policy *policy)
+static void sugov_limits(struct cpufreq_policy *policy)
 {
     struct sugov_policy *sg_policy = policy->governor_data;

     if (!policy->fast_switch_enabled) {
         mutex_lock(&sg_policy->work_lock);
-
-        if (policy->max < policy->cur)
-            __cpufreq_driver_target(policy, policy->max,
-                        CPUFREQ_RELATION_H);
-        else if (policy->min > policy->cur)
-            __cpufreq_driver_target(policy, policy->min,
-                        CPUFREQ_RELATION_L);
-
+        cpufreq_policy_apply_limits(policy);
         mutex_unlock(&sg_policy->work_lock);
     }

     sg_policy->need_freq_update = true;
-    return 0;
-}
-
-int sugov_governor(struct cpufreq_policy *policy, unsigned int event)
-{
-    if (event == CPUFREQ_GOV_POLICY_INIT) {
-        return sugov_init(policy);
-    } else if (policy->governor_data) {
-        switch (event) {
-        case CPUFREQ_GOV_POLICY_EXIT:
-            return sugov_exit(policy);
-        case CPUFREQ_GOV_START:
-            return sugov_start(policy);
-        case CPUFREQ_GOV_STOP:
-            return sugov_stop(policy);
-        case CPUFREQ_GOV_LIMITS:
-            return sugov_limits(policy);
-        }
-    }
-    return -EINVAL;
 }

 static struct cpufreq_governor schedutil_gov = {
     .name = "schedutil",
-    .governor = sugov_governor,
     .owner = THIS_MODULE,
+    .init = sugov_init,
+    .exit = sugov_exit,
+    .start = sugov_start,
+    .stop = sugov_stop,
+    .limits = sugov_limits,
 };

 static int __init sugov_module_init(void)