Merge back earlier cpufreq material for v4.3.

commit c6e53c69ef
@@ -784,9 +784,7 @@ acpi_processor_register_performance(struct acpi_processor_performance
 
 EXPORT_SYMBOL(acpi_processor_register_performance);
 
-void
-acpi_processor_unregister_performance(struct acpi_processor_performance
-                                      *performance, unsigned int cpu)
+void acpi_processor_unregister_performance(unsigned int cpu)
 {
         struct acpi_processor *pr;
 
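Every later hunk in this merge updates callers to the new one-argument form above: the unused performance pointer is gone and only the CPU number remains. A minimal before/after caller sketch; the driver state here is hypothetical, not code from this commit:

    #include <linux/slab.h>
    #include <linux/cpumask.h>
    #include <acpi/processor.h>

    /* hypothetical driver-private state, for illustration only */
    static struct acpi_processor_performance *example_perf;

    static void example_driver_exit(unsigned int cpu)
    {
            /* pre-v4.3: acpi_processor_unregister_performance(example_perf, cpu); */
            acpi_processor_unregister_performance(cpu);     /* new form: CPU id only */
            free_cpumask_var(example_perf->shared_cpu_map);
            kfree(example_perf);
            example_perf = NULL;
    }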
@@ -65,18 +65,21 @@ enum {
 #define MSR_K7_HWCR_CPB_DIS (1ULL << 25)
 
 struct acpi_cpufreq_data {
-        struct acpi_processor_performance *acpi_data;
         struct cpufreq_frequency_table *freq_table;
         unsigned int resume;
         unsigned int cpu_feature;
+        unsigned int acpi_perf_cpu;
         cpumask_var_t freqdomain_cpus;
 };
 
-static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
-
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance __percpu *acpi_perf_data;
 
+static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
+{
+        return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
+}
+
 static struct cpufreq_driver acpi_cpufreq_driver;
 
 static unsigned int acpi_pstate_strict;
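The struct no longer caches an acpi_data pointer; it records the owning CPU and to_perf_data() resolves the per-CPU performance data on each use. A standalone sketch of that per_cpu_ptr accessor pattern, with illustrative names rather than the driver's:

    #include <linux/percpu.h>

    struct example_state {
            int counter;                    /* per-CPU payload */
    };

    static struct example_state __percpu *example_data;

    struct example_ctx {
            unsigned int owner_cpu;         /* store the CPU id, not a pointer */
    };

    static inline struct example_state *ctx_to_state(struct example_ctx *ctx)
    {
            /* resolve the per-CPU slot on demand, as to_perf_data() does */
            return per_cpu_ptr(example_data, ctx->owner_cpu);
    }

Storing only the CPU id avoids carrying a second cached pointer that could go stale.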
@@ -144,7 +147,7 @@ static int _store_boost(int val)
 
 static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
 {
-        struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+        struct acpi_cpufreq_data *data = policy->driver_data;
 
         return cpufreq_show_cpus(data->freqdomain_cpus, buf);
 }
@@ -202,7 +205,7 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
         struct acpi_processor_performance *perf;
         int i;
 
-        perf = data->acpi_data;
+        perf = to_perf_data(data);
 
         for (i = 0; i < perf->state_count; i++) {
                 if (value == perf->states[i].status)
@@ -221,7 +224,7 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
         else
                 msr &= INTEL_MSR_RANGE;
 
-        perf = data->acpi_data;
+        perf = to_perf_data(data);
 
         cpufreq_for_each_entry(pos, data->freq_table)
                 if (msr == perf->states[pos->driver_data].status)
@@ -327,7 +330,8 @@ static void drv_write(struct drv_cmd *cmd)
         put_cpu();
 }
 
-static u32 get_cur_val(const struct cpumask *mask)
+static u32
+get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
 {
         struct acpi_processor_performance *perf;
         struct drv_cmd cmd;
@@ -335,7 +339,7 @@ static u32 get_cur_val(const struct cpumask *mask)
         if (unlikely(cpumask_empty(mask)))
                 return 0;
 
-        switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
+        switch (data->cpu_feature) {
         case SYSTEM_INTEL_MSR_CAPABLE:
                 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
                 cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
@@ -346,7 +350,7 @@ static u32 get_cur_val(const struct cpumask *mask)
                 break;
         case SYSTEM_IO_CAPABLE:
                 cmd.type = SYSTEM_IO_CAPABLE;
-                perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
+                perf = to_perf_data(data);
                 cmd.addr.io.port = perf->control_register.address;
                 cmd.addr.io.bit_width = perf->control_register.bit_width;
                 break;
@@ -364,19 +368,24 @@ static u32 get_cur_val(const struct cpumask *mask)
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
-        struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
+        struct acpi_cpufreq_data *data;
+        struct cpufreq_policy *policy;
         unsigned int freq;
         unsigned int cached_freq;
 
         pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
 
-        if (unlikely(data == NULL ||
-                     data->acpi_data == NULL || data->freq_table == NULL)) {
+        policy = cpufreq_cpu_get(cpu);
+        if (unlikely(!policy))
                 return 0;
-        }
 
-        cached_freq = data->freq_table[data->acpi_data->state].frequency;
-        freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
+        data = policy->driver_data;
+        cpufreq_cpu_put(policy);
+        if (unlikely(!data || !data->freq_table))
+                return 0;
+
+        cached_freq = data->freq_table[to_perf_data(data)->state].frequency;
+        freq = extract_freq(get_cur_val(cpumask_of(cpu), data), data);
         if (freq != cached_freq) {
                 /*
                  * The dreaded BIOS frequency change behind our back.
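Note how the rewritten get_cur_freq_on_cpu() obtains its driver data: it takes a reference on the policy with cpufreq_cpu_get(), copies the driver_data pointer, and drops the reference with cpufreq_cpu_put(). A hedged sketch of that discipline; the work done with the data is a placeholder:

    #include <linux/cpufreq.h>

    static unsigned int example_get(unsigned int cpu)
    {
            struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
            void *data;

            if (!policy)
                    return 0;               /* no policy bound to this CPU */

            data = policy->driver_data;     /* driver-private state */
            cpufreq_cpu_put(policy);        /* drop the kobject reference */

            return data ? 1 : 0;            /* placeholder for real work */
    }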
@@ -397,7 +406,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
         unsigned int i;
 
         for (i = 0; i < 100; i++) {
-                cur_freq = extract_freq(get_cur_val(mask), data);
+                cur_freq = extract_freq(get_cur_val(mask, data), data);
                 if (cur_freq == freq)
                         return 1;
                 udelay(10);
@@ -408,18 +417,17 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
 static int acpi_cpufreq_target(struct cpufreq_policy *policy,
                                unsigned int index)
 {
-        struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+        struct acpi_cpufreq_data *data = policy->driver_data;
         struct acpi_processor_performance *perf;
         struct drv_cmd cmd;
         unsigned int next_perf_state = 0; /* Index into perf table */
         int result = 0;
 
-        if (unlikely(data == NULL ||
-                     data->acpi_data == NULL || data->freq_table == NULL)) {
+        if (unlikely(data == NULL || data->freq_table == NULL)) {
                 return -ENODEV;
         }
 
-        perf = data->acpi_data;
+        perf = to_perf_data(data);
         next_perf_state = data->freq_table[index].driver_data;
         if (perf->state == next_perf_state) {
                 if (unlikely(data->resume)) {
@@ -482,8 +490,9 @@ out:
 static unsigned long
 acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
 {
-        struct acpi_processor_performance *perf = data->acpi_data;
+        struct acpi_processor_performance *perf;
 
+        perf = to_perf_data(data);
         if (cpu_khz) {
                 /* search the closest match to cpu_khz */
                 unsigned int i;
@@ -672,17 +681,17 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                 goto err_free;
         }
 
-        data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
-        per_cpu(acfreq_data, cpu) = data;
+        perf = per_cpu_ptr(acpi_perf_data, cpu);
+        data->acpi_perf_cpu = cpu;
+        policy->driver_data = data;
 
         if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
                 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
 
-        result = acpi_processor_register_performance(data->acpi_data, cpu);
+        result = acpi_processor_register_performance(perf, cpu);
         if (result)
                 goto err_free_mask;
 
-        perf = data->acpi_data;
         policy->shared_type = perf->shared_type;
 
         /*
@@ -838,26 +847,25 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 err_freqfree:
         kfree(data->freq_table);
 err_unreg:
-        acpi_processor_unregister_performance(perf, cpu);
+        acpi_processor_unregister_performance(cpu);
 err_free_mask:
         free_cpumask_var(data->freqdomain_cpus);
 err_free:
         kfree(data);
-        per_cpu(acfreq_data, cpu) = NULL;
+        policy->driver_data = NULL;
 
         return result;
 }
 
 static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-        struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+        struct acpi_cpufreq_data *data = policy->driver_data;
 
         pr_debug("acpi_cpufreq_cpu_exit\n");
 
         if (data) {
-                per_cpu(acfreq_data, policy->cpu) = NULL;
-                acpi_processor_unregister_performance(data->acpi_data,
-                                                      policy->cpu);
+                policy->driver_data = NULL;
+                acpi_processor_unregister_performance(data->acpi_perf_cpu);
                 free_cpumask_var(data->freqdomain_cpus);
                 kfree(data->freq_table);
                 kfree(data);
@@ -868,7 +876,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 
 static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 {
-        struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+        struct acpi_cpufreq_data *data = policy->driver_data;
 
         pr_debug("acpi_cpufreq_resume\n");
 
@@ -880,7 +888,9 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 static struct freq_attr *acpi_cpufreq_attr[] = {
         &cpufreq_freq_attr_scaling_available_freqs,
         &freqdomain_cpus,
-        NULL,   /* this is a placeholder for cpb, do not remove */
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+        &cpb,
+#endif
         NULL,
 };
@@ -953,17 +963,16 @@ static int __init acpi_cpufreq_init(void)
          * only if configured. This is considered legacy code, which
          * will probably be removed at some point in the future.
          */
-        if (check_amd_hwpstate_cpu(0)) {
-                struct freq_attr **iter;
+        if (!check_amd_hwpstate_cpu(0)) {
+                struct freq_attr **attr;
 
-                pr_debug("adding sysfs entry for cpb\n");
+                pr_debug("CPB unsupported, do not expose it\n");
 
-                for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
-                        ;
-
-                /* make sure there is a terminator behind it */
-                if (iter[1] == NULL)
-                        *iter = &cpb;
+                for (attr = acpi_cpufreq_attr; *attr; attr++)
+                        if (*attr == &cpb) {
+                                *attr = NULL;
+                                break;
+                        }
         }
 #endif
         acpi_cpufreq_boost_init();
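The logic above is inverted by this merge: &cpb is now present in acpi_cpufreq_attr at build time (under CONFIG_X86_ACPI_CPUFREQ_CPB) and is filtered out at runtime on CPUs without hardware P-state support, instead of being spliced into a reserved placeholder slot when supported. Since cpb sits immediately before the terminating NULL, blanking its slot still leaves a well-terminated array. A small standalone sketch of the same filter over a NULL-terminated pointer array:

    static const char cpb_name[] = "cpb";
    static const char *attrs[] = {
            "scaling_available_freqs", "freqdomain_cpus", cpb_name, NULL,
    };

    static void drop_cpb(void)
    {
            const char **p;

            for (p = attrs; *p; p++)
                    if (*p == cpb_name) {   /* address compare, like *attr == &cpb */
                            *p = NULL;      /* slot precedes the terminator, so this is safe */
                            break;
                    }
    }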
@@ -112,12 +112,6 @@ static inline bool has_target(void)
         return cpufreq_driver->target_index || cpufreq_driver->target;
 }
 
-/*
- * rwsem to guarantee that cpufreq driver module doesn't unload during critical
- * sections
- */
-static DECLARE_RWSEM(cpufreq_rwsem);
-
 /* internal prototypes */
 static int __cpufreq_governor(struct cpufreq_policy *policy,
                               unsigned int event);
@@ -277,10 +271,6 @@ EXPORT_SYMBOL_GPL(cpufreq_generic_get);
  * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
  * freed as that depends on the kobj count.
  *
- * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
- * valid policy is found. This is done to make sure the driver doesn't get
- * unregistered while the policy is being used.
- *
  * Return: A valid policy on success, otherwise NULL on failure.
  */
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
@@ -291,9 +281,6 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
         if (WARN_ON(cpu >= nr_cpu_ids))
                 return NULL;
 
-        if (!down_read_trylock(&cpufreq_rwsem))
-                return NULL;
-
         /* get the cpufreq driver */
         read_lock_irqsave(&cpufreq_driver_lock, flags);
 
@@ -306,9 +293,6 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 
         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-        if (!policy)
-                up_read(&cpufreq_rwsem);
-
         return policy;
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
@@ -320,13 +304,10 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
  *
  * This decrements the kobject reference count incremented earlier by calling
  * cpufreq_cpu_get().
- *
- * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
  */
 void cpufreq_cpu_put(struct cpufreq_policy *policy)
 {
         kobject_put(&policy->kobj);
-        up_read(&cpufreq_rwsem);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
 
@@ -851,9 +832,6 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
         struct freq_attr *fattr = to_attr(attr);
         ssize_t ret;
 
-        if (!down_read_trylock(&cpufreq_rwsem))
-                return -EINVAL;
-
         down_read(&policy->rwsem);
 
         if (fattr->show)
@@ -862,7 +840,6 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
                 ret = -EIO;
 
         up_read(&policy->rwsem);
-        up_read(&cpufreq_rwsem);
 
         return ret;
 }
@@ -879,9 +856,6 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
         if (!cpu_online(policy->cpu))
                 goto unlock;
 
-        if (!down_read_trylock(&cpufreq_rwsem))
-                goto unlock;
-
         down_write(&policy->rwsem);
 
         /* Updating inactive policies is invalid, so avoid doing that. */
@@ -897,8 +871,6 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 
 unlock_policy_rwsem:
         up_write(&policy->rwsem);
-
-        up_read(&cpufreq_rwsem);
 unlock:
         put_online_cpus();
 
@@ -1060,11 +1032,10 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
         return cpufreq_add_dev_symlink(policy);
 }
 
-static void cpufreq_init_policy(struct cpufreq_policy *policy)
+static int cpufreq_init_policy(struct cpufreq_policy *policy)
 {
         struct cpufreq_governor *gov = NULL;
         struct cpufreq_policy new_policy;
-        int ret = 0;
 
         memcpy(&new_policy, policy, sizeof(*policy));
 
@@ -1083,12 +1054,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
                 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
 
         /* set default policy */
-        ret = cpufreq_set_policy(policy, &new_policy);
-        if (ret) {
-                pr_debug("setting policy failed\n");
-                if (cpufreq_driver->exit)
-                        cpufreq_driver->exit(policy);
-        }
+        return cpufreq_set_policy(policy, &new_policy);
 }
 
 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
@@ -1276,15 +1242,11 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
                         ? add_cpu_dev_symlink(policy, cpu) : 0;
         }
 
-        if (!down_read_trylock(&cpufreq_rwsem))
-                return 0;
-
         /* Check if this CPU already has a policy to manage it */
         policy = per_cpu(cpufreq_cpu_data, cpu);
         if (policy && !policy_is_inactive(policy)) {
                 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
                 ret = cpufreq_add_policy_cpu(policy, cpu, dev);
-                up_read(&cpufreq_rwsem);
                 return ret;
         }
 
@@ -1297,7 +1259,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
                 recover_policy = false;
                 policy = cpufreq_policy_alloc(dev);
                 if (!policy)
-                        goto nomem_out;
+                        goto out_release_rwsem;
         }
 
         cpumask_copy(policy->cpus, cpumask_of(cpu));
@@ -1308,7 +1270,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
         ret = cpufreq_driver->init(policy);
         if (ret) {
                 pr_debug("initialization failed\n");
-                goto err_set_policy_cpu;
+                goto out_free_policy;
         }
 
         down_write(&policy->rwsem);
@@ -1340,7 +1302,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
                 policy->cur = cpufreq_driver->get(policy->cpu);
                 if (!policy->cur) {
                         pr_err("%s: ->get() failed\n", __func__);
-                        goto err_get_freq;
+                        goto out_exit_policy;
                 }
         }
 
@@ -1390,7 +1352,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
         if (!recover_policy) {
                 ret = cpufreq_add_dev_interface(policy, dev);
                 if (ret)
-                        goto err_out_unregister;
+                        goto out_exit_policy;
                 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                 CPUFREQ_CREATE_POLICY, policy);
 
@@ -1399,7 +1361,12 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
         }
 
-        cpufreq_init_policy(policy);
+        ret = cpufreq_init_policy(policy);
+        if (ret) {
+                pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
+                       __func__, cpu, ret);
+                goto out_remove_policy_notify;
+        }
 
         if (!recover_policy) {
                 policy->user_policy.policy = policy->policy;
|
||||
|
||||
kobject_uevent(&policy->kobj, KOBJ_ADD);
|
||||
|
||||
up_read(&cpufreq_rwsem);
|
||||
|
||||
/* Callback for handling stuff after policy is ready */
|
||||
if (cpufreq_driver->ready)
|
||||
cpufreq_driver->ready(policy);
|
||||
@ -1419,17 +1384,17 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
|
||||
|
||||
return 0;
|
||||
|
||||
err_out_unregister:
|
||||
err_get_freq:
|
||||
out_remove_policy_notify:
|
||||
/* cpufreq_policy_free() will notify based on this */
|
||||
recover_policy = true;
|
||||
out_exit_policy:
|
||||
up_write(&policy->rwsem);
|
||||
|
||||
if (cpufreq_driver->exit)
|
||||
cpufreq_driver->exit(policy);
|
||||
err_set_policy_cpu:
|
||||
out_free_policy:
|
||||
cpufreq_policy_free(policy, recover_policy);
|
||||
nomem_out:
|
||||
up_read(&cpufreq_rwsem);
|
||||
|
||||
out_release_rwsem:
|
||||
return ret;
|
||||
}
|
||||
|
||||
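The relabeled error paths above follow the kernel's reverse-order goto unwinding: each later failure jumps to a label that undoes everything initialized before it. A generic sketch of the idiom; the acquire/release steps are hypothetical:

    int acquire_a(void), acquire_b(void), acquire_c(void);  /* hypothetical */
    void release_a(void), release_b(void);

    static int example_setup(void)
    {
            int ret;

            ret = acquire_a();
            if (ret)
                    goto out;
            ret = acquire_b();
            if (ret)
                    goto out_release_a;
            ret = acquire_c();
            if (ret)
                    goto out_release_b;
            return 0;

    out_release_b:
            release_b();            /* undo in reverse order of setup */
    out_release_a:
            release_a();
    out:
            return ret;
    }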
@@ -2296,16 +2261,31 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
         old_gov = policy->governor;
         /* end old governor */
         if (old_gov) {
-                __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+                if (ret) {
+                        /* This can happen due to race with other operations */
+                        pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
+                                 __func__, old_gov->name, ret);
+                        return ret;
+                }
+
                 up_write(&policy->rwsem);
-                __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+                ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
                 down_write(&policy->rwsem);
+
+                if (ret) {
+                        pr_err("%s: Failed to Exit Governor: %s (%d)\n",
+                               __func__, old_gov->name, ret);
+                        return ret;
+                }
         }
 
         /* start new governor */
         policy->governor = new_policy->governor;
-        if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
-                if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
+        ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+        if (!ret) {
+                ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+                if (!ret)
                         goto out;
 
                 up_write(&policy->rwsem);
@@ -2317,11 +2297,13 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
         pr_debug("starting governor %s failed\n", policy->governor->name);
         if (old_gov) {
                 policy->governor = old_gov;
-                __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
-                __cpufreq_governor(policy, CPUFREQ_GOV_START);
+                if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
+                        policy->governor = NULL;
+                else
+                        __cpufreq_governor(policy, CPUFREQ_GOV_START);
         }
 
-        return -EINVAL;
+        return ret;
 
 out:
         pr_debug("governor: change or update limits\n");
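cpufreq_set_policy() now checks every governor transition instead of assuming success: a governor change is STOP then POLICY_EXIT on the old governor, POLICY_INIT then START on the new one, with rollback if the new governor fails. A condensed sketch of that sequence using the real CPUFREQ_GOV_* events; gov_event() is a stand-in for the static __cpufreq_governor(), and the rwsem juggling around EXIT in the real code is omitted:

    #include <linux/cpufreq.h>

    int gov_event(struct cpufreq_policy *policy, unsigned int event); /* stand-in */

    static int switch_governor(struct cpufreq_policy *policy,
                               struct cpufreq_governor *new_gov)
    {
            struct cpufreq_governor *old_gov = policy->governor;
            int ret;

            if (old_gov) {
                    ret = gov_event(policy, CPUFREQ_GOV_STOP);
                    if (ret)
                            return ret;     /* can race with other operations */
                    ret = gov_event(policy, CPUFREQ_GOV_POLICY_EXIT);
                    if (ret)
                            return ret;
            }

            policy->governor = new_gov;
            ret = gov_event(policy, CPUFREQ_GOV_POLICY_INIT);
            if (!ret) {
                    ret = gov_event(policy, CPUFREQ_GOV_START);
                    if (!ret)
                            return 0;
                    gov_event(policy, CPUFREQ_GOV_POLICY_EXIT);
            }

            /* restore the old governor, as the hunk above does */
            if (old_gov) {
                    policy->governor = old_gov;
                    if (gov_event(policy, CPUFREQ_GOV_POLICY_INIT))
                            policy->governor = NULL;
                    else
                            gov_event(policy, CPUFREQ_GOV_START);
            }
            return ret;
    }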
@@ -2588,19 +2570,20 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 
         pr_debug("unregistering driver %s\n", driver->name);
 
+        /* Protect against concurrent cpu hotplug */
+        get_online_cpus();
         subsys_interface_unregister(&cpufreq_interface);
         if (cpufreq_boost_supported())
                 cpufreq_sysfs_remove_file(&boost.attr);
 
         unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
 
-        down_write(&cpufreq_rwsem);
         write_lock_irqsave(&cpufreq_driver_lock, flags);
 
         cpufreq_driver = NULL;
 
         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-        up_write(&cpufreq_rwsem);
+        put_online_cpus();
 
         return 0;
 }
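With cpufreq_rwsem gone, cpufreq_unregister_driver() keeps concurrent CPU hotplug away from the teardown with a get_online_cpus()/put_online_cpus() bracket instead. A minimal sketch of the bracketing pattern; the middle is a placeholder:

    #include <linux/cpu.h>

    static void example_teardown(void)
    {
            get_online_cpus();      /* hold off CPU hotplug */

            /* ... unregister interfaces and clear driver globals ... */

            put_online_cpus();      /* allow hotplug again */
    }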
@@ -47,7 +47,7 @@ static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
 static void cs_check_cpu(int cpu, unsigned int load)
 {
         struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
-        struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+        struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
         struct dbs_data *dbs_data = policy->governor_data;
         struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 
@@ -102,26 +102,15 @@ static void cs_check_cpu(int cpu, unsigned int load)
         }
 }
 
-static void cs_dbs_timer(struct work_struct *work)
+static unsigned int cs_dbs_timer(struct cpu_dbs_info *cdbs,
+                                 struct dbs_data *dbs_data, bool modify_all)
 {
-        struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
-                        struct cs_cpu_dbs_info_s, cdbs.work.work);
-        unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
-        struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
-                        cpu);
-        struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
         struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-        int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
-        bool modify_all = true;
 
-        mutex_lock(&core_dbs_info->cdbs.timer_mutex);
-        if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
-                modify_all = false;
-        else
-                dbs_check_cpu(dbs_data, cpu);
+        if (modify_all)
+                dbs_check_cpu(dbs_data, cdbs->shared->policy->cpu);
 
-        gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
-        mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
+        return delay_for_sampling_rate(cs_tuners->sampling_rate);
 }
 
 static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
@@ -135,7 +124,7 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
         if (!dbs_info->enable)
                 return 0;
 
-        policy = dbs_info->cdbs.cur_policy;
+        policy = dbs_info->cdbs.shared->policy;
 
         /*
          * we only care if our internally tracked freq moves outside the 'valid'
@@ -32,10 +32,10 @@ static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
 
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 {
-        struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+        struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
         struct od_dbs_tuners *od_tuners = dbs_data->tuners;
         struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-        struct cpufreq_policy *policy;
+        struct cpufreq_policy *policy = cdbs->shared->policy;
         unsigned int sampling_rate;
         unsigned int max_load = 0;
         unsigned int ignore_nice;
|
||||
ignore_nice = cs_tuners->ignore_nice_load;
|
||||
}
|
||||
|
||||
policy = cdbs->cur_policy;
|
||||
|
||||
/* Get Absolute Load */
|
||||
for_each_cpu(j, policy->cpus) {
|
||||
struct cpu_dbs_common_info *j_cdbs;
|
||||
struct cpu_dbs_info *j_cdbs;
|
||||
u64 cur_wall_time, cur_idle_time;
|
||||
unsigned int idle_time, wall_time;
|
||||
unsigned int load;
|
||||
@@ -163,9 +161,9 @@ EXPORT_SYMBOL_GPL(dbs_check_cpu);
 static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
                                     unsigned int delay)
 {
-        struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+        struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
 
-        mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
+        mod_delayed_work_on(cpu, system_wq, &cdbs->dwork, delay);
 }
 
 void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
|
||||
static inline void gov_cancel_work(struct dbs_data *dbs_data,
|
||||
struct cpufreq_policy *policy)
|
||||
{
|
||||
struct cpu_dbs_common_info *cdbs;
|
||||
struct cpu_dbs_info *cdbs;
|
||||
int i;
|
||||
|
||||
for_each_cpu(i, policy->cpus) {
|
||||
cdbs = dbs_data->cdata->get_cpu_cdbs(i);
|
||||
cancel_delayed_work_sync(&cdbs->work);
|
||||
cancel_delayed_work_sync(&cdbs->dwork);
|
||||
}
|
||||
}
|
||||
|
||||
/* Will return if we need to evaluate cpu load again or not */
|
||||
bool need_load_eval(struct cpu_dbs_common_info *cdbs,
|
||||
unsigned int sampling_rate)
|
||||
static bool need_load_eval(struct cpu_common_dbs_info *shared,
|
||||
unsigned int sampling_rate)
|
||||
{
|
||||
if (policy_is_shared(cdbs->cur_policy)) {
|
||||
if (policy_is_shared(shared->policy)) {
|
||||
ktime_t time_now = ktime_get();
|
||||
s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);
|
||||
s64 delta_us = ktime_us_delta(time_now, shared->time_stamp);
|
||||
|
||||
/* Do nothing if we recently have sampled */
|
||||
if (delta_us < (s64)(sampling_rate / 2))
|
||||
return false;
|
||||
else
|
||||
cdbs->time_stamp = time_now;
|
||||
shared->time_stamp = time_now;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(need_load_eval);
|
||||
|
||||
static void dbs_timer(struct work_struct *work)
|
||||
{
|
||||
struct cpu_dbs_info *cdbs = container_of(work, struct cpu_dbs_info,
|
||||
dwork.work);
|
||||
struct cpu_common_dbs_info *shared = cdbs->shared;
|
||||
struct cpufreq_policy *policy = shared->policy;
|
||||
struct dbs_data *dbs_data = policy->governor_data;
|
||||
unsigned int sampling_rate, delay;
|
||||
bool modify_all = true;
|
||||
|
||||
mutex_lock(&shared->timer_mutex);
|
||||
|
||||
if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
|
||||
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
|
||||
|
||||
sampling_rate = cs_tuners->sampling_rate;
|
||||
} else {
|
||||
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
|
||||
|
||||
sampling_rate = od_tuners->sampling_rate;
|
||||
}
|
||||
|
||||
if (!need_load_eval(cdbs->shared, sampling_rate))
|
||||
modify_all = false;
|
||||
|
||||
delay = dbs_data->cdata->gov_dbs_timer(cdbs, dbs_data, modify_all);
|
||||
gov_queue_work(dbs_data, policy, delay, modify_all);
|
||||
|
||||
mutex_unlock(&shared->timer_mutex);
|
||||
}
|
||||
|
||||
static void set_sampling_rate(struct dbs_data *dbs_data,
|
||||
unsigned int sampling_rate)
|
||||
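Both governors' timer handlers collapse into the one dbs_timer() above, which recovers its per-CPU context from the work_struct with container_of() and delegates only the delay computation to the governor's gov_dbs_timer() callback. A self-contained userspace demonstration of the container_of() recovery step:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work { int pending; };

    struct worker {
            int cpu;
            struct work w;          /* embedded, like cpu_dbs_info::dwork */
    };

    static void handler(struct work *w)
    {
            /* step from the embedded member back to its container */
            struct worker *wk = container_of(w, struct worker, w);

            printf("work ran for cpu %d\n", wk->cpu);
    }

    int main(void)
    {
            struct worker wk = { .cpu = 3 };

            handler(&wk.w);
            return 0;
    }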
@@ -239,6 +267,37 @@ static void set_sampling_rate(struct dbs_data *dbs_data,
         }
 }
 
+static int alloc_common_dbs_info(struct cpufreq_policy *policy,
+                                 struct common_dbs_data *cdata)
+{
+        struct cpu_common_dbs_info *shared;
+        int j;
+
+        /* Allocate memory for the common information for policy->cpus */
+        shared = kzalloc(sizeof(*shared), GFP_KERNEL);
+        if (!shared)
+                return -ENOMEM;
+
+        /* Set shared for all CPUs, online+offline */
+        for_each_cpu(j, policy->related_cpus)
+                cdata->get_cpu_cdbs(j)->shared = shared;
+
+        return 0;
+}
+
+static void free_common_dbs_info(struct cpufreq_policy *policy,
+                                 struct common_dbs_data *cdata)
+{
+        struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
+        struct cpu_common_dbs_info *shared = cdbs->shared;
+        int j;
+
+        for_each_cpu(j, policy->cpus)
+                cdata->get_cpu_cdbs(j)->shared = NULL;
+
+        kfree(shared);
+}
+
 static int cpufreq_governor_init(struct cpufreq_policy *policy,
                                  struct dbs_data *dbs_data,
                                  struct common_dbs_data *cdata)
@@ -246,9 +305,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
         unsigned int latency;
         int ret;
 
+        /* State should be equivalent to EXIT */
+        if (policy->governor_data)
+                return -EBUSY;
+
         if (dbs_data) {
                 if (WARN_ON(have_governor_per_policy()))
                         return -EINVAL;
+
+                ret = alloc_common_dbs_info(policy, cdata);
+                if (ret)
+                        return ret;
+
                 dbs_data->usage_count++;
                 policy->governor_data = dbs_data;
                 return 0;
@@ -258,12 +326,16 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
         if (!dbs_data)
                 return -ENOMEM;
 
+        ret = alloc_common_dbs_info(policy, cdata);
+        if (ret)
+                goto free_dbs_data;
+
         dbs_data->cdata = cdata;
         dbs_data->usage_count = 1;
 
         ret = cdata->init(dbs_data, !policy->governor->initialized);
         if (ret)
-                goto free_dbs_data;
+                goto free_common_dbs_info;
 
         /* policy latency is in ns. Convert it to us first */
         latency = policy->cpuinfo.transition_latency / 1000;
@@ -300,15 +372,22 @@ put_kobj:
         }
 cdata_exit:
         cdata->exit(dbs_data, !policy->governor->initialized);
+free_common_dbs_info:
+        free_common_dbs_info(policy, cdata);
 free_dbs_data:
         kfree(dbs_data);
         return ret;
 }
 
-static void cpufreq_governor_exit(struct cpufreq_policy *policy,
-                                  struct dbs_data *dbs_data)
+static int cpufreq_governor_exit(struct cpufreq_policy *policy,
+                                 struct dbs_data *dbs_data)
 {
         struct common_dbs_data *cdata = dbs_data->cdata;
+        struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
+
+        /* State should be equivalent to INIT */
+        if (!cdbs->shared || cdbs->shared->policy)
+                return -EBUSY;
 
         policy->governor_data = NULL;
         if (!--dbs_data->usage_count) {
@@ -323,6 +402,9 @@ static void cpufreq_governor_exit(struct cpufreq_policy *policy,
                 cdata->exit(dbs_data, policy->governor->initialized == 1);
                 kfree(dbs_data);
         }
+
+        free_common_dbs_info(policy, cdata);
+        return 0;
 }
 
 static int cpufreq_governor_start(struct cpufreq_policy *policy,
@@ -330,12 +412,17 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
 {
         struct common_dbs_data *cdata = dbs_data->cdata;
         unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
-        struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
+        struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
+        struct cpu_common_dbs_info *shared = cdbs->shared;
         int io_busy = 0;
 
         if (!policy->cur)
                 return -EINVAL;
 
+        /* State should be equivalent to INIT */
+        if (!shared || shared->policy)
+                return -EBUSY;
+
         if (cdata->governor == GOV_CONSERVATIVE) {
                 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 
@@ -349,12 +436,14 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
                 io_busy = od_tuners->io_is_busy;
         }
 
+        shared->policy = policy;
+        shared->time_stamp = ktime_get();
+        mutex_init(&shared->timer_mutex);
+
         for_each_cpu(j, policy->cpus) {
-                struct cpu_dbs_common_info *j_cdbs = cdata->get_cpu_cdbs(j);
+                struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j);
                 unsigned int prev_load;
 
-                j_cdbs->cpu = j;
-                j_cdbs->cur_policy = policy;
                 j_cdbs->prev_cpu_idle =
                         get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);
 
@@ -366,8 +455,7 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
                 if (ignore_nice)
                         j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 
-                mutex_init(&j_cdbs->timer_mutex);
-                INIT_DEFERRABLE_WORK(&j_cdbs->work, cdata->gov_dbs_timer);
+                INIT_DEFERRABLE_WORK(&j_cdbs->dwork, dbs_timer);
         }
 
         if (cdata->governor == GOV_CONSERVATIVE) {
|
||||
od_ops->powersave_bias_init_cpu(cpu);
|
||||
}
|
||||
|
||||
/* Initiate timer time stamp */
|
||||
cpu_cdbs->time_stamp = ktime_get();
|
||||
|
||||
gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate),
|
||||
true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cpufreq_governor_stop(struct cpufreq_policy *policy,
|
||||
struct dbs_data *dbs_data)
|
||||
static int cpufreq_governor_stop(struct cpufreq_policy *policy,
|
||||
struct dbs_data *dbs_data)
|
||||
{
|
||||
struct common_dbs_data *cdata = dbs_data->cdata;
|
||||
unsigned int cpu = policy->cpu;
|
||||
struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
|
||||
struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
|
||||
struct cpu_common_dbs_info *shared = cdbs->shared;
|
||||
|
||||
/* State should be equivalent to START */
|
||||
if (!shared || !shared->policy)
|
||||
return -EBUSY;
|
||||
|
||||
gov_cancel_work(dbs_data, policy);
|
||||
|
||||
if (cdata->governor == GOV_CONSERVATIVE) {
|
||||
struct cs_cpu_dbs_info_s *cs_dbs_info =
|
||||
@@ -408,38 +500,40 @@ static void cpufreq_governor_stop(struct cpufreq_policy *policy,
                 cs_dbs_info->enable = 0;
         }
 
-        gov_cancel_work(dbs_data, policy);
-
-        mutex_destroy(&cpu_cdbs->timer_mutex);
-        cpu_cdbs->cur_policy = NULL;
+        shared->policy = NULL;
+        mutex_destroy(&shared->timer_mutex);
+        return 0;
 }
 
-static void cpufreq_governor_limits(struct cpufreq_policy *policy,
-                                    struct dbs_data *dbs_data)
+static int cpufreq_governor_limits(struct cpufreq_policy *policy,
+                                   struct dbs_data *dbs_data)
 {
         struct common_dbs_data *cdata = dbs_data->cdata;
         unsigned int cpu = policy->cpu;
-        struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
+        struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
 
-        if (!cpu_cdbs->cur_policy)
-                return;
+        /* State should be equivalent to START */
+        if (!cdbs->shared || !cdbs->shared->policy)
+                return -EBUSY;
 
-        mutex_lock(&cpu_cdbs->timer_mutex);
-        if (policy->max < cpu_cdbs->cur_policy->cur)
-                __cpufreq_driver_target(cpu_cdbs->cur_policy, policy->max,
+        mutex_lock(&cdbs->shared->timer_mutex);
+        if (policy->max < cdbs->shared->policy->cur)
+                __cpufreq_driver_target(cdbs->shared->policy, policy->max,
                                         CPUFREQ_RELATION_H);
-        else if (policy->min > cpu_cdbs->cur_policy->cur)
-                __cpufreq_driver_target(cpu_cdbs->cur_policy, policy->min,
+        else if (policy->min > cdbs->shared->policy->cur)
+                __cpufreq_driver_target(cdbs->shared->policy, policy->min,
                                         CPUFREQ_RELATION_L);
         dbs_check_cpu(dbs_data, cpu);
-        mutex_unlock(&cpu_cdbs->timer_mutex);
+        mutex_unlock(&cdbs->shared->timer_mutex);
+
+        return 0;
 }
 
 int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                          struct common_dbs_data *cdata, unsigned int event)
 {
         struct dbs_data *dbs_data;
-        int ret = 0;
+        int ret;
 
         /* Lock governor to block concurrent initialization of governor */
         mutex_lock(&cdata->mutex);
@@ -449,7 +543,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
         else
                 dbs_data = cdata->gdbs_data;
 
-        if (WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT))) {
+        if (!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT)) {
                 ret = -EINVAL;
                 goto unlock;
         }
@@ -459,17 +553,19 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 ret = cpufreq_governor_init(policy, dbs_data, cdata);
                 break;
         case CPUFREQ_GOV_POLICY_EXIT:
-                cpufreq_governor_exit(policy, dbs_data);
+                ret = cpufreq_governor_exit(policy, dbs_data);
                 break;
         case CPUFREQ_GOV_START:
                 ret = cpufreq_governor_start(policy, dbs_data);
                 break;
         case CPUFREQ_GOV_STOP:
-                cpufreq_governor_stop(policy, dbs_data);
+                ret = cpufreq_governor_stop(policy, dbs_data);
                 break;
         case CPUFREQ_GOV_LIMITS:
-                cpufreq_governor_limits(policy, dbs_data);
+                ret = cpufreq_governor_limits(policy, dbs_data);
                 break;
         default:
                 ret = -EINVAL;
         }
 
 unlock:
@@ -109,7 +109,7 @@ store_one(_gov, file_name)
 
 /* create helper routines */
 #define define_get_cpu_dbs_routines(_dbs_info)                          \
-static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu)                \
+static struct cpu_dbs_info *get_cpu_cdbs(int cpu)                       \
 {                                                                       \
         return &per_cpu(_dbs_info, cpu).cdbs;                           \
 }                                                                       \
@@ -128,9 +128,20 @@ static void *get_cpu_dbs_info_s(int cpu)                                \
  * cs_*: Conservative governor
  */
 
+/* Common to all CPUs of a policy */
+struct cpu_common_dbs_info {
+        struct cpufreq_policy *policy;
+        /*
+         * percpu mutex that serializes governor limit change with dbs_timer
+         * invocation. We do not want dbs_timer to run when user is changing
+         * the governor or limits.
+         */
+        struct mutex timer_mutex;
+        ktime_t time_stamp;
+};
+
 /* Per cpu structures */
-struct cpu_dbs_common_info {
-        int cpu;
+struct cpu_dbs_info {
         u64 prev_cpu_idle;
         u64 prev_cpu_wall;
         u64 prev_cpu_nice;
@@ -141,19 +152,12 @@ struct cpu_dbs_common_info {
          * wake-up from idle.
          */
         unsigned int prev_load;
-        struct cpufreq_policy *cur_policy;
-        struct delayed_work work;
-        /*
-         * percpu mutex that serializes governor limit change with gov_dbs_timer
-         * invocation. We do not want gov_dbs_timer to run when user is changing
-         * the governor or limits.
-         */
-        struct mutex timer_mutex;
-        ktime_t time_stamp;
+        struct delayed_work dwork;
+        struct cpu_common_dbs_info *shared;
 };
 
 struct od_cpu_dbs_info_s {
-        struct cpu_dbs_common_info cdbs;
+        struct cpu_dbs_info cdbs;
         struct cpufreq_frequency_table *freq_table;
         unsigned int freq_lo;
         unsigned int freq_lo_jiffies;
@@ -163,7 +167,7 @@ struct od_cpu_dbs_info_s {
 };
 
 struct cs_cpu_dbs_info_s {
-        struct cpu_dbs_common_info cdbs;
+        struct cpu_dbs_info cdbs;
         unsigned int down_skip;
         unsigned int requested_freq;
         unsigned int enable:1;
@@ -204,9 +208,11 @@ struct common_dbs_data {
          */
         struct dbs_data *gdbs_data;
 
-        struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
+        struct cpu_dbs_info *(*get_cpu_cdbs)(int cpu);
         void *(*get_cpu_dbs_info_s)(int cpu);
-        void (*gov_dbs_timer)(struct work_struct *work);
+        unsigned int (*gov_dbs_timer)(struct cpu_dbs_info *cdbs,
+                                      struct dbs_data *dbs_data,
+                                      bool modify_all);
         void (*gov_check_cpu)(int cpu, unsigned int load);
         int (*init)(struct dbs_data *dbs_data, bool notify);
         void (*exit)(struct dbs_data *dbs_data, bool notify);
@@ -265,8 +271,6 @@ static ssize_t show_sampling_rate_min_gov_pol                           \
 extern struct mutex cpufreq_governor_lock;
 
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
-bool need_load_eval(struct cpu_dbs_common_info *cdbs,
-                unsigned int sampling_rate);
 int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 struct common_dbs_data *cdata, unsigned int event);
 void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
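Taken together, the header hunks above replace the old per-CPU cur_policy pointer, timer mutex and time stamp with a single cpu_common_dbs_info shared by every CPU of a policy; each cpu_dbs_info keeps only its own load-accounting fields plus a back-pointer. A condensed sketch of the resulting relationship (fields trimmed, kernel headers assumed):

    #include <linux/types.h>
    #include <linux/mutex.h>
    #include <linux/ktime.h>
    #include <linux/workqueue.h>
    #include <linux/cpufreq.h>

    struct shared_block {                   /* one per policy, like cpu_common_dbs_info */
            struct cpufreq_policy *policy;
            struct mutex timer_mutex;       /* serializes limit changes vs. the timer */
            ktime_t time_stamp;
    };

    struct per_cpu_block {                  /* one per CPU, like cpu_dbs_info */
            u64 prev_cpu_idle;
            unsigned int prev_load;
            struct delayed_work dwork;
            struct shared_block *shared;    /* cdbs->shared->policy reaches the policy */
    };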
@@ -155,7 +155,7 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
 static void od_check_cpu(int cpu, unsigned int load)
 {
         struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
-        struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+        struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
         struct dbs_data *dbs_data = policy->governor_data;
         struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 
@@ -191,46 +191,40 @@ static void od_check_cpu(int cpu, unsigned int load)
         }
 }
 
-static void od_dbs_timer(struct work_struct *work)
+static unsigned int od_dbs_timer(struct cpu_dbs_info *cdbs,
+                                 struct dbs_data *dbs_data, bool modify_all)
 {
-        struct od_cpu_dbs_info_s *dbs_info =
-                container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
-        unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
-        struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
+        struct cpufreq_policy *policy = cdbs->shared->policy;
+        unsigned int cpu = policy->cpu;
+        struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
                         cpu);
-        struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
         struct od_dbs_tuners *od_tuners = dbs_data->tuners;
-        int delay = 0, sample_type = core_dbs_info->sample_type;
-        bool modify_all = true;
+        int delay = 0, sample_type = dbs_info->sample_type;
 
-        mutex_lock(&core_dbs_info->cdbs.timer_mutex);
-        if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
-                modify_all = false;
+        if (!modify_all)
                 goto max_delay;
-        }
 
         /* Common NORMAL_SAMPLE setup */
-        core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
+        dbs_info->sample_type = OD_NORMAL_SAMPLE;
         if (sample_type == OD_SUB_SAMPLE) {
-                delay = core_dbs_info->freq_lo_jiffies;
-                __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
-                                core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
+                delay = dbs_info->freq_lo_jiffies;
+                __cpufreq_driver_target(policy, dbs_info->freq_lo,
+                                        CPUFREQ_RELATION_H);
         } else {
                 dbs_check_cpu(dbs_data, cpu);
-                if (core_dbs_info->freq_lo) {
+                if (dbs_info->freq_lo) {
                         /* Setup timer for SUB_SAMPLE */
-                        core_dbs_info->sample_type = OD_SUB_SAMPLE;
-                        delay = core_dbs_info->freq_hi_jiffies;
+                        dbs_info->sample_type = OD_SUB_SAMPLE;
+                        delay = dbs_info->freq_hi_jiffies;
                 }
         }
 
 max_delay:
         if (!delay)
                 delay = delay_for_sampling_rate(od_tuners->sampling_rate
-                                * core_dbs_info->rate_mult);
+                                * dbs_info->rate_mult);
 
-        gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
-        mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
+        return delay;
 }
 
 /************************** sysfs interface ************************/
@@ -273,27 +267,27 @@ static void update_sampling_rate(struct dbs_data *dbs_data,
                 dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
                 cpufreq_cpu_put(policy);
 
-                mutex_lock(&dbs_info->cdbs.timer_mutex);
+                mutex_lock(&dbs_info->cdbs.shared->timer_mutex);
 
-                if (!delayed_work_pending(&dbs_info->cdbs.work)) {
-                        mutex_unlock(&dbs_info->cdbs.timer_mutex);
+                if (!delayed_work_pending(&dbs_info->cdbs.dwork)) {
+                        mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
                         continue;
                 }
 
                 next_sampling = jiffies + usecs_to_jiffies(new_rate);
-                appointed_at = dbs_info->cdbs.work.timer.expires;
+                appointed_at = dbs_info->cdbs.dwork.timer.expires;
 
                 if (time_before(next_sampling, appointed_at)) {
 
-                        mutex_unlock(&dbs_info->cdbs.timer_mutex);
-                        cancel_delayed_work_sync(&dbs_info->cdbs.work);
-                        mutex_lock(&dbs_info->cdbs.timer_mutex);
+                        mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
+                        cancel_delayed_work_sync(&dbs_info->cdbs.dwork);
+                        mutex_lock(&dbs_info->cdbs.shared->timer_mutex);
 
-                        gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
-                                       usecs_to_jiffies(new_rate), true);
+                        gov_queue_work(dbs_data, policy,
+                                       usecs_to_jiffies(new_rate), true);
 
                 }
-                mutex_unlock(&dbs_info->cdbs.timer_mutex);
+                mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
         }
 }
 
@@ -556,13 +550,16 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
 
         get_online_cpus();
         for_each_online_cpu(cpu) {
+                struct cpu_common_dbs_info *shared;
+
                 if (cpumask_test_cpu(cpu, &done))
                         continue;
 
-                policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
-                if (!policy)
+                shared = per_cpu(od_cpu_dbs_info, cpu).cdbs.shared;
+                if (!shared)
                         continue;
 
+                policy = shared->policy;
                 cpumask_or(&done, &done, policy->cpus);
 
                 if (policy->governor != &cpufreq_gov_ondemand)
@@ -78,7 +78,7 @@ static int eps_acpi_init(void)
 static int eps_acpi_exit(struct cpufreq_policy *policy)
 {
         if (eps_acpi_cpu_perf) {
-                acpi_processor_unregister_performance(eps_acpi_cpu_perf, 0);
+                acpi_processor_unregister_performance(0);
                 free_cpumask_var(eps_acpi_cpu_perf->shared_cpu_map);
                 kfree(eps_acpi_cpu_perf);
                 eps_acpi_cpu_perf = NULL;
@@ -29,7 +29,6 @@ MODULE_LICENSE("GPL");
 
 struct cpufreq_acpi_io {
         struct acpi_processor_performance       acpi_data;
-        struct cpufreq_frequency_table          *freq_table;
         unsigned int                            resume;
 };
 
@@ -221,6 +220,7 @@ acpi_cpufreq_cpu_init (
         unsigned int            cpu = policy->cpu;
         struct cpufreq_acpi_io  *data;
         unsigned int            result = 0;
+        struct cpufreq_frequency_table *freq_table;
 
         pr_debug("acpi_cpufreq_cpu_init\n");
 
@@ -254,10 +254,10 @@ acpi_cpufreq_cpu_init (
         }
 
         /* alloc freq_table */
-        data->freq_table = kzalloc(sizeof(*data->freq_table) *
+        freq_table = kzalloc(sizeof(*freq_table) *
                                    (data->acpi_data.state_count + 1),
                                    GFP_KERNEL);
-        if (!data->freq_table) {
+        if (!freq_table) {
                 result = -ENOMEM;
                 goto err_unreg;
         }
@@ -276,14 +276,14 @@ acpi_cpufreq_cpu_init (
         for (i = 0; i <= data->acpi_data.state_count; i++)
         {
                 if (i < data->acpi_data.state_count) {
-                        data->freq_table[i].frequency =
+                        freq_table[i].frequency =
                               data->acpi_data.states[i].core_frequency * 1000;
                 } else {
-                        data->freq_table[i].frequency = CPUFREQ_TABLE_END;
+                        freq_table[i].frequency = CPUFREQ_TABLE_END;
                 }
         }
 
-        result = cpufreq_table_validate_and_show(policy, data->freq_table);
+        result = cpufreq_table_validate_and_show(policy, freq_table);
         if (result) {
                 goto err_freqfree;
         }
@@ -311,9 +311,9 @@ acpi_cpufreq_cpu_init (
         return (result);
 
  err_freqfree:
-        kfree(data->freq_table);
+        kfree(freq_table);
 err_unreg:
-        acpi_processor_unregister_performance(&data->acpi_data, cpu);
+        acpi_processor_unregister_performance(cpu);
 err_free:
         kfree(data);
         acpi_io_data[cpu] = NULL;
@@ -332,8 +332,8 @@ acpi_cpufreq_cpu_exit (
 
         if (data) {
                 acpi_io_data[policy->cpu] = NULL;
-                acpi_processor_unregister_performance(&data->acpi_data,
-                                                      policy->cpu);
+                acpi_processor_unregister_performance(policy->cpu);
+                kfree(policy->freq_table);
                 kfree(data);
         }
 
||||
|
@ -98,11 +98,10 @@ static int integrator_set_target(struct cpufreq_policy *policy,
|
||||
/* get current setting */
|
||||
cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
|
||||
|
||||
if (machine_is_integrator()) {
|
||||
if (machine_is_integrator())
|
||||
vco.s = (cm_osc >> 8) & 7;
|
||||
} else if (machine_is_cintegrator()) {
|
||||
else if (machine_is_cintegrator())
|
||||
vco.s = 1;
|
||||
}
|
||||
vco.v = cm_osc & 255;
|
||||
vco.r = 22;
|
||||
freqs.old = icst_hz(&cclk_params, vco) / 1000;
|
||||
@@ -163,11 +162,10 @@ static unsigned int integrator_get(unsigned int cpu)
         /* detect memory etc. */
         cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
 
-        if (machine_is_integrator()) {
+        if (machine_is_integrator())
                 vco.s = (cm_osc >> 8) & 7;
-        } else {
+        else
                 vco.s = 1;
-        }
         vco.v = cm_osc & 255;
         vco.r = 22;
 
@@ -203,7 +201,7 @@ static int __init integrator_cpufreq_probe(struct platform_device *pdev)
         struct resource *res;
 
         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-        if (!res)
+        if (!res)
                 return -ENODEV;
 
         cm_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
|
||||
module_platform_driver_probe(integrator_cpufreq_driver,
|
||||
integrator_cpufreq_probe);
|
||||
|
||||
MODULE_AUTHOR ("Russell M. King");
|
||||
MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs");
|
||||
MODULE_LICENSE ("GPL");
|
||||
MODULE_AUTHOR("Russell M. King");
|
||||
MODULE_DESCRIPTION("cpufreq driver for ARM Integrator CPUs");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@@ -484,12 +484,11 @@ static void __init intel_pstate_sysfs_expose_params(void)
 }
 /************************** sysfs end ************************/
 
-static void intel_pstate_hwp_enable(void)
+static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
-        hwp_active++;
         pr_info("intel_pstate: HWP enabled\n");
 
-        wrmsrl( MSR_PM_ENABLE, 0x1);
+        wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 }
 
 static int byt_get_min_pstate(void)
@@ -933,6 +932,10 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
         cpu = all_cpu_data[cpunum];
 
         cpu->cpu = cpunum;
+
+        if (hwp_active)
+                intel_pstate_hwp_enable(cpu);
+
         intel_pstate_get_cpu_pstates(cpu);
 
         init_timer_deferrable(&cpu->timer);
@@ -1246,7 +1249,7 @@ static int __init intel_pstate_init(void)
                 return -ENOMEM;
 
         if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
-                intel_pstate_hwp_enable();
+                hwp_active++;
 
         if (!hwp_active && hwp_only)
                 goto out;
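HWP enabling moves from driver init to per-CPU init in the hunks above, and the MSR write switches from wrmsrl(), which writes the MSR of whichever CPU happens to run the code, to wrmsrl_on_cpu(), which performs the write on the named CPU. A hedged one-function sketch of the new call:

    #include <asm/msr.h>        /* wrmsrl_on_cpu() */
    #include <asm/msr-index.h>  /* MSR_PM_ENABLE */

    static void hwp_enable_on(unsigned int cpu)
    {
            /* old: wrmsrl(MSR_PM_ENABLE, 0x1) hit the calling CPU's MSR */
            wrmsrl_on_cpu(cpu, MSR_PM_ENABLE, 0x1);  /* target the given CPU */
    }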
@@ -421,7 +421,7 @@ static int powernow_acpi_init(void)
         return 0;
 
 err2:
-        acpi_processor_unregister_performance(acpi_processor_perf, 0);
+        acpi_processor_unregister_performance(0);
 err1:
         free_cpumask_var(acpi_processor_perf->shared_cpu_map);
 err05:
@@ -661,7 +661,7 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy)
 {
 #ifdef CONFIG_X86_POWERNOW_K7_ACPI
         if (acpi_processor_perf) {
-                acpi_processor_unregister_performance(acpi_processor_perf, 0);
+                acpi_processor_unregister_performance(0);
                 free_cpumask_var(acpi_processor_perf->shared_cpu_map);
                 kfree(acpi_processor_perf);
         }
@@ -795,7 +795,7 @@ err_out_mem:
         kfree(powernow_table);
 
 err_out:
-        acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
+        acpi_processor_unregister_performance(data->cpu);
 
         /* data->acpi_data.state_count informs us at ->exit()
          * whether ACPI was used */
@@ -863,8 +863,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
 {
         if (data->acpi_data.state_count)
-                acpi_processor_unregister_performance(&data->acpi_data,
-                                                      data->cpu);
+                acpi_processor_unregister_performance(data->cpu);
         free_cpumask_var(data->acpi_data.shared_cpu_map);
 }
 
@@ -560,11 +560,9 @@ static int __init xen_acpi_processor_init(void)
 
         return 0;
 err_unregister:
-        for_each_possible_cpu(i) {
-                struct acpi_processor_performance *perf;
-                perf = per_cpu_ptr(acpi_perf_data, i);
-                acpi_processor_unregister_performance(perf, i);
-        }
+        for_each_possible_cpu(i)
+                acpi_processor_unregister_performance(i);
+
 err_out:
         /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
         free_acpi_perf_data();
@@ -579,11 +577,9 @@ static void __exit xen_acpi_processor_exit(void)
         kfree(acpi_ids_done);
         kfree(acpi_id_present);
         kfree(acpi_id_cst_present);
-        for_each_possible_cpu(i) {
-                struct acpi_processor_performance *perf;
-                perf = per_cpu_ptr(acpi_perf_data, i);
-                acpi_processor_unregister_performance(perf, i);
-        }
+        for_each_possible_cpu(i)
+                acpi_processor_unregister_performance(i);
+
         free_acpi_perf_data();
 }
 
|
@ -228,10 +228,7 @@ extern int acpi_processor_preregister_performance(struct
|
||||
|
||||
extern int acpi_processor_register_performance(struct acpi_processor_performance
|
||||
*performance, unsigned int cpu);
|
||||
extern void acpi_processor_unregister_performance(struct
|
||||
acpi_processor_performance
|
||||
*performance,
|
||||
unsigned int cpu);
|
||||
extern void acpi_processor_unregister_performance(unsigned int cpu);
|
||||
|
||||
/* note: this locks both the calling module and the processor module
|
||||
if a _PPC object exists, rmmod is disallowed then */
|
||||
|