
Merge branch 'pm-cpufreq'

* pm-cpufreq:
  intel_pstate: Add Haswell CPU models
  Revert "cpufreq: make sure frequency transitions are serialized"
  cpufreq: Use signed type for 'ret' variable, to store negative error values
  cpufreq: Remove temporary fix for race between CPU hotplug and sysfs-writes
  cpufreq: Synchronize the cpufreq store_*() routines with CPU hotplug
  cpufreq: Invoke __cpufreq_remove_dev_finish() after releasing cpu_hotplug.lock
  cpufreq: Split __cpufreq_remove_dev() into two parts
  cpufreq: Fix wrong time unit conversion
  cpufreq: serialize calls to __cpufreq_governor()
  cpufreq: don't allow governor limits to be changed when it is disabled
Rafael J. Wysocki 2013-09-11 15:23:15 +02:00
commit 0df03a30c3
4 changed files with 76 additions and 35 deletions
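
The central change in this series is the split of __cpufreq_remove_dev() into a "prepare" half and a "finish" half, so that the part which may block waiting for the policy kobject can run from the CPU_POST_DEAD notifier, i.e. after cpu_hotplug.lock has been released. Below is a minimal sketch of that notifier shape; the __cpufreq_remove_dev_prepare()/__cpufreq_remove_dev_finish() names come from the diff, but the surrounding notifier code is simplified and not copied verbatim from the kernel.

#include <linux/cpu.h>
#include <linux/notifier.h>

/* Simplified sketch of the two-phase removal pattern adopted here. */
static int cpufreq_cpu_callback(struct notifier_block *nfb,
                                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct device *dev = get_cpu_device(cpu);
        bool frozen = !!(action & CPU_TASKS_FROZEN);

        if (!dev)
                return NOTIFY_OK;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DOWN_PREPARE:
                /* cpu_hotplug.lock is still held: stop the governor and
                 * nominate a new policy CPU, but do not wait on sysfs. */
                __cpufreq_remove_dev_prepare(dev, NULL, frozen);
                break;
        case CPU_POST_DEAD:
                /* Runs after cpu_hotplug.lock is dropped, so waiting for
                 * the kobject release cannot deadlock against a sysfs
                 * write that is itself blocked in get_online_cpus(). */
                __cpufreq_remove_dev_finish(dev, NULL, frozen);
                break;
        }
        return NOTIFY_OK;
}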

drivers/cpufreq/cpufreq.c

@@ -280,13 +280,6 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
switch (state) {
case CPUFREQ_PRECHANGE:
if (WARN(policy->transition_ongoing ==
cpumask_weight(policy->cpus),
"In middle of another frequency transition\n"))
return;
policy->transition_ongoing++;
/* detect if the driver reported a value as "old frequency"
* which is not equal to what the cpufreq core thinks is
* "old frequency".
@@ -306,12 +299,6 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
break;
case CPUFREQ_POSTCHANGE:
if (WARN(!policy->transition_ongoing,
"No frequency transition in progress\n"))
return;
policy->transition_ongoing--;
adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
(unsigned long)freqs->cpu);
@@ -437,7 +424,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *policy,
static ssize_t store_##file_name \
(struct cpufreq_policy *policy, const char *buf, size_t count) \
{ \
unsigned int ret; \
int ret; \
struct cpufreq_policy new_policy; \
\
ret = cpufreq_get_policy(&new_policy, policy->cpu); \
@@ -490,7 +477,7 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
unsigned int ret;
int ret;
char str_governor[16];
struct cpufreq_policy new_policy;
@@ -694,8 +681,13 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
get_online_cpus();
if (!cpu_online(policy->cpu))
goto unlock;
if (!down_read_trylock(&cpufreq_rwsem))
goto exit;
goto unlock;
if (lock_policy_rwsem_write(policy->cpu) < 0)
goto up_read;
@@ -709,7 +701,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
up_read:
up_read(&cpufreq_rwsem);
exit:
unlock:
put_online_cpus();
return ret;
}
@@ -1141,22 +1135,14 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
return cpu_dev->id;
}
/**
* __cpufreq_remove_dev - remove a CPU device
*
* Removes the cpufreq interface for a CPU device.
* Caller should already have policy_rwsem in write mode for this CPU.
* This routine frees the rwsem before returning.
*/
static int __cpufreq_remove_dev(struct device *dev,
struct subsys_interface *sif, bool frozen)
static int __cpufreq_remove_dev_prepare(struct device *dev,
struct subsys_interface *sif,
bool frozen)
{
unsigned int cpu = dev->id, cpus;
int new_cpu, ret;
unsigned long flags;
struct cpufreq_policy *policy;
struct kobject *kobj;
struct completion *cmp;
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
@@ -1213,6 +1199,33 @@ static int __cpufreq_remove_dev(struct device *dev,
}
}
return 0;
}
static int __cpufreq_remove_dev_finish(struct device *dev,
struct subsys_interface *sif,
bool frozen)
{
unsigned int cpu = dev->id, cpus;
int ret;
unsigned long flags;
struct cpufreq_policy *policy;
struct kobject *kobj;
struct completion *cmp;
read_lock_irqsave(&cpufreq_driver_lock, flags);
policy = per_cpu(cpufreq_cpu_data, cpu);
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
if (!policy) {
pr_debug("%s: No cpu_data found\n", __func__);
return -EINVAL;
}
lock_policy_rwsem_read(cpu);
cpus = cpumask_weight(policy->cpus);
unlock_policy_rwsem_read(cpu);
/* If cpu is last user of policy, free policy */
if (cpus == 1) {
if (cpufreq_driver->target) {
@@ -1272,6 +1285,27 @@ static int __cpufreq_remove_dev(struct device *dev,
return 0;
}
/**
* __cpufreq_remove_dev - remove a CPU device
*
* Removes the cpufreq interface for a CPU device.
* Caller should already have policy_rwsem in write mode for this CPU.
* This routine frees the rwsem before returning.
*/
static inline int __cpufreq_remove_dev(struct device *dev,
struct subsys_interface *sif,
bool frozen)
{
int ret;
ret = __cpufreq_remove_dev_prepare(dev, sif, frozen);
if (!ret)
ret = __cpufreq_remove_dev_finish(dev, sif, frozen);
return ret;
}
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
unsigned int cpu = dev->id;
@@ -1610,8 +1644,6 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (cpufreq_disabled())
return -ENODEV;
if (policy->transition_ongoing)
return -EBUSY;
/* Make sure that target_freq is within supported range */
if (target_freq > policy->max)
@@ -1692,8 +1724,9 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
policy->cpu, event);
mutex_lock(&cpufreq_governor_lock);
if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
(policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
|| (!policy->governor_enabled
&& (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
mutex_unlock(&cpufreq_governor_lock);
return -EBUSY;
}
@@ -1994,7 +2027,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
break;
case CPU_DOWN_PREPARE:
__cpufreq_remove_dev(dev, NULL, frozen);
__cpufreq_remove_dev_prepare(dev, NULL, frozen);
break;
case CPU_POST_DEAD:
__cpufreq_remove_dev_finish(dev, NULL, frozen);
break;
case CPU_DOWN_FAILED:
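
The store() hunks above close the race between sysfs writes and CPU hotplug: the write path now pins hotplug with get_online_cpus()/put_online_cpus() and rejects writes once the policy CPU has gone offline, which is what allows the earlier temporary workaround (see the commit list) to be dropped. For readability, here is the resulting function in one piece; the lines that do not appear in the hunks are filled in from context and lightly simplified, so treat this as a sketch rather than the exact file contents.

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;

        get_online_cpus();                      /* hold off CPU hotplug */

        if (!cpu_online(policy->cpu))
                goto unlock;                    /* policy CPU already gone */

        if (!down_read_trylock(&cpufreq_rwsem))
                goto unlock;                    /* driver being unregistered */

        if (lock_policy_rwsem_write(policy->cpu) < 0)
                goto up_read;

        if (fattr->store)
                ret = fattr->store(policy, buf, count);

        unlock_policy_rwsem_write(policy->cpu);

up_read:
        up_read(&cpufreq_rwsem);
unlock:
        put_online_cpus();
        return ret;
}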

drivers/cpufreq/cpufreq_stats.c

@@ -74,7 +74,7 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
for (i = 0; i < stat->state_num; i++) {
len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
(unsigned long long)
cputime64_to_clock_t(stat->time_in_state[i]));
jiffies_64_to_clock_t(stat->time_in_state[i]));
}
return len;
}
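
The one-line cpufreq_stats change is a unit fix: time_in_state[] is accumulated from get_jiffies_64() deltas, i.e. in jiffies, so exporting it with cputime64_to_clock_t() applied the wrong scale whenever cputime and jiffies units differ; jiffies_64_to_clock_t() converts jiffies to the USER_HZ clock ticks that sysfs readers expect. A minimal sketch of the accumulate-then-export pattern follows, with everything except the two helper calls made up for illustration.

#include <linux/jiffies.h>
#include <linux/kernel.h>

static u64 demo_time_in_state;  /* accumulated in jiffies (hypothetical) */
static u64 demo_last_update;    /* jiffies at the previous update */

static void demo_account(void)
{
        u64 now = get_jiffies_64();

        demo_time_in_state += now - demo_last_update;
        demo_last_update = now;
}

static ssize_t demo_show(char *buf)
{
        /* jiffies in, USER_HZ clock ticks out */
        return sprintf(buf, "%llu\n",
                       (unsigned long long)jiffies_64_to_clock_t(demo_time_in_state));
}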

drivers/cpufreq/intel_pstate.c

@@ -522,6 +522,11 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(0x2a, default_policy),
ICPU(0x2d, default_policy),
ICPU(0x3a, default_policy),
ICPU(0x3c, default_policy),
ICPU(0x3e, default_policy),
ICPU(0x3f, default_policy),
ICPU(0x45, default_policy),
ICPU(0x46, default_policy),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
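
The new ICPU() entries extend the model table that intel_pstate matches against the boot CPU via x86_match_cpu(); models 0x3c, 0x3f, 0x45 and 0x46 correspond to the Haswell variants named in the commit title, and 0x3e to an Ivy Bridge server model. Below is a sketch of how such a table is consumed at init time; the function name is hypothetical and the real probe does considerably more (MSR reads, cpufreq driver registration).

#include <linux/module.h>
#include <asm/cpu_device_id.h>

/* Hypothetical init sketch: matching a table like intel_pstate_cpu_ids
 * against the running CPU. */
static int __init pstate_table_demo_init(void)
{
        const struct x86_cpu_id *id;

        id = x86_match_cpu(intel_pstate_cpu_ids);
        if (!id)
                return -ENODEV; /* not a supported Intel model */

        /* id->driver_data holds the per-model policy installed by ICPU() */
        return 0;
}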

include/linux/cpufreq.h

@@ -85,7 +85,6 @@ struct cpufreq_policy {
struct list_head policy_list;
struct kobject kobj;
struct completion kobj_unregister;
int transition_ongoing; /* Tracks transition status */
};
/* Only for ACPI */