
cpufreq: intel_pstate: Add locking around HWP requests

To avoid race conditions between multiple threads, increase the scope
of intel_pstate_limits_lock so that it also covers HWP requests.

Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
[ rjw: Subject ]
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Srinivas Pandruvada 2016-12-06 13:32:15 -08:00 committed by Rafael J. Wysocki
parent 4dd63b49a7
commit b59fe54053

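The change amounts to holding intel_pstate_limits_lock across the HWP update
instead of dropping it beforehand. As a rough illustration, here is a minimal
userspace sketch of that pattern, assuming a simplified model: pthreads stands
in for the kernel mutex API, and apply_limits_to_hw() is a hypothetical
stand-in for the HWP request (intel_pstate_hwp_set_online_cpus()); none of
these names are the driver's actual API.

/*
 * Minimal sketch of the locking change. The names and types here are
 * illustrative only; they are not the intel_pstate driver's real API.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t limits_lock = PTHREAD_MUTEX_INITIALIZER;
static int max_perf_pct = 100;

/* Stand-in for programming the HWP request: it must see a consistent
 * snapshot of the limits. */
static void apply_limits_to_hw(void)
{
	printf("programming hardware with max_perf_pct=%d\n", max_perf_pct);
}

/* Old pattern: the hardware update runs after the unlock, so another
 * writer can change the limits between the unlock and the update. */
static void store_max_perf_racy(int input)
{
	pthread_mutex_lock(&limits_lock);
	max_perf_pct = input;
	pthread_mutex_unlock(&limits_lock);

	apply_limits_to_hw();		/* may observe a newer value */
}

/* New pattern (what this commit does): keep the lock held across the
 * hardware update so the limits and the HWP request stay consistent. */
static void store_max_perf_locked(int input)
{
	pthread_mutex_lock(&limits_lock);
	max_perf_pct = input;
	apply_limits_to_hw();		/* sees exactly the value just written */
	pthread_mutex_unlock(&limits_lock);
}

int main(void)
{
	store_max_perf_racy(80);
	store_max_perf_locked(90);
	return 0;
}

Holding the lock across the hardware update guarantees that the value
programmed into hardware matches the limits written by the same sysfs store,
which is what the diff below does for store_no_turbo(), store_max_perf_pct()
and store_min_perf_pct(), while intel_pstate_set_policy() now takes the lock
around the whole limits update instead of the helpers taking it themselves.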

@@ -853,11 +853,11 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 	limits->no_turbo = clamp_t(int, input, 0, 1);
 
-	mutex_unlock(&intel_pstate_limits_lock);
-
 	if (hwp_active)
 		intel_pstate_hwp_set_online_cpus();
 
+	mutex_unlock(&intel_pstate_limits_lock);
+
 	return count;
 }
@@ -882,10 +882,11 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 				   limits->max_perf_pct);
 	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
-	mutex_unlock(&intel_pstate_limits_lock);
 
 	if (hwp_active)
 		intel_pstate_hwp_set_online_cpus();
 
+	mutex_unlock(&intel_pstate_limits_lock);
+
 	return count;
 }
@@ -910,10 +911,11 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 				   limits->min_perf_pct);
 	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
-	mutex_unlock(&intel_pstate_limits_lock);
 
 	if (hwp_active)
 		intel_pstate_hwp_set_online_cpus();
 
+	mutex_unlock(&intel_pstate_limits_lock);
+
 	return count;
 }
@@ -1664,7 +1666,6 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
 static void intel_pstate_set_performance_limits(struct perf_limits *limits)
 {
-	mutex_lock(&intel_pstate_limits_lock);
 	limits->no_turbo = 0;
 	limits->turbo_disabled = 0;
 	limits->max_perf_pct = 100;
@@ -1675,15 +1676,12 @@ static void intel_pstate_set_performance_limits(struct perf_limits *limits)
 	limits->max_sysfs_pct = 100;
 	limits->min_policy_pct = 0;
 	limits->min_sysfs_pct = 0;
-	mutex_unlock(&intel_pstate_limits_lock);
 }
 
 static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
 					    struct perf_limits *limits)
 {
-	mutex_lock(&intel_pstate_limits_lock);
-
 	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
 					      policy->cpuinfo.max_freq);
 	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
@@ -1714,8 +1712,6 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
 	limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
 	limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);
 
-	mutex_unlock(&intel_pstate_limits_lock);
-
 	pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
 		 limits->max_perf_pct, limits->min_perf_pct);
 }
@@ -1744,6 +1740,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	if (per_cpu_limits)
 		perf_limits = cpu->perf_limits;
 
+	mutex_lock(&intel_pstate_limits_lock);
+
 	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
 		if (!perf_limits) {
 			limits = &performance_limits;
@@ -1778,6 +1776,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	intel_pstate_hwp_set_policy(policy);
 
+	mutex_unlock(&intel_pstate_limits_lock);
+
 	return 0;
 }