cpufreq: intel_pstate: Implement the ->adjust_perf() callback
Make intel_pstate expose the ->adjust_perf() callback when it operates in the passive mode with HWP enabled, which causes the schedutil governor to use that callback instead of ->fast_switch().

The minimum and target performance-level values passed by the governor to ->adjust_perf() are converted to HWP.REQ.MIN and HWP.REQ.DESIRED, respectively, which allows the processor to adjust its configuration to maximize energy-efficiency while providing sufficient capacity.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
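To make that conversion concrete, here is a minimal standalone sketch (not kernel code; perf_to_pstate() and its parameters are illustrative names) of how a performance level in the [0, capacity] range maps onto a pstate, mirroring the DIV_ROUND_UP() scaling and lower-bound clamping that intel_cpufreq_adjust_perf() performs in the diff below:

#include <stdio.h>

/* Round-up integer division, as the kernel's DIV_ROUND_UP() macro does. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/*
 * Hypothetical stand-in for the patch's scaling step: map a governor
 * performance level in [0, capacity] to a pstate in [min_pstate, cap_pstate].
 * See intel_cpufreq_adjust_perf() in the diff for the real logic.
 */
static int perf_to_pstate(unsigned long perf, unsigned long capacity,
                          int cap_pstate, int min_pstate)
{
        int pstate = cap_pstate;

        /* Skip the division when the request already saturates capacity. */
        if (perf < capacity)
                pstate = DIV_ROUND_UP(cap_pstate * perf, capacity);

        return pstate < min_pstate ? min_pstate : pstate;
}

int main(void)
{
        /*
         * Example: turbo pstate 40 (4.0 GHz at 100 MHz scaling),
         * capacity 1024, target_perf 512 -> pstate 20 (2.0 GHz).
         */
        printf("%d\n", perf_to_pstate(512, 1024, 40, 8));
        return 0;
}

Rounding up means the hardware is never asked for less capacity than the governor computed, and the perf < capacity guard is exactly the "avoid unnecessary divisions" optimization noted in the patch.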
parent ee2cc4276b
commit a365ab6b9d
drivers/cpufreq/intel_pstate.c

@@ -2526,20 +2526,19 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in
                 fp_toint(cpu->iowait_boost * 100));
 }
 
-static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
-                                     bool strict, bool fast_switch)
+static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 min, u32 max,
+                                     u32 desired, bool fast_switch)
 {
         u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
 
         value &= ~HWP_MIN_PERF(~0L);
-        value |= HWP_MIN_PERF(target_pstate);
+        value |= HWP_MIN_PERF(min);
 
-        /*
-         * The entire MSR needs to be updated in order to update the HWP min
-         * field in it, so opportunistically update the max too if needed.
-         */
         value &= ~HWP_MAX_PERF(~0L);
-        value |= HWP_MAX_PERF(strict ? target_pstate : cpu->max_perf_ratio);
+        value |= HWP_MAX_PERF(max);
+
+        value &= ~HWP_DESIRED_PERF(~0L);
+        value |= HWP_DESIRED_PERF(desired);
 
         if (value == prev)
                 return;
@@ -2569,11 +2568,15 @@ static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
         int old_pstate = cpu->pstate.current_pstate;
 
         target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
-        if (hwp_active)
-                intel_cpufreq_adjust_hwp(cpu, target_pstate,
-                                         policy->strict_target, fast_switch);
-        else if (target_pstate != old_pstate)
+        if (hwp_active) {
+                int max_pstate = policy->strict_target ?
+                                        target_pstate : cpu->max_perf_ratio;
+
+                intel_cpufreq_adjust_hwp(cpu, target_pstate, max_pstate, 0,
+                                         fast_switch);
+        } else if (target_pstate != old_pstate) {
                 intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
+        }
 
         cpu->pstate.current_pstate = target_pstate;
 
@@ -2634,6 +2637,47 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
         return target_pstate * cpu->pstate.scaling;
 }
 
+static void intel_cpufreq_adjust_perf(unsigned int cpunum,
+                                      unsigned long min_perf,
+                                      unsigned long target_perf,
+                                      unsigned long capacity)
+{
+        struct cpudata *cpu = all_cpu_data[cpunum];
+        int old_pstate = cpu->pstate.current_pstate;
+        int cap_pstate, min_pstate, max_pstate, target_pstate;
+
+        update_turbo_state();
+        cap_pstate = global.turbo_disabled ? cpu->pstate.max_pstate :
+                                             cpu->pstate.turbo_pstate;
+
+        /* Optimization: Avoid unnecessary divisions. */
+
+        target_pstate = cap_pstate;
+        if (target_perf < capacity)
+                target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity);
+
+        min_pstate = cap_pstate;
+        if (min_perf < capacity)
+                min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity);
+
+        if (min_pstate < cpu->pstate.min_pstate)
+                min_pstate = cpu->pstate.min_pstate;
+
+        if (min_pstate < cpu->min_perf_ratio)
+                min_pstate = cpu->min_perf_ratio;
+
+        max_pstate = min(cap_pstate, cpu->max_perf_ratio);
+        if (max_pstate < min_pstate)
+                max_pstate = min_pstate;
+
+        target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate);
+
+        intel_cpufreq_adjust_hwp(cpu, min_pstate, max_pstate, target_pstate, true);
+
+        cpu->pstate.current_pstate = target_pstate;
+        intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
+}
+
 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
         int max_state, turbo_max, min_freq, max_freq, ret;
@@ -3032,6 +3076,8 @@ static int __init intel_pstate_init(void)
                         intel_pstate.attr = hwp_cpufreq_attrs;
                         intel_cpufreq.attr = hwp_cpufreq_attrs;
                         intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
+                        intel_cpufreq.fast_switch = NULL;
+                        intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf;
                         if (!default_driver)
                                 default_driver = &intel_pstate;
 
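For context on the calling side (a hedged sketch, not part of this patch): with ->adjust_perf() populated and ->fast_switch() cleared, as in the last hunk, the schedutil governor reaches the driver through cpufreq_driver_adjust_perf(), the helper introduced by the parent commit ee2cc4276b. A caller looks roughly like this; sugov_like_update() and its parameter names are illustrative, not the governor's actual code:

#include <linux/cpufreq.h>

/*
 * Hypothetical caller mirroring what schedutil does once a driver
 * exposes ->adjust_perf(): pass the minimum required performance,
 * the target performance, and the capacity both are scaled against.
 * Kernel-internal context; shown for illustration only.
 */
static void sugov_like_update(unsigned int cpu, unsigned long min_perf,
                              unsigned long target_perf,
                              unsigned long capacity)
{
        cpufreq_driver_adjust_perf(cpu, min_perf, target_perf, capacity);
}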