
Power management fixes for 5.11-rc8

Address a performance regression related to scale-invariance on x86
 that may prevent turbo CPU frequencies from being used in certain
 workloads on systems using acpi-cpufreq as the CPU performance
 scaling driver and schedutil as the scaling governor.
 -----BEGIN PGP SIGNATURE-----
 
 iQJGBAABCAAwFiEE4fcc61cGeeHD/fCwgsRv/nhiVHEFAmAkGloSHHJqd0Byand5
 c29ja2kubmV0AAoJEILEb/54YlRxCNkP/3uQly0iE4WdsxiBlWgF6zQH5PkezzSu
 vXY0E2NbzAlUpke3zDIZ6zkN6DfG1yAl4vpsVzy5N/kTzFnaFPbPH3ylZ2x/oKBZ
 Rd9yl0uz13UR4txkY49ZRF3c3vMhFoGzNfIXYjMQDGevyfarMLdpR96GFbpFTCT5
 I5gZfDtOuAwpXY+Mr+UplTu7PTrmkf2jfQ/T/b+jog3NAjqODwnLT8dwIOTZnuPk
 vbCOhb5vsiUHaqilrKkuGS5TzGsb/KCa6k4kaf7WhoFiU99KKcaZRLca/4FlJuVj
 Q4rgSrtPsbvhG2vmucprunrsyt21JQMDnERqMlPcEls/c0ONgS4fMc5YJlO6KgZZ
 Mlu01f/oE84jQ//0Y3LVi6v6w+yOiBi1Ie9yD8wnOkn6c+r6sWWbvd5Kg/guGnwi
 LLSdemslw4r0ltimFmWD5I86ZXDJ1gwU9iuv+SdxoyppHHwOOAu5l/FhEgNvuWbl
 LeuLrl7BhYTbN40ouKivoQ8smTpI0EmZX2MRm+l5NV4hHQ+8df16Rt7hoFvAdI2L
 VJe/i0sgOcdCVnSovxQ8WeuSMGQtqrgFC4B/9+q6WOgAGIAFtyHQUtpHzB+XsIRo
 P2VAmxcdJsqrmnbtoxxopMcqov5cAYI5PPJO/4yR+Z+gOBjeBvEJaxDF/shFT2NO
 iaAQXEYLIPW9
 =iOYP
 -----END PGP SIGNATURE-----

Merge tag 'pm-5.11-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management fixes from Rafael Wysocki:
 "Address a performance regression related to scale-invariance on x86
  that may prevent turbo CPU frequencies from being used in certain
  workloads on systems using acpi-cpufreq as the CPU performance scaling
  driver and schedutil as the scaling governor"

* tag 'pm-5.11-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  cpufreq: ACPI: Update arch scale-invariance max perf ratio if CPPC is not there
  cpufreq: ACPI: Extend frequency tables to cover boost frequencies
Linus Torvalds 2021-02-10 12:03:35 -08:00
commit 291009f656
2 changed files with 105 additions and 13 deletions
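
The regression comes down to a mismatch between the maximum frequency the
cpufreq driver reports and the reference frequency used by scale-invariant
utilization. Below is a rough, stand-alone illustration with invented
numbers, assuming schedutil's "next_freq = 1.25 * max_freq * util / max"
heuristic; none of the constants are taken from this merge:

    /* Sketch only: shows why turbo is never requested when the driver's
     * frequency table stops at the nominal frequency while utilization is
     * scaled against the turbo frequency. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int nominal_khz = 2000000; /* highest entry acpi-cpufreq used to expose */
        unsigned int turbo_khz   = 3000000; /* real maximum (boost) frequency */

        /* With frequency invariance, a CPU that is 100% busy at the nominal
         * frequency reports util/max of roughly nominal/turbo, not 1.0. */
        double util_over_max = (double)nominal_khz / turbo_khz;

        /* schedutil request when the driver's max is only the nominal frequency */
        double next_khz = 1.25 * nominal_khz * util_over_max;

        printf("requested ~%.0f kHz; turbo (%u kHz) is never asked for\n",
               next_khz, turbo_khz);
        return 0;
    }

With the extended frequency table below, the driver's maximum becomes the
boost frequency, so a fully loaded CPU can again be driven into the turbo
range.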

Changed file 1 of 2:

@@ -1833,6 +1833,7 @@ void arch_set_max_freq_ratio(bool turbo_disabled)
         arch_max_freq_ratio = turbo_disabled ? SCHED_CAPACITY_SCALE :
                                         arch_turbo_freq_ratio;
 }
+EXPORT_SYMBOL_GPL(arch_set_max_freq_ratio);
 static bool turbo_disabled(void)
 {

Changed file 2 of 2:

@@ -26,6 +26,7 @@
 #include <linux/uaccess.h>
 #include <acpi/processor.h>
+#include <acpi/cppc_acpi.h>
 #include <asm/msr.h>
 #include <asm/processor.h>
@@ -53,6 +54,7 @@ struct acpi_cpufreq_data {
         unsigned int resume;
         unsigned int cpu_feature;
         unsigned int acpi_perf_cpu;
+        unsigned int first_perf_state;
         cpumask_var_t freqdomain_cpus;
         void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
         u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
@@ -221,10 +223,10 @@ static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
         perf = to_perf_data(data);
-        cpufreq_for_each_entry(pos, policy->freq_table)
+        cpufreq_for_each_entry(pos, policy->freq_table + data->first_perf_state)
                 if (msr == perf->states[pos->driver_data].status)
                         return pos->frequency;
-        return policy->freq_table[0].frequency;
+        return policy->freq_table[data->first_perf_state].frequency;
 }
 static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
@@ -363,6 +365,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
         struct cpufreq_policy *policy;
         unsigned int freq;
         unsigned int cached_freq;
+        unsigned int state;
         pr_debug("%s (%d)\n", __func__, cpu);
@@ -374,7 +377,11 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
         if (unlikely(!data || !policy->freq_table))
                 return 0;
-        cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
+        state = to_perf_data(data)->state;
+        if (state < data->first_perf_state)
+                state = data->first_perf_state;
+        cached_freq = policy->freq_table[state].frequency;
         freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
         if (freq != cached_freq) {
                 /*
@@ -628,16 +635,54 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
 }
 #endif
+#ifdef CONFIG_ACPI_CPPC_LIB
+static u64 get_max_boost_ratio(unsigned int cpu)
+{
+        struct cppc_perf_caps perf_caps;
+        u64 highest_perf, nominal_perf;
+        int ret;
+        if (acpi_pstate_strict)
+                return 0;
+        ret = cppc_get_perf_caps(cpu, &perf_caps);
+        if (ret) {
+                pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
+                         cpu, ret);
+                return 0;
+        }
+        highest_perf = perf_caps.highest_perf;
+        nominal_perf = perf_caps.nominal_perf;
+        if (!highest_perf || !nominal_perf) {
+                pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
+                return 0;
+        }
+        if (highest_perf < nominal_perf) {
+                pr_debug("CPU%d: nominal performance above highest\n", cpu);
+                return 0;
+        }
+        return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
+}
+#else
+static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
+#endif
 static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-        unsigned int i;
-        unsigned int valid_states = 0;
-        unsigned int cpu = policy->cpu;
-        struct acpi_cpufreq_data *data;
-        unsigned int result = 0;
-        struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
-        struct acpi_processor_performance *perf;
         struct cpufreq_frequency_table *freq_table;
+        struct acpi_processor_performance *perf;
+        struct acpi_cpufreq_data *data;
+        unsigned int cpu = policy->cpu;
+        struct cpuinfo_x86 *c = &cpu_data(cpu);
+        unsigned int valid_states = 0;
+        unsigned int result = 0;
+        unsigned int state_count;
+        u64 max_boost_ratio;
+        unsigned int i;
 #ifdef CONFIG_SMP
         static int blacklisted;
 #endif
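
As a quick sanity check of the fixed-point result returned by
get_max_boost_ratio(), here is a stand-alone sketch; the CPPC values are
hypothetical (the kernel reads highest_perf and nominal_perf through
cppc_get_perf_caps()):

    #include <stdio.h>
    #include <stdint.h>

    #define SCHED_CAPACITY_SHIFT 10   /* 1024 represents 1.0, as in the kernel */

    int main(void)
    {
        uint64_t highest_perf = 300;  /* hypothetical CPPC highest perf */
        uint64_t nominal_perf = 200;  /* hypothetical CPPC nominal perf */

        /* same arithmetic as div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf) */
        uint64_t ratio = (highest_perf << SCHED_CAPACITY_SHIFT) / nominal_perf;

        printf("max boost ratio = %llu/1024 (%.2fx)\n",
               (unsigned long long)ratio, ratio / 1024.0);  /* 1536/1024 = 1.50x */
        return 0;
    }

A zero return (strict mode, missing CPPC data, or highest below nominal)
makes the caller fall back to arch_set_max_freq_ratio(true) in the next hunk.
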
@@ -750,8 +795,28 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                 goto err_unreg;
         }
-        freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
-                             GFP_KERNEL);
+        state_count = perf->state_count + 1;
+        max_boost_ratio = get_max_boost_ratio(cpu);
+        if (max_boost_ratio) {
+                /*
+                 * Make a room for one more entry to represent the highest
+                 * available "boost" frequency.
+                 */
+                state_count++;
+                valid_states++;
+                data->first_perf_state = valid_states;
+        } else {
+                /*
+                 * If the maximum "boost" frequency is unknown, ask the arch
+                 * scale-invariance code to use the "nominal" performance for
+                 * CPU utilization scaling so as to prevent the schedutil
+                 * governor from selecting inadequate CPU frequencies.
+                 */
+                arch_set_max_freq_ratio(true);
+        }
+        freq_table = kcalloc(state_count, sizeof(*freq_table), GFP_KERNEL);
         if (!freq_table) {
                 result = -ENOMEM;
                 goto err_unreg;
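
The net effect on the table geometry: index 0 is reserved for the extra
"boost" entry and first_perf_state points at the first real P-state. A
hypothetical layout for a CPU with three ACPI P-states (all values invented
for illustration; the kernel terminates the table with CPUFREQ_TABLE_END):

    struct entry { unsigned int driver_data; unsigned int frequency; /* kHz */ };

    static const struct entry freq_table_sketch[] = {
        { 0, 3000000 }, /* [0] extra "boost" entry, filled in by the next hunk */
        { 0, 2000000 }, /* [1] == first_perf_state: P0 (nominal)               */
        { 1, 1800000 }, /* [2] P1                                              */
        { 2, 1500000 }, /* [3] P2                                              */
        { 0, 0 },       /* terminator (CPUFREQ_TABLE_END in the kernel)        */
    };
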
@@ -785,6 +850,30 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                 valid_states++;
         }
         freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+        if (max_boost_ratio) {
+                unsigned int state = data->first_perf_state;
+                unsigned int freq = freq_table[state].frequency;
+                /*
+                 * Because the loop above sorts the freq_table entries in the
+                 * descending order, freq is the maximum frequency in the table.
+                 * Assume that it corresponds to the CPPC nominal frequency and
+                 * use it to populate the frequency field of the extra "boost"
+                 * frequency entry.
+                 */
+                freq_table[0].frequency = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
+                /*
+                 * The purpose of the extra "boost" frequency entry is to make
+                 * the rest of cpufreq aware of the real maximum frequency, but
+                 * the way to request it is the same as for the first_perf_state
+                 * entry that is expected to cover the entire range of "boost"
+                 * frequencies of the CPU, so copy the driver_data value from
+                 * that entry.
+                 */
+                freq_table[0].driver_data = freq_table[state].driver_data;
+        }
         policy->freq_table = freq_table;
         perf->state = 0;
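
Continuing the illustrative numbers from above, with a 2,000,000 kHz nominal
entry and a ratio of 1536 the extra entry works out to

    freq_table[0].frequency = 2000000 * 1536 >> 10 = 3000000 kHz

which is the 3.0 GHz boost frequency that the rest of cpufreq (and therefore
policy->cpuinfo.max_freq) now advertises as the maximum.
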
@@ -858,8 +947,10 @@ static void acpi_cpufreq_cpu_ready(struct cpufreq_policy *policy)
 {
         struct acpi_processor_performance *perf = per_cpu_ptr(acpi_perf_data,
                                                               policy->cpu);
+        struct acpi_cpufreq_data *data = policy->driver_data;
+        unsigned int freq = policy->freq_table[data->first_perf_state].frequency;
-        if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
+        if (perf->states[0].core_frequency * 1000 != freq)
                 pr_warn(FW_WARN "P-state 0 is not max freq\n");
 }
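
The ready callback now compares against the first_perf_state entry instead of
policy->cpuinfo.max_freq, which would reflect the extra boost entry and warn
spuriously on every boost-capable system. ACPI reports core_frequency in MHz,
hence the conversion; with the example values above,

    2000 MHz * 1000 = 2000000 kHz == freq_table[first_perf_state].frequency

so no FW_WARN is printed.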