x86/amd: Detect preferred cores in amd_get_boost_ratio_numerator()
AMD systems that support preferred cores will use "166" as their numerator
for max frequency calculations instead of "255".

Add a function for detecting preferred cores by looking at the highest perf
value on all cores. If preferred cores are enabled, return 166; if they are
disabled, return the value in the highest perf register.

As the function will be called multiple times, cache the boost numerator and
whether preferred cores are enabled in global variables.

Reviewed-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
This commit is contained in:
parent 2819bfef64
commit 279f838a61
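For illustration only (this sketch is not part of the commit): the detection heuristic described above can be pictured as scanning the highest-perf value reported for each core and treating differing values as evidence that preferred cores are supported. The cpu_highest_perf() helper and its sample values below are hypothetical stand-ins for the per-CPU CPPC highest-perf reads.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for reading a CPU's CPPC highest-perf value. */
static unsigned int cpu_highest_perf(int cpu)
{
	/* Example values: CPU 0 is a preferred core, the others are not. */
	static const unsigned int sample[] = { 255, 231, 231, 206 };
	return sample[cpu];
}

/* True when at least two CPUs report different highest-perf values. */
static bool detect_prefcore(int nr_cpus, unsigned int *numerator)
{
	unsigned int first = cpu_highest_perf(0);

	*numerator = first;	/* without preferred cores, use the register value */
	for (int cpu = 1; cpu < nr_cpus; cpu++) {
		if (cpu_highest_perf(cpu) != first) {
			*numerator = 166;	/* CPPC_HIGHEST_PERF_PREFCORE */
			return true;
		}
	}
	return false;
}

int main(void)
{
	unsigned int numerator;
	bool prefcore = detect_prefcore(4, &numerator);

	printf("preferred cores %ssupported, boost numerator %u\n",
	       prefcore ? "" : "un", numerator);
	return 0;
}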
arch/x86/kernel/acpi/cppc.c

@@ -9,6 +9,16 @@
 #include <asm/processor.h>
 #include <asm/topology.h>
 
+#define CPPC_HIGHEST_PERF_PREFCORE	166
+
+enum amd_pref_core {
+	AMD_PREF_CORE_UNKNOWN = 0,
+	AMD_PREF_CORE_SUPPORTED,
+	AMD_PREF_CORE_UNSUPPORTED,
+};
+static enum amd_pref_core amd_pref_core_detected;
+static u64 boost_numerator;
+
 /* Refer to drivers/acpi/cppc_acpi.c for the description of functions */
 
 bool cpc_supported_by_cpu(void)
@@ -146,6 +156,66 @@ out:
 }
 EXPORT_SYMBOL_GPL(amd_get_highest_perf);
 
+/**
+ * amd_detect_prefcore: Detect if CPUs in the system support preferred cores
+ * @detected: Output variable for the result of the detection.
+ *
+ * Determine whether CPUs in the system support preferred cores. On systems
+ * that support preferred cores, different highest perf values will be found
+ * on different cores. On other systems, the highest perf value will be the
+ * same on all cores.
+ *
+ * The result of the detection will be stored in the 'detected' parameter.
+ *
+ * Return: 0 for success, negative error code otherwise
+ */
+int amd_detect_prefcore(bool *detected)
+{
+	int cpu, count = 0;
+	u64 highest_perf[2] = {0};
+
+	if (WARN_ON(!detected))
+		return -EINVAL;
+
+	switch (amd_pref_core_detected) {
+	case AMD_PREF_CORE_SUPPORTED:
+		*detected = true;
+		return 0;
+	case AMD_PREF_CORE_UNSUPPORTED:
+		*detected = false;
+		return 0;
+	default:
+		break;
+	}
+
+	for_each_present_cpu(cpu) {
+		u32 tmp;
+		int ret;
+
+		ret = amd_get_highest_perf(cpu, &tmp);
+		if (ret)
+			return ret;
+
+		if (!count || (count == 1 && tmp != highest_perf[0]))
+			highest_perf[count++] = tmp;
+
+		if (count == 2)
+			break;
+	}
+
+	*detected = (count == 2);
+	boost_numerator = highest_perf[0];
+
+	amd_pref_core_detected = *detected ? AMD_PREF_CORE_SUPPORTED :
+					     AMD_PREF_CORE_UNSUPPORTED;
+
+	pr_debug("AMD CPPC preferred core is %ssupported (highest perf: 0x%llx)\n",
+		 *detected ? "" : "un", highest_perf[0]);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(amd_detect_prefcore);
+
 /**
  * amd_get_boost_ratio_numerator: Get the numerator to use for boost ratio calculation
  * @cpu: CPU to get numerator for.
@@ -155,24 +225,27 @@ EXPORT_SYMBOL_GPL(amd_get_highest_perf);
  * a CPU. On systems that support preferred cores, this will be a hardcoded
  * value. On other systems this will the highest performance register value.
  *
+ * If booting the system with amd-pstate enabled but preferred cores disabled then
+ * the correct boost numerator will be returned to match hardware capabilities
+ * even if the preferred cores scheduling hints are not enabled.
+ *
  * Return: 0 for success, negative error code otherwise.
  */
 int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator)
 {
-	struct cpuinfo_x86 *c = &boot_cpu_data;
+	bool prefcore;
+	int ret;
 
-	if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
-			       (c->x86_model >= 0x70 && c->x86_model < 0x80))) {
-		*numerator = 166;
+	ret = amd_detect_prefcore(&prefcore);
+	if (ret)
+		return ret;
+
+	/* without preferred cores, return the highest perf register value */
+	if (!prefcore) {
+		*numerator = boost_numerator;
 		return 0;
 	}
-
-	if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
-			       (c->x86_model >= 0x40 && c->x86_model < 0x70))) {
-		*numerator = 166;
-		return 0;
-	}
-	*numerator = 255;
+	*numerator = CPPC_HIGHEST_PERF_PREFCORE;
 
 	return 0;
 }
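As a rough illustration of where the numerator ends up (a sketch under assumptions, not code from this commit or the kernel): consumers of amd_get_boost_ratio_numerator() effectively divide the numerator by the CPPC nominal perf to obtain a fixed-point boost ratio. The nominal_perf value and the SCALE factor below are made up for the example.

#include <stdint.h>
#include <stdio.h>

#define SCALE 1024	/* assumed fixed-point scale, in the spirit of SCHED_CAPACITY_SCALE */

int main(void)
{
	uint64_t numerator = 166;	/* e.g. returned by amd_get_boost_ratio_numerator() */
	uint64_t nominal_perf = 124;	/* assumed CPPC nominal_perf reading */
	uint64_t boost_ratio = numerator * SCALE / nominal_perf;

	/* Print the ratio both as a fixed-point value and as a multiplier. */
	printf("boost ratio = %llu/%d (~%.2fx)\n",
	       (unsigned long long)boost_ratio, SCALE,
	       (double)boost_ratio / SCALE);
	return 0;
}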
drivers/cpufreq/amd-pstate.c

@@ -815,32 +815,18 @@ static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn);
 
 static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
 {
-	int ret, prio;
-	u32 highest_perf;
-
-	ret = amd_get_highest_perf(cpudata->cpu, &highest_perf);
-	if (ret)
+	/* user disabled or not detected */
+	if (!amd_pstate_prefcore)
 		return;
 
 	cpudata->hw_prefcore = true;
-	/* check if CPPC preferred core feature is enabled*/
-	if (highest_perf < CPPC_MAX_PERF)
-		prio = (int)highest_perf;
-	else {
-		pr_debug("AMD CPPC preferred core is unsupported!\n");
-		cpudata->hw_prefcore = false;
-		return;
-	}
-
-	if (!amd_pstate_prefcore)
-		return;
-
+
 	/*
 	 * The priorities can be set regardless of whether or not
 	 * sched_set_itmt_support(true) has been called and it is valid to
 	 * update them at any time after it has been called.
 	 */
-	sched_set_itmt_core_prio(prio, cpudata->cpu);
+	sched_set_itmt_core_prio((int)READ_ONCE(cpudata->highest_perf), cpudata->cpu);
 
 	schedule_work(&sched_prefcore_work);
 }
@@ -1011,12 +997,12 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 
 	cpudata->cpu = policy->cpu;
 
-	amd_pstate_init_prefcore(cpudata);
-
 	ret = amd_pstate_init_perf(cpudata);
 	if (ret)
 		goto free_cpudata1;
 
+	amd_pstate_init_prefcore(cpudata);
+
 	ret = amd_pstate_init_freq(cpudata);
 	if (ret)
 		goto free_cpudata1;
@@ -1466,12 +1452,12 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
 	cpudata->cpu = policy->cpu;
 	cpudata->epp_policy = 0;
 
-	amd_pstate_init_prefcore(cpudata);
-
 	ret = amd_pstate_init_perf(cpudata);
 	if (ret)
 		goto free_cpudata1;
 
+	amd_pstate_init_prefcore(cpudata);
+
 	ret = amd_pstate_init_freq(cpudata);
 	if (ret)
 		goto free_cpudata1;
@@ -1916,6 +1902,12 @@ static int __init amd_pstate_init(void)
 		static_call_update(amd_pstate_update_perf, cppc_update_perf);
 	}
 
+	if (amd_pstate_prefcore) {
+		ret = amd_detect_prefcore(&amd_pstate_prefcore);
+		if (ret)
+			return ret;
+	}
+
 	/* enable amd pstate feature */
 	ret = amd_pstate_enable(true);
 	if (ret) {
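The amd_pstate_init() hunk above runs detection only while the amd_pstate_prefcore toggle is still set, so a user opt-out always wins and detection can only clear the flag. A stand-alone sketch of that precedence, with prefcore_param and the detect_prefcore() stub as illustrative stand-ins rather than the driver's code:

#include <stdbool.h>
#include <stdio.h>

static bool prefcore_param = true;	/* stand-in for the amd_pstate_prefcore toggle */

/* Stub detection: pretend the hardware does not support preferred cores. */
static int detect_prefcore(bool *detected)
{
	*detected = false;
	return 0;
}

int main(void)
{
	/* Detection runs only if the user left the feature enabled... */
	if (prefcore_param) {
		int ret = detect_prefcore(&prefcore_param);

		if (ret)
			return ret;
	}
	/* ...and its result can only clear the flag, never override a user opt-out. */
	printf("preferred core handling %s\n", prefcore_param ? "enabled" : "disabled");
	return 0;
}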
include/acpi/cppc_acpi.h

@@ -161,6 +161,7 @@ extern int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps);
 extern int cppc_set_auto_sel(int cpu, bool enable);
 extern int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf);
 extern int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator);
+extern int amd_detect_prefcore(bool *detected);
 #else /* !CONFIG_ACPI_CPPC_LIB */
 static inline int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
 {
@@ -242,6 +243,10 @@ static inline int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator
 {
 	return -EOPNOTSUPP;
 }
+static inline int amd_detect_prefcore(bool *detected)
+{
+	return -ENODEV;
+}
 #endif /* !CONFIG_ACPI_CPPC_LIB */
 
 #endif /* _CPPC_ACPI_H*/