Merge tag 'x86_cpu_for_v6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cpuid updates from Borislav Petkov:

 - Add a feature flag which denotes AMD CPUs supporting workload
   classification with the purpose of using such hints when making
   scheduling decisions (a userspace probe of the new CPUID bits is
   sketched after the shortlog below)

 - Determine the boost enumerator for each AMD core based on its type:
   efficiency or performance, in the cppc driver

 - Add the type of a CPU to the topology CPU descriptor with the goal of
   supporting and making decisions based on the type of the respective
   core

 - Add a feature flag to denote AMD cores which have heterogeneous
   topology and enable SD_ASYM_PACKING for those

 - Check microcode revisions before disabling PCID on Intel

 - Cleanups and fixlets

* tag 'x86_cpu_for_v6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/cpu: Remove redundant CONFIG_NUMA guard around numa_add_cpu()
  x86/cpu: Fix FAM5_QUARK_X1000 to use X86_MATCH_VFM()
  x86/cpu: Fix formatting of cpuid_bits[] in scattered.c
  x86/cpufeatures: Add X86_FEATURE_AMD_WORKLOAD_CLASS feature bit
  x86/amd: Use heterogeneous core topology for identifying boost numerator
  x86/cpu: Add CPU type to struct cpuinfo_topology
  x86/cpu: Enable SD_ASYM_PACKING for PKG domain on AMD
  x86/cpufeatures: Add X86_FEATURE_AMD_HETEROGENEOUS_CORES
  x86/cpufeatures: Rename X86_FEATURE_FAST_CPPC to have AMD prefix
  x86/mm: Don't disable PCID when INVLPG has been fixed by microcode
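
A minimal userspace sketch of the new enumeration, illustrative only and not kernel code (it assumes a GCC/Clang toolchain providing <cpuid.h>): it reads the same CPUID bits that the cpuid_bits[] scattered-feature table further down scans -- leaf 0x80000007 EDX[15] (fast CPPC), leaf 0x80000021 EAX[22] (workload classification) and leaf 0x80000026 EAX[30] (heterogeneous core topology).

/* probe-amd-cpu-bits.c: illustrative userspace probe, not kernel code. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, max_ext;

	/* Highest supported extended CPUID leaf */
	__cpuid(0x80000000, max_ext, ebx, ecx, edx);

	if (max_ext >= 0x80000007) {
		__cpuid_count(0x80000007, 0, eax, ebx, ecx, edx);
		printf("AMD_FAST_CPPC           (0x80000007 EDX[15]): %u\n",
		       (edx >> 15) & 1);
	}
	if (max_ext >= 0x80000021) {
		__cpuid_count(0x80000021, 0, eax, ebx, ecx, edx);
		printf("AMD_WORKLOAD_CLASS      (0x80000021 EAX[22]): %u\n",
		       (eax >> 22) & 1);
	}
	if (max_ext >= 0x80000026) {
		__cpuid_count(0x80000026, 0, eax, ebx, ecx, edx);
		printf("AMD_HETEROGENEOUS_CORES (0x80000026 EAX[30]): %u\n",
		       (eax >> 30) & 1);
	}
	return 0;
}
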
Linus Torvalds 2024-11-19 12:27:19 -08:00
commit d8d78a90e7
18 changed files with 149 additions and 49 deletions

View File

@@ -473,7 +473,9 @@
#define X86_FEATURE_BHI_CTRL (21*32+ 2) /* BHI_DIS_S HW control available */
#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* BHI_DIS_S HW control enabled */
#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* Clear branch history at vmexit using SW loop */
-#define X86_FEATURE_FAST_CPPC (21*32 + 5) /* AMD Fast CPPC */
+#define X86_FEATURE_AMD_FAST_CPPC (21*32 + 5) /* Fast CPPC */
+#define X86_FEATURE_AMD_HETEROGENEOUS_CORES (21*32 + 6) /* Heterogeneous Core Topology */
+#define X86_FEATURE_AMD_WORKLOAD_CLASS (21*32 + 7) /* Workload Classification */
/*
* BUG word(s)

View File

@@ -177,10 +177,15 @@
#define INTEL_XEON_PHI_KNM IFM(6, 0x85) /* Knights Mill */
/* Family 5 */
-#define INTEL_FAM5_QUARK_X1000 0x09 /* Quark X1000 SoC */
+#define INTEL_QUARK_X1000 IFM(5, 0x09) /* Quark X1000 SoC */
/* Family 19 */
#define INTEL_PANTHERCOVE_X IFM(19, 0x01) /* Diamond Rapids */
+/* CPU core types */
+enum intel_cpu_type {
+	INTEL_CPU_TYPE_ATOM = 0x20,
+	INTEL_CPU_TYPE_CORE = 0x40,
+};
#endif /* _ASM_X86_INTEL_FAMILY_H */

View File

@@ -105,6 +105,24 @@ struct cpuinfo_topology {
// Cache level topology IDs
u32 llc_id;
u32 l2c_id;
+
+	// Hardware defined CPU-type
+	union {
+		u32		cpu_type;
+		struct {
+			// CPUID.1A.EAX[23-0]
+			u32	intel_native_model_id	:24;
+			// CPUID.1A.EAX[31-24]
+			u32	intel_type		:8;
+		};
+		struct {
+			// CPUID 0x80000026.EBX
+			u32	amd_num_processors	:16,
+				amd_power_eff_ranking	:8,
+				amd_native_model_id	:4,
+				amd_type		:4;
+		};
+	};
};
struct cpuinfo_x86 {
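
A standalone sketch of how the new cpu_type union decodes, for illustration only (it copies the layout above and relies on x86 compilers allocating bitfields from the least significant bit upward, as GCC and Clang do); the raw EBX value fed in is hypothetical.

/* cpu-type-union-demo.c: standalone copy of the union above, illustration only. */
#include <stdint.h>
#include <stdio.h>

union cpu_type_demo {
	uint32_t cpu_type;
	struct {			/* CPUID.1A.EAX (Intel) */
		uint32_t intel_native_model_id :24;
		uint32_t intel_type            : 8;
	};
	struct {			/* CPUID 0x80000026.EBX (AMD) */
		uint32_t amd_num_processors    :16,
			 amd_power_eff_ranking : 8,
			 amd_native_model_id   : 4,
			 amd_type              : 4;
	};
};

int main(void)
{
	/* hypothetical EBX: type 1, native model 0, ranking 200, 16 processors */
	union cpu_type_demo t = { .cpu_type = 0x10c80010 };

	printf("amd_type=%u native_model=%u ranking=%u nr_processors=%u\n",
	       (unsigned)t.amd_type, (unsigned)t.amd_native_model_id,
	       (unsigned)t.amd_power_eff_ranking, (unsigned)t.amd_num_processors);
	return 0;
}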

View File

@@ -114,6 +114,12 @@ enum x86_topology_domains {
TOPO_MAX_DOMAIN,
};
+enum x86_topology_cpu_type {
+	TOPO_CPU_TYPE_PERFORMANCE,
+	TOPO_CPU_TYPE_EFFICIENCY,
+	TOPO_CPU_TYPE_UNKNOWN,
+};
+
struct x86_topology_system {
unsigned int dom_shifts[TOPO_MAX_DOMAIN];
unsigned int dom_size[TOPO_MAX_DOMAIN];
@@ -149,6 +155,9 @@ extern unsigned int __max_threads_per_core;
extern unsigned int __num_threads_per_package;
extern unsigned int __num_cores_per_package;
+const char *get_topology_cpu_type_name(struct cpuinfo_x86 *c);
+enum x86_topology_cpu_type get_topology_cpu_type(struct cpuinfo_x86 *c);
static inline unsigned int topology_max_packages(void)
{
return __max_logical_packages;

View File

@@ -239,8 +239,10 @@ EXPORT_SYMBOL_GPL(amd_detect_prefcore);
*/
int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator)
{
+	enum x86_topology_cpu_type core_type = get_topology_cpu_type(&cpu_data(cpu));
bool prefcore;
int ret;
+	u32 tmp;
ret = amd_detect_prefcore(&prefcore);
if (ret)
@@ -266,6 +268,27 @@ int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator)
break;
}
}
+
+	/* detect if running on heterogeneous design */
+	if (cpu_feature_enabled(X86_FEATURE_AMD_HETEROGENEOUS_CORES)) {
+		switch (core_type) {
+		case TOPO_CPU_TYPE_UNKNOWN:
+			pr_warn("Undefined core type found for cpu %d\n", cpu);
+			break;
+		case TOPO_CPU_TYPE_PERFORMANCE:
+			/* use the max scale for performance cores */
+			*numerator = CPPC_HIGHEST_PERF_PERFORMANCE;
+			return 0;
+		case TOPO_CPU_TYPE_EFFICIENCY:
+			/* use the highest perf value for efficiency cores */
+			ret = amd_get_highest_perf(cpu, &tmp);
+			if (ret)
+				return ret;
+			*numerator = tmp;
+			return 0;
+		}
+	}
+
*numerator = CPPC_HIGHEST_PERF_PREFCORE;
return 0;

View File

@@ -1906,9 +1906,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
/* Init Machine Check Exception if available. */
mcheck_cpu_init(c);
-#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
-#endif
}
/*

View File

@@ -22,6 +22,7 @@ static int cpu_debug_show(struct seq_file *m, void *p)
seq_printf(m, "die_id: %u\n", c->topo.die_id);
seq_printf(m, "cu_id: %u\n", c->topo.cu_id);
seq_printf(m, "core_id: %u\n", c->topo.core_id);
seq_printf(m, "cpu_type: %s\n", get_topology_cpu_type_name(c));
seq_printf(m, "logical_pkg_id: %u\n", c->topo.logical_pkg_id);
seq_printf(m, "logical_die_id: %u\n", c->topo.logical_die_id);
seq_printf(m, "llc_id: %u\n", c->topo.llc_id);

View File

@@ -24,34 +24,36 @@ struct cpuid_bit {
* levels are different and there is a separate entry for each.
*/
static const struct cpuid_bit cpuid_bits[] = {
-	{ X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
-	{ X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
-	{ X86_FEATURE_INTEL_PPIN, CPUID_EBX, 0, 0x00000007, 1 },
-	{ X86_FEATURE_RRSBA_CTRL, CPUID_EDX, 2, 0x00000007, 2 },
-	{ X86_FEATURE_BHI_CTRL, CPUID_EDX, 4, 0x00000007, 2 },
-	{ X86_FEATURE_CQM_LLC, CPUID_EDX, 1, 0x0000000f, 0 },
-	{ X86_FEATURE_CQM_OCCUP_LLC, CPUID_EDX, 0, 0x0000000f, 1 },
-	{ X86_FEATURE_CQM_MBM_TOTAL, CPUID_EDX, 1, 0x0000000f, 1 },
-	{ X86_FEATURE_CQM_MBM_LOCAL, CPUID_EDX, 2, 0x0000000f, 1 },
-	{ X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 },
-	{ X86_FEATURE_CAT_L2, CPUID_EBX, 2, 0x00000010, 0 },
-	{ X86_FEATURE_CDP_L3, CPUID_ECX, 2, 0x00000010, 1 },
-	{ X86_FEATURE_CDP_L2, CPUID_ECX, 2, 0x00000010, 2 },
-	{ X86_FEATURE_MBA, CPUID_EBX, 3, 0x00000010, 0 },
-	{ X86_FEATURE_PER_THREAD_MBA, CPUID_ECX, 0, 0x00000010, 3 },
-	{ X86_FEATURE_SGX1, CPUID_EAX, 0, 0x00000012, 0 },
-	{ X86_FEATURE_SGX2, CPUID_EAX, 1, 0x00000012, 0 },
-	{ X86_FEATURE_SGX_EDECCSSA, CPUID_EAX, 11, 0x00000012, 0 },
-	{ X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 },
-	{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
-	{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
-	{ X86_FEATURE_FAST_CPPC, CPUID_EDX, 15, 0x80000007, 0 },
-	{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
-	{ X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 },
-	{ X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 },
-	{ X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 },
-	{ X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 },
+	{ X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
+	{ X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
+	{ X86_FEATURE_INTEL_PPIN, CPUID_EBX, 0, 0x00000007, 1 },
+	{ X86_FEATURE_RRSBA_CTRL, CPUID_EDX, 2, 0x00000007, 2 },
+	{ X86_FEATURE_BHI_CTRL, CPUID_EDX, 4, 0x00000007, 2 },
+	{ X86_FEATURE_CQM_LLC, CPUID_EDX, 1, 0x0000000f, 0 },
+	{ X86_FEATURE_CQM_OCCUP_LLC, CPUID_EDX, 0, 0x0000000f, 1 },
+	{ X86_FEATURE_CQM_MBM_TOTAL, CPUID_EDX, 1, 0x0000000f, 1 },
+	{ X86_FEATURE_CQM_MBM_LOCAL, CPUID_EDX, 2, 0x0000000f, 1 },
+	{ X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 },
+	{ X86_FEATURE_CAT_L2, CPUID_EBX, 2, 0x00000010, 0 },
+	{ X86_FEATURE_CDP_L3, CPUID_ECX, 2, 0x00000010, 1 },
+	{ X86_FEATURE_CDP_L2, CPUID_ECX, 2, 0x00000010, 2 },
+	{ X86_FEATURE_MBA, CPUID_EBX, 3, 0x00000010, 0 },
+	{ X86_FEATURE_PER_THREAD_MBA, CPUID_ECX, 0, 0x00000010, 3 },
+	{ X86_FEATURE_SGX1, CPUID_EAX, 0, 0x00000012, 0 },
+	{ X86_FEATURE_SGX2, CPUID_EAX, 1, 0x00000012, 0 },
+	{ X86_FEATURE_SGX_EDECCSSA, CPUID_EAX, 11, 0x00000012, 0 },
+	{ X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 },
+	{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
+	{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
+	{ X86_FEATURE_AMD_FAST_CPPC, CPUID_EDX, 15, 0x80000007, 0 },
+	{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
+	{ X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 },
+	{ X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 },
+	{ X86_FEATURE_AMD_WORKLOAD_CLASS, CPUID_EAX, 22, 0x80000021, 0 },
+	{ X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 },
+	{ X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 },
{ X86_FEATURE_AMD_LBR_PMC_FREEZE, CPUID_EAX, 2, 0x80000022, 0 },
+	{ X86_FEATURE_AMD_HETEROGENEOUS_CORES, CPUID_EAX, 30, 0x80000026, 0 },
{ 0, 0, 0, 0, 0 }
};

View File

@@ -182,6 +182,9 @@ static void parse_topology_amd(struct topo_scan *tscan)
if (cpu_feature_enabled(X86_FEATURE_TOPOEXT))
has_topoext = cpu_parse_topology_ext(tscan);
+	if (cpu_feature_enabled(X86_FEATURE_AMD_HETEROGENEOUS_CORES))
+		tscan->c->topo.cpu_type = cpuid_ebx(0x80000026);
+
if (!has_topoext && !parse_8000_0008(tscan))
return;

View File

@@ -3,6 +3,7 @@
#include <xen/xen.h>
+#include <asm/intel-family.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/smp.h>
@@ -27,6 +28,36 @@ void topology_set_dom(struct topo_scan *tscan, enum x86_topology_domains dom,
}
}
+enum x86_topology_cpu_type get_topology_cpu_type(struct cpuinfo_x86 *c)
+{
+	if (c->x86_vendor == X86_VENDOR_INTEL) {
+		switch (c->topo.intel_type) {
+		case INTEL_CPU_TYPE_ATOM: return TOPO_CPU_TYPE_EFFICIENCY;
+		case INTEL_CPU_TYPE_CORE: return TOPO_CPU_TYPE_PERFORMANCE;
+		}
+	}
+	if (c->x86_vendor == X86_VENDOR_AMD) {
+		switch (c->topo.amd_type) {
+		case 0: return TOPO_CPU_TYPE_PERFORMANCE;
+		case 1: return TOPO_CPU_TYPE_EFFICIENCY;
+		}
+	}
+	return TOPO_CPU_TYPE_UNKNOWN;
+}
+
+const char *get_topology_cpu_type_name(struct cpuinfo_x86 *c)
+{
+	switch (get_topology_cpu_type(c)) {
+	case TOPO_CPU_TYPE_PERFORMANCE:
+		return "performance";
+	case TOPO_CPU_TYPE_EFFICIENCY:
+		return "efficiency";
+	default:
+		return "unknown";
+	}
+}
+
static unsigned int __maybe_unused parse_num_cores_legacy(struct cpuinfo_x86 *c)
{
struct {
@@ -87,6 +118,7 @@ static void parse_topology(struct topo_scan *tscan, bool early)
.cu_id = 0xff,
.llc_id = BAD_APICID,
.l2c_id = BAD_APICID,
+		.cpu_type = TOPO_CPU_TYPE_UNKNOWN,
};
struct cpuinfo_x86 *c = tscan->c;
struct {
@@ -132,6 +164,8 @@ static void parse_topology(struct topo_scan *tscan, bool early)
case X86_VENDOR_INTEL:
if (!IS_ENABLED(CONFIG_CPU_SUP_INTEL) || !cpu_parse_topology_ext(tscan))
parse_legacy(tscan);
+		if (c->cpuid_level >= 0x1a)
+			c->topo.cpu_type = cpuid_eax(0x1a);
break;
case X86_VENDOR_HYGON:
if (IS_ENABLED(CONFIG_CPU_SUP_HYGON))

View File

@@ -497,8 +497,9 @@ static int x86_cluster_flags(void)
static int x86_die_flags(void)
{
-	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
-		return x86_sched_itmt_flags();
+	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU) ||
+	    cpu_feature_enabled(X86_FEATURE_AMD_HETEROGENEOUS_CORES))
+		return x86_sched_itmt_flags();
return 0;
}

View File

@@ -263,28 +263,33 @@ static void __init probe_page_size_mask(void)
}
/*
- * INVLPG may not properly flush Global entries
- * on these CPUs when PCIDs are enabled.
+ * INVLPG may not properly flush Global entries on
+ * these CPUs. New microcode fixes the issue.
*/
static const struct x86_cpu_id invlpg_miss_ids[] = {
-	X86_MATCH_VFM(INTEL_ALDERLAKE, 0),
-	X86_MATCH_VFM(INTEL_ALDERLAKE_L, 0),
-	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, 0),
-	X86_MATCH_VFM(INTEL_RAPTORLAKE, 0),
-	X86_MATCH_VFM(INTEL_RAPTORLAKE_P, 0),
-	X86_MATCH_VFM(INTEL_RAPTORLAKE_S, 0),
+	X86_MATCH_VFM(INTEL_ALDERLAKE, 0x2e),
+	X86_MATCH_VFM(INTEL_ALDERLAKE_L, 0x42c),
+	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, 0x11),
+	X86_MATCH_VFM(INTEL_RAPTORLAKE, 0x118),
+	X86_MATCH_VFM(INTEL_RAPTORLAKE_P, 0x4117),
+	X86_MATCH_VFM(INTEL_RAPTORLAKE_S, 0x2e),
{}
};
static void setup_pcid(void)
{
+	const struct x86_cpu_id *invlpg_miss_match;
+
if (!IS_ENABLED(CONFIG_X86_64))
return;
if (!boot_cpu_has(X86_FEATURE_PCID))
return;
-	if (x86_match_cpu(invlpg_miss_ids)) {
+	invlpg_miss_match = x86_match_cpu(invlpg_miss_ids);
+
+	if (invlpg_miss_match &&
+	    boot_cpu_data.microcode < invlpg_miss_match->driver_data) {
pr_info("Incomplete global flushes, disabling PCID");
setup_clear_cpu_cap(X86_FEATURE_PCID);
return;
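
As an illustrative userspace companion to the change above (not part of the patch; it assumes the microcode loader's sysfs file at /sys/devices/system/cpu/cpu0/microcode/version is present), the sketch below prints the family/model signature and the loaded microcode revision that the reworked setup_pcid() compares against the per-model cutoffs in invlpg_miss_ids[].driver_data.

/* pcid-microcode-check.c: illustrative userspace helper, not kernel code. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int base_fam, fam, model, rev = 0;
	FILE *f;

	__cpuid(1, eax, ebx, ecx, edx);
	base_fam = (eax >> 8) & 0xf;
	fam      = base_fam;
	model    = (eax >> 4) & 0xf;
	if (base_fam == 0xf)
		fam += (eax >> 20) & 0xff;
	if (base_fam == 0x6 || base_fam == 0xf)
		model |= ((eax >> 16) & 0xf) << 4;

	/* Revision of the currently loaded microcode, as exposed by the kernel */
	f = fopen("/sys/devices/system/cpu/cpu0/microcode/version", "r");
	if (f) {
		if (fscanf(f, "%x", &rev) != 1)
			rev = 0;
		fclose(f);
	}

	printf("family 0x%x model 0x%x microcode 0x%x\n", fam, model, rev);
	printf("PCID now stays enabled when this revision meets the cutoff for the model\n");
	return 0;
}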

View File

@@ -656,8 +656,7 @@ static int qrk_capsule_setup_info(struct capsule_info *cap_info, void **pkbuff,
}
static const struct x86_cpu_id efi_capsule_quirk_ids[] = {
-	X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000,
-				   &qrk_capsule_setup_info),
+	X86_MATCH_VFM(INTEL_QUARK_X1000, &qrk_capsule_setup_info),
{ }
};

View File

@@ -569,7 +569,7 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
}
static const struct x86_cpu_id imr_ids[] __initconst = {
-	X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000, NULL),
+	X86_MATCH_VFM(INTEL_QUARK_X1000, NULL),
{}
};

View File

@@ -105,7 +105,7 @@ static void __init imr_self_test(void)
}
static const struct x86_cpu_id imr_ids[] __initconst = {
-	X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000, NULL),
+	X86_MATCH_VFM(INTEL_QUARK_X1000, NULL),
{}
};

View File

@@ -850,7 +850,7 @@ static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
transition_delay_ns = cppc_get_transition_latency(cpu);
if (transition_delay_ns == CPUFREQ_ETERNAL) {
-		if (cpu_feature_enabled(X86_FEATURE_FAST_CPPC))
+		if (cpu_feature_enabled(X86_FEATURE_AMD_FAST_CPPC))
return AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY;
else
return AMD_PSTATE_TRANSITION_DELAY;

View File

@@ -401,7 +401,7 @@ err_ret:
}
static const struct x86_cpu_id qrk_thermal_ids[] __initconst = {
-	X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000, NULL),
+	X86_MATCH_VFM(INTEL_QUARK_X1000, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, qrk_thermal_ids);

View File

@@ -472,7 +472,7 @@
#define X86_FEATURE_BHI_CTRL (21*32+ 2) /* BHI_DIS_S HW control available */
#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* BHI_DIS_S HW control enabled */
#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* Clear branch history at vmexit using SW loop */
-#define X86_FEATURE_FAST_CPPC (21*32 + 5) /* AMD Fast CPPC */
+#define X86_FEATURE_AMD_FAST_CPPC (21*32 + 5) /* AMD Fast CPPC */
/*
* BUG word(s)