Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-12-20 09:34:44 +08:00)
8fa590bf34
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm updates from Paolo Bonzini:
 "ARM64:

   - Enable the per-vcpu dirty-ring tracking mechanism, together with an
     option to keep the good old dirty log around for pages that are
     dirtied by something other than a vcpu.

   - Switch to the relaxed parallel fault handling, using RCU to delay
     page table reclaim and giving better performance under load.

   - Relax the MTE ABI, allowing a VMM to use the MAP_SHARED mapping
     option, which multi-process VMMs such as crosvm rely on (see merge
     commit 382b5b87a9: "Fix a number of issues with MTE, such as races
     on the tags being initialised vs the PG_mte_tagged flag as well as
     the lack of support for VM_SHARED when KVM is involved. Patches
     from Catalin Marinas and Peter Collingbourne").

   - Merge the pKVM shadow vcpu state tracking that allows the
     hypervisor to have its own view of a vcpu, keeping that state
     private.

   - Add support for the PMUv3p5 architecture revision, bringing support
     for 64bit counters on systems that support it, and fix the
     not-quite-compliant CHAIN-ed counter support for the machines that
     actually exist out there.

   - Fix a handful of minor issues around 52bit VA/PA support (64kB
     pages only) as a prefix of the upcoming support for 4kB and 16kB
     pages.

   - Pick a small set of documentation and spelling fixes, because no
     good merge window would be complete without those.

  s390:

   - Second batch of the lazy destroy patches

   - First batch of KVM changes for kernel virtual != physical address
     support

   - Removal of an unused function

  x86:

   - Allow compiling out SMM support

   - Cleanup and documentation of SMM state save area format

   - Preserve interrupt shadow in SMM state save area

   - Respond to generic signals during slow page faults

   - Fixes and optimizations for the non-executable huge page errata
     fix.

   - Reprogram all performance counters on PMU filter change

   - Cleanups to Hyper-V emulation and tests

   - Process Hyper-V TLB flushes from a nested guest (i.e. from an L2
     guest running on top of an L1 Hyper-V hypervisor)

   - Advertise several new Intel features

   - x86 Xen-for-KVM:

      - Allow the Xen runstate information to cross a page boundary

      - Allow XEN_RUNSTATE_UPDATE flag behaviour to be configured

      - Add support for 32-bit guests in SCHEDOP_poll

   - Notable x86 fixes and cleanups:

      - One-off fixes for various emulation flows (SGX, VMXON, NRIPS=0).

      - Reinstate IBPB on emulated VM-Exit that was incorrectly dropped
        a few years back when eliminating unnecessary barriers when
        switching between vmcs01 and vmcs02.

      - Clean up vmread_error_trampoline() to make it more obvious that
        params must be passed on the stack, even for x86-64.

      - Let userspace set all supported bits in MSR_IA32_FEAT_CTL
        irrespective of the current guest CPUID.

      - Fudge around a race with TSC refinement that results in KVM
        incorrectly thinking a guest needs TSC scaling when running on
        a CPU with a constant TSC, but no hardware-enumerated TSC
        frequency.

      - Advertise (on AMD) that the SMM_CTL MSR is not supported

      - Remove unnecessary exports

  Generic:

   - Support for responding to signals during page faults; introduces
     new FOLL_INTERRUPTIBLE flag that was reviewed by mm folks

  Selftests:

   - Fix an inverted check in the access tracking perf test, and restore
     support for asserting that there aren't too many idle pages when
     running on bare metal.

   - Fix build errors that occur in certain setups (unsure exactly what
     is unique about the problematic setup) due to glibc overriding
     static_assert() to a variant that requires a custom message.

   - Introduce actual atomics for clear/set_bit() in selftests

   - Add support for pinning vCPUs in dirty_log_perf_test.

   - Rename the so-called "perf_util" framework to "memstress".

   - Add a lightweight pseudo-RNG for guest use, and use it to randomize
     the access pattern and write vs. read percentage in the memstress
     tests.

   - Add a common ucall implementation; code dedup and pre-work for
     running SEV (and beyond) guests in selftests.

   - Provide a common constructor and arch hook, which will eventually
     be used by x86 to automatically select the right hypercall (AMD vs.
     Intel).

   - A bunch of added/enabled/fixed selftests for ARM64, covering
     memslots, breakpoints, stage-2 faults and access tracking.

   - x86-specific selftest changes:

      - Clean up x86's page table management.

      - Clean up and enhance the "smaller maxphyaddr" test, and add a
        related test to cover generic emulation failure.

      - Clean up the nEPT support checks.

      - Add X86_PROPERTY_* framework to retrieve multi-bit CPUID values.

      - Fix an ordering issue in the AMX test introduced by recent
        conversions to use kvm_cpu_has(), and harden the code to guard
        against similar bugs in the future. Anything that triggers
        caching of KVM's supported CPUID, kvm_cpu_has() in this case,
        effectively hides opt-in XSAVE features if the caching occurs
        before the test opts in via prctl().

  Documentation:

   - Remove deleted ioctls from documentation

   - Clean up the docs for the x86 MSR filter.

   - Various fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (361 commits)
  KVM: x86: Add proper ReST tables for userspace MSR exits/flags
  KVM: selftests: Allocate ucall pool from MEM_REGION_DATA
  KVM: arm64: selftests: Align VA space allocator with TTBR0
  KVM: arm64: Fix benign bug with incorrect use of VA_BITS
  KVM: arm64: PMU: Fix period computation for 64bit counters with 32bit overflow
  KVM: x86: Advertise that the SMM_CTL MSR is not supported
  KVM: x86: remove unnecessary exports
  KVM: selftests: Fix spelling mistake "probabalistic" -> "probabilistic"
  tools: KVM: selftests: Convert clear/set_bit() to actual atomics
  tools: Drop "atomic_" prefix from atomic test_and_set_bit()
  tools: Drop conflicting non-atomic test_and_{clear,set}_bit() helpers
  KVM: selftests: Use non-atomic clear/set bit helpers in KVM tests
  perf tools: Use dedicated non-atomic clear/set bit helpers
  tools: Take @bit as an "unsigned long" in {clear,set}_bit() helpers
  KVM: arm64: selftests: Enable single-step without a "full" ucall()
  KVM: x86: fix APICv/x2AVIC disabled when vm reboot by itself
  KVM: Remove stale comment about KVM_REQ_UNHALT
  KVM: Add missing arch for KVM_CREATE_DEVICE and KVM_{SET,GET}_DEVICE_ATTR
  KVM: Reference to kvm_userspace_memory_region in doc and comments
  KVM: Delete all references to removed KVM_SET_MEMORY_ALIAS ioctl
  ...
209 lines
6.5 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_REVERSE_CPUID_H
#define ARCH_X86_KVM_REVERSE_CPUID_H

#include <uapi/asm/kvm.h>
#include <asm/cpufeature.h>
#include <asm/cpufeatures.h>

/*
 * Hardware-defined CPUID leafs that are either scattered by the kernel or are
 * unknown to the kernel, but need to be directly used by KVM.  Note, these
 * word values conflict with the kernel's "bug" caps, but KVM doesn't use those.
 */
enum kvm_only_cpuid_leafs {
        CPUID_12_EAX     = NCAPINTS,
        CPUID_7_1_EDX,
        NR_KVM_CPU_CAPS,

        NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
};

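/*
 * NKVMCAPINTS is the number of KVM-only feature words appended after the
 * kernel's NCAPINTS words, currently two (CPUID_12_EAX and CPUID_7_1_EDX).
 */
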
/*
 * Define a KVM-only feature flag.
 *
 * For features that are scattered by cpufeatures.h, __feature_translate() also
 * needs to be updated to translate the kernel-defined feature into the
 * KVM-defined feature.
 *
 * For features that are 100% KVM-only, i.e. not defined by cpufeatures.h,
 * forego the intermediate KVM_X86_FEATURE and directly define X86_FEATURE_* so
 * that X86_FEATURE_* can be used in KVM.  No __feature_translate() handling is
 * needed in this case.
 */
#define KVM_X86_FEATURE(w, f)           ((w)*32 + (f))

/* Intel-defined SGX sub-features, CPUID level 0x12 (EAX). */
#define KVM_X86_FEATURE_SGX1            KVM_X86_FEATURE(CPUID_12_EAX, 0)
#define KVM_X86_FEATURE_SGX2            KVM_X86_FEATURE(CPUID_12_EAX, 1)
#define KVM_X86_FEATURE_SGX_EDECCSSA    KVM_X86_FEATURE(CPUID_12_EAX, 11)

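/*
 * For example, KVM_X86_FEATURE_SGX2 above expands to CPUID_12_EAX * 32 + 1,
 * i.e. bit 1 of the KVM-only CPUID_12_EAX word, mirroring the SGX2 bit in
 * CPUID.0x12.0:EAX.
 */
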
/* Intel-defined sub-features, CPUID level 0x00000007:1 (EDX) */
#define X86_FEATURE_AVX_VNNI_INT8       KVM_X86_FEATURE(CPUID_7_1_EDX, 4)
#define X86_FEATURE_AVX_NE_CONVERT      KVM_X86_FEATURE(CPUID_7_1_EDX, 5)
#define X86_FEATURE_PREFETCHITI         KVM_X86_FEATURE(CPUID_7_1_EDX, 14)

struct cpuid_reg {
        u32 function;
        u32 index;
        int reg;
};

static const struct cpuid_reg reverse_cpuid[] = {
        [CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
        [CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
        [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
        [CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
        [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
        [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
        [CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
        [CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
        [CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
        [CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
        [CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
        [CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
        [CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
        [CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
        [CPUID_7_1_EAX]       = {         7, 1, CPUID_EAX},
        [CPUID_12_EAX]        = {0x00000012, 0, CPUID_EAX},
        [CPUID_8000_001F_EAX] = {0x8000001f, 0, CPUID_EAX},
        [CPUID_7_1_EDX]       = {         7, 1, CPUID_EDX},
};

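/*
 * Each entry maps a feature word back to the CPUID leaf it comes from: the
 * function and index passed to CPUID plus the output register.  E.g. the
 * CPUID_7_1_EDX entry above resolves to CPUID.0x7.1:EDX, the leaf holding
 * the KVM-only AVX_VNNI_INT8/AVX_NE_CONVERT/PREFETCHITI bits.
 */
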
/*
 * Reverse CPUID and its derivatives can only be used for hardware-defined
 * feature words, i.e. words whose bits directly correspond to a CPUID leaf.
 * Retrieving a feature bit or masking guest CPUID from a Linux-defined word
 * is nonsensical as the bit number/mask is an arbitrary software-defined value
 * and can't be used by KVM to query/control guest capabilities.  And obviously
 * the leaf being queried must have an entry in the lookup table.
 */
static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
{
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
        BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
        BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
}

/*
 * Translate feature bits that are scattered in the kernel's cpufeatures word
 * into KVM feature words that align with hardware's definitions.
 */
static __always_inline u32 __feature_translate(int x86_feature)
{
        if (x86_feature == X86_FEATURE_SGX1)
                return KVM_X86_FEATURE_SGX1;
        else if (x86_feature == X86_FEATURE_SGX2)
                return KVM_X86_FEATURE_SGX2;
        else if (x86_feature == X86_FEATURE_SGX_EDECCSSA)
                return KVM_X86_FEATURE_SGX_EDECCSSA;

        return x86_feature;
}

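/*
 * E.g. the kernel scatters X86_FEATURE_SGX1 into a Linux-defined word, so
 * __feature_translate() remaps it to KVM_X86_FEATURE_SGX1 in the KVM-only
 * CPUID_12_EAX word; features that already live in a hardware-defined word
 * pass through unchanged.
 */
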
static __always_inline u32 __feature_leaf(int x86_feature)
{
        return __feature_translate(x86_feature) / 32;
}

/*
 * Retrieve the bit mask from an X86_FEATURE_* definition.  Features contain
 * the hardware defined bit number (stored in bits 4:0) and a software defined
 * "word" (stored in bits 31:5).  The word is used to index into arrays of
 * bit masks that hold the per-cpu feature capabilities, e.g. this_cpu_has().
 */
static __always_inline u32 __feature_bit(int x86_feature)
{
        x86_feature = __feature_translate(x86_feature);

        reverse_cpuid_check(x86_feature / 32);
        return 1 << (x86_feature & 31);
}

#define feature_bit(name)  __feature_bit(X86_FEATURE_##name)

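/*
 * E.g. feature_bit(XSAVE): cpufeatures.h defines X86_FEATURE_XSAVE as bit 26
 * of the CPUID_1_ECX word, so __feature_bit() returns 1 << 26, the XSAVE bit
 * in CPUID.0x1.0:ECX.
 */
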
static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
{
        unsigned int x86_leaf = __feature_leaf(x86_feature);

        reverse_cpuid_check(x86_leaf);
        return reverse_cpuid[x86_leaf];
}

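/*
 * E.g. x86_feature_cpuid(X86_FEATURE_SGX1) translates to the CPUID_12_EAX
 * word and returns {0x12, 0, CPUID_EAX}, i.e. CPUID.0x12.0:EAX.
 */
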
static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
                                                  u32 reg)
{
        switch (reg) {
        case CPUID_EAX:
                return &entry->eax;
        case CPUID_EBX:
                return &entry->ebx;
        case CPUID_ECX:
                return &entry->ecx;
        case CPUID_EDX:
                return &entry->edx;
        default:
                BUILD_BUG();
                return NULL;
        }
}

static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
                                                unsigned int x86_feature)
{
        const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);

        return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
                                           unsigned int x86_feature)
{
        u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

        return *reg & __feature_bit(x86_feature);
}

static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
                                            unsigned int x86_feature)
{
        return cpuid_entry_get(entry, x86_feature);
}

static __always_inline void cpuid_entry_clear(struct kvm_cpuid_entry2 *entry,
                                              unsigned int x86_feature)
{
        u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

        *reg &= ~__feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_set(struct kvm_cpuid_entry2 *entry,
                                            unsigned int x86_feature)
{
        u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

        *reg |= __feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry,
                                               unsigned int x86_feature,
                                               bool set)
{
        u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

        /*
         * Open coded instead of using cpuid_entry_{clear,set}() to coerce the
         * compiler into using CMOV instead of Jcc when possible.
         */
        if (set)
                *reg |= __feature_bit(x86_feature);
        else
                *reg &= ~__feature_bit(x86_feature);
}

#endif /* ARCH_X86_KVM_REVERSE_CPUID_H */
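A minimal sketch of how these accessors are typically combined when adjusting a
guest CPUID entry; the helper name and the host_has_sgx2 flag are illustrative,
not taken from the kernel tree:

/*
 * Illustrative only (not part of this header): adjust a guest's
 * CPUID.0x12.0:EAX sub-leaf using the accessors defined above.
 */
static inline void example_adjust_sgx_subleaf(struct kvm_cpuid_entry2 *entry,
                                              bool host_has_sgx2)
{
        /* Report SGX2 to the guest iff the host supports it. */
        cpuid_entry_change(entry, X86_FEATURE_SGX2, host_has_sgx2);

        /* SGX1 is treated as mandatory for an SGX-capable guest here. */
        cpuid_entry_set(entry, X86_FEATURE_SGX1);

        /* Hide the EDECCSSA sub-feature unconditionally. */
        cpuid_entry_clear(entry, X86_FEATURE_SGX_EDECCSSA);
}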