commit a1c288f87d

Merge tag 'kvm-x86-misc-6.4' of https://github.com/kvm-x86/linux into HEAD

KVM x86 changes for 6.4:

- Optimize CR0.WP toggling by avoiding an MMU reload when TDP is enabled,
  and by giving the guest control of CR0.WP when EPT is enabled on VMX
  (VMX-only because SVM doesn't support per-bit controls)

- Add CR0/CR4 helpers to query single bits, and clean up related code
  where KVM was interpreting kvm_read_cr4_bits()'s "unsigned long" return
  as a bool

- Move AMD_PSFD to cpufeatures.h and purge KVM's definition

- Misc cleanups
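The CR0/CR4 helper bullet boils down to a small pattern: read the register under a single-bit mask, then normalize the result to a real bool instead of truth-testing a wide integer. A minimal standalone sketch of that pattern follows (plain C for illustration only, not KVM code; cr_read_bits() and cr_is_bit_set() are invented names that mirror the kvm_read_cr4_bits()/kvm_is_cr4_bit_set() pair added in the diff below, and CR4_PCIDE just reuses the architectural bit position):

    #include <stdbool.h>
    #include <stdio.h>

    #define CR4_PCIDE (1UL << 17)   /* example bit, same position as x86 CR4.PCIDE */

    static unsigned long cr_read_bits(unsigned long cr, unsigned long mask)
    {
        /* Returns the masked bits, i.e. an unsigned long, not a boolean. */
        return cr & mask;
    }

    static bool cr_is_bit_set(unsigned long cr, unsigned long bit)
    {
        /* "!!" collapses any non-zero result to true, so callers get a real bool. */
        return !!cr_read_bits(cr, bit);
    }

    int main(void)
    {
        unsigned long cr4 = CR4_PCIDE;

        /* Old style: callers truth-test a wide integer. */
        printf("masked value: %#lx\n", cr_read_bits(cr4, CR4_PCIDE));
        /* New style: the intent and the return type say "single bit, yes or no". */
        printf("bit set: %d\n", cr_is_bit_set(cr4, CR4_PCIDE));
        return 0;
    }

The point of the wrapper is only readability and type intent; the masked-read helper remains available for callers that genuinely want more than one bit.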
@@ -337,6 +337,7 @@
 #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
 #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
 #define X86_FEATURE_CPPC (13*32+27) /* Collaborative Processor Performance Control */
+#define X86_FEATURE_AMD_PSFD (13*32+28) /* "" Predictive Store Forwarding Disable */
 #define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
 #define X86_FEATURE_BRS (13*32+31) /* Branch Sampling available */
@@ -60,12 +60,6 @@ u32 xstate_required_size(u64 xstate_bv, bool compacted)
 return ret;
 }
 
-/*
- * This one is tied to SSB in the user API, and not
- * visible in /proc/cpuinfo.
- */
-#define KVM_X86_FEATURE_AMD_PSFD (13*32+28) /* Predictive Store Forwarding Disable */
-
 #define F feature_bit
 
 /* Scattered Flag - For features that are scattered by cpufeatures.h. */
@@ -266,7 +260,7 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
 /* Update OSXSAVE bit */
 if (boot_cpu_has(X86_FEATURE_XSAVE))
 cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
-kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE));
+kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE));
 
 cpuid_entry_change(best, X86_FEATURE_APIC,
 vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
@@ -275,7 +269,7 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
 best = cpuid_entry2_find(entries, nent, 7, 0);
 if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
 cpuid_entry_change(best, X86_FEATURE_OSPKE,
-kvm_read_cr4_bits(vcpu, X86_CR4_PKE));
+kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE));
 
 best = cpuid_entry2_find(entries, nent, 0xD, 0);
 if (best)
@@ -715,7 +709,7 @@ void kvm_set_cpu_caps(void)
 F(CLZERO) | F(XSAVEERPTR) |
 F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
 F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) |
-__feature_bit(KVM_X86_FEATURE_AMD_PSFD)
+F(AMD_PSFD)
 );
 
 /*
@@ -1640,6 +1640,14 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 goto exception;
 break;
 case VCPU_SREG_CS:
+/*
+ * KVM uses "none" when loading CS as part of emulating Real
+ * Mode exceptions and IRET (handled above). In all other
+ * cases, loading CS without a control transfer is a KVM bug.
+ */
+if (WARN_ON_ONCE(transfer == X86_TRANSFER_NONE))
+goto exception;
+
 if (!(seg_desc.type & 8))
 goto exception;
 
@@ -4,7 +4,7 @@
 
 #include <linux/kvm_host.h>
 
-#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
+#define KVM_POSSIBLE_CR0_GUEST_BITS (X86_CR0_TS | X86_CR0_WP)
 #define KVM_POSSIBLE_CR4_GUEST_BITS \
 (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)
@@ -157,6 +157,14 @@ static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
 return vcpu->arch.cr0 & mask;
 }
 
+static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu,
+unsigned long cr0_bit)
+{
+BUILD_BUG_ON(!is_power_of_2(cr0_bit));
+
+return !!kvm_read_cr0_bits(vcpu, cr0_bit);
+}
+
 static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
 {
 return kvm_read_cr0_bits(vcpu, ~0UL);
@@ -171,6 +179,14 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
 return vcpu->arch.cr4 & mask;
 }
 
+static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu,
+unsigned long cr4_bit)
+{
+BUILD_BUG_ON(!is_power_of_2(cr4_bit));
+
+return !!kvm_read_cr4_bits(vcpu, cr4_bit);
+}
+
 static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
 {
 if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
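The BUILD_BUG_ON(!is_power_of_2(...)) in the new helpers above only works because they are __always_inline and callers pass compile-time-constant masks, so asking for more than one bit is rejected at build time rather than silently answering "something in this mask is set". A rough standalone approximation of that guard (not kernel code; it substitutes a GCC/Clang statement expression plus _Static_assert for the kernel's BUILD_BUG_ON, and the CR0_* values merely mirror the architectural bit positions):

    #include <stdbool.h>

    /* Mimic the kernel's is_power_of_2() and BUILD_BUG_ON() for this sketch. */
    #define IS_POWER_OF_2(x)    ((x) != 0 && (((x) & ((x) - 1)) == 0))
    #define CHECK_SINGLE_BIT(x) _Static_assert(IS_POWER_OF_2(x), "mask must name exactly one bit")

    #define CR0_PE (1UL << 0)
    #define CR0_WP (1UL << 16)

    /* Macro form so the check runs at compile time on the constant argument. */
    #define cr0_is_bit_set(cr0, bit) ({ CHECK_SINGLE_BIT(bit); !!((cr0) & (bit)); })

    int main(void)
    {
        unsigned long cr0 = CR0_PE | CR0_WP;
        bool wp = cr0_is_bit_set(cr0, CR0_WP);    /* fine: a single bit */
        /* bool bad = cr0_is_bit_set(cr0, CR0_PE | CR0_WP);  -- fails to compile */
        return wp ? 0 : 1;
    }

A call with a multi-bit mask refuses to build, which is the behaviour the kernel helpers get from BUILD_BUG_ON(), while multi-bit queries remain available through kvm_read_cr0_bits()/kvm_read_cr4_bits().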
@@ -113,6 +113,8 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 u64 fault_address, char *insn, int insn_len);
+void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
+struct kvm_mmu *mmu);
 
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
@@ -132,7 +134,7 @@ static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
 {
 BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);
 
-return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
+return kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)
 ? cr3 & X86_CR3_PCID_MASK
 : 0;
 }
@@ -153,6 +155,24 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
 vcpu->arch.mmu->root_role.level);
 }
 
+static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
+struct kvm_mmu *mmu)
+{
+/*
+ * When EPT is enabled, KVM may passthrough CR0.WP to the guest, i.e.
+ * @mmu's snapshot of CR0.WP and thus all related paging metadata may
+ * be stale. Refresh CR0.WP and the metadata on-demand when checking
+ * for permission faults. Exempt nested MMUs, i.e. MMUs for shadowing
+ * nEPT and nNPT, as CR0.WP is ignored in both cases. Note, KVM does
+ * need to refresh nested_mmu, a.k.a. the walker used to translate L2
+ * GVAs to GPAs, as that "MMU" needs to honor L2's CR0.WP.
+ */
+if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu)
+return;
+
+__kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
+}
+
 /*
  * Check if a given access (described through the I/D, W/R and U/S bits of a
  * page fault error code pfec) causes a permission fault with the given PTE
@@ -184,8 +204,12 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 u64 implicit_access = access & PFERR_IMPLICIT_ACCESS;
 bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC;
 int index = (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1;
-bool fault = (mmu->permissions[index] >> pte_access) & 1;
 u32 errcode = PFERR_PRESENT_MASK;
+bool fault;
+
+kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
+
+fault = (mmu->permissions[index] >> pte_access) & 1;
 
 WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
 if (unlikely(mmu->pkru_mask)) {
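The comment above describes a refresh-before-use scheme: once CR0.WP is guest-owned, the MMU's cached copy of it can be stale, so permission_fault() resynchronizes the snapshot (and, in the real code, the derived permission tables) before consulting them. A toy model of that ordering, with no KVM types involved (every name below is invented for the sketch and the WP check is grossly simplified):

    #include <stdbool.h>
    #include <stdio.h>

    /* Cached snapshot of a bit the guest owns and can flip without an exit. */
    struct toy_mmu {
        bool cr0_wp;
    };

    static bool live_cr0_wp;    /* stands in for the value currently in the guest's CR0 */

    static void refresh_passthrough_bits(struct toy_mmu *mmu)
    {
        if (mmu->cr0_wp == live_cr0_wp)
            return;                    /* snapshot already matches, nothing to do */
        mmu->cr0_wp = live_cr0_wp;     /* resync; KVM also regenerates permission metadata here */
    }

    static bool permission_fault(struct toy_mmu *mmu, bool supervisor_write_to_ro)
    {
        refresh_passthrough_bits(mmu); /* the key ordering: refresh before consulting the cache */
        return supervisor_write_to_ro && mmu->cr0_wp;
    }

    int main(void)
    {
        struct toy_mmu mmu = { .cr0_wp = false };

        live_cr0_wp = true;            /* guest sets CR0.WP; no exit, so the cache is stale */
        printf("faults: %d\n", permission_fault(&mmu, true));
        return 0;
    }

The early-out when the snapshot already matches is what keeps the common case cheap: the refresh only does real work when the guest actually toggled the bit since the last check.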
@@ -5112,6 +5112,21 @@ kvm_calc_cpu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
 return role;
 }
 
+void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
+struct kvm_mmu *mmu)
+{
+const bool cr0_wp = kvm_is_cr0_bit_set(vcpu, X86_CR0_WP);
+
+BUILD_BUG_ON((KVM_MMU_CR0_ROLE_BITS & KVM_POSSIBLE_CR0_GUEST_BITS) != X86_CR0_WP);
+BUILD_BUG_ON((KVM_MMU_CR4_ROLE_BITS & KVM_POSSIBLE_CR4_GUEST_BITS));
+
+if (is_cr0_wp(mmu) == cr0_wp)
+return;
+
+mmu->cpu_role.base.cr0_wp = cr0_wp;
+reset_guest_paging_metadata(vcpu, mmu);
+}
+
 static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
 {
 /* tdp_root_level is architecture forced level, use it if nonzero */
@@ -540,9 +540,9 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 if (!pmc)
 return 1;
 
-if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
+if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCE) &&
 (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
-(kvm_read_cr0(vcpu) & X86_CR0_PE))
+kvm_is_cr0_bit_set(vcpu, X86_CR0_PE))
 return 1;
 
 *data = pmc_read_counter(pmc) & mask;
@@ -4522,7 +4522,6 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
 void *insn, int insn_len)
 {
 bool smep, smap, is_user;
-unsigned long cr4;
 u64 error_code;
 
 /* Emulation is always possible when KVM has access to all guest state. */
@@ -4614,9 +4613,8 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
 if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK))
 goto resume_guest;
 
-cr4 = kvm_read_cr4(vcpu);
-smep = cr4 & X86_CR4_SMEP;
-smap = cr4 & X86_CR4_SMAP;
+smep = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMEP);
+smap = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP);
 is_user = svm_get_cpl(vcpu) == 3;
 if (smap && (!smep || is_user)) {
 pr_err_ratelimited("SEV Guest triggered AMD Erratum 1096\n");
@@ -4481,7 +4481,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
  * CR0_GUEST_HOST_MASK is already set in the original vmcs01
  * (KVM doesn't change it);
  */
-vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
+vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
 vmx_set_cr0(vcpu, vmcs12->host_cr0);
 
 /* Same as above - no reason to call set_cr4_guest_host_mask(). */
@@ -4632,7 +4632,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
  */
 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
 
-vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
+vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
 
 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
@@ -5154,7 +5154,7 @@ static int handle_vmxon(struct kvm_vcpu *vcpu)
  * does force CR0.PE=1, but only to also force VM86 in order to emulate
  * Real Mode, and so there's no need to check CR0.PE manually.
  */
-if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
+if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_VMXE)) {
 kvm_queue_exception(vcpu, UD_VECTOR);
 return 1;
 }
@@ -4746,7 +4746,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
 /* 22.2.1, 20.8.1 */
 vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
 
-vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
+vmx->vcpu.arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
 vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
 
 set_cr4_guest_host_mask(vmx);
@@ -5136,7 +5136,7 @@ bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
 if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
 return true;
 
-return vmx_get_cpl(vcpu) == 3 && kvm_read_cr0_bits(vcpu, X86_CR0_AM) &&
+return vmx_get_cpl(vcpu) == 3 && kvm_is_cr0_bit_set(vcpu, X86_CR0_AM) &&
 (kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
 }
 
@@ -5473,7 +5473,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 break;
 case 3: /* lmsw */
 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
-trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
+trace_kvm_cr_write(0, (kvm_read_cr0_bits(vcpu, ~0xful) | val));
 kvm_lmsw(vcpu, val);
 
 return kvm_skip_emulated_instruction(vcpu);
@@ -7531,7 +7531,7 @@ static u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
 return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
 
-if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
+if (kvm_read_cr0_bits(vcpu, X86_CR0_CD)) {
 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
 cache = MTRR_TYPE_WRBACK;
 else
@@ -640,6 +640,24 @@ BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64)
 (1 << VCPU_EXREG_EXIT_INFO_1) | \
 (1 << VCPU_EXREG_EXIT_INFO_2))
 
+static inline unsigned long vmx_l1_guest_owned_cr0_bits(void)
+{
+unsigned long bits = KVM_POSSIBLE_CR0_GUEST_BITS;
+
+/*
+ * CR0.WP needs to be intercepted when KVM is shadowing legacy paging
+ * in order to construct shadow PTEs with the correct protections.
+ * Note! CR0.WP technically can be passed through to the guest if
+ * paging is disabled, but checking CR0.PG would generate a cyclical
+ * dependency of sorts due to forcing the caller to ensure CR0 holds
+ * the correct value prior to determining which CR0 bits can be owned
+ * by L1. Keep it simple and limit the optimization to EPT.
+ */
+if (!enable_ept)
+bits &= ~X86_CR0_WP;
+return bits;
+}
+
 static __always_inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
 {
 return container_of(kvm, struct kvm_vmx, kvm);
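vmx_l1_guest_owned_cr0_bits() above decides which CR0 bits L1 may own; the callers changed in this merge then program CR0_GUEST_HOST_MASK with the complement, because in the VMCS the mask has the opposite sense: a set bit is host-owned, so the guest reads the shadow value for it and a write that would change it causes a VM-exit. A small standalone sketch of that relationship (plain C, illustrative only; guest_owned_cr0_bits() is a made-up stand-in for the real helper):

    #include <stdio.h>

    #define X86_CR0_TS (1UL << 3)
    #define X86_CR0_WP (1UL << 16)

    /* Toy version of the policy: WP is only guest-owned when EPT is in use. */
    static unsigned long guest_owned_cr0_bits(int ept_enabled)
    {
        unsigned long bits = X86_CR0_TS | X86_CR0_WP;

        if (!ept_enabled)
            bits &= ~X86_CR0_WP;   /* shadow paging must see CR0.WP writes */
        return bits;
    }

    int main(void)
    {
        unsigned long owned = guest_owned_cr0_bits(1);

        /* The VMCS mask is the complement: set bits are intercepted by the host. */
        printf("guest-owned bits:    %#lx\n", owned);
        printf("CR0_GUEST_HOST_MASK: %#lx\n", ~owned);
        return 0;
    }

This is why the guest can now flip CR0.WP without exiting when EPT is enabled, while the shadow-paging and SVM cases keep intercepting it.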
@@ -194,7 +194,7 @@ bool __read_mostly eager_page_split = true;
 module_param(eager_page_split, bool, 0644);
 
 /* Enable/disable SMT_RSB bug mitigation */
-bool __read_mostly mitigate_smt_rsb;
+static bool __read_mostly mitigate_smt_rsb;
 module_param(mitigate_smt_rsb, bool, 0444);
 
 /*
@@ -841,7 +841,7 @@ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
 
 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
 {
-if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+if ((dr != 4 && dr != 5) || !kvm_is_cr4_bit_set(vcpu, X86_CR4_DE))
 return true;
 
 kvm_queue_exception(vcpu, UD_VECTOR);
@@ -906,6 +906,24 @@ EXPORT_SYMBOL_GPL(load_pdptrs);
 
 void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
 {
+/*
+ * CR0.WP is incorporated into the MMU role, but only for non-nested,
+ * indirect shadow MMUs. If paging is disabled, no updates are needed
+ * as there are no permission bits to emulate. If TDP is enabled, the
+ * MMU's metadata needs to be updated, e.g. so that emulating guest
+ * translations does the right thing, but there's no need to unload the
+ * root as CR0.WP doesn't affect SPTEs.
+ */
+if ((cr0 ^ old_cr0) == X86_CR0_WP) {
+if (!(cr0 & X86_CR0_PG))
+return;
+
+if (tdp_enabled) {
+kvm_init_mmu(vcpu);
+return;
+}
+}
+
 if ((cr0 ^ old_cr0) & X86_CR0_PG) {
 kvm_clear_async_pf_completion_queue(vcpu);
 kvm_async_pf_hash_reset(vcpu);
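The fast path added above keys off a single XOR: cr0 ^ old_cr0 has a bit set for every bit that changed, so comparing the XOR against X86_CR0_WP asks "did exactly CR0.WP, and nothing else, flip?". A standalone illustration of that test (plain C, not KVM code; only_wp_toggled() is an invented name):

    #include <stdbool.h>
    #include <stdio.h>

    #define X86_CR0_PE (1UL << 0)
    #define X86_CR0_WP (1UL << 16)
    #define X86_CR0_PG (1UL << 31)

    /* True only when CR0.WP is the sole bit that differs between old and new. */
    static bool only_wp_toggled(unsigned long old_cr0, unsigned long cr0)
    {
        return (cr0 ^ old_cr0) == X86_CR0_WP;
    }

    int main(void)
    {
        unsigned long old_cr0 = X86_CR0_PE | X86_CR0_PG;

        /* Prints 1: only WP flipped. */
        printf("%d\n", only_wp_toggled(old_cr0, old_cr0 | X86_CR0_WP));
        /* Prints 0: PE changed as well, so the slow path would run. */
        printf("%d\n", only_wp_toggled(old_cr0, (old_cr0 | X86_CR0_WP) & ~X86_CR0_PE));
        return 0;
    }

Only when that single-bit toggle is detected (and paging is on) does the new code take the cheap kvm_init_mmu() route instead of the heavier handling below it.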
@@ -965,7 +983,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 return 1;
 
 if (!(cr0 & X86_CR0_PG) &&
-(is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)))
+(is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)))
 return 1;
 
 static_call(kvm_x86_set_cr0)(vcpu, cr0);
@@ -987,7 +1005,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
 if (vcpu->arch.guest_state_protected)
 return;
 
-if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
+if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
 
 if (vcpu->arch.xcr0 != host_xcr0)
 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
@@ -1001,7 +1019,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
 if (static_cpu_has(X86_FEATURE_PKU) &&
 vcpu->arch.pkru != vcpu->arch.host_pkru &&
 ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
-kvm_read_cr4_bits(vcpu, X86_CR4_PKE)))
+kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
 write_pkru(vcpu->arch.pkru);
 #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 }
@@ -1015,14 +1033,14 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 if (static_cpu_has(X86_FEATURE_PKU) &&
 ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
-kvm_read_cr4_bits(vcpu, X86_CR4_PKE))) {
+kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) {
 vcpu->arch.pkru = rdpkru();
 if (vcpu->arch.pkru != vcpu->arch.host_pkru)
 write_pkru(vcpu->arch.host_pkru);
 }
 #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
 
-if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
+if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
 
 if (vcpu->arch.xcr0 != host_xcr0)
 xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
@@ -1178,9 +1196,6 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 return 1;
 
 if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
-if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
-return 1;
-
 /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
 if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
 return 1;
@@ -1227,7 +1242,7 @@ static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
  * PCIDs for them are also 0, because MOV to CR3 always flushes the TLB
  * with PCIDE=0.
  */
-if (!kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
+if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE))
 return;
 
 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
@@ -1242,9 +1257,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 bool skip_tlb_flush = false;
 unsigned long pcid = 0;
 #ifdef CONFIG_X86_64
-bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
-
-if (pcid_enabled) {
+if (kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)) {
 skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
 cr3 &= ~X86_CR3_PCID_NOFLUSH;
 pcid = cr3 & X86_CR3_PCID_MASK;
@@ -5056,7 +5069,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
 return 0;
 if (mce->status & MCI_STATUS_UC) {
 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
-!kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
+!kvm_is_cr4_bit_set(vcpu, X86_CR4_MCE)) {
 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
 return 0;
 }
@@ -13263,7 +13276,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
 return 1;
 }
 
-pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
+pcid_enabled = kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE);
 
 switch (type) {
 case INVPCID_TYPE_INDIV_ADDR:
@@ -123,15 +123,15 @@ static inline bool kvm_exception_is_soft(unsigned int nr)
 
 static inline bool is_protmode(struct kvm_vcpu *vcpu)
 {
-return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
+return kvm_is_cr0_bit_set(vcpu, X86_CR0_PE);
 }
 
-static inline int is_long_mode(struct kvm_vcpu *vcpu)
+static inline bool is_long_mode(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
-return vcpu->arch.efer & EFER_LMA;
+return !!(vcpu->arch.efer & EFER_LMA);
 #else
-return 0;
+return false;
 #endif
 }
 
@@ -171,19 +171,19 @@ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
 return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
 }
 
-static inline int is_pae(struct kvm_vcpu *vcpu)
+static inline bool is_pae(struct kvm_vcpu *vcpu)
 {
-return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
+return kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE);
 }
 
-static inline int is_pse(struct kvm_vcpu *vcpu)
+static inline bool is_pse(struct kvm_vcpu *vcpu)
 {
-return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
+return kvm_is_cr4_bit_set(vcpu, X86_CR4_PSE);
 }
 
-static inline int is_paging(struct kvm_vcpu *vcpu)
+static inline bool is_paging(struct kvm_vcpu *vcpu)
 {
-return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
+return likely(kvm_is_cr0_bit_set(vcpu, X86_CR0_PG));
 }
 
 static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
@@ -193,7 +193,7 @@ static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
 
 static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
 {
-return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
+return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48;
 }
 
 static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)