mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-23 04:34:11 +08:00
5d5103595e
Reinitialize IA32_FEAT_CTL on the BSP during wakeup to handle the case
where firmware doesn't initialize or save/restore across S3. This fixes
a bug where IA32_FEAT_CTL is left uninitialized and results in VMXON
taking a #GP due to VMX not being fully enabled, i.e. breaks KVM.
Use init_ia32_feat_ctl() to "restore" IA32_FEAT_CTL as it already deals
with the case where the MSR is locked, and because APs already redo
init_ia32_feat_ctl() during suspend by virtue of the SMP boot flow being
used to reinitialize APs upon wakeup. Do the call in the early wakeup
flow to avoid dependencies in the syscore_ops chain, e.g. simply adding
a resume hook is not guaranteed to work, as KVM does VMXON in its own
resume hook, kvm_resume(), when KVM has active guests.
Fixes: 21bd3467a5
("KVM: VMX: Drop initialization of IA32_FEAT_CTL MSR")
Reported-by: Brad Campbell <lists2009@fnarfbargle.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Liam Merwick <liam.merwick@oracle.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Tested-by: Brad Campbell <lists2009@fnarfbargle.com>
Cc: stable@vger.kernel.org # v5.6
Link: https://lkml.kernel.org/r/20200608174134.11157-1-sean.j.christopherson@intel.com
67 lines
1.7 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CPU_H
#define _ASM_X86_CPU_H

#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/percpu.h>
#ifdef CONFIG_SMP

extern void prefill_possible_map(void);

#else /* CONFIG_SMP */

/* On UP there is nothing to prefill: the only CPU is always possible. */
static inline void prefill_possible_map(void) {}

/*
 * UP fallbacks: with a single CPU, physical/ACPI IDs collapse to the
 * boot CPU's values and the "safe" processor id is trivially 0.
 */
#define cpu_physical_id(cpu)			boot_cpu_physical_apicid
#define cpu_acpi_id(cpu)			0
#define safe_smp_processor_id()			0

#endif /* CONFIG_SMP */
struct x86_cpu {
|
|
struct cpu cpu;
|
|
};
|
|
|
|
#ifdef CONFIG_HOTPLUG_CPU
/* Register/unregister a CPU with the sysfs device model on (un)plug. */
extern int arch_register_cpu(int num);
extern void arch_unregister_cpu(int);
/* Entry point used when logically onlining CPU0 again after offline. */
extern void start_cpu0(void);
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
extern int _debug_hotplug_cpu(int cpu, int action);
#endif
#endif
/* Whether MWAIT is usable for idle on this CPU (non-zero if so). */
int mwait_usable(const struct cpuinfo_x86 *);

/* Decode family/model/stepping fields from a raw CPUID(1).EAX signature. */
unsigned int x86_family(unsigned int sig);
unsigned int x86_model(unsigned int sig);
unsigned int x86_stepping(unsigned int sig);
#ifdef CONFIG_CPU_SUP_INTEL
|
|
extern void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c);
|
|
extern void switch_to_sld(unsigned long tifn);
|
|
extern bool handle_user_split_lock(struct pt_regs *regs, long error_code);
|
|
extern bool handle_guest_split_lock(unsigned long ip);
|
|
#else
|
|
static inline void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) {}
|
|
static inline void switch_to_sld(unsigned long tifn) {}
|
|
static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code)
|
|
{
|
|
return false;
|
|
}
|
|
|
|
static inline bool handle_guest_split_lock(unsigned long ip)
|
|
{
|
|
return false;
|
|
}
|
|
#endif
|
|
#ifdef CONFIG_IA32_FEAT_CTL
/*
 * Initialize (or re-initialize after S3 resume) IA32_FEAT_CTL; handles
 * the MSR already being locked by firmware.  Exposed so the wakeup path
 * can restore the MSR on the BSP when firmware fails to do so.
 */
void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
#else
static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {}
#endif

#endif /* _ASM_X86_CPU_H */