KVM: x86: Serialize vendor module initialization (hardware setup)
Acquire a new mutex, vendor_module_lock, in kvm_x86_vendor_init() while doing hardware setup to ensure that concurrent calls are fully serialized. KVM rejects attempts to load vendor modules if a different module has already been loaded, but doesn't handle the case where multiple vendor modules are loaded at the same time, and module_init() doesn't run under the global module_mutex.

Note, in practice this is likely a benign bug, as no platform exists that supports both SVM and VMX, i.e. barring a weird VM setup, one of the vendor modules is guaranteed to fail a support check before modifying common KVM state.

Alternatively, KVM could perform an atomic CMPXCHG on .hardware_enable, but that comes with its own ugliness as it would require setting .hardware_enable before success is guaranteed, e.g. attempting to load the "wrong" module could result in spurious failure to load the "right" module.

Introduce a new mutex, as using kvm_lock is extremely deadlock prone due to kvm_lock being taken under cpus_write_lock() and, in the future, under cpus_read_lock(). Any operation that takes cpus_read_lock() while holding kvm_lock would potentially deadlock, e.g. kvm_timer_init() takes cpus_read_lock() to register a callback. In theory, KVM could avoid such problematic paths, i.e. do less setup under kvm_lock, but avoiding all calls to cpus_read_lock() is subtly difficult and thus fragile. E.g. updating static calls also acquires cpus_read_lock().

Inverting the lock ordering, i.e. always taking kvm_lock outside cpus_read_lock(), is not a viable option, as kvm_lock is taken in various callbacks that may be invoked under cpus_read_lock(), e.g. x86's kvmclock_cpufreq_notifier().

The lockdep splat below is dependent on future patches to take cpus_read_lock() in hardware_enable_all(), but as above, deadlock is already possible.

  ======================================================
  WARNING: possible circular locking dependency detected
  6.0.0-smp--7ec93244f194-init2 #27 Tainted: G O
  ------------------------------------------------------
  stable/251833 is trying to acquire lock:
  ffffffffc097ea28 (kvm_lock){+.+.}-{3:3}, at: hardware_enable_all+0x1f/0xc0 [kvm]

  but task is already holding lock:
  ffffffffa2456828 (cpu_hotplug_lock){++++}-{0:0}, at: hardware_enable_all+0xf/0xc0 [kvm]

  which lock already depends on the new lock.
  the existing dependency chain (in reverse order) is:

  -> #1 (cpu_hotplug_lock){++++}-{0:0}:
         cpus_read_lock+0x2a/0xa0
         __cpuhp_setup_state+0x2b/0x60
         __kvm_x86_vendor_init+0x16a/0x1870 [kvm]
         kvm_x86_vendor_init+0x23/0x40 [kvm]
         0xffffffffc0a4d02b
         do_one_initcall+0x110/0x200
         do_init_module+0x4f/0x250
         load_module+0x1730/0x18f0
         __se_sys_finit_module+0xca/0x100
         __x64_sys_finit_module+0x1d/0x20
         do_syscall_64+0x3d/0x80
         entry_SYSCALL_64_after_hwframe+0x63/0xcd

  -> #0 (kvm_lock){+.+.}-{3:3}:
         __lock_acquire+0x16f4/0x30d0
         lock_acquire+0xb2/0x190
         __mutex_lock+0x98/0x6f0
         mutex_lock_nested+0x1b/0x20
         hardware_enable_all+0x1f/0xc0 [kvm]
         kvm_dev_ioctl+0x45e/0x930 [kvm]
         __se_sys_ioctl+0x77/0xc0
         __x64_sys_ioctl+0x1d/0x20
         do_syscall_64+0x3d/0x80
         entry_SYSCALL_64_after_hwframe+0x63/0xcd

  other info that might help us debug this:

   Possible unsafe locking scenario:

         CPU0                    CPU1
         ----                    ----
    lock(cpu_hotplug_lock);
                                 lock(kvm_lock);
                                 lock(cpu_hotplug_lock);
    lock(kvm_lock);

   *** DEADLOCK ***

  1 lock held by stable/251833:
   #0: ffffffffa2456828 (cpu_hotplug_lock){++++}-{0:0}, at: hardware_enable_all+0xf/0xc0 [kvm]

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20221130230934.1014142-16-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
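[Editor's illustration] To make the rejected CMPXCHG alternative concrete, here is a minimal userspace sketch (plain C11 atomics; claim_slot() and the dummy callbacks are hypothetical stand-ins, not kernel code). It shows the ugliness called out above: the slot must be claimed before success is guaranteed, so a module that goes on to fail its support check leaves a window in which loading the "right" module fails spuriously.

	#include <stdatomic.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Hypothetical stand-in for kvm_x86_ops.hardware_enable. */
	static _Atomic(void *) hardware_enable;

	/* The CMPXCHG scheme: claim the slot *before* success is guaranteed. */
	static int claim_slot(void *enable_fn)
	{
		void *expected = NULL;

		return atomic_compare_exchange_strong(&hardware_enable,
						      &expected, enable_fn) ? 0 : -1;
	}

	int main(void)
	{
		int svm_cb, vmx_cb;	/* dummy "hardware_enable" callbacks */

		/* The "wrong" module wins the race and claims the slot... */
		if (claim_slot(&svm_cb) != 0)
			return 1;

		/* ...and until it fails its support check and unwinds, loading
		 * the "right" module is rejected -- a spurious failure. */
		if (claim_slot(&vmx_cb) != 0)
			printf("kvm_intel load failed spuriously\n");

		/* The loser unwinds only after its support check fails. */
		atomic_store(&hardware_enable, NULL);
		return 0;
	}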
commit 3af4a9e61e
parent e32b120071
Documentation/virt/kvm/locking.rst

@@ -291,3 +291,9 @@ time it will be set using the Dirty tracking mechanism described above.
 		wakeup notification event since external interrupts from the
 		assigned devices happens, we will find the vCPU on the list to
 		wakeup.
+
+``vendor_module_lock``
+^^^^^^^^^^^^^^^^^^^^^^
+:Type:		mutex
+:Arch:		x86
+:Protects:	loading a vendor module (kvm_amd or kvm_intel)
arch/x86/kvm/x86.c

@@ -128,6 +128,7 @@ static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);
 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
 
+static DEFINE_MUTEX(vendor_module_lock);
 struct kvm_x86_ops kvm_x86_ops __read_mostly;
 
 #define KVM_X86_OP(func) \
@@ -9301,7 +9302,7 @@ void kvm_arch_exit(void)
 
 }
 
-int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
+static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 {
 	u64 host_pat;
 	int r;
@@ -9434,6 +9435,17 @@ out_free_x86_emulator_cache:
 	kmem_cache_destroy(x86_emulator_cache);
 	return r;
 }
+
+int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
+{
+	int r;
+
+	mutex_lock(&vendor_module_lock);
+	r = __kvm_x86_vendor_init(ops);
+	mutex_unlock(&vendor_module_lock);
+
+	return r;
+}
 EXPORT_SYMBOL_GPL(kvm_x86_vendor_init);
 
 void kvm_x86_vendor_exit(void)
@@ -9457,7 +9469,6 @@ void kvm_x86_vendor_exit(void)
 	cancel_work_sync(&pvclock_gtod_work);
 #endif
 	static_call(kvm_x86_hardware_unsetup)();
-	kvm_x86_ops.hardware_enable = NULL;
 	kvm_mmu_vendor_module_exit();
 	free_percpu(user_return_msrs);
 	kmem_cache_destroy(x86_emulator_cache);
@@ -9465,6 +9476,9 @@ void kvm_x86_vendor_exit(void)
 	static_key_deferred_flush(&kvm_xen_enabled);
 	WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
 #endif
+	mutex_lock(&vendor_module_lock);
+	kvm_x86_ops.hardware_enable = NULL;
+	mutex_unlock(&vendor_module_lock);
 }
 EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
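[Editor's illustration] The splat in the commit message boils down to a classic ABBA inversion between cpu_hotplug_lock and kvm_lock. The standalone sketch below (plain pthreads; in reality cpu_hotplug_lock is a percpu rwsem taken for read, not a mutex, and the path names are hypothetical stand-ins) reproduces the two acquisition orders lockdep complains about, and deadlocks by design:

	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Simple mutex stand-ins for cpu_hotplug_lock and kvm_lock. */
	static pthread_mutex_t cpu_hotplug_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t kvm_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Mirrors hardware_enable_all(): cpu_hotplug_lock, then kvm_lock. */
	static void *vm_create_path(void *unused)
	{
		pthread_mutex_lock(&cpu_hotplug_lock);
		sleep(1);			/* widen the race window */
		pthread_mutex_lock(&kvm_lock);	/* blocks once the other thread holds kvm_lock */
		pthread_mutex_unlock(&kvm_lock);
		pthread_mutex_unlock(&cpu_hotplug_lock);
		return NULL;
	}

	/* Mirrors vendor init taking cpus_read_lock() while holding kvm_lock. */
	static void *module_init_path(void *unused)
	{
		pthread_mutex_lock(&kvm_lock);
		sleep(1);
		pthread_mutex_lock(&cpu_hotplug_lock);	/* blocks forever */
		pthread_mutex_unlock(&cpu_hotplug_lock);
		pthread_mutex_unlock(&kvm_lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		pthread_create(&a, NULL, vm_create_path, NULL);
		pthread_create(&b, NULL, module_init_path, NULL);
		pthread_join(a, NULL);	/* never returns: ABBA deadlock */
		pthread_join(b, NULL);
		return 0;
	}

Dedicating vendor_module_lock to vendor module init breaks the cycle: init no longer holds kvm_lock while taking cpus_read_lock(), so the kvm_lock -> cpu_hotplug_lock ordering is never recorded in the first place.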