Merge branch 'kvm-fix-svm-races' into kvm-master

commit 6ebae23c07
Author: Paolo Bonzini
Date:   2021-03-31 07:50:54 -04:00

--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c

@@ -246,11 +246,18 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
         return true;
 }
 
-static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
 {
         struct kvm_vcpu *vcpu = &svm->vcpu;
         bool vmcb12_lma;
 
+        /*
+         * FIXME: these should be done after copying the fields,
+         * to avoid TOC/TOU races.  For these save area checks
+         * the possible damage is limited since kvm_set_cr0 and
+         * kvm_set_cr4 handle failure; EFER_SVME is an exception
+         * so it is force-set later in nested_prepare_vmcb_save.
+         */
         if ((vmcb12->save.efer & EFER_SVME) == 0)
                 return false;
@@ -271,7 +278,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
         if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
                 return false;
 
-        return nested_vmcb_check_controls(&vmcb12->control);
+        return true;
 }
 
 static void load_nested_vmcb_control(struct vcpu_svm *svm,
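Note: the rename marks a split of the old nested_vmcb_checks(), which validated both the save area and the control area straight out of guest memory. That is a time-of-check/time-of-use (TOC/TOU) pattern: the guest can rewrite VMCB12 between the check and the later copy. After the split, the control checks can run on KVM's cached copy instead (see the nested_svm_vmrun() hunk below). A minimal sketch of the two orderings, with validate() as a hypothetical stand-in for the real checks:

        /* Racy: validate fields the guest can still rewrite. */
        if (!validate(&vmcb12->control))        /* time of check */
                return false;
        cached = vmcb12->control;               /* time of use: may differ */

        /* Safe: snapshot first, then validate the private copy. */
        cached = vmcb12->control;
        if (!validate(&cached))
                return false;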
@@ -396,7 +403,14 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
         svm->vmcb->save.gdtr = vmcb12->save.gdtr;
         svm->vmcb->save.idtr = vmcb12->save.idtr;
         kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
-        svm_set_efer(&svm->vcpu, vmcb12->save.efer);
+
+        /*
+         * Force-set EFER_SVME even though it is checked earlier on the
+         * VMCB12, because the guest can flip the bit between the check
+         * and now.  Clearing EFER_SVME would call svm_free_nested.
+         */
+        svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);
+
         svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
         svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
         svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
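Note: the FIXME above explains why the remaining save-area races are tolerable for CR0 and CR4: kvm_set_cr0() and kvm_set_cr4() reject bad values gracefully. EFER is the one exception, handled by this hunk: per the new comment, clearing EFER_SVME makes svm_set_efer() call svm_free_nested(), tearing down the very nested state this VMRUN is using. A paraphrased sketch of the hazard (simplified from the real svm_set_efer()):

        /* Inside svm_set_efer(), roughly: */
        if (!(efer & EFER_SVME))
                svm_free_nested(svm);   /* would free state mid-VMRUN */

ORing in EFER_SVME keeps the bit set on this path no matter what the guest wrote after the check passed.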
@@ -468,7 +482,6 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
 
         svm->nested.vmcb12_gpa = vmcb12_gpa;
-        load_nested_vmcb_control(svm, &vmcb12->control);
         nested_prepare_vmcb_control(svm);
         nested_prepare_vmcb_save(svm, vmcb12);
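Note: the load is not deleted, it moves to the caller. nested_svm_vmrun() (next hunk) now snapshots the control area before running the checks, instead of enter_svm_guest_mode() doing it after them.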
@@ -515,7 +528,10 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
         if (WARN_ON_ONCE(!svm->nested.initialized))
                 return -EINVAL;
 
-        if (!nested_vmcb_checks(svm, vmcb12)) {
+        load_nested_vmcb_control(svm, &vmcb12->control);
+
+        if (!nested_vmcb_check_save(svm, vmcb12) ||
+            !nested_vmcb_check_controls(&svm->nested.ctl)) {
                 vmcb12->control.exit_code    = SVM_EXIT_ERR;
                 vmcb12->control.exit_code_hi = 0;
                 vmcb12->control.exit_info_1  = 0;
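Note: this is the core of the fix. The control area is copied into svm->nested.ctl first, and nested_vmcb_check_controls() validates that snapshot rather than vmcb12->control in guest memory, so the values that were checked are the values that get used. The resulting order, condensed (error path elided):

        load_nested_vmcb_control(svm, &vmcb12->control);        /* 1. snapshot       */
        if (!nested_vmcb_check_save(svm, vmcb12) ||             /* 2. check save     */
            !nested_vmcb_check_controls(&svm->nested.ctl))      /* 3. check snapshot */
                /* reflect SVM_EXIT_ERR back into vmcb12 */;

The save area is still checked in place, which is exactly what the FIXME in nested_vmcb_check_save() flags; the damage there is bounded by the CR0/CR4 error handling and the EFER_SVME force-set above.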
@@ -1209,6 +1225,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
          */
         if (!(save->cr0 & X86_CR0_PG))
                 goto out_free;
+        if (!(save->efer & EFER_SVME))
+                goto out_free;
 
         /*
          * All checks done, we can enter guest mode.  L1 control fields
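Note: the userspace restore path gets the matching constraint. KVM_SET_NESTED_STATE now rejects saved L1 state whose EFER has SVME clear, mirroring the VMRUN-side rule that being in guest mode implies EFER.SVME is set; without it, userspace could recreate the inconsistent state that the force-set above keeps the guest from reaching.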