KVM: nVMX: Don't evaluate "emulation required" on nested VM-Exit

Use the "internal" variants of setting segment registers when stuffing
state on nested VM-Exit in order to skip the "emulation required"
updates.  VM-Exit must always go to protected mode, and all segments are
mostly hardcoded (to valid values) on VM-Exit.  The bits of the segments
that aren't hardcoded are explicitly checked during VM-Enter, e.g. the
selector RPLs must all be zero.
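
For reference, the split being relied on looks roughly like the sketch below
(a paraphrase, not verbatim vmx.c; in particular the name of the "emulation
required" helper is approximate):

  /*
   * Paraphrased sketch: __vmx_set_segment() performs only the VMCS
   * writes for the segment, while the vmx_set_segment() wrapper also
   * recomputes whether the resulting vCPU state is invalid for VMX and
   * must be emulated.  Nested VM-Exit can skip that recomputation
   * because host state is architecturally guaranteed to be valid.
   */
  static void vmx_set_segment(struct kvm_vcpu *vcpu,
			      struct kvm_segment *var, int seg)
  {
	__vmx_set_segment(vcpu, var, seg);	/* VMCS writes only */

	/* Skipped by the "internal" variant used on nested VM-Exit. */
	to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
  }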

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210713163324.627647-30-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4267,7 +4267,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 		seg.l = 1;
 	else
 		seg.db = 1;
-	vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
+	__vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
 	seg = (struct kvm_segment) {
 		.base = 0,
 		.limit = 0xFFFFFFFF,
@@ -4278,17 +4278,17 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 		.g = 1
 	};
 	seg.selector = vmcs12->host_ds_selector;
-	vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
+	__vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
 	seg.selector = vmcs12->host_es_selector;
-	vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
+	__vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
 	seg.selector = vmcs12->host_ss_selector;
-	vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
+	__vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
 	seg.selector = vmcs12->host_fs_selector;
 	seg.base = vmcs12->host_fs_base;
-	vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
+	__vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
 	seg.selector = vmcs12->host_gs_selector;
 	seg.base = vmcs12->host_gs_base;
-	vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
+	__vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
 	seg = (struct kvm_segment) {
 		.base = vmcs12->host_tr_base,
 		.limit = 0x67,
@@ -4296,11 +4296,11 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 		.type = 11,
 		.present = 1
 	};
-	vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
+	__vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
 
 	memset(&seg, 0, sizeof(seg));
 	seg.unusable = 1;
-	vmx_set_segment(vcpu, &seg, VCPU_SREG_LDTR);
+	__vmx_set_segment(vcpu, &seg, VCPU_SREG_LDTR);
 
 	kvm_set_dr(vcpu, 7, 0x400);
 	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2719,8 +2719,6 @@ static __init int alloc_kvm_area(void)
 	return 0;
 }
 
-static void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
-
 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
 			  struct kvm_segment *save)
 {
@@ -3293,7 +3291,7 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var)
 	return ar;
 }
 
-static void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
+void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
@@ -3330,7 +3328,7 @@ static void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
 	vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
 }
 
-void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
+static void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
 {
 	__vmx_set_segment(vcpu, var, seg);
 
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -371,7 +371,7 @@
 void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
 void ept_save_pdptrs(struct kvm_vcpu *vcpu);
 void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
-void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
+void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
 bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);