KVM: nVMX/nSVM: Fix bug which sets vcpu->arch.tsc_offset to L1 tsc_offset
Since commit e79f245dde ("X86/KVM: Properly update 'tsc_offset' to
represent the running guest"), vcpu->arch.tsc_offset is meant to always
reflect the tsc_offset value set in the active VMCS, regardless of
whether the vCPU is currently running L1 or L2.

However, the above-mentioned commit failed to also change
kvm_vcpu_write_tsc_offset() to set vcpu->arch.tsc_offset accordingly:
vmx_write_tsc_offset() may set the tsc_offset value in the active VMCS
to the given offset parameter *plus vmcs12->tsc_offset*, while
kvm_vcpu_write_tsc_offset() sets vcpu->arch.tsc_offset to the given
offset parameter alone, without taking into account the possible
addition of vmcs12->tsc_offset. (The same is true for the SVM case.)

Fix this issue by changing kvm_x86_ops->write_tsc_offset() to return
the tsc_offset actually set in the active VMCS, and by modifying
kvm_vcpu_write_tsc_offset() to store that returned value in
vcpu->arch.tsc_offset. In addition, rename the write_tsc_offset()
callback to write_l1_tsc_offset() to make it clear that it is meant to
set the L1 TSC offset.

Fixes: e79f245dde ("X86/KVM: Properly update 'tsc_offset' to represent the running guest")
Reviewed-by: Liran Alon <liran.alon@oracle.com>
Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>
Reviewed-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Signed-off-by: Leonid Shatz <leonid.shatz@oracle.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 326e742533
parent 1e4329ee2c
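The bookkeeping mismatch the commit message describes is easy to reproduce outside the kernel. Below is a minimal standalone C sketch, not kernel code: struct model_vcpu and every name in it are invented for illustration, and the numbers are arbitrary. It models a vCPU running L2 with vmcs12->tsc_offset = 500 and shows how caching the callback's return value keeps the vcpu->arch.tsc_offset analogue in sync with the active VMCS.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/*
 * Standalone model of the bookkeeping fixed by this commit; none of
 * these types or functions are kernel code, they only mirror the
 * logic described in the commit message.
 */
struct model_vcpu {
	bool     guest_mode;        /* is L2 currently running?          */
	uint64_t l1_tsc_offset;     /* offset requested for L1           */
	uint64_t vmcs12_tsc_offset; /* extra offset L1 set up for L2     */
	uint64_t active_tsc_offset; /* what the active VMCS really holds */
	uint64_t arch_tsc_offset;   /* vcpu->arch.tsc_offset bookkeeping */
};

/* New-style callback: programs the active VMCS and reports what it set. */
static uint64_t write_l1_tsc_offset(struct model_vcpu *v, uint64_t offset)
{
	uint64_t active = offset;

	if (v->guest_mode)
		active += v->vmcs12_tsc_offset;
	v->l1_tsc_offset = offset;
	v->active_tsc_offset = active;
	return active;
}

int main(void)
{
	struct model_vcpu v = { .guest_mode = true, .vmcs12_tsc_offset = 500 };

	/* Old code: cached the L1 offset, ignoring vmcs12->tsc_offset. */
	write_l1_tsc_offset(&v, 1000);
	v.arch_tsc_offset = 1000;                /* stale while L2 runs */
	printf("old: arch=%llu active=%llu (mismatch)\n",
	       (unsigned long long)v.arch_tsc_offset,
	       (unsigned long long)v.active_tsc_offset);

	/* Fixed code: cache whatever the callback actually programmed. */
	v.arch_tsc_offset = write_l1_tsc_offset(&v, 1000);
	printf("new: arch=%llu active=%llu (consistent)\n",
	       (unsigned long long)v.arch_tsc_offset,
	       (unsigned long long)v.active_tsc_offset);
	return 0;
}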
arch/x86/include/asm/kvm_host.h

@@ -1094,7 +1094,8 @@ struct kvm_x86_ops {
 	bool (*has_wbinvd_exit)(void);
 
 	u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
-	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+	/* Returns actual tsc_offset set in active VMCS */
+	u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
 
arch/x86/kvm/svm.c

@@ -1446,7 +1446,7 @@ static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
 	return vcpu->arch.tsc_offset;
 }
 
-static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u64 g_tsc_offset = 0;
@@ -1464,6 +1464,7 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
 
 	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+	return svm->vmcb->control.tsc_offset;
 }
 
 static void avic_init_vmcb(struct vcpu_svm *svm)
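The two hunks above show only the head and tail of the nSVM function; in the elided middle, g_tsc_offset is evidently set up as L2's offset relative to L1 when L2 is running. A hedged standalone model of the arithmetic follows; every name and parameter here is invented for illustration (the real code operates on the VMCB control fields and marks the VMCB dirty), and the only point taken from the hunks is the final line: the function now returns the value it actually wrote, so the caller can cache it.

#include <stdint.h>
#include <stdbool.h>

/* Illustration only; not the kernel's types or names. */
static uint64_t model_svm_write_l1_tsc_offset(uint64_t *vmcb_tsc_offset,
					      uint64_t *l1_tsc_offset,
					      bool guest_mode,
					      uint64_t offset)
{
	uint64_t g_tsc_offset = 0;

	if (guest_mode) {
		/* Preserve L2's delta relative to the old L1 offset. */
		g_tsc_offset = *vmcb_tsc_offset - *l1_tsc_offset;
		*l1_tsc_offset = offset;
	}

	/* While L2 runs, the active VMCB carries L1's offset plus L2's. */
	*vmcb_tsc_offset = offset + g_tsc_offset;
	return *vmcb_tsc_offset;
}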
@@ -7152,7 +7153,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.has_wbinvd_exit = svm_has_wbinvd_exit,
 
 	.read_l1_tsc_offset = svm_read_l1_tsc_offset,
-	.write_tsc_offset = svm_write_tsc_offset,
+	.write_l1_tsc_offset = svm_write_l1_tsc_offset,
 
 	.set_tdp_cr3 = set_tdp_cr3,
 
arch/x86/kvm/vmx.c

@@ -3464,11 +3464,9 @@ static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
 	return vcpu->arch.tsc_offset;
 }
 
-/*
- * writes 'offset' into guest's timestamp counter offset register
- */
-static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
+	u64 active_offset = offset;
 	if (is_guest_mode(vcpu)) {
 		/*
 		 * We're here if L1 chose not to trap WRMSR to TSC. According
@@ -3476,17 +3474,16 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 		 * set for L2 remains unchanged, and still needs to be added
 		 * to the newly set TSC to get L2's TSC.
 		 */
-		struct vmcs12 *vmcs12;
-		/* recalculate vmcs02.TSC_OFFSET: */
-		vmcs12 = get_vmcs12(vcpu);
-		vmcs_write64(TSC_OFFSET, offset +
-			(nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
-			 vmcs12->tsc_offset : 0));
+		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+		if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING))
+			active_offset += vmcs12->tsc_offset;
 	} else {
 		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
 					   vmcs_read64(TSC_OFFSET), offset);
-		vmcs_write64(TSC_OFFSET, offset);
 	}
+
+	vmcs_write64(TSC_OFFSET, active_offset);
+	return active_offset;
 }
 
 /*
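The rewritten VMX function is fully visible in the hunk above. Restated as a standalone sketch (invented parameter names; the real code tests is_guest_mode() and nested_cpu_has(), and programs the hardware with vmcs_write64(TSC_OFFSET, ...)), its control flow reduces to:

#include <stdint.h>
#include <stdbool.h>

/* Illustration only: the booleans stand in for is_guest_mode() and
 * nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING). */
static uint64_t model_vmx_write_l1_tsc_offset(bool guest_mode,
					      bool uses_tsc_offseting,
					      uint64_t vmcs12_tsc_offset,
					      uint64_t offset)
{
	uint64_t active_offset = offset;

	/* vmcs02 must carry L1's offset plus the one L1 set for L2. */
	if (guest_mode && uses_tsc_offseting)
		active_offset += vmcs12_tsc_offset;

	/* The real code does vmcs_write64(TSC_OFFSET, active_offset)
	 * here, once, below both branches. */
	return active_offset;
}

Hoisting the single vmcs_write64() below both branches is what makes returning active_offset trivially correct: exactly one value ever reaches the VMCS, and it is the same value handed back to the caller for vcpu->arch.tsc_offset.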
@@ -15074,7 +15071,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
 	.read_l1_tsc_offset = vmx_read_l1_tsc_offset,
-	.write_tsc_offset = vmx_write_tsc_offset,
+	.write_l1_tsc_offset = vmx_write_l1_tsc_offset,
 
 	.set_tdp_cr3 = vmx_set_cr3,
 
arch/x86/kvm/x86.c

@@ -1665,8 +1665,7 @@ EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
 
 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
-	kvm_x86_ops->write_tsc_offset(vcpu, offset);
-	vcpu->arch.tsc_offset = offset;
+	vcpu->arch.tsc_offset = kvm_x86_ops->write_l1_tsc_offset(vcpu, offset);
 }
 
 static inline bool kvm_check_tsc_unstable(void)
@@ -1794,7 +1793,8 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);
 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
 					   s64 adjustment)
 {
-	kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
+	u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
+	kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
 }
 
 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
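The adjust_tsc_offset_guest() hunk is a direct consequence of the kvm_vcpu_write_tsc_offset() hunk above it: once vcpu->arch.tsc_offset holds the active (possibly L2) offset, adding the adjustment to it would smuggle vmcs12->tsc_offset into the new L1 offset, so the adjustment must start from read_l1_tsc_offset() instead. A tiny arithmetic check with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Arbitrary example values, not from the patch. */
	uint64_t l1_offset = 1000, vmcs12_offset = 500, adjustment = 10;

	/* After the fix, while L2 runs: arch.tsc_offset == L1 + vmcs12. */
	uint64_t arch_offset = l1_offset + vmcs12_offset;

	/* Old expression, arch.tsc_offset + adjustment: prints 1510,
	 * wrong, because vmcs12's 500 leaks into the L1 offset. */
	printf("old: %llu\n", (unsigned long long)(arch_offset + adjustment));

	/* New expression, read_l1_tsc_offset() + adjustment: prints 1010. */
	printf("new: %llu\n", (unsigned long long)(l1_offset + adjustment));
	return 0;
}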