KVM: X86: Delegate tsc-offset calculation to architecture code
With TSC scaling in SVM the tsc-offset needs to be calculated
differently. This patch propagates this calculation into the
architecture specific modules so that this complexity can be handled
there.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
commit 857e40999e
parent 4051b18801
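
Why the old generic computation breaks: on SVM with TSC scaling active, the guest observes guest_tsc = scale(host_tsc) + tsc_offset, so an offset computed as target - host_tsc (the pre-patch generic code) misses the target whenever the ratio is not 1. A minimal user-space sketch of the arithmetic, with a hypothetical num/den pair standing in for SVM's fixed-point ratio register:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins: a snapshot of the host TSC and a guest whose
 * TSC runs at half the host rate (SVM really uses a fixed-point ratio
 * MSR; num/den is a simplification for illustration). */
static const uint64_t host_tsc  = 1000000000ULL;
static const uint64_t ratio_num = 1, ratio_den = 2;

/* What the guest observes on SVM: scaled host TSC plus the VMCB offset. */
static uint64_t guest_tsc(uint64_t offset)
{
	return host_tsc * ratio_num / ratio_den + offset;
}

int main(void)
{
	uint64_t target = 2000000000ULL;   /* TSC value the guest wants */

	uint64_t generic = target - host_tsc;                         /* old */
	uint64_t scaled  = target - host_tsc * ratio_num / ratio_den; /* new */

	printf("generic offset: guest sees %llu\n",
	       (unsigned long long)guest_tsc(generic)); /* 1500000000: wrong */
	printf("scaled offset:  guest sees %llu\n",
	       (unsigned long long)guest_tsc(scaled));  /* 2000000000: exact */
	return 0;
}

With scaling at 1:2 the old formula leaves the guest short of its target; subtracting the scaled host TSC, as the SVM callback below does, lands it exactly.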
arch/x86/include/asm/kvm_host.h
@@ -609,6 +609,8 @@ struct kvm_x86_ops {
 	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz);
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
+	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
+
 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
 
 	int (*check_intercept)(struct kvm_vcpu *vcpu,
arch/x86/kvm/svm.c
@@ -943,6 +943,15 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
 	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
+static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+{
+	u64 tsc;
+
+	tsc = svm_scale_tsc(vcpu, native_read_tsc());
+
+	return target_tsc - tsc;
+}
+
 static void init_vmcb(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
@@ -4194,6 +4203,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_tsc_khz = svm_set_tsc_khz,
 	.write_tsc_offset = svm_write_tsc_offset,
 	.adjust_tsc_offset = svm_adjust_tsc_offset,
+	.compute_tsc_offset = svm_compute_tsc_offset,
 
 	.set_tdp_cr3 = set_tdp_cr3,
 
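
svm_compute_tsc_offset() leans on svm_scale_tsc(), which this commit does not touch. For orientation only, a sketch of that scaling step under the assumption of AMD's 8.32 fixed-point TSC ratio; the split multiply keeps the intermediate product inside 64 bits, and the names are illustrative rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Assumed format: AMD's TSC ratio is 8.32 fixed point, i.e. the
 * effective multiplier is ratio / 2^32. */
#define TSC_RATIO_FRAC_BITS 32

/* Compute (tsc * ratio) >> 32 without overflowing 64 bits by
 * multiplying the high and low 32-bit halves of the TSC separately. */
static uint64_t scale_tsc(uint64_t ratio, uint64_t tsc)
{
	uint64_t lo = tsc & 0xffffffffULL;
	uint64_t hi = tsc >> TSC_RATIO_FRAC_BITS;

	return hi * ratio + ((lo * ratio) >> TSC_RATIO_FRAC_BITS);
}

int main(void)
{
	uint64_t ratio = 3ULL << (TSC_RATIO_FRAC_BITS - 1); /* 1.5 in 8.32 */

	printf("%llu\n", (unsigned long long)scale_tsc(ratio, 1000)); /* 1500 */
	return 0;
}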
arch/x86/kvm/vmx.c
@@ -1184,6 +1184,11 @@ static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
 	vmcs_write64(TSC_OFFSET, offset + adjustment);
 }
 
+static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+{
+	return target_tsc - native_read_tsc();
+}
+
 /*
  * Reads an msr value (of 'msr_index') into 'pdata'.
  * Returns 0 on success, non-0 otherwise.
@@ -4510,6 +4515,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_tsc_khz = vmx_set_tsc_khz,
 	.write_tsc_offset = vmx_write_tsc_offset,
 	.adjust_tsc_offset = vmx_adjust_tsc_offset,
+	.compute_tsc_offset = vmx_compute_tsc_offset,
 
 	.set_tdp_cr3 = vmx_set_cr3,
 
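
VMX has no hardware TSC scaling here, so its callback stays the plain subtraction; the point of the commit is that generic code no longer needs to know which case it is talking to. A stripped-down, hypothetical illustration of the function-pointer delegation pattern kvm_x86_ops uses:

#include <stdint.h>
#include <stdio.h>

struct vcpu {
	uint64_t host_tsc;
	uint64_t num, den;   /* scaling ratio, 1/1 when unscaled */
};

/* Mirrors the callback this commit adds to kvm_x86_ops. */
struct x86_ops {
	uint64_t (*compute_tsc_offset)(struct vcpu *v, uint64_t target);
};

static uint64_t vmx_like(struct vcpu *v, uint64_t target)
{
	return target - v->host_tsc;                   /* no scaling */
}

static uint64_t svm_like(struct vcpu *v, uint64_t target)
{
	return target - v->host_tsc * v->num / v->den; /* scale first */
}

int main(void)
{
	struct vcpu v = { .host_tsc = 1000, .num = 1, .den = 2 };
	struct x86_ops svm_ops = { .compute_tsc_offset = svm_like };
	struct x86_ops vmx_ops = { .compute_tsc_offset = vmx_like };

	/* Generic code, like kvm_write_tsc(), calls through the table
	 * and never learns whether scaling happened. */
	printf("svm offset = %llu\n",
	       (unsigned long long)svm_ops.compute_tsc_offset(&v, 2000));
	printf("vmx offset = %llu\n",
	       (unsigned long long)vmx_ops.compute_tsc_offset(&v, 2000));
	return 0;
}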
arch/x86/kvm/x86.c
@@ -977,7 +977,7 @@ static u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu)
 	return __this_cpu_read(cpu_tsc_khz);
 }
 
-static inline u64 nsec_to_cycles(u64 nsec)
+static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
 {
 	u64 ret;
 
@@ -985,7 +985,7 @@ static inline u64 nsec_to_cycles(u64 nsec)
 	if (kvm_tsc_changes_freq())
 		printk_once(KERN_WARNING
 			    "kvm: unreliable cycle conversion on adjustable rate TSC\n");
-	ret = nsec * __this_cpu_read(cpu_tsc_khz);
+	ret = nsec * vcpu_tsc_khz(vcpu);
 	do_div(ret, USEC_PER_SEC);
 	return ret;
 }
@@ -1015,7 +1015,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	s64 sdiff;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
-	offset = data - native_read_tsc();
+	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
 	ns = get_kernel_ns();
 	elapsed = ns - kvm->arch.last_tsc_nsec;
 	sdiff = data - kvm->arch.last_tsc_write;
@@ -1031,13 +1031,13 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	 * In that case, for a reliable TSC, we can match TSC offsets,
 	 * or make a best guest using elapsed value.
 	 */
-	if (sdiff < nsec_to_cycles(5ULL * NSEC_PER_SEC) &&
+	if (sdiff < nsec_to_cycles(vcpu, 5ULL * NSEC_PER_SEC) &&
 	    elapsed < 5ULL * NSEC_PER_SEC) {
 		if (!check_tsc_unstable()) {
 			offset = kvm->arch.last_tsc_offset;
 			pr_debug("kvm: matched tsc offset for %llu\n", data);
 		} else {
-			u64 delta = nsec_to_cycles(elapsed);
+			u64 delta = nsec_to_cycles(vcpu, elapsed);
 			offset += delta;
 			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
 		}
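
nsec_to_cycles() gains a vcpu argument because, once guests can run at scaled TSC rates, there is no single host frequency to convert against; kvm_write_tsc() uses the result to decide whether a write falls inside its 5-second matching window. The conversion itself is cycles = ns * tsc_khz / USEC_PER_SEC. A hypothetical standalone check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC 1000000ULL

/* Same formula as the reworked kernel helper: kHz = 10^3 cycles/s and
 * ns = 10^-9 s, so the product needs a division by 10^6. */
static uint64_t nsec_to_cycles(uint64_t tsc_khz, uint64_t nsec)
{
	return nsec * tsc_khz / USEC_PER_SEC;
}

int main(void)
{
	/* 5 seconds at 2 GHz (2,000,000 kHz) -> 10,000,000,000 cycles:
	 * the window kvm_write_tsc() compares sdiff against. */
	printf("%llu\n",
	       (unsigned long long)nsec_to_cycles(2000000ULL, 5000000000ULL));
	return 0;
}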