KVM: X86: implement the logic for spinlock optimization
get_cpl requires vcpu_load, so we must cache the result (whether the
vcpu was preempted when its cpl=0) in kvm_vcpu_arch.

Signed-off-by: Longpeng(Mike) <longpeng2@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit de63ad4cf4
parent 199b5763d3
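For context, a condensed sketch of how the yield_to_kernel_mode argument
added by the parent commit is consumed. This is not the literal
virt/kvm/kvm_main.c code (the real loop also checks vcpu->preempted and
other eligibility conditions, which are omitted here): kvm_vcpu_on_spin()
skips yield candidates that were preempted in user mode, since a vcpu
running in user mode cannot hold the kernel spinlock the spinner is
waiting on.

/* Simplified sketch of the directed-yield filter in kvm_vcpu_on_spin(). */
void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{
	struct kvm *kvm = me->kvm;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu == me)
			continue;
		/*
		 * Only a vcpu preempted in kernel mode can hold the
		 * contended lock; skip the others when the spinner
		 * itself is known to be in kernel mode.
		 */
		if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
			continue;
		if (kvm_vcpu_yield_to(vcpu) > 0)
			break;
	}
}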
arch/x86/include/asm/kvm_host.h
@@ -688,6 +688,9 @@ struct kvm_vcpu_arch {
 
 	/* GPA available (AMD only) */
 	bool gpa_available;
+
+	/* be preempted when it's in kernel-mode(cpl=0) */
+	bool preempted_in_kernel;
 };
 
 struct kvm_lpage_info {
arch/x86/kvm/hyperv.c
@@ -1274,7 +1274,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 
 	switch (code) {
 	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
-		kvm_vcpu_on_spin(vcpu, false);
+		kvm_vcpu_on_spin(vcpu, true);
 		break;
 	case HVCALL_POST_MESSAGE:
 	case HVCALL_SIGNAL_EVENT:
arch/x86/kvm/svm.c
@@ -3749,7 +3749,10 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
 
 static int pause_interception(struct vcpu_svm *svm)
 {
-	kvm_vcpu_on_spin(&svm->vcpu, false);
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+	bool in_kernel = (svm_get_cpl(vcpu) == 0);
+
+	kvm_vcpu_on_spin(vcpu, in_kernel);
 	return 1;
 }
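A subtlety worth spelling out (my reading of the commit message, not text
from the patch): svm_get_cpl() is safe in pause_interception() because the
handler runs in the target vcpu's own thread with its VMCB loaded.
kvm_arch_vcpu_in_kernel(), by contrast, is called from another vcpu's spin
loop, where the target is not loaded, which is why kvm_arch_vcpu_put()
caches the CPL-derived state instead of reading it on demand.

/*
 * Hypothetical helper, for illustration only: reading CPL goes through
 * loaded VMCB/VMCS state, so this is valid only in the vcpu's own
 * thread (e.g. an exit handler), never from a sibling vcpu.
 */
static bool vcpu_in_kernel_now(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->get_cpl(vcpu) == 0;	/* requires vcpu_load */
}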
arch/x86/kvm/vmx.c
@@ -6781,7 +6781,13 @@ static int handle_pause(struct kvm_vcpu *vcpu)
 	if (ple_gap)
 		grow_ple_window(vcpu);
 
-	kvm_vcpu_on_spin(vcpu, false);
+	/*
+	 * Intel sdm vol3 ch-25.1.3 says: The "PAUSE-loop exiting"
+	 * VM-execution control is ignored if CPL > 0. OTOH, KVM
+	 * never set PAUSE_EXITING and just set PLE if supported,
+	 * so the vcpu must be CPL=0 if it gets a PAUSE exit.
+	 */
+	kvm_vcpu_on_spin(vcpu, true);
 	return kvm_skip_emulated_instruction(vcpu);
 }
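The comment above relies on how KVM programs the VMX execution controls;
the following is a condensed sketch of that setup, an assumption
simplified from the vmx.c of this period rather than the literal code.

/*
 * PLE is enabled only through the secondary execution controls; the
 * primary CPU_BASED_PAUSE_EXITING control is never set.  Because the
 * PLE control is ignored at CPL > 0, a PAUSE VM exit implies CPL 0.
 */
static u32 pick_secondary_exec_controls(u32 ctl)
{
	if (ple_gap)		/* PLE supported and not disabled */
		ctl |= SECONDARY_EXEC_PAUSE_LOOP_EXITING;
	return ctl;
}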
arch/x86/kvm/x86.c
@@ -2873,6 +2873,10 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	int idx;
+
+	if (vcpu->preempted)
+		vcpu->arch.preempted_in_kernel = !kvm_x86_ops->get_cpl(vcpu);
+
 	/*
 	 * Disable page faults because we're in atomic context here.
 	 * kvm_write_guest_offset_cached() would call might_fault()
@@ -7985,6 +7989,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	kvm_pmu_init(vcpu);
 
 	vcpu->arch.pending_external_vector = -1;
+	vcpu->arch.preempted_in_kernel = false;
 
 	kvm_hv_vcpu_init(vcpu);
 
@@ -8434,7 +8439,7 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 {
-	return false;
+	return vcpu->arch.preempted_in_kernel;
 }
 
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)