KVM: ppc: Handle guest idle by emulating MSR[WE] writes
This reduces host CPU usage when the guest is idle. However, the guest must
set MSR[WE] in its idle loop, which Linux did not do until 2.6.26.

Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Jerone Young <jyoung5@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
commit 45c5eb67da
parent 3fe913e7c5
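
For context: the guest signals idleness by setting MSR[WE] (wait state enable) in its idle loop; the privileged mtmsr traps into KVM, which emulates the write and blocks the vcpu. A minimal, hypothetical sketch of such a Book E guest idle loop (not part of this patch; MSR_WE/MSR_EE are the usual register-bit defines) might look like:

	/* Hypothetical guest-side idle loop, illustration only. Setting MSR[WE]
	 * (with external interrupts enabled) is what lets KVM put the vcpu to
	 * sleep on the host instead of spinning. */
	static void guest_cpu_idle(void)
	{
		unsigned long msr;

		asm volatile("mfmsr %0" : "=r" (msr));
		msr |= MSR_WE | MSR_EE;		/* wait enable + external interrupts */
		asm volatile("mtmsr %0" : : "r" (msr) : "memory");
		/* Resumes here once an interrupt clears the wait state. */
	}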
@@ -49,6 +49,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "inst_emu", VCPU_STAT(emulated_inst_exits) },
 	{ "dec", VCPU_STAT(dec_exits) },
 	{ "ext_intr", VCPU_STAT(ext_intr_exits) },
+	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
 	{ NULL }
 };
 
@@ -36,13 +36,12 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
 
 int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
 {
-	/* XXX implement me */
-	return 0;
+	return !!(v->arch.pending_exceptions);
 }
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
-	return 1;
+	return !(v->arch.msr & MSR_WE);
 }
 
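These two hooks are what the generic halt path consults: once kvmppc_set_msr() calls kvm_vcpu_block() (added further below), the vcpu sleeps on vcpu->wq until it is runnable again. A simplified sketch of that block/wake pattern follows; it illustrates the waitqueue idiom only and is not the actual kvm_vcpu_block() implementation, which also deals with signals and timers:

	/* Simplified illustration of the block/wake pattern (not the real
	 * kvm_vcpu_block() from virt/kvm/kvm_main.c). Assumes <linux/wait.h>
	 * and the KVM vcpu definitions are available. */
	static void example_vcpu_block(struct kvm_vcpu *vcpu)
	{
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
			if (kvm_arch_vcpu_runnable(vcpu) || kvm_cpu_has_interrupt(vcpu))
				break;		/* MSR[WE] cleared or an exception is pending */
			schedule();		/* give the host CPU back while the guest idles */
		}
		finish_wait(&vcpu->wq, &wait);
	}

The wake-up side is the waitqueue_active()/wake_up_interruptible() pairs added in the decrementer and external-interrupt paths below.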
@@ -214,6 +213,11 @@ static void kvmppc_decrementer_func(unsigned long data)
 	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
 
 	kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER);
+
+	if (waitqueue_active(&vcpu->wq)) {
+		wake_up_interruptible(&vcpu->wq);
+		vcpu->stat.halt_wakeup++;
+	}
 }
 
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
@@ -339,6 +343,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	int r;
 	sigset_t sigsaved;
 
+	vcpu_load(vcpu);
+
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
@@ -363,12 +369,20 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
 
+	vcpu_put(vcpu);
+
 	return r;
 }
 
 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
 {
 	kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL);
+
+	if (waitqueue_active(&vcpu->wq)) {
+		wake_up_interruptible(&vcpu->wq);
+		vcpu->stat.halt_wakeup++;
+	}
+
 	return 0;
 }
 
@@ -59,6 +59,7 @@ struct kvm_vcpu_stat {
 	u32 emulated_inst_exits;
 	u32 dec_exits;
 	u32 ext_intr_exits;
+	u32 halt_wakeup;
 };
 
 struct tlbe {
@@ -77,12 +77,17 @@ static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception)
 	clear_bit(priority, &vcpu->arch.pending_exceptions);
 }
 
+/* Helper function for "full" MSR writes. No need to call this if only EE is
+ * changing. */
 static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
 {
 	if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
 		kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
 
 	vcpu->arch.msr = new_msr;
+
+	if (vcpu->arch.msr & MSR_WE)
+		kvm_vcpu_block(vcpu);
 }
 
 #endif /* __POWERPC_KVM_PPC_H__ */
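
The hunk above adds the blocking behaviour; the "emulating MSR[WE] writes" half of the title lives in the privileged-instruction emulation path, which is not part of this excerpt. A hypothetical call site might look like the following, with the instruction decode assumed:

	/* Hypothetical mtmsr emulation snippet, illustration only (the real
	 * change to the emulation code is not shown in this excerpt). The
	 * trapped new MSR value is forwarded to kvmppc_set_msr(), which blocks
	 * the vcpu whenever MSR[WE] is set. */
	static void example_emulate_mtmsr(struct kvm_vcpu *vcpu, u32 inst)
	{
		int rs = (inst >> 21) & 0x1f;	/* RS field of the mtmsr encoding */

		kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
	}

The vcpu then sleeps in kvm_vcpu_block() until the decrementer or an external interrupt queues an exception and wakes it through the halt_wakeup paths above.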