/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>

#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>

#define MAX_IO_MSRS 256

#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

/* EFER defaults:
 * - enable syscall by default because it's emulated by KVM
 * - enable LME and LMA by default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

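/*
 * Each *_RESERVED_BITS mask above is the complement of the
 * architecturally defined bits, so (val & FOO_RESERVED_BITS) is
 * nonzero exactly when a guest tries to set a bit this code does not
 * know about; the control-register setters below turn that into an
 * injected #GP.
 */
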
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

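/*
 * kvm_x86_ops points at the vendor-specific (VMX or SVM)
 * implementation; it is expected to be filled in during module
 * initialization, before any of the kvm_x86_ops->... calls below run.
 */
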
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};

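/*
 * With the VM_STAT()/VCPU_STAT() helpers, an entry such as
 * { "exits", VCPU_STAT(exits) } expands to
 * { "exits", offsetof(struct kvm_vcpu, stat.exits), KVM_STAT_VCPU },
 * i.e. a (name, offset, type) triple the generic statistics/debugfs
 * code can use to locate each counter.
 */
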
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct desc_struct *d;
	unsigned long table_base;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {		/* from ldt */
		u16 ldt_selector;

		asm("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct desc_struct *)(table_base + (selector & ~7));
	v = d->base0 | ((unsigned long)d->base1 << 16) |
		((unsigned long)d->base2 << 24);
#ifdef CONFIG_X86_64
	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);

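/*
 * A segment descriptor scatters its base across base0 (bits 0-15),
 * base1 (bits 16-23) and base2 (bits 24-31); for a base of
 * 0x12345678, base0 = 0x5678, base1 = 0x34 and base2 = 0x12.
 * 64-bit system descriptors (LDT/TSS, types 2, 9 and 11) carry bits
 * 32-63 in an additional base3 word, which segment_base() picks up
 * in the CONFIG_X86_64 branch above.
 */
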
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	/*
	 * The cached value is authoritative whether or not the irqchip
	 * is in the kernel, so both cases read the same field.
	 */
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

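/*
 * With an in-kernel irqchip, kvm_lapic_set_base() keeps the emulated
 * local APIC in sync in addition to updating the cached value; without
 * one, userspace owns the APIC model and only the cached MSR value is
 * maintained here.
 */
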
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = false;
	vcpu->arch.exception.nr = nr;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			   u32 error_code)
{
	++vcpu->stat.pf_guest;

	if (vcpu->arch.exception.pending) {
		if (vcpu->arch.exception.nr == PF_VECTOR) {
			printk(KERN_DEBUG "kvm: inject_page_fault:"
			       " double fault 0x%lx\n", addr);
			vcpu->arch.exception.nr = DF_VECTOR;
			vcpu->arch.exception.error_code = 0;
		} else if (vcpu->arch.exception.nr == DF_VECTOR) {
			/* triple fault -> shutdown */
			set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
		}
		return;
	}
	vcpu->arch.cr2 = addr;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

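/*
 * The merging above follows the x86 contributory-exception rules: a
 * page fault raised while a #PF is already pending escalates to #DF,
 * and a fault on top of a pending #DF is a triple fault, which is
 * reported as a shutdown request rather than injected into the guest.
 */
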
void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = true;
	vcpu->arch.exception.nr = nr;
	vcpu->arch.exception.error_code = error_code;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

static void __queue_exception(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
				     vcpu->arch.exception.has_error_code,
				     vcpu->arch.exception.error_code);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if (is_present_pte(pdpte[i]) &&
		    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:

	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

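/*
 * Worked example of the offset arithmetic in load_pdptrs(): in PAE
 * mode cr3 points at a 32-byte-aligned table of four 8-byte PDPTEs,
 * so for cr3 = 0x12345fe0 we get pdpt_gfn = 0x12345 and
 * offset = (0xfe0 >> 5) << 2 = 0x1fc u64-sized units, i.e. byte
 * offset 0x1fc * 8 = 0xfe0 into the page - exactly the low cr3 bits.
 */
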
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
	bool changed = true;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:

	return changed;
}

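/*
 * pdptrs_changed() lets a cr3 write that targets the current value
 * take a fast path: if the guest's in-memory PDPTEs still match the
 * cached copy, kvm_set_cr3() below only syncs roots and flushes the
 * TLB instead of doing a full reload.
 */
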
void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->arch.cr0);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

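/*
 * To summarize, kvm_set_cr0() injects #GP for: reserved bits, NW set
 * with CD clear, PG set with PE clear, enabling paging with EFER.LME
 * set while PAE is off or while CS.L == 1 (the long-mode activation
 * rules), and PDPTEs with reserved bits set when paging is enabled
 * in PAE mode.
 */
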
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
	KVMTRACE_1D(LMSW, vcpu,
		    (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
		    handler);
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

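/*
 * LMSW can only load the low four CR0 bits (PE, MP, EM and TS),
 * hence the ~0x0ful / 0x0f masking above; everything else in CR0 is
 * preserved and the combined value is forwarded to kvm_set_cr0().
 */
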
void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = vcpu->arch.cr4;
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->arch.cr4 = cr4;
	vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

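/*
 * pdptr_bits (PGE, PSE, PAE) are the CR4 bits whose transition
 * invalidates cached PDPTEs, so the PAE-paging path above only
 * revalidates the pdptrs when (cr4 ^ old_cr4) shows one of them
 * actually flipped.
 */
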
void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		kvm_inject_gp(vcpu, 0);
	else {
		vcpu->arch.cr3 = cr3;
		vcpu->arch.mmu.new_cr3(vcpu);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		kvm_inject_gp(vcpu, 0);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

2008-02-24 17:20:43 +08:00
|
|
|
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
|
KVM: Portability: Move control register helper functions to x86.c
This patch moves the definitions of CR0_RESERVED_BITS,
CR4_RESERVED_BITS, and CR8_RESERVED_BITS along with the following
functions from kvm_main.c to x86.c:
set_cr0(), set_cr3(), set_cr4(), set_cr8(), get_cr8(), lmsw(),
load_pdptrs()
The static function wrapper inject_gp is duplicated in kvm_main.c and
x86.c for now, the version in kvm_main.c should disappear once the last
user of it is gone too.
The function load_pdptrs is no longer static, and now defined in x86.h
for the time being, until the last user of it is gone from kvm_main.c.
Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
2007-10-29 23:09:35 +08:00
|
|
|
{
|
|
|
|
if (irqchip_in_kernel(vcpu->kvm))
|
|
|
|
return kvm_lapic_get_cr8(vcpu);
|
|
|
|
else
|
2007-12-13 23:50:52 +08:00
|
|
|
return vcpu->arch.cr8;
|
KVM: Portability: Move control register helper functions to x86.c
This patch moves the definitions of CR0_RESERVED_BITS,
CR4_RESERVED_BITS, and CR8_RESERVED_BITS along with the following
functions from kvm_main.c to x86.c:
set_cr0(), set_cr3(), set_cr4(), set_cr8(), get_cr8(), lmsw(),
load_pdptrs()
The static function wrapper inject_gp is duplicated in kvm_main.c and
x86.c for now, the version in kvm_main.c should disappear once the last
user of it is gone too.
The function load_pdptrs is no longer static, and now defined in x86.h
for the time being, until the last user of it is gone from kvm_main.c.
Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
2007-10-29 23:09:35 +08:00
|
|
|
}
|
2008-02-24 17:20:43 +08:00
|
|
|
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}
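
/*
 * Worked example (an illustrative note, not from the original source):
 * X86_FEATURE_SVM names bit 2 of CPUID leaf 0x80000001 ECX and is encoded
 * as (word * 32 + 2) in cpufeature terms.  The "& 31" strips the word
 * index, so bit(X86_FEATURE_SVM) == 1 << 2 == 0x4, which is exactly the
 * mask tested against feat->ecx in set_efer() below.
 */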

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
			printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
			printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	kvm_x86_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.shadow_efer & EFER_LMA;

	vcpu->arch.shadow_efer = efer;

	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
	kvm_mmu_reset_context(vcpu);
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	static int version;
	struct pvclock_wall_clock wc;
	struct timespec now, sys, boot;

	if (!wall_clock)
		return;

	version++;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_write_guest_time below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	now = current_kernel_time();
	ktime_get_ts(&sys);
	boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));

	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}
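
/*
 * An illustrative note on the version protocol above (not part of the
 * original source): the two version bumps form a seqlock.  The first
 * kvm_write_guest() makes the counter odd while the structure is in
 * flux; the final one makes it even again.  A guest reader would do,
 * roughly (hypothetical guest-side sketch):
 *
 *	do {
 *		v = wc->version;
 *		rmb();
 *		sec = wc->sec; nsec = wc->nsec;
 *		rmb();
 *	} while ((v & 1) || v != wc->version);
 */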

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace with do_div(), this one calculates
	 * "(dividend << 32) / divisor" */
	__asm__ ( "divl %4"
		  : "=a" (quotient), "=d" (remainder)
		  : "0" (0), "1" (dividend), "r" (divisor) );
	return quotient;
}
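
/*
 * Worked example (illustrative, not from the original source): with
 * dividend = 10^9 and divisor = 2 * 10^9, the divl computes
 * (10^9 << 32) / (2 * 10^9) = 2^31, i.e. the 32-bit fixed-point
 * fraction 0.5 -- exactly the multiplier a 2 GHz TSC needs to be
 * scaled to nanoseconds in kvm_set_time_scale() below.
 */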

static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
	uint64_t nsecs = 1000000000LL;
	int32_t  shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = tsc_khz * 1000LL;
	while (tps64 > nsecs*2) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= (uint32_t)nsecs) {
		tps32 <<= 1;
		shift++;
	}

	hv_clock->tsc_shift = shift;
	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

	pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
		 __func__, tsc_khz, hv_clock->tsc_shift,
		 hv_clock->tsc_to_system_mul);
}
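
/*
 * Worked example (illustrative, not from the original source): for
 * tsc_khz = 2000000 (a 2 GHz TSC), tps64 = 2 * 10^9 ticks per second.
 * Neither loop runs (2 * 10^9 is not above nsecs * 2, and tps32 already
 * exceeds nsecs), so tsc_shift stays 0 and tsc_to_system_mul =
 * div_frac(10^9, 2 * 10^9) = 2^31.  The guest then converts ticks to
 * nanoseconds as (ticks * 2^31) >> 32 = ticks / 2, as expected for a
 * 0.5 ns TSC period.
 */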

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

static void kvm_write_guest_time(struct kvm_vcpu *v)
{
	struct timespec ts;
	unsigned long flags;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	void *shared_kaddr;
	unsigned long this_tsc_khz;

	if (!vcpu->time_page)
		return;

	this_tsc_khz = get_cpu_var(cpu_tsc_khz);
	if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
		kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
		vcpu->hv_clock_tsc_khz = this_tsc_khz;
	}
	put_cpu_var(cpu_tsc_khz);

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
		    &vcpu->hv_clock.tsc_timestamp);
	ktime_get_ts(&ts);
	local_irq_restore(flags);

	/* With all the info we got, fill in the values */

	vcpu->hv_clock.system_time = ts.tv_nsec +
				     (NSEC_PER_SEC * (u64)ts.tv_sec);
	/*
	 * The interface expects us to write an even number signaling that the
	 * update is finished. Since the guest won't see the intermediate
	 * state, we just increase by 2 at the end.
	 */
	vcpu->hv_clock.version += 2;

	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
	       sizeof(vcpu->hv_clock));

	kunmap_atomic(shared_kaddr, KM_USER0);

	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}
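
/*
 * Guest-side view (an illustrative note, not from the original source):
 * under the pvclock ABI the guest reconstructs time from this record as
 * roughly
 *
 *	delta = rdtsc() - tsc_timestamp;
 *	delta = tsc_shift >= 0 ? delta << tsc_shift : delta >> -tsc_shift;
 *	now   = system_time + ((delta * tsc_to_system_mul) >> 32);
 *
 * which is why tsc_timestamp and system_time are sampled with interrupts
 * off above: the pair has to describe the same instant.
 */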

static int kvm_request_guest_time_update(struct kvm_vcpu *v)
{
	struct kvm_vcpu_arch *vcpu = &v->arch;

	if (!vcpu->time_page)
		return 0;
	set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
	return 1;
}

static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	case 0x2f8:
		return true;
	}
	return false;
}

static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType) {
		vcpu->arch.mtrr_state.def_type = data;
		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
	} else if (msr == MSR_MTRRfix64K_00000)
		p[0] = data;
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		p[1 + msr - MSR_MTRRfix16K_80000] = data;
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pt = data;
	}

	kvm_mmu_reset_context(vcpu);
	return 0;
}
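
/*
 * Worked example (illustrative, not from the original source): the
 * variable-range MTRRs live at 0x200 + 2n (PHYSBASEn) and 0x201 + 2n
 * (PHYSMASKn).  A write to msr 0x203 therefore decodes as
 * idx = (0x203 - 0x200) / 2 = 1 and is_mtrr_mask = 3 - 2 * 1 = 1,
 * i.e. the mask half of variable range 1.
 */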

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
	case MSR_IA32_MC0_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_MCG_CTL:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!data) {
			/* We support the non-activated case already */
			break;
		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
			/* Values other than LBR and BTF are vendor-specific,
			   thus reserved and should throw a #GP */
			return 1;
		}
		pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case MSR_VM_HSAVE_PA:
		break;
	case 0x200 ... 0x2ff:
		return set_msr_mtrr(vcpu, msr, data);
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
	case MSR_KVM_SYSTEM_TIME: {
		if (vcpu->arch.time_page) {
			kvm_release_page_dirty(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		vcpu->arch.time = data;

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

		/* ...but clean it before doing the actual write */
		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

		vcpu->arch.time_page =
				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);

		if (is_error_page(vcpu->arch.time_page)) {
			kvm_release_page_clean(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		kvm_request_guest_time_update(vcpu);
		break;
	}
	default:
		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);
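
/*
 * Worked example (illustrative, not from the original source): a guest
 * write of 0x12345401 to MSR_KVM_SYSTEM_TIME means "enabled" (bit 0),
 * registers guest frame 0x12345 (data >> PAGE_SHIFT), and places the
 * pvclock structure at offset 0x400 within that page
 * (data & ~(PAGE_MASK | 1)).
 */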

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.def_type +
			 (vcpu->arch.mtrr_state.enabled << 10);
	else if (msr == MSR_MTRRfix64K_00000)
		*pdata = p[0];
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pdata = *pt;
	}

	return 0;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_MC0_MISC+20:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_EBL_CR_POWERON:
	case MSR_IA32_DEBUGCTLMSR:
	case MSR_IA32_LASTBRANCHFROMIP:
	case MSR_IA32_LASTBRANCHTOIP:
	case MSR_IA32_LASTINTFROMIP:
	case MSR_IA32_LASTINTTOIP:
	case MSR_VM_HSAVE_PA:
	case MSR_P6_EVNTSEL0:
	case MSR_P6_EVNTSEL1:
	case MSR_K7_EVNTSEL0:
		data = 0;
		break;
	case MSR_MTRRcap:
		data = 0x500 | KVM_NR_VAR_MTRR;
		break;
	case 0x200 ... 0x2ff:
		return get_msr_mtrr(vcpu, msr, pdata);
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->arch.ia32_misc_enable_msr;
		break;
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
		data = 1000ULL;
		/* CPU multiplier */
		data |= (((uint64_t)4ULL) << 40);
		break;
	case MSR_EFER:
		data = vcpu->arch.shadow_efer;
		break;
	case MSR_KVM_WALL_CLOCK:
		data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_SYSTEM_TIME:
		data = vcpu->arch.time;
		break;
	default:
		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);
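
/*
 * An illustrative note (not from the original source): the synthetic
 * MTRRcap value above advertises fixed-range support (bit 8) and the
 * WC memory type (bit 10), i.e. 0x500, with the low byte reporting
 * KVM_NR_VAR_MTRR variable ranges.
 */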

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	down_read(&vcpu->kvm->slots_lock);
	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;
	up_read(&vcpu->kvm->slots_lock);

	vcpu_put(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}
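
/*
 * Userspace view (a hypothetical sketch, not from the original source):
 * the ioctl pair served by msr_io() is driven from a vcpu fd roughly as
 *
 *	struct { struct kvm_msrs hdr; struct kvm_msr_entry e[1]; } buf = {
 *		.hdr.nmsrs  = 1,
 *		.e[0].index = MSR_EFER,
 *	};
 *	int n = ioctl(vcpu_fd, KVM_GET_MSRS, &buf);
 *
 * where n reports how many of the requested msrs were processed.
 */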

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_SET_TSS_ADDR:
	case KVM_CAP_EXT_CPUID:
	case KVM_CAP_CLOCKSOURCE:
	case KVM_CAP_PIT:
	case KVM_CAP_NOP_IO_DELAY:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_REINJECT_CONTROL:
	case KVM_CAP_IRQ_INJECT_STATUS:
	case KVM_CAP_ASSIGN_DEV_IRQ:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_VAPIC:
		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
		break;
	case KVM_CAP_NR_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_MEMORY_SLOTS;
		break;
	case KVM_CAP_PV_MMU:
		r = !tdp_enabled;
		break;
	case KVM_CAP_IOMMU:
		r = iommu_found();
		break;
	default:
		r = 0;
		break;
	}
	return r;
}
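
/*
 * Userspace view (a hypothetical sketch, not from the original source):
 * these capability codes answer KVM_CHECK_EXTENSION on the /dev/kvm fd,
 * e.g.
 *
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP) > 0)
 *		... use the in-kernel irqchip ...
 *
 * Note the return value is not always boolean: KVM_CAP_NR_VCPUS and
 * KVM_CAP_COALESCED_MMIO report a limit and a page offset respectively.
 */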

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices
				 + num_msrs_to_save * sizeof(u32),
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SUPPORTED_CPUID: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
						      cpuid_arg->entries);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}
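
/*
 * An illustrative note (not from the original source): like most KVM
 * list ioctls, KVM_GET_MSR_INDEX_LIST is called twice.  The first call,
 * with nmsrs too small, fails with -E2BIG but has already written back
 * the required count; userspace then resizes its buffer and retries.
 */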

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_x86_ops->vcpu_load(vcpu, cpu);
	kvm_request_guest_time_update(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_put(vcpu);
	kvm_put_guest_fpu(vcpu);
}

static int is_efer_nx(void)
{
	unsigned long long efer = 0;

	rdmsrl_safe(MSR_EFER, &efer);
	return efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}
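
/*
 * An illustrative note (not from the original source): bit 20 of CPUID
 * leaf 0x80000001 EDX is the NX (no-execute) flag, so the fixup above
 * simply stops advertising NX to the guest whenever the host itself
 * runs with EFER.NX clear.
 */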

/* when an old userspace process fills a new kernel module */
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
	if (!cpuid_entries)
		goto out;
	r = -EFAULT;
	if (copy_from_user(cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out_free;
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	return 0;

out:
	return r;
}

static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}

#define F(x) bit(X86_FEATURE_##x)
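
/*
 * An illustrative note (not from the original source): F() turns a
 * feature name into its mask within the owning CPUID register, e.g.
 * F(NX) == bit(X86_FEATURE_NX) == 1 << 20, matching bit 20 of leaf
 * 0x80000001 EDX that cpuid_fix_nx_cap() manipulates above.
 */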

static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			 u32 index, int *nent, int maxnent)
{
	unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
	unsigned f_lm = F(LM);
#else
	unsigned f_lm = 0;
#endif

	/* cpuid 1.edx */
	const u32 kvm_supported_word0_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */;
	/* cpuid 0x80000001.edx */
	const u32 kvm_supported_word1_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | 0 /* GBPAGES */ | 0 /* RDTSCP */ |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
	/* cpuid 1.ecx */
	const u32 kvm_supported_word4_x86_features =
		F(XMM3) | 0 /* Reserved, DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
		0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | 0 /* x2APIC */ | F(MOVBE) | F(POPCNT) |
		0 /* Reserved, XSAVE, OSXSAVE */;
	/* cpuid 0x80000001.ecx */
	const u32 kvm_supported_word6_x86_features =
		F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
		0 /* SKINIT */ | 0 /* WDT */;

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();
	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xb);
		break;
	case 1:
		entry->edx &= kvm_supported_word0_x86_features;
		entry->ecx &= kvm_supported_word4_x86_features;
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
		for (t = 1; t < times && *nent < maxnent; ++t) {
			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 and 0xb have additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			level_type = entry[i - 1].ecx & 0xff00;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001a);
		break;
	case 0x80000001:
		entry->edx &= kvm_supported_word1_x86_features;
		entry->ecx &= kvm_supported_word6_x86_features;
		break;
	}
	put_cpu();
}

#undef F

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				      struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG;
	u32 func;

	if (cpuid->nent < 1)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
	limit = cpuid_entries[0].eax;
	for (func = 1; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -E2BIG;
	if (nent >= cpuid->nent)
		goto out_free;

	do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
	limit = cpuid_entries[nent - 1].eax;
	for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}
|
|
|
|
|
KVM: Portability: split kvm_vcpu_ioctl
This patch splits kvm_vcpu_ioctl into archtecture independent parts, and
x86 specific parts which go to kvm_arch_vcpu_ioctl in x86.c.
Common ioctls for all architectures are:
KVM_RUN, KVM_GET/SET_(S-)REGS, KVM_TRANSLATE, KVM_INTERRUPT,
KVM_DEBUG_GUEST, KVM_SET_SIGNAL_MASK, KVM_GET/SET_FPU
Note that some PPC chips don't have an FPU, so we might need an #ifdef
around KVM_GET/SET_FPU one day.
x86 specific ioctls are:
KVM_GET/SET_LAPIC, KVM_SET_CPUID, KVM_GET/SET_MSRS
An interresting aspect is vcpu_load/vcpu_put. We now have a common
vcpu_load/put which does the preemption stuff, and an architecture
specific kvm_arch_vcpu_load/put. In the x86 case, this one calls the
vmx/svm function defined in kvm_x86_ops.
Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
2007-10-12 01:16:52 +08:00
|
|
|
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
	kvm_apic_post_state_restore(vcpu);
	vcpu_put(vcpu);

	return 0;
}
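
/*
 * Inject a user-supplied external interrupt; only legal while the
 * interrupt chip is emulated in userspace (otherwise -ENXIO).
 */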
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	if (irqchip_in_kernel(vcpu->kvm))
		return -ENXIO;
	vcpu_load(vcpu);

	kvm_queue_interrupt(vcpu, irq->irq, false);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_inject_nmi(vcpu);
	vcpu_put(vcpu);

	return 0;
}

static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
					   struct kvm_tpr_access_ctl *tac)
{
	if (tac->flags)
		return -EINVAL;
	vcpu->arch.tpr_access_reporting = !!tac->enabled;
	return 0;
}
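
/*
 * Dispatch point for the x86-specific vcpu ioctls: local APIC state,
 * interrupt/NMI injection, the CPUID tables, MSR access, TPR access
 * reporting and the vapic address.
 *
 * Illustrative userspace call (a sketch, not part of this file; vcpu_fd
 * is assumed to be a vcpu file descriptor from KVM_CREATE_VCPU):
 *
 *	struct kvm_lapic_state lapic;
 *	if (ioctl(vcpu_fd, KVM_GET_LAPIC, &lapic) < 0)
 *		perror("KVM_GET_LAPIC");
 */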
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_lapic_state *lapic = NULL;

	switch (ioctl) {
	case KVM_GET_LAPIC: {
		lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);

		r = -ENOMEM;
		if (!lapic)
			goto out;
		r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_LAPIC: {
		lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
		r = -ENOMEM;
		if (!lapic)
			goto out;
		r = -EFAULT;
		if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
			goto out;
		r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_NMI: {
		r = kvm_vcpu_ioctl_nmi(vcpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, kvm_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_TPR_ACCESS_REPORTING: {
		struct kvm_tpr_access_ctl tac;

		r = -EFAULT;
		if (copy_from_user(&tac, argp, sizeof tac))
			goto out;
		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tac, sizeof tac))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_VAPIC_ADDR: {
		struct kvm_vapic_addr va;

		r = -EINVAL;
		if (!irqchip_in_kernel(vcpu->kvm))
			goto out;
		r = -EFAULT;
		if (copy_from_user(&va, argp, sizeof va))
			goto out;
		r = 0;
		kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	kfree(lapic);
	return r;
}
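
/*
 * The TSS region handed to kvm_x86_ops->set_tss_addr() is three pages
 * long; the check against (unsigned int)(-3 * PAGE_SIZE) rejects any
 * base address whose three pages would not fit below 4GB.
 */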
static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
	int ret;

	if (addr > (unsigned int)(-3 * PAGE_SIZE))
		return -1;
	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
	return ret;
}

static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					 u32 kvm_nr_mmu_pages)
{
	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
		return -EINVAL;

	down_write(&kvm->slots_lock);
	spin_lock(&kvm->mmu_lock);

	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

	spin_unlock(&kvm->mmu_lock);
	up_write(&kvm->slots_lock);
	return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
	return kvm->arch.n_alloc_mmu_pages;
}
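
/*
 * Translate a guest frame number through the alias table; a gfn outside
 * every alias region maps to itself.
 */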
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->arch.naliases; ++i) {
		alias = &kvm->arch.aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
					 struct kvm_memory_alias *alias)
{
	int r, n;
	struct kvm_mem_alias *p;

	r = -EINVAL;
	/* General sanity checks */
	if (alias->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (alias->slot >= KVM_ALIAS_SLOTS)
		goto out;
	if (alias->guest_phys_addr + alias->memory_size
	    < alias->guest_phys_addr)
		goto out;
	if (alias->target_phys_addr + alias->memory_size
	    < alias->target_phys_addr)
		goto out;

	down_write(&kvm->slots_lock);
	spin_lock(&kvm->mmu_lock);

	p = &kvm->arch.aliases[alias->slot];
	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
	p->npages = alias->memory_size >> PAGE_SHIFT;
	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
		if (kvm->arch.aliases[n - 1].npages)
			break;
	kvm->arch.naliases = n;

	spin_unlock(&kvm->mmu_lock);
	kvm_mmu_zap_all(kvm);

	up_write(&kvm->slots_lock);

	return 0;

out:
	return r;
}
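
/* Copy the emulated PIC/IOAPIC register state out for KVM_GET_IRQCHIP. */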
static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[0],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[1],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(&chip->chip.ioapic,
		       ioapic_irqchip(kvm),
		       sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&pic_irqchip(kvm)->pics[0],
		       &chip->chip.pic,
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&pic_irqchip(kvm)->pics[1],
		       &chip->chip.pic,
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(ioapic_irqchip(kvm),
		       &chip->chip.ioapic,
		       sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	kvm_pic_update_irq(pic_irqchip(kvm));
	return r;
}

static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
	return r;
}

static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
	kvm_pit_load_count(kvm, 0, ps->channels[0].count);
	return r;
}
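
/*
 * KVM_REINJECT_CONTROL: lets userspace choose whether missed PIT ticks
 * are reinjected into the guest.
 */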
static int kvm_vm_ioctl_reinject(struct kvm *kvm,
				 struct kvm_reinject_control *control)
{
	if (!kvm->arch.vpit)
		return -ENXIO;
	kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
	return 0;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	int n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	down_write(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		spin_lock(&kvm->mmu_lock);
		kvm_mmu_slot_remove_write_access(kvm, log->slot);
		spin_unlock(&kvm->mmu_lock);
		kvm_flush_remote_tlbs(kvm);
		memslot = &kvm->memslots[log->slot];
		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	up_write(&kvm->slots_lock);
	return r;
}
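
/*
 * x86-specific VM ioctls: TSS address, memory regions and aliases, MMU
 * page limits, the in-kernel irqchip and PIT, IRQ line control and PIT
 * tick reinjection.
 *
 * Illustrative userspace call (a sketch; vm_fd is assumed to come from
 * KVM_CREATE_VM):
 *
 *	if (ioctl(vm_fd, KVM_CREATE_IRQCHIP, 0) < 0)
 *		perror("KVM_CREATE_IRQCHIP");
 */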
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;
	/*
	 * This union makes it completely explicit to gcc-3.x
	 * that these two variables' stack usage should be
	 * combined, not added together.
	 */
	union {
		struct kvm_pit_state ps;
		struct kvm_memory_alias alias;
	} u;

	switch (ioctl) {
	case KVM_SET_TSS_ADDR:
		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		kvm_userspace_mem.slot = kvm_mem.slot;
		kvm_userspace_mem.flags = kvm_mem.flags;
		kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
		kvm_userspace_mem.memory_size = kvm_mem.memory_size;
		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
		if (r)
			goto out;
		break;
	case KVM_GET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
		break;
	case KVM_SET_MEMORY_ALIAS:
		r = -EFAULT;
		if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
			goto out;
		r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
		if (r)
			goto out;
		break;
	case KVM_CREATE_IRQCHIP:
		r = -ENOMEM;
		kvm->arch.vpic = kvm_create_pic(kvm);
		if (kvm->arch.vpic) {
			r = kvm_ioapic_init(kvm);
			if (r) {
				kfree(kvm->arch.vpic);
				kvm->arch.vpic = NULL;
				goto out;
			}
		} else
			goto out;
		r = kvm_setup_default_irq_routing(kvm);
		if (r) {
			kfree(kvm->arch.vpic);
			kfree(kvm->arch.vioapic);
			goto out;
		}
		break;
	case KVM_CREATE_PIT:
		mutex_lock(&kvm->lock);
		r = -EEXIST;
		if (kvm->arch.vpit)
			goto create_pit_unlock;
		r = -ENOMEM;
		kvm->arch.vpit = kvm_create_pit(kvm);
		if (kvm->arch.vpit)
			r = 0;
	create_pit_unlock:
		mutex_unlock(&kvm->lock);
		break;
	case KVM_IRQ_LINE_STATUS:
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof irq_event))
			goto out;
		if (irqchip_in_kernel(kvm)) {
			__s32 status;
			mutex_lock(&kvm->lock);
			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event.irq, irq_event.level);
			mutex_unlock(&kvm->lock);
			if (ioctl == KVM_IRQ_LINE_STATUS) {
				irq_event.status = status;
				if (copy_to_user(argp, &irq_event,
							sizeof irq_event))
					goto out;
			}
			r = 0;
		}
		break;
	}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);

		r = -ENOMEM;
		if (!chip)
			goto out;
		r = -EFAULT;
		if (copy_from_user(chip, argp, sizeof *chip))
			goto get_irqchip_out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto get_irqchip_out;
		r = kvm_vm_ioctl_get_irqchip(kvm, chip);
		if (r)
			goto get_irqchip_out;
		r = -EFAULT;
		if (copy_to_user(argp, chip, sizeof *chip))
			goto get_irqchip_out;
		r = 0;
	get_irqchip_out:
		kfree(chip);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);

		r = -ENOMEM;
		if (!chip)
			goto out;
		r = -EFAULT;
		if (copy_from_user(chip, argp, sizeof *chip))
			goto set_irqchip_out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto set_irqchip_out;
		r = kvm_vm_ioctl_set_irqchip(kvm, chip);
		if (r)
			goto set_irqchip_out;
		r = 0;
	set_irqchip_out:
		kfree(chip);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_PIT: {
		r = -EFAULT;
		if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_PIT: {
		r = -EFAULT;
		if (copy_from_user(&u.ps, argp, sizeof u.ps))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_REINJECT_CONTROL: {
		struct kvm_reinject_control control;

		r = -EFAULT;
		if (copy_from_user(&control, argp, sizeof(control)))
			goto out;
		r = kvm_vm_ioctl_reinject(kvm, &control);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}
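
/*
 * Probe each MSR in msrs_to_save[] with rdmsr_safe() and compact the
 * list down to the MSRs that actually exist on this host.
 */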
static void kvm_init_msr_list(void)
{
	u32 dummy[2];
	unsigned i, j;

	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
}

/*
 * Only the local APIC needs a per-vcpu MMIO device hook, so we can
 * shortcut straight to it here.
 */
static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
						   gpa_t addr, int len,
						   int is_write)
{
	struct kvm_io_device *dev;

	if (vcpu->arch.apic) {
		dev = &vcpu->arch.apic->dev;
		if (dev->in_range(dev, addr, len, is_write))
			return dev;
	}
	return NULL;
}
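
/*
 * Find the in-kernel device backing an MMIO access: the per-vcpu APIC
 * first, then the VM-wide mmio bus.
 */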
static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
						gpa_t addr, int len,
						int is_write)
{
	struct kvm_io_device *dev;

	dev = vcpu_find_pervcpu_dev(vcpu, addr, len, is_write);
	if (dev == NULL)
		dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len,
					  is_write);
	return dev;
}
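
/*
 * Read guest memory by guest virtual address, translating through the
 * guest page tables one page at a time; kvm_write_guest_virt() below is
 * the symmetric write path.
 */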
static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
			       struct kvm_vcpu *vcpu)
{
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA) {
			r = X86EMUL_PROPAGATE_FAULT;
			goto out;
		}
		ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
		if (ret < 0) {
			r = X86EMUL_UNHANDLEABLE;
			goto out;
		}

		bytes -= toread;
		data += toread;
		addr += toread;
	}
out:
	return r;
}

static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
				struct kvm_vcpu *vcpu)
{
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA) {
			r = X86EMUL_PROPAGATE_FAULT;
			goto out;
		}
		ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
		if (ret < 0) {
			r = X86EMUL_UNHANDLEABLE;
			goto out;
		}

		bytes -= towrite;
		data += towrite;
		addr += towrite;
	}
out:
	return r;
}
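
/*
 * Emulator read callback: complete a pending MMIO read if one has just
 * finished, otherwise try ordinary guest memory and fall back to MMIO,
 * either handled by an in-kernel device or punted to userspace via a
 * mmio exit.
 */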
static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	}

	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (kvm_read_guest_virt(addr, val, bytes, vcpu)
				== X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;
	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mutex_lock(&vcpu->kvm->lock);
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0);
	if (mmio_dev) {
		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
		mutex_unlock(&vcpu->kvm->lock);
		return X86EMUL_CONTINUE;
	}
	mutex_unlock(&vcpu->kvm->lock);

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 0;

	return X86EMUL_UNHANDLEABLE;
}

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes)
{
	int ret;

	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
	if (ret < 0)
		return 0;
	kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
	return 1;
}
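
/*
 * Write at most one guest page: inject a page fault for unmapped
 * addresses, prefer ordinary memory, and route the rest to MMIO.
 */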
static int emulator_write_emulated_onepage(unsigned long addr,
					   const void *val,
					   unsigned int bytes,
					   struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

	if (gpa == UNMAPPED_GVA) {
		kvm_inject_page_fault(vcpu, addr, 2);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mutex_lock(&vcpu->kvm->lock);
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
	if (mmio_dev) {
		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
		mutex_unlock(&vcpu->kvm->lock);
		return X86EMUL_CONTINUE;
	}
	mutex_unlock(&vcpu->kvm->lock);

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, val, bytes);

	return X86EMUL_CONTINUE;
}
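/*
 * Split writes that cross a page boundary into two single-page
 * writes, since each page may translate to a different gpa.
 */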
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu)
{
	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int rc, now;

		now = -addr & ~PAGE_MASK;
		rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		val += now;
		bytes -= now;
	}
	return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
}
EXPORT_SYMBOL_GPL(emulator_write_emulated);
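/*
 * cmpxchg is emulated as a plain write of the new value; only the
 * 8-byte case on 32-bit hosts is made atomic, since a guest
 * cmpxchg8b must not be observed half-written.
 */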
static int emulator_cmpxchg_emulated(unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct kvm_vcpu *vcpu)
{
	static int reported;

	if (!reported) {
		reported = 1;
		printk(KERN_WARNING "kvm: emulating exchange as write\n");
	}
#ifndef CONFIG_X86_64
	/* guests cmpxchg8b have to be emulated atomically */
	if (bytes == 8) {
		gpa_t gpa;
		struct page *page;
		char *kaddr;
		u64 val;

		gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

		if (gpa == UNMAPPED_GVA ||
		    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
			goto emul_write;

		if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
			goto emul_write;

		val = *(u64 *)new;

		page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);

		kaddr = kmap_atomic(page, KM_USER0);
		set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
		kunmap_atomic(kaddr, KM_USER0);
		kvm_release_page_dirty(page);
	}
emul_write:
#endif

	return emulator_write_emulated(addr, new, bytes, vcpu);
}
static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	kvm_mmu_invlpg(vcpu, address);
	return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
	KVMTRACE_0D(CLTS, vcpu, handler);
	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
	return X86EMUL_CONTINUE;
}
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (dr) {
	case 0 ... 3:
		*dest = kvm_x86_ops->get_dr(vcpu, dr);
		return X86EMUL_CONTINUE;
	default:
		pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
		return X86EMUL_UNHANDLEABLE;
	}
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
	int exception;

	kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
	if (exception) {
		/* FIXME: better handling */
		return X86EMUL_UNHANDLEABLE;
	}
	return X86EMUL_CONTINUE;
}
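/*
 * Log a rate-limited diagnostic with the first opcode bytes at the
 * guest rip when instruction emulation fails.
 */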
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
	u8 opcodes[4];
	unsigned long rip = kvm_rip_read(vcpu);
	unsigned long rip_linear;

	if (!printk_ratelimit())
		return;

	rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);

	kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);

	printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
	       context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
}
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);

static struct x86_emulate_ops emulate_ops = {
	.read_std            = kvm_read_guest_virt,
	.read_emulated       = emulator_read_emulated,
	.write_emulated      = emulator_write_emulated,
	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
};
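/*
 * The emulator accesses vcpu->arch.regs directly, so every register
 * must be cached up front and marked dirty so changes are written
 * back; the reads below are for their caching side effect only.
 */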
static void cache_all_regs(struct kvm_vcpu *vcpu)
{
	kvm_register_read(vcpu, VCPU_REGS_RAX);
	kvm_register_read(vcpu, VCPU_REGS_RSP);
	kvm_register_read(vcpu, VCPU_REGS_RIP);
	vcpu->arch.regs_dirty = ~0;
}
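/*
 * Top-level instruction emulation entry point: decode the current
 * instruction (unless the caller already did), run it through the
 * emulator, and map the result to EMULATE_DONE/FAIL/DO_MMIO.
 */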
int emulate_instruction(struct kvm_vcpu *vcpu,
			struct kvm_run *run,
			unsigned long cr2,
			u16 error_code,
			int emulation_type)
{
	int r, shadow_mask;
	struct decode_cache *c;

	kvm_clear_exception_queue(vcpu);
	vcpu->arch.mmio_fault_cr2 = cr2;
	/*
	 * TODO: fix x86_emulate.c to use guest_read/write_register
	 * instead of direct ->regs accesses, can save hundred cycles
	 * on Intel for instructions that don't read/change RSP, for
	 * example.
	 */
	cache_all_regs(vcpu);

	vcpu->mmio_is_write = 0;
	vcpu->arch.pio.string = 0;

	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
		int cs_db, cs_l;
		kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

		vcpu->arch.emulate_ctxt.vcpu = vcpu;
		vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
		vcpu->arch.emulate_ctxt.mode =
			(vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
			? X86EMUL_MODE_REAL : cs_l
			? X86EMUL_MODE_PROT64 : cs_db
			? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

		r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);

		/* Reject instructions other than VMCALL/VMMCALL when
		 * trying to emulate an invalid opcode */
		c = &vcpu->arch.emulate_ctxt.decode;
		if ((emulation_type & EMULTYPE_TRAP_UD) &&
		    (!(c->twobyte && c->b == 0x01 &&
		      (c->modrm_reg == 0 || c->modrm_reg == 3) &&
		       c->modrm_mod == 3 && c->modrm_rm == 1)))
			return EMULATE_FAIL;

		++vcpu->stat.insn_emulation;
		if (r) {
			++vcpu->stat.insn_emulation_fail;
			if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
				return EMULATE_DONE;
			return EMULATE_FAIL;
		}
	}

	if (emulation_type & EMULTYPE_SKIP) {
		kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
		return EMULATE_DONE;
	}

	r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
	shadow_mask = vcpu->arch.emulate_ctxt.interruptibility;

	if (r == 0)
		kvm_x86_ops->set_interrupt_shadow(vcpu, shadow_mask);

	if (vcpu->arch.pio.string)
		return EMULATE_DO_MMIO;

	if ((r || vcpu->mmio_is_write) && run) {
		run->exit_reason = KVM_EXIT_MMIO;
		run->mmio.phys_addr = vcpu->mmio_phys_addr;
		memcpy(run->mmio.data, vcpu->mmio_data, 8);
		run->mmio.len = vcpu->mmio_size;
		run->mmio.is_write = vcpu->mmio_is_write;
	}

	if (r) {
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			return EMULATE_DONE;
		if (!vcpu->mmio_needed) {
			kvm_report_emulation_failure(vcpu, "mmio");
			return EMULATE_FAIL;
		}
		return EMULATE_DO_MMIO;
	}

	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);

	if (vcpu->mmio_is_write) {
		vcpu->mmio_needed = 0;
		return EMULATE_DO_MMIO;
	}

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);
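/*
 * Copy string-PIO data between the pio_data scratch page and guest
 * memory; the direction depends on whether this is an IN or an OUT.
 */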
static int pio_copy_data(struct kvm_vcpu *vcpu)
{
	void *p = vcpu->arch.pio_data;
	gva_t q = vcpu->arch.pio.guest_gva;
	unsigned bytes;
	int ret;

	bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
	if (vcpu->arch.pio.in)
		ret = kvm_write_guest_virt(q, p, bytes, vcpu);
	else
		ret = kvm_read_guest_virt(q, p, bytes, vcpu);
	return ret;
}
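/*
 * Finish a (possibly string) PIO operation: write back RAX for
 * single-word INs, and advance RCX/RDI/RSI for string variants.
 */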
int complete_pio(struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->arch.pio;
	long delta;
	int r;
	unsigned long val;

	if (!io->string) {
		if (io->in) {
			val = kvm_register_read(vcpu, VCPU_REGS_RAX);
			memcpy(&val, vcpu->arch.pio_data, io->size);
			kvm_register_write(vcpu, VCPU_REGS_RAX, val);
		}
	} else {
		if (io->in) {
			r = pio_copy_data(vcpu);
			if (r)
				return r;
		}

		delta = 1;
		if (io->rep) {
			delta *= io->cur_count;
			/*
			 * The size of the register should really depend on
			 * current address size.
			 */
			val = kvm_register_read(vcpu, VCPU_REGS_RCX);
			val -= delta;
			kvm_register_write(vcpu, VCPU_REGS_RCX, val);
		}
		if (io->down)
			delta = -delta;
		delta *= io->size;
		if (io->in) {
			val = kvm_register_read(vcpu, VCPU_REGS_RDI);
			val += delta;
			kvm_register_write(vcpu, VCPU_REGS_RDI, val);
		} else {
			val = kvm_register_read(vcpu, VCPU_REGS_RSI);
			val += delta;
			kvm_register_write(vcpu, VCPU_REGS_RSI, val);
		}
	}

	io->count -= io->cur_count;
	io->cur_count = 0;

	return 0;
}
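/* Dispatch a single PIO transaction to an in-kernel device. */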
static void kernel_pio(struct kvm_io_device *pio_dev,
		       struct kvm_vcpu *vcpu,
		       void *pd)
{
	/* TODO: String I/O for in kernel device */

	mutex_lock(&vcpu->kvm->lock);
	if (vcpu->arch.pio.in)
		kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
				  vcpu->arch.pio.size,
				  pd);
	else
		kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
				   vcpu->arch.pio.size,
				   pd);
	mutex_unlock(&vcpu->kvm->lock);
}

static void pio_string_write(struct kvm_io_device *pio_dev,
			     struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->arch.pio;
	void *pd = vcpu->arch.pio_data;
	int i;

	mutex_lock(&vcpu->kvm->lock);
	for (i = 0; i < io->cur_count; i++) {
		kvm_iodevice_write(pio_dev, io->port,
				   io->size,
				   pd);
		pd += io->size;
	}
	mutex_unlock(&vcpu->kvm->lock);
}

static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
					       gpa_t addr, int len,
					       int is_write)
{
	return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write);
}
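/*
 * Emulate a single (non-string) port I/O instruction; returns 1 if
 * handled entirely in the kernel, 0 if userspace must complete it.
 */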
int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		    int size, unsigned port)
{
	struct kvm_io_device *pio_dev;
	unsigned long val;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = vcpu->arch.pio.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
	vcpu->run->io.port = vcpu->arch.pio.port = port;
	vcpu->arch.pio.in = in;
	vcpu->arch.pio.string = 0;
	vcpu->arch.pio.down = 0;
	vcpu->arch.pio.rep = 0;

	if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
		KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
			    handler);
	else
		KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
			    handler);

	val = kvm_register_read(vcpu, VCPU_REGS_RAX);
	memcpy(vcpu->arch.pio_data, &val, 4);

	pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
	if (pio_dev) {
		kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
		complete_pio(vcpu);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio);
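/*
 * Emulate a string port I/O instruction (INS/OUTS), clamped so a
 * single round never crosses a guest page boundary.
 */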
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		  int size, unsigned long count, int down,
		  gva_t address, int rep, unsigned port)
{
	unsigned now, in_page;
	int ret = 0;
	struct kvm_io_device *pio_dev;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = vcpu->arch.pio.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
	vcpu->run->io.port = vcpu->arch.pio.port = port;
	vcpu->arch.pio.in = in;
	vcpu->arch.pio.string = 1;
	vcpu->arch.pio.down = down;
	vcpu->arch.pio.rep = rep;

	if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
		KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
			    handler);
	else
		KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
			    handler);

	if (!count) {
		kvm_x86_ops->skip_emulated_instruction(vcpu);
		return 1;
	}

	if (!down)
		in_page = PAGE_SIZE - offset_in_page(address);
	else
		in_page = offset_in_page(address) + size;
	now = min(count, (unsigned long)in_page / size);
	if (!now)
		now = 1;
	if (down) {
		/*
		 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
		 */
		pr_unimpl(vcpu, "guest string pio down\n");
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	vcpu->run->io.count = now;
	vcpu->arch.pio.cur_count = now;

	if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
		kvm_x86_ops->skip_emulated_instruction(vcpu);

	vcpu->arch.pio.guest_gva = address;

	pio_dev = vcpu_find_pio_dev(vcpu, port,
				    vcpu->arch.pio.cur_count,
				    !vcpu->arch.pio.in);
	if (!vcpu->arch.pio.in) {
		/* string PIO write */
		ret = pio_copy_data(vcpu);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			kvm_inject_gp(vcpu, 0);
			return 1;
		}
		if (ret == 0 && pio_dev) {
			pio_string_write(pio_dev, vcpu);
			complete_pio(vcpu);
			if (vcpu->arch.pio.count == 0)
				ret = 1;
		}
	} else if (pio_dev)
		pr_unimpl(vcpu, "no string pio read support yet, "
			  "port %x size %d count %ld\n",
			  port, size, count);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
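/*
 * kvmclock cpufreq hook: when the host TSC frequency scales, refresh
 * the per-cpu TSC rate and ask affected vcpus to update guest time.
 */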
static void bounce_off(void *info)
{
	/* nothing */
}

static unsigned int  ref_freq;
static unsigned long tsc_khz_ref;

static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				     void *data)
{
	struct cpufreq_freqs *freq = data;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i, send_ipi = 0;

	if (!ref_freq)
		ref_freq = freq->old;

	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
		return 0;
	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
		return 0;
	per_cpu(cpu_tsc_khz, freq->cpu) = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (!vcpu)
				continue;
			if (vcpu->cpu != freq->cpu)
				continue;
			if (!kvm_request_guest_time_update(vcpu))
				continue;
			if (vcpu->cpu != smp_processor_id())
				send_ipi++;
		}
	}
	spin_unlock(&kvm_lock);

	if (freq->old < freq->new && send_ipi) {
		/*
		 * We upscale the frequency.  Make sure the guest
		 * doesn't see old kvmclock values while running with
		 * the new frequency, otherwise we risk the guest seeing
		 * time go backwards.
		 *
		 * In case we update the frequency for another cpu
		 * (which might be in guest context) send an interrupt
		 * to kick the cpu out of guest context.  Next time
		 * guest context is entered kvmclock will be updated,
		 * so the guest will not see stale values.
		 */
		smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
	}
	return 0;
}

static struct notifier_block kvmclock_cpufreq_notifier_block = {
	.notifier_call  = kvmclock_cpufreq_notifier
};
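/*
 * Module init/exit for the architecture layer, called with the
 * vendor-specific (VMX or SVM) kvm_x86_ops as the opaque argument.
 */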
int kvm_arch_init(void *opaque)
{
	int r, cpu;
	struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;

	if (kvm_x86_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		r = -EEXIST;
		goto out;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		r = -EOPNOTSUPP;
		goto out;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		r = -EOPNOTSUPP;
		goto out;
	}

	r = kvm_mmu_module_init();
	if (r)
		goto out;

	kvm_init_msr_list();

	kvm_x86_ops = ops;
	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
	kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
			PT_DIRTY_MASK, PT64_NX_MASK, 0);

	for_each_possible_cpu(cpu)
		per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
		tsc_khz_ref = tsc_khz;
		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);
	}

	return 0;

out:
	return r;
}

void kvm_arch_exit(void)
{
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
					    CPUFREQ_TRANSITION_NOTIFIER);
	kvm_x86_ops = NULL;
	kvm_mmu_module_exit();
}
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.halt_exits;
	KVMTRACE_0D(HLT, vcpu, handler);
	if (irqchip_in_kernel(vcpu->kvm)) {
		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		return 1;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_HLT;
		return 0;
	}
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);

static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
			   unsigned long a1)
{
	if (is_long_mode(vcpu))
		return a0;
	else
		return a0 | ((gpa_t)a1 << 32);
}
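/*
 * Hypercall ABI: the number is in RAX, arguments in RBX/RCX/RDX/RSI,
 * and the return value is written back to RAX; arguments are
 * truncated to 32 bits outside long mode.
 */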
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long nr, a0, a1, a2, a3, ret;
	int r = 1;

	nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
	a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
	a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
	a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
	a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);

	KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);

	if (!is_long_mode(vcpu)) {
		nr &= 0xFFFFFFFF;
		a0 &= 0xFFFFFFFF;
		a1 &= 0xFFFFFFFF;
		a2 &= 0xFFFFFFFF;
		a3 &= 0xFFFFFFFF;
	}

	switch (nr) {
	case KVM_HC_VAPIC_POLL_IRQ:
		ret = 0;
		break;
	case KVM_HC_MMU_OP:
		r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
		break;
	default:
		ret = -KVM_ENOSYS;
		break;
	}
	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
	++vcpu->stat.hypercalls;
	return r;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
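/*
 * Patch the guest's hypercall instruction with the sequence native
 * to the host CPU (VMCALL vs. VMMCALL).
 */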
int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
{
	char instruction[3];
	int ret = 0;
	unsigned long rip = kvm_rip_read(vcpu);

	/*
	 * Blow out the MMU to ensure that no other VCPU has an active mapping
	 * to ensure that the updated hypercall appears atomically across all
	 * VCPUs.
	 */
	kvm_mmu_zap_all(vcpu->kvm);

	kvm_x86_ops->patch_hypercall(vcpu, instruction);
	if (emulator_write_emulated(rip, instruction, 3, vcpu)
	    != X86EMUL_CONTINUE)
		ret = -EFAULT;

	return ret;
}

static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_gdt(vcpu, &dt);
}

void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_idt(vcpu, &dt);
}

void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags)
{
	kvm_lmsw(vcpu, msw);
	*rflags = kvm_x86_ops->get_rflags(vcpu);
}

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
	unsigned long value;

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	switch (cr) {
	case 0:
		value = vcpu->arch.cr0;
		break;
	case 2:
		value = vcpu->arch.cr2;
		break;
	case 3:
		value = vcpu->arch.cr3;
		break;
	case 4:
		value = vcpu->arch.cr4;
		break;
	case 8:
		value = kvm_get_cr8(vcpu);
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
		return 0;
	}
	KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
		    (u32)((u64)value >> 32), handler);

	return value;
}

void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
		     unsigned long *rflags)
{
	KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
		    (u32)((u64)val >> 32), handler);

	switch (cr) {
	case 0:
		kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
		*rflags = kvm_x86_ops->get_rflags(vcpu);
		break;
	case 2:
		vcpu->arch.cr2 = val;
		break;
	case 3:
		kvm_set_cr3(vcpu, val);
		break;
	case 4:
		kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
		break;
	case 8:
		kvm_set_cr8(vcpu, val & 0xfUL);
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
	}
}
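/*
 * Helpers for "stateful" CPUID functions (such as leaf 2), where
 * successive reads of the same leaf are expected to return
 * successive entries.
 */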
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
	int j, nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
	for (j = i + 1; ; j = (j + 1) % nent) {
		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
		if (ej->function == e->function) {
			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
			return j;
		}
	}
	return 0; /* silence gcc, even though control never reaches here */
}

/* find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful) */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
	u32 function, u32 index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	int i;
	struct kvm_cpuid_entry2 *best = NULL;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		struct kvm_cpuid_entry2 *e;

		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
				move_to_next_stateful_cpuid_entry(vcpu, i);
			best = e;
			break;
		}
		/*
		 * Both basic or both extended?
		 */
		if (((e->function ^ function) & 0x80000000) == 0)
			if (!best || e->function > best->function)
				best = e;
	}
	return best;
}
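/*
 * cpuid_maxphyaddr() falls back to 36 bits when leaf 0x80000008 is
 * not exposed to the guest.
 */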
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
	return 36;
}

void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 function, index;
	struct kvm_cpuid_entry2 *best;

	function = kvm_register_read(vcpu, VCPU_REGS_RAX);
	index = kvm_register_read(vcpu, VCPU_REGS_RCX);
	kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
	kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
	kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
	kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
	best = kvm_find_cpuid_entry(vcpu, function, index);
	if (best) {
		kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
		kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
		kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
		kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
	}
	kvm_x86_ops->skip_emulated_instruction(vcpu);
	KVMTRACE_5D(CPUID, vcpu, function,
		    (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
		    (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
		    (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
		    (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
					struct kvm_run *kvm_run)
{
	return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
		kvm_run->request_interrupt_window &&
		kvm_arch_interrupt_allowed(vcpu));
}

static void post_kvm_run_save(struct kvm_vcpu *vcpu,
			      struct kvm_run *kvm_run)
{
	kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = kvm_get_cr8(vcpu);
	kvm_run->apic_base = kvm_get_apic_base(vcpu);
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_run->ready_for_interrupt_injection = 1;
	else
		kvm_run->ready_for_interrupt_injection =
			kvm_arch_interrupt_allowed(vcpu) &&
			!kvm_cpu_has_interrupt(vcpu) &&
			!kvm_event_needs_reinjection(vcpu);
}
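/*
 * Pin/unpin the guest page backing the virtual APIC state so it can
 * be accessed while the vcpu runs.
 */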
static void vapic_enter(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct page *page;

	if (!apic || !apic->vapic_addr)
		return;

	page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);

	vcpu->arch.apic->vapic_page = page;
}

static void vapic_exit(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!apic || !apic->vapic_addr)
		return;

	down_read(&vcpu->kvm->slots_lock);
	kvm_release_page_dirty(apic->vapic_page);
	mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
	up_read(&vcpu->kvm->slots_lock);
}
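/*
 * Tell the vendor code the current TPR and the highest pending
 * interrupt priority so it can adjust the CR8-load intercept.
 */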
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
	int max_irr, tpr;

	if (!kvm_x86_ops->update_cr8_intercept)
		return;

	if (!vcpu->arch.apic->vapic_addr)
		max_irr = kvm_lapic_find_highest_irr(vcpu);
	else
		max_irr = -1;

	if (max_irr != -1)
		max_irr >>= 4;

	tpr = kvm_lapic_get_cr8(vcpu);

	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
}
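/*
 * Event injection, in priority order: reinject anything left over
 * from the last exit first (NMI, then interrupt), then try to
 * inject a newly pending NMI or external interrupt.
 */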
static void inject_pending_irq(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		kvm_x86_ops->set_interrupt_shadow(vcpu, 0);

	/* try to reinject previous events if any */
	if (vcpu->arch.nmi_injected) {
		kvm_x86_ops->set_nmi(vcpu);
		return;
	}

	if (vcpu->arch.interrupt.pending) {
		kvm_x86_ops->set_irq(vcpu);
		return;
	}

	/* try to inject new event if pending */
	if (vcpu->arch.nmi_pending) {
		if (kvm_x86_ops->nmi_allowed(vcpu)) {
			vcpu->arch.nmi_pending = false;
			vcpu->arch.nmi_injected = true;
			kvm_x86_ops->set_nmi(vcpu);
		}
	} else if (kvm_cpu_has_interrupt(vcpu)) {
		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
					    false);
			kvm_x86_ops->set_irq(vcpu);
		}
	}
}
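/*
 * One iteration of the vcpu run loop: process pending requests,
 * inject events, switch debug registers if needed, and enter guest
 * mode; returns 1 to continue looping in the kernel, 0 or negative
 * to drop back to userspace.
 */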
2008-09-09 02:23:48 +08:00
|
|
|
static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
		kvm_run->request_interrupt_window;

	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			kvm_mmu_unload(vcpu);

	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		goto out;

	if (vcpu->requests) {
		if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
			__kvm_migrate_timers(vcpu);
		if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
			kvm_write_guest_time(vcpu);
		if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
			kvm_mmu_sync_roots(vcpu);
		if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			kvm_x86_ops->tlb_flush(vcpu);
		if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
				       &vcpu->requests)) {
			kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
			r = 0;
			goto out;
		}
		if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
			kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
			r = 0;
			goto out;
		}
	}

	preempt_disable();

	kvm_x86_ops->prepare_guest_switch(vcpu);
	kvm_load_guest_fpu(vcpu);

	local_irq_disable();

	clear_bit(KVM_REQ_KICK, &vcpu->requests);
	smp_mb__after_clear_bit();

	if (vcpu->requests || need_resched() || signal_pending(current)) {
		local_irq_enable();
		preempt_enable();
		r = 1;
		goto out;
	}

	if (vcpu->arch.exception.pending)
		__queue_exception(vcpu);
	else
		inject_pending_irq(vcpu, kvm_run);

	/* enable NMI/IRQ window open exits if needed */
	if (vcpu->arch.nmi_pending)
		kvm_x86_ops->enable_nmi_window(vcpu);
	else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
		kvm_x86_ops->enable_irq_window(vcpu);

	if (kvm_lapic_enabled(vcpu)) {
		update_cr8_intercept(vcpu);
		kvm_lapic_sync_to_vapic(vcpu);
	}

	up_read(&vcpu->kvm->slots_lock);

	kvm_guest_enter();

	get_debugreg(vcpu->arch.host_dr6, 6);
	get_debugreg(vcpu->arch.host_dr7, 7);
	if (unlikely(vcpu->arch.switch_db_regs)) {
		get_debugreg(vcpu->arch.host_db[0], 0);
		get_debugreg(vcpu->arch.host_db[1], 1);
		get_debugreg(vcpu->arch.host_db[2], 2);
		get_debugreg(vcpu->arch.host_db[3], 3);

		set_debugreg(0, 7);
		set_debugreg(vcpu->arch.eff_db[0], 0);
		set_debugreg(vcpu->arch.eff_db[1], 1);
		set_debugreg(vcpu->arch.eff_db[2], 2);
		set_debugreg(vcpu->arch.eff_db[3], 3);
	}

	KVMTRACE_0D(VMENTRY, vcpu, entryexit);
	kvm_x86_ops->run(vcpu, kvm_run);

	if (unlikely(vcpu->arch.switch_db_regs)) {
		set_debugreg(0, 7);
		set_debugreg(vcpu->arch.host_db[0], 0);
		set_debugreg(vcpu->arch.host_db[1], 1);
		set_debugreg(vcpu->arch.host_db[2], 2);
		set_debugreg(vcpu->arch.host_db[3], 3);
	}
	set_debugreg(vcpu->arch.host_dr6, 6);
	set_debugreg(vcpu->arch.host_dr7, 7);

	set_bit(KVM_REQ_KICK, &vcpu->requests);
	local_irq_enable();

	++vcpu->stat.exits;

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow.  The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();

	kvm_guest_exit();

	preempt_enable();

	down_read(&vcpu->kvm->slots_lock);

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING)) {
		unsigned long rip = kvm_rip_read(vcpu);
		profile_hit(KVM_PROFILING, (void *)rip);
	}

	kvm_lapic_sync_from_vapic(vcpu);

	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
out:
	return r;
}

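/*
 * Outer run loop: enter the guest while the vcpu is runnable, block
 * while it is halted, and bail out to userspace on signals, pending
 * userspace interrupt injection, or errors from the inner loop.
 */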
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
		pr_debug("vcpu %d received sipi with vector # %x\n",
			 vcpu->vcpu_id, vcpu->arch.sipi_vector);
		kvm_lapic_reset(vcpu);
		r = kvm_arch_vcpu_reset(vcpu);
		if (r)
			return r;
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}

	down_read(&vcpu->kvm->slots_lock);
	vapic_enter(vcpu);

	r = 1;
	while (r > 0) {
		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
			r = vcpu_enter_guest(vcpu, kvm_run);
		else {
			up_read(&vcpu->kvm->slots_lock);
			kvm_vcpu_block(vcpu);
			down_read(&vcpu->kvm->slots_lock);
			if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests)) {
				switch (vcpu->arch.mp_state) {
				case KVM_MP_STATE_HALTED:
					vcpu->arch.mp_state =
						KVM_MP_STATE_RUNNABLE;
					/* fall through */
				case KVM_MP_STATE_RUNNABLE:
					break;
				case KVM_MP_STATE_SIPI_RECEIVED:
				default:
					r = -EINTR;
					break;
				}
			}
		}

		if (r <= 0)
			break;

		clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
		if (kvm_cpu_has_pending_timer(vcpu))
			kvm_inject_pending_timer_irqs(vcpu);

		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
			r = -EINTR;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.request_irq_exits;
		}
		if (signal_pending(current)) {
			r = -EINTR;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.signal_exits;
		}
		if (need_resched()) {
			up_read(&vcpu->kvm->slots_lock);
			kvm_resched(vcpu);
			down_read(&vcpu->kvm->slots_lock);
		}
	}

	up_read(&vcpu->kvm->slots_lock);
	post_kvm_run_save(vcpu, kvm_run);

	vapic_exit(vcpu);

	return r;
}

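/*
 * KVM_RUN entry point: completes any string PIO or MMIO emulation left
 * over from the previous exit before dropping into __vcpu_run().
 */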
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		r = -EAGAIN;
		goto out;
	}

	/* re-sync apic's tpr */
	if (!irqchip_in_kernel(vcpu->kvm))
		kvm_set_cr8(vcpu, kvm_run->cr8);

	if (vcpu->arch.pio.cur_count) {
		r = complete_pio(vcpu);
		if (r)
			goto out;
	}
#ifdef CONFIG_HAS_IOMEM
	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;

		down_read(&vcpu->kvm->slots_lock);
		r = emulate_instruction(vcpu, kvm_run,
					vcpu->arch.mmio_fault_cr2, 0,
					EMULTYPE_NO_DECODE);
		up_read(&vcpu->kvm->slots_lock);
		if (r == EMULATE_DO_MMIO) {
			/*
			 * Read-modify-write.  Back to userspace.
			 */
			r = 0;
			goto out;
		}
	}
#endif
	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
		kvm_register_write(vcpu, VCPU_REGS_RAX,
				   kvm_run->hypercall.ret);

	r = __vcpu_run(vcpu, kvm_run);

out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}

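/* Copy the guest's general-purpose registers out for KVM_GET_REGS. */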
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
	regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
	regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
#ifdef CONFIG_X86_64
	regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
	regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
	regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
	regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
	regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
	regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
	regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
	regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
#endif

	regs->rip = kvm_rip_read(vcpu);
	regs->rflags = kvm_x86_ops->get_rflags(vcpu);

	/*
	 * Don't leak debug flags in case they were set for guest debugging
	 */
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

	vcpu_put(vcpu);

	return 0;
}

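/*
 * Install a register file from userspace (KVM_SET_REGS); the pending
 * exception is dropped because it refers to pre-update state.
 */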
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
	kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
	kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
	kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
	kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
	kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
#ifdef CONFIG_X86_64
	kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
	kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
	kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
	kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
	kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
	kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
	kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
	kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
#endif

	kvm_rip_write(vcpu, regs->rip);
	kvm_x86_ops->set_rflags(vcpu, regs->rflags);

	vcpu->arch.exception.pending = false;

	vcpu_put(vcpu);

	return 0;
}

void kvm_get_segment(struct kvm_vcpu *vcpu,
		     struct kvm_segment *var, int seg)
{
	kvm_x86_ops->get_segment(vcpu, var, seg);
}

void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct kvm_segment cs;

	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
	*db = cs.db;
	*l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);

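/*
 * KVM_GET_SREGS: export segment, descriptor-table, control-register
 * and APIC state, plus a bitmap holding an interrupt awaiting
 * re-injection, if any.
 */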
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct descriptor_table dt;

	vcpu_load(vcpu);

	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	kvm_x86_ops->get_idt(vcpu, &dt);
	sregs->idt.limit = dt.limit;
	sregs->idt.base = dt.base;
	kvm_x86_ops->get_gdt(vcpu, &dt);
	sregs->gdt.limit = dt.limit;
	sregs->gdt.base = dt.base;

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	sregs->cr0 = vcpu->arch.cr0;
	sregs->cr2 = vcpu->arch.cr2;
	sregs->cr3 = vcpu->arch.cr3;
	sregs->cr4 = vcpu->arch.cr4;
	sregs->cr8 = kvm_get_cr8(vcpu);
	sregs->efer = vcpu->arch.shadow_efer;
	sregs->apic_base = kvm_get_apic_base(vcpu);

	memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);

	if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
		set_bit(vcpu->arch.interrupt.nr,
			(unsigned long *)sregs->interrupt_bitmap);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	mp_state->mp_state = vcpu->arch.mp_state;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	vcpu->arch.mp_state = mp_state->mp_state;
	vcpu_put(vcpu);
	return 0;
}

static void kvm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	kvm_x86_ops->set_segment(vcpu, var, seg);
}

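/*
 * Unpack an 8-byte architectural segment descriptor into the flat
 * struct kvm_segment layout used throughout KVM.
 */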
static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
				   struct kvm_segment *kvm_desct)
{
	kvm_desct->base = seg_desc->base0;
	kvm_desct->base |= seg_desc->base1 << 16;
	kvm_desct->base |= seg_desc->base2 << 24;
	kvm_desct->limit = seg_desc->limit0;
	kvm_desct->limit |= seg_desc->limit << 16;
	if (seg_desc->g) {
		kvm_desct->limit <<= 12;
		kvm_desct->limit |= 0xfff;
	}
	kvm_desct->selector = selector;
	kvm_desct->type = seg_desc->type;
	kvm_desct->present = seg_desc->p;
	kvm_desct->dpl = seg_desc->dpl;
	kvm_desct->db = seg_desc->d;
	kvm_desct->s = seg_desc->s;
	kvm_desct->l = seg_desc->l;
	kvm_desct->g = seg_desc->g;
	kvm_desct->avl = seg_desc->avl;
	if (!selector)
		kvm_desct->unusable = 1;
	else
		kvm_desct->unusable = 0;
	kvm_desct->padding = 0;
}

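/*
 * Bit 2 of a selector is the table indicator: set means LDT,
 * clear means GDT.
 */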
static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
					  u16 selector,
					  struct descriptor_table *dtable)
{
	if (selector & 1 << 2) {
		struct kvm_segment kvm_seg;

		kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);

		if (kvm_seg.unusable)
			dtable->limit = 0;
		else
			dtable->limit = kvm_seg.limit;
		dtable->base = kvm_seg.base;
	} else
		kvm_x86_ops->get_gdt(vcpu, dtable);
}

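/*
 * The helpers below fetch and store raw descriptors in guest memory,
 * translating through the guest MMU and checking the descriptor-table
 * limit first.
 */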
/* allowed just for 8 bytes segments */
static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
					 struct desc_struct *seg_desc)
{
	gpa_t gpa;
	struct descriptor_table dtable;
	u16 index = selector >> 3;

	get_segment_descriptor_dtable(vcpu, selector, &dtable);

	if (dtable.limit < index * 8 + 7) {
		kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
		return 1;
	}
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
	gpa += index * 8;
	return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
}

/* allowed just for 8 bytes segments */
static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
					 struct desc_struct *seg_desc)
{
	gpa_t gpa;
	struct descriptor_table dtable;
	u16 index = selector >> 3;

	get_segment_descriptor_dtable(vcpu, selector, &dtable);

	if (dtable.limit < index * 8 + 7)
		return 1;
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
	gpa += index * 8;
	return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
}

static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
			     struct desc_struct *seg_desc)
{
	u32 base_addr;

	base_addr = seg_desc->base0;
	base_addr |= (seg_desc->base1 << 16);
	base_addr |= (seg_desc->base2 << 24);

	return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
}

static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment kvm_seg;

	kvm_get_segment(vcpu, &kvm_seg, seg);
	return kvm_seg.selector;
}

static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
						u16 selector,
						struct kvm_segment *kvm_seg)
{
	struct desc_struct seg_desc;

	if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
		return 1;
	seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
	return 0;
}

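/*
 * Real-mode segments have no descriptors: base = selector << 4 with a
 * fixed 64K read/write limit.
 */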
static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
{
	struct kvm_segment segvar = {
		.base = selector << 4,
		.limit = 0xffff,
		.selector = selector,
		.type = 3,
		.present = 1,
		.dpl = 3,
		.db = 0,
		.s = 1,
		.l = 0,
		.g = 0,
		.avl = 0,
		.unusable = 0,
	};
	kvm_x86_ops->set_segment(vcpu, &segvar, seg);
	return 0;
}

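/*
 * Load a segment register from a selector in protected or real mode,
 * fetching and validating the descriptor as needed.
 */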
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
				int type_bits, int seg)
{
	struct kvm_segment kvm_seg;

	if (!(vcpu->arch.cr0 & X86_CR0_PE))
		return kvm_load_realmode_segment(vcpu, selector, seg);
	if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
		return 1;
	kvm_seg.type |= type_bits;

	if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
	    seg != VCPU_SREG_LDTR)
		if (!kvm_seg.s)
			kvm_seg.unusable = 1;

	kvm_set_segment(vcpu, &kvm_seg, seg);
	return 0;
}

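/*
 * The following helpers copy vcpu state to and from the in-memory
 * 32-bit and 16-bit TSS formats during an emulated task switch.
 */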
static void save_state_to_tss32(struct kvm_vcpu *vcpu,
				struct tss_segment_32 *tss)
{
	tss->cr3 = vcpu->arch.cr3;
	tss->eip = kvm_rip_read(vcpu);
	tss->eflags = kvm_x86_ops->get_rflags(vcpu);
	tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
	tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
	tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
	tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
	tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
	tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
}

static int load_state_from_tss32(struct kvm_vcpu *vcpu,
				 struct tss_segment_32 *tss)
{
	kvm_set_cr3(vcpu, tss->cr3);

	kvm_rip_write(vcpu, tss->eip);
	kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);

	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);

	if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
		return 1;
	return 0;
}

static void save_state_to_tss16(struct kvm_vcpu *vcpu,
				struct tss_segment_16 *tss)
{
	tss->ip = kvm_rip_read(vcpu);
	tss->flag = kvm_x86_ops->get_rflags(vcpu);
	tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
	tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
	tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);

	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
	tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
}

static int load_state_from_tss16(struct kvm_vcpu *vcpu,
				 struct tss_segment_16 *tss)
{
	kvm_rip_write(vcpu, tss->ip);
	kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);

	if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
		return 1;
	return 0;
}

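/*
 * Memory half of a task switch via a 16-bit TSS: save the outgoing
 * context, load the incoming one, and record the back link if the
 * switch nests.
 */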
static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
			      u16 old_tss_sel, u32 old_tss_base,
			      struct desc_struct *nseg_desc)
{
	struct tss_segment_16 tss_segment_16;
	int ret = 0;

	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
			   sizeof tss_segment_16))
		goto out;

	save_state_to_tss16(vcpu, &tss_segment_16);

	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
			    sizeof tss_segment_16))
		goto out;

	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
			   &tss_segment_16, sizeof tss_segment_16))
		goto out;

	if (old_tss_sel != 0xffff) {
		tss_segment_16.prev_task_link = old_tss_sel;

		if (kvm_write_guest(vcpu->kvm,
				    get_tss_base_addr(vcpu, nseg_desc),
				    &tss_segment_16.prev_task_link,
				    sizeof tss_segment_16.prev_task_link))
			goto out;
	}

	if (load_state_from_tss16(vcpu, &tss_segment_16))
		goto out;

	ret = 1;
out:
	return ret;
}

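/* As above, but for the 32-bit TSS format (which also carries CR3). */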
static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
			      u16 old_tss_sel, u32 old_tss_base,
			      struct desc_struct *nseg_desc)
{
	struct tss_segment_32 tss_segment_32;
	int ret = 0;

	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
			   sizeof tss_segment_32))
		goto out;

	save_state_to_tss32(vcpu, &tss_segment_32);

	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
			    sizeof tss_segment_32))
		goto out;

	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
			   &tss_segment_32, sizeof tss_segment_32))
		goto out;

	if (old_tss_sel != 0xffff) {
		tss_segment_32.prev_task_link = old_tss_sel;

		if (kvm_write_guest(vcpu->kvm,
				    get_tss_base_addr(vcpu, nseg_desc),
				    &tss_segment_32.prev_task_link,
				    sizeof tss_segment_32.prev_task_link))
			goto out;
	}

	if (load_state_from_tss32(vcpu, &tss_segment_32))
		goto out;

	ret = 1;
out:
	return ret;
}

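/*
 * Emulate a hardware task switch: check privilege and the new TSS
 * limit, maintain the busy and NT flags per the switch reason, then
 * transfer state through the TSS helpers above.
 */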
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
{
	struct kvm_segment tr_seg;
	struct desc_struct cseg_desc;
	struct desc_struct nseg_desc;
	int ret = 0;
	u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
	u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);

	old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);

	/* FIXME: Handle errors. Failure to read either TSS or their
	 * descriptors should generate a pagefault.
	 */
	if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
		goto out;

	if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
		goto out;

	if (reason != TASK_SWITCH_IRET) {
		int cpl;

		cpl = kvm_x86_ops->get_cpl(vcpu);
		if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
			return 1;
		}
	}

	if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
		kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
		return 1;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		cseg_desc.type &= ~(1 << 1); /* clear the B flag */
		save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
	}

	if (reason == TASK_SWITCH_IRET) {
		u32 eflags = kvm_x86_ops->get_rflags(vcpu);
		kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
	}

	/* set back link to prev task only if NT bit is set in eflags
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (nseg_desc.type & 8)
		ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_sel,
					 old_tss_base, &nseg_desc);
	else
		ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_sel,
					 old_tss_base, &nseg_desc);

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
		u32 eflags = kvm_x86_ops->get_rflags(vcpu);
		kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
	}

	if (reason != TASK_SWITCH_IRET) {
		nseg_desc.type |= (1 << 1);
		save_guest_segment_descriptor(vcpu, tss_selector,
					      &nseg_desc);
	}

	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
	seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
	tr_seg.type = 11;
	kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);

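/*
 * KVM_SET_SREGS: the mirror of kvm_arch_vcpu_ioctl_get_sregs(); a
 * changed CR0/CR3/CR4/EFER forces a fresh MMU context, and a vector
 * found in the interrupt bitmap is queued for re-injection.
 */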
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int mmu_reset_needed = 0;
	int pending_vec, max_bits;
	struct descriptor_table dt;

	vcpu_load(vcpu);

	dt.limit = sregs->idt.limit;
	dt.base = sregs->idt.base;
	kvm_x86_ops->set_idt(vcpu, &dt);
	dt.limit = sregs->gdt.limit;
	dt.base = sregs->gdt.base;
	kvm_x86_ops->set_gdt(vcpu, &dt);

	vcpu->arch.cr2 = sregs->cr2;
	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;

	down_read(&vcpu->kvm->slots_lock);
	if (gfn_to_memslot(vcpu->kvm, sregs->cr3 >> PAGE_SHIFT))
		vcpu->arch.cr3 = sregs->cr3;
	else
		set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
	up_read(&vcpu->kvm->slots_lock);

	kvm_set_cr8(vcpu, sregs->cr8);

	mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
	kvm_x86_ops->set_efer(vcpu, sregs->efer);
	kvm_set_apic_base(vcpu, sregs->apic_base);

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);

	mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
	vcpu->arch.cr0 = sregs->cr0;

	mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
	if (!is_long_mode(vcpu) && is_pae(vcpu))
		load_pdptrs(vcpu, vcpu->arch.cr3);

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	max_bits = (sizeof sregs->interrupt_bitmap) << 3;
	pending_vec = find_first_bit(
		(const unsigned long *)sregs->interrupt_bitmap, max_bits);
	if (pending_vec < max_bits) {
		kvm_queue_interrupt(vcpu, pending_vec, false);
		pr_debug("Set back pending irq %d\n", pending_vec);
		if (irqchip_in_kernel(vcpu->kvm))
			kvm_pic_clear_isr_ack(vcpu->kvm);
	}

	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	/* Older userspace won't unhalt the vcpu on reset. */
	if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 &&
	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
	    !(vcpu->arch.cr0 & X86_CR0_PE))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	vcpu_put(vcpu);

	return 0;
}

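/*
 * Configure guest debugging: pick between guest-owned and debugger-
 * supplied debug registers and optionally queue a #DB or #BP.
 */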
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int i, r;

	vcpu_load(vcpu);

	if ((dbg->control & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) ==
	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) {
		for (i = 0; i < KVM_NR_DB_REGS; ++i)
			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
		vcpu->arch.switch_db_regs =
			(dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
	} else {
		for (i = 0; i < KVM_NR_DB_REGS; ++i)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
		vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
	}

	r = kvm_x86_ops->set_guest_debug(vcpu, dbg);

	if (dbg->control & KVM_GUESTDBG_INJECT_DB)
		kvm_queue_exception(vcpu, DB_VECTOR);
	else if (dbg->control & KVM_GUESTDBG_INJECT_BP)
		kvm_queue_exception(vcpu, BP_VECTOR);

	vcpu_put(vcpu);

	return r;
}

/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
#else
	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};

/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;

	vcpu_load(vcpu);
	down_read(&vcpu->kvm->slots_lock);
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
	up_read(&vcpu->kvm->slots_lock);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;
	vcpu_put(vcpu);

	return 0;
}

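/*
 * KVM_GET_FPU/KVM_SET_FPU: shuttle the fxsave image between the vcpu
 * and userspace.
 */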
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

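/*
 * Bring the guest FPU image to its architectural reset state without
 * clobbering the host's FPU contents.
 */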
void fx_init(struct kvm_vcpu *vcpu)
{
	unsigned after_mxcsr_mask;

	/*
	 * Touch the fpu the first time in non atomic context as if
	 * this is the first fpu instruction the exception handler
	 * will fire before the instruction returns and it'll have to
	 * allocate ram with GFP_KERNEL.
	 */
	if (!used_math())
		kvm_fx_save(&vcpu->arch.host_fx_image);

	/* Initialize guest FPU by resetting ours and saving into guest's */
	preempt_disable();
	kvm_fx_save(&vcpu->arch.host_fx_image);
	kvm_fx_finit();
	kvm_fx_save(&vcpu->arch.guest_fx_image);
	kvm_fx_restore(&vcpu->arch.host_fx_image);
	preempt_enable();

	vcpu->arch.cr0 |= X86_CR0_ET;
	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
	vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
	memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);

void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 1;
	kvm_fx_save(&vcpu->arch.host_fx_image);
	kvm_fx_restore(&vcpu->arch.guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	kvm_fx_save(&vcpu->arch.guest_fx_image);
	kvm_fx_restore(&vcpu->arch.host_fx_image);
	++vcpu->stat.fpu_reload;
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.time_page) {
		kvm_release_page_dirty(vcpu->arch.time_page);
		vcpu->arch.time_page = NULL;
	}

	kvm_x86_ops->vcpu_free(vcpu);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	return kvm_x86_ops->vcpu_create(kvm, id);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int r;

	/* We do fxsave: this must be aligned. */
	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);

	vcpu->arch.mtrr_state.have_fixed = 1;
	vcpu_load(vcpu);
	r = kvm_arch_vcpu_reset(vcpu);
	if (r == 0)
		r = kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);
	if (r < 0)
		goto free_vcpu;

	return 0;
free_vcpu:
	kvm_x86_ops->vcpu_free(vcpu);
	return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

	kvm_x86_ops->vcpu_free(vcpu);
}

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = false;
	vcpu->arch.nmi_injected = false;

	vcpu->arch.switch_db_regs = 0;
	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
	vcpu->arch.dr6 = DR6_FIXED_1;
	vcpu->arch.dr7 = DR7_FIXED_1;

	return kvm_x86_ops->vcpu_reset(vcpu);
}

void kvm_arch_hardware_enable(void *garbage)
{
	kvm_x86_ops->hardware_enable(garbage);
}

void kvm_arch_hardware_disable(void *garbage)
{
	kvm_x86_ops->hardware_disable(garbage);
}

int kvm_arch_hardware_setup(void)
{
	return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
	kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
	kvm_x86_ops->check_processor_compatibility(rtn);
}

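/*
 * First-time vcpu construction: allocate the PIO scratch page, create
 * the MMU context, and instantiate a local APIC when the VM uses an
 * in-kernel irqchip.
 */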
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct page *page;
	struct kvm *kvm;
	int r;

	BUG_ON(vcpu->kvm == NULL);
	kvm = vcpu->kvm;

	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->arch.pio_data = page_address(page);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	}

	return 0;

fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->arch.pio_data);
fail:
	return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_free_lapic(vcpu);
	down_read(&vcpu->kvm->slots_lock);
	kvm_mmu_destroy(vcpu);
	up_read(&vcpu->kvm->slots_lock);
	free_page((unsigned long)vcpu->arch.pio_data);
}

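/* Allocate and minimally initialise the arch-specific part of a VM. */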
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);

	rdtscll(kvm->arch.vm_init_tsc);

	return kvm;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	/*
	 * Unpin any mmu pages first.
	 */
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

void kvm_arch_sync_events(struct kvm *kvm)
{
	kvm_free_all_assigned_devices(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_iommu_unmap_guest(kvm);
	kvm_free_pit(kvm);
	kfree(kvm->arch.vpic);
	kfree(kvm->arch.vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	if (kvm->arch.apic_access_page)
		put_page(kvm->arch.apic_access_page);
	if (kvm->arch.ept_identity_pagetable)
		put_page(kvm->arch.ept_identity_pagetable);
	kfree(kvm);
}

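/*
 * Arch hook for memslot changes: maps or unmaps kernel-allocated slots
 * for old userspace, rebalances the shadow-page budget, and removes
 * write access so shadow entries get rebuilt for the new layout.
 */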
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc)
{
	int npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];

	/* To keep backward compatibility with older userspace,
	 * x86 needs to handle !user_alloc case.
	 */
	if (!user_alloc) {
		if (npages && !old.rmap) {
			unsigned long userspace_addr;

			down_write(&current->mm->mmap_sem);
			userspace_addr = do_mmap(NULL, 0,
						 npages * PAGE_SIZE,
						 PROT_READ | PROT_WRITE,
						 MAP_PRIVATE | MAP_ANONYMOUS,
						 0);
			up_write(&current->mm->mmap_sem);

			if (IS_ERR((void *)userspace_addr))
				return PTR_ERR((void *)userspace_addr);

			/* set userspace_addr atomically for kvm_hva_to_rmapp */
			spin_lock(&kvm->mmu_lock);
			memslot->userspace_addr = userspace_addr;
			spin_unlock(&kvm->mmu_lock);
		} else {
			if (!old.user_alloc && old.rmap) {
				int ret;

				down_write(&current->mm->mmap_sem);
				ret = do_munmap(current->mm, old.userspace_addr,
						old.npages * PAGE_SIZE);
				up_write(&current->mm->mmap_sem);
				if (ret < 0)
					printk(KERN_WARNING
					       "kvm_vm_ioctl_set_memory_region: "
					       "failed to munmap memory\n");
			}
		}
	}

	spin_lock(&kvm->mmu_lock);
	if (!kvm->arch.n_requested_mmu_pages) {
		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	}

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	spin_unlock(&kvm->mmu_lock);
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
	kvm_mmu_zap_all(kvm);
	kvm_reload_remote_mmus(kvm);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
	       || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
	       || vcpu->arch.nmi_pending;
}

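/*
 * Wake a vcpu sleeping in kvm_vcpu_block(), or IPI it out of guest
 * mode so it notices a newly raised request bit.
 */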
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();
	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
		if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
			smp_send_reschedule(cpu);
	put_cpu();
}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->interrupt_allowed(vcpu);
}