The most interesting bit here is irqfd/ioeventfd support for ARM and ARM64.

ARM/ARM64: fixes for live migration, irqfd and ioeventfd support
(enabling vhost, too), page aging

s390: interrupt handling rework, allowing to inject all local interrupts
via new ioctl and to get/set the full local irq state for migration and
introspection. New ioctls to access memory by virtual address, and to
get/set the guest storage keys. SIMD support.

MIPS: FPU and MIPS SIMD Architecture (MSA) support. Includes some
patches from Ralf Baechle's MIPS tree.

x86: bugfixes (notably for pvclock, the others are small) and cleanups.
Another small latency improvement for the TSC deadline timer.
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)

iQEcBAABAgAGBQJVJ9vmAAoJEL/70l94x66DoMEH/R3rh8IMf4jTiWRkcqohOMPX
k1+NaSY/lCKayaSgggJ2hcQenMbQoXEOdslvaA/H0oC+VfJGK+lmU6E63eMyyhjQ
Y+Px6L85NENIzDzaVu/TIWWuhil5PvIRr3VO8cvntExRoCjuekTUmNdOgCvN2ObW
wswN2qRdPIeEj2kkulbnye+9IV4G0Ne9bvsmUdOdfSSdi6ZcV43JcvrpOZT++mKj
RrKB+3gTMZYGJXMMLBwMkdl8mK1ozriD+q0mbomT04LUyGlPwYLl4pVRDBqyksD7
KsSSybaK2E4i5R80WEljgDMkNqrCgNfg6VZe4n9Y+CfAAOToNnkMJaFEi+yuqbs=
=yu2b
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM updates from Paolo Bonzini:
 "First batch of KVM changes for 4.1

  The most interesting bit here is irqfd/ioeventfd support for ARM and
  ARM64.

  Summary:

  ARM/ARM64:
     fixes for live migration, irqfd and ioeventfd support (enabling
     vhost, too), page aging

  s390:
     interrupt handling rework, allowing to inject all local interrupts
     via new ioctl and to get/set the full local irq state for migration
     and introspection.  New ioctls to access memory by virtual address,
     and to get/set the guest storage keys.  SIMD support.

  MIPS:
     FPU and MIPS SIMD Architecture (MSA) support.  Includes some
     patches from Ralf Baechle's MIPS tree.

  x86:
     bugfixes (notably for pvclock, the others are small) and cleanups.
     Another small latency improvement for the TSC deadline timer"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (146 commits)
  KVM: use slowpath for cross page cached accesses
  kvm: mmu: lazy collapse small sptes into large sptes
  KVM: x86: Clear CR2 on VCPU reset
  KVM: x86: DR0-DR3 are not clear on reset
  KVM: x86: BSP in MSR_IA32_APICBASE is writable
  KVM: x86: simplify kvm_apic_map
  KVM: x86: avoid logical_map when it is invalid
  KVM: x86: fix mixed APIC mode broadcast
  KVM: x86: use MDA for interrupt matching
  kvm/ppc/mpic: drop unused IRQ_testbit
  KVM: nVMX: remove unnecessary double caching of MAXPHYADDR
  KVM: nVMX: checks for address bits beyond MAXPHYADDR on VM-entry
  KVM: x86: cache maxphyaddr CPUID leaf in struct kvm_vcpu
  KVM: vmx: pass error code with internal error #2
  x86: vdso: fix pvclock races with task migration
  KVM: remove kvm_read_hva and kvm_read_hva_atomic
  KVM: x86: optimize delivery of TSC deadline timer interrupt
  KVM: x86: extract blocking logic from __vcpu_run
  kvm: x86: fix x86 eflags fixed bit
  KVM: s390: migrate vcpu interrupt state
  ...
commit 9003601310
@@ -997,7 +997,7 @@ for vm-wide capabilities.

4.38 KVM_GET_MP_STATE

Capability: KVM_CAP_MP_STATE
Architectures: x86, s390
Architectures: x86, s390, arm, arm64
Type: vcpu ioctl
Parameters: struct kvm_mp_state (out)
Returns: 0 on success; -1 on error
@@ -1011,7 +1011,7 @@ uniprocessor guests).

Possible values are:

 - KVM_MP_STATE_RUNNABLE:        the vcpu is currently running [x86]
 - KVM_MP_STATE_RUNNABLE:        the vcpu is currently running [x86,arm/arm64]
 - KVM_MP_STATE_UNINITIALIZED:   the vcpu is an application processor (AP)
                                 which has not yet received an INIT signal [x86]
 - KVM_MP_STATE_INIT_RECEIVED:   the vcpu has received an INIT signal, and is
@@ -1020,7 +1020,7 @@ Possible values are:
                                 is waiting for an interrupt [x86]
 - KVM_MP_STATE_SIPI_RECEIVED:   the vcpu has just received a SIPI (vector
                                 accessible via KVM_GET_VCPU_EVENTS) [x86]
 - KVM_MP_STATE_STOPPED:         the vcpu is stopped [s390]
 - KVM_MP_STATE_STOPPED:         the vcpu is stopped [s390,arm/arm64]
 - KVM_MP_STATE_CHECK_STOP:      the vcpu is in a special error state [s390]
 - KVM_MP_STATE_OPERATING:       the vcpu is operating (running or halted)
                                 [s390]
@@ -1031,11 +1031,15 @@ On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
in-kernel irqchip, the multiprocessing state must be maintained by userspace on
these architectures.

For arm/arm64:

The only states that are valid are KVM_MP_STATE_STOPPED and
KVM_MP_STATE_RUNNABLE which reflect if the vcpu is paused or not.

4.39 KVM_SET_MP_STATE

Capability: KVM_CAP_MP_STATE
Architectures: x86, s390
Architectures: x86, s390, arm, arm64
Type: vcpu ioctl
Parameters: struct kvm_mp_state (in)
Returns: 0 on success; -1 on error
@@ -1047,6 +1051,10 @@ On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
in-kernel irqchip, the multiprocessing state must be maintained by userspace on
these architectures.

For arm/arm64:

The only states that are valid are KVM_MP_STATE_STOPPED and
KVM_MP_STATE_RUNNABLE which reflect if the vcpu should be paused or not.
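As an illustration of the arm/arm64 semantics above, a minimal userspace
sketch (vcpu_fd and full error handling are assumed) that pauses a vcpu and
later resumes it:

        struct kvm_mp_state mp;

        /* Pause the vcpu; on arm/arm64 this means "the vcpu is stopped". */
        mp.mp_state = KVM_MP_STATE_STOPPED;
        if (ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp) < 0)
                perror("KVM_SET_MP_STATE");

        /* Later: query the state and make the vcpu runnable again. */
        if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) == 0 &&
            mp.mp_state == KVM_MP_STATE_STOPPED) {
                mp.mp_state = KVM_MP_STATE_RUNNABLE;
                ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
        }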
4.40 KVM_SET_IDENTITY_MAP_ADDR

@@ -1967,15 +1975,25 @@ registers, find a list below:
  MIPS  | KVM_REG_MIPS_CP0_STATUS       | 32
  MIPS  | KVM_REG_MIPS_CP0_CAUSE        | 32
  MIPS  | KVM_REG_MIPS_CP0_EPC          | 64
  MIPS  | KVM_REG_MIPS_CP0_PRID         | 32
  MIPS  | KVM_REG_MIPS_CP0_CONFIG       | 32
  MIPS  | KVM_REG_MIPS_CP0_CONFIG1      | 32
  MIPS  | KVM_REG_MIPS_CP0_CONFIG2      | 32
  MIPS  | KVM_REG_MIPS_CP0_CONFIG3      | 32
  MIPS  | KVM_REG_MIPS_CP0_CONFIG4      | 32
  MIPS  | KVM_REG_MIPS_CP0_CONFIG5      | 32
  MIPS  | KVM_REG_MIPS_CP0_CONFIG7      | 32
  MIPS  | KVM_REG_MIPS_CP0_ERROREPC     | 64
  MIPS  | KVM_REG_MIPS_COUNT_CTL        | 64
  MIPS  | KVM_REG_MIPS_COUNT_RESUME     | 64
  MIPS  | KVM_REG_MIPS_COUNT_HZ         | 64
  MIPS  | KVM_REG_MIPS_FPR_32(0..31)    | 32
  MIPS  | KVM_REG_MIPS_FPR_64(0..31)    | 64
  MIPS  | KVM_REG_MIPS_VEC_128(0..31)   | 128
  MIPS  | KVM_REG_MIPS_FCR_IR           | 32
  MIPS  | KVM_REG_MIPS_FCR_CSR          | 32
  MIPS  | KVM_REG_MIPS_MSA_IR           | 32
  MIPS  | KVM_REG_MIPS_MSA_CSR          | 32

ARM registers are mapped using the lower 32 bits.  The upper 16 of that
is the register group type, or coprocessor number:
@@ -2029,6 +2047,25 @@ patterns depending on whether they're 32-bit or 64-bit registers:
MIPS KVM control registers (see above) have the following id bit patterns:
  0x7030 0000 0002 <reg:16>

MIPS FPU registers (see KVM_REG_MIPS_FPR_{32,64}() above) have the following
id bit patterns depending on the size of the register being accessed. They are
always accessed according to the current guest FPU mode (Status.FR and
Config5.FRE), i.e. as the guest would see them, and they become unpredictable
if the guest FPU mode is changed. MIPS SIMD Architecture (MSA) vector
registers (see KVM_REG_MIPS_VEC_128() above) have similar patterns as they
overlap the FPU registers:
  0x7020 0000 0003 00 <0:3> <reg:5> (32-bit FPU registers)
  0x7030 0000 0003 00 <0:3> <reg:5> (64-bit FPU registers)
  0x7040 0000 0003 00 <0:3> <reg:5> (128-bit MSA vector registers)

MIPS FPU control registers (see KVM_REG_MIPS_FCR_{IR,CSR} above) have the
following id bit patterns:
  0x7020 0000 0003 01 <0:3> <reg:5>

MIPS MSA control registers (see KVM_REG_MIPS_MSA_{IR,CSR} above) have the
following id bit patterns:
  0x7020 0000 0003 02 <0:3> <reg:5>
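To make the encoding concrete, a hypothetical helper (not part of the KVM
headers) can assemble the id of a 64-bit FPU register straight from the
0x7030 0000 0003 00 pattern above; the mask is written out literally rather
than taken from a header:

        /* 0x7030 0000 0003 00 <0:3> <reg:5>: id of 64-bit FPU register <reg>. */
        static inline __u64 mips_fpr64_reg_id(unsigned int reg)
        {
                return 0x7030000000030000ULL | (reg & 0x1f);
        }

The 32-bit FPU and 128-bit MSA variants differ only in the size prefix
(0x7020 and 0x7040 respectively).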
4.69 KVM_GET_ONE_REG

@@ -2234,7 +2271,7 @@ into the hash PTE second double word).

4.75 KVM_IRQFD

Capability: KVM_CAP_IRQFD
Architectures: x86 s390
Architectures: x86 s390 arm arm64
Type: vm ioctl
Parameters: struct kvm_irqfd (in)
Returns: 0 on success, -1 on error
@@ -2260,6 +2297,10 @@ Note that closing the resamplefd is not sufficient to disable the
irqfd. The KVM_IRQFD_FLAG_RESAMPLE is only necessary on assignment
and need not be specified with KVM_IRQFD_FLAG_DEASSIGN.

On ARM/ARM64, the gsi field in the kvm_irqfd struct specifies the Shared
Peripheral Interrupt (SPI) index, such that the GIC interrupt ID is
given by gsi + 32.
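For example, wiring an eventfd to SPI 5 (GIC interrupt ID 37) might look
like this sketch (vm_fd assumed; error handling omitted):

        struct kvm_irqfd irqfd = { 0 };

        irqfd.fd  = eventfd(0, EFD_CLOEXEC);  /* signalled by the device model */
        irqfd.gsi = 5;                        /* SPI index; GIC interrupt ID = 5 + 32 */
        ioctl(vm_fd, KVM_IRQFD, &irqfd);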
4.76 KVM_PPC_ALLOCATE_HTAB

Capability: KVM_CAP_PPC_ALLOC_HTAB
@@ -2716,6 +2757,227 @@ The fields in each entry are defined as follows:
   eax, ebx, ecx, edx: the values returned by the cpuid instruction for
         this function/index combination

4.89 KVM_S390_MEM_OP

Capability: KVM_CAP_S390_MEM_OP
Architectures: s390
Type: vcpu ioctl
Parameters: struct kvm_s390_mem_op (in)
Returns: = 0 on success,
         < 0 on generic error (e.g. -EFAULT or -ENOMEM),
         > 0 if an exception occurred while walking the page tables

Read or write data from/to the logical (virtual) memory of a VCPU.

Parameters are specified via the following structure:

struct kvm_s390_mem_op {
        __u64 gaddr;            /* the guest address */
        __u64 flags;            /* flags */
        __u32 size;             /* amount of bytes */
        __u32 op;               /* type of operation */
        __u64 buf;              /* buffer in userspace */
        __u8 ar;                /* the access register number */
        __u8 reserved[31];      /* should be set to 0 */
};

The type of operation is specified in the "op" field. It is either
KVM_S390_MEMOP_LOGICAL_READ for reading from logical memory space or
KVM_S390_MEMOP_LOGICAL_WRITE for writing to logical memory space. The
KVM_S390_MEMOP_F_CHECK_ONLY flag can be set in the "flags" field to check
whether the corresponding memory access would create an access exception
(without touching the data in the memory at the destination). In case an
access exception occurred while walking the MMU tables of the guest, the
ioctl returns a positive error number to indicate the type of exception.
This exception is also raised directly at the corresponding VCPU if the
flag KVM_S390_MEMOP_F_INJECT_EXCEPTION is set in the "flags" field.

The start address of the memory region has to be specified in the "gaddr"
field, and the length of the region in the "size" field. "buf" is the buffer
supplied by the userspace application where the read data should be written
to for KVM_S390_MEMOP_LOGICAL_READ, or where the data that should be written
is stored for a KVM_S390_MEMOP_LOGICAL_WRITE. "buf" is unused and can be NULL
when KVM_S390_MEMOP_F_CHECK_ONLY is specified. "ar" designates the access
register number to be used.

The "reserved" field is meant for future extensions. It is not used by
KVM with the currently defined set of flags.
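As a usage sketch (vcpu_fd assumed; error handling trimmed), reading 256
bytes from a guest logical address could look like:

        __u8 data[256];
        struct kvm_s390_mem_op op = { 0 };

        op.gaddr = 0x2000;                      /* guest logical address */
        op.size  = sizeof(data);
        op.op    = KVM_S390_MEMOP_LOGICAL_READ;
        op.buf   = (__u64)(unsigned long)data;  /* userspace buffer */
        op.ar    = 0;                           /* access register 0 */

        int rc = ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
        if (rc > 0)
                fprintf(stderr, "access exception %d during table walk\n", rc);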
4.90 KVM_S390_GET_SKEYS

Capability: KVM_CAP_S390_SKEYS
Architectures: s390
Type: vm ioctl
Parameters: struct kvm_s390_skeys
Returns: 0 on success, KVM_S390_GET_KEYS_NONE if guest is not using storage
         keys, negative value on error

This ioctl is used to get guest storage key values on the s390
architecture. The ioctl takes parameters via the kvm_s390_skeys struct.

struct kvm_s390_skeys {
        __u64 start_gfn;
        __u64 count;
        __u64 skeydata_addr;
        __u32 flags;
        __u32 reserved[9];
};

The start_gfn field is the number of the first guest frame whose storage keys
you want to get.

The count field is the number of consecutive frames (starting from start_gfn)
whose storage keys to get. The count field must be at least 1 and the maximum
allowed value is defined as KVM_S390_SKEYS_ALLOC_MAX. Values outside this range
will cause the ioctl to return -EINVAL.

The skeydata_addr field is the address to a buffer large enough to hold count
bytes. This buffer will be filled with storage key data by the ioctl.
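A sketch of fetching the keys of the first 128 guest frames (vm_fd assumed):

        __u8 keys[128];
        struct kvm_s390_skeys args = { 0 };

        args.start_gfn     = 0;
        args.count         = sizeof(keys);      /* one key byte per frame */
        args.skeydata_addr = (__u64)(unsigned long)keys;

        int rc = ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
        if (rc == KVM_S390_GET_KEYS_NONE)
                ; /* the guest does not use storage keys */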
4.91 KVM_S390_SET_SKEYS

Capability: KVM_CAP_S390_SKEYS
Architectures: s390
Type: vm ioctl
Parameters: struct kvm_s390_skeys
Returns: 0 on success, negative value on error

This ioctl is used to set guest storage key values on the s390
architecture. The ioctl takes parameters via the kvm_s390_skeys struct.
See section on KVM_S390_GET_SKEYS for struct definition.

The start_gfn field is the number of the first guest frame whose storage keys
you want to set.

The count field is the number of consecutive frames (starting from start_gfn)
whose storage keys to set. The count field must be at least 1 and the maximum
allowed value is defined as KVM_S390_SKEYS_ALLOC_MAX. Values outside this range
will cause the ioctl to return -EINVAL.

The skeydata_addr field is the address to a buffer containing count bytes of
storage keys. Each byte in the buffer will be set as the storage key for a
single frame starting at start_gfn for count frames.

Note: If any architecturally invalid key value is found in the given data then
the ioctl will return -EINVAL.
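Restoring the keys on a migration target is symmetric; continuing the sketch
from KVM_S390_GET_SKEYS above (args and keys as previously set up):

        args.start_gfn     = 0;
        args.count         = sizeof(keys);      /* keys[] as previously filled */
        args.skeydata_addr = (__u64)(unsigned long)keys;
        if (ioctl(vm_fd, KVM_S390_SET_SKEYS, &args) < 0)
                perror("KVM_S390_SET_SKEYS");   /* e.g. EINVAL for invalid key values */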
4.92 KVM_S390_IRQ

Capability: KVM_CAP_S390_INJECT_IRQ
Architectures: s390
Type: vcpu ioctl
Parameters: struct kvm_s390_irq (in)
Returns: 0 on success, -1 on error
Errors:
  EINVAL: interrupt type is invalid
          type is KVM_S390_SIGP_STOP and flag parameter is invalid value
          type is KVM_S390_INT_EXTERNAL_CALL and code is bigger
            than the maximum of VCPUs
  EBUSY:  type is KVM_S390_SIGP_SET_PREFIX and vcpu is not stopped
          type is KVM_S390_SIGP_STOP and a stop irq is already pending
          type is KVM_S390_INT_EXTERNAL_CALL and an external call interrupt
            is already pending

Allows injecting an interrupt into the guest.

Using struct kvm_s390_irq as a parameter allows injecting additional
payload which is not possible via KVM_S390_INTERRUPT.

Interrupt parameters are passed via kvm_s390_irq:

struct kvm_s390_irq {
        __u64 type;
        union {
                struct kvm_s390_io_info io;
                struct kvm_s390_ext_info ext;
                struct kvm_s390_pgm_info pgm;
                struct kvm_s390_emerg_info emerg;
                struct kvm_s390_extcall_info extcall;
                struct kvm_s390_prefix_info prefix;
                struct kvm_s390_stop_info stop;
                struct kvm_s390_mchk_info mchk;
                char reserved[64];
        } u;
};

type can be one of the following:

KVM_S390_SIGP_STOP - sigp stop; parameter in .stop
KVM_S390_PROGRAM_INT - program check; parameters in .pgm
KVM_S390_SIGP_SET_PREFIX - sigp set prefix; parameters in .prefix
KVM_S390_RESTART - restart; no parameters
KVM_S390_INT_CLOCK_COMP - clock comparator interrupt; no parameters
KVM_S390_INT_CPU_TIMER - CPU timer interrupt; no parameters
KVM_S390_INT_EMERGENCY - sigp emergency; parameters in .emerg
KVM_S390_INT_EXTERNAL_CALL - sigp external call; parameters in .extcall
KVM_S390_MCHK - machine check interrupt; parameters in .mchk

Note that the vcpu ioctl is asynchronous to vcpu execution.
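For instance, a restart interrupt carries no payload, so injecting one
reduces to this sketch (vcpu_fd assumed):

        struct kvm_s390_irq irq = { 0 };

        irq.type = KVM_S390_RESTART;    /* no parameters in the union */
        if (ioctl(vcpu_fd, KVM_S390_IRQ, &irq) < 0)
                perror("KVM_S390_IRQ");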
4.94 KVM_S390_GET_IRQ_STATE

Capability: KVM_CAP_S390_IRQ_STATE
Architectures: s390
Type: vcpu ioctl
Parameters: struct kvm_s390_irq_state (out)
Returns: >= number of bytes copied into buffer,
         -EINVAL if buffer size is 0,
         -ENOBUFS if buffer size is too small to fit all pending interrupts,
         -EFAULT if the buffer address was invalid

This ioctl allows userspace to retrieve the complete state of all currently
pending interrupts in a single buffer. Use cases include migration
and introspection. The parameter structure contains the address of a
userspace buffer and its length:

struct kvm_s390_irq_state {
        __u64 buf;
        __u32 flags;
        __u32 len;
        __u32 reserved[4];
};

Userspace passes in the above struct and for each pending interrupt a
struct kvm_s390_irq is copied to the provided buffer.

If -ENOBUFS is returned the buffer provided was too small and userspace
may retry with a bigger buffer.
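The -ENOBUFS convention suggests a grow-and-retry loop; a rough sketch
(negative returns of the ioctl surface as -1 with errno set):

        struct kvm_s390_irq_state state = { 0 };
        __u32 len = 16 * sizeof(struct kvm_s390_irq);
        void *buf = NULL;
        int rc;

        do {
                buf = realloc(buf, len);
                state.buf = (__u64)(unsigned long)buf;
                state.len = len;
                rc = ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &state);
                len *= 2;
        } while (rc < 0 && errno == ENOBUFS);
        /* on success, rc is the number of bytes copied into buf */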
4.95 KVM_S390_SET_IRQ_STATE

Capability: KVM_CAP_S390_IRQ_STATE
Architectures: s390
Type: vcpu ioctl
Parameters: struct kvm_s390_irq_state (in)
Returns: 0 on success,
         -EFAULT if the buffer address was invalid,
         -EINVAL for an invalid buffer length (see below),
         -EBUSY if there were already interrupts pending,
         errors occurring when actually injecting the
         interrupt. See KVM_S390_IRQ.

This ioctl allows userspace to set the complete state of all cpu-local
interrupts currently pending for the vcpu. It is intended for restoring
interrupt state after a migration. The input parameter is a userspace buffer
containing a struct kvm_s390_irq_state:

struct kvm_s390_irq_state {
        __u64 buf;
        __u32 len;
        __u32 pad;
};

The userspace memory referenced by buf contains a struct kvm_s390_irq
for each interrupt to be injected into the guest.
If one of the interrupts could not be injected for some reason the
ioctl aborts.

len must be a multiple of sizeof(struct kvm_s390_irq). It must be > 0
and it must not exceed (max_vcpus + 32) * sizeof(struct kvm_s390_irq),
which is the maximum number of possibly pending cpu-local interrupts.
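Restoring the saved state on the target then looks roughly like this sketch
(buf and bytes as obtained from KVM_S390_GET_IRQ_STATE above):

        struct kvm_s390_irq_state state = { 0 };

        state.buf = (__u64)(unsigned long)buf;
        state.len = bytes;      /* multiple of sizeof(struct kvm_s390_irq) */
        if (ioctl(vcpu_fd, KVM_S390_SET_IRQ_STATE, &state) < 0)
                perror("KVM_S390_SET_IRQ_STATE");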
5. The kvm_run structure
------------------------

@@ -3189,6 +3451,31 @@ Parameters: none
This capability enables the in-kernel irqchip for s390. Please refer to
"4.24 KVM_CREATE_IRQCHIP" for details.
6.9 KVM_CAP_MIPS_FPU

Architectures: mips
Target: vcpu
Parameters: args[0] is reserved for future use (should be 0).

This capability allows the use of the host Floating Point Unit by the guest. It
allows the Config1.FP bit to be set to enable the FPU in the guest. Once this is
done the KVM_REG_MIPS_FPR_* and KVM_REG_MIPS_FCR_* registers can be accessed
(depending on the current guest FPU register mode), and the Status.FR,
Config5.FRE bits are accessible via the KVM API and also from the guest,
depending on them being supported by the FPU.

6.10 KVM_CAP_MIPS_MSA

Architectures: mips
Target: vcpu
Parameters: args[0] is reserved for future use (should be 0).

This capability allows the use of the MIPS SIMD Architecture (MSA) by the guest.
It allows the Config3.MSAP bit to be set to enable the use of MSA by the guest.
Once this is done the KVM_REG_MIPS_VEC_* and KVM_REG_MIPS_MSA_* registers can be
accessed, and the Config5.MSAEn bit is accessible via the KVM API and also from
the guest.
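Both capabilities are per-vcpu and are switched on with KVM_ENABLE_CAP,
roughly as in this sketch (vcpu_fd assumed):

        struct kvm_enable_cap cap = { 0 };

        cap.cap = KVM_CAP_MIPS_FPU;     /* or KVM_CAP_MIPS_MSA */
        /* args[0] is reserved and stays 0 */
        if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
                perror("KVM_ENABLE_CAP");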
7. Capabilities that can be enabled on VMs
------------------------------------------

@@ -3248,3 +3535,41 @@ All other orders will be handled completely in user space.
Only privileged operation exceptions will be checked for in the kernel (or even
in the hardware prior to interception). If this capability is not enabled, the
old way of handling SIGP orders is used (partially in kernel and user space).

7.3 KVM_CAP_S390_VECTOR_REGISTERS

Architectures: s390
Parameters: none
Returns: 0 on success, negative value on error

Allows use of the vector registers introduced with z13 processor, and
provides for the synchronization between host and user space. Will
return -EINVAL if the machine does not support vectors.

7.4 KVM_CAP_S390_USER_STSI

Architectures: s390
Parameters: none

This capability allows post-handlers for the STSI instruction. After
initial handling in the kernel, KVM exits to user space with
KVM_EXIT_S390_STSI to allow user space to insert further data.

Before exiting to userspace, kvm handlers should fill in s390_stsi field of
vcpu->run:
struct {
        __u64 addr;
        __u8 ar;
        __u8 reserved;
        __u8 fc;
        __u8 sel1;
        __u16 sel2;
} s390_stsi;

@addr - guest address of STSI SYSIB
@fc   - function code
@sel1 - selector 1
@sel2 - selector 2
@ar   - access register number

KVM handlers should exit to userspace with rc = -EREMOTE.
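On the userspace side, handling that exit might look like the following
sketch inside the vcpu run loop (run is the mmap'ed kvm_run structure;
insert_sysib_data() is a hypothetical helper):

        case KVM_EXIT_S390_STSI:
                /* Add further data to the guest SYSIB at run->s390_stsi.addr,
                 * selected by fc/sel1/sel2; the guest address is accessed
                 * through access register run->s390_stsi.ar. */
                insert_sysib_data(run->s390_stsi.addr, run->s390_stsi.fc,
                                  run->s390_stsi.sel1, run->s390_stsi.sel2,
                                  run->s390_stsi.ar);
                break;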
@@ -27,6 +27,9 @@ Groups:
    Copies all floating interrupts into a buffer provided by userspace.
    When the buffer is too small it returns -ENOMEM, which is the indication
    for userspace to try again with a bigger buffer.
    -ENOBUFS is returned when the allocation of a kernelspace buffer has
    failed.
    -EFAULT is returned when copying data to userspace failed.
    All interrupts remain pending, i.e. are not deleted from the list of
    currently pending interrupts.
    attr->addr contains the userspace address of the buffer into which all
@@ -5591,6 +5591,8 @@ S: Supported
F: Documentation/*/kvm*.txt
F: Documentation/virtual/kvm/
F: arch/*/kvm/
F: arch/x86/kernel/kvm.c
F: arch/x86/kernel/kvmclock.c
F: arch/*/include/asm/kvm*
F: include/linux/kvm*
F: include/uapi/linux/kvm*
@@ -185,6 +185,7 @@
#define HSR_COND        (0xfU << HSR_COND_SHIFT)

#define FSC_FAULT       (0x04)
#define FSC_ACCESS      (0x08)
#define FSC_PERM        (0x0c)

/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
@@ -27,6 +27,8 @@
#include <asm/fpstate.h>
#include <kvm/arm_arch_timer.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
#else
@@ -165,19 +167,10 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);

unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

/* We do not have shadow page tables, hence the empty hooks */
static inline int kvm_age_hva(struct kvm *kvm, unsigned long start,
                              unsigned long end)
{
        return 0;
}

static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        return 0;
}

static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
                                                         unsigned long address)
{
@@ -28,28 +28,6 @@ struct kvm_decode {
        bool sign_extend;
};

/*
 * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
 * which is an anonymous type. Use our own type instead.
 */
struct kvm_exit_mmio {
        phys_addr_t phys_addr;
        u8          data[8];
        u32         len;
        bool        is_write;
        void        *private;
};

static inline void kvm_prepare_mmio(struct kvm_run *run,
                                    struct kvm_exit_mmio *mmio)
{
        run->mmio.phys_addr = mmio->phys_addr;
        run->mmio.len       = mmio->len;
        run->mmio.is_write  = mmio->is_write;
        memcpy(run->mmio.data, mmio->data, mmio->len);
        run->exit_reason    = KVM_EXIT_MMIO;
}

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
                 phys_addr_t fault_ipa);
@@ -198,6 +198,9 @@ struct kvm_arch_memory_slot {
/* Highest supported SPI, from VGIC_NR_IRQS */
#define KVM_ARM_IRQ_GIC_MAX     127

/* One single KVM irqchip, ie. the VGIC */
#define KVM_NR_IRQCHIPS         1

/* PSCI interface */
#define KVM_PSCI_FN_BASE        0x95c1ba5e
#define KVM_PSCI_FN(n)          (KVM_PSCI_FN_BASE + (n))
@@ -190,7 +190,6 @@ int main(void)
        DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar));
        DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar));
        DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
#ifdef CONFIG_KVM_ARM_VGIC
        DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
        DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
        DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
@@ -200,14 +199,11 @@ int main(void)
        DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
        DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
        DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
#ifdef CONFIG_KVM_ARM_TIMER
        DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
        DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
        DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff));
        DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
#endif
        DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
#endif
        DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
#endif
        return 0;
@@ -18,6 +18,7 @@ if VIRTUALIZATION

config KVM
        bool "Kernel-based Virtual Machine (KVM) support"
        depends on MMU && OF
        select PREEMPT_NOTIFIERS
        select ANON_INODES
        select HAVE_KVM_CPU_RELAX_INTERCEPT
@@ -26,10 +27,12 @@ config KVM
        select KVM_ARM_HOST
        select KVM_GENERIC_DIRTYLOG_READ_PROTECT
        select SRCU
        depends on ARM_VIRT_EXT && ARM_LPAE
        select MMU_NOTIFIER
        select HAVE_KVM_EVENTFD
        select HAVE_KVM_IRQFD
        depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
        ---help---
          Support hosting virtualized guest machines. You will also
          need to select one or more of the processor modules below.
          Support hosting virtualized guest machines.

          This module provides access to the hardware capabilities through
          a character device node named /dev/kvm.
@@ -37,10 +40,7 @@ config KVM
          If unsure, say N.

config KVM_ARM_HOST
        bool "KVM host support for ARM cpus."
        depends on KVM
        depends on MMU
        select MMU_NOTIFIER
        bool
        ---help---
          Provides host support for ARM processors.

@@ -55,20 +55,4 @@ config KVM_ARM_MAX_VCPUS
          large, so only choose a reasonable number that you expect to
          actually use.

config KVM_ARM_VGIC
        bool "KVM support for Virtual GIC"
        depends on KVM_ARM_HOST && OF
        select HAVE_KVM_IRQCHIP
        default y
        ---help---
          Adds support for a hardware assisted, in-kernel GIC emulation.

config KVM_ARM_TIMER
        bool "KVM support for Architected Timers"
        depends on KVM_ARM_VGIC && ARM_ARCH_TIMER
        select HAVE_KVM_IRQCHIP
        default y
        ---help---
          Adds support for the Architected Timers in virtual machines

endif # VIRTUALIZATION
@@ -7,7 +7,7 @@ ifeq ($(plus_virt),+virt)
        plus_virt_def := -DREQUIRES_VIRT=1
endif

ccflags-y += -Ivirt/kvm -Iarch/arm/kvm
ccflags-y += -Iarch/arm/kvm
CFLAGS_arm.o := -I. $(plus_virt_def)
CFLAGS_mmu.o := -I.

@@ -15,12 +15,12 @@ AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)

KVM := ../../../virt/kvm
kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o

obj-y += kvm-arm.o init.o interrupts.o
obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
obj-y += $(KVM)/arm/vgic.o
obj-y += $(KVM)/arm/vgic-v2.o
obj-y += $(KVM)/arm/vgic-v2-emul.o
obj-y += $(KVM)/arm/arch_timer.o
@@ -61,8 +61,6 @@ static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u8 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);

static bool vgic_present;

static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
        BUG_ON(preemptible());
@@ -173,8 +171,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        int r;
        switch (ext) {
        case KVM_CAP_IRQCHIP:
                r = vgic_present;
                break;
        case KVM_CAP_IRQFD:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_USER_MEMORY:
        case KVM_CAP_SYNC_MMU:
@@ -183,6 +181,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_ARM_PSCI:
        case KVM_CAP_ARM_PSCI_0_2:
        case KVM_CAP_READONLY_MEM:
        case KVM_CAP_MP_STATE:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
@@ -268,7 +267,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return 0;
        return kvm_timer_should_fire(vcpu);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
@@ -313,13 +312,29 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
        if (vcpu->arch.pause)
                mp_state->mp_state = KVM_MP_STATE_STOPPED;
        else
                mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

        return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
        switch (mp_state->mp_state) {
        case KVM_MP_STATE_RUNNABLE:
                vcpu->arch.pause = false;
                break;
        case KVM_MP_STATE_STOPPED:
                vcpu->arch.pause = true;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/**
@@ -452,6 +467,11 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
        return 0;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
        return vgic_initialized(kvm);
}

static void vcpu_pause(struct kvm_vcpu *vcpu)
{
        wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
@@ -831,8 +851,6 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,

        switch (dev_id) {
        case KVM_ARM_DEVICE_VGIC_V2:
                if (!vgic_present)
                        return -ENXIO;
                return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
        default:
                return -ENODEV;
@@ -847,10 +865,7 @@ long kvm_arch_vm_ioctl(struct file *filp,

        switch (ioctl) {
        case KVM_CREATE_IRQCHIP: {
                if (vgic_present)
                        return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                else
                        return -ENXIO;
                return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
        }
        case KVM_ARM_SET_DEVICE_ADDR: {
                struct kvm_arm_device_addr dev_addr;
@@ -1035,10 +1050,6 @@ static int init_hyp_mode(void)
        if (err)
                goto out_free_context;

#ifdef CONFIG_KVM_ARM_VGIC
        vgic_present = true;
#endif

        /*
         * Init HYP architected timer support
         */
@@ -109,22 +109,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        return -EINVAL;
}

#ifndef CONFIG_KVM_ARM_TIMER

#define NUM_TIMER_REGS 0

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        return 0;
}

static bool is_timer_reg(u64 index)
{
        return false;
}

#else

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
@@ -152,8 +136,6 @@ static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
        return 0;
}

#endif

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        void __user *uaddr = (void __user *)(long)reg->addr;
@@ -402,7 +402,6 @@ vcpu .req r0 @ vcpu pointer always in r0
 * Assumes vcpu pointer in vcpu reg
 */
.macro save_vgic_state
#ifdef CONFIG_KVM_ARM_VGIC
        /* Get VGIC VCTRL base into r2 */
        ldr     r2, [vcpu, #VCPU_KVM]
        ldr     r2, [r2, #KVM_VGIC_VCTRL]
@@ -460,7 +459,6 @@ ARM_BE8(rev r6, r6 )
        subs    r4, r4, #1
        bne     1b
2:
#endif
.endm

/*
@@ -469,7 +467,6 @@ ARM_BE8(rev r6, r6 )
 * Assumes vcpu pointer in vcpu reg
 */
.macro restore_vgic_state
#ifdef CONFIG_KVM_ARM_VGIC
        /* Get VGIC VCTRL base into r2 */
        ldr     r2, [vcpu, #VCPU_KVM]
        ldr     r2, [r2, #KVM_VGIC_VCTRL]
@@ -501,7 +498,6 @@ ARM_BE8(rev r6, r6 )
        subs    r4, r4, #1
        bne     1b
2:
#endif
.endm

#define CNTHCTL_PL1PCTEN        (1 << 0)
@@ -515,7 +511,6 @@ ARM_BE8(rev r6, r6 )
 * Clobbers r2-r5
 */
.macro save_timer_state
#ifdef CONFIG_KVM_ARM_TIMER
        ldr     r4, [vcpu, #VCPU_KVM]
        ldr     r2, [r4, #KVM_TIMER_ENABLED]
        cmp     r2, #0
@@ -537,7 +532,6 @@ ARM_BE8(rev r6, r6 )
        mcrr    p15, 4, r2, r2, c14     @ CNTVOFF

1:
#endif
        @ Allow physical timer/counter access for the host
        mrc     p15, 4, r2, c14, c1, 0  @ CNTHCTL
        orr     r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
@@ -559,7 +553,6 @@ ARM_BE8(rev r6, r6 )
        bic     r2, r2, #CNTHCTL_PL1PCEN
        mcr     p15, 4, r2, c14, c1, 0  @ CNTHCTL

#ifdef CONFIG_KVM_ARM_TIMER
        ldr     r4, [vcpu, #VCPU_KVM]
        ldr     r2, [r4, #KVM_TIMER_ENABLED]
        cmp     r2, #0
@@ -579,7 +572,6 @@ ARM_BE8(rev r6, r6 )
        and     r2, r2, #3
        mcr     p15, 0, r2, c14, c3, 1  @ CNTV_CTL
1:
#endif
.endm

.equ vmentry, 0
@@ -121,12 +121,11 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
        return 0;
}

static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                      struct kvm_exit_mmio *mmio)
static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
{
        unsigned long rt;
        int len;
        bool is_write, sign_extend;
        int access_size;
        bool sign_extend;

        if (kvm_vcpu_dabt_isextabt(vcpu)) {
                /* cache operation on I/O addr, tell guest unsupported */
@@ -140,17 +139,15 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                return 1;
        }

        len = kvm_vcpu_dabt_get_as(vcpu);
        if (unlikely(len < 0))
                return len;
        access_size = kvm_vcpu_dabt_get_as(vcpu);
        if (unlikely(access_size < 0))
                return access_size;

        is_write = kvm_vcpu_dabt_iswrite(vcpu);
        *is_write = kvm_vcpu_dabt_iswrite(vcpu);
        sign_extend = kvm_vcpu_dabt_issext(vcpu);
        rt = kvm_vcpu_dabt_get_rd(vcpu);

        mmio->is_write = is_write;
        mmio->phys_addr = fault_ipa;
        mmio->len = len;
        *len = access_size;
        vcpu->arch.mmio_decode.sign_extend = sign_extend;
        vcpu->arch.mmio_decode.rt = rt;

@@ -165,20 +162,20 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
                 phys_addr_t fault_ipa)
{
        struct kvm_exit_mmio mmio;
        unsigned long data;
        unsigned long rt;
        int ret;
        bool is_write;
        int len;
        u8 data_buf[8];

        /*
         * Prepare MMIO operation. First stash it in a private
         * structure that we can use for in-kernel emulation. If the
         * kernel can't handle it, copy it into run->mmio and let user
         * space do its magic.
         * Prepare MMIO operation. First decode the syndrome data we get
         * from the CPU. Then try if some in-kernel emulation feels
         * responsible, otherwise let user space do its magic.
         */

        if (kvm_vcpu_dabt_isvalid(vcpu)) {
                ret = decode_hsr(vcpu, fault_ipa, &mmio);
                ret = decode_hsr(vcpu, &is_write, &len);
                if (ret)
                        return ret;
        } else {
@@ -188,21 +185,34 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,

        rt = vcpu->arch.mmio_decode.rt;

        if (mmio.is_write) {
                data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt),
                                               mmio.len);
        if (is_write) {
                data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);

                trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, mmio.len,
                               fault_ipa, data);
                mmio_write_buf(mmio.data, mmio.len, data);
                trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
                mmio_write_buf(data_buf, len, data);

                ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
                                       data_buf);
        } else {
                trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, mmio.len,
                trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
                               fault_ipa, 0);

                ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
                                      data_buf);
        }

        if (vgic_handle_mmio(vcpu, run, &mmio))
                return 1;
        /* Now prepare kvm_run for the potential return to userland. */
        run->mmio.is_write  = is_write;
        run->mmio.phys_addr = fault_ipa;
        run->mmio.len       = len;
        memcpy(run->mmio.data, data_buf, len);

        kvm_prepare_mmio(run, &mmio);
        if (!ret) {
                /* We handled the access successfully in the kernel. */
                kvm_handle_mmio_return(vcpu, run);
                return 1;
        }

        run->exit_reason = KVM_EXIT_MMIO;
        return 0;
}
@@ -1330,10 +1330,51 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,

out_unlock:
        spin_unlock(&kvm->mmu_lock);
        kvm_set_pfn_accessed(pfn);
        kvm_release_pfn_clean(pfn);
        return ret;
}

/*
 * Resolve the access fault by making the page young again.
 * Note that because the faulting entry is guaranteed not to be
 * cached in the TLB, we don't need to invalidate anything.
 */
static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
        pmd_t *pmd;
        pte_t *pte;
        pfn_t pfn;
        bool pfn_valid = false;

        trace_kvm_access_fault(fault_ipa);

        spin_lock(&vcpu->kvm->mmu_lock);

        pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
        if (!pmd || pmd_none(*pmd))     /* Nothing there */
                goto out;

        if (kvm_pmd_huge(*pmd)) {       /* THP, HugeTLB */
                *pmd = pmd_mkyoung(*pmd);
                pfn = pmd_pfn(*pmd);
                pfn_valid = true;
                goto out;
        }

        pte = pte_offset_kernel(pmd, fault_ipa);
        if (pte_none(*pte))             /* Nothing there either */
                goto out;

        *pte = pte_mkyoung(*pte);       /* Just a page... */
        pfn = pte_pfn(*pte);
        pfn_valid = true;
out:
        spin_unlock(&vcpu->kvm->mmu_lock);
        if (pfn_valid)
                kvm_set_pfn_accessed(pfn);
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:      the VCPU pointer
@@ -1364,7 +1405,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)

        /* Check the stage-2 fault is trans. fault or write fault */
        fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
        if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
        if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
            fault_status != FSC_ACCESS) {
                kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
                        kvm_vcpu_trap_get_class(vcpu),
                        (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
@@ -1400,6 +1442,12 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
        /* Userspace should not be able to register out-of-bounds IPAs */
        VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);

        if (fault_status == FSC_ACCESS) {
                handle_access_fault(vcpu, fault_ipa);
                ret = 1;
                goto out_unlock;
        }

        ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
        if (ret == 0)
                ret = 1;
@@ -1408,15 +1456,16 @@ out_unlock:
        return ret;
}

static void handle_hva_to_gpa(struct kvm *kvm,
                              unsigned long start,
                              unsigned long end,
                              void (*handler)(struct kvm *kvm,
                                              gpa_t gpa, void *data),
                              void *data)
static int handle_hva_to_gpa(struct kvm *kvm,
                             unsigned long start,
                             unsigned long end,
                             int (*handler)(struct kvm *kvm,
                                            gpa_t gpa, void *data),
                             void *data)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int ret = 0;

        slots = kvm_memslots(kvm);

@@ -1440,14 +1489,17 @@ static void handle_hva_to_gpa(struct kvm *kvm,

                for (; gfn < gfn_end; ++gfn) {
                        gpa_t gpa = gfn << PAGE_SHIFT;
                        handler(kvm, gpa, data);
                        ret |= handler(kvm, gpa, data);
                }
        }

        return ret;
}

static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        unmap_stage2_range(kvm, gpa, PAGE_SIZE);
        return 0;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
@@ -1473,7 +1525,7 @@ int kvm_unmap_hva_range(struct kvm *kvm,
        return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        pte_t *pte = (pte_t *)data;

@@ -1485,6 +1537,7 @@ static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
         * through this calling path.
         */
        stage2_set_pte(kvm, NULL, gpa, pte, 0);
        return 0;
}

@@ -1501,6 +1554,67 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
        handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        pmd_t *pmd;
        pte_t *pte;

        pmd = stage2_get_pmd(kvm, NULL, gpa);
        if (!pmd || pmd_none(*pmd))     /* Nothing there */
                return 0;

        if (kvm_pmd_huge(*pmd)) {       /* THP, HugeTLB */
                if (pmd_young(*pmd)) {
                        *pmd = pmd_mkold(*pmd);
                        return 1;
                }

                return 0;
        }

        pte = pte_offset_kernel(pmd, gpa);
        if (pte_none(*pte))
                return 0;

        if (pte_young(*pte)) {
                *pte = pte_mkold(*pte); /* Just a page... */
                return 1;
        }

        return 0;
}

static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        pmd_t *pmd;
        pte_t *pte;

        pmd = stage2_get_pmd(kvm, NULL, gpa);
        if (!pmd || pmd_none(*pmd))     /* Nothing there */
                return 0;

        if (kvm_pmd_huge(*pmd))         /* THP, HugeTLB */
                return pmd_young(*pmd);

        pte = pte_offset_kernel(pmd, gpa);
        if (!pte_none(*pte))            /* Just a page... */
                return pte_young(*pte);

        return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
        trace_kvm_age_hva(start, end);
        return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        trace_kvm_test_age_hva(hva);
        return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
@@ -68,6 +68,21 @@ TRACE_EVENT(kvm_guest_fault,
                  __entry->hxfar, __entry->vcpu_pc)
);

TRACE_EVENT(kvm_access_fault,
        TP_PROTO(unsigned long ipa),
        TP_ARGS(ipa),

        TP_STRUCT__entry(
                __field( unsigned long, ipa )
        ),

        TP_fast_assign(
                __entry->ipa = ipa;
        ),

        TP_printk("IPA: %lx", __entry->ipa)
);

TRACE_EVENT(kvm_irq_line,
        TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level),
        TP_ARGS(type, vcpu_idx, irq_num, level),
@@ -210,6 +225,39 @@ TRACE_EVENT(kvm_set_spte_hva,
        TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
);

TRACE_EVENT(kvm_age_hva,
        TP_PROTO(unsigned long start, unsigned long end),
        TP_ARGS(start, end),

        TP_STRUCT__entry(
                __field( unsigned long, start )
                __field( unsigned long, end )
        ),

        TP_fast_assign(
                __entry->start = start;
                __entry->end = end;
        ),

        TP_printk("mmu notifier age hva: %#08lx -- %#08lx",
                  __entry->start, __entry->end)
);

TRACE_EVENT(kvm_test_age_hva,
        TP_PROTO(unsigned long hva),
        TP_ARGS(hva),

        TP_STRUCT__entry(
                __field( unsigned long, hva )
        ),

        TP_fast_assign(
                __entry->hva = hva;
        ),

        TP_printk("mmu notifier test age hva: %#08lx", __entry->hva)
);

TRACE_EVENT(kvm_hvc,
        TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
        TP_ARGS(vcpu_pc, r0, imm),
@@ -90,6 +90,7 @@
#define ESR_ELx_FSC             (0x3F)
#define ESR_ELx_FSC_TYPE        (0x3C)
#define ESR_ELx_FSC_EXTABT      (0x10)
#define ESR_ELx_FSC_ACCESS      (0x08)
#define ESR_ELx_FSC_FAULT       (0x04)
#define ESR_ELx_FSC_PERM        (0x0C)
#define ESR_ELx_CV              (UL(1) << 24)
@@ -188,6 +188,7 @@

/* For compatibility with fault code shared with 32-bit */
#define FSC_FAULT       ESR_ELx_FSC_FAULT
#define FSC_ACCESS      ESR_ELx_FSC_ACCESS
#define FSC_PERM        ESR_ELx_FSC_PERM

/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
@@ -28,6 +28,8 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
#else
@@ -177,19 +179,10 @@ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

/* We do not have shadow page tables, hence the empty hooks */
static inline int kvm_age_hva(struct kvm *kvm, unsigned long start,
                              unsigned long end)
{
        return 0;
}

static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        return 0;
}

static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
                                                         unsigned long address)
{
@@ -31,28 +31,6 @@ struct kvm_decode {
        bool sign_extend;
};

/*
 * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
 * which is an anonymous type. Use our own type instead.
 */
struct kvm_exit_mmio {
        phys_addr_t phys_addr;
        u8          data[8];
        u32         len;
        bool        is_write;
        void        *private;
};

static inline void kvm_prepare_mmio(struct kvm_run *run,
                                    struct kvm_exit_mmio *mmio)
{
        run->mmio.phys_addr = mmio->phys_addr;
        run->mmio.len       = mmio->len;
        run->mmio.is_write  = mmio->is_write;
        memcpy(run->mmio.data, mmio->data, mmio->len);
        run->exit_reason    = KVM_EXIT_MMIO;
}

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
                 phys_addr_t fault_ipa);
@@ -191,6 +191,9 @@ struct kvm_arch_memory_slot {
/* Highest supported SPI, from VGIC_NR_IRQS */
#define KVM_ARM_IRQ_GIC_MAX     127

/* One single KVM irqchip, ie. the VGIC */
#define KVM_NR_IRQCHIPS         1

/* PSCI interface */
#define KVM_PSCI_FN_BASE        0x95c1ba5e
#define KVM_PSCI_FN(n)          (KVM_PSCI_FN_BASE + (n))
@@ -18,6 +18,7 @@ if VIRTUALIZATION

config KVM
        bool "Kernel-based Virtual Machine (KVM) support"
        depends on OF
        select MMU_NOTIFIER
        select PREEMPT_NOTIFIERS
        select ANON_INODES
@@ -25,10 +26,10 @@ config KVM
        select HAVE_KVM_ARCH_TLB_FLUSH_ALL
        select KVM_MMIO
        select KVM_ARM_HOST
        select KVM_ARM_VGIC
        select KVM_ARM_TIMER
        select KVM_GENERIC_DIRTYLOG_READ_PROTECT
        select SRCU
        select HAVE_KVM_EVENTFD
        select HAVE_KVM_IRQFD
        ---help---
          Support hosting virtualized guest machines.

@@ -50,17 +51,4 @@ config KVM_ARM_MAX_VCPUS
          large, so only choose a reasonable number that you expect to
          actually use.

config KVM_ARM_VGIC
        bool
        depends on KVM_ARM_HOST && OF
        select HAVE_KVM_IRQCHIP
        ---help---
          Adds support for a hardware assisted, in-kernel GIC emulation.

config KVM_ARM_TIMER
        bool
        depends on KVM_ARM_VGIC
        ---help---
          Adds support for the Architected Timers in virtual machines.

endif # VIRTUALIZATION
@@ -2,7 +2,7 @@
# Makefile for Kernel-based Virtual Machine module
#

ccflags-y += -Ivirt/kvm -Iarch/arm64/kvm
ccflags-y += -Iarch/arm64/kvm
CFLAGS_arm.o := -I.
CFLAGS_mmu.o := -I.

@@ -11,7 +11,7 @@ ARM=../../../arch/arm/kvm

obj-$(CONFIG_KVM_ARM_HOST) += kvm.o

kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o

@@ -19,11 +19,11 @@ kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o

kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3-emul.o
kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o
kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v2-switch.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v3-switch.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
@@ -16,38 +16,38 @@
        .set    push
        SET_HARDFLOAT
        cfc1    \tmp, fcr31
        swc1    $f0, THREAD_FPR0_LS64(\thread)
        swc1    $f1, THREAD_FPR1_LS64(\thread)
        swc1    $f2, THREAD_FPR2_LS64(\thread)
        swc1    $f3, THREAD_FPR3_LS64(\thread)
        swc1    $f4, THREAD_FPR4_LS64(\thread)
        swc1    $f5, THREAD_FPR5_LS64(\thread)
        swc1    $f6, THREAD_FPR6_LS64(\thread)
        swc1    $f7, THREAD_FPR7_LS64(\thread)
        swc1    $f8, THREAD_FPR8_LS64(\thread)
        swc1    $f9, THREAD_FPR9_LS64(\thread)
        swc1    $f10, THREAD_FPR10_LS64(\thread)
        swc1    $f11, THREAD_FPR11_LS64(\thread)
        swc1    $f12, THREAD_FPR12_LS64(\thread)
        swc1    $f13, THREAD_FPR13_LS64(\thread)
        swc1    $f14, THREAD_FPR14_LS64(\thread)
        swc1    $f15, THREAD_FPR15_LS64(\thread)
        swc1    $f16, THREAD_FPR16_LS64(\thread)
        swc1    $f17, THREAD_FPR17_LS64(\thread)
        swc1    $f18, THREAD_FPR18_LS64(\thread)
        swc1    $f19, THREAD_FPR19_LS64(\thread)
        swc1    $f20, THREAD_FPR20_LS64(\thread)
        swc1    $f21, THREAD_FPR21_LS64(\thread)
        swc1    $f22, THREAD_FPR22_LS64(\thread)
        swc1    $f23, THREAD_FPR23_LS64(\thread)
        swc1    $f24, THREAD_FPR24_LS64(\thread)
        swc1    $f25, THREAD_FPR25_LS64(\thread)
        swc1    $f26, THREAD_FPR26_LS64(\thread)
        swc1    $f27, THREAD_FPR27_LS64(\thread)
        swc1    $f28, THREAD_FPR28_LS64(\thread)
        swc1    $f29, THREAD_FPR29_LS64(\thread)
        swc1    $f30, THREAD_FPR30_LS64(\thread)
        swc1    $f31, THREAD_FPR31_LS64(\thread)
        swc1    $f0, THREAD_FPR0(\thread)
        swc1    $f1, THREAD_FPR1(\thread)
        swc1    $f2, THREAD_FPR2(\thread)
        swc1    $f3, THREAD_FPR3(\thread)
        swc1    $f4, THREAD_FPR4(\thread)
        swc1    $f5, THREAD_FPR5(\thread)
        swc1    $f6, THREAD_FPR6(\thread)
        swc1    $f7, THREAD_FPR7(\thread)
        swc1    $f8, THREAD_FPR8(\thread)
        swc1    $f9, THREAD_FPR9(\thread)
        swc1    $f10, THREAD_FPR10(\thread)
        swc1    $f11, THREAD_FPR11(\thread)
        swc1    $f12, THREAD_FPR12(\thread)
        swc1    $f13, THREAD_FPR13(\thread)
        swc1    $f14, THREAD_FPR14(\thread)
        swc1    $f15, THREAD_FPR15(\thread)
        swc1    $f16, THREAD_FPR16(\thread)
        swc1    $f17, THREAD_FPR17(\thread)
        swc1    $f18, THREAD_FPR18(\thread)
        swc1    $f19, THREAD_FPR19(\thread)
        swc1    $f20, THREAD_FPR20(\thread)
        swc1    $f21, THREAD_FPR21(\thread)
        swc1    $f22, THREAD_FPR22(\thread)
        swc1    $f23, THREAD_FPR23(\thread)
        swc1    $f24, THREAD_FPR24(\thread)
        swc1    $f25, THREAD_FPR25(\thread)
        swc1    $f26, THREAD_FPR26(\thread)
        swc1    $f27, THREAD_FPR27(\thread)
        swc1    $f28, THREAD_FPR28(\thread)
        swc1    $f29, THREAD_FPR29(\thread)
        swc1    $f30, THREAD_FPR30(\thread)
        swc1    $f31, THREAD_FPR31(\thread)
        sw      \tmp, THREAD_FCR31(\thread)
        .set    pop
        .endm
@@ -56,38 +56,38 @@
        .set    push
        SET_HARDFLOAT
        lw      \tmp, THREAD_FCR31(\thread)
        lwc1    $f0, THREAD_FPR0_LS64(\thread)
        lwc1    $f1, THREAD_FPR1_LS64(\thread)
        lwc1    $f2, THREAD_FPR2_LS64(\thread)
        lwc1    $f3, THREAD_FPR3_LS64(\thread)
        lwc1    $f4, THREAD_FPR4_LS64(\thread)
        lwc1    $f5, THREAD_FPR5_LS64(\thread)
        lwc1    $f6, THREAD_FPR6_LS64(\thread)
        lwc1    $f7, THREAD_FPR7_LS64(\thread)
        lwc1    $f8, THREAD_FPR8_LS64(\thread)
        lwc1    $f9, THREAD_FPR9_LS64(\thread)
        lwc1    $f10, THREAD_FPR10_LS64(\thread)
        lwc1    $f11, THREAD_FPR11_LS64(\thread)
        lwc1    $f12, THREAD_FPR12_LS64(\thread)
        lwc1    $f13, THREAD_FPR13_LS64(\thread)
        lwc1    $f14, THREAD_FPR14_LS64(\thread)
        lwc1    $f15, THREAD_FPR15_LS64(\thread)
        lwc1    $f16, THREAD_FPR16_LS64(\thread)
        lwc1    $f17, THREAD_FPR17_LS64(\thread)
        lwc1    $f18, THREAD_FPR18_LS64(\thread)
        lwc1    $f19, THREAD_FPR19_LS64(\thread)
        lwc1    $f20, THREAD_FPR20_LS64(\thread)
        lwc1    $f21, THREAD_FPR21_LS64(\thread)
        lwc1    $f22, THREAD_FPR22_LS64(\thread)
        lwc1    $f23, THREAD_FPR23_LS64(\thread)
        lwc1    $f24, THREAD_FPR24_LS64(\thread)
        lwc1    $f25, THREAD_FPR25_LS64(\thread)
        lwc1    $f26, THREAD_FPR26_LS64(\thread)
        lwc1    $f27, THREAD_FPR27_LS64(\thread)
        lwc1    $f28, THREAD_FPR28_LS64(\thread)
        lwc1    $f29, THREAD_FPR29_LS64(\thread)
        lwc1    $f30, THREAD_FPR30_LS64(\thread)
        lwc1    $f31, THREAD_FPR31_LS64(\thread)
        lwc1    $f0, THREAD_FPR0(\thread)
        lwc1    $f1, THREAD_FPR1(\thread)
        lwc1    $f2, THREAD_FPR2(\thread)
        lwc1    $f3, THREAD_FPR3(\thread)
        lwc1    $f4, THREAD_FPR4(\thread)
        lwc1    $f5, THREAD_FPR5(\thread)
        lwc1    $f6, THREAD_FPR6(\thread)
        lwc1    $f7, THREAD_FPR7(\thread)
        lwc1    $f8, THREAD_FPR8(\thread)
        lwc1    $f9, THREAD_FPR9(\thread)
        lwc1    $f10, THREAD_FPR10(\thread)
        lwc1    $f11, THREAD_FPR11(\thread)
        lwc1    $f12, THREAD_FPR12(\thread)
        lwc1    $f13, THREAD_FPR13(\thread)
        lwc1    $f14, THREAD_FPR14(\thread)
        lwc1    $f15, THREAD_FPR15(\thread)
        lwc1    $f16, THREAD_FPR16(\thread)
        lwc1    $f17, THREAD_FPR17(\thread)
        lwc1    $f18, THREAD_FPR18(\thread)
        lwc1    $f19, THREAD_FPR19(\thread)
        lwc1    $f20, THREAD_FPR20(\thread)
        lwc1    $f21, THREAD_FPR21(\thread)
        lwc1    $f22, THREAD_FPR22(\thread)
        lwc1    $f23, THREAD_FPR23(\thread)
        lwc1    $f24, THREAD_FPR24(\thread)
        lwc1    $f25, THREAD_FPR25(\thread)
        lwc1    $f26, THREAD_FPR26(\thread)
        lwc1    $f27, THREAD_FPR27(\thread)
        lwc1    $f28, THREAD_FPR28(\thread)
        lwc1    $f29, THREAD_FPR29(\thread)
        lwc1    $f30, THREAD_FPR30(\thread)
        lwc1    $f31, THREAD_FPR31(\thread)
        ctc1    \tmp, fcr31
        .set    pop
        .endm
@@ -60,22 +60,22 @@
.set push
SET_HARDFLOAT
cfc1 \tmp, fcr31
sdc1 $f0, THREAD_FPR0_LS64(\thread)
sdc1 $f2, THREAD_FPR2_LS64(\thread)
sdc1 $f4, THREAD_FPR4_LS64(\thread)
sdc1 $f6, THREAD_FPR6_LS64(\thread)
sdc1 $f8, THREAD_FPR8_LS64(\thread)
sdc1 $f10, THREAD_FPR10_LS64(\thread)
sdc1 $f12, THREAD_FPR12_LS64(\thread)
sdc1 $f14, THREAD_FPR14_LS64(\thread)
sdc1 $f16, THREAD_FPR16_LS64(\thread)
sdc1 $f18, THREAD_FPR18_LS64(\thread)
sdc1 $f20, THREAD_FPR20_LS64(\thread)
sdc1 $f22, THREAD_FPR22_LS64(\thread)
sdc1 $f24, THREAD_FPR24_LS64(\thread)
sdc1 $f26, THREAD_FPR26_LS64(\thread)
sdc1 $f28, THREAD_FPR28_LS64(\thread)
sdc1 $f30, THREAD_FPR30_LS64(\thread)
sdc1 $f0, THREAD_FPR0(\thread)
sdc1 $f2, THREAD_FPR2(\thread)
sdc1 $f4, THREAD_FPR4(\thread)
sdc1 $f6, THREAD_FPR6(\thread)
sdc1 $f8, THREAD_FPR8(\thread)
sdc1 $f10, THREAD_FPR10(\thread)
sdc1 $f12, THREAD_FPR12(\thread)
sdc1 $f14, THREAD_FPR14(\thread)
sdc1 $f16, THREAD_FPR16(\thread)
sdc1 $f18, THREAD_FPR18(\thread)
sdc1 $f20, THREAD_FPR20(\thread)
sdc1 $f22, THREAD_FPR22(\thread)
sdc1 $f24, THREAD_FPR24(\thread)
sdc1 $f26, THREAD_FPR26(\thread)
sdc1 $f28, THREAD_FPR28(\thread)
sdc1 $f30, THREAD_FPR30(\thread)
sw \tmp, THREAD_FCR31(\thread)
.set pop
.endm
@@ -84,22 +84,22 @@
.set push
.set mips64r2
SET_HARDFLOAT
sdc1 $f1, THREAD_FPR1_LS64(\thread)
sdc1 $f3, THREAD_FPR3_LS64(\thread)
sdc1 $f5, THREAD_FPR5_LS64(\thread)
sdc1 $f7, THREAD_FPR7_LS64(\thread)
sdc1 $f9, THREAD_FPR9_LS64(\thread)
sdc1 $f11, THREAD_FPR11_LS64(\thread)
sdc1 $f13, THREAD_FPR13_LS64(\thread)
sdc1 $f15, THREAD_FPR15_LS64(\thread)
sdc1 $f17, THREAD_FPR17_LS64(\thread)
sdc1 $f19, THREAD_FPR19_LS64(\thread)
sdc1 $f21, THREAD_FPR21_LS64(\thread)
sdc1 $f23, THREAD_FPR23_LS64(\thread)
sdc1 $f25, THREAD_FPR25_LS64(\thread)
sdc1 $f27, THREAD_FPR27_LS64(\thread)
sdc1 $f29, THREAD_FPR29_LS64(\thread)
sdc1 $f31, THREAD_FPR31_LS64(\thread)
sdc1 $f1, THREAD_FPR1(\thread)
sdc1 $f3, THREAD_FPR3(\thread)
sdc1 $f5, THREAD_FPR5(\thread)
sdc1 $f7, THREAD_FPR7(\thread)
sdc1 $f9, THREAD_FPR9(\thread)
sdc1 $f11, THREAD_FPR11(\thread)
sdc1 $f13, THREAD_FPR13(\thread)
sdc1 $f15, THREAD_FPR15(\thread)
sdc1 $f17, THREAD_FPR17(\thread)
sdc1 $f19, THREAD_FPR19(\thread)
sdc1 $f21, THREAD_FPR21(\thread)
sdc1 $f23, THREAD_FPR23(\thread)
sdc1 $f25, THREAD_FPR25(\thread)
sdc1 $f27, THREAD_FPR27(\thread)
sdc1 $f29, THREAD_FPR29(\thread)
sdc1 $f31, THREAD_FPR31(\thread)
.set pop
.endm

@@ -118,22 +118,22 @@
.set push
SET_HARDFLOAT
lw \tmp, THREAD_FCR31(\thread)
ldc1 $f0, THREAD_FPR0_LS64(\thread)
ldc1 $f2, THREAD_FPR2_LS64(\thread)
ldc1 $f4, THREAD_FPR4_LS64(\thread)
ldc1 $f6, THREAD_FPR6_LS64(\thread)
ldc1 $f8, THREAD_FPR8_LS64(\thread)
ldc1 $f10, THREAD_FPR10_LS64(\thread)
ldc1 $f12, THREAD_FPR12_LS64(\thread)
ldc1 $f14, THREAD_FPR14_LS64(\thread)
ldc1 $f16, THREAD_FPR16_LS64(\thread)
ldc1 $f18, THREAD_FPR18_LS64(\thread)
ldc1 $f20, THREAD_FPR20_LS64(\thread)
ldc1 $f22, THREAD_FPR22_LS64(\thread)
ldc1 $f24, THREAD_FPR24_LS64(\thread)
ldc1 $f26, THREAD_FPR26_LS64(\thread)
ldc1 $f28, THREAD_FPR28_LS64(\thread)
ldc1 $f30, THREAD_FPR30_LS64(\thread)
ldc1 $f0, THREAD_FPR0(\thread)
ldc1 $f2, THREAD_FPR2(\thread)
ldc1 $f4, THREAD_FPR4(\thread)
ldc1 $f6, THREAD_FPR6(\thread)
ldc1 $f8, THREAD_FPR8(\thread)
ldc1 $f10, THREAD_FPR10(\thread)
ldc1 $f12, THREAD_FPR12(\thread)
ldc1 $f14, THREAD_FPR14(\thread)
ldc1 $f16, THREAD_FPR16(\thread)
ldc1 $f18, THREAD_FPR18(\thread)
ldc1 $f20, THREAD_FPR20(\thread)
ldc1 $f22, THREAD_FPR22(\thread)
ldc1 $f24, THREAD_FPR24(\thread)
ldc1 $f26, THREAD_FPR26(\thread)
ldc1 $f28, THREAD_FPR28(\thread)
ldc1 $f30, THREAD_FPR30(\thread)
ctc1 \tmp, fcr31
.endm

@@ -141,22 +141,22 @@
.set push
.set mips64r2
SET_HARDFLOAT
ldc1 $f1, THREAD_FPR1_LS64(\thread)
ldc1 $f3, THREAD_FPR3_LS64(\thread)
ldc1 $f5, THREAD_FPR5_LS64(\thread)
ldc1 $f7, THREAD_FPR7_LS64(\thread)
ldc1 $f9, THREAD_FPR9_LS64(\thread)
ldc1 $f11, THREAD_FPR11_LS64(\thread)
ldc1 $f13, THREAD_FPR13_LS64(\thread)
ldc1 $f15, THREAD_FPR15_LS64(\thread)
ldc1 $f17, THREAD_FPR17_LS64(\thread)
ldc1 $f19, THREAD_FPR19_LS64(\thread)
ldc1 $f21, THREAD_FPR21_LS64(\thread)
ldc1 $f23, THREAD_FPR23_LS64(\thread)
ldc1 $f25, THREAD_FPR25_LS64(\thread)
ldc1 $f27, THREAD_FPR27_LS64(\thread)
ldc1 $f29, THREAD_FPR29_LS64(\thread)
ldc1 $f31, THREAD_FPR31_LS64(\thread)
ldc1 $f1, THREAD_FPR1(\thread)
ldc1 $f3, THREAD_FPR3(\thread)
ldc1 $f5, THREAD_FPR5(\thread)
ldc1 $f7, THREAD_FPR7(\thread)
ldc1 $f9, THREAD_FPR9(\thread)
ldc1 $f11, THREAD_FPR11(\thread)
ldc1 $f13, THREAD_FPR13(\thread)
ldc1 $f15, THREAD_FPR15(\thread)
ldc1 $f17, THREAD_FPR17(\thread)
ldc1 $f19, THREAD_FPR19(\thread)
ldc1 $f21, THREAD_FPR21(\thread)
ldc1 $f23, THREAD_FPR23(\thread)
ldc1 $f25, THREAD_FPR25(\thread)
ldc1 $f27, THREAD_FPR27(\thread)
ldc1 $f29, THREAD_FPR29(\thread)
ldc1 $f31, THREAD_FPR31(\thread)
.set pop
.endm

@@ -211,6 +211,22 @@
.endm

#ifdef TOOLCHAIN_SUPPORTS_MSA
.macro _cfcmsa rd, cs
.set push
.set mips32r2
.set msa
cfcmsa \rd, $\cs
.set pop
.endm

.macro _ctcmsa cd, rs
.set push
.set mips32r2
.set msa
ctcmsa $\cd, \rs
.set pop
.endm

.macro ld_d wd, off, base
.set push
.set mips32r2
@@ -227,35 +243,35 @@
.set pop
.endm

.macro copy_u_w rd, ws, n
.macro copy_u_w ws, n
.set push
.set mips32r2
.set msa
copy_u.w \rd, $w\ws[\n]
copy_u.w $1, $w\ws[\n]
.set pop
.endm

.macro copy_u_d rd, ws, n
.macro copy_u_d ws, n
.set push
.set mips64r2
.set msa
copy_u.d \rd, $w\ws[\n]
copy_u.d $1, $w\ws[\n]
.set pop
.endm

.macro insert_w wd, n, rs
.macro insert_w wd, n
.set push
.set mips32r2
.set msa
insert.w $w\wd[\n], \rs
insert.w $w\wd[\n], $1
.set pop
.endm

.macro insert_d wd, n, rs
.macro insert_d wd, n
.set push
.set mips64r2
.set msa
insert.d $w\wd[\n], \rs
insert.d $w\wd[\n], $1
.set pop
.endm
#else
@@ -283,7 +299,7 @@
/*
* Temporary until all toolchains in use include MSA support.
*/
.macro cfcmsa rd, cs
.macro _cfcmsa rd, cs
.set push
.set noat
SET_HARDFLOAT
@@ -293,7 +309,7 @@
.set pop
.endm

.macro ctcmsa cd, rs
.macro _ctcmsa cd, rs
.set push
.set noat
SET_HARDFLOAT
@@ -320,44 +336,36 @@
.set pop
.endm

.macro copy_u_w rd, ws, n
.macro copy_u_w ws, n
.set push
.set noat
SET_HARDFLOAT
.insn
.word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11)
/* move triggers an assembler bug... */
or \rd, $1, zero
.set pop
.endm

.macro copy_u_d rd, ws, n
.macro copy_u_d ws, n
.set push
.set noat
SET_HARDFLOAT
.insn
.word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11)
/* move triggers an assembler bug... */
or \rd, $1, zero
.set pop
.endm

.macro insert_w wd, n, rs
.macro insert_w wd, n
.set push
.set noat
SET_HARDFLOAT
/* move triggers an assembler bug... */
or $1, \rs, zero
.word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6)
.set pop
.endm

.macro insert_d wd, n, rs
.macro insert_d wd, n
.set push
.set noat
SET_HARDFLOAT
/* move triggers an assembler bug... */
or $1, \rs, zero
.word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6)
.set pop
.endm
@@ -399,7 +407,7 @@
.set push
.set noat
SET_HARDFLOAT
cfcmsa $1, MSA_CSR
_cfcmsa $1, MSA_CSR
sw $1, THREAD_MSA_CSR(\thread)
.set pop
.endm
@@ -409,7 +417,7 @@
.set noat
SET_HARDFLOAT
lw $1, THREAD_MSA_CSR(\thread)
ctcmsa MSA_CSR, $1
_ctcmsa MSA_CSR, $1
.set pop
ld_d 0, THREAD_FPR0, \thread
ld_d 1, THREAD_FPR1, \thread
@@ -452,9 +460,6 @@
insert_w \wd, 2
insert_w \wd, 3
#endif
.if 31-\wd
msa_init_upper (\wd+1)
.endif
.endm

.macro msa_init_all_upper
@@ -463,6 +468,37 @@
SET_HARDFLOAT
not $1, zero
msa_init_upper 0
msa_init_upper 1
msa_init_upper 2
msa_init_upper 3
msa_init_upper 4
msa_init_upper 5
msa_init_upper 6
msa_init_upper 7
msa_init_upper 8
msa_init_upper 9
msa_init_upper 10
msa_init_upper 11
msa_init_upper 12
msa_init_upper 13
msa_init_upper 14
msa_init_upper 15
msa_init_upper 16
msa_init_upper 17
msa_init_upper 18
msa_init_upper 19
msa_init_upper 20
msa_init_upper 21
msa_init_upper 22
msa_init_upper 23
msa_init_upper 24
msa_init_upper 25
msa_init_upper 26
msa_init_upper 27
msa_init_upper 28
msa_init_upper 29
msa_init_upper 30
msa_init_upper 31
.set pop
.endm
@@ -48,6 +48,12 @@ enum fpu_mode {
#define FPU_FR_MASK 0x1
};

#define __disable_fpu() \
do { \
clear_c0_status(ST0_CU1); \
disable_fpu_hazard(); \
} while (0)

static inline int __enable_fpu(enum fpu_mode mode)
{
int fr;
@@ -86,7 +92,12 @@ fr_common:
enable_fpu_hazard();

/* check FR has the desired value */
return (!!(read_c0_status() & ST0_FR) == !!fr) ? 0 : SIGFPE;
if (!!(read_c0_status() & ST0_FR) == !!fr)
return 0;

/* unsupported FR value */
__disable_fpu();
return SIGFPE;

default:
BUG();
@@ -95,12 +106,6 @@ fr_common:
return SIGFPE;
}

#define __disable_fpu() \
do { \
clear_c0_status(ST0_CU1); \
disable_fpu_hazard(); \
} while (0)

#define clear_fpu_owner() clear_thread_flag(TIF_USEDFPU)

static inline int __is_fpu_owner(void)
@@ -170,6 +175,7 @@ static inline void lose_fpu(int save)
}
disable_msa();
clear_thread_flag(TIF_USEDMSA);
__disable_fpu();
} else if (is_fpu_owner()) {
if (save)
_save_fp(current);
@@ -10,7 +10,8 @@ enum die_val {
DIE_RI,
DIE_PAGE_FAULT,
DIE_BREAK,
DIE_SSTEPBP
DIE_SSTEPBP,
DIE_MSAFP
};

#endif /* _ASM_MIPS_KDEBUG_H */
@@ -21,10 +21,10 @@

/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S) \
(KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))
(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S) \
(KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))
(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0)
@@ -42,11 +42,14 @@
#define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
@@ -119,6 +122,10 @@ struct kvm_vcpu_stat {
u32 syscall_exits;
u32 resvd_inst_exits;
u32 break_inst_exits;
u32 trap_inst_exits;
u32 msa_fpe_exits;
u32 fpe_exits;
u32 msa_disabled_exits;
u32 flush_dcache_exits;
u32 halt_successful_poll;
u32 halt_wakeup;
@@ -138,6 +145,10 @@ enum kvm_mips_exit_types {
SYSCALL_EXITS,
RESVD_INST_EXITS,
BREAK_INST_EXITS,
TRAP_INST_EXITS,
MSA_FPE_EXITS,
FPE_EXITS,
MSA_DISABLED_EXITS,
FLUSH_DCACHE_EXITS,
MAX_KVM_MIPS_EXIT_TYPES
};
@@ -206,6 +217,8 @@ struct mips_coproc {
#define MIPS_CP0_CONFIG1_SEL 1
#define MIPS_CP0_CONFIG2_SEL 2
#define MIPS_CP0_CONFIG3_SEL 3
#define MIPS_CP0_CONFIG4_SEL 4
#define MIPS_CP0_CONFIG5_SEL 5

/* Config0 register bits */
#define CP0C0_M 31
@@ -262,31 +275,6 @@ struct mips_coproc {
#define CP0C3_SM 1
#define CP0C3_TL 0

/* Have config1, Cacheable, noncoherent, write-back, write allocate*/
#define MIPS_CONFIG0 \
((1 << CP0C0_M) | (0x3 << CP0C0_K0))

/* Have config2, no coprocessor2 attached, no MDMX support attached,
no performance counters, watch registers present,
no code compression, EJTAG present, no FPU, no watch registers */
#define MIPS_CONFIG1 \
((1 << CP0C1_M) | \
(0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \
(0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \
(0 << CP0C1_FP))

/* Have config3, no tertiary/secondary caches implemented */
#define MIPS_CONFIG2 \
((1 << CP0C2_M))

/* No config4, no DSP ASE, no large physaddr (PABITS),
no external interrupt controller, no vectored interrupts,
no 1kb pages, no SmartMIPS ASE, no trace logic */
#define MIPS_CONFIG3 \
((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \
(0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \
(0 << CP0C3_SM) | (0 << CP0C3_TL))

/* MMU types, the first four entries have the same layout as the
CP0C0_MT field. */
enum mips_mmu_types {
@@ -321,7 +309,9 @@ enum mips_mmu_types {
*/
#define T_TRAP 13 /* Trap instruction */
#define T_VCEI 14 /* Virtual coherency exception */
#define T_MSAFPE 14 /* MSA floating point exception */
#define T_FPE 15 /* Floating point exception */
#define T_MSADIS 21 /* MSA disabled exception */
#define T_WATCH 23 /* Watch address reference */
#define T_VCED 31 /* Virtual coherency data */

@@ -374,6 +364,9 @@ struct kvm_mips_tlb {
long tlb_lo1;
};

#define KVM_MIPS_FPU_FPU 0x1
#define KVM_MIPS_FPU_MSA 0x2

#define KVM_MIPS_GUEST_TLB_SIZE 64
struct kvm_vcpu_arch {
void *host_ebase, *guest_ebase;
@@ -395,6 +388,8 @@ struct kvm_vcpu_arch {

/* FPU State */
struct mips_fpu_struct fpu;
/* Which FPU state is loaded (KVM_MIPS_FPU_*) */
unsigned int fpu_inuse;

/* COP0 State */
struct mips_coproc *cop0;
@@ -441,6 +436,9 @@ struct kvm_vcpu_arch {

/* WAIT executed */
int wait;

u8 fpu_enabled;
u8 msa_enabled;
};

@@ -482,11 +480,15 @@ struct kvm_vcpu_arch {
#define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1])
#define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2])
#define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3])
#define kvm_read_c0_guest_config4(cop0) (cop0->reg[MIPS_CP0_CONFIG][4])
#define kvm_read_c0_guest_config5(cop0) (cop0->reg[MIPS_CP0_CONFIG][5])
#define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7])
#define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
#define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
#define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
#define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
#define kvm_write_c0_guest_config4(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][4] = (val))
#define kvm_write_c0_guest_config5(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][5] = (val))
#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
@@ -567,6 +569,31 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
}

/* Helpers */

static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
return (!__builtin_constant_p(cpu_has_fpu) || cpu_has_fpu) &&
vcpu->fpu_enabled;
}

static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
{
return kvm_mips_guest_can_have_fpu(vcpu) &&
kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
}

static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
{
return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
vcpu->msa_enabled;
}

static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
{
return kvm_mips_guest_can_have_msa(vcpu) &&
kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
}

struct kvm_mips_callbacks {
int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
@@ -578,6 +605,10 @@ struct kvm_mips_callbacks {
int (*handle_syscall)(struct kvm_vcpu *vcpu);
int (*handle_res_inst)(struct kvm_vcpu *vcpu);
int (*handle_break)(struct kvm_vcpu *vcpu);
int (*handle_trap)(struct kvm_vcpu *vcpu);
int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
int (*handle_fpe)(struct kvm_vcpu *vcpu);
int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
int (*vm_init)(struct kvm *kvm);
int (*vcpu_init)(struct kvm_vcpu *vcpu);
int (*vcpu_setup)(struct kvm_vcpu *vcpu);
@@ -596,6 +627,8 @@ struct kvm_mips_callbacks {
const struct kvm_one_reg *reg, s64 *v);
int (*set_one_reg)(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg, s64 v);
int (*vcpu_get_regs)(struct kvm_vcpu *vcpu);
int (*vcpu_set_regs)(struct kvm_vcpu *vcpu);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
@@ -606,6 +639,19 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
/* Trampoline ASM routine to start running in "Guest" context */
extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);

/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_own_msa(struct kvm_vcpu *vcpu);
void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);

/* TLB handling */
uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);

@@ -711,6 +757,26 @@ extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
uint32_t *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
uint32_t *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
uint32_t *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
uint32_t *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
struct kvm_run *run);

@@ -749,6 +815,11 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst,
struct kvm_run *run,
struct kvm_vcpu *vcpu);

unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);

/* Dynamic binary translation */
extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
struct kvm_vcpu *vcpu);
@@ -105,7 +105,7 @@ union fpureg {
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# define FPR_IDX(width, idx) (idx)
#else
# define FPR_IDX(width, idx) ((FPU_REG_WIDTH / (width)) - 1 - (idx))
# define FPR_IDX(width, idx) ((idx) ^ ((64 / (width)) - 1))
#endif

#define BUILD_FPR_ACCESS(width) \
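The big-endian FPR_IDX() change above is easiest to see with concrete numbers. Once MSA widens FPU_REG_WIDTH to 128, the old formula reversed element indices across the whole 128-bit vector register, so val64[FPR_IDX(64, 0)] would land in the upper half; the new XOR form swaps only within each 64-bit doubleword, keeping FP doubles in the least significant 64 bits. A minimal standalone sketch (FPU_REG_WIDTH = 128 is an assumption for illustration, as with an MSA-capable build):

#include <stdio.h>

#define FPU_REG_WIDTH 128 /* assumed: MSA-capable configuration */

/* old big-endian formula: reverse across the whole register */
#define FPR_IDX_OLD(width, idx) ((FPU_REG_WIDTH / (width)) - 1 - (idx))
/* new big-endian formula: swap only within each 64-bit doubleword */
#define FPR_IDX_NEW(width, idx) ((idx) ^ ((64 / (width)) - 1))

int main(void)
{
	/* 64-bit element 0: old formula picks index 1 (the wrong half) */
	printf("64-bit idx 0: old=%d new=%d\n",
	       FPR_IDX_OLD(64, 0), FPR_IDX_NEW(64, 0)); /* prints old=1 new=0 */
	/* 32-bit element 0: new formula stays inside doubleword 0 */
	printf("32-bit idx 0: old=%d new=%d\n",
	       FPR_IDX_OLD(32, 0), FPR_IDX_NEW(32, 0)); /* prints old=3 new=1 */
	return 0;
}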
@@ -36,77 +36,85 @@ struct kvm_regs {

/*
* for KVM_GET_FPU and KVM_SET_FPU
*
* If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs
* are zero filled.
*/
struct kvm_fpu {
__u64 fpr[32];
__u32 fir;
__u32 fccr;
__u32 fexr;
__u32 fenr;
__u32 fcsr;
__u32 pad;
};


/*
* For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0
* For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
* registers. The id field is broken down as follows:
*
* bits[2..0] - Register 'sel' index.
* bits[7..3] - Register 'rd' index.
* bits[15..8] - Must be zero.
* bits[31..16] - 1 -> CP0 registers.
* bits[51..32] - Must be zero.
* bits[63..52] - As per linux/kvm.h
* bits[51..32] - Must be zero.
* bits[31..16] - Register set.
*
* Register set = 0: GP registers from kvm_regs (see definitions below).
*
* Register set = 1: CP0 registers.
* bits[15..8] - Must be zero.
* bits[7..3] - Register 'rd' index.
* bits[2..0] - Register 'sel' index.
*
* Register set = 2: KVM specific registers (see definitions below).
*
* Register set = 3: FPU / MSA registers (see definitions below).
*
* Other sets registers may be added in the future. Each set would
* have its own identifier in bits[31..16].
*
* The registers defined in struct kvm_regs are also accessible, the
* id values for these are below.
*/

#define KVM_REG_MIPS_R0 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0)
#define KVM_REG_MIPS_R1 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1)
#define KVM_REG_MIPS_R2 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2)
#define KVM_REG_MIPS_R3 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3)
#define KVM_REG_MIPS_R4 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4)
#define KVM_REG_MIPS_R5 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5)
#define KVM_REG_MIPS_R6 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6)
#define KVM_REG_MIPS_R7 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7)
#define KVM_REG_MIPS_R8 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8)
#define KVM_REG_MIPS_R9 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9)
#define KVM_REG_MIPS_R10 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10)
#define KVM_REG_MIPS_R11 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11)
#define KVM_REG_MIPS_R12 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12)
#define KVM_REG_MIPS_R13 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13)
#define KVM_REG_MIPS_R14 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14)
#define KVM_REG_MIPS_R15 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15)
#define KVM_REG_MIPS_R16 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16)
#define KVM_REG_MIPS_R17 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17)
#define KVM_REG_MIPS_R18 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18)
#define KVM_REG_MIPS_R19 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19)
#define KVM_REG_MIPS_R20 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20)
#define KVM_REG_MIPS_R21 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21)
#define KVM_REG_MIPS_R22 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22)
#define KVM_REG_MIPS_R23 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23)
#define KVM_REG_MIPS_R24 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24)
#define KVM_REG_MIPS_R25 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25)
#define KVM_REG_MIPS_R26 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26)
#define KVM_REG_MIPS_R27 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 27)
#define KVM_REG_MIPS_R28 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 28)
#define KVM_REG_MIPS_R29 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29)
#define KVM_REG_MIPS_R30 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30)
#define KVM_REG_MIPS_R31 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31)
#define KVM_REG_MIPS_GP (KVM_REG_MIPS | 0x0000000000000000ULL)
#define KVM_REG_MIPS_CP0 (KVM_REG_MIPS | 0x0000000000010000ULL)
#define KVM_REG_MIPS_KVM (KVM_REG_MIPS | 0x0000000000020000ULL)
#define KVM_REG_MIPS_FPU (KVM_REG_MIPS | 0x0000000000030000ULL)

#define KVM_REG_MIPS_HI (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32)
#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33)
#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34)

/* KVM specific control registers */
/*
* KVM_REG_MIPS_GP - General purpose registers from kvm_regs.
*/

#define KVM_REG_MIPS_R0 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 0)
#define KVM_REG_MIPS_R1 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 1)
#define KVM_REG_MIPS_R2 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 2)
#define KVM_REG_MIPS_R3 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 3)
#define KVM_REG_MIPS_R4 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 4)
#define KVM_REG_MIPS_R5 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 5)
#define KVM_REG_MIPS_R6 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 6)
#define KVM_REG_MIPS_R7 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 7)
#define KVM_REG_MIPS_R8 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 8)
#define KVM_REG_MIPS_R9 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 9)
#define KVM_REG_MIPS_R10 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 10)
#define KVM_REG_MIPS_R11 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 11)
#define KVM_REG_MIPS_R12 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 12)
#define KVM_REG_MIPS_R13 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 13)
#define KVM_REG_MIPS_R14 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 14)
#define KVM_REG_MIPS_R15 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 15)
#define KVM_REG_MIPS_R16 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 16)
#define KVM_REG_MIPS_R17 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 17)
#define KVM_REG_MIPS_R18 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 18)
#define KVM_REG_MIPS_R19 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 19)
#define KVM_REG_MIPS_R20 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 20)
#define KVM_REG_MIPS_R21 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 21)
#define KVM_REG_MIPS_R22 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 22)
#define KVM_REG_MIPS_R23 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 23)
#define KVM_REG_MIPS_R24 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 24)
#define KVM_REG_MIPS_R25 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 25)
#define KVM_REG_MIPS_R26 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 26)
#define KVM_REG_MIPS_R27 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 27)
#define KVM_REG_MIPS_R28 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 28)
#define KVM_REG_MIPS_R29 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 29)
#define KVM_REG_MIPS_R30 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 30)
#define KVM_REG_MIPS_R31 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 31)

#define KVM_REG_MIPS_HI (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 32)
#define KVM_REG_MIPS_LO (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 33)
#define KVM_REG_MIPS_PC (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 34)


/*
* KVM_REG_MIPS_KVM - KVM specific control registers.
*/

/*
* CP0_Count control
@@ -118,8 +126,7 @@ struct kvm_fpu {
* safely without losing time or guest timer interrupts.
* Other: Reserved, do not change.
*/
#define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
0x20000 | 0)
#define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 0)
#define KVM_REG_MIPS_COUNT_CTL_DC 0x00000001

/*
@@ -131,15 +138,46 @@ struct kvm_fpu {
* emulated.
* Modifications to times in the future are rejected.
*/
#define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
0x20000 | 1)
#define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 1)
/*
* CP0_Count rate in Hz
* Specifies the rate of the CP0_Count timer in Hz. Modifications occur without
* discontinuities in CP0_Count.
*/
#define KVM_REG_MIPS_COUNT_HZ (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
0x20000 | 2)
#define KVM_REG_MIPS_COUNT_HZ (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 2)


/*
* KVM_REG_MIPS_FPU - Floating Point and MIPS SIMD Architecture (MSA) registers.
*
* bits[15..8] - Register subset (see definitions below).
* bits[7..5] - Must be zero.
* bits[4..0] - Register number within register subset.
*/

#define KVM_REG_MIPS_FPR (KVM_REG_MIPS_FPU | 0x0000000000000000ULL)
#define KVM_REG_MIPS_FCR (KVM_REG_MIPS_FPU | 0x0000000000000100ULL)
#define KVM_REG_MIPS_MSACR (KVM_REG_MIPS_FPU | 0x0000000000000200ULL)

/*
* KVM_REG_MIPS_FPR - Floating point / Vector registers.
*/
#define KVM_REG_MIPS_FPR_32(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U32 | (n))
#define KVM_REG_MIPS_FPR_64(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U64 | (n))
#define KVM_REG_MIPS_VEC_128(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U128 | (n))

/*
* KVM_REG_MIPS_FCR - Floating point control registers.
*/
#define KVM_REG_MIPS_FCR_IR (KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 0)
#define KVM_REG_MIPS_FCR_CSR (KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 31)

/*
* KVM_REG_MIPS_MSACR - MIPS SIMD Architecture (MSA) control registers.
*/
#define KVM_REG_MIPS_MSA_IR (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 0)
#define KVM_REG_MIPS_MSA_CSR (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 1)


/*
* KVM MIPS specific structures and definitions
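As a rough illustration of the id encoding documented above, userland reaches one of the new FPU registers by OR-ing the architecture, register set, size, and index fields together and handing the kernel a pointer for the value via the ordinary KVM_GET_ONE_REG ioctl. A minimal sketch, not part of the patch set (error handling trimmed; vcpu_fd is a hypothetical, already-created vCPU file descriptor):

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Fetch one double-precision FPR through the new FPU register set. */
static int dump_fpr64(int vcpu_fd, unsigned int n)
{
	uint64_t val;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS_FPR_64(n), /* set=FPU, subset=FPR, size=U64 */
		.addr = (uintptr_t)&val,        /* kernel writes the value here */
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;
	printf("$f%u = %016llx\n", n, (unsigned long long)val);
	return 0;
}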
@@ -167,72 +167,6 @@ void output_thread_fpu_defines(void)
OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);

/* the least significant 64 bits of each FP register */
OFFSET(THREAD_FPR0_LS64, task_struct,
thread.fpu.fpr[0].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR1_LS64, task_struct,
thread.fpu.fpr[1].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR2_LS64, task_struct,
thread.fpu.fpr[2].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR3_LS64, task_struct,
thread.fpu.fpr[3].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR4_LS64, task_struct,
thread.fpu.fpr[4].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR5_LS64, task_struct,
thread.fpu.fpr[5].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR6_LS64, task_struct,
thread.fpu.fpr[6].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR7_LS64, task_struct,
thread.fpu.fpr[7].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR8_LS64, task_struct,
thread.fpu.fpr[8].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR9_LS64, task_struct,
thread.fpu.fpr[9].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR10_LS64, task_struct,
thread.fpu.fpr[10].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR11_LS64, task_struct,
thread.fpu.fpr[11].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR12_LS64, task_struct,
thread.fpu.fpr[12].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR13_LS64, task_struct,
thread.fpu.fpr[13].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR14_LS64, task_struct,
thread.fpu.fpr[14].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR15_LS64, task_struct,
thread.fpu.fpr[15].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR16_LS64, task_struct,
thread.fpu.fpr[16].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR17_LS64, task_struct,
thread.fpu.fpr[17].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR18_LS64, task_struct,
thread.fpu.fpr[18].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR19_LS64, task_struct,
thread.fpu.fpr[19].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR20_LS64, task_struct,
thread.fpu.fpr[20].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR21_LS64, task_struct,
thread.fpu.fpr[21].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR22_LS64, task_struct,
thread.fpu.fpr[22].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR23_LS64, task_struct,
thread.fpu.fpr[23].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR24_LS64, task_struct,
thread.fpu.fpr[24].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR25_LS64, task_struct,
thread.fpu.fpr[25].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR26_LS64, task_struct,
thread.fpu.fpr[26].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR27_LS64, task_struct,
thread.fpu.fpr[27].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR28_LS64, task_struct,
thread.fpu.fpr[28].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR29_LS64, task_struct,
thread.fpu.fpr[29].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR30_LS64, task_struct,
thread.fpu.fpr[30].val64[FPR_IDX(64, 0)]);
OFFSET(THREAD_FPR31_LS64, task_struct,
thread.fpu.fpr[31].val64[FPR_IDX(64, 0)]);

OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr);
BLANK();
@@ -470,6 +404,45 @@ void output_kvm_defines(void)
OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
BLANK();

OFFSET(VCPU_FPR0, kvm_vcpu_arch, fpu.fpr[0]);
OFFSET(VCPU_FPR1, kvm_vcpu_arch, fpu.fpr[1]);
OFFSET(VCPU_FPR2, kvm_vcpu_arch, fpu.fpr[2]);
OFFSET(VCPU_FPR3, kvm_vcpu_arch, fpu.fpr[3]);
OFFSET(VCPU_FPR4, kvm_vcpu_arch, fpu.fpr[4]);
OFFSET(VCPU_FPR5, kvm_vcpu_arch, fpu.fpr[5]);
OFFSET(VCPU_FPR6, kvm_vcpu_arch, fpu.fpr[6]);
OFFSET(VCPU_FPR7, kvm_vcpu_arch, fpu.fpr[7]);
OFFSET(VCPU_FPR8, kvm_vcpu_arch, fpu.fpr[8]);
OFFSET(VCPU_FPR9, kvm_vcpu_arch, fpu.fpr[9]);
OFFSET(VCPU_FPR10, kvm_vcpu_arch, fpu.fpr[10]);
OFFSET(VCPU_FPR11, kvm_vcpu_arch, fpu.fpr[11]);
OFFSET(VCPU_FPR12, kvm_vcpu_arch, fpu.fpr[12]);
OFFSET(VCPU_FPR13, kvm_vcpu_arch, fpu.fpr[13]);
OFFSET(VCPU_FPR14, kvm_vcpu_arch, fpu.fpr[14]);
OFFSET(VCPU_FPR15, kvm_vcpu_arch, fpu.fpr[15]);
OFFSET(VCPU_FPR16, kvm_vcpu_arch, fpu.fpr[16]);
OFFSET(VCPU_FPR17, kvm_vcpu_arch, fpu.fpr[17]);
OFFSET(VCPU_FPR18, kvm_vcpu_arch, fpu.fpr[18]);
OFFSET(VCPU_FPR19, kvm_vcpu_arch, fpu.fpr[19]);
OFFSET(VCPU_FPR20, kvm_vcpu_arch, fpu.fpr[20]);
OFFSET(VCPU_FPR21, kvm_vcpu_arch, fpu.fpr[21]);
OFFSET(VCPU_FPR22, kvm_vcpu_arch, fpu.fpr[22]);
OFFSET(VCPU_FPR23, kvm_vcpu_arch, fpu.fpr[23]);
OFFSET(VCPU_FPR24, kvm_vcpu_arch, fpu.fpr[24]);
OFFSET(VCPU_FPR25, kvm_vcpu_arch, fpu.fpr[25]);
OFFSET(VCPU_FPR26, kvm_vcpu_arch, fpu.fpr[26]);
OFFSET(VCPU_FPR27, kvm_vcpu_arch, fpu.fpr[27]);
OFFSET(VCPU_FPR28, kvm_vcpu_arch, fpu.fpr[28]);
OFFSET(VCPU_FPR29, kvm_vcpu_arch, fpu.fpr[29]);
OFFSET(VCPU_FPR30, kvm_vcpu_arch, fpu.fpr[30]);
OFFSET(VCPU_FPR31, kvm_vcpu_arch, fpu.fpr[31]);

OFFSET(VCPU_FCR31, kvm_vcpu_arch, fpu.fcr31);
OFFSET(VCPU_MSA_CSR, kvm_vcpu_arch, fpu.msacsr);
BLANK();

OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
@@ -360,12 +360,15 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set mips1
SET_HARDFLOAT
cfc1 a1, fcr31
li a2, ~(0x3f << 12)
and a2, a1
ctc1 a2, fcr31
.set pop
TRACE_IRQS_ON
STI
CLI
TRACE_IRQS_OFF
.endm

.macro __build_clear_msa_fpe
_cfcmsa a1, MSA_CSR
CLI
TRACE_IRQS_OFF
.endm

.macro __build_clear_ade
@@ -426,7 +429,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
BUILD_HANDLER cpu cpu sti silent /* #11 */
BUILD_HANDLER ov ov sti silent /* #12 */
BUILD_HANDLER tr tr sti silent /* #13 */
BUILD_HANDLER msa_fpe msa_fpe sti silent /* #14 */
BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent /* #14 */
BUILD_HANDLER fpe fpe fpe silent /* #15 */
BUILD_HANDLER ftlb ftlb none silent /* #16 */
BUILD_HANDLER msa msa sti silent /* #21 */
@@ -46,6 +46,26 @@
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

static void init_fp_ctx(struct task_struct *target)
{
/* If FP has been used then the target already has context */
if (tsk_used_math(target))
return;

/* Begin with data registers set to all 1s... */
memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));

/* ...and FCSR zeroed */
target->thread.fpu.fcr31 = 0;

/*
* Record that the target has "used" math, such that the context
* just initialised, and any modifications made by the caller,
* aren't discarded.
*/
set_stopped_child_used_math(target);
}

/*
* Called by kernel/ptrace.c when detaching..
*
@@ -142,6 +162,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
if (!access_ok(VERIFY_READ, data, 33 * 8))
return -EIO;

init_fp_ctx(child);
fregs = get_fpu_regs(child);

for (i = 0; i < 32; i++) {
@@ -439,6 +460,8 @@ static int fpr_set(struct task_struct *target,

/* XXX fcr31 */

init_fp_ctx(target);

if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpu,
@@ -660,12 +683,7 @@ long arch_ptrace(struct task_struct *child, long request,
case FPR_BASE ... FPR_BASE + 31: {
union fpureg *fregs = get_fpu_regs(child);

if (!tsk_used_math(child)) {
/* FP not yet used */
memset(&child->thread.fpu, ~0,
sizeof(child->thread.fpu));
child->thread.fpu.fcr31 = 0;
}
init_fp_ctx(child);
#ifdef CONFIG_32BIT
if (test_thread_flag(TIF_32BIT_FPREGS)) {
/*
@@ -34,7 +34,6 @@
.endm

.set noreorder
.set MIPS_ISA_ARCH_LEVEL_RAW

LEAF(_save_fp_context)
.set push
@@ -103,6 +102,7 @@ LEAF(_save_fp_context)
/* Save 32-bit process floating point context */
LEAF(_save_fp_context32)
.set push
.set MIPS_ISA_ARCH_LEVEL_RAW
SET_HARDFLOAT
cfc1 t1, fcr31
@@ -701,6 +701,13 @@ asmlinkage void do_ov(struct pt_regs *regs)

int process_fpemu_return(int sig, void __user *fault_addr)
{
/*
* We can't allow the emulated instruction to leave any of the cause
* bits set in FCSR. If they were then the kernel would take an FP
* exception when restoring FP context.
*/
current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

if (sig == SIGSEGV || sig == SIGBUS) {
struct siginfo si = {0};
si.si_addr = fault_addr;
@@ -781,6 +788,11 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
SIGFPE) == NOTIFY_STOP)
goto out;

/* Clear FCSR.Cause before enabling interrupts */
write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
local_irq_enable();

die_if_kernel("FP exception in kernel code", regs);

if (fcr31 & FPU_CSR_UNI_X) {
@@ -804,18 +816,12 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);

/*
* We can't allow the emulated instruction to leave any of
* the cause bit set in $fcr31.
*/
current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
/* If something went wrong, signal */
process_fpemu_return(sig, fault_addr);

/* Restore the hardware register state */
own_fpu(1); /* Using the FPU again. */

/* If something went wrong, signal */
process_fpemu_return(sig, fault_addr);

goto out;
} else if (fcr31 & FPU_CSR_INV_X)
info.si_code = FPE_FLTINV;
@@ -1392,13 +1398,22 @@ out:
exception_exit(prev_state);
}

asmlinkage void do_msa_fpe(struct pt_regs *regs)
asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
enum ctx_state prev_state;

prev_state = exception_enter();
if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP)
goto out;

/* Clear MSACSR.Cause before enabling interrupts */
write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
local_irq_enable();

die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
force_sig(SIGFPE, current);
out:
exception_exit(prev_state);
}
@@ -1,13 +1,15 @@
# Makefile for KVM support for MIPS
#

common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)

EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm

kvm-objs := $(common-objs) mips.o emulate.o locore.o \
common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o

kvm-objs := $(common-objs-y) mips.o emulate.o locore.o \
interrupt.o stats.o commpage.o \
dyntrans.o trap_emul.o
dyntrans.o trap_emul.o fpu.o

obj-$(CONFIG_KVM) += kvm.o
obj-y += callback.o tlb.o
@ -884,6 +884,84 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
|
||||
return EMULATE_DONE;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
|
||||
* @vcpu: Virtual CPU.
|
||||
*
|
||||
* Finds the mask of bits which are writable in the guest's Config1 CP0
|
||||
* register, by userland (currently read-only to the guest).
|
||||
*/
|
||||
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned int mask = 0;
|
||||
|
||||
/* Permit FPU to be present if FPU is supported */
|
||||
if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
|
||||
mask |= MIPS_CONF1_FP;
|
||||
|
||||
return mask;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
|
||||
* @vcpu: Virtual CPU.
|
||||
*
|
||||
* Finds the mask of bits which are writable in the guest's Config3 CP0
|
||||
* register, by userland (currently read-only to the guest).
|
||||
*/
|
||||
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
/* Config4 is optional */
|
||||
unsigned int mask = MIPS_CONF_M;
|
||||
|
||||
/* Permit MSA to be present if MSA is supported */
|
||||
if (kvm_mips_guest_can_have_msa(&vcpu->arch))
|
||||
mask |= MIPS_CONF3_MSA;
|
||||
|
||||
return mask;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
|
||||
* @vcpu: Virtual CPU.
|
||||
*
|
||||
* Finds the mask of bits which are writable in the guest's Config4 CP0
|
||||
* register, by userland (currently read-only to the guest).
|
||||
*/
|
||||
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
/* Config5 is optional */
|
||||
return MIPS_CONF_M;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
|
||||
* @vcpu: Virtual CPU.
|
||||
*
|
||||
* Finds the mask of bits which are writable in the guest's Config5 CP0
|
||||
* register, by the guest itself.
|
||||
*/
|
||||
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned int mask = 0;
|
||||
|
||||
/* Permit MSAEn changes if MSA supported and enabled */
|
||||
if (kvm_mips_guest_has_msa(&vcpu->arch))
|
||||
mask |= MIPS_CONF5_MSAEN;
|
||||
|
||||
/*
|
||||
* Permit guest FPU mode changes if FPU is enabled and the relevant
|
||||
* feature exists according to FIR register.
|
||||
*/
|
||||
if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
|
||||
if (cpu_has_fre)
|
||||
mask |= MIPS_CONF5_FRE;
|
||||
/* We don't support UFR or UFE */
|
||||
}
|
||||
|
||||
return mask;
|
||||
}
|
||||
|
||||
enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
|
||||
uint32_t cause, struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu)
|
||||
@ -1021,18 +1099,114 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
|
||||
kvm_mips_write_compare(vcpu,
|
||||
vcpu->arch.gprs[rt]);
|
||||
} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
|
||||
kvm_write_c0_guest_status(cop0,
|
||||
vcpu->arch.gprs[rt]);
|
||||
unsigned int old_val, val, change;
|
||||
|
||||
old_val = kvm_read_c0_guest_status(cop0);
|
||||
val = vcpu->arch.gprs[rt];
|
||||
change = val ^ old_val;
|
||||
|
||||
/* Make sure that the NMI bit is never set */
|
||||
val &= ~ST0_NMI;
|
||||
|
||||
/*
|
||||
* Make sure that CU1 and NMI bits are
|
||||
* never set
|
||||
* Don't allow CU1 or FR to be set unless FPU
|
||||
* capability enabled and exists in guest
|
||||
* configuration.
|
||||
*/
|
||||
kvm_clear_c0_guest_status(cop0,
|
||||
(ST0_CU1 | ST0_NMI));
|
||||
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
|
||||
val &= ~(ST0_CU1 | ST0_FR);
|
||||
|
||||
/*
|
||||
* Also don't allow FR to be set if host doesn't
|
||||
* support it.
|
||||
*/
|
||||
if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
|
||||
val &= ~ST0_FR;
|
||||
|
||||
|
||||
/* Handle changes in FPU mode */
|
||||
preempt_disable();
|
||||
|
||||
/*
|
||||
* FPU and Vector register state is made
|
||||
* UNPREDICTABLE by a change of FR, so don't
|
||||
* even bother saving it.
|
||||
*/
|
||||
if (change & ST0_FR)
|
||||
kvm_drop_fpu(vcpu);
|
||||
|
||||
/*
|
||||
* If MSA state is already live, it is undefined
|
||||
* how it interacts with FR=0 FPU state, and we
|
||||
* don't want to hit reserved instruction
|
||||
* exceptions trying to save the MSA state later
|
||||
* when CU=1 && FR=1, so play it safe and save
|
||||
* it first.
|
||||
*/
|
||||
if (change & ST0_CU1 && !(val & ST0_FR) &&
|
||||
vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
|
||||
kvm_lose_fpu(vcpu);
|
||||
|
||||
/*
|
||||
* Propagate CU1 (FPU enable) changes
|
||||
* immediately if the FPU context is already
|
||||
* loaded. When disabling we leave the context
|
||||
* loaded so it can be quickly enabled again in
|
||||
* the near future.
|
||||
*/
|
||||
if (change & ST0_CU1 &&
|
||||
vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
|
||||
change_c0_status(ST0_CU1, val);
|
||||
|
||||
preempt_enable();
|
||||
|
||||
kvm_write_c0_guest_status(cop0, val);
|
||||
|
||||
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
|
||||
kvm_mips_trans_mtc0(inst, opc, vcpu);
|
||||
/*
|
||||
* If FPU present, we need CU1/FR bits to take
|
||||
* effect fairly soon.
|
||||
*/
|
||||
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
|
||||
kvm_mips_trans_mtc0(inst, opc, vcpu);
|
||||
#endif
|
||||
} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
|
||||
unsigned int old_val, val, change, wrmask;
|
||||
|
||||
old_val = kvm_read_c0_guest_config5(cop0);
|
||||
val = vcpu->arch.gprs[rt];
|
||||
|
||||
/* Only a few bits are writable in Config5 */
|
||||
wrmask = kvm_mips_config5_wrmask(vcpu);
|
||||
change = (val ^ old_val) & wrmask;
|
||||
val = old_val ^ change;
|
||||
|
||||
|
||||
/* Handle changes in FPU/MSA modes */
|
||||
preempt_disable();
|
||||
|
||||
/*
|
||||
* Propagate FRE changes immediately if the FPU
|
||||
* context is already loaded.
|
||||
*/
|
||||
if (change & MIPS_CONF5_FRE &&
|
||||
vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
|
||||
change_c0_config5(MIPS_CONF5_FRE, val);
|
||||
|
||||
/*
|
||||
* Propagate MSAEn changes immediately if the
|
||||
* MSA context is already loaded. When disabling
|
||||
* we leave the context loaded so it can be
|
||||
* quickly enabled again in the near future.
|
||||
*/
|
||||
if (change & MIPS_CONF5_MSAEN &&
|
            vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
            change_c0_config5(MIPS_CONF5_MSAEN,
                      val);

        preempt_enable();

        kvm_write_c0_guest_config5(cop0, val);
    } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
        uint32_t old_cause, new_cause;

@@ -1970,6 +2144,146 @@ enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
    return er;
}

enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
                        uint32_t *opc,
                        struct kvm_run *run,
                        struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct kvm_vcpu_arch *arch = &vcpu->arch;
    enum emulation_result er = EMULATE_DONE;

    if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
        /* save old pc */
        kvm_write_c0_guest_epc(cop0, arch->pc);
        kvm_set_c0_guest_status(cop0, ST0_EXL);

        if (cause & CAUSEF_BD)
            kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
        else
            kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

        kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);

        kvm_change_c0_guest_cause(cop0, (0xff),
                      (T_TRAP << CAUSEB_EXCCODE));

        /* Set PC to the exception entry point */
        arch->pc = KVM_GUEST_KSEG0 + 0x180;

    } else {
        kvm_err("Trying to deliver TRAP when EXL is already set\n");
        er = EMULATE_FAIL;
    }

    return er;
}

enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
                          uint32_t *opc,
                          struct kvm_run *run,
                          struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct kvm_vcpu_arch *arch = &vcpu->arch;
    enum emulation_result er = EMULATE_DONE;

    if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
        /* save old pc */
        kvm_write_c0_guest_epc(cop0, arch->pc);
        kvm_set_c0_guest_status(cop0, ST0_EXL);

        if (cause & CAUSEF_BD)
            kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
        else
            kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

        kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);

        kvm_change_c0_guest_cause(cop0, (0xff),
                      (T_MSAFPE << CAUSEB_EXCCODE));

        /* Set PC to the exception entry point */
        arch->pc = KVM_GUEST_KSEG0 + 0x180;

    } else {
        kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
        er = EMULATE_FAIL;
    }

    return er;
}

enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
                           uint32_t *opc,
                           struct kvm_run *run,
                           struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct kvm_vcpu_arch *arch = &vcpu->arch;
    enum emulation_result er = EMULATE_DONE;

    if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
        /* save old pc */
        kvm_write_c0_guest_epc(cop0, arch->pc);
        kvm_set_c0_guest_status(cop0, ST0_EXL);

        if (cause & CAUSEF_BD)
            kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
        else
            kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

        kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);

        kvm_change_c0_guest_cause(cop0, (0xff),
                      (T_FPE << CAUSEB_EXCCODE));

        /* Set PC to the exception entry point */
        arch->pc = KVM_GUEST_KSEG0 + 0x180;

    } else {
        kvm_err("Trying to deliver FPE when EXL is already set\n");
        er = EMULATE_FAIL;
    }

    return er;
}

enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
                          uint32_t *opc,
                          struct kvm_run *run,
                          struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct kvm_vcpu_arch *arch = &vcpu->arch;
    enum emulation_result er = EMULATE_DONE;

    if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
        /* save old pc */
        kvm_write_c0_guest_epc(cop0, arch->pc);
        kvm_set_c0_guest_status(cop0, ST0_EXL);

        if (cause & CAUSEF_BD)
            kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
        else
            kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

        kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);

        kvm_change_c0_guest_cause(cop0, (0xff),
                      (T_MSADIS << CAUSEB_EXCCODE));

        /* Set PC to the exception entry point */
        arch->pc = KVM_GUEST_KSEG0 + 0x180;

    } else {
        kvm_err("Trying to deliver MSADIS when EXL is already set\n");
        er = EMULATE_FAIL;
    }

    return er;
}
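
The four delivery routines above (TRAP, MSAFPE, FPE, MSADIS) differ only in the exception code they set and the string they log. A hedged editorial sketch of the shared pattern, factored into one helper; kvm_mips_deliver_exc is hypothetical, everything it calls is used verbatim above:

/* Editorial sketch, not part of the patch: the common delivery pattern. */
static enum emulation_result kvm_mips_deliver_exc(unsigned long cause,
                                                  uint32_t exccode,
                                                  const char *name,
                                                  struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct kvm_vcpu_arch *arch = &vcpu->arch;

    if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
        kvm_err("Trying to deliver %s when EXL is already set\n", name);
        return EMULATE_FAIL;
    }

    kvm_write_c0_guest_epc(cop0, arch->pc);     /* save old pc */
    kvm_set_c0_guest_status(cop0, ST0_EXL);
    if (cause & CAUSEF_BD)                      /* preserve branch-delay bit */
        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
    else
        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
    kvm_change_c0_guest_cause(cop0, 0xff, exccode << CAUSEB_EXCCODE);
    arch->pc = KVM_GUEST_KSEG0 + 0x180;         /* guest exception vector */
    return EMULATE_DONE;
}

A caller would then read e.g. kvm_mips_deliver_exc(cause, T_TRAP, "TRAP", vcpu).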
/* ll/sc, rdhwr, sync emulation */

#define OPCODE 0xfc000000
@@ -2176,6 +2490,10 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
    case T_SYSCALL:
    case T_BREAK:
    case T_RES_INST:
    case T_TRAP:
    case T_MSAFPE:
    case T_FPE:
    case T_MSADIS:
        break;

    case T_COP_UNUSABLE:
arch/mips/kvm/fpu.S (new file, 122 lines)
@@ -0,0 +1,122 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * FPU context handling code for KVM.
 *
 * Copyright (C) 2015 Imagination Technologies Ltd.
 */

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>

    .set    noreorder
    .set    noat

LEAF(__kvm_save_fpu)
    .set    push
    .set    mips64r2
    SET_HARDFLOAT
    mfc0    t0, CP0_STATUS
    sll     t0, t0, 5           # is Status.FR set?
    bgez    t0, 1f              # no: skip odd doubles
     nop
    sdc1    $f1,  VCPU_FPR1(a0)
    sdc1    $f3,  VCPU_FPR3(a0)
    sdc1    $f5,  VCPU_FPR5(a0)
    sdc1    $f7,  VCPU_FPR7(a0)
    sdc1    $f9,  VCPU_FPR9(a0)
    sdc1    $f11, VCPU_FPR11(a0)
    sdc1    $f13, VCPU_FPR13(a0)
    sdc1    $f15, VCPU_FPR15(a0)
    sdc1    $f17, VCPU_FPR17(a0)
    sdc1    $f19, VCPU_FPR19(a0)
    sdc1    $f21, VCPU_FPR21(a0)
    sdc1    $f23, VCPU_FPR23(a0)
    sdc1    $f25, VCPU_FPR25(a0)
    sdc1    $f27, VCPU_FPR27(a0)
    sdc1    $f29, VCPU_FPR29(a0)
    sdc1    $f31, VCPU_FPR31(a0)
1:  sdc1    $f0,  VCPU_FPR0(a0)
    sdc1    $f2,  VCPU_FPR2(a0)
    sdc1    $f4,  VCPU_FPR4(a0)
    sdc1    $f6,  VCPU_FPR6(a0)
    sdc1    $f8,  VCPU_FPR8(a0)
    sdc1    $f10, VCPU_FPR10(a0)
    sdc1    $f12, VCPU_FPR12(a0)
    sdc1    $f14, VCPU_FPR14(a0)
    sdc1    $f16, VCPU_FPR16(a0)
    sdc1    $f18, VCPU_FPR18(a0)
    sdc1    $f20, VCPU_FPR20(a0)
    sdc1    $f22, VCPU_FPR22(a0)
    sdc1    $f24, VCPU_FPR24(a0)
    sdc1    $f26, VCPU_FPR26(a0)
    sdc1    $f28, VCPU_FPR28(a0)
    jr      ra
     sdc1   $f30, VCPU_FPR30(a0)
    .set    pop
END(__kvm_save_fpu)

LEAF(__kvm_restore_fpu)
    .set    push
    .set    mips64r2
    SET_HARDFLOAT
    mfc0    t0, CP0_STATUS
    sll     t0, t0, 5           # is Status.FR set?
    bgez    t0, 1f              # no: skip odd doubles
     nop
    ldc1    $f1,  VCPU_FPR1(a0)
    ldc1    $f3,  VCPU_FPR3(a0)
    ldc1    $f5,  VCPU_FPR5(a0)
    ldc1    $f7,  VCPU_FPR7(a0)
    ldc1    $f9,  VCPU_FPR9(a0)
    ldc1    $f11, VCPU_FPR11(a0)
    ldc1    $f13, VCPU_FPR13(a0)
    ldc1    $f15, VCPU_FPR15(a0)
    ldc1    $f17, VCPU_FPR17(a0)
    ldc1    $f19, VCPU_FPR19(a0)
    ldc1    $f21, VCPU_FPR21(a0)
    ldc1    $f23, VCPU_FPR23(a0)
    ldc1    $f25, VCPU_FPR25(a0)
    ldc1    $f27, VCPU_FPR27(a0)
    ldc1    $f29, VCPU_FPR29(a0)
    ldc1    $f31, VCPU_FPR31(a0)
1:  ldc1    $f0,  VCPU_FPR0(a0)
    ldc1    $f2,  VCPU_FPR2(a0)
    ldc1    $f4,  VCPU_FPR4(a0)
    ldc1    $f6,  VCPU_FPR6(a0)
    ldc1    $f8,  VCPU_FPR8(a0)
    ldc1    $f10, VCPU_FPR10(a0)
    ldc1    $f12, VCPU_FPR12(a0)
    ldc1    $f14, VCPU_FPR14(a0)
    ldc1    $f16, VCPU_FPR16(a0)
    ldc1    $f18, VCPU_FPR18(a0)
    ldc1    $f20, VCPU_FPR20(a0)
    ldc1    $f22, VCPU_FPR22(a0)
    ldc1    $f24, VCPU_FPR24(a0)
    ldc1    $f26, VCPU_FPR26(a0)
    ldc1    $f28, VCPU_FPR28(a0)
    jr      ra
     ldc1   $f30, VCPU_FPR30(a0)
    .set    pop
END(__kvm_restore_fpu)

LEAF(__kvm_restore_fcsr)
    .set    push
    SET_HARDFLOAT
    lw      t0, VCPU_FCR31(a0)
    /*
     * The ctc1 must stay at this offset in __kvm_restore_fcsr.
     * See kvm_mips_csr_die_notify() which handles t0 containing a value
     * which triggers an FP Exception, which must be stepped over and
     * ignored since the set cause bits must remain there for the guest.
     */
    ctc1    t0, fcr31
    jr      ra
     nop
    .set    pop
END(__kvm_restore_fcsr)
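
Worth noting how both save and restore test Status.FR: sll t0, t0, 5 moves bit 26 (Status.FR) into the sign bit, so the bgez branch skips the odd-numbered doubles exactly when FR is clear and only the sixteen even doubles exist. A minimal C rendering of the same test (editorial; the ST0_FR value is inferred from the shift count):

/* Editorial sketch: the Status.FR test from __kvm_save_fpu, in C. */
#include <stdbool.h>
#include <stdint.h>

#define ST0_FR (1u << 26) /* Status.FR: 1 => 32 doubles, 0 => 16 even doubles */

static bool status_fr_set(uint32_t c0_status)
{
    /* shifting left by 5 puts bit 26 in the sign bit; bgez means "FR clear" */
    return (int32_t)(c0_status << 5) < 0;
}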
arch/mips/kvm/locore.S
@@ -36,6 +36,8 @@
#define PT_HOST_USERLOCAL   PT_EPC

#define CP0_DDATA_LO        $28,3
#define CP0_CONFIG3         $16,3
#define CP0_CONFIG5         $16,5
#define CP0_EBASE           $15,1

#define CP0_INTCTL          $12,1
@@ -353,6 +355,42 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
    LONG_L  k0, VCPU_HOST_EBASE(k1)
    mtc0    k0, CP0_EBASE

    /*
     * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't
     * trigger FPE for pending exceptions.
     */
    .set    at
    and     v1, v0, ST0_CU1
    beqz    v1, 1f
     nop
    .set    push
    SET_HARDFLOAT
    cfc1    t0, fcr31
    sw      t0, VCPU_FCR31(k1)
    ctc1    zero, fcr31
    .set    pop
    .set    noat
1:

#ifdef CONFIG_CPU_HAS_MSA
    /*
     * If MSA is enabled, save MSACSR and clear it so that later
     * instructions don't trigger MSAFPE for pending exceptions.
     */
    mfc0    t0, CP0_CONFIG3
    ext     t0, t0, 28, 1   /* MIPS_CONF3_MSAP */
    beqz    t0, 1f
     nop
    mfc0    t0, CP0_CONFIG5
    ext     t0, t0, 27, 1   /* MIPS_CONF5_MSAEN */
    beqz    t0, 1f
     nop
    _cfcmsa t0, MSA_CSR
    sw      t0, VCPU_MSA_CSR(k1)
    _ctcmsa MSA_CSR, zero
1:
#endif

    /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
    .set    at
    and     v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
arch/mips/kvm/mips.c
@@ -11,6 +11,7 @@

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
@@ -48,6 +49,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
    { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
    { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
    { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
    { "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU },
    { "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU },
    { "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
    { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
    { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
    { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
    { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
@@ -504,10 +509,13 @@ static u64 kvm_mips_get_one_regs[] = {
    KVM_REG_MIPS_CP0_STATUS,
    KVM_REG_MIPS_CP0_CAUSE,
    KVM_REG_MIPS_CP0_EPC,
    KVM_REG_MIPS_CP0_PRID,
    KVM_REG_MIPS_CP0_CONFIG,
    KVM_REG_MIPS_CP0_CONFIG1,
    KVM_REG_MIPS_CP0_CONFIG2,
    KVM_REG_MIPS_CP0_CONFIG3,
    KVM_REG_MIPS_CP0_CONFIG4,
    KVM_REG_MIPS_CP0_CONFIG5,
    KVM_REG_MIPS_CP0_CONFIG7,
    KVM_REG_MIPS_CP0_ERROREPC,

@@ -520,10 +528,14 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
                const struct kvm_one_reg *reg)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
    int ret;
    s64 v;
    s64 vs[2];
    unsigned int idx;

    switch (reg->id) {
    /* General purpose registers */
    case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
        v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
        break;
@@ -537,6 +549,67 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
        v = (long)vcpu->arch.pc;
        break;

    /* Floating point registers */
    case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
        if (!kvm_mips_guest_has_fpu(&vcpu->arch))
            return -EINVAL;
        idx = reg->id - KVM_REG_MIPS_FPR_32(0);
        /* Odd singles in top of even double when FR=0 */
        if (kvm_read_c0_guest_status(cop0) & ST0_FR)
            v = get_fpr32(&fpu->fpr[idx], 0);
        else
            v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
        break;
    case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
        if (!kvm_mips_guest_has_fpu(&vcpu->arch))
            return -EINVAL;
        idx = reg->id - KVM_REG_MIPS_FPR_64(0);
        /* Can't access odd doubles in FR=0 mode */
        if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
            return -EINVAL;
        v = get_fpr64(&fpu->fpr[idx], 0);
        break;
    case KVM_REG_MIPS_FCR_IR:
        if (!kvm_mips_guest_has_fpu(&vcpu->arch))
            return -EINVAL;
        v = boot_cpu_data.fpu_id;
        break;
    case KVM_REG_MIPS_FCR_CSR:
        if (!kvm_mips_guest_has_fpu(&vcpu->arch))
            return -EINVAL;
        v = fpu->fcr31;
        break;

    /* MIPS SIMD Architecture (MSA) registers */
    case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
        if (!kvm_mips_guest_has_msa(&vcpu->arch))
            return -EINVAL;
        /* Can't access MSA registers in FR=0 mode */
        if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
            return -EINVAL;
        idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
        /* least significant byte first */
        vs[0] = get_fpr64(&fpu->fpr[idx], 0);
        vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
        /* most significant byte first */
        vs[0] = get_fpr64(&fpu->fpr[idx], 1);
        vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
        break;
    case KVM_REG_MIPS_MSA_IR:
        if (!kvm_mips_guest_has_msa(&vcpu->arch))
            return -EINVAL;
        v = boot_cpu_data.msa_id;
        break;
    case KVM_REG_MIPS_MSA_CSR:
        if (!kvm_mips_guest_has_msa(&vcpu->arch))
            return -EINVAL;
        v = fpu->msacsr;
        break;

    /* Co-processor 0 registers */
    case KVM_REG_MIPS_CP0_INDEX:
        v = (long)kvm_read_c0_guest_index(cop0);
        break;
@@ -573,8 +646,8 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
    case KVM_REG_MIPS_CP0_EPC:
        v = (long)kvm_read_c0_guest_epc(cop0);
        break;
    case KVM_REG_MIPS_CP0_ERROREPC:
        v = (long)kvm_read_c0_guest_errorepc(cop0);
    case KVM_REG_MIPS_CP0_PRID:
        v = (long)kvm_read_c0_guest_prid(cop0);
        break;
    case KVM_REG_MIPS_CP0_CONFIG:
        v = (long)kvm_read_c0_guest_config(cop0);
@@ -588,9 +661,18 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
    case KVM_REG_MIPS_CP0_CONFIG3:
        v = (long)kvm_read_c0_guest_config3(cop0);
        break;
    case KVM_REG_MIPS_CP0_CONFIG4:
        v = (long)kvm_read_c0_guest_config4(cop0);
        break;
    case KVM_REG_MIPS_CP0_CONFIG5:
        v = (long)kvm_read_c0_guest_config5(cop0);
        break;
    case KVM_REG_MIPS_CP0_CONFIG7:
        v = (long)kvm_read_c0_guest_config7(cop0);
        break;
    case KVM_REG_MIPS_CP0_ERROREPC:
        v = (long)kvm_read_c0_guest_errorepc(cop0);
        break;
    /* registers to be handled specially */
    case KVM_REG_MIPS_CP0_COUNT:
    case KVM_REG_MIPS_COUNT_CTL:
@@ -612,6 +694,10 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
        u32 v32 = (u32)v;

        return put_user(v32, uaddr32);
    } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
        void __user *uaddr = (void __user *)(long)reg->addr;

        return copy_to_user(uaddr, vs, 16);
    } else {
        return -EINVAL;
    }
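
The new FPR, FCR and MSA IDs are reached through the usual ONE_REG calls. A hedged userspace sketch reading guest $f0 as a 64-bit double (assumes KVM_REG_MIPS_FPR_64() is exported by the MIPS uapi headers; note the handler above rejects odd indices while the guest runs with FR=0):

/* Hypothetical userspace sketch; vcpu_fd is an open KVM vcpu descriptor. */
#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int read_guest_fpr0(int vcpu_fd, uint64_t *val)
{
    struct kvm_one_reg reg = {
        .id   = KVM_REG_MIPS_FPR_64(0),
        .addr = (uintptr_t)val,
    };

    return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}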
@@ -621,7 +707,10 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
                const struct kvm_one_reg *reg)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    u64 v;
    struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
    s64 v;
    s64 vs[2];
    unsigned int idx;

    if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
        u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
@@ -635,11 +724,16 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
        if (get_user(v32, uaddr32) != 0)
            return -EFAULT;
        v = (s64)v32;
    } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
        void __user *uaddr = (void __user *)(long)reg->addr;

        return copy_from_user(vs, uaddr, 16);
    } else {
        return -EINVAL;
    }

    switch (reg->id) {
    /* General purpose registers */
    case KVM_REG_MIPS_R0:
        /* Silently ignore requests to set $0 */
        break;
@@ -656,6 +750,64 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
        vcpu->arch.pc = v;
        break;

    /* Floating point registers */
    case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
        if (!kvm_mips_guest_has_fpu(&vcpu->arch))
            return -EINVAL;
        idx = reg->id - KVM_REG_MIPS_FPR_32(0);
        /* Odd singles in top of even double when FR=0 */
        if (kvm_read_c0_guest_status(cop0) & ST0_FR)
            set_fpr32(&fpu->fpr[idx], 0, v);
        else
            set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
        break;
    case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
        if (!kvm_mips_guest_has_fpu(&vcpu->arch))
            return -EINVAL;
        idx = reg->id - KVM_REG_MIPS_FPR_64(0);
        /* Can't access odd doubles in FR=0 mode */
        if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
            return -EINVAL;
        set_fpr64(&fpu->fpr[idx], 0, v);
        break;
    case KVM_REG_MIPS_FCR_IR:
        if (!kvm_mips_guest_has_fpu(&vcpu->arch))
            return -EINVAL;
        /* Read-only */
        break;
    case KVM_REG_MIPS_FCR_CSR:
        if (!kvm_mips_guest_has_fpu(&vcpu->arch))
            return -EINVAL;
        fpu->fcr31 = v;
        break;

    /* MIPS SIMD Architecture (MSA) registers */
    case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
        if (!kvm_mips_guest_has_msa(&vcpu->arch))
            return -EINVAL;
        idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
        /* least significant byte first */
        set_fpr64(&fpu->fpr[idx], 0, vs[0]);
        set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
        /* most significant byte first */
        set_fpr64(&fpu->fpr[idx], 1, vs[0]);
        set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif
        break;
    case KVM_REG_MIPS_MSA_IR:
        if (!kvm_mips_guest_has_msa(&vcpu->arch))
            return -EINVAL;
        /* Read-only */
        break;
    case KVM_REG_MIPS_MSA_CSR:
        if (!kvm_mips_guest_has_msa(&vcpu->arch))
            return -EINVAL;
        fpu->msacsr = v;
        break;

    /* Co-processor 0 registers */
    case KVM_REG_MIPS_CP0_INDEX:
        kvm_write_c0_guest_index(cop0, v);
        break;
@@ -686,6 +838,9 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
    case KVM_REG_MIPS_CP0_EPC:
        kvm_write_c0_guest_epc(cop0, v);
        break;
    case KVM_REG_MIPS_CP0_PRID:
        kvm_write_c0_guest_prid(cop0, v);
        break;
    case KVM_REG_MIPS_CP0_ERROREPC:
        kvm_write_c0_guest_errorepc(cop0, v);
        break;
@@ -693,6 +848,12 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
    case KVM_REG_MIPS_CP0_COUNT:
    case KVM_REG_MIPS_CP0_COMPARE:
    case KVM_REG_MIPS_CP0_CAUSE:
    case KVM_REG_MIPS_CP0_CONFIG:
    case KVM_REG_MIPS_CP0_CONFIG1:
    case KVM_REG_MIPS_CP0_CONFIG2:
    case KVM_REG_MIPS_CP0_CONFIG3:
    case KVM_REG_MIPS_CP0_CONFIG4:
    case KVM_REG_MIPS_CP0_CONFIG5:
    case KVM_REG_MIPS_COUNT_CTL:
    case KVM_REG_MIPS_COUNT_RESUME:
    case KVM_REG_MIPS_COUNT_HZ:
@@ -703,6 +864,33 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
    return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                     struct kvm_enable_cap *cap)
{
    int r = 0;

    if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
        return -EINVAL;
    if (cap->flags)
        return -EINVAL;
    if (cap->args[0])
        return -EINVAL;

    switch (cap->cap) {
    case KVM_CAP_MIPS_FPU:
        vcpu->arch.fpu_enabled = true;
        break;
    case KVM_CAP_MIPS_MSA:
        vcpu->arch.msa_enabled = true;
        break;
    default:
        r = -EINVAL;
        break;
    }

    return r;
}
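
Userspace opts in to the guest FPU (and, analogously, MSA) per vcpu with KVM_ENABLE_CAP; as the handler above shows, flags and args[0] must be zero and the capability must also check out at VM level. A hedged sketch, with fds assumed open and error handling minimal:

/* Hypothetical userspace sketch: probe, then enable KVM_CAP_MIPS_FPU. */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int enable_guest_fpu(int vm_fd, int vcpu_fd)
{
    struct kvm_enable_cap cap;

    if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_FPU) <= 0)
        return -1;                  /* host FPU not usable for guests */

    memset(&cap, 0, sizeof(cap));   /* flags and args[0] must stay zero */
    cap.cap = KVM_CAP_MIPS_FPU;
    return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}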
long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
             unsigned long arg)
{
@@ -760,6 +948,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
        r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
        break;
    }
    case KVM_ENABLE_CAP: {
        struct kvm_enable_cap cap;

        r = -EFAULT;
        if (copy_from_user(&cap, argp, sizeof(cap)))
            goto out;
        r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
        break;
    }
    default:
        r = -ENOIOCTLCMD;
    }
@@ -868,11 +1065,30 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)

    switch (ext) {
    case KVM_CAP_ONE_REG:
    case KVM_CAP_ENABLE_CAP:
        r = 1;
        break;
    case KVM_CAP_COALESCED_MMIO:
        r = KVM_COALESCED_MMIO_PAGE_OFFSET;
        break;
    case KVM_CAP_MIPS_FPU:
        r = !!cpu_has_fpu;
        break;
    case KVM_CAP_MIPS_MSA:
        /*
         * We don't support MSA vector partitioning yet:
         * 1) It would require explicit support which can't be tested
         *    yet due to lack of support in current hardware.
         * 2) It extends the state that would need to be saved/restored
         *    by e.g. QEMU for migration.
         *
         * When vector partitioning hardware becomes available, support
         * could be added by requiring a flag when enabling
         * KVM_CAP_MIPS_MSA capability to indicate that userland knows
         * to save/restore the appropriate extra state.
         */
        r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
        break;
    default:
        r = 0;
        break;
@@ -1119,6 +1335,30 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
        ret = kvm_mips_callbacks->handle_break(vcpu);
        break;

    case T_TRAP:
        ++vcpu->stat.trap_inst_exits;
        trace_kvm_exit(vcpu, TRAP_INST_EXITS);
        ret = kvm_mips_callbacks->handle_trap(vcpu);
        break;

    case T_MSAFPE:
        ++vcpu->stat.msa_fpe_exits;
        trace_kvm_exit(vcpu, MSA_FPE_EXITS);
        ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
        break;

    case T_FPE:
        ++vcpu->stat.fpe_exits;
        trace_kvm_exit(vcpu, FPE_EXITS);
        ret = kvm_mips_callbacks->handle_fpe(vcpu);
        break;

    case T_MSADIS:
        ++vcpu->stat.msa_disabled_exits;
        trace_kvm_exit(vcpu, MSA_DISABLED_EXITS);
        ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
        break;

    default:
        kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
            exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
@@ -1146,12 +1386,233 @@ skip_emul:
        }
    }

    if (ret == RESUME_GUEST) {
        /*
         * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
         * is live), restore FCR31 / MSACSR.
         *
         * This should be before returning to the guest exception
         * vector, as it may well cause an [MSA] FP exception if there
         * are pending exception bits unmasked. (see
         * kvm_mips_csr_die_notifier() for how that is handled).
         */
        if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
            read_c0_status() & ST0_CU1)
            __kvm_restore_fcsr(&vcpu->arch);

        if (kvm_mips_guest_has_msa(&vcpu->arch) &&
            read_c0_config5() & MIPS_CONF5_MSAEN)
            __kvm_restore_msacsr(&vcpu->arch);
    }

    /* Disable HTW before returning to guest or host */
    htw_stop();

    return ret;
}

/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    unsigned int sr, cfg5;

    preempt_disable();

    sr = kvm_read_c0_guest_status(cop0);

    /*
     * If MSA state is already live, it is undefined how it interacts with
     * FR=0 FPU state, and we don't want to hit reserved instruction
     * exceptions trying to save the MSA state later when CU=1 && FR=1, so
     * play it safe and save it first.
     *
     * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
     * get called when guest CU1 is set, however we can't trust the guest
     * not to clobber the status register directly via the commpage.
     */
    if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
        vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
        kvm_lose_fpu(vcpu);

    /*
     * Enable FPU for guest
     * We set FR and FRE according to guest context
     */
    change_c0_status(ST0_CU1 | ST0_FR, sr);
    if (cpu_has_fre) {
        cfg5 = kvm_read_c0_guest_config5(cop0);
        change_c0_config5(MIPS_CONF5_FRE, cfg5);
    }
    enable_fpu_hazard();

    /* If guest FPU state not active, restore it now */
    if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) {
        __kvm_restore_fpu(&vcpu->arch);
        vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
    }

    preempt_enable();
}

#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    unsigned int sr, cfg5;

    preempt_disable();

    /*
     * Enable FPU if enabled in guest, since we're restoring FPU context
     * anyway. We set FR and FRE according to guest context.
     */
    if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
        sr = kvm_read_c0_guest_status(cop0);

        /*
         * If FR=0 FPU state is already live, it is undefined how it
         * interacts with MSA state, so play it safe and save it first.
         */
        if (!(sr & ST0_FR) &&
            (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU |
                         KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU)
            kvm_lose_fpu(vcpu);

        change_c0_status(ST0_CU1 | ST0_FR, sr);
        if (sr & ST0_CU1 && cpu_has_fre) {
            cfg5 = kvm_read_c0_guest_config5(cop0);
            change_c0_config5(MIPS_CONF5_FRE, cfg5);
        }
    }

    /* Enable MSA for guest */
    set_c0_config5(MIPS_CONF5_MSAEN);
    enable_fpu_hazard();

    switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) {
    case KVM_MIPS_FPU_FPU:
        /*
         * Guest FPU state already loaded, only restore upper MSA state
         */
        __kvm_restore_msa_upper(&vcpu->arch);
        vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
        break;
    case 0:
        /* Neither FPU nor MSA already active, restore full MSA state */
        __kvm_restore_msa(&vcpu->arch);
        vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
        if (kvm_mips_guest_has_fpu(&vcpu->arch))
            vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
        break;
    default:
        break;
    }

    preempt_enable();
}
#endif

/* Drop FPU & MSA without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
    preempt_disable();
    if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
        disable_msa();
        vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA;
    }
    if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
        clear_c0_status(ST0_CU1 | ST0_FR);
        vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
    }
    preempt_enable();
}

/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
    /*
     * FPU & MSA get disabled in root context (hardware) when it is disabled
     * in guest context (software), but the register state in the hardware
     * may still be in use. This is why we explicitly re-enable the hardware
     * before saving.
     */

    preempt_disable();
    if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
        set_c0_config5(MIPS_CONF5_MSAEN);
        enable_fpu_hazard();

        __kvm_save_msa(&vcpu->arch);

        /* Disable MSA & FPU */
        disable_msa();
        if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
            clear_c0_status(ST0_CU1 | ST0_FR);
        vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
    } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
        set_c0_status(ST0_CU1);
        enable_fpu_hazard();

        __kvm_save_fpu(&vcpu->arch);
        vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;

        /* Disable FPU */
        clear_c0_status(ST0_CU1 | ST0_FR);
    }
    preempt_enable();
}
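
Taken together, kvm_own_fpu()/kvm_own_msa(), kvm_drop_fpu() and kvm_lose_fpu() implement lazy FPU/MSA context switching keyed off vcpu->arch.fpu_inuse. An editorial summary of that state machine (bit names mirror the patch; the table itself is not kernel code):

/* Editorial sketch: the fpu_inuse lifecycle. */
enum {
    FPU_LIVE = 1 << 0,  /* KVM_MIPS_FPU_FPU: guest FPU regs live in hardware */
    MSA_LIVE = 1 << 1,  /* KVM_MIPS_FPU_MSA: guest MSA regs live in hardware */
};
/*
 * kvm_own_fpu():  restore FCR31 + FPRs, set FPU_LIVE (first FPU-use exit)
 * kvm_own_msa():  restore MSACSR + full or upper MSA state, set MSA_LIVE
 *                 (and FPU_LIVE when the guest also has an FPU)
 * kvm_drop_fpu(): discard live state without saving, clear both bits
 * kvm_lose_fpu(): re-enable the unit if needed, save state, clear both bits
 *                 (preemption, vcpu_put, heavyweight exit)
 */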
/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
 * exception if cause bits are set in the value being written.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
                   unsigned long cmd, void *ptr)
{
    struct die_args *args = (struct die_args *)ptr;
    struct pt_regs *regs = args->regs;
    unsigned long pc;

    /* Only interested in FPE and MSAFPE */
    if (cmd != DIE_FP && cmd != DIE_MSAFP)
        return NOTIFY_DONE;

    /* Return immediately if guest context isn't active */
    if (!(current->flags & PF_VCPU))
        return NOTIFY_DONE;

    /* Should never get here from user mode */
    BUG_ON(user_mode(regs));

    pc = instruction_pointer(regs);
    switch (cmd) {
    case DIE_FP:
        /* match 2nd instruction in __kvm_restore_fcsr */
        if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
            return NOTIFY_DONE;
        break;
    case DIE_MSAFP:
        /* match 2nd/3rd instruction in __kvm_restore_msacsr */
        if (!cpu_has_msa ||
            pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
            pc > (unsigned long)&__kvm_restore_msacsr + 8)
            return NOTIFY_DONE;
        break;
    }

    /* Move PC forward a little and continue executing */
    instruction_pointer(regs) += 4;

    return NOTIFY_STOP;
}

static struct notifier_block kvm_mips_csr_die_notifier = {
    .notifier_call = kvm_mips_csr_die_notify,
};

int __init kvm_mips_init(void)
{
    int ret;
@@ -1161,6 +1622,8 @@ int __init kvm_mips_init(void)
    if (ret)
        return ret;

    register_die_notifier(&kvm_mips_csr_die_notifier);

    /*
     * On MIPS, kernel modules are executed from "mapped space", which
     * requires TLBs. The TLB handling code is statically linked with
@@ -1173,7 +1636,6 @@ int __init kvm_mips_init(void)
    kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
    kvm_mips_is_error_pfn = is_error_pfn;

    pr_info("KVM/MIPS Initialized\n");
    return 0;
}

@@ -1185,7 +1647,7 @@ void __exit kvm_mips_exit(void)
    kvm_mips_release_pfn_clean = NULL;
    kvm_mips_is_error_pfn = NULL;

    pr_info("KVM/MIPS unloaded\n");
    unregister_die_notifier(&kvm_mips_csr_die_notifier);
}

module_init(kvm_mips_init);
arch/mips/kvm/msa.S (new file, 161 lines)
@@ -0,0 +1,161 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * MIPS SIMD Architecture (MSA) context handling code for KVM.
 *
 * Copyright (C) 2015 Imagination Technologies Ltd.
 */

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>

    .set    noreorder
    .set    noat

LEAF(__kvm_save_msa)
    st_d    0,  VCPU_FPR0,  a0
    st_d    1,  VCPU_FPR1,  a0
    st_d    2,  VCPU_FPR2,  a0
    st_d    3,  VCPU_FPR3,  a0
    st_d    4,  VCPU_FPR4,  a0
    st_d    5,  VCPU_FPR5,  a0
    st_d    6,  VCPU_FPR6,  a0
    st_d    7,  VCPU_FPR7,  a0
    st_d    8,  VCPU_FPR8,  a0
    st_d    9,  VCPU_FPR9,  a0
    st_d    10, VCPU_FPR10, a0
    st_d    11, VCPU_FPR11, a0
    st_d    12, VCPU_FPR12, a0
    st_d    13, VCPU_FPR13, a0
    st_d    14, VCPU_FPR14, a0
    st_d    15, VCPU_FPR15, a0
    st_d    16, VCPU_FPR16, a0
    st_d    17, VCPU_FPR17, a0
    st_d    18, VCPU_FPR18, a0
    st_d    19, VCPU_FPR19, a0
    st_d    20, VCPU_FPR20, a0
    st_d    21, VCPU_FPR21, a0
    st_d    22, VCPU_FPR22, a0
    st_d    23, VCPU_FPR23, a0
    st_d    24, VCPU_FPR24, a0
    st_d    25, VCPU_FPR25, a0
    st_d    26, VCPU_FPR26, a0
    st_d    27, VCPU_FPR27, a0
    st_d    28, VCPU_FPR28, a0
    st_d    29, VCPU_FPR29, a0
    st_d    30, VCPU_FPR30, a0
    st_d    31, VCPU_FPR31, a0
    jr      ra
     nop
END(__kvm_save_msa)

LEAF(__kvm_restore_msa)
    ld_d    0,  VCPU_FPR0,  a0
    ld_d    1,  VCPU_FPR1,  a0
    ld_d    2,  VCPU_FPR2,  a0
    ld_d    3,  VCPU_FPR3,  a0
    ld_d    4,  VCPU_FPR4,  a0
    ld_d    5,  VCPU_FPR5,  a0
    ld_d    6,  VCPU_FPR6,  a0
    ld_d    7,  VCPU_FPR7,  a0
    ld_d    8,  VCPU_FPR8,  a0
    ld_d    9,  VCPU_FPR9,  a0
    ld_d    10, VCPU_FPR10, a0
    ld_d    11, VCPU_FPR11, a0
    ld_d    12, VCPU_FPR12, a0
    ld_d    13, VCPU_FPR13, a0
    ld_d    14, VCPU_FPR14, a0
    ld_d    15, VCPU_FPR15, a0
    ld_d    16, VCPU_FPR16, a0
    ld_d    17, VCPU_FPR17, a0
    ld_d    18, VCPU_FPR18, a0
    ld_d    19, VCPU_FPR19, a0
    ld_d    20, VCPU_FPR20, a0
    ld_d    21, VCPU_FPR21, a0
    ld_d    22, VCPU_FPR22, a0
    ld_d    23, VCPU_FPR23, a0
    ld_d    24, VCPU_FPR24, a0
    ld_d    25, VCPU_FPR25, a0
    ld_d    26, VCPU_FPR26, a0
    ld_d    27, VCPU_FPR27, a0
    ld_d    28, VCPU_FPR28, a0
    ld_d    29, VCPU_FPR29, a0
    ld_d    30, VCPU_FPR30, a0
    ld_d    31, VCPU_FPR31, a0
    jr      ra
     nop
END(__kvm_restore_msa)

    .macro  kvm_restore_msa_upper   wr, off, base
    .set    push
    .set    noat
#ifdef CONFIG_64BIT
    ld      $1, \off(\base)
    insert_d \wr, 1
#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
    lw      $1, \off(\base)
    insert_w \wr, 2
    lw      $1, (\off+4)(\base)
    insert_w \wr, 3
#else /* CONFIG_CPU_BIG_ENDIAN */
    lw      $1, (\off+4)(\base)
    insert_w \wr, 2
    lw      $1, \off(\base)
    insert_w \wr, 3
#endif
    .set    pop
    .endm

LEAF(__kvm_restore_msa_upper)
    kvm_restore_msa_upper   0,  VCPU_FPR0 +8, a0
    kvm_restore_msa_upper   1,  VCPU_FPR1 +8, a0
    kvm_restore_msa_upper   2,  VCPU_FPR2 +8, a0
    kvm_restore_msa_upper   3,  VCPU_FPR3 +8, a0
    kvm_restore_msa_upper   4,  VCPU_FPR4 +8, a0
    kvm_restore_msa_upper   5,  VCPU_FPR5 +8, a0
    kvm_restore_msa_upper   6,  VCPU_FPR6 +8, a0
    kvm_restore_msa_upper   7,  VCPU_FPR7 +8, a0
    kvm_restore_msa_upper   8,  VCPU_FPR8 +8, a0
    kvm_restore_msa_upper   9,  VCPU_FPR9 +8, a0
    kvm_restore_msa_upper   10, VCPU_FPR10+8, a0
    kvm_restore_msa_upper   11, VCPU_FPR11+8, a0
    kvm_restore_msa_upper   12, VCPU_FPR12+8, a0
    kvm_restore_msa_upper   13, VCPU_FPR13+8, a0
    kvm_restore_msa_upper   14, VCPU_FPR14+8, a0
    kvm_restore_msa_upper   15, VCPU_FPR15+8, a0
    kvm_restore_msa_upper   16, VCPU_FPR16+8, a0
    kvm_restore_msa_upper   17, VCPU_FPR17+8, a0
    kvm_restore_msa_upper   18, VCPU_FPR18+8, a0
    kvm_restore_msa_upper   19, VCPU_FPR19+8, a0
    kvm_restore_msa_upper   20, VCPU_FPR20+8, a0
    kvm_restore_msa_upper   21, VCPU_FPR21+8, a0
    kvm_restore_msa_upper   22, VCPU_FPR22+8, a0
    kvm_restore_msa_upper   23, VCPU_FPR23+8, a0
    kvm_restore_msa_upper   24, VCPU_FPR24+8, a0
    kvm_restore_msa_upper   25, VCPU_FPR25+8, a0
    kvm_restore_msa_upper   26, VCPU_FPR26+8, a0
    kvm_restore_msa_upper   27, VCPU_FPR27+8, a0
    kvm_restore_msa_upper   28, VCPU_FPR28+8, a0
    kvm_restore_msa_upper   29, VCPU_FPR29+8, a0
    kvm_restore_msa_upper   30, VCPU_FPR30+8, a0
    kvm_restore_msa_upper   31, VCPU_FPR31+8, a0
    jr      ra
     nop
END(__kvm_restore_msa_upper)

LEAF(__kvm_restore_msacsr)
    lw      t0, VCPU_MSA_CSR(a0)
    /*
     * The ctcmsa must stay at this offset in __kvm_restore_msacsr.
     * See kvm_mips_csr_die_notify() which handles t0 containing a value
     * which triggers an MSA FP Exception, which must be stepped over and
     * ignored since the set cause bits must remain there for the guest.
     */
    _ctcmsa MSA_CSR, t0
    jr      ra
     nop
END(__kvm_restore_msacsr)
arch/mips/kvm/stats.c
@@ -25,6 +25,10 @@ char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
    "System Call",
    "Reserved Inst",
    "Break Inst",
    "Trap Inst",
    "MSA FPE",
    "FPE",
    "MSA Disabled",
    "D-Cache Flushes",
};
arch/mips/kvm/tlb.c
@@ -733,6 +733,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        }
    }

    /* restore guest state to registers */
    kvm_mips_callbacks->vcpu_set_regs(vcpu);

    local_irq_restore(flags);

}
@@ -751,6 +754,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
    vcpu->arch.preempt_entryhi = read_c0_entryhi();
    vcpu->arch.last_sched_cpu = cpu;

    /* save guest state in registers */
    kvm_mips_callbacks->vcpu_get_regs(vcpu);

    if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
         ASID_VERSION_MASK)) {
        kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
arch/mips/kvm/trap_emul.c
@@ -39,16 +39,30 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)

static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct kvm_run *run = vcpu->run;
    uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
    unsigned long cause = vcpu->arch.host_cp0_cause;
    enum emulation_result er = EMULATE_DONE;
    int ret = RESUME_GUEST;

    if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
        er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
    else
    if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
        /* FPU Unusable */
        if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
            (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
            /*
             * Unusable/no FPU in guest:
             * deliver guest COP1 Unusable Exception
             */
            er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
        } else {
            /* Restore FPU state */
            kvm_own_fpu(vcpu);
            er = EMULATE_DONE;
        }
    } else {
        er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
    }

    switch (er) {
    case EMULATE_DONE:
@@ -330,6 +344,107 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
    return ret;
}

static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
    struct kvm_run *run = vcpu->run;
    uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
    unsigned long cause = vcpu->arch.host_cp0_cause;
    enum emulation_result er = EMULATE_DONE;
    int ret = RESUME_GUEST;

    er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
    if (er == EMULATE_DONE) {
        ret = RESUME_GUEST;
    } else {
        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        ret = RESUME_HOST;
    }
    return ret;
}

static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
    struct kvm_run *run = vcpu->run;
    uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
    unsigned long cause = vcpu->arch.host_cp0_cause;
    enum emulation_result er = EMULATE_DONE;
    int ret = RESUME_GUEST;

    er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
    if (er == EMULATE_DONE) {
        ret = RESUME_GUEST;
    } else {
        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        ret = RESUME_HOST;
    }
    return ret;
}

static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
    struct kvm_run *run = vcpu->run;
    uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
    unsigned long cause = vcpu->arch.host_cp0_cause;
    enum emulation_result er = EMULATE_DONE;
    int ret = RESUME_GUEST;

    er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
    if (er == EMULATE_DONE) {
        ret = RESUME_GUEST;
    } else {
        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        ret = RESUME_HOST;
    }
    return ret;
}

/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:    Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct kvm_run *run = vcpu->run;
    uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
    unsigned long cause = vcpu->arch.host_cp0_cause;
    enum emulation_result er = EMULATE_DONE;
    int ret = RESUME_GUEST;

    if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
        (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
        /*
         * No MSA in guest, or FPU enabled and not in FR=1 mode,
         * guest reserved instruction exception
         */
        er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
    } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
        /* MSA disabled by guest, guest MSA disabled exception */
        er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
    } else {
        /* Restore MSA/FPU state */
        kvm_own_msa(vcpu);
        er = EMULATE_DONE;
    }

    switch (er) {
    case EMULATE_DONE:
        ret = RESUME_GUEST;
        break;

    case EMULATE_FAIL:
        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        ret = RESUME_HOST;
        break;

    default:
        BUG();
    }
    return ret;
}
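
The T_MSADIS handler above picks one of three outcomes. An editorial decision-table sketch of that dispatch; the ST0_FR and MIPS_CONF5_MSAEN bit positions are confirmed by the assembler comments earlier in this series, ST0_CU1 is the usual Status.CU1 bit, and the function itself is illustrative, not kernel code:

#define ST0_CU1          (1u << 29) /* Status.CU1: FPU enabled */
#define ST0_FR           (1u << 26) /* Status.FR: 64-bit FPU register mode */
#define MIPS_CONF5_MSAEN (1u << 27) /* Config5.MSAEn */

enum msadis_action { DELIVER_RI, DELIVER_MSADIS, RESTORE_MSA };

static enum msadis_action classify_msadis(int guest_has_msa,
                                          unsigned int guest_status,
                                          unsigned int guest_config5)
{
    if (!guest_has_msa ||
        (guest_status & (ST0_CU1 | ST0_FR)) == ST0_CU1)
        return DELIVER_RI;          /* no MSA, or FPU on without FR=1 */
    if (!(guest_config5 & MIPS_CONF5_MSAEN))
        return DELIVER_MSADIS;      /* the guest disabled MSA itself */
    return RESTORE_MSA;             /* lazily restore context and retry */
}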
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
    return 0;
@@ -351,8 +466,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
     * guest will come up as expected, for now we simulate a MIPS 24kc
     */
    kvm_write_c0_guest_prid(cop0, 0x00019300);
    kvm_write_c0_guest_config(cop0,
                  MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
    /* Have config1, Cacheable, noncoherent, write-back, write allocate */
    kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) |
                  (0x1 << CP0C0_AR) |
                  (MMU_TYPE_R4000 << CP0C0_MT));

    /* Read the cache characteristics from the host Config1 Register */
@@ -368,10 +484,18 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
             (1 << CP0C1_WR) | (1 << CP0C1_CA));
    kvm_write_c0_guest_config1(cop0, config1);

    kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
    /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
    kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
                   (1 << CP0C3_ULRI));
    /* Have config3, no tertiary/secondary caches implemented */
    kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
    /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

    /* Have config4, UserLocal */
    kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

    /* Have config5 */
    kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

    /* No config6 */
    kvm_write_c0_guest_config5(cop0, 0);

    /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
    kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
@@ -416,6 +540,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    int ret = 0;
    unsigned int cur, change;

    switch (reg->id) {
    case KVM_REG_MIPS_CP0_COUNT:
@@ -444,6 +569,44 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
            kvm_write_c0_guest_cause(cop0, v);
        }
        break;
    case KVM_REG_MIPS_CP0_CONFIG:
        /* read-only for now */
        break;
    case KVM_REG_MIPS_CP0_CONFIG1:
        cur = kvm_read_c0_guest_config1(cop0);
        change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
        if (change) {
            v = cur ^ change;
            kvm_write_c0_guest_config1(cop0, v);
        }
        break;
    case KVM_REG_MIPS_CP0_CONFIG2:
        /* read-only for now */
        break;
    case KVM_REG_MIPS_CP0_CONFIG3:
        cur = kvm_read_c0_guest_config3(cop0);
        change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
        if (change) {
            v = cur ^ change;
            kvm_write_c0_guest_config3(cop0, v);
        }
        break;
    case KVM_REG_MIPS_CP0_CONFIG4:
        cur = kvm_read_c0_guest_config4(cop0);
        change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
        if (change) {
            v = cur ^ change;
            kvm_write_c0_guest_config4(cop0, v);
        }
        break;
    case KVM_REG_MIPS_CP0_CONFIG5:
        cur = kvm_read_c0_guest_config5(cop0);
        change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
        if (change) {
            v = cur ^ change;
            kvm_write_c0_guest_config5(cop0, v);
        }
        break;
    case KVM_REG_MIPS_COUNT_CTL:
        ret = kvm_mips_set_count_ctl(vcpu, v);
        break;
@@ -459,6 +622,18 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
    return ret;
}

static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu)
{
    kvm_lose_fpu(vcpu);

    return 0;
}

static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu)
{
    return 0;
}

static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
    /* exit handlers */
    .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
@@ -470,6 +645,10 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
    .handle_syscall = kvm_trap_emul_handle_syscall,
    .handle_res_inst = kvm_trap_emul_handle_res_inst,
    .handle_break = kvm_trap_emul_handle_break,
    .handle_trap = kvm_trap_emul_handle_trap,
    .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
    .handle_fpe = kvm_trap_emul_handle_fpe,
    .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,

    .vm_init = kvm_trap_emul_vm_init,
    .vcpu_init = kvm_trap_emul_vcpu_init,
@@ -483,6 +662,8 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
    .irq_clear = kvm_mips_irq_clear_cb,
    .get_one_reg = kvm_trap_emul_get_one_reg,
    .set_one_reg = kvm_trap_emul_set_one_reg,
    .vcpu_get_regs = kvm_trap_emul_vcpu_get_regs,
    .vcpu_set_regs = kvm_trap_emul_vcpu_set_regs,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
arch/powerpc/kvm/mpic.c
@@ -34,7 +34,7 @@
#include <asm/kvm_para.h>
#include <asm/kvm_host.h>
#include <asm/kvm_ppc.h>
#include "iodev.h"
#include <kvm/iodev.h>

#define MAX_CPU 32
#define MAX_SRC 256
@@ -289,11 +289,6 @@ static inline void IRQ_resetbit(struct irq_queue *q, int n_IRQ)
    clear_bit(n_IRQ, q->queue);
}

static inline int IRQ_testbit(struct irq_queue *q, int n_IRQ)
{
    return test_bit(n_IRQ, q->queue);
}

static void IRQ_check(struct openpic *opp, struct irq_queue *q)
{
    int irq = -1;
@@ -1374,8 +1369,9 @@ static int kvm_mpic_write_internal(struct openpic *opp, gpa_t addr, u32 val)
    return -ENXIO;
}

static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr,
             int len, void *ptr)
static int kvm_mpic_read(struct kvm_vcpu *vcpu,
             struct kvm_io_device *this,
             gpa_t addr, int len, void *ptr)
{
    struct openpic *opp = container_of(this, struct openpic, mmio);
    int ret;
@@ -1415,8 +1411,9 @@ static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr,
    return ret;
}

static int kvm_mpic_write(struct kvm_io_device *this, gpa_t addr,
              int len, const void *ptr)
static int kvm_mpic_write(struct kvm_vcpu *vcpu,
              struct kvm_io_device *this,
              gpa_t addr, int len, const void *ptr)
{
    struct openpic *opp = container_of(this, struct openpic, mmio);
    int ret;
arch/powerpc/kvm/powerpc.c
@@ -807,7 +807,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,

    idx = srcu_read_lock(&vcpu->kvm->srcu);

    ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
    ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
                  bytes, &run->mmio.data);

    srcu_read_unlock(&vcpu->kvm->srcu, idx);
@@ -880,7 +880,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,

    idx = srcu_read_lock(&vcpu->kvm->srcu);

    ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
    ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
                   bytes, &run->mmio.data);

    srcu_read_unlock(&vcpu->kvm->srcu, idx);
arch/s390/include/asm/kvm_host.h
@@ -172,7 +172,9 @@ struct kvm_s390_sie_block {
    __u32 fac;            /* 0x01a0 */
    __u8 reserved1a4[20]; /* 0x01a4 */
    __u64 cbrlo;          /* 0x01b8 */
    __u8 reserved1c0[30]; /* 0x01c0 */
    __u8 reserved1c0[8];  /* 0x01c0 */
    __u32 ecd;            /* 0x01c8 */
    __u8 reserved1cc[18]; /* 0x01cc */
    __u64 pp;             /* 0x01de */
    __u8 reserved1e6[2];  /* 0x01e6 */
    __u64 itdba;          /* 0x01e8 */
@@ -183,11 +185,17 @@ struct kvm_s390_itdb {
    __u8 data[256];
} __packed;

struct kvm_s390_vregs {
    __vector128 vrs[32];
    __u8 reserved200[512]; /* for future vector expansion */
} __packed;

struct sie_page {
    struct kvm_s390_sie_block sie_block;
    __u8 reserved200[1024];      /* 0x0200 */
    struct kvm_s390_itdb itdb;   /* 0x0600 */
    __u8 reserved700[2304];      /* 0x0700 */
    __u8 reserved700[1280];      /* 0x0700 */
    struct kvm_s390_vregs vregs; /* 0x0c00 */
} __packed;
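
Shrinking reserved700 from 2304 to 1280 bytes (0x500) is exactly what places the new vregs block at page offset 0xc00 and keeps struct sie_page within one 4K page (0xc00 + 1024 = 0x1000). An editorial sketch of a build-time check in the kernel's own style (not part of the patch):

/* Editorial sketch: assert the sie_page layout the offset comments promise. */
#include <linux/bug.h>
#include <linux/stddef.h>
#include <asm/kvm_host.h>

static inline void sie_page_layout_check(void)
{
    BUILD_BUG_ON(offsetof(struct sie_page, itdb)  != 0x600);
    BUILD_BUG_ON(offsetof(struct sie_page, vregs) != 0xc00);
    BUILD_BUG_ON(sizeof(struct sie_page) > 4096);
}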
struct kvm_vcpu_stat {
@@ -238,6 +246,7 @@ struct kvm_vcpu_stat {
    u32 instruction_sigp_stop;
    u32 instruction_sigp_stop_store_status;
    u32 instruction_sigp_store_status;
    u32 instruction_sigp_store_adtl_status;
    u32 instruction_sigp_arch;
    u32 instruction_sigp_prefix;
    u32 instruction_sigp_restart;
@@ -270,6 +279,7 @@ struct kvm_vcpu_stat {
#define PGM_SPECIAL_OPERATION    0x13
#define PGM_OPERAND              0x15
#define PGM_TRACE_TABEL          0x16
#define PGM_VECTOR_PROCESSING    0x1b
#define PGM_SPACE_SWITCH         0x1c
#define PGM_HFP_SQUARE_ROOT      0x1d
#define PGM_PC_TRANSLATION_SPEC  0x1f
@@ -334,6 +344,11 @@ enum irq_types {
    IRQ_PEND_COUNT
};

/* We have 2M for virtio device descriptor pages. Smallest amount of
 * memory per page is 24 bytes (1 queue), so (2048*1024) / 24 = 87381
 */
#define KVM_S390_MAX_VIRTIO_IRQS 87381

/*
 * Repressible (non-floating) machine check interrupts
 * subclass bits in MCIC
@@ -411,13 +426,32 @@ struct kvm_s390_local_interrupt {
    unsigned long pending_irqs;
};

#define FIRQ_LIST_IO_ISC_0 0
#define FIRQ_LIST_IO_ISC_1 1
#define FIRQ_LIST_IO_ISC_2 2
#define FIRQ_LIST_IO_ISC_3 3
#define FIRQ_LIST_IO_ISC_4 4
#define FIRQ_LIST_IO_ISC_5 5
#define FIRQ_LIST_IO_ISC_6 6
#define FIRQ_LIST_IO_ISC_7 7
#define FIRQ_LIST_PFAULT   8
#define FIRQ_LIST_VIRTIO   9
#define FIRQ_LIST_COUNT    10
#define FIRQ_CNTR_IO       0
#define FIRQ_CNTR_SERVICE  1
#define FIRQ_CNTR_VIRTIO   2
#define FIRQ_CNTR_PFAULT   3
#define FIRQ_MAX_COUNT     4

struct kvm_s390_float_interrupt {
    unsigned long pending_irqs;
    spinlock_t lock;
    struct list_head list;
    atomic_t active;
    struct list_head lists[FIRQ_LIST_COUNT];
    int counters[FIRQ_MAX_COUNT];
    struct kvm_s390_mchk_info mchk;
    struct kvm_s390_ext_info srv_signal;
    int next_rr_cpu;
    unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
    unsigned int irq_count;
};

struct kvm_hw_wp_info_arch {
@@ -465,6 +499,7 @@ struct kvm_vcpu_arch {
    s390_fp_regs host_fpregs;
    unsigned int host_acrs[NUM_ACRS];
    s390_fp_regs guest_fpregs;
    struct kvm_s390_vregs *host_vregs;
    struct kvm_s390_local_interrupt local_int;
    struct hrtimer ckc_timer;
    struct kvm_s390_pgm_info pgm;
@@ -553,6 +588,7 @@ struct kvm_arch{
    int use_cmma;
    int user_cpu_state_ctrl;
    int user_sigp;
    int user_stsi;
    struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
    wait_queue_head_t ipte_wq;
    int ipte_lock_count;
arch/s390/include/uapi/asm/kvm.h
@@ -150,6 +150,7 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_CRS    (1UL << 3)
#define KVM_SYNC_ARCH0  (1UL << 4)
#define KVM_SYNC_PFAULT (1UL << 5)
#define KVM_SYNC_VRS    (1UL << 6)
/* definition of registers in kvm_run */
struct kvm_sync_regs {
    __u64 prefix;   /* prefix register */
@@ -164,6 +165,9 @@ struct kvm_sync_regs {
    __u64 pft;      /* pfault token [PFAULT] */
    __u64 pfs;      /* pfault select [PFAULT] */
    __u64 pfc;      /* pfault compare [PFAULT] */
    __u64 vrs[32][2];    /* vector registers */
    __u8  reserved[512]; /* for future vector expansion */
    __u32 fpc;           /* only valid with vector registers */
};

#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
arch/s390/include/uapi/asm/sie.h
@@ -230,7 +230,7 @@
 * and returns a key, which can be used to find a mnemonic name
 * of the instruction in the icpt_insn_codes table.
 */
#define icpt_insn_decoder(insn) \
#define icpt_insn_decoder(insn) ( \
    INSN_DECODE_IPA0(0x01, insn, 48, 0xff) \
    INSN_DECODE_IPA0(0xaa, insn, 48, 0x0f) \
    INSN_DECODE_IPA0(0xb2, insn, 48, 0xff) \
@@ -239,6 +239,6 @@
    INSN_DECODE_IPA0(0xe5, insn, 48, 0xff) \
    INSN_DECODE_IPA0(0xeb, insn, 16, 0xff) \
    INSN_DECODE_IPA0(0xc8, insn, 48, 0x0f) \
    INSN_DECODE(insn)
    INSN_DECODE(insn))

#endif /* _UAPI_ASM_S390_SIE_H */
arch/s390/kernel/asm-offsets.c
@@ -171,6 +171,7 @@ int main(void)
#else /* CONFIG_32BIT */
    DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code));
    DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address));
    DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr));
    DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
    DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
    DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
arch/s390/kvm/diag.c
@@ -77,7 +77,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)

    if (vcpu->run->s.regs.gprs[rx] & 7)
        return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
    rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm));
    rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
    if (rc)
        return kvm_s390_inject_prog_cond(vcpu, rc);
    if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
@@ -213,7 +213,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
     * - gpr 3 contains the virtqueue index (passed as datamatch)
     * - gpr 4 contains the index on the bus (optionally)
     */
    ret = kvm_io_bus_write_cookie(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS,
    ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS,
                      vcpu->run->s.regs.gprs[2] & 0xffffffff,
                      8, &vcpu->run->s.regs.gprs[3],
                      vcpu->run->s.regs.gprs[4]);
@@ -230,7 +230,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)

int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
    int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff;
    int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;

    if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
        return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@ -10,6 +10,7 @@
|
||||
#include <asm/pgtable.h>
|
||||
#include "kvm-s390.h"
|
||||
#include "gaccess.h"
|
||||
#include <asm/switch_to.h>
|
||||
|
||||
union asce {
|
||||
unsigned long val;
|
||||
@ -207,6 +208,54 @@ union raddress {
|
||||
unsigned long pfra : 52; /* Page-Frame Real Address */
|
||||
};
|
||||
|
||||
union alet {
|
||||
u32 val;
|
||||
struct {
|
||||
u32 reserved : 7;
|
||||
u32 p : 1;
|
||||
u32 alesn : 8;
|
||||
u32 alen : 16;
|
||||
};
|
||||
};
|
||||
|
||||
union ald {
|
||||
u32 val;
|
||||
struct {
|
||||
u32 : 1;
|
||||
u32 alo : 24;
|
||||
u32 all : 7;
|
||||
};
|
||||
};
|
||||
|
||||
struct ale {
|
||||
unsigned long i : 1; /* ALEN-Invalid Bit */
|
||||
unsigned long : 5;
|
||||
unsigned long fo : 1; /* Fetch-Only Bit */
|
||||
unsigned long p : 1; /* Private Bit */
|
||||
unsigned long alesn : 8; /* Access-List-Entry Sequence Number */
|
||||
unsigned long aleax : 16; /* Access-List-Entry Authorization Index */
|
||||
unsigned long : 32;
|
||||
unsigned long : 1;
|
||||
unsigned long asteo : 25; /* ASN-Second-Table-Entry Origin */
|
||||
unsigned long : 6;
|
||||
unsigned long astesn : 32; /* ASTE Sequence Number */
|
||||
} __packed;
|
||||
|
||||
struct aste {
|
||||
unsigned long i : 1; /* ASX-Invalid Bit */
|
||||
unsigned long ato : 29; /* Authority-Table Origin */
|
||||
unsigned long : 1;
|
||||
unsigned long b : 1; /* Base-Space Bit */
|
||||
unsigned long ax : 16; /* Authorization Index */
|
||||
unsigned long atl : 12; /* Authority-Table Length */
|
||||
unsigned long : 2;
|
||||
unsigned long ca : 1; /* Controlled-ASN Bit */
|
||||
unsigned long ra : 1; /* Reusable-ASN Bit */
|
||||
unsigned long asce : 64; /* Address-Space-Control Element */
|
||||
unsigned long ald : 32;
|
||||
unsigned long astesn : 32;
|
||||
/* .. more fields there */
|
||||
} __packed;
|
||||
|
||||
int ipte_lock_held(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
@ -307,15 +356,157 @@ void ipte_unlock(struct kvm_vcpu *vcpu)
|
||||
ipte_unlock_simple(vcpu);
|
||||
}
|
||||
|
||||
static unsigned long get_vcpu_asce(struct kvm_vcpu *vcpu)
|
||||
static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
|
||||
int write)
|
||||
{
|
||||
union alet alet;
|
||||
struct ale ale;
|
||||
struct aste aste;
|
||||
unsigned long ald_addr, authority_table_addr;
|
||||
union ald ald;
|
||||
int eax, rc;
|
||||
u8 authority_table;
|
||||
|
||||
if (ar >= NUM_ACRS)
|
||||
return -EINVAL;
|
||||
|
||||
save_access_regs(vcpu->run->s.regs.acrs);
|
||||
alet.val = vcpu->run->s.regs.acrs[ar];
|
||||
|
||||
if (ar == 0 || alet.val == 0) {
|
||||
asce->val = vcpu->arch.sie_block->gcr[1];
|
||||
return 0;
|
||||
} else if (alet.val == 1) {
|
||||
asce->val = vcpu->arch.sie_block->gcr[7];
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (alet.reserved)
|
||||
return PGM_ALET_SPECIFICATION;
|
||||
|
||||
if (alet.p)
|
||||
ald_addr = vcpu->arch.sie_block->gcr[5];
|
||||
else
|
||||
ald_addr = vcpu->arch.sie_block->gcr[2];
|
||||
ald_addr &= 0x7fffffc0;
|
||||
|
||||
rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
if (alet.alen / 8 > ald.all)
|
||||
return PGM_ALEN_TRANSLATION;
|
||||
|
||||
if (0x7fffffff - ald.alo * 128 < alet.alen * 16)
|
||||
return PGM_ADDRESSING;
|
||||
|
||||
rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
|
||||
sizeof(struct ale));
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
if (ale.i == 1)
|
||||
return PGM_ALEN_TRANSLATION;
|
||||
if (ale.alesn != alet.alesn)
|
||||
return PGM_ALE_SEQUENCE;
|
||||
|
||||
rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
if (aste.i)
|
||||
return PGM_ASTE_VALIDITY;
|
||||
if (aste.astesn != ale.astesn)
|
||||
return PGM_ASTE_SEQUENCE;
|
||||
|
||||
if (ale.p == 1) {
|
||||
eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
|
||||
if (ale.aleax != eax) {
|
||||
if (eax / 16 > aste.atl)
|
||||
return PGM_EXTENDED_AUTHORITY;
|
||||
|
||||
authority_table_addr = aste.ato * 4 + eax / 4;
|
||||
|
||||
rc = read_guest_real(vcpu, authority_table_addr,
|
||||
&authority_table,
|
||||
sizeof(u8));
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0)
|
||||
return PGM_EXTENDED_AUTHORITY;
|
||||
}
|
||||
}
|
||||
|
||||
if (ale.fo == 1 && write)
|
||||
return PGM_PROTECTION;
|
||||
|
||||
asce->val = aste.asce;
|
||||
return 0;
|
||||
}
|
||||
|
||||
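The access-register translation added above reduces to three special cases before the access-list walk: AR 0 or ALET 0 select the primary space (CR1), ALET 1 selects the secondary space (CR7), and everything else goes through the ALD/ALE/ASTE lookup. A minimal standalone sketch of just that dispatch (illustrative only; the enum and function are ours, not kernel code):

#include <stdint.h>
#include <stdio.h>

enum asce_source { ASCE_PRIMARY_CR1, ASCE_SECONDARY_CR7, ASCE_FROM_ACCESS_LIST };

/* Mirrors the special cases at the top of ar_translation() above. */
static enum asce_source classify_alet(unsigned int ar, uint32_t alet_val)
{
	if (ar == 0 || alet_val == 0)
		return ASCE_PRIMARY_CR1;	/* asce = gcr[1] */
	if (alet_val == 1)
		return ASCE_SECONDARY_CR7;	/* asce = gcr[7] */
	return ASCE_FROM_ACCESS_LIST;		/* ALD/ALE/ASTE walk */
}

int main(void)
{
	printf("%d %d %d\n", classify_alet(0, 2), classify_alet(5, 1),
	       classify_alet(5, 2));	/* prints: 0 1 2 */
	return 0;
}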
struct trans_exc_code_bits {
unsigned long addr : 52; /* Translation-exception Address */
unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */
unsigned long : 6;
unsigned long b60 : 1;
unsigned long b61 : 1;
unsigned long as : 2; /* ASCE Identifier */
};

enum {
FSI_UNKNOWN = 0, /* Unknown wether fetch or store */
FSI_STORE = 1, /* Exception was due to store operation */
FSI_FETCH = 2 /* Exception was due to fetch operation */
};

static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
ar_t ar, int write)
{
int rc;
psw_t *psw = &vcpu->arch.sie_block->gpsw;
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
struct trans_exc_code_bits *tec_bits;

memset(pgm, 0, sizeof(*pgm));
tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
tec_bits->as = psw_bits(*psw).as;

if (!psw_bits(*psw).t) {
asce->val = 0;
asce->r = 1;
return 0;
}

switch (psw_bits(vcpu->arch.sie_block->gpsw).as) {
case PSW_AS_PRIMARY:
return vcpu->arch.sie_block->gcr[1];
asce->val = vcpu->arch.sie_block->gcr[1];
return 0;
case PSW_AS_SECONDARY:
return vcpu->arch.sie_block->gcr[7];
asce->val = vcpu->arch.sie_block->gcr[7];
return 0;
case PSW_AS_HOME:
return vcpu->arch.sie_block->gcr[13];
asce->val = vcpu->arch.sie_block->gcr[13];
return 0;
case PSW_AS_ACCREG:
rc = ar_translation(vcpu, asce, ar, write);
switch (rc) {
case PGM_ALEN_TRANSLATION:
case PGM_ALE_SEQUENCE:
case PGM_ASTE_VALIDITY:
case PGM_ASTE_SEQUENCE:
case PGM_EXTENDED_AUTHORITY:
vcpu->arch.pgm.exc_access_id = ar;
break;
case PGM_PROTECTION:
tec_bits->b60 = 1;
tec_bits->b61 = 1;
break;
}
if (rc > 0)
pgm->code = rc;
return rc;
}
return 0;
}
@@ -330,10 +521,11 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
* @vcpu: virtual cpu
* @gva: guest virtual address
* @gpa: points to where guest physical (absolute) address should be stored
* @asce: effective asce
* @write: indicates if access is a write access
*
* Translate a guest virtual address into a guest absolute address by means
* of dynamic address translation as specified by the architecuture.
* of dynamic address translation as specified by the architecture.
* If the resulting absolute address is not available in the configuration
* an addressing exception is indicated and @gpa will not be changed.
*
@@ -345,7 +537,8 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
* by the architecture
*/
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
unsigned long *gpa, int write)
unsigned long *gpa, const union asce asce,
int write)
{
union vaddress vaddr = {.addr = gva};
union raddress raddr = {.addr = gva};
@@ -354,12 +547,10 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
union ctlreg0 ctlreg0;
unsigned long ptr;
int edat1, edat2;
union asce asce;

ctlreg0.val = vcpu->arch.sie_block->gcr[0];
edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
asce.val = get_vcpu_asce(vcpu);
if (asce.r)
goto real_address;
ptr = asce.origin * 4096;
@@ -506,48 +697,30 @@ static inline int is_low_address(unsigned long ga)
return (ga & ~0x11fful) == 0;
}

static int low_address_protection_enabled(struct kvm_vcpu *vcpu)
static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
const union asce asce)
{
union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
psw_t *psw = &vcpu->arch.sie_block->gpsw;
union asce asce;

if (!ctlreg0.lap)
return 0;
asce.val = get_vcpu_asce(vcpu);
if (psw_bits(*psw).t && asce.p)
return 0;
return 1;
}

struct trans_exc_code_bits {
unsigned long addr : 52; /* Translation-exception Address */
unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */
unsigned long : 7;
unsigned long b61 : 1;
unsigned long as : 2; /* ASCE Identifier */
};

enum {
FSI_UNKNOWN = 0, /* Unknown wether fetch or store */
FSI_STORE = 1, /* Exception was due to store operation */
FSI_FETCH = 2 /* Exception was due to fetch operation */
};

static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
unsigned long *pages, unsigned long nr_pages,
int write)
const union asce asce, int write)
{
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
psw_t *psw = &vcpu->arch.sie_block->gpsw;
struct trans_exc_code_bits *tec_bits;
int lap_enabled, rc;

memset(pgm, 0, sizeof(*pgm));
tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
tec_bits->as = psw_bits(*psw).as;
lap_enabled = low_address_protection_enabled(vcpu);
lap_enabled = low_address_protection_enabled(vcpu, asce);
while (nr_pages) {
ga = kvm_s390_logical_to_effective(vcpu, ga);
tec_bits->addr = ga >> PAGE_SHIFT;
@@ -557,7 +730,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
}
ga &= PAGE_MASK;
if (psw_bits(*psw).t) {
rc = guest_translate(vcpu, ga, pages, write);
rc = guest_translate(vcpu, ga, pages, asce, write);
if (rc < 0)
return rc;
if (rc == PGM_PROTECTION)
@@ -578,7 +751,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
return 0;
}

int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len, int write)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -591,20 +764,19 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,

if (!len)
return 0;
/* Access register mode is not supported yet. */
if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
return -EOPNOTSUPP;
rc = get_vcpu_asce(vcpu, &asce, ar, write);
if (rc)
return rc;
nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
pages = pages_array;
if (nr_pages > ARRAY_SIZE(pages_array))
pages = vmalloc(nr_pages * sizeof(unsigned long));
if (!pages)
return -ENOMEM;
asce.val = get_vcpu_asce(vcpu);
need_ipte_lock = psw_bits(*psw).t && !asce.r;
if (need_ipte_lock)
ipte_lock(vcpu);
rc = guest_page_range(vcpu, ga, pages, nr_pages, write);
rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write);
for (idx = 0; idx < nr_pages && !rc; idx++) {
gpa = *(pages + idx) + (ga & ~PAGE_MASK);
_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
@@ -652,7 +824,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* Note: The IPTE lock is not taken during this function, so the caller
* has to take care of this.
*/
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
unsigned long *gpa, int write)
{
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
@@ -661,26 +833,21 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
union asce asce;
int rc;

/* Access register mode is not supported yet. */
if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG)
return -EOPNOTSUPP;

gva = kvm_s390_logical_to_effective(vcpu, gva);
memset(pgm, 0, sizeof(*pgm));
tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
tec->as = psw_bits(*psw).as;
tec->fsi = write ? FSI_STORE : FSI_FETCH;
rc = get_vcpu_asce(vcpu, &asce, ar, write);
tec->addr = gva >> PAGE_SHIFT;
if (is_low_address(gva) && low_address_protection_enabled(vcpu)) {
if (rc)
return rc;
if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
if (write) {
rc = pgm->code = PGM_PROTECTION;
return rc;
}
}

asce.val = get_vcpu_asce(vcpu);
if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */
rc = guest_translate(vcpu, gva, gpa, write);
rc = guest_translate(vcpu, gva, gpa, asce, write);
if (rc > 0) {
if (rc == PGM_PROTECTION)
tec->b61 = 1;
@@ -697,28 +864,51 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
}

/**
* kvm_s390_check_low_addr_protection - check for low-address protection
* @ga: Guest address
* check_gva_range - test a range of guest virtual addresses for accessibility
*/
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
unsigned long length, int is_write)
{
unsigned long gpa;
unsigned long currlen;
int rc = 0;

ipte_lock(vcpu);
while (length > 0 && !rc) {
currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
rc = guest_translate_address(vcpu, gva, ar, &gpa, is_write);
gva += currlen;
length -= currlen;
}
ipte_unlock(vcpu);

return rc;
}

/**
* kvm_s390_check_low_addr_prot_real - check for low-address protection
* @gra: Guest real address
*
* Checks whether an address is subject to low-address protection and set
* up vcpu->arch.pgm accordingly if necessary.
*
* Return: 0 if no protection exception, or PGM_PROTECTION if protected.
*/
int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga)
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
{
struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
psw_t *psw = &vcpu->arch.sie_block->gpsw;
struct trans_exc_code_bits *tec_bits;
union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};

if (!is_low_address(ga) || !low_address_protection_enabled(vcpu))
if (!ctlreg0.lap || !is_low_address(gra))
return 0;

memset(pgm, 0, sizeof(*pgm));
tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
tec_bits->fsi = FSI_STORE;
tec_bits->as = psw_bits(*psw).as;
tec_bits->addr = ga >> PAGE_SHIFT;
tec_bits->addr = gra >> PAGE_SHIFT;
pgm->code = PGM_PROTECTION;

return pgm->code;
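The low-address predicate shared by these paths, (ga & ~0x11fful) == 0, accepts exactly the protected first 512 bytes of each of the two prefix pages: 0x0000-0x01ff and 0x1000-0x11ff. A small self-contained check of that mask arithmetic (plain userspace C, an assumption of this example):

#include <assert.h>

static int is_low_address(unsigned long ga)	/* same test as above */
{
	return (ga & ~0x11fful) == 0;
}

int main(void)
{
	assert(is_low_address(0x0000) && is_low_address(0x01ff));
	assert(!is_low_address(0x0200) && !is_low_address(0x0fff));
	assert(is_low_address(0x1000) && is_low_address(0x11ff));
	assert(!is_low_address(0x1200));
	return 0;
}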
@@ -156,9 +156,11 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
}

int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
unsigned long *gpa, int write);
ar_t ar, unsigned long *gpa, int write);
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
unsigned long length, int is_write);

int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len, int write);

int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
@@ -168,6 +170,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* write_guest - copy data from kernel space to guest space
* @vcpu: virtual cpu
* @ga: guest address
* @ar: access register
* @data: source address in kernel space
* @len: number of bytes to copy
*
@@ -176,8 +179,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* If DAT is off data will be copied to guest real or absolute memory.
* If DAT is on data will be copied to the address space as specified by
* the address space bits of the PSW:
* Primary, secondory or home space (access register mode is currently not
* implemented).
* Primary, secondary, home space or access register mode.
* The addressing mode of the PSW is also inspected, so that address wrap
* around is taken into account for 24-, 31- and 64-bit addressing mode,
* if the to be copied data crosses page boundaries in guest address space.
@@ -210,16 +212,17 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
* if data has been changed in guest space in case of an exception.
*/
static inline __must_check
int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len)
{
return access_guest(vcpu, ga, data, len, 1);
return access_guest(vcpu, ga, ar, data, len, 1);
}

/**
* read_guest - copy data from guest space to kernel space
* @vcpu: virtual cpu
* @ga: guest address
* @ar: access register
* @data: destination address in kernel space
* @len: number of bytes to copy
*
@@ -229,10 +232,10 @@ int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
* data will be copied from guest space to kernel space.
*/
static inline __must_check
int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
unsigned long len)
{
return access_guest(vcpu, ga, data, len, 0);
return access_guest(vcpu, ga, ar, data, len, 0);
}
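With the new ar_t parameter, callers thread the access register that qualified the operand address straight through to these copy helpers. A hypothetical handler fragment in the style of the converted interception handlers elsewhere in this series (a sketch, not part of the patch):

/* Read an 8-byte operand from a guest logical address, honouring the
 * access register that formed it. rc > 0 is a program interruption
 * code and is forwarded to the guest; rc < 0 is a host error. */
static int demo_read_operand(struct kvm_vcpu *vcpu, u64 *val)
{
	ar_t ar;
	u64 ga = kvm_s390_get_base_disp_s(vcpu, &ar);
	int rc = read_guest(vcpu, ga, ar, val, sizeof(*val));

	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	return 0;
}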

/**
@@ -330,6 +333,6 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
void ipte_lock(struct kvm_vcpu *vcpu);
void ipte_unlock(struct kvm_vcpu *vcpu);
int ipte_lock_held(struct kvm_vcpu *vcpu);
int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga);
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);

#endif /* __KVM_S390_GACCESS_H */

@@ -191,8 +191,8 @@ static int __import_wp_info(struct kvm_vcpu *vcpu,
if (!wp_info->old_data)
return -ENOMEM;
/* try to backup the original value */
ret = read_guest(vcpu, wp_info->phys_addr, wp_info->old_data,
wp_info->len);
ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data,
wp_info->len);
if (ret) {
kfree(wp_info->old_data);
wp_info->old_data = NULL;
@@ -362,8 +362,8 @@ static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
continue;

/* refetch the wp data and compare it to the old value */
if (!read_guest(vcpu, wp_info->phys_addr, temp,
wp_info->len)) {
if (!read_guest_abs(vcpu, wp_info->phys_addr, temp,
wp_info->len)) {
if (memcmp(temp, wp_info->old_data, wp_info->len)) {
kfree(temp);
return wp_info;

@@ -165,6 +165,7 @@ static void __extract_prog_irq(struct kvm_vcpu *vcpu,
pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn;
pgm_info->mon_code = vcpu->arch.sie_block->tecmc;
break;
case PGM_VECTOR_PROCESSING:
case PGM_DATA:
pgm_info->data_exc_code = vcpu->arch.sie_block->dxc;
break;
@@ -319,7 +320,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)

/* Make sure that the source is paged-in */
rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
&srcaddr, 0);
reg2, &srcaddr, 0);
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
@@ -328,7 +329,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)

/* Make sure that the destination is paged-in */
rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
&dstaddr, 1);
reg1, &dstaddr, 1);
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);

(File diff suppressed because it is too large)

@@ -25,11 +25,13 @@
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -38,6 +40,11 @@
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
(KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
@@ -87,6 +94,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
@@ -101,8 +109,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
0xff82fffbf4fc2000UL,
0x005c000000000000UL,
0xffe6fffbfcfdfc40UL,
0x205c800000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
@@ -171,9 +179,16 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_IRQCHIP:
case KVM_CAP_VM_ATTRIBUTES:
case KVM_CAP_MP_STATE:
case KVM_CAP_S390_INJECT_IRQ:
case KVM_CAP_S390_USER_SIGP:
case KVM_CAP_S390_USER_STSI:
case KVM_CAP_S390_SKEYS:
case KVM_CAP_S390_IRQ_STATE:
r = 1;
break;
case KVM_CAP_S390_MEM_OP:
r = MEM_OP_MAX_SIZE;
break;
case KVM_CAP_NR_VCPUS:
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
@@ -184,6 +199,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_COW:
r = MACHINE_HAS_ESOP;
break;
case KVM_CAP_S390_VECTOR_REGISTERS:
r = MACHINE_HAS_VX;
break;
default:
r = 0;
}
@@ -264,6 +282,18 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
kvm->arch.user_sigp = 1;
r = 0;
break;
case KVM_CAP_S390_VECTOR_REGISTERS:
if (MACHINE_HAS_VX) {
set_kvm_facility(kvm->arch.model.fac->mask, 129);
set_kvm_facility(kvm->arch.model.fac->list, 129);
r = 0;
} else
r = -EINVAL;
break;
case KVM_CAP_S390_USER_STSI:
kvm->arch.user_stsi = 1;
r = 0;
break;
default:
r = -EINVAL;
break;
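From userspace, the new vector-register capability is turned on per VM with KVM_ENABLE_CAP before the vcpus are set up; a sketch under the assumption of an already-open VM fd (error handling elided):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

int enable_vx(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_S390_VECTOR_REGISTERS;
	/* fails with EINVAL when the host has no vector facility */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}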
@@ -708,6 +738,108 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
return ret;
}

static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
uint8_t *keys;
uint64_t hva;
unsigned long curkey;
int i, r = 0;

if (args->flags != 0)
return -EINVAL;

/* Is this guest using storage keys? */
if (!mm_use_skey(current->mm))
return KVM_S390_GET_SKEYS_NONE;

/* Enforce sane limit on memory allocation */
if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
return -EINVAL;

keys = kmalloc_array(args->count, sizeof(uint8_t),
GFP_KERNEL | __GFP_NOWARN);
if (!keys)
keys = vmalloc(sizeof(uint8_t) * args->count);
if (!keys)
return -ENOMEM;

for (i = 0; i < args->count; i++) {
hva = gfn_to_hva(kvm, args->start_gfn + i);
if (kvm_is_error_hva(hva)) {
r = -EFAULT;
goto out;
}

curkey = get_guest_storage_key(current->mm, hva);
if (IS_ERR_VALUE(curkey)) {
r = curkey;
goto out;
}
keys[i] = curkey;
}

r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
sizeof(uint8_t) * args->count);
if (r)
r = -EFAULT;
out:
kvfree(keys);
return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
uint8_t *keys;
uint64_t hva;
int i, r = 0;

if (args->flags != 0)
return -EINVAL;

/* Enforce sane limit on memory allocation */
if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
return -EINVAL;

keys = kmalloc_array(args->count, sizeof(uint8_t),
GFP_KERNEL | __GFP_NOWARN);
if (!keys)
keys = vmalloc(sizeof(uint8_t) * args->count);
if (!keys)
return -ENOMEM;

r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
sizeof(uint8_t) * args->count);
if (r) {
r = -EFAULT;
goto out;
}

/* Enable storage key handling for the guest */
s390_enable_skey();

for (i = 0; i < args->count; i++) {
hva = gfn_to_hva(kvm, args->start_gfn + i);
if (kvm_is_error_hva(hva)) {
r = -EFAULT;
goto out;
}

/* Lowest order bit is reserved */
if (keys[i] & 0x01) {
r = -EINVAL;
goto out;
}

r = set_guest_storage_key(current->mm, hva,
(unsigned long)keys[i], 0);
if (r)
goto out;
}
out:
kvfree(keys);
return r;
}
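Userspace drives the two new ioctls with struct kvm_s390_skeys; a hedged sketch for the read side (the VM fd and buffer management are assumptions of the example):

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Fetch the storage keys of `count` frames starting at guest frame 0.
 * Returns 0 on success, KVM_S390_GET_SKEYS_NONE when the guest never
 * enabled storage keys, or -1 with errno set on error. */
int fetch_skeys(int vm_fd, uint8_t *keys, uint64_t count)
{
	struct kvm_s390_skeys args = {
		.start_gfn = 0,
		.count = count,	/* must be 1..KVM_S390_SKEYS_MAX */
		.skeydata_addr = (uint64_t)(unsigned long)keys,
	};

	return ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
}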

long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -767,6 +899,26 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_s390_vm_has_attr(kvm, &attr);
break;
}
case KVM_S390_GET_SKEYS: {
struct kvm_s390_skeys args;

r = -EFAULT;
if (copy_from_user(&args, argp,
sizeof(struct kvm_s390_skeys)))
break;
r = kvm_s390_get_skeys(kvm, &args);
break;
}
case KVM_S390_SET_SKEYS: {
struct kvm_s390_skeys args;

r = -EFAULT;
if (copy_from_user(&args, argp,
sizeof(struct kvm_s390_skeys)))
break;
r = kvm_s390_set_skeys(kvm, &args);
break;
}
default:
r = -ENOTTY;
}
@@ -887,7 +1039,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)

kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
if (!kvm->arch.dbf)
goto out_nodbf;
goto out_err;

/*
* The architectural maximum amount of facilities is 16 kbit. To store
@@ -899,7 +1051,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.model.fac =
(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!kvm->arch.model.fac)
goto out_nofac;
goto out_err;

/* Populate the facility mask initially. */
memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
@@ -919,10 +1071,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;

if (kvm_s390_crypto_init(kvm) < 0)
goto out_crypto;
goto out_err;

spin_lock_init(&kvm->arch.float_int.lock);
INIT_LIST_HEAD(&kvm->arch.float_int.list);
for (i = 0; i < FIRQ_LIST_COUNT; i++)
INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
init_waitqueue_head(&kvm->arch.ipte_wq);
mutex_init(&kvm->arch.ipte_mutex);

@@ -934,7 +1087,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
} else {
kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
if (!kvm->arch.gmap)
goto out_nogmap;
goto out_err;
kvm->arch.gmap->private = kvm;
kvm->arch.gmap->pfault_enabled = 0;
}
@@ -946,15 +1099,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
spin_lock_init(&kvm->arch.start_stop_lock);

return 0;
out_nogmap:
kfree(kvm->arch.crypto.crycb);
out_crypto:
free_page((unsigned long)kvm->arch.model.fac);
out_nofac:
debug_unregister(kvm->arch.dbf);
out_nodbf:
free_page((unsigned long)(kvm->arch.sca));
out_err:
kfree(kvm->arch.crypto.crycb);
free_page((unsigned long)kvm->arch.model.fac);
debug_unregister(kvm->arch.dbf);
free_page((unsigned long)(kvm->arch.sca));
return rc;
}

@@ -1034,6 +1183,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
KVM_SYNC_CRS |
KVM_SYNC_ARCH0 |
KVM_SYNC_PFAULT;
if (test_kvm_facility(vcpu->kvm, 129))
vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;

if (kvm_is_ucontrol(vcpu->kvm))
return __kvm_ucontrol_vcpu_init(vcpu);
@@ -1044,10 +1195,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
save_fp_regs(vcpu->arch.host_fpregs.fprs);
if (test_kvm_facility(vcpu->kvm, 129))
save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
else
save_fp_regs(vcpu->arch.host_fpregs.fprs);
save_access_regs(vcpu->arch.host_acrs);
restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
if (test_kvm_facility(vcpu->kvm, 129)) {
restore_fp_ctl(&vcpu->run->s.regs.fpc);
restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
} else {
restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
}
restore_access_regs(vcpu->run->s.regs.acrs);
gmap_enable(vcpu->arch.gmap);
atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
@@ -1057,11 +1216,19 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
gmap_disable(vcpu->arch.gmap);
save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
save_fp_regs(vcpu->arch.guest_fpregs.fprs);
if (test_kvm_facility(vcpu->kvm, 129)) {
save_fp_ctl(&vcpu->run->s.regs.fpc);
save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
} else {
save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
save_fp_regs(vcpu->arch.guest_fpregs.fprs);
}
save_access_regs(vcpu->run->s.regs.acrs);
restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
restore_fp_regs(vcpu->arch.host_fpregs.fprs);
if (test_kvm_facility(vcpu->kvm, 129))
restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
else
restore_fp_regs(vcpu->arch.host_fpregs.fprs);
restore_access_regs(vcpu->arch.host_acrs);
}

@@ -1129,6 +1296,15 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

vcpu->arch.cpu_id = model->cpu_id;
vcpu->arch.sie_block->ibc = model->ibc;
vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
int rc = 0;
@@ -1137,6 +1313,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
CPUSTAT_SM |
CPUSTAT_STOPPED |
CPUSTAT_GED);
kvm_s390_vcpu_setup_model(vcpu);

vcpu->arch.sie_block->ecb = 6;
if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
vcpu->arch.sie_block->ecb |= 0x10;
@@ -1147,8 +1325,11 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->eca |= 1;
if (sclp_has_sigpif())
vcpu->arch.sie_block->eca |= 0x10000000U;
vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
ICTL_TPROT;
if (test_kvm_facility(vcpu->kvm, 129)) {
vcpu->arch.sie_block->eca |= 0x00020000;
vcpu->arch.sie_block->ecd |= 0x20000000;
}
vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

if (kvm_s390_cmma_enabled(vcpu->kvm)) {
rc = kvm_s390_vcpu_setup_cmma(vcpu);
@@ -1158,11 +1339,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

mutex_lock(&vcpu->kvm->lock);
vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
mutex_unlock(&vcpu->kvm->lock);

kvm_s390_vcpu_crypto_setup(vcpu);

return rc;
@@ -1190,6 +1366,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,

vcpu->arch.sie_block = &sie_page->sie_block;
vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
vcpu->arch.host_vregs = &sie_page->vregs;

vcpu->arch.sie_block->icpua = id;
if (!kvm_is_ucontrol(kvm)) {
@@ -1205,7 +1382,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
}
vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list;

spin_lock_init(&vcpu->arch.local_int.lock);
vcpu->arch.local_int.float_int = &kvm->arch.float_int;
@@ -1725,6 +1901,31 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
return 0;
}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
u8 opcode;
int rc;

VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
trace_kvm_s390_sie_fault(vcpu);

/*
* We want to inject an addressing exception, which is defined as a
* suppressing or terminating exception. However, since we came here
* by a DAT access exception, the PSW still points to the faulting
* instruction since DAT exceptions are nullifying. So we've got
* to look up the current opcode to get the length of the instruction
* to be able to forward the PSW.
*/
rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
psw->addr = __rewind_psw(*psw, -insn_length(opcode));

return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
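The PSW rewind relies on the s390 convention that the two top bits of the first opcode byte encode the instruction length (00 is 2 bytes, 01/10 is 4, 11 is 6). A standalone sketch of the arithmetic behind insn_length(), reproduced here purely for illustration:

#include <assert.h>

static int insn_length(unsigned char code)
{
	return ((((int)code + 64) >> 7) + 1) << 1;
}

int main(void)
{
	assert(insn_length(0x0a) == 2);	/* top bits 00 */
	assert(insn_length(0x58) == 4);	/* top bits 01 */
	assert(insn_length(0xb2) == 4);	/* top bits 10 */
	assert(insn_length(0xe3) == 6);	/* top bits 11 */
	return 0;
}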

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
int rc = -1;
@@ -1756,11 +1957,8 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
}
}

if (rc == -1) {
VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
trace_kvm_s390_sie_fault(vcpu);
rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
if (rc == -1)
rc = vcpu_post_run_fault_in_sie(vcpu);

memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

@@ -1976,6 +2174,35 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
return kvm_s390_store_status_unloaded(vcpu, addr);
}

/*
* store additional status at address
*/
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
unsigned long gpa)
{
/* Only bits 0-53 are used for address formation */
if (!(gpa & ~0x3ff))
return 0;

return write_guest_abs(vcpu, gpa & ~0x3ff,
(void *)&vcpu->run->s.regs.vrs, 512);
}

int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
if (!test_kvm_facility(vcpu->kvm, 129))
return 0;

/*
* The guest VXRS are in the host VXRs due to the lazy
* copying in vcpu load/put. Let's update our copies before we save
* it into the save area.
*/
save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);

return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
@@ -2100,6 +2327,65 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
return r;
}

static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
struct kvm_s390_mem_op *mop)
{
void __user *uaddr = (void __user *)mop->buf;
void *tmpbuf = NULL;
int r, srcu_idx;
const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
| KVM_S390_MEMOP_F_CHECK_ONLY;

if (mop->flags & ~supported_flags)
return -EINVAL;

if (mop->size > MEM_OP_MAX_SIZE)
return -E2BIG;

if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
tmpbuf = vmalloc(mop->size);
if (!tmpbuf)
return -ENOMEM;
}

srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

switch (mop->op) {
case KVM_S390_MEMOP_LOGICAL_READ:
if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
break;
}
r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
if (r == 0) {
if (copy_to_user(uaddr, tmpbuf, mop->size))
r = -EFAULT;
}
break;
case KVM_S390_MEMOP_LOGICAL_WRITE:
if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
break;
}
if (copy_from_user(tmpbuf, uaddr, mop->size)) {
r = -EFAULT;
break;
}
r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
break;
default:
r = -EINVAL;
}

srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

vfree(tmpbuf);
return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -2109,6 +2395,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
long r;

switch (ioctl) {
case KVM_S390_IRQ: {
struct kvm_s390_irq s390irq;

r = -EFAULT;
if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
break;
r = kvm_s390_inject_vcpu(vcpu, &s390irq);
break;
}
case KVM_S390_INTERRUPT: {
struct kvm_s390_interrupt s390int;
struct kvm_s390_irq s390irq;
@@ -2199,6 +2494,47 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
break;
}
case KVM_S390_MEM_OP: {
struct kvm_s390_mem_op mem_op;

if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
r = kvm_s390_guest_mem_op(vcpu, &mem_op);
else
r = -EFAULT;
break;
}
case KVM_S390_SET_IRQ_STATE: {
struct kvm_s390_irq_state irq_state;

r = -EFAULT;
if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
break;
if (irq_state.len > VCPU_IRQS_MAX_BUF ||
irq_state.len == 0 ||
irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
r = -EINVAL;
break;
}
r = kvm_s390_set_irq_state(vcpu,
(void __user *) irq_state.buf,
irq_state.len);
break;
}
case KVM_S390_GET_IRQ_STATE: {
struct kvm_s390_irq_state irq_state;

r = -EFAULT;
if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
break;
if (irq_state.len == 0) {
r = -EINVAL;
break;
}
r = kvm_s390_get_irq_state(vcpu,
(__u8 __user *) irq_state.buf,
irq_state.len);
break;
}
default:
r = -ENOTTY;
}
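For migration, the new IRQ-state ioctls move the vcpu-local interrupt state through a flat buffer of struct kvm_s390_irq entries; a hedged save-side sketch (fd handling assumed):

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Returns the number of bytes written to buf, or -1 with errno set
 * (ENOBUFS when len is too small for the pending interrupts). The SET
 * variant additionally requires len to be a multiple of
 * sizeof(struct kvm_s390_irq). */
int save_local_irqs(int vcpu_fd, struct kvm_s390_irq *buf, uint32_t len)
{
	struct kvm_s390_irq_state state = {
		.buf = (uint64_t)(unsigned long)buf,
		.len = len,
	};

	return ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &state);
}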

@@ -70,16 +70,22 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
}

static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu)
typedef u8 __bitwise ar_t;

static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
{
u32 base2 = vcpu->arch.sie_block->ipb >> 28;
u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

if (ar)
*ar = base2;

return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}
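A worked example of the base/displacement decode now shared with the AR extraction: for an S-format instruction, bits 0-3 of ipb's high word name the base register (and thus the access register) and bits 4-15 hold the 12-bit displacement. A standalone check with an illustrative ipb value:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t ipb = 0x5f2c0000;	/* illustrative encoding */
	uint32_t base2 = ipb >> 28;			/* register 5 */
	uint32_t disp2 = (ipb & 0x0fff0000) >> 16;	/* 0xf2c */

	assert(base2 == 0x5 && disp2 == 0xf2c);
	/* the ar_t handed back by the helpers above is simply base2 */
	return 0;
}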

static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
u64 *address1, u64 *address2)
u64 *address1, u64 *address2,
ar_t *ar_b1, ar_t *ar_b2)
{
u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
@@ -88,6 +94,11 @@ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,

*address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
*address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;

if (ar_b1)
*ar_b1 = base1;
if (ar_b2)
*ar_b2 = base2;
}

static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
@@ -98,7 +109,7 @@ static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2
*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
}

static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
{
u32 base2 = vcpu->arch.sie_block->ipb >> 28;
u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
@@ -107,14 +118,20 @@ static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu)
if (disp2 & 0x80000)
disp2+=0xfff00000;

if (ar)
*ar = base2;

return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
}

static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu)
static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar)
{
u32 base2 = vcpu->arch.sie_block->ipb >> 28;
u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

if (ar)
*ar = base2;

return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

@@ -125,13 +142,24 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
vcpu->arch.sie_block->gpsw.mask |= cc << 44;
}

/* test availability of facility in a kvm intance */
/* test availability of facility in a kvm instance */
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
return __test_facility(nr, kvm->arch.model.fac->mask) &&
__test_facility(nr, kvm->arch.model.fac->list);
}

static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
{
unsigned char *ptr;

if (nr >= MAX_FACILITY_BIT)
return -EINVAL;
ptr = (unsigned char *) fac_list + (nr >> 3);
*ptr |= (0x80UL >> (nr & 7));
return 0;
}
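The facility list is a big-endian bit string, so facility nr lives in byte nr >> 3 at mask 0x80 >> (nr & 7). A worked example for the vector facility (129) that the capability code above sets:

#include <assert.h>

int main(void)
{
	unsigned char fac_list[32] = { 0 };
	unsigned long nr = 129;	/* vector facility */

	fac_list[nr >> 3] |= 0x80 >> (nr & 7);
	assert((nr >> 3) == 16 && fac_list[16] == 0x40);
	return 0;
}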

/* are cpu states controlled by user space */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
@@ -150,9 +178,9 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
struct kvm_s390_irq *irq);
int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
u64 cr6, u64 schid);
void kvm_s390_reinject_io_int(struct kvm *kvm,
struct kvm_s390_interrupt_info *inti);
u64 isc_mask, u32 schid);
int kvm_s390_reinject_io_int(struct kvm *kvm,
struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);

/* implemented in intercept.c */
@@ -177,7 +205,10 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
/* implemented in kvm-s390.c */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void s390_vcpu_block(struct kvm_vcpu *vcpu);
@@ -241,6 +272,10 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
void __user *buf, int len);
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
__u8 __user *buf, int len);

/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);

@@ -36,15 +36,16 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
struct kvm_vcpu *cpup;
s64 hostclk, val;
int i, rc;
ar_t ar;
u64 op2;

if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

op2 = kvm_s390_get_base_disp_s(vcpu);
op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
if (op2 & 7) /* Operand must be on a doubleword boundary */
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
rc = read_guest(vcpu, op2, &val, sizeof(val));
rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);

@@ -68,20 +69,21 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
u64 operand2;
u32 address;
int rc;
ar_t ar;

vcpu->stat.instruction_spx++;

if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

operand2 = kvm_s390_get_base_disp_s(vcpu);
operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

/* must be word boundary */
if (operand2 & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

/* get the value */
rc = read_guest(vcpu, operand2, &address, sizeof(address));
rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);

@@ -107,13 +109,14 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
u64 operand2;
u32 address;
int rc;
ar_t ar;

vcpu->stat.instruction_stpx++;

if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

operand2 = kvm_s390_get_base_disp_s(vcpu);
operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

/* must be word boundary */
if (operand2 & 3)
@@ -122,7 +125,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu)
address = kvm_s390_get_prefix(vcpu);

/* get the value */
rc = write_guest(vcpu, operand2, &address, sizeof(address));
rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);

@@ -136,18 +139,19 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
u16 vcpu_id = vcpu->vcpu_id;
u64 ga;
int rc;
ar_t ar;

vcpu->stat.instruction_stap++;

if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

ga = kvm_s390_get_base_disp_s(vcpu);
ga = kvm_s390_get_base_disp_s(vcpu, &ar);

if (ga & 1)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id));
rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);

@@ -207,7 +211,7 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
addr = kvm_s390_logical_to_effective(vcpu, addr);
if (kvm_s390_check_low_addr_protection(vcpu, addr))
if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
addr = kvm_s390_real_to_abs(vcpu, addr);

@@ -229,18 +233,20 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
struct kvm_s390_interrupt_info *inti;
unsigned long len;
u32 tpi_data[3];
int cc, rc;
int rc;
u64 addr;
ar_t ar;

rc = 0;
addr = kvm_s390_get_base_disp_s(vcpu);
addr = kvm_s390_get_base_disp_s(vcpu, &ar);
if (addr & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
cc = 0;

inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
if (!inti)
goto no_interrupt;
cc = 1;
if (!inti) {
kvm_s390_set_psw_cc(vcpu, 0);
return 0;
}

tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
tpi_data[1] = inti->io.io_int_parm;
tpi_data[2] = inti->io.io_int_word;
@@ -250,40 +256,51 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
* provided area.
*/
len = sizeof(tpi_data) - 4;
rc = write_guest(vcpu, addr, &tpi_data, len);
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
rc = write_guest(vcpu, addr, ar, &tpi_data, len);
if (rc) {
rc = kvm_s390_inject_prog_cond(vcpu, rc);
goto reinject_interrupt;
}
} else {
/*
* Store the three-word I/O interruption code into
* the appropriate lowcore area.
*/
len = sizeof(tpi_data);
if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
/* failed writes to the low core are not recoverable */
rc = -EFAULT;
goto reinject_interrupt;
}
}

/* irq was successfully handed to the guest */
kfree(inti);
kvm_s390_set_psw_cc(vcpu, 1);
return 0;
reinject_interrupt:
/*
* If we encounter a problem storing the interruption code, the
* instruction is suppressed from the guest's view: reinject the
* interrupt.
*/
if (!rc)
if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
kfree(inti);
else
kvm_s390_reinject_io_int(vcpu->kvm, inti);
no_interrupt:
/* Set condition code and we're done. */
if (!rc)
kvm_s390_set_psw_cc(vcpu, cc);
rc = -EFAULT;
}
/* don't set the cc, a pgm irq was injected or we drop to user space */
return rc ? -EFAULT : 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
struct kvm_s390_interrupt_info *inti;
struct kvm_s390_interrupt_info *inti = NULL;
const u64 isc_mask = 0xffUL << 24; /* all iscs set */

inti = kvm_s390_get_io_int(vcpu->kvm, 0,
vcpu->run->s.regs.gprs[1]);
/* a valid schid has at least one bit set */
if (vcpu->run->s.regs.gprs[1])
inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
vcpu->run->s.regs.gprs[1]);

/*
* Prepare exit to userspace.
@@ -386,15 +403,16 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
psw_compat_t new_psw;
u64 addr;
int rc;
ar_t ar;

if (gpsw->mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

addr = kvm_s390_get_base_disp_s(vcpu);
addr = kvm_s390_get_base_disp_s(vcpu, &ar);
if (addr & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
if (!(new_psw.mask & PSW32_MASK_BASE))
@@ -412,14 +430,15 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
psw_t new_psw;
u64 addr;
int rc;
ar_t ar;

if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

addr = kvm_s390_get_base_disp_s(vcpu);
addr = kvm_s390_get_base_disp_s(vcpu, &ar);
if (addr & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
vcpu->arch.sie_block->gpsw = new_psw;
@@ -433,18 +452,19 @@ static int handle_stidp(struct kvm_vcpu *vcpu)
u64 stidp_data = vcpu->arch.stidp_data;
u64 operand2;
int rc;
ar_t ar;

vcpu->stat.instruction_stidp++;

if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

operand2 = kvm_s390_get_base_disp_s(vcpu);
operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

if (operand2 & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data));
rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);

@@ -467,6 +487,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
for (n = mem->count - 1; n > 0 ; n--)
memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
mem->vm[0].cpus_total = cpus;
mem->vm[0].cpus_configured = cpus;
mem->vm[0].cpus_standby = 0;
@@ -478,6 +499,17 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar,
u8 fc, u8 sel1, u16 sel2)
{
vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
vcpu->run->s390_stsi.addr = addr;
vcpu->run->s390_stsi.ar = ar;
vcpu->run->s390_stsi.fc = fc;
vcpu->run->s390_stsi.sel1 = sel1;
vcpu->run->s390_stsi.sel2 = sel2;
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
@@ -486,6 +518,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
unsigned long mem = 0;
u64 operand2;
int rc = 0;
ar_t ar;

vcpu->stat.instruction_stsi++;
VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
@@ -508,7 +541,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
return 0;
}

operand2 = kvm_s390_get_base_disp_s(vcpu);
operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

if (operand2 & 0xfff)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -532,16 +565,20 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
break;
}

rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE);
rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
if (rc) {
rc = kvm_s390_inject_prog_cond(vcpu, rc);
goto out;
}
if (vcpu->kvm->arch.user_stsi) {
insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
rc = -EREMOTE;
}
trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
free_page(mem);
kvm_s390_set_psw_cc(vcpu, 0);
vcpu->run->s.regs.gprs[0] = 0;
return 0;
return rc;
out_no_data:
kvm_s390_set_psw_cc(vcpu, 3);
out:
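When KVM_CAP_S390_USER_STSI is enabled, the -EREMOTE above surfaces as a KVM_EXIT_S390_STSI exit so the VMM can post-process the level-3 data. A hedged sketch of the userspace side (the mmap'ed kvm_run pointer is an assumption of the example):

#include <linux/kvm.h>

void handle_stsi_exit(struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_S390_STSI)
		return;
	if (run->s390_stsi.fc == 3 && run->s390_stsi.sel1 == 2 &&
	    run->s390_stsi.sel2 == 2) {
		/* run->s390_stsi.addr is the guest address of the block the
		 * kernel already stored; a VMM could patch it (e.g. the VM
		 * names) via KVM_S390_MEM_OP using run->s390_stsi.ar. */
	}
}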
@ -670,7 +707,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
}

if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
if (kvm_s390_check_low_addr_protection(vcpu, start))
if (kvm_s390_check_low_addr_prot_real(vcpu, start))
return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
}

@ -776,13 +813,14 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u32 ctl_array[16];
u64 ga;
ar_t ar;

vcpu->stat.instruction_lctl++;

if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

ga = kvm_s390_get_base_disp_rs(vcpu);
ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

if (ga & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@ -791,7 +829,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

nr_regs = ((reg3 - reg1) & 0xf) + 1;
rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
reg = reg1;
@ -814,13 +852,14 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u32 ctl_array[16];
u64 ga;
ar_t ar;

vcpu->stat.instruction_stctl++;

if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

ga = kvm_s390_get_base_disp_rs(vcpu);
ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

if (ga & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@ -836,7 +875,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
break;
reg = (reg + 1) % 16;
} while (1);
rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

@ -847,13 +886,14 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u64 ctl_array[16];
u64 ga;
ar_t ar;

vcpu->stat.instruction_lctlg++;

if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

ga = kvm_s390_get_base_disp_rsy(vcpu);
ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

if (ga & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@ -862,7 +902,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu)
trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

nr_regs = ((reg3 - reg1) & 0xf) + 1;
rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
reg = reg1;
@ -884,13 +924,14 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
int reg, rc, nr_regs;
u64 ctl_array[16];
u64 ga;
ar_t ar;

vcpu->stat.instruction_stctg++;

if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

ga = kvm_s390_get_base_disp_rsy(vcpu);
ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

if (ga & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@ -906,7 +947,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
break;
reg = (reg + 1) % 16;
} while (1);
rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

@ -931,13 +972,14 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
unsigned long hva, gpa;
int ret = 0, cc = 0;
bool writable;
ar_t ar;

vcpu->stat.instruction_tprot++;

if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);
kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

/* we only handle the Linux memory detection case:
 * access key == 0
@ -946,11 +988,11 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
ipte_lock(vcpu);
ret = guest_translate_address(vcpu, address1, &gpa, 1);
ret = guest_translate_address(vcpu, address1, ar, &gpa, 1);
if (ret == PGM_PROTECTION) {
/* Write protected? Try again with read-only... */
cc = 1;
ret = guest_translate_address(vcpu, address1, &gpa, 0);
ret = guest_translate_address(vcpu, address1, ar, &gpa, 0);
}
if (ret) {
if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
@ -393,6 +393,9 @@ static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
case SIGP_STORE_STATUS_AT_ADDRESS:
vcpu->stat.instruction_sigp_store_status++;
break;
case SIGP_STORE_ADDITIONAL_STATUS:
vcpu->stat.instruction_sigp_store_adtl_status++;
break;
case SIGP_SET_PREFIX:
vcpu->stat.instruction_sigp_prefix++;
break;
@ -431,7 +434,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

order_code = kvm_s390_get_base_disp_rs(vcpu);
order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
if (handle_sigp_order_in_user_space(vcpu, order_code))
return -EOPNOTSUPP;

@ -473,7 +476,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
int r3 = vcpu->arch.sie_block->ipa & 0x000f;
u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
struct kvm_vcpu *dest_vcpu;
u8 order_code = kvm_s390_get_base_disp_rs(vcpu);
u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);

trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
@ -81,11 +81,6 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
@ -345,6 +340,7 @@ struct kvm_pmu {
enum {
KVM_DEBUGREG_BP_ENABLED = 1,
KVM_DEBUGREG_WONT_EXIT = 2,
KVM_DEBUGREG_RELOAD = 4,
};

struct kvm_vcpu_arch {
@ -431,6 +427,9 @@ struct kvm_vcpu_arch {

int cpuid_nent;
struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

int maxphyaddr;

/* emulate context */

struct x86_emulate_ctxt emulate_ctxt;
@ -550,11 +549,20 @@ struct kvm_arch_memory_slot {
struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
};

/*
 * We use as the mode the number of bits allocated in the LDR for the
 * logical processor ID. It happens that these are all powers of two.
 * This makes it very easy to detect cases where the APICs are
 * configured for multiple modes; in that case, we cannot use the map and
 * hence cannot use kvm_irq_delivery_to_apic_fast either.
 */
#define KVM_APIC_MODE_XAPIC_CLUSTER 4
#define KVM_APIC_MODE_XAPIC_FLAT 8
#define KVM_APIC_MODE_X2APIC 16
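
Because the three mode values are distinct powers of two, ORing together the mode of every APIC in the VM yields a value with exactly one bit set when, and only when, all APICs agree. A small standalone sketch of that property; the names here are illustrative, not part of the header:

#include <stdbool.h>
#include <stdint.h>

#define MODE_XAPIC_CLUSTER  4	/* mirrors KVM_APIC_MODE_XAPIC_CLUSTER */
#define MODE_XAPIC_FLAT     8	/* mirrors KVM_APIC_MODE_XAPIC_FLAT */
#define MODE_X2APIC        16	/* mirrors KVM_APIC_MODE_X2APIC */

static bool logical_map_valid(uint8_t mode)
{
	return !(mode & (mode - 1));	/* at most one mode bit set */
}

/* Mixed configuration: one flat-xAPIC vcpu plus one x2APIC vcpu gives
 * mode = MODE_XAPIC_FLAT | MODE_X2APIC = 24, so logical_map_valid()
 * is false and the fast delivery path falls back to the slow one. */
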
struct kvm_apic_map {
struct rcu_head rcu;
u8 ldr_bits;
/* fields below are used to decode ldr values in different modes */
u32 cid_shift, cid_mask, lid_mask, broadcast;
u8 mode;
struct kvm_lapic *phys_map[256];
/* first index is cluster id, second is cpu id in a cluster */
struct kvm_lapic *logical_map[16][16];
@ -859,6 +867,8 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
struct kvm_memory_slot *memslot);
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
@ -933,6 +943,7 @@ struct x86_emulate_ctxt;
int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
@ -1128,7 +1139,6 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
@ -115,7 +115,7 @@ static inline void kvm_spinlock_init(void)

static inline bool kvm_para_available(void)
{
return 0;
return false;
}

static inline unsigned int kvm_arch_para_features(void)

@ -95,6 +95,7 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,

struct pvclock_vsyscall_time_info {
struct pvclock_vcpu_time_info pvti;
u32 migrate_count;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)

@ -67,6 +67,7 @@
#define EXIT_REASON_EPT_VIOLATION 48
#define EXIT_REASON_EPT_MISCONFIG 49
#define EXIT_REASON_INVEPT 50
#define EXIT_REASON_RDTSCP 51
#define EXIT_REASON_PREEMPTION_TIMER 52
#define EXIT_REASON_INVVPID 53
#define EXIT_REASON_WBINVD 54

@ -141,7 +141,46 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
}

static struct pvclock_vsyscall_time_info *pvclock_vdso_info;

static struct pvclock_vsyscall_time_info *
pvclock_get_vsyscall_user_time_info(int cpu)
{
if (!pvclock_vdso_info) {
BUG();
return NULL;
}

return &pvclock_vdso_info[cpu];
}

struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu)
{
return &pvclock_get_vsyscall_user_time_info(cpu)->pvti;
}

#ifdef CONFIG_X86_64
static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l,
void *v)
{
struct task_migration_notifier *mn = v;
struct pvclock_vsyscall_time_info *pvti;

pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu);

/* this is NULL when pvclock vsyscall is not initialized */
if (unlikely(pvti == NULL))
return NOTIFY_DONE;

pvti->migrate_count++;

return NOTIFY_DONE;
}

static struct notifier_block pvclock_migrate = {
.notifier_call = pvclock_task_migrate,
};
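
The notifier above only increments a per-cpu counter; the race fix relies on the reader noticing the bump. A sketch of the reader side of the protocol, not the actual vDSO code; read_pvti_cycles() is a hypothetical stand-in for the real fixmap-based read:

/* Illustrative only: retry a pvclock read if the task migrated mid-read. */
static u64 read_stable(const struct pvclock_vsyscall_time_info *pvti)
{
	u32 version;
	u64 cycles;

	do {
		version = pvti->migrate_count;
		rmb();					/* counter before data  */
		cycles = read_pvti_cycles(&pvti->pvti);	/* hypothetical helper  */
		rmb();					/* data before re-check */
	} while (pvti->migrate_count != version);

	return cycles;
}
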
/*
 * Initialize the generic pvclock vsyscall state. This will allocate
 * a/some page(s) for the per-vcpu pvclock information, set up a
@ -155,12 +194,17 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,

WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE);

pvclock_vdso_info = i;

for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
__set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
__pa(i) + (idx*PAGE_SIZE),
PAGE_KERNEL_VVAR);
}

register_task_migration_notifier(&pvclock_migrate);

return 0;
}
#endif

@ -1,5 +1,5 @@

ccflags-y += -Ivirt/kvm -Iarch/x86/kvm
ccflags-y += -Iarch/x86/kvm

CFLAGS_x86.o := -I.
CFLAGS_svm.o := -I.

@ -104,6 +104,9 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
((best->eax & 0xff00) >> 8) != 0)
return -EINVAL;

/* Update physical-address width */
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);

kvm_pmu_cpuid_update(vcpu);
return 0;
}
@ -135,6 +138,21 @@ static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
}
}

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;

best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
if (!best || best->eax < 0x80000008)
goto not_found;
best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
if (best)
return best->eax & 0xff;
not_found:
return 36;
}
EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);
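
The lookup above reads the guest's physical-address width from CPUID leaf 0x80000008, EAX bits 7:0, falling back to 36 bits when the leaf is absent. A worked example with an illustrative EAX value (C fragment, stdint types assumed):

uint32_t eax = 0x00003028;		/* illustrative CPUID 0x80000008 EAX */
int maxphyaddr = eax & 0xff;		/* 0x28 = 40-bit physical addresses */
uint64_t limit = 1ULL << maxphyaddr;	/* first invalid GPA, here 1 TiB */
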
/* when an old userspace process fills a new kernel module */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
struct kvm_cpuid *cpuid,
@ -757,21 +775,6 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;

best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
if (!best || best->eax < 0x80000008)
goto not_found;
best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
if (best)
return best->eax & 0xff;
not_found:
return 36;
}
EXPORT_SYMBOL_GPL(cpuid_maxphyaddr);

/*
 * If no match is found, check whether we exceed the vCPU's limit
 * and return the content of the highest valid _standard_ leaf instead.

@ -20,13 +20,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
struct kvm_cpuid_entry2 __user *entries);
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.maxphyaddr;
}

static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;

if (!static_cpu_has(X86_FEATURE_XSAVE))
return 0;
return false;

best = kvm_find_cpuid_entry(vcpu, 1, 0);
return best && (best->ecx & bit(X86_FEATURE_XSAVE));
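
Turning cpuid_maxphyaddr() into a cached-field read replaces a CPUID-table walk with a single load, which matters because the value is consulted on hot paths such as the new MAXPHYADDR checks on nested VM-entry elsewhere in this pull. A sketch of the kind of consumer this enables; the helper name is illustrative, not from the patch set:

/* Illustrative hot-path use of the cached width. */
static inline bool gpa_below_maxphyaddr(struct kvm_vcpu *vcpu, u64 gpa)
{
	return !(gpa >> cpuid_maxphyaddr(vcpu));  /* no bits above the limit */
}
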
@ -248,27 +248,7 @@ struct mode_dual {
struct opcode mode64;
};

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

enum x86_transfer_type {
X86_TRANSFER_NONE,
@ -317,7 +297,8 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
@ -478,6 +459,25 @@ static void assign_masked(ulong *dest, ulong src, ulong mask)
*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
switch (bytes) {
case 1:
*(u8 *)reg = (u8)val;
break;
case 2:
*(u16 *)reg = (u16)val;
break;
case 4:
*reg = (u32)val;
break; /* 64b: zero-extend */
case 8:
*reg = val;
break;
}
}
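
On a little-endian x86 host the narrow cases merge into the low bytes of the register, while the 4-byte case deliberately overwrites the whole long to mimic the hardware's zero-extension of 32-bit results. Illustrative values (not from the patch):

unsigned long rax = 0xffffffffffffffffUL;

assign_register(&rax, 0x12, 1);		/* 0xffffffffffffff12: low byte merged */
assign_register(&rax, 0x3456, 2);	/* 0xffffffffffff3456: low word merged */
assign_register(&rax, 0x789abcde, 4);	/* 0x00000000789abcde: zero-extended  */
assign_register(&rax, 0x1122334455667788UL, 8);	/* full overwrite */
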
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
return (1UL << (ctxt->ad_bytes << 3)) - 1;
@ -943,6 +943,22 @@ FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
/* If src is zero, do not write back, but update flags */
if (ctxt->src.val == 0)
ctxt->dst.type = OP_NONE;
return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
/* If src is zero, do not write back, but update flags */
if (ctxt->src.val == 0)
ctxt->dst.type = OP_NONE;
return fastop(ctxt, em_bsr);
}
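
This matches the SDM: with a zero source, BSF and BSR set ZF and leave the destination architecturally undefined, which existing processors implement as leaving it unmodified. The wrappers therefore suppress the writeback (OP_NONE) while still letting the fastop update the flags. Behaviour sketch, not code from the patch:

/* dst = 7, src = 0    ->  ZF = 1, dst stays 7 (OP_NONE, no writeback) */
/* dst = 7, src = 0x10 ->  ZF = 0, dst = 4 (index of the lowest set bit) */
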
static u8 test_cc(unsigned int condition, unsigned long flags)
{
u8 rc;
@ -1399,7 +1415,7 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
unsigned int in_page, n;
unsigned int count = ctxt->rep_prefix ?
address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
in_page = (ctxt->eflags & EFLG_DF) ?
in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
@ -1412,7 +1428,7 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
}

if (ctxt->rep_prefix && (ctxt->d & String) &&
!(ctxt->eflags & EFLG_DF)) {
!(ctxt->eflags & X86_EFLAGS_DF)) {
ctxt->dst.data = rc->data + rc->pos;
ctxt->dst.type = OP_MEM_STR;
ctxt->dst.count = (rc->end - rc->pos) / size;
@ -1691,21 +1707,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,

static void write_register_operand(struct operand *op)
{
/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
switch (op->bytes) {
case 1:
*(u8 *)op->addr.reg = (u8)op->val;
break;
case 2:
*(u16 *)op->addr.reg = (u16)op->val;
break;
case 4:
*op->addr.reg = (u32)op->val;
break; /* 64b: zero-extend */
case 8:
*op->addr.reg = op->val;
break;
}
return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
@ -1792,32 +1794,34 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
{
int rc;
unsigned long val, change_mask;
int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
int cpl = ctxt->ops->cpl(ctxt);

rc = emulate_pop(ctxt, &val, len);
if (rc != X86EMUL_CONTINUE)
return rc;

change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
X86_EFLAGS_AC | X86_EFLAGS_ID;

switch(ctxt->mode) {
case X86EMUL_MODE_PROT64:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT16:
if (cpl == 0)
change_mask |= EFLG_IOPL;
change_mask |= X86_EFLAGS_IOPL;
if (cpl <= iopl)
change_mask |= EFLG_IF;
change_mask |= X86_EFLAGS_IF;
break;
case X86EMUL_MODE_VM86:
if (iopl < 3)
return emulate_gp(ctxt, 0);
change_mask |= EFLG_IF;
change_mask |= X86_EFLAGS_IF;
break;
default: /* real mode */
change_mask |= (EFLG_IOPL | EFLG_IF);
change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
break;
}
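
The mode switch above encodes the POPF privilege rules. Summarised as a reading aid, not new semantics:

/* protected mode: CPL == 0        -> may change IOPL (and, since 0 <= IOPL, IF)
 *                 0 < CPL <= IOPL -> may change IF but not IOPL
 *                 CPL > IOPL      -> neither; those bits keep their old values
 * vm86 mode:      IOPL < 3        -> #GP(0) instead of a silent merge
 * real mode:      both IOPL and IF are always writable */
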
@ -1918,7 +1922,7 @@ static int em_pusha(struct x86_emulate_ctxt *ctxt)
|
||||
|
||||
static int em_pushf(struct x86_emulate_ctxt *ctxt)
|
||||
{
|
||||
ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM;
|
||||
ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
|
||||
return em_push(ctxt);
|
||||
}
|
||||
|
||||
@ -1926,6 +1930,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
|
||||
{
|
||||
int rc = X86EMUL_CONTINUE;
|
||||
int reg = VCPU_REGS_RDI;
|
||||
u32 val;
|
||||
|
||||
while (reg >= VCPU_REGS_RAX) {
|
||||
if (reg == VCPU_REGS_RSP) {
|
||||
@ -1933,9 +1938,10 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
|
||||
--reg;
|
||||
}
|
||||
|
||||
rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
|
||||
rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
|
||||
if (rc != X86EMUL_CONTINUE)
|
||||
break;
|
||||
assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
|
||||
--reg;
|
||||
}
|
||||
return rc;
|
||||
@ -1956,7 +1962,7 @@ static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
|
||||
if (rc != X86EMUL_CONTINUE)
|
||||
return rc;
|
||||
|
||||
ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
|
||||
ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
|
||||
|
||||
ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
|
||||
rc = em_push(ctxt);
|
||||
@ -2022,10 +2028,14 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
|
||||
unsigned long temp_eip = 0;
|
||||
unsigned long temp_eflags = 0;
|
||||
unsigned long cs = 0;
|
||||
unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
|
||||
EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
|
||||
EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
|
||||
unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
|
||||
unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
|
||||
X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
|
||||
X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
|
||||
X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
|
||||
X86_EFLAGS_AC | X86_EFLAGS_ID |
|
||||
X86_EFLAGS_FIXED;
|
||||
unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
|
||||
X86_EFLAGS_VIP;
|
||||
|
||||
/* TODO: Add stack limit check */
|
||||
|
||||
@ -2054,7 +2064,6 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
|
||||
|
||||
ctxt->_eip = temp_eip;
|
||||
|
||||
|
||||
if (ctxt->op_bytes == 4)
|
||||
ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
|
||||
else if (ctxt->op_bytes == 2) {
|
||||
@ -2063,7 +2072,7 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
|
||||
}
|
||||
|
||||
ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
|
||||
ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
|
||||
ctxt->eflags |= X86_EFLAGS_FIXED;
|
||||
ctxt->ops->set_nmi_mask(ctxt, false);
|
||||
|
||||
return rc;
|
||||
@ -2145,12 +2154,12 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
|
||||
((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
|
||||
*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
|
||||
*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
|
||||
ctxt->eflags &= ~EFLG_ZF;
|
||||
ctxt->eflags &= ~X86_EFLAGS_ZF;
|
||||
} else {
|
||||
ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
|
||||
(u32) reg_read(ctxt, VCPU_REGS_RBX);
|
||||
|
||||
ctxt->eflags |= EFLG_ZF;
|
||||
ctxt->eflags |= X86_EFLAGS_ZF;
|
||||
}
|
||||
return X86EMUL_CONTINUE;
|
||||
}
|
||||
@ -2222,7 +2231,7 @@ static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
|
||||
ctxt->src.val = ctxt->dst.orig_val;
|
||||
fastop(ctxt, em_cmp);
|
||||
|
||||
if (ctxt->eflags & EFLG_ZF) {
|
||||
if (ctxt->eflags & X86_EFLAGS_ZF) {
|
||||
/* Success: write back to memory; no update of EAX */
|
||||
ctxt->src.type = OP_NONE;
|
||||
ctxt->dst.val = ctxt->src.orig_val;
|
||||
@ -2381,14 +2390,14 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
|
||||
|
||||
ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
|
||||
ctxt->eflags &= ~msr_data;
|
||||
ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
|
||||
ctxt->eflags |= X86_EFLAGS_FIXED;
|
||||
#endif
|
||||
} else {
|
||||
/* legacy mode */
|
||||
ops->get_msr(ctxt, MSR_STAR, &msr_data);
|
||||
ctxt->_eip = (u32)msr_data;
|
||||
|
||||
ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
|
||||
ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
|
||||
}
|
||||
|
||||
return X86EMUL_CONTINUE;
|
||||
@ -2425,8 +2434,8 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
|
||||
if ((msr_data & 0xfffc) == 0x0)
|
||||
return emulate_gp(ctxt, 0);
|
||||
|
||||
ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
|
||||
cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
|
||||
ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
|
||||
cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
|
||||
ss_sel = cs_sel + 8;
|
||||
if (efer & EFER_LMA) {
|
||||
cs.d = 0;
|
||||
@ -2493,8 +2502,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
|
||||
return emulate_gp(ctxt, 0);
|
||||
break;
|
||||
}
|
||||
cs_sel |= SELECTOR_RPL_MASK;
|
||||
ss_sel |= SELECTOR_RPL_MASK;
|
||||
cs_sel |= SEGMENT_RPL_MASK;
|
||||
ss_sel |= SEGMENT_RPL_MASK;
|
||||
|
||||
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
|
||||
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
|
||||
@ -2512,7 +2521,7 @@ static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
|
||||
return false;
|
||||
if (ctxt->mode == X86EMUL_MODE_VM86)
|
||||
return true;
|
||||
iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
|
||||
iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
|
||||
return ctxt->ops->cpl(ctxt) > iopl;
|
||||
}
|
||||
|
||||
@ -2782,10 +2791,8 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
|
||||
return ret;
|
||||
ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
|
||||
X86_TRANSFER_TASK_SWITCH, NULL);
|
||||
if (ret != X86EMUL_CONTINUE)
|
||||
return ret;
|
||||
|
||||
return X86EMUL_CONTINUE;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
|
||||
@ -2954,7 +2961,7 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
|
||||
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
|
||||
struct operand *op)
|
||||
{
|
||||
int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
|
||||
int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
|
||||
|
||||
register_address_increment(ctxt, reg, df * op->bytes);
|
||||
op->addr.mem.ea = register_address(ctxt, reg);
|
||||
@ -3323,7 +3330,7 @@ static int em_clts(struct x86_emulate_ctxt *ctxt)
|
||||
return X86EMUL_CONTINUE;
|
||||
}
|
||||
|
||||
static int em_vmcall(struct x86_emulate_ctxt *ctxt)
|
||||
static int em_hypercall(struct x86_emulate_ctxt *ctxt)
|
||||
{
|
||||
int rc = ctxt->ops->fix_hypercall(ctxt);
|
||||
|
||||
@ -3395,17 +3402,6 @@ static int em_lgdt(struct x86_emulate_ctxt *ctxt)
|
||||
return em_lgdt_lidt(ctxt, true);
|
||||
}
|
||||
|
||||
static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = ctxt->ops->fix_hypercall(ctxt);
|
||||
|
||||
/* Disable writeback. */
|
||||
ctxt->dst.type = OP_NONE;
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int em_lidt(struct x86_emulate_ctxt *ctxt)
|
||||
{
|
||||
return em_lgdt_lidt(ctxt, false);
|
||||
@ -3504,7 +3500,8 @@ static int em_sahf(struct x86_emulate_ctxt *ctxt)
|
||||
{
|
||||
u32 flags;
|
||||
|
||||
flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
|
||||
flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
|
||||
X86_EFLAGS_SF;
|
||||
flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
|
||||
|
||||
ctxt->eflags &= ~0xffUL;
|
||||
@ -3769,7 +3766,7 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
|
||||
|
||||
static const struct opcode group7_rm0[] = {
|
||||
N,
|
||||
I(SrcNone | Priv | EmulateOnUD, em_vmcall),
|
||||
I(SrcNone | Priv | EmulateOnUD, em_hypercall),
|
||||
N, N, N, N, N, N,
|
||||
};
|
||||
|
||||
@ -3781,7 +3778,7 @@ static const struct opcode group7_rm1[] = {
|
||||
|
||||
static const struct opcode group7_rm3[] = {
|
||||
DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
|
||||
II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
|
||||
II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
|
||||
DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
|
||||
DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
|
||||
DIP(SrcNone | Prot | Priv, stgi, check_svme),
|
||||
@ -4192,7 +4189,8 @@ static const struct opcode twobyte_table[256] = {
|
||||
N, N,
|
||||
G(BitOp, group8),
|
||||
F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
|
||||
F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
|
||||
I(DstReg | SrcMem | ModRM, em_bsf_c),
|
||||
I(DstReg | SrcMem | ModRM, em_bsr_c),
|
||||
D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
|
||||
/* 0xC0 - 0xC7 */
|
||||
F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
|
||||
@ -4759,9 +4757,9 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
|
||||
if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
|
||||
(ctxt->b == 0xae) || (ctxt->b == 0xaf))
|
||||
&& (((ctxt->rep_prefix == REPE_PREFIX) &&
|
||||
((ctxt->eflags & EFLG_ZF) == 0))
|
||||
((ctxt->eflags & X86_EFLAGS_ZF) == 0))
|
||||
|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
|
||||
((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
|
||||
((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
@ -4913,7 +4911,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
|
||||
/* All REP prefixes have the same first termination condition */
|
||||
if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
|
||||
ctxt->eip = ctxt->_eip;
|
||||
ctxt->eflags &= ~EFLG_RF;
|
||||
ctxt->eflags &= ~X86_EFLAGS_RF;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
@ -4963,9 +4961,9 @@ special_insn:
|
||||
}
|
||||
|
||||
if (ctxt->rep_prefix && (ctxt->d & String))
|
||||
ctxt->eflags |= EFLG_RF;
|
||||
ctxt->eflags |= X86_EFLAGS_RF;
|
||||
else
|
||||
ctxt->eflags &= ~EFLG_RF;
|
||||
ctxt->eflags &= ~X86_EFLAGS_RF;
|
||||
|
||||
if (ctxt->execute) {
|
||||
if (ctxt->d & Fastop) {
|
||||
@ -5014,7 +5012,7 @@ special_insn:
|
||||
rc = emulate_int(ctxt, ctxt->src.val);
|
||||
break;
|
||||
case 0xce: /* into */
|
||||
if (ctxt->eflags & EFLG_OF)
|
||||
if (ctxt->eflags & X86_EFLAGS_OF)
|
||||
rc = emulate_int(ctxt, 4);
|
||||
break;
|
||||
case 0xe9: /* jmp rel */
|
||||
@ -5027,19 +5025,19 @@ special_insn:
|
||||
break;
|
||||
case 0xf5: /* cmc */
|
||||
/* complement carry flag from eflags reg */
|
||||
ctxt->eflags ^= EFLG_CF;
|
||||
ctxt->eflags ^= X86_EFLAGS_CF;
|
||||
break;
|
||||
case 0xf8: /* clc */
|
||||
ctxt->eflags &= ~EFLG_CF;
|
||||
ctxt->eflags &= ~X86_EFLAGS_CF;
|
||||
break;
|
||||
case 0xf9: /* stc */
|
||||
ctxt->eflags |= EFLG_CF;
|
||||
ctxt->eflags |= X86_EFLAGS_CF;
|
||||
break;
|
||||
case 0xfc: /* cld */
|
||||
ctxt->eflags &= ~EFLG_DF;
|
||||
ctxt->eflags &= ~X86_EFLAGS_DF;
|
||||
break;
|
||||
case 0xfd: /* std */
|
||||
ctxt->eflags |= EFLG_DF;
|
||||
ctxt->eflags |= X86_EFLAGS_DF;
|
||||
break;
|
||||
default:
|
||||
goto cannot_emulate;
|
||||
@ -5100,7 +5098,7 @@ writeback:
|
||||
}
|
||||
goto done; /* skip rip writeback */
|
||||
}
|
||||
ctxt->eflags &= ~EFLG_RF;
|
||||
ctxt->eflags &= ~X86_EFLAGS_RF;
|
||||
}
|
||||
|
||||
ctxt->eip = ctxt->_eip;
|
||||
@ -5137,8 +5135,7 @@ twobyte_insn:
|
||||
case 0x40 ... 0x4f: /* cmov */
|
||||
if (test_cc(ctxt->b, ctxt->eflags))
|
||||
ctxt->dst.val = ctxt->src.val;
|
||||
else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
|
||||
ctxt->op_bytes != 4)
|
||||
else if (ctxt->op_bytes != 4)
|
||||
ctxt->dst.type = OP_NONE; /* no writeback */
|
||||
break;
|
||||
case 0x80 ... 0x8f: /* jnz rel, etc*/
|
||||
|
@ -443,7 +443,8 @@ static inline int pit_in_range(gpa_t addr)
|
||||
(addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
|
||||
}
|
||||
|
||||
static int pit_ioport_write(struct kvm_io_device *this,
|
||||
static int pit_ioport_write(struct kvm_vcpu *vcpu,
|
||||
struct kvm_io_device *this,
|
||||
gpa_t addr, int len, const void *data)
|
||||
{
|
||||
struct kvm_pit *pit = dev_to_pit(this);
|
||||
@ -519,7 +520,8 @@ static int pit_ioport_write(struct kvm_io_device *this,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pit_ioport_read(struct kvm_io_device *this,
|
||||
static int pit_ioport_read(struct kvm_vcpu *vcpu,
|
||||
struct kvm_io_device *this,
|
||||
gpa_t addr, int len, void *data)
|
||||
{
|
||||
struct kvm_pit *pit = dev_to_pit(this);
|
||||
@ -589,7 +591,8 @@ static int pit_ioport_read(struct kvm_io_device *this,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int speaker_ioport_write(struct kvm_io_device *this,
|
||||
static int speaker_ioport_write(struct kvm_vcpu *vcpu,
|
||||
struct kvm_io_device *this,
|
||||
gpa_t addr, int len, const void *data)
|
||||
{
|
||||
struct kvm_pit *pit = speaker_to_pit(this);
|
||||
@ -606,8 +609,9 @@ static int speaker_ioport_write(struct kvm_io_device *this,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int speaker_ioport_read(struct kvm_io_device *this,
|
||||
gpa_t addr, int len, void *data)
|
||||
static int speaker_ioport_read(struct kvm_vcpu *vcpu,
|
||||
struct kvm_io_device *this,
|
||||
gpa_t addr, int len, void *data)
|
||||
{
|
||||
struct kvm_pit *pit = speaker_to_pit(this);
|
||||
struct kvm_kpit_state *pit_state = &pit->pit_state;
|
||||
|
@ -3,7 +3,7 @@
|
||||
|
||||
#include <linux/kthread.h>
|
||||
|
||||
#include "iodev.h"
|
||||
#include <kvm/iodev.h>
|
||||
|
||||
struct kvm_kpit_channel_state {
|
||||
u32 count; /* can be 65536 */
|
||||
|
@ -529,42 +529,42 @@ static int picdev_read(struct kvm_pic *s,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int picdev_master_write(struct kvm_io_device *dev,
|
||||
static int picdev_master_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
|
||||
gpa_t addr, int len, const void *val)
|
||||
{
|
||||
return picdev_write(container_of(dev, struct kvm_pic, dev_master),
|
||||
addr, len, val);
|
||||
}
|
||||
|
||||
static int picdev_master_read(struct kvm_io_device *dev,
|
||||
static int picdev_master_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
|
||||
gpa_t addr, int len, void *val)
|
||||
{
|
||||
return picdev_read(container_of(dev, struct kvm_pic, dev_master),
|
||||
addr, len, val);
|
||||
}
|
||||
|
||||
static int picdev_slave_write(struct kvm_io_device *dev,
|
||||
static int picdev_slave_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
|
||||
gpa_t addr, int len, const void *val)
|
||||
{
|
||||
return picdev_write(container_of(dev, struct kvm_pic, dev_slave),
|
||||
addr, len, val);
|
||||
}
|
||||
|
||||
static int picdev_slave_read(struct kvm_io_device *dev,
|
||||
static int picdev_slave_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
|
||||
gpa_t addr, int len, void *val)
|
||||
{
|
||||
return picdev_read(container_of(dev, struct kvm_pic, dev_slave),
|
||||
addr, len, val);
|
||||
}
|
||||
|
||||
static int picdev_eclr_write(struct kvm_io_device *dev,
|
||||
static int picdev_eclr_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
|
||||
gpa_t addr, int len, const void *val)
|
||||
{
|
||||
return picdev_write(container_of(dev, struct kvm_pic, dev_eclr),
|
||||
addr, len, val);
|
||||
}
|
||||
|
||||
static int picdev_eclr_read(struct kvm_io_device *dev,
|
||||
static int picdev_eclr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
|
||||
gpa_t addr, int len, void *val)
|
||||
{
|
||||
return picdev_read(container_of(dev, struct kvm_pic, dev_eclr),
|
||||
|
@ -206,6 +206,8 @@ static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
|
||||
|
||||
old_irr = ioapic->irr;
|
||||
ioapic->irr |= mask;
|
||||
if (edge)
|
||||
ioapic->irr_delivered &= ~mask;
|
||||
if ((edge && old_irr == ioapic->irr) ||
|
||||
(!edge && entry.fields.remote_irr)) {
|
||||
ret = 0;
|
||||
@ -349,7 +351,7 @@ static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
|
||||
irqe.shorthand = 0;
|
||||
|
||||
if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
|
||||
ioapic->irr &= ~(1 << irq);
|
||||
ioapic->irr_delivered |= 1 << irq;
|
||||
|
||||
if (irq == RTC_GSI && line_status) {
|
||||
/*
|
||||
@ -473,13 +475,6 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
|
||||
}
|
||||
}
|
||||
|
||||
bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
|
||||
{
|
||||
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
|
||||
smp_rmb();
|
||||
return test_bit(vector, ioapic->handled_vectors);
|
||||
}
|
||||
|
||||
void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
|
||||
{
|
||||
struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
|
||||
@ -500,8 +495,8 @@ static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
|
||||
(addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
|
||||
}
|
||||
|
||||
static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
|
||||
void *val)
|
||||
static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
|
||||
gpa_t addr, int len, void *val)
|
||||
{
|
||||
struct kvm_ioapic *ioapic = to_ioapic(this);
|
||||
u32 result;
|
||||
@ -543,8 +538,8 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
|
||||
const void *val)
|
||||
static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
|
||||
gpa_t addr, int len, const void *val)
|
||||
{
|
||||
struct kvm_ioapic *ioapic = to_ioapic(this);
|
||||
u32 data;
|
||||
@ -599,6 +594,7 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
|
||||
ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
|
||||
ioapic->ioregsel = 0;
|
||||
ioapic->irr = 0;
|
||||
ioapic->irr_delivered = 0;
|
||||
ioapic->id = 0;
|
||||
memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS);
|
||||
rtc_irq_eoi_tracking_reset(ioapic);
|
||||
@ -656,6 +652,7 @@ int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
|
||||
|
||||
spin_lock(&ioapic->lock);
|
||||
memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
|
||||
state->irr &= ~ioapic->irr_delivered;
|
||||
spin_unlock(&ioapic->lock);
|
||||
return 0;
|
||||
}
|
||||
@ -669,6 +666,7 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
|
||||
spin_lock(&ioapic->lock);
|
||||
memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
|
||||
ioapic->irr = 0;
|
||||
ioapic->irr_delivered = 0;
|
||||
update_handled_vectors(ioapic);
|
||||
kvm_vcpu_request_scan_ioapic(kvm);
|
||||
kvm_ioapic_inject_all(ioapic, state->irr);
|
||||
|
@ -3,7 +3,7 @@
|
||||
|
||||
#include <linux/kvm_host.h>
|
||||
|
||||
#include "iodev.h"
|
||||
#include <kvm/iodev.h>
|
||||
|
||||
struct kvm;
|
||||
struct kvm_vcpu;
|
||||
@ -77,6 +77,7 @@ struct kvm_ioapic {
|
||||
struct rtc_status rtc_status;
|
||||
struct delayed_work eoi_inject;
|
||||
u32 irq_eoi[IOAPIC_NUM_PINS];
|
||||
u32 irr_delivered;
|
||||
};
|
||||
|
||||
#ifdef DEBUG
|
||||
@ -97,13 +98,19 @@ static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
|
||||
return kvm->arch.vioapic;
|
||||
}
|
||||
|
||||
static inline bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
|
||||
{
|
||||
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
|
||||
smp_rmb();
|
||||
return test_bit(vector, ioapic->handled_vectors);
|
||||
}
|
||||
|
||||
void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
|
||||
bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
|
||||
int short_hand, unsigned int dest, int dest_mode);
|
||||
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
|
||||
void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector,
|
||||
int trigger_mode);
|
||||
bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector);
|
||||
int kvm_ioapic_init(struct kvm *kvm);
|
||||
void kvm_ioapic_destroy(struct kvm *kvm);
|
||||
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
|
||||
|
@ -27,7 +27,7 @@
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
#include "iodev.h"
|
||||
#include <kvm/iodev.h>
|
||||
#include "ioapic.h"
|
||||
#include "lapic.h"
|
||||
|
||||
|
@ -133,6 +133,28 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
|
||||
return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
|
||||
}
|
||||
|
||||
/* The logical map is definitely wrong if we have multiple
|
||||
* modes at the same time. (Physical map is always right.)
|
||||
*/
|
||||
static inline bool kvm_apic_logical_map_valid(struct kvm_apic_map *map)
|
||||
{
|
||||
return !(map->mode & (map->mode - 1));
|
||||
}
|
||||
|
||||
static inline void
|
||||
apic_logical_id(struct kvm_apic_map *map, u32 dest_id, u16 *cid, u16 *lid)
|
||||
{
|
||||
unsigned lid_bits;
|
||||
|
||||
BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_CLUSTER != 4);
|
||||
BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_FLAT != 8);
|
||||
BUILD_BUG_ON(KVM_APIC_MODE_X2APIC != 16);
|
||||
lid_bits = map->mode;
|
||||
|
||||
*cid = dest_id >> lid_bits;
|
||||
*lid = dest_id & ((1 << lid_bits) - 1);
|
||||
}
|
||||
|
||||
static void recalculate_apic_map(struct kvm *kvm)
|
||||
{
|
||||
struct kvm_apic_map *new, *old = NULL;
|
||||
@ -146,48 +168,6 @@ static void recalculate_apic_map(struct kvm *kvm)
|
||||
if (!new)
|
||||
goto out;
|
||||
|
||||
new->ldr_bits = 8;
|
||||
/* flat mode is default */
|
||||
new->cid_shift = 8;
|
||||
new->cid_mask = 0;
|
||||
new->lid_mask = 0xff;
|
||||
new->broadcast = APIC_BROADCAST;
|
||||
|
||||
kvm_for_each_vcpu(i, vcpu, kvm) {
|
||||
struct kvm_lapic *apic = vcpu->arch.apic;
|
||||
|
||||
if (!kvm_apic_present(vcpu))
|
||||
continue;
|
||||
|
||||
if (apic_x2apic_mode(apic)) {
|
||||
new->ldr_bits = 32;
|
||||
new->cid_shift = 16;
|
||||
new->cid_mask = new->lid_mask = 0xffff;
|
||||
new->broadcast = X2APIC_BROADCAST;
|
||||
} else if (kvm_apic_get_reg(apic, APIC_LDR)) {
|
||||
if (kvm_apic_get_reg(apic, APIC_DFR) ==
|
||||
APIC_DFR_CLUSTER) {
|
||||
new->cid_shift = 4;
|
||||
new->cid_mask = 0xf;
|
||||
new->lid_mask = 0xf;
|
||||
} else {
|
||||
new->cid_shift = 8;
|
||||
new->cid_mask = 0;
|
||||
new->lid_mask = 0xff;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* All APICs have to be configured in the same mode by an OS.
|
||||
* We take advatage of this while building logical id loockup
|
||||
* table. After reset APICs are in software disabled mode, so if
|
||||
* we find apic with different setting we assume this is the mode
|
||||
* OS wants all apics to be in; build lookup table accordingly.
|
||||
*/
|
||||
if (kvm_apic_sw_enabled(apic))
|
||||
break;
|
||||
}
|
||||
|
||||
kvm_for_each_vcpu(i, vcpu, kvm) {
|
||||
struct kvm_lapic *apic = vcpu->arch.apic;
|
||||
u16 cid, lid;
|
||||
@ -198,11 +178,25 @@ static void recalculate_apic_map(struct kvm *kvm)
|
||||
|
||||
aid = kvm_apic_id(apic);
|
||||
ldr = kvm_apic_get_reg(apic, APIC_LDR);
|
||||
cid = apic_cluster_id(new, ldr);
|
||||
lid = apic_logical_id(new, ldr);
|
||||
|
||||
if (aid < ARRAY_SIZE(new->phys_map))
|
||||
new->phys_map[aid] = apic;
|
||||
|
||||
if (apic_x2apic_mode(apic)) {
|
||||
new->mode |= KVM_APIC_MODE_X2APIC;
|
||||
} else if (ldr) {
|
||||
ldr = GET_APIC_LOGICAL_ID(ldr);
|
||||
if (kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
|
||||
new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
|
||||
else
|
||||
new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
|
||||
}
|
||||
|
||||
if (!kvm_apic_logical_map_valid(new))
|
||||
continue;
|
||||
|
||||
apic_logical_id(new, ldr, &cid, &lid);
|
||||
|
||||
if (lid && cid < ARRAY_SIZE(new->logical_map))
|
||||
new->logical_map[cid][ffs(lid) - 1] = apic;
|
||||
}
|
||||
@ -588,15 +582,23 @@ static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
|
||||
apic_update_ppr(apic);
|
||||
}
|
||||
|
||||
static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 dest)
|
||||
static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
|
||||
{
|
||||
return dest == (apic_x2apic_mode(apic) ?
|
||||
X2APIC_BROADCAST : APIC_BROADCAST);
|
||||
if (apic_x2apic_mode(apic))
|
||||
return mda == X2APIC_BROADCAST;
|
||||
|
||||
return GET_APIC_DEST_FIELD(mda) == APIC_BROADCAST;
|
||||
}
|
||||
|
||||
static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 dest)
|
||||
static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
|
||||
{
|
||||
return kvm_apic_id(apic) == dest || kvm_apic_broadcast(apic, dest);
|
||||
if (kvm_apic_broadcast(apic, mda))
|
||||
return true;
|
||||
|
||||
if (apic_x2apic_mode(apic))
|
||||
return mda == kvm_apic_id(apic);
|
||||
|
||||
return mda == SET_APIC_DEST_FIELD(kvm_apic_id(apic));
|
||||
}
|
||||
|
||||
static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
|
||||
@ -613,6 +615,7 @@ static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
|
||||
&& (logical_id & mda & 0xffff) != 0;
|
||||
|
||||
logical_id = GET_APIC_LOGICAL_ID(logical_id);
|
||||
mda = GET_APIC_DEST_FIELD(mda);
|
||||
|
||||
switch (kvm_apic_get_reg(apic, APIC_DFR)) {
|
||||
case APIC_DFR_FLAT:
|
||||
@ -627,10 +630,27 @@ static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
|
||||
}
|
||||
}
|
||||
|
||||
/* KVM APIC implementation has two quirks
|
||||
* - dest always begins at 0 while xAPIC MDA has offset 24,
|
||||
* - IOxAPIC messages have to be delivered (directly) to x2APIC.
|
||||
*/
|
||||
static u32 kvm_apic_mda(unsigned int dest_id, struct kvm_lapic *source,
|
||||
struct kvm_lapic *target)
|
||||
{
|
||||
bool ipi = source != NULL;
|
||||
bool x2apic_mda = apic_x2apic_mode(ipi ? source : target);
|
||||
|
||||
if (!ipi && dest_id == APIC_BROADCAST && x2apic_mda)
|
||||
return X2APIC_BROADCAST;
|
||||
|
||||
return x2apic_mda ? dest_id : SET_APIC_DEST_FIELD(dest_id);
|
||||
}
|
||||
|
||||
bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
|
||||
int short_hand, unsigned int dest, int dest_mode)
|
||||
{
|
||||
struct kvm_lapic *target = vcpu->arch.apic;
|
||||
u32 mda = kvm_apic_mda(dest, source, target);
|
||||
|
||||
apic_debug("target %p, source %p, dest 0x%x, "
|
||||
"dest_mode 0x%x, short_hand 0x%x\n",
|
||||
@ -640,9 +660,9 @@ bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
|
||||
switch (short_hand) {
|
||||
case APIC_DEST_NOSHORT:
|
||||
if (dest_mode == APIC_DEST_PHYSICAL)
|
||||
return kvm_apic_match_physical_addr(target, dest);
|
||||
return kvm_apic_match_physical_addr(target, mda);
|
||||
else
|
||||
return kvm_apic_match_logical_addr(target, dest);
|
||||
return kvm_apic_match_logical_addr(target, mda);
|
||||
case APIC_DEST_SELF:
|
||||
return target == source;
|
||||
case APIC_DEST_ALLINC:
|
||||
@ -664,6 +684,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
|
||||
struct kvm_lapic **dst;
|
||||
int i;
|
||||
bool ret = false;
|
||||
bool x2apic_ipi = src && apic_x2apic_mode(src);
|
||||
|
||||
*r = -1;
|
||||
|
||||
@ -675,15 +696,15 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
|
||||
if (irq->shorthand)
|
||||
return false;
|
||||
|
||||
if (irq->dest_id == (x2apic_ipi ? X2APIC_BROADCAST : APIC_BROADCAST))
|
||||
return false;
|
||||
|
||||
rcu_read_lock();
|
||||
map = rcu_dereference(kvm->arch.apic_map);
|
||||
|
||||
if (!map)
|
||||
goto out;
|
||||
|
||||
if (irq->dest_id == map->broadcast)
|
||||
goto out;
|
||||
|
||||
ret = true;
|
||||
|
||||
if (irq->dest_mode == APIC_DEST_PHYSICAL) {
|
||||
@ -692,16 +713,20 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
|
||||
|
||||
dst = &map->phys_map[irq->dest_id];
|
||||
} else {
|
||||
u32 mda = irq->dest_id << (32 - map->ldr_bits);
|
||||
u16 cid = apic_cluster_id(map, mda);
|
||||
u16 cid;
|
||||
|
||||
if (!kvm_apic_logical_map_valid(map)) {
|
||||
ret = false;
|
||||
goto out;
|
||||
}
|
||||
|
||||
apic_logical_id(map, irq->dest_id, &cid, (u16 *)&bitmap);
|
||||
|
||||
if (cid >= ARRAY_SIZE(map->logical_map))
|
||||
goto out;
|
||||
|
||||
dst = map->logical_map[cid];
|
||||
|
||||
bitmap = apic_logical_id(map, mda);
|
||||
|
||||
if (irq->delivery_mode == APIC_DM_LOWEST) {
|
||||
int l = -1;
|
||||
for_each_set_bit(i, &bitmap, 16) {
|
||||
@ -1037,7 +1062,7 @@ static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
|
||||
addr < apic->base_address + LAPIC_MMIO_LENGTH;
|
||||
}
|
||||
|
||||
static int apic_mmio_read(struct kvm_io_device *this,
|
||||
static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
|
||||
gpa_t address, int len, void *data)
|
||||
{
|
||||
struct kvm_lapic *apic = to_lapic(this);
|
||||
@ -1357,7 +1382,7 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int apic_mmio_write(struct kvm_io_device *this,
|
||||
static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
|
||||
gpa_t address, int len, const void *data)
|
||||
{
|
||||
struct kvm_lapic *apic = to_lapic(this);
|
||||
@ -1497,8 +1522,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
|
||||
return;
|
||||
}
|
||||
|
||||
if (!kvm_vcpu_is_bsp(apic->vcpu))
|
||||
value &= ~MSR_IA32_APICBASE_BSP;
|
||||
vcpu->arch.apic_base = value;
|
||||
|
||||
/* update jump label if enable bit changes */
|
||||
|
@ -1,7 +1,7 @@
|
||||
#ifndef __KVM_X86_LAPIC_H
|
||||
#define __KVM_X86_LAPIC_H
|
||||
|
||||
#include "iodev.h"
|
||||
#include <kvm/iodev.h>
|
||||
|
||||
#include <linux/kvm_host.h>
|
||||
|
||||
@ -148,21 +148,6 @@ static inline bool kvm_apic_vid_enabled(struct kvm *kvm)
|
||||
return kvm_x86_ops->vm_has_apicv(kvm);
|
||||
}
|
||||
|
||||
static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr)
|
||||
{
|
||||
u16 cid;
|
||||
ldr >>= 32 - map->ldr_bits;
|
||||
cid = (ldr >> map->cid_shift) & map->cid_mask;
|
||||
|
||||
return cid;
|
||||
}
|
||||
|
||||
static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
|
||||
{
|
||||
ldr >>= (32 - map->ldr_bits);
|
||||
return ldr & map->lid_mask;
|
||||
}
|
||||
|
||||
static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vcpu->arch.apic->pending_events;
|
||||
|
@ -4465,6 +4465,79 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
|
||||
kvm_flush_remote_tlbs(kvm);
|
||||
}
|
||||
|
||||
static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
|
||||
unsigned long *rmapp)
|
||||
{
|
||||
u64 *sptep;
|
||||
struct rmap_iterator iter;
|
||||
int need_tlb_flush = 0;
|
||||
pfn_t pfn;
|
||||
struct kvm_mmu_page *sp;
|
||||
|
||||
for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
|
||||
BUG_ON(!(*sptep & PT_PRESENT_MASK));
|
||||
|
||||
sp = page_header(__pa(sptep));
|
||||
pfn = spte_to_pfn(*sptep);
|
||||
|
||||
/*
|
||||
* Only EPT supported for now; otherwise, one would need to
|
||||
* find out efficiently whether the guest page tables are
|
||||
* also using huge pages.
|
||||
*/
|
||||
if (sp->role.direct &&
|
||||
!kvm_is_reserved_pfn(pfn) &&
|
||||
PageTransCompound(pfn_to_page(pfn))) {
|
||||
drop_spte(kvm, sptep);
|
||||
sptep = rmap_get_first(*rmapp, &iter);
|
||||
need_tlb_flush = 1;
|
||||
} else
|
||||
sptep = rmap_get_next(&iter);
|
||||
}
|
||||
|
||||
return need_tlb_flush;
|
||||
}
|
||||
|
||||
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
|
||||
struct kvm_memory_slot *memslot)
|
||||
{
|
||||
bool flush = false;
|
||||
unsigned long *rmapp;
|
||||
unsigned long last_index, index;
|
||||
gfn_t gfn_start, gfn_end;
|
||||
|
||||
spin_lock(&kvm->mmu_lock);
|
||||
|
||||
gfn_start = memslot->base_gfn;
|
||||
gfn_end = memslot->base_gfn + memslot->npages - 1;
|
||||
|
||||
if (gfn_start >= gfn_end)
|
||||
goto out;
|
||||
|
||||
rmapp = memslot->arch.rmap[0];
|
||||
last_index = gfn_to_index(gfn_end, memslot->base_gfn,
|
||||
PT_PAGE_TABLE_LEVEL);
|
||||
|
||||
for (index = 0; index <= last_index; ++index, ++rmapp) {
|
||||
if (*rmapp)
|
||||
flush |= kvm_mmu_zap_collapsible_spte(kvm, rmapp);
|
||||
|
||||
if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
|
||||
if (flush) {
|
||||
kvm_flush_remote_tlbs(kvm);
|
||||
flush = false;
|
||||
}
|
||||
cond_resched_lock(&kvm->mmu_lock);
|
||||
}
|
||||
}
|
||||
|
||||
if (flush)
|
||||
kvm_flush_remote_tlbs(kvm);
|
||||
|
||||
out:
|
||||
spin_unlock(&kvm->mmu_lock);
|
||||
}
|
||||
|
||||
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
|
||||
struct kvm_memory_slot *memslot)
|
||||
{
|
||||
|
@ -38,7 +38,7 @@ static struct kvm_arch_event_perf_mapping {
|
||||
};
|
||||
|
||||
/* mapping between fixed pmc index and arch_events array */
int fixed_pmc_events[] = {1, 0, 7};
static int fixed_pmc_events[] = {1, 0, 7};

static bool pmc_is_gp(struct kvm_pmc *pmc)
{

@@ -1261,7 +1261,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)

svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
MSR_IA32_APICBASE_ENABLE;
if (kvm_vcpu_is_bsp(&svm->vcpu))
if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;

svm_init_osvw(&svm->vcpu);
@@ -1929,14 +1929,12 @@ static int nop_on_interception(struct vcpu_svm *svm)
static int halt_interception(struct vcpu_svm *svm)
{
svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
skip_emulated_instruction(&svm->vcpu);
return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm)
{
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
kvm_emulate_hypercall(&svm->vcpu);
return 1;
}
@@ -2757,11 +2755,11 @@ static int invlpga_interception(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu = &svm->vcpu;

trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
vcpu->arch.regs[VCPU_REGS_RAX]);
trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX),
kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));

/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);
kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));

svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
@@ -2770,12 +2768,18 @@ static int invlpga_interception(struct vcpu_svm *svm)

static int skinit_interception(struct vcpu_svm *svm)
{
trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);
trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));

kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
}

static int wbinvd_interception(struct vcpu_svm *svm)
{
kvm_emulate_wbinvd(&svm->vcpu);
return 1;
}

static int xsetbv_interception(struct vcpu_svm *svm)
{
u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
@@ -2902,7 +2906,8 @@ static int rdpmc_interception(struct vcpu_svm *svm)
return 1;
}

bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
unsigned long val)
{
unsigned long cr0 = svm->vcpu.arch.cr0;
bool ret = false;
@@ -2940,7 +2945,10 @@ static int cr_interception(struct vcpu_svm *svm)
return emulate_on_interception(svm);

reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
else
cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;

err = 0;
if (cr >= 16) { /* mov to cr */
@@ -3133,7 +3141,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)

static int rdmsr_interception(struct vcpu_svm *svm)
{
u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
u64 data;

if (svm_get_msr(&svm->vcpu, ecx, &data)) {
@@ -3142,8 +3150,8 @@ static int rdmsr_interception(struct vcpu_svm *svm)
} else {
trace_kvm_msr_read(ecx, data);

svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, data & 0xffffffff);
kvm_register_write(&svm->vcpu, VCPU_REGS_RDX, data >> 32);
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
skip_emulated_instruction(&svm->vcpu);
}
@@ -3246,9 +3254,8 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
static int wrmsr_interception(struct vcpu_svm *svm)
{
struct msr_data msr;
u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
u64 data = kvm_read_edx_eax(&svm->vcpu);

msr.data = data;
msr.index = ecx;
@@ -3325,7 +3332,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_READ_CR3] = cr_interception,
[SVM_EXIT_READ_CR4] = cr_interception,
[SVM_EXIT_READ_CR8] = cr_interception,
[SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
[SVM_EXIT_CR0_SEL_WRITE] = cr_interception,
[SVM_EXIT_WRITE_CR0] = cr_interception,
[SVM_EXIT_WRITE_CR3] = cr_interception,
[SVM_EXIT_WRITE_CR4] = cr_interception,
@@ -3376,7 +3383,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_STGI] = stgi_interception,
[SVM_EXIT_CLGI] = clgi_interception,
[SVM_EXIT_SKINIT] = skinit_interception,
[SVM_EXIT_WBINVD] = emulate_on_interception,
[SVM_EXIT_WBINVD] = wbinvd_interception,
[SVM_EXIT_MONITOR] = monitor_interception,
[SVM_EXIT_MWAIT] = mwait_interception,
[SVM_EXIT_XSETBV] = xsetbv_interception,
@@ -3555,7 +3562,7 @@ static int handle_exit(struct kvm_vcpu *vcpu)

if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
|| !svm_exit_handlers[exit_code]) {
WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code);
WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code);
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
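A note on the vcpu->arch.regs[] to kvm_register_read()/kvm_register_write() conversions above: the accessors go through KVM's register cache, so a register slot that is still held in the VMCB/VMCS is refilled before use and writes are marked dirty. A minimal sketch of the read side, assuming the 4.1-era kvm_cache_regs.h layout (approximate, not quoted from this diff):

    static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
                                                  enum kvm_reg reg)
    {
            /* Refill the cached slot if this register is not yet valid. */
            if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
                    kvm_x86_ops->cache_reg(vcpu, reg);
            return vcpu->arch.regs[reg];
    }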
@@ -2470,6 +2470,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
vmx->nested.nested_vmx_secondary_ctls_low = 0;
vmx->nested.nested_vmx_secondary_ctls_high &=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
@@ -3268,8 +3269,8 @@ static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
* default value.
*/
if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
save->selector &= ~SELECTOR_RPL_MASK;
save->dpl = save->selector & SELECTOR_RPL_MASK;
save->selector &= ~SEGMENT_RPL_MASK;
save->dpl = save->selector & SEGMENT_RPL_MASK;
save->s = 1;
}
vmx_set_segment(vcpu, save, seg);
@@ -3842,7 +3843,7 @@ static bool code_segment_valid(struct kvm_vcpu *vcpu)
unsigned int cs_rpl;

vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
cs_rpl = cs.selector & SELECTOR_RPL_MASK;
cs_rpl = cs.selector & SEGMENT_RPL_MASK;

if (cs.unusable)
return false;
@@ -3870,7 +3871,7 @@ static bool stack_segment_valid(struct kvm_vcpu *vcpu)
unsigned int ss_rpl;

vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
ss_rpl = ss.selector & SELECTOR_RPL_MASK;
ss_rpl = ss.selector & SEGMENT_RPL_MASK;

if (ss.unusable)
return true;
@@ -3892,7 +3893,7 @@ static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
unsigned int rpl;

vmx_get_segment(vcpu, &var, seg);
rpl = var.selector & SELECTOR_RPL_MASK;
rpl = var.selector & SEGMENT_RPL_MASK;

if (var.unusable)
return true;
@@ -3919,7 +3920,7 @@ static bool tr_valid(struct kvm_vcpu *vcpu)

if (tr.unusable)
return false;
if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */
if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */
return false;
if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
return false;
@@ -3937,7 +3938,7 @@ static bool ldtr_valid(struct kvm_vcpu *vcpu)

if (ldtr.unusable)
return true;
if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */
if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */
return false;
if (ldtr.type != 2)
return false;
@@ -3954,8 +3955,8 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);

return ((cs.selector & SELECTOR_RPL_MASK) ==
(ss.selector & SELECTOR_RPL_MASK));
return ((cs.selector & SEGMENT_RPL_MASK) ==
(ss.selector & SEGMENT_RPL_MASK));
}

/*
@@ -4711,7 +4712,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
kvm_set_cr8(&vmx->vcpu, 0);
apic_base_msr.data = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
if (kvm_vcpu_is_bsp(&vmx->vcpu))
if (kvm_vcpu_is_reset_bsp(&vmx->vcpu))
apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
apic_base_msr.host_initiated = true;
kvm_set_apic_base(&vmx->vcpu, &apic_base_msr);
@@ -5006,7 +5007,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
if (vcpu->arch.halt_request) {
vcpu->arch.halt_request = 0;
return kvm_emulate_halt(vcpu);
return kvm_vcpu_halt(vcpu);
}
return 1;
}
@@ -5071,6 +5072,10 @@ static int handle_exception(struct kvm_vcpu *vcpu)
}

if (is_invalid_opcode(intr_info)) {
if (is_guest_mode(vcpu)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
if (er != EMULATE_DONE)
kvm_queue_exception(vcpu, UD_VECTOR);
@@ -5090,9 +5095,10 @@ static int handle_exception(struct kvm_vcpu *vcpu)
!(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
vcpu->run->internal.ndata = 2;
vcpu->run->internal.ndata = 3;
vcpu->run->internal.data[0] = vect_info;
vcpu->run->internal.data[1] = intr_info;
vcpu->run->internal.data[2] = error_code;
return 0;
}

@@ -5533,13 +5539,11 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu)

static int handle_halt(struct kvm_vcpu *vcpu)
{
skip_emulated_instruction(vcpu);
return kvm_emulate_halt(vcpu);
}

static int handle_vmcall(struct kvm_vcpu *vcpu)
{
skip_emulated_instruction(vcpu);
kvm_emulate_hypercall(vcpu);
return 1;
}
@@ -5570,7 +5574,6 @@ static int handle_rdpmc(struct kvm_vcpu *vcpu)

static int handle_wbinvd(struct kvm_vcpu *vcpu)
{
skip_emulated_instruction(vcpu);
kvm_emulate_wbinvd(vcpu);
return 1;
}
@@ -5828,7 +5831,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
gpa_t gpa;

gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
if (!kvm_io_bus_write(vcpu->kvm, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
skip_emulated_instruction(vcpu);
return 1;
}
@@ -5909,7 +5912,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)

if (vcpu->arch.halt_request) {
vcpu->arch.halt_request = 0;
ret = kvm_emulate_halt(vcpu);
ret = kvm_vcpu_halt(vcpu);
goto out;
}

@@ -7318,21 +7321,21 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
else if (port < 0x10000)
bitmap = vmcs12->io_bitmap_b;
else
return 1;
return true;
bitmap += (port & 0x7fff) / 8;

if (last_bitmap != bitmap)
if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1))
return 1;
return true;
if (b & (1 << (port & 7)))
return 1;
return true;

port++;
size--;
last_bitmap = bitmap;
}

return 0;
return false;
}

/*
@@ -7348,7 +7351,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
gpa_t bitmap;

if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
return 1;
return true;

/*
* The MSR_BITMAP page is divided into four 1024-byte bitmaps,
@@ -7367,10 +7370,10 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
if (msr_index < 1024*8) {
unsigned char b;
if (kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1))
return 1;
return true;
return 1 & (b >> (msr_index & 7));
} else
return 1; /* let L1 handle the wrong parameter */
return true; /* let L1 handle the wrong parameter */
}

/*
@@ -7392,7 +7395,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
case 0:
if (vmcs12->cr0_guest_host_mask &
(val ^ vmcs12->cr0_read_shadow))
return 1;
return true;
break;
case 3:
if ((vmcs12->cr3_target_count >= 1 &&
@@ -7403,37 +7406,37 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
vmcs12->cr3_target_value2 == val) ||
(vmcs12->cr3_target_count >= 4 &&
vmcs12->cr3_target_value3 == val))
return 0;
return false;
if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
return 1;
return true;
break;
case 4:
if (vmcs12->cr4_guest_host_mask &
(vmcs12->cr4_read_shadow ^ val))
return 1;
return true;
break;
case 8:
if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
return 1;
return true;
break;
}
break;
case 2: /* clts */
if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
(vmcs12->cr0_read_shadow & X86_CR0_TS))
return 1;
return true;
break;
case 1: /* mov from cr */
switch (cr) {
case 3:
if (vmcs12->cpu_based_vm_exec_control &
CPU_BASED_CR3_STORE_EXITING)
return 1;
return true;
break;
case 8:
if (vmcs12->cpu_based_vm_exec_control &
CPU_BASED_CR8_STORE_EXITING)
return 1;
return true;
break;
}
break;
@@ -7444,14 +7447,14 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
*/
if (vmcs12->cr0_guest_host_mask & 0xe &
(val ^ vmcs12->cr0_read_shadow))
return 1;
return true;
if ((vmcs12->cr0_guest_host_mask & 0x1) &&
!(vmcs12->cr0_read_shadow & 0x1) &&
(val & 0x1))
return 1;
return true;
break;
}
return 0;
return false;
}

/*
@@ -7474,48 +7477,48 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
KVM_ISA_VMX);

if (vmx->nested.nested_run_pending)
return 0;
return false;

if (unlikely(vmx->fail)) {
pr_info_ratelimited("%s failed vm entry %x\n", __func__,
vmcs_read32(VM_INSTRUCTION_ERROR));
return 1;
return true;
}

switch (exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
if (!is_exception(intr_info))
return 0;
return false;
else if (is_page_fault(intr_info))
return enable_ept;
else if (is_no_device(intr_info) &&
!(vmcs12->guest_cr0 & X86_CR0_TS))
return 0;
return false;
return vmcs12->exception_bitmap &
(1u << (intr_info & INTR_INFO_VECTOR_MASK));
case EXIT_REASON_EXTERNAL_INTERRUPT:
return 0;
return false;
case EXIT_REASON_TRIPLE_FAULT:
return 1;
return true;
case EXIT_REASON_PENDING_INTERRUPT:
return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
case EXIT_REASON_NMI_WINDOW:
return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
case EXIT_REASON_TASK_SWITCH:
return 1;
return true;
case EXIT_REASON_CPUID:
if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa)
return 0;
return 1;
return false;
return true;
case EXIT_REASON_HLT:
return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
case EXIT_REASON_INVD:
return 1;
return true;
case EXIT_REASON_INVLPG:
return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
case EXIT_REASON_RDPMC:
return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
case EXIT_REASON_RDTSC:
case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
@@ -7527,7 +7530,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
* VMX instructions trap unconditionally. This allows L1 to
* emulate them for its L2 guest, i.e., allows 3-level nesting!
*/
return 1;
return true;
case EXIT_REASON_CR_ACCESS:
return nested_vmx_exit_handled_cr(vcpu, vmcs12);
case EXIT_REASON_DR_ACCESS:
@@ -7538,7 +7541,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
case EXIT_REASON_MSR_WRITE:
return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
case EXIT_REASON_INVALID_STATE:
return 1;
return true;
case EXIT_REASON_MWAIT_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
case EXIT_REASON_MONITOR_INSTRUCTION:
@@ -7548,7 +7551,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
nested_cpu_has2(vmcs12,
SECONDARY_EXEC_PAUSE_LOOP_EXITING);
case EXIT_REASON_MCE_DURING_VMENTRY:
return 0;
return false;
case EXIT_REASON_TPR_BELOW_THRESHOLD:
return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
case EXIT_REASON_APIC_ACCESS:
@@ -7557,7 +7560,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
case EXIT_REASON_APIC_WRITE:
case EXIT_REASON_EOI_INDUCED:
/* apic_write and eoi_induced should exit unconditionally. */
return 1;
return true;
case EXIT_REASON_EPT_VIOLATION:
/*
* L0 always deals with the EPT violation. If nested EPT is
@@ -7565,7 +7568,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
* missing in the guest EPT table (EPT12), the EPT violation
* will be injected with nested_ept_inject_page_fault()
*/
return 0;
return false;
case EXIT_REASON_EPT_MISCONFIG:
/*
* L2 never uses directly L1's EPT, but rather L0's own EPT
@@ -7573,11 +7576,11 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
* (EPT on EPT). So any problems with the structure of the
* table is L0's fault.
*/
return 0;
return false;
case EXIT_REASON_WBINVD:
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
case EXIT_REASON_XSETBV:
return 1;
return true;
case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
/*
* This should never happen, since it is not possible to
@@ -7587,7 +7590,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
*/
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
default:
return 1;
return true;
}
}

@@ -8522,6 +8525,9 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
exec_control);
}
}
if (nested && !vmx->rdtscp_enabled)
vmx->nested.nested_vmx_secondary_ctls_high &=
~SECONDARY_EXEC_RDTSCP;
}

/* Exposing INVPCID only when PCID is exposed */
@@ -8622,10 +8628,11 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int maxphyaddr = cpuid_maxphyaddr(vcpu);

if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
/* TODO: Also verify bits beyond physical address width are 0 */
if (!PAGE_ALIGNED(vmcs12->apic_access_addr))
if (!PAGE_ALIGNED(vmcs12->apic_access_addr) ||
vmcs12->apic_access_addr >> maxphyaddr)
return false;

/*
@@ -8641,8 +8648,8 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
}

if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
/* TODO: Also verify bits beyond physical address width are 0 */
if (!PAGE_ALIGNED(vmcs12->virtual_apic_page_addr))
if (!PAGE_ALIGNED(vmcs12->virtual_apic_page_addr) ||
vmcs12->virtual_apic_page_addr >> maxphyaddr)
return false;

if (vmx->nested.virtual_apic_page) /* shouldn't happen */
@@ -8665,7 +8672,8 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
}

if (nested_cpu_has_posted_intr(vmcs12)) {
if (!IS_ALIGNED(vmcs12->posted_intr_desc_addr, 64))
if (!IS_ALIGNED(vmcs12->posted_intr_desc_addr, 64) ||
vmcs12->posted_intr_desc_addr >> maxphyaddr)
return false;

if (vmx->nested.pi_desc_page) { /* shouldn't happen */
@@ -8864,9 +8872,9 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,

static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
unsigned long count_field,
unsigned long addr_field,
int maxphyaddr)
unsigned long addr_field)
{
int maxphyaddr;
u64 count, addr;

if (vmcs12_read_any(vcpu, count_field, &count) ||
@@ -8876,6 +8884,7 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
}
if (count == 0)
return 0;
maxphyaddr = cpuid_maxphyaddr(vcpu);
if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
(addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
pr_warn_ratelimited(
@@ -8889,19 +8898,16 @@ static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
int maxphyaddr;

if (vmcs12->vm_exit_msr_load_count == 0 &&
vmcs12->vm_exit_msr_store_count == 0 &&
vmcs12->vm_entry_msr_load_count == 0)
return 0; /* Fast path */
maxphyaddr = cpuid_maxphyaddr(vcpu);
if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
VM_EXIT_MSR_LOAD_ADDR, maxphyaddr) ||
VM_EXIT_MSR_LOAD_ADDR) ||
nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
VM_EXIT_MSR_STORE_ADDR, maxphyaddr) ||
VM_EXIT_MSR_STORE_ADDR) ||
nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
VM_ENTRY_MSR_LOAD_ADDR, maxphyaddr))
VM_ENTRY_MSR_LOAD_ADDR))
return -EINVAL;
return 0;
}
@@ -9151,8 +9157,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
exec_control &= ~SECONDARY_EXEC_RDTSCP;
/* Take the following fields only from vmcs12 */
exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_APIC_REGISTER_VIRT);
SECONDARY_EXEC_APIC_REGISTER_VIRT);
if (nested_cpu_has(vmcs12,
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
exec_control |= vmcs12->secondary_vm_exec_control;
@@ -9385,7 +9392,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
}

if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
/*TODO: Also verify bits beyond physical address width are 0*/
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
return 1;
}
@@ -9524,7 +9530,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
vmcs12->launch_state = 1;

if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
return kvm_emulate_halt(vcpu);
return kvm_vcpu_halt(vcpu);

vmx->nested.nested_run_pending = 1;
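The nested-entry checks added above reject any control-structure address with bits set at or above the guest's physical-address width. A hedged restatement of the pattern as a hypothetical helper (the real code open-codes it per field, and maxphyaddr comes from cpuid_maxphyaddr()):

    /* An address is representable by the vCPU only if no bit at or above
     * its physical address width is set, i.e. (addr >> maxphyaddr) == 0;
     * the APIC-access and virtual-APIC pages must also be page aligned. */
    static bool nested_addr_valid(u64 addr, int maxphyaddr)
    {
            return PAGE_ALIGNED(addr) && (addr >> maxphyaddr) == 0;
    }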
@@ -801,6 +801,17 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
{
int i;

if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
for (i = 0; i < KVM_NR_DB_REGS; i++)
vcpu->arch.eff_db[i] = vcpu->arch.db[i];
vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
}
}

static void kvm_update_dr6(struct kvm_vcpu *vcpu)
{
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
@@ -3149,6 +3160,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
return -EINVAL;

memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
kvm_update_dr0123(vcpu);
vcpu->arch.dr6 = dbgregs->dr6;
kvm_update_dr6(vcpu);
vcpu->arch.dr7 = dbgregs->dr7;
@@ -4114,8 +4126,8 @@ static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
do {
n = min(len, 8);
if (!(vcpu->arch.apic &&
!kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v))
&& kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
!kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
&& kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
break;
handled += n;
addr += n;
@@ -4134,8 +4146,9 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
do {
n = min(len, 8);
if (!(vcpu->arch.apic &&
!kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
&& kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
!kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
addr, n, v))
&& kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
break;
trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
handled += n;
@@ -4475,7 +4488,8 @@ mmio:
return X86EMUL_CONTINUE;
}

int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
unsigned long addr,
void *val, unsigned int bytes,
struct x86_exception *exception,
const struct read_write_emulator_ops *ops)
@@ -4538,7 +4552,7 @@ static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
exception, &read_emultor);
}

int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr,
const void *val,
unsigned int bytes,
@@ -4629,10 +4643,10 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
int r;

if (vcpu->arch.pio.in)
r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
vcpu->arch.pio.size, pd);
else
r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
vcpu->arch.pio.port, vcpu->arch.pio.size,
pd);
return r;
@@ -4705,7 +4719,7 @@ static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
}

int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
{
if (!need_emulate_wbinvd(vcpu))
return X86EMUL_CONTINUE;
@@ -4722,19 +4736,29 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
wbinvd();
return X86EMUL_CONTINUE;
}

int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
kvm_x86_ops->skip_emulated_instruction(vcpu);
return kvm_emulate_wbinvd_noskip(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);

static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
{
kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
}

int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
unsigned long *dest)
{
return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
unsigned long value)
{

return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
@@ -5816,7 +5840,7 @@ void kvm_arch_exit(void)
free_percpu(shared_msrs);
}

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
{
++vcpu->stat.halt_exits;
if (irqchip_in_kernel(vcpu->kvm)) {
@@ -5827,6 +5851,13 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
return 0;
}
}
EXPORT_SYMBOL_GPL(kvm_vcpu_halt);

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
kvm_x86_ops->skip_emulated_instruction(vcpu);
return kvm_vcpu_halt(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);

int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
@@ -5903,7 +5934,7 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
lapic_irq.dest_id = apicid;

lapic_irq.delivery_mode = APIC_DM_REMRD;
kvm_irq_delivery_to_apic(kvm, 0, &lapic_irq, NULL);
kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
}

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
@@ -5911,6 +5942,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
unsigned long nr, a0, a1, a2, a3, ret;
int op_64_bit, r = 1;

kvm_x86_ops->skip_emulated_instruction(vcpu);

if (kvm_hv_hypercall_enabled(vcpu->kvm))
return kvm_hv_hypercall(vcpu);

@@ -6164,7 +6197,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
}

/*
* Returns 1 to let __vcpu_run() continue the guest execution loop without
* Returns 1 to let vcpu_run() continue the guest execution loop without
* exiting to the userspace. Otherwise, the value will be returned to the
* userspace.
*/
@@ -6301,6 +6334,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
set_debugreg(vcpu->arch.eff_db[2], 2);
set_debugreg(vcpu->arch.eff_db[3], 3);
set_debugreg(vcpu->arch.dr6, 6);
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
}

trace_kvm_entry(vcpu->vcpu_id);
@@ -6382,42 +6416,47 @@ out:
return r;
}

static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
if (!kvm_arch_vcpu_runnable(vcpu)) {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_block(vcpu);
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
return 1;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
kvm_apic_accept_events(vcpu);
switch(vcpu->arch.mp_state) {
case KVM_MP_STATE_HALTED:
vcpu->arch.pv.pv_unhalted = false;
vcpu->arch.mp_state =
KVM_MP_STATE_RUNNABLE;
case KVM_MP_STATE_RUNNABLE:
vcpu->arch.apf.halted = false;
break;
case KVM_MP_STATE_INIT_RECEIVED:
break;
default:
return -EINTR;
break;
}
return 1;
}

static int vcpu_run(struct kvm_vcpu *vcpu)
{
int r;
struct kvm *kvm = vcpu->kvm;

vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);

r = 1;
while (r > 0) {
for (;;) {
if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted)
r = vcpu_enter_guest(vcpu);
else {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_block(vcpu);
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
kvm_apic_accept_events(vcpu);
switch(vcpu->arch.mp_state) {
case KVM_MP_STATE_HALTED:
vcpu->arch.pv.pv_unhalted = false;
vcpu->arch.mp_state =
KVM_MP_STATE_RUNNABLE;
case KVM_MP_STATE_RUNNABLE:
vcpu->arch.apf.halted = false;
break;
case KVM_MP_STATE_INIT_RECEIVED:
break;
default:
r = -EINTR;
break;
}
}
}

else
r = vcpu_block(kvm, vcpu);
if (r <= 0)
break;

@@ -6429,6 +6468,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
r = -EINTR;
vcpu->run->exit_reason = KVM_EXIT_INTR;
++vcpu->stat.request_irq_exits;
break;
}

kvm_check_async_pf_completion(vcpu);
@@ -6437,6 +6477,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
r = -EINTR;
vcpu->run->exit_reason = KVM_EXIT_INTR;
++vcpu->stat.signal_exits;
break;
}
if (need_resched()) {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
@@ -6568,7 +6609,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
} else
WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);

r = __vcpu_run(vcpu);
r = vcpu_run(vcpu);

out:
post_kvm_run_save(vcpu);
@@ -7075,11 +7116,14 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
kvm_clear_exception_queue(vcpu);

memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
kvm_update_dr0123(vcpu);
vcpu->arch.dr6 = DR6_INIT;
kvm_update_dr6(vcpu);
vcpu->arch.dr7 = DR7_FIXED_1;
kvm_update_dr7(vcpu);

vcpu->arch.cr2 = 0;

kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu->arch.apf.msr_val = 0;
vcpu->arch.st.msr_val = 0;
@@ -7240,7 +7284,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)

vcpu->arch.pv.pv_unhalted = false;
vcpu->arch.emulate_ctxt.ops = &emulate_ops;
if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
@@ -7288,6 +7332,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu->arch.guest_supported_xcr0 = 0;
vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);

kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);

@@ -7428,7 +7474,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,

for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
kvm_kvfree(free->arch.rmap[i]);
kvfree(free->arch.rmap[i]);
free->arch.rmap[i] = NULL;
}
if (i == 0)
@@ -7436,7 +7482,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,

if (!dont || free->arch.lpage_info[i - 1] !=
dont->arch.lpage_info[i - 1]) {
kvm_kvfree(free->arch.lpage_info[i - 1]);
kvfree(free->arch.lpage_info[i - 1]);
free->arch.lpage_info[i - 1] = NULL;
}
}
@@ -7490,12 +7536,12 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,

out_free:
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
kvm_kvfree(slot->arch.rmap[i]);
kvfree(slot->arch.rmap[i]);
slot->arch.rmap[i] = NULL;
if (i == 0)
continue;

kvm_kvfree(slot->arch.lpage_info[i - 1]);
kvfree(slot->arch.lpage_info[i - 1]);
slot->arch.lpage_info[i - 1] = NULL;
}
return -ENOMEM;
@@ -7617,6 +7663,23 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
/* It's OK to get 'new' slot here as it has already been installed */
new = id_to_memslot(kvm->memslots, mem->slot);

/*
* Dirty logging tracks sptes in 4k granularity, meaning that large
* sptes have to be split. If live migration is successful, the guest
* in the source machine will be destroyed and large sptes will be
* created in the destination. However, if the guest continues to run
* in the source machine (for example if live migration fails), small
* sptes will remain around and cause bad performance.
*
* Scan sptes if dirty logging has been stopped, dropping those
* which can be collapsed into a single large-page spte. Later
* page faults will create the large-page sptes.
*/
if ((change != KVM_MR_DELETE) &&
(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
kvm_mmu_zap_collapsible_sptes(kvm, new);

/*
* Set up write protection and/or dirty logging for the new slot.
*
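The kvm_kvfree() to kvfree() conversions above drop KVM's private copy of the "free either a vmalloc or a kmalloc buffer" helper in favor of the generic one from mm/util.c, which is roughly (sketch from memory, not part of this diff):

    void kvfree(const void *addr)
    {
            /* Dispatch on how the buffer was originally allocated. */
            if (is_vmalloc_addr(addr))
                    vfree(addr);
            else
                    kfree(addr);
    }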
@@ -82,18 +82,15 @@ static notrace cycle_t vread_pvclock(int *mode)
cycle_t ret;
u64 last;
u32 version;
u32 migrate_count;
u8 flags;
unsigned cpu, cpu1;

/*
* Note: hypervisor must guarantee that:
* 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
* 2. that per-CPU pvclock time info is updated if the
* underlying CPU changes.
* 3. that version is increased whenever underlying CPU
* changes.
*
* When looping to get a consistent (time-info, tsc) pair, we
* also need to deal with the possibility we can switch vcpus,
* so make sure we always re-fetch time-info for the current vcpu.
*/
do {
cpu = __getcpu() & VGETCPU_CPU_MASK;
@@ -102,20 +99,27 @@ static notrace cycle_t vread_pvclock(int *mode)
* __getcpu() calls (Gleb).
*/

pvti = get_pvti(cpu);
/* Make sure migrate_count will change if we leave the VCPU. */
do {
pvti = get_pvti(cpu);
migrate_count = pvti->migrate_count;

cpu1 = cpu;
cpu = __getcpu() & VGETCPU_CPU_MASK;
} while (unlikely(cpu != cpu1));

version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);

/*
* Test we're still on the cpu as well as the version.
* We could have been migrated just after the first
* vgetcpu but before fetching the version, so we
* wouldn't notice a version change.
* - We must read TSC of pvti's VCPU.
* - KVM doesn't follow the versioning protocol, so data could
* change before version if we left the VCPU.
*/
cpu1 = __getcpu() & VGETCPU_CPU_MASK;
} while (unlikely(cpu != cpu1 ||
(pvti->pvti.version & 1) ||
pvti->pvti.version != version));
smp_rmb();
} while (unlikely((pvti->pvti.version & 1) ||
pvti->pvti.version != version ||
pvti->migrate_count != migrate_count));

if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
*mode = VCLOCK_NONE;
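A hedged restatement of the reworked pvclock retry above: a (time, tsc) sample is accepted only if the per-CPU pvti version was even and unchanged around the read, and migrate_count did not move, proving the task never left the VCPU mid-read (the window in which KVM may rewrite pvti without following the version protocol). Condensed skeleton, with the inner loop that pins pvti to a stable CPU elided:

    do {
            /* ... pick cpu, fetch pvti for it, snapshot migrate_count ... */
            version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);
            smp_rmb();
    } while (unlikely((pvti->pvti.version & 1) ||
                      pvti->pvti.version != version ||
                      pvti->migrate_count != migrate_count));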
@@ -24,17 +24,14 @@
#include <linux/workqueue.h>

struct arch_timer_kvm {
#ifdef CONFIG_KVM_ARM_TIMER
/* Is the timer enabled */
bool enabled;

/* Virtual offset */
cycle_t cntvoff;
#endif
};

struct arch_timer_cpu {
#ifdef CONFIG_KVM_ARM_TIMER
/* Registers: control register, timer value */
u32 cntv_ctl; /* Saved/restored */
cycle_t cntv_cval; /* Saved/restored */
@@ -55,10 +52,8 @@ struct arch_timer_cpu {

/* Timer IRQ */
const struct kvm_irq_level *irq;
#endif
};

#ifdef CONFIG_KVM_ARM_TIMER
int kvm_timer_hyp_init(void);
void kvm_timer_enable(struct kvm *kvm);
void kvm_timer_init(struct kvm *kvm);
@@ -72,30 +67,6 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);

#else
static inline int kvm_timer_hyp_init(void)
{
return 0;
};

static inline void kvm_timer_enable(struct kvm *kvm) {}
static inline void kvm_timer_init(struct kvm *kvm) {}
static inline void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
const struct kvm_irq_level *irq) {}
static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) {}

static inline int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
return 0;
}

static inline u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
return 0;
}
#endif
bool kvm_timer_should_fire(struct kvm_vcpu *vcpu);

#endif
@@ -24,6 +24,7 @@
#include <linux/irqreturn.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <kvm/iodev.h>

#define VGIC_NR_IRQS_LEGACY 256
#define VGIC_NR_SGIS 16
@@ -140,16 +141,21 @@ struct vgic_params {
};

struct vgic_vm_ops {
bool (*handle_mmio)(struct kvm_vcpu *, struct kvm_run *,
struct kvm_exit_mmio *);
bool (*queue_sgi)(struct kvm_vcpu *, int irq);
void (*add_sgi_source)(struct kvm_vcpu *, int irq, int source);
int (*init_model)(struct kvm *);
int (*map_resources)(struct kvm *, const struct vgic_params *);
};

struct vgic_io_device {
gpa_t addr;
int len;
const struct vgic_io_range *reg_ranges;
struct kvm_vcpu *redist_vcpu;
struct kvm_io_device dev;
};

struct vgic_dist {
#ifdef CONFIG_KVM_ARM_VGIC
spinlock_t lock;
bool in_kernel;
bool ready;
@@ -197,6 +203,9 @@ struct vgic_dist {
/* Level-triggered interrupt queued on VCPU interface */
struct vgic_bitmap irq_queued;

/* Interrupt was active when unqueue from VCPU interface */
struct vgic_bitmap irq_active;

/* Interrupt priority. Not used yet. */
struct vgic_bytemap irq_priority;

@@ -237,8 +246,12 @@ struct vgic_dist {
/* Bitmap indicating which CPU has something pending */
unsigned long *irq_pending_on_cpu;

/* Bitmap indicating which CPU has active IRQs */
unsigned long *irq_active_on_cpu;

struct vgic_vm_ops vm_ops;
#endif
struct vgic_io_device dist_iodev;
struct vgic_io_device *redist_iodevs;
};

struct vgic_v2_cpu_if {
@@ -266,13 +279,18 @@ struct vgic_v3_cpu_if {
};

struct vgic_cpu {
#ifdef CONFIG_KVM_ARM_VGIC
/* per IRQ to LR mapping */
u8 *vgic_irq_lr_map;

/* Pending interrupts on this VCPU */
/* Pending/active/both interrupts on this VCPU */
DECLARE_BITMAP( pending_percpu, VGIC_NR_PRIVATE_IRQS);
DECLARE_BITMAP( active_percpu, VGIC_NR_PRIVATE_IRQS);
DECLARE_BITMAP( pend_act_percpu, VGIC_NR_PRIVATE_IRQS);

/* Pending/active/both shared interrupts, dynamically sized */
unsigned long *pending_shared;
unsigned long *active_shared;
unsigned long *pend_act_shared;

/* Bitmap of used/free list registers */
DECLARE_BITMAP( lr_used, VGIC_V2_MAX_LRS);
@@ -285,7 +303,6 @@ struct vgic_cpu {
struct vgic_v2_cpu_if vgic_v2;
struct vgic_v3_cpu_if vgic_v3;
};
#endif
};

#define LR_EMPTY 0xff
@@ -295,10 +312,7 @@ struct vgic_cpu {

struct kvm;
struct kvm_vcpu;
struct kvm_run;
struct kvm_exit_mmio;

#ifdef CONFIG_KVM_ARM_VGIC
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
int kvm_vgic_hyp_init(void);
int kvm_vgic_map_resources(struct kvm *kvm);
@@ -312,8 +326,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
bool level);
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_exit_mmio *mmio);
int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu);

#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
@@ -335,84 +348,4 @@ static inline int vgic_v3_probe(struct device_node *vgic_node,
}
#endif

#else
static inline int kvm_vgic_hyp_init(void)
{
return 0;
}

static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
{
return 0;
}

static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
return -ENXIO;
}

static inline int kvm_vgic_map_resources(struct kvm *kvm)
{
return 0;
}

static inline int kvm_vgic_create(struct kvm *kvm, u32 type)
{
return 0;
}

static inline void kvm_vgic_destroy(struct kvm *kvm)
{
}

static inline void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
}

static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
return 0;
}

static inline void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) {}

static inline int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid,
unsigned int irq_num, bool level)
{
return 0;
}

static inline int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
return 0;
}

static inline bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_exit_mmio *mmio)
{
return false;
}

static inline int irqchip_in_kernel(struct kvm *kvm)
{
return 0;
}

static inline bool vgic_initialized(struct kvm *kvm)
{
return true;
}

static inline bool vgic_ready(struct kvm *kvm)
{
return true;
}

static inline int kvm_vgic_get_max_vcpus(void)
{
return KVM_MAX_VCPUS;
}
#endif

#endif
@@ -9,17 +9,17 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef __KVM_IODEV_H__
#define __KVM_IODEV_H__

#include <linux/kvm_types.h>
#include <asm/errno.h>
#include <linux/errno.h>

struct kvm_io_device;
struct kvm_vcpu;

/**
* kvm_io_device_ops are called under kvm slots_lock.
@@ -27,11 +27,13 @@ struct kvm_io_device;
* or non-zero to have it passed to the next device.
**/
struct kvm_io_device_ops {
int (*read)(struct kvm_io_device *this,
int (*read)(struct kvm_vcpu *vcpu,
struct kvm_io_device *this,
gpa_t addr,
int len,
void *val);
int (*write)(struct kvm_io_device *this,
int (*write)(struct kvm_vcpu *vcpu,
struct kvm_io_device *this,
gpa_t addr,
int len,
const void *val);
@@ -49,16 +51,20 @@ static inline void kvm_iodevice_init(struct kvm_io_device *dev,
dev->ops = ops;
}

static inline int kvm_iodevice_read(struct kvm_io_device *dev,
gpa_t addr, int l, void *v)
static inline int kvm_iodevice_read(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev, gpa_t addr,
int l, void *v)
{
return dev->ops->read ? dev->ops->read(dev, addr, l, v) : -EOPNOTSUPP;
return dev->ops->read ? dev->ops->read(vcpu, dev, addr, l, v)
: -EOPNOTSUPP;
}

static inline int kvm_iodevice_write(struct kvm_io_device *dev,
gpa_t addr, int l, const void *v)
static inline int kvm_iodevice_write(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev, gpa_t addr,
int l, const void *v)
{
return dev->ops->write ? dev->ops->write(dev, addr, l, v) : -EOPNOTSUPP;
return dev->ops->write ? dev->ops->write(vcpu, dev, addr, l, v)
: -EOPNOTSUPP;
}

static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
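With the signature change above, every in-kernel I/O device handler now receives the vcpu performing the access, which is what lets ARM's ioeventfd path find the right per-vcpu state. A minimal sketch of a device implementation against the new ops (the 'demo' names are hypothetical):

    /* Reads return zeroes, writes are discarded; 0 means "handled". */
    static int demo_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                         gpa_t addr, int len, void *val)
    {
            memset(val, 0, len);
            return 0;
    }

    static int demo_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                          gpa_t addr, int len, const void *val)
    {
            return 0;
    }

    static const struct kvm_io_device_ops demo_ops = {
            .read  = demo_read,
            .write = demo_write,
    };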
@@ -165,12 +165,12 @@ enum kvm_bus {
KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
@@ -658,7 +658,6 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void *kvm_kvzalloc(unsigned long size);
void kvm_kvfree(const void *addr);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
@@ -700,6 +699,20 @@ static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
#endif
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
* returns true if the virtual interrupt controller is initialized and
* ready to accept virtual IRQ. On some architectures the virtual interrupt
* controller is dynamically instantiated and this is not always true.
*/
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
return true;
}
#endif

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);
@@ -969,11 +982,16 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
static inline bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
{
return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else
@@ -176,6 +176,14 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);

/* Notifier for when a task gets migrated to a new CPU */
struct task_migration_notifier {
struct task_struct *task;
int from_cpu;
int to_cpu;
};
extern void register_task_migration_notifier(struct notifier_block *n);

extern unsigned long get_parent_ip(unsigned long addr);

extern void dump_cpu_task(int cpu);
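The new hook above is a plain atomic notifier chain; pvclock is the consumer added in this series, but any subsystem could subscribe. A hedged sketch of a consumer (names are hypothetical; the notifier_block API is the standard one from linux/notifier.h):

    static int demo_migrate_cb(struct notifier_block *nb,
                               unsigned long action, void *data)
    {
            struct task_migration_notifier *tmn = data;

            /* Called from set_task_cpu() whenever a task changes CPU. */
            pr_debug("task %d migrated: cpu %d -> %d\n",
                     tmn->task->pid, tmn->from_cpu, tmn->to_cpu);
            return NOTIFY_OK;
    }

    static struct notifier_block demo_migrate_nb = {
            .notifier_call = demo_migrate_cb,
    };

    /* somewhere during init: */
    register_task_migration_notifier(&demo_migrate_nb);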
@@ -147,6 +147,16 @@ struct kvm_pit_config {

#define KVM_PIT_SPEAKER_DUMMY 1

struct kvm_s390_skeys {
__u64 start_gfn;
__u64 count;
__u64 skeydata_addr;
__u32 flags;
__u32 reserved[9];
};
#define KVM_S390_GET_SKEYS_NONE 1
#define KVM_S390_SKEYS_MAX 1048576

#define KVM_EXIT_UNKNOWN 0
#define KVM_EXIT_EXCEPTION 1
#define KVM_EXIT_IO 2
@@ -172,6 +182,7 @@ struct kvm_pit_config {
#define KVM_EXIT_S390_TSCH 22
#define KVM_EXIT_EPR 23
#define KVM_EXIT_SYSTEM_EVENT 24
#define KVM_EXIT_S390_STSI 25

/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -309,6 +320,15 @@ struct kvm_run {
__u32 type;
__u64 flags;
} system_event;
/* KVM_EXIT_S390_STSI */
struct {
__u64 addr;
__u8 ar;
__u8 reserved;
__u8 fc;
__u8 sel1;
__u16 sel2;
} s390_stsi;
/* Fix the size of the union. */
char padding[256];
};
@@ -324,7 +344,7 @@ struct kvm_run {
__u64 kvm_dirty_regs;
union {
struct kvm_sync_regs regs;
char padding[1024];
char padding[2048];
} s;
};

@@ -365,6 +385,24 @@ struct kvm_translation {
__u8 pad[5];
};

/* for KVM_S390_MEM_OP */
struct kvm_s390_mem_op {
/* in */
__u64 gaddr; /* the guest address */
__u64 flags; /* flags */
__u32 size; /* amount of bytes */
__u32 op; /* type of operation */
__u64 buf; /* buffer in userspace */
__u8 ar; /* the access register number */
__u8 reserved[31]; /* should be set to 0 */
};
/* types for kvm_s390_mem_op->op */
#define KVM_S390_MEMOP_LOGICAL_READ 0
#define KVM_S390_MEMOP_LOGICAL_WRITE 1
/* flags for kvm_s390_mem_op->flags */
#define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0)
#define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1)

/* for KVM_INTERRUPT */
struct kvm_interrupt {
/* in */
@@ -520,6 +558,13 @@ struct kvm_s390_irq {
} u;
};

struct kvm_s390_irq_state {
__u64 buf;
__u32 flags;
__u32 len;
__u32 reserved[4];
};

/* for KVM_SET_GUEST_DEBUG */

#define KVM_GUESTDBG_ENABLE 0x00000001
@@ -760,6 +805,14 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_PPC_ENABLE_HCALL 104
#define KVM_CAP_CHECK_EXTENSION_VM 105
#define KVM_CAP_S390_USER_SIGP 106
#define KVM_CAP_S390_VECTOR_REGISTERS 107
#define KVM_CAP_S390_MEM_OP 108
#define KVM_CAP_S390_USER_STSI 109
#define KVM_CAP_S390_SKEYS 110
#define KVM_CAP_MIPS_FPU 111
#define KVM_CAP_MIPS_MSA 112
#define KVM_CAP_S390_INJECT_IRQ 113
#define KVM_CAP_S390_IRQ_STATE 114

#ifdef KVM_CAP_IRQ_ROUTING

@@ -1135,6 +1188,16 @@ struct kvm_s390_ucas_mapping {
#define KVM_ARM_VCPU_INIT _IOW(KVMIO, 0xae, struct kvm_vcpu_init)
#define KVM_ARM_PREFERRED_TARGET _IOR(KVMIO, 0xaf, struct kvm_vcpu_init)
#define KVM_GET_REG_LIST _IOWR(KVMIO, 0xb0, struct kvm_reg_list)
/* Available with KVM_CAP_S390_MEM_OP */
#define KVM_S390_MEM_OP _IOW(KVMIO, 0xb1, struct kvm_s390_mem_op)
/* Available with KVM_CAP_S390_SKEYS */
#define KVM_S390_GET_SKEYS _IOW(KVMIO, 0xb2, struct kvm_s390_skeys)
#define KVM_S390_SET_SKEYS _IOW(KVMIO, 0xb3, struct kvm_s390_skeys)
/* Available with KVM_CAP_S390_INJECT_IRQ */
#define KVM_S390_IRQ _IOW(KVMIO, 0xb4, struct kvm_s390_irq)
/* Available with KVM_CAP_S390_IRQ_STATE */
#define KVM_S390_SET_IRQ_STATE _IOW(KVMIO, 0xb5, struct kvm_s390_irq_state)
#define KVM_S390_GET_IRQ_STATE _IOW(KVMIO, 0xb6, struct kvm_s390_irq_state)

#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
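A hedged userspace sketch of the new KVM_S390_MEM_OP vcpu ioctl declared above, reading guest memory by logical address (subject to KVM_CAP_S390_MEM_OP being advertised; the helper name is hypothetical):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int read_guest(int vcpu_fd, __u64 gaddr, void *buf, __u32 size)
    {
            struct kvm_s390_mem_op op;

            memset(&op, 0, sizeof(op));        /* reserved[] must be 0 */
            op.gaddr = gaddr;                  /* guest logical address */
            op.size  = size;
            op.op    = KVM_S390_MEMOP_LOGICAL_READ;
            op.buf   = (__u64)(unsigned long)buf;
            op.ar    = 0;                      /* access register number */

            return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
    }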
@@ -996,6 +996,13 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
rq_clock_skip_update(rq, true);
}

static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);

void register_task_migration_notifier(struct notifier_block *n)
{
atomic_notifier_chain_register(&task_migration_notifier, n);
}

#ifdef CONFIG_SMP
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
@@ -1026,10 +1033,18 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
trace_sched_migrate_task(p, new_cpu);

if (task_cpu(p) != new_cpu) {
struct task_migration_notifier tmn;

if (p->sched_class->migrate_task_rq)
p->sched_class->migrate_task_rq(p, new_cpu);
p->se.nr_migrations++;
perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);

tmn.task = p;
tmn.from_cpu = task_cpu(p);
tmn.to_cpu = new_cpu;

atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
}

__set_task_cpu(p, new_cpu);
@@ -85,13 +85,22 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}

/*
* Work function for handling the backup timer that we schedule when a vcpu is
* no longer running, but had a timer programmed to fire in the future.
*/
static void kvm_timer_inject_irq_work(struct work_struct *work)
{
struct kvm_vcpu *vcpu;

vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
vcpu->arch.timer_cpu.armed = false;
kvm_timer_inject_irq(vcpu);

/*
* If the vcpu is blocked we want to wake it up so that it will see
* the timer has expired when entering the guest.
*/
kvm_vcpu_kick(vcpu);
}

static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
@@ -102,6 +111,21 @@ static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
return HRTIMER_NORESTART;
}

bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
cycle_t cval, now;

if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
!(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE))
return false;

cval = timer->cntv_cval;
now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

return cval <= now;
}

/**
* kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
* @vcpu: The vcpu pointer
@@ -119,6 +143,13 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
* populate the CPU timer again.
*/
timer_disarm(timer);

/*
* If the timer expired while we were not scheduled, now is the time
* to inject it.
*/
if (kvm_timer_should_fire(vcpu))
kvm_timer_inject_irq(vcpu);
}

/**
@@ -134,16 +165,9 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
cycle_t cval, now;
u64 ns;

if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
!(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE))
return;

cval = timer->cntv_cval;
now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

BUG_ON(timer_is_armed(timer));

if (cval <= now) {
if (kvm_timer_should_fire(vcpu)) {
/*
* Timer has already expired while we were not
* looking. Inject the interrupt and carry on.
@@ -152,6 +176,9 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
return;
}

cval = timer->cntv_cval;
now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask,
&timecounter->frac);
timer_arm(timer, ns);
@ -107,6 +107,22 @@ static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
|
||||
vcpu->vcpu_id);
|
||||
}
|
||||
|
||||
static bool handle_mmio_set_active_reg(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset)
|
||||
{
|
||||
return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
|
||||
vcpu->vcpu_id);
|
||||
}
|
||||
|
||||
static bool handle_mmio_clear_active_reg(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset)
|
||||
{
|
||||
return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
|
||||
vcpu->vcpu_id);
|
||||
}
|
||||
|
||||
static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset)
|
||||
@ -303,7 +319,7 @@ static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
|
||||
return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
|
||||
}
|
||||
|
||||
static const struct kvm_mmio_range vgic_dist_ranges[] = {
|
||||
static const struct vgic_io_range vgic_dist_ranges[] = {
|
||||
{
|
||||
.base = GIC_DIST_CTRL,
|
||||
.len = 12,
|
||||
@ -344,13 +360,13 @@ static const struct kvm_mmio_range vgic_dist_ranges[] = {
|
||||
.base = GIC_DIST_ACTIVE_SET,
|
||||
.len = VGIC_MAX_IRQS / 8,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_raz_wi,
|
||||
.handle_mmio = handle_mmio_set_active_reg,
|
||||
},
|
||||
{
|
||||
.base = GIC_DIST_ACTIVE_CLEAR,
|
||||
.len = VGIC_MAX_IRQS / 8,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_raz_wi,
|
||||
.handle_mmio = handle_mmio_clear_active_reg,
|
||||
},
|
||||
{
|
||||
.base = GIC_DIST_PRI,
|
||||
@ -388,24 +404,6 @@ static const struct kvm_mmio_range vgic_dist_ranges[] = {
|
||||
{}
|
||||
};
|
||||
|
||||
static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
||||
struct kvm_exit_mmio *mmio)
|
||||
{
|
||||
unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;
|
||||
|
||||
if (!is_in_range(mmio->phys_addr, mmio->len, base,
|
||||
KVM_VGIC_V2_DIST_SIZE))
|
||||
return false;
|
||||
|
||||
/* GICv2 does not support accesses wider than 32 bits */
|
||||
if (mmio->len > 4) {
|
||||
kvm_inject_dabt(vcpu, mmio->phys_addr);
|
||||
return true;
|
||||
}
|
||||
|
||||
return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
|
||||
}
|
||||
|
||||
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
|
||||
{
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
@ -490,6 +488,7 @@ static bool vgic_v2_queue_sgi(struct kvm_vcpu *vcpu, int irq)
|
||||
static int vgic_v2_map_resources(struct kvm *kvm,
|
||||
const struct vgic_params *params)
|
||||
{
|
||||
struct vgic_dist *dist = &kvm->arch.vgic;
|
||||
int ret = 0;
|
||||
|
||||
if (!irqchip_in_kernel(kvm))
|
||||
@ -500,13 +499,17 @@ static int vgic_v2_map_resources(struct kvm *kvm,
|
||||
if (vgic_ready(kvm))
|
||||
goto out;
|
||||
|
||||
if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
|
||||
IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
|
||||
if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
|
||||
IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
|
||||
kvm_err("Need to set vgic cpu and dist addresses first\n");
|
||||
ret = -ENXIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
|
||||
KVM_VGIC_V2_DIST_SIZE,
|
||||
vgic_dist_ranges, -1, &dist->dist_iodev);
|
||||
|
||||
/*
|
||||
* Initialize the vgic if this hasn't already been done on demand by
|
||||
* accessing the vgic state from userspace.
|
||||
@ -514,18 +517,23 @@ static int vgic_v2_map_resources(struct kvm *kvm,
|
||||
ret = vgic_init(kvm);
|
||||
if (ret) {
|
||||
kvm_err("Unable to allocate maps\n");
|
||||
goto out;
|
||||
goto out_unregister;
|
||||
}
|
||||
|
||||
ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
|
||||
ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
|
||||
params->vcpu_base, KVM_VGIC_V2_CPU_SIZE,
|
||||
true);
|
||||
if (ret) {
|
||||
kvm_err("Unable to remap VGIC CPU to VCPU\n");
|
||||
goto out;
|
||||
goto out_unregister;
|
||||
}
|
||||
|
||||
kvm->arch.vgic.ready = true;
|
||||
dist->ready = true;
|
||||
goto out;
|
||||
|
||||
out_unregister:
|
||||
kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev);
|
||||
|
||||
out:
|
||||
if (ret)
|
||||
kvm_vgic_destroy(kvm);
|
||||
@ -554,7 +562,6 @@ void vgic_v2_init_emulation(struct kvm *kvm)
|
||||
{
|
||||
struct vgic_dist *dist = &kvm->arch.vgic;
|
||||
|
||||
dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
|
||||
dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
|
||||
dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
|
||||
dist->vm_ops.init_model = vgic_v2_init_model;
|
||||
@ -631,7 +638,7 @@ static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
|
||||
* CPU Interface Register accesses - these are not accessed by the VM, but by
|
||||
* user space for saving and restoring VGIC state.
|
||||
*/
|
||||
static const struct kvm_mmio_range vgic_cpu_ranges[] = {
|
||||
static const struct vgic_io_range vgic_cpu_ranges[] = {
|
||||
{
|
||||
.base = GIC_CPU_CTRL,
|
||||
.len = 12,
|
||||
@ -658,12 +665,13 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
|
||||
struct kvm_device_attr *attr,
|
||||
u32 *reg, bool is_write)
|
||||
{
|
||||
const struct kvm_mmio_range *r = NULL, *ranges;
|
||||
const struct vgic_io_range *r = NULL, *ranges;
|
||||
phys_addr_t offset;
|
||||
int ret, cpuid, c;
|
||||
struct kvm_vcpu *vcpu, *tmp_vcpu;
|
||||
struct vgic_dist *vgic;
|
||||
struct kvm_exit_mmio mmio;
|
||||
u32 data;
|
||||
|
||||
offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
|
||||
cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
|
||||
@ -685,6 +693,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
|
||||
|
||||
mmio.len = 4;
|
||||
mmio.is_write = is_write;
|
||||
mmio.data = &data;
|
||||
if (is_write)
|
||||
mmio_data_write(&mmio, ~0, *reg);
|
||||
switch (attr->group) {
|
||||
@ -699,7 +708,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
r = vgic_find_range(ranges, &mmio, offset);
|
||||
r = vgic_find_range(ranges, 4, offset);
|
||||
|
||||
if (unlikely(!r || !r->handle_mmio)) {
|
||||
ret = -ENXIO;
|
||||
|
@ -340,7 +340,7 @@ static bool handle_mmio_idregs(struct kvm_vcpu *vcpu,
|
||||
return false;
|
||||
}
|
||||
|
||||
static const struct kvm_mmio_range vgic_v3_dist_ranges[] = {
|
||||
static const struct vgic_io_range vgic_v3_dist_ranges[] = {
|
||||
{
|
||||
.base = GICD_CTLR,
|
||||
.len = 0x04,
|
||||
@ -502,6 +502,43 @@ static const struct kvm_mmio_range vgic_v3_dist_ranges[] = {
|
||||
{},
|
||||
};
|
||||
|
||||
static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset)
|
||||
{
|
||||
/* since we don't support LPIs, this register is zero for now */
|
||||
vgic_reg_access(mmio, NULL, offset,
|
||||
ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset)
|
||||
{
|
||||
u32 reg;
|
||||
u64 mpidr;
|
||||
struct kvm_vcpu *redist_vcpu = mmio->private;
|
||||
int target_vcpu_id = redist_vcpu->vcpu_id;
|
||||
|
||||
/* the upper 32 bits contain the affinity value */
|
||||
if ((offset & ~3) == 4) {
|
||||
mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
|
||||
reg = compress_mpidr(mpidr);
|
||||
|
||||
vgic_reg_access(mmio, ®, offset,
|
||||
ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
|
||||
return false;
|
||||
}
|
||||
|
||||
reg = redist_vcpu->vcpu_id << 8;
|
||||
if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
|
||||
reg |= GICR_TYPER_LAST;
|
||||
vgic_reg_access(mmio, ®, offset,
|
||||
ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset)
|
||||
@ -570,113 +607,9 @@ static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu *vcpu,
|
||||
return vgic_handle_cfg_reg(reg, mmio, offset);
|
||||
}
|
||||
|
||||
static const struct kvm_mmio_range vgic_redist_sgi_ranges[] = {
|
||||
{
|
||||
.base = GICR_IGROUPR0,
|
||||
.len = 0x04,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_rao_wi,
|
||||
},
|
||||
{
|
||||
.base = GICR_ISENABLER0,
|
||||
.len = 0x04,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_set_enable_reg_redist,
|
||||
},
|
||||
{
|
||||
.base = GICR_ICENABLER0,
|
||||
.len = 0x04,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_clear_enable_reg_redist,
|
||||
},
|
||||
{
|
||||
.base = GICR_ISPENDR0,
|
||||
.len = 0x04,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_set_pending_reg_redist,
|
||||
},
|
||||
{
|
||||
.base = GICR_ICPENDR0,
|
||||
.len = 0x04,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_clear_pending_reg_redist,
|
||||
},
|
||||
{
|
||||
.base = GICR_ISACTIVER0,
|
||||
.len = 0x04,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_raz_wi,
|
||||
},
|
||||
{
|
||||
.base = GICR_ICACTIVER0,
|
||||
.len = 0x04,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_raz_wi,
|
||||
},
|
||||
{
|
||||
.base = GICR_IPRIORITYR0,
|
||||
.len = 0x20,
|
||||
.bits_per_irq = 8,
|
||||
.handle_mmio = handle_mmio_priority_reg_redist,
|
||||
},
|
||||
{
|
||||
.base = GICR_ICFGR0,
|
||||
.len = 0x08,
|
||||
.bits_per_irq = 2,
|
||||
.handle_mmio = handle_mmio_cfg_reg_redist,
|
||||
},
|
||||
{
|
||||
.base = GICR_IGRPMODR0,
|
||||
.len = 0x04,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_raz_wi,
|
||||
},
|
||||
{
|
||||
.base = GICR_NSACR,
|
||||
.len = 0x04,
|
||||
.handle_mmio = handle_mmio_raz_wi,
|
||||
},
|
||||
{},
|
||||
};
|
||||
#define SGI_base(x) ((x) + SZ_64K)
|
||||
|
||||
static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset)
|
||||
{
|
||||
/* since we don't support LPIs, this register is zero for now */
|
||||
vgic_reg_access(mmio, NULL, offset,
|
||||
ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
|
||||
struct kvm_exit_mmio *mmio,
|
||||
phys_addr_t offset)
|
||||
{
|
||||
u32 reg;
|
||||
u64 mpidr;
|
||||
struct kvm_vcpu *redist_vcpu = mmio->private;
|
||||
int target_vcpu_id = redist_vcpu->vcpu_id;
|
||||
|
||||
/* the upper 32 bits contain the affinity value */
|
||||
if ((offset & ~3) == 4) {
|
||||
mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
|
||||
reg = compress_mpidr(mpidr);
|
||||
|
||||
vgic_reg_access(mmio, ®, offset,
|
||||
ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
|
||||
return false;
|
||||
}
|
||||
|
||||
reg = redist_vcpu->vcpu_id << 8;
|
||||
if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
|
||||
reg |= GICR_TYPER_LAST;
|
||||
vgic_reg_access(mmio, ®, offset,
|
||||
ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
|
||||
return false;
|
||||
}
|
||||
|
||||
static const struct kvm_mmio_range vgic_redist_ranges[] = {
|
||||
static const struct vgic_io_range vgic_redist_ranges[] = {
|
||||
{
|
||||
.base = GICR_CTLR,
|
||||
.len = 0x04,
|
||||
@ -707,49 +640,74 @@ static const struct kvm_mmio_range vgic_redist_ranges[] = {
|
||||
.bits_per_irq = 0,
|
||||
.handle_mmio = handle_mmio_idregs,
|
||||
},
|
||||
{
|
||||
.base = SGI_base(GICR_IGROUPR0),
|
||||
.len = 0x04,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_rao_wi,
|
||||
},
|
||||
{
|
||||
.base = SGI_base(GICR_ISENABLER0),
|
||||
.len = 0x04,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_set_enable_reg_redist,
|
||||
},
|
||||
{
|
||||
.base = SGI_base(GICR_ICENABLER0),
|
||||
.len = 0x04,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_clear_enable_reg_redist,
|
||||
},
|
||||
{
|
||||
.base = SGI_base(GICR_ISPENDR0),
|
||||
.len = 0x04,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_set_pending_reg_redist,
|
||||
},
|
||||
{
|
||||
.base = SGI_base(GICR_ICPENDR0),
|
||||
.len = 0x04,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_clear_pending_reg_redist,
|
||||
},
|
||||
{
|
||||
.base = SGI_base(GICR_ISACTIVER0),
|
||||
.len = 0x04,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_raz_wi,
|
||||
},
|
||||
{
|
||||
.base = SGI_base(GICR_ICACTIVER0),
|
||||
.len = 0x04,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_raz_wi,
|
||||
},
|
||||
{
|
||||
.base = SGI_base(GICR_IPRIORITYR0),
|
||||
.len = 0x20,
|
||||
.bits_per_irq = 8,
|
||||
.handle_mmio = handle_mmio_priority_reg_redist,
|
||||
},
|
||||
{
|
||||
.base = SGI_base(GICR_ICFGR0),
|
||||
.len = 0x08,
|
||||
.bits_per_irq = 2,
|
||||
.handle_mmio = handle_mmio_cfg_reg_redist,
|
||||
},
|
||||
{
|
||||
.base = SGI_base(GICR_IGRPMODR0),
|
||||
.len = 0x04,
|
||||
.bits_per_irq = 1,
|
||||
.handle_mmio = handle_mmio_raz_wi,
|
||||
},
|
||||
{
|
||||
.base = SGI_base(GICR_NSACR),
|
||||
.len = 0x04,
|
||||
.handle_mmio = handle_mmio_raz_wi,
|
||||
},
|
||||
{},
|
||||
};
|
||||
|
||||
/*
|
||||
* This function splits accesses between the distributor and the two
|
||||
* redistributor parts (private/SPI). As each redistributor is accessible
|
||||
* from any CPU, we have to determine the affected VCPU by taking the faulting
|
||||
* address into account. We then pass this VCPU to the handler function via
|
||||
* the private parameter.
|
||||
*/
|
||||
#define SGI_BASE_OFFSET SZ_64K
|
||||
static bool vgic_v3_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
||||
struct kvm_exit_mmio *mmio)
|
||||
{
|
||||
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
|
||||
unsigned long dbase = dist->vgic_dist_base;
|
||||
unsigned long rdbase = dist->vgic_redist_base;
|
||||
int nrcpus = atomic_read(&vcpu->kvm->online_vcpus);
|
||||
int vcpu_id;
|
||||
const struct kvm_mmio_range *mmio_range;
|
||||
|
||||
if (is_in_range(mmio->phys_addr, mmio->len, dbase, GIC_V3_DIST_SIZE)) {
|
||||
return vgic_handle_mmio_range(vcpu, run, mmio,
|
||||
vgic_v3_dist_ranges, dbase);
|
||||
}
|
||||
|
||||
if (!is_in_range(mmio->phys_addr, mmio->len, rdbase,
|
||||
GIC_V3_REDIST_SIZE * nrcpus))
|
||||
return false;
|
||||
|
||||
vcpu_id = (mmio->phys_addr - rdbase) / GIC_V3_REDIST_SIZE;
|
||||
rdbase += (vcpu_id * GIC_V3_REDIST_SIZE);
|
||||
mmio->private = kvm_get_vcpu(vcpu->kvm, vcpu_id);
|
||||
|
||||
if (mmio->phys_addr >= rdbase + SGI_BASE_OFFSET) {
|
||||
rdbase += SGI_BASE_OFFSET;
|
||||
mmio_range = vgic_redist_sgi_ranges;
|
||||
} else {
|
||||
mmio_range = vgic_redist_ranges;
|
||||
}
|
||||
return vgic_handle_mmio_range(vcpu, run, mmio, mmio_range, rdbase);
|
||||
}
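
A worked decode of the redistributor dispatch above, with the constants spelled out: each vcpu owns one 128K redistributor region (two 64K frames, control registers first, SGI/PPI registers second), so the faulting address alone identifies the target vcpu and frame. A standalone sketch (the macro values mirror GIC_V3_REDIST_SIZE and SGI_BASE_OFFSET; nothing else here is taken from the kernel):

#include <stdbool.h>
#include <stdint.h>

#define REDIST_SIZE	0x20000u	/* 128K per vcpu */
#define SGI_OFFSET	0x10000u	/* second 64K frame */

struct redist_target {
	uint64_t vcpu_id;
	bool sgi_frame;
	uint64_t offset;	/* offset inside the selected 64K frame */
};

static struct redist_target decode_redist(uint64_t addr, uint64_t rdbase)
{
	struct redist_target t;
	uint64_t frame_base;

	t.vcpu_id = (addr - rdbase) / REDIST_SIZE;
	frame_base = rdbase + t.vcpu_id * REDIST_SIZE;
	t.sgi_frame = addr >= frame_base + SGI_OFFSET;
	t.offset = addr - frame_base - (t.sgi_frame ? SGI_OFFSET : 0);
	return t;
}

For example, with rdbase = 0x8000000, an access at 0x8034010 decodes to vcpu 1 (0x34010 / 0x20000), the SGI frame, offset 0x4010.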

static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	if (vgic_queue_irq(vcpu, 0, irq)) {
@@ -766,6 +724,9 @@ static int vgic_v3_map_resources(struct kvm *kvm,
{
	int ret = 0;
	struct vgic_dist *dist = &kvm->arch.vgic;
	gpa_t rdbase = dist->vgic_redist_base;
	struct vgic_io_device *iodevs = NULL;
	int i;

	if (!irqchip_in_kernel(kvm))
		return 0;
@@ -791,7 +752,41 @@ static int vgic_v3_map_resources(struct kvm *kvm,
		goto out;
	}

	kvm->arch.vgic.ready = true;
	ret = vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
				       GIC_V3_DIST_SIZE, vgic_v3_dist_ranges,
				       -1, &dist->dist_iodev);
	if (ret)
		goto out;

	iodevs = kcalloc(dist->nr_cpus, sizeof(iodevs[0]), GFP_KERNEL);
	if (!iodevs) {
		ret = -ENOMEM;
		goto out_unregister;
	}

	for (i = 0; i < dist->nr_cpus; i++) {
		ret = vgic_register_kvm_io_dev(kvm, rdbase,
					       SZ_128K, vgic_redist_ranges,
					       i, &iodevs[i]);
		if (ret)
			goto out_unregister;
		rdbase += GIC_V3_REDIST_SIZE;
	}

	dist->redist_iodevs = iodevs;
	dist->ready = true;
	goto out;

out_unregister:
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dist->dist_iodev.dev);
	if (iodevs) {
		for (i = 0; i < dist->nr_cpus; i++) {
			if (iodevs[i].dev.ops)
				kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
							  &iodevs[i].dev);
		}
	}

out:
	if (ret)
		kvm_vgic_destroy(kvm);
@@ -832,7 +827,6 @@ void vgic_v3_init_emulation(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	dist->vm_ops.handle_mmio = vgic_v3_handle_mmio;
	dist->vm_ops.queue_sgi = vgic_v3_queue_sgi;
	dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source;
	dist->vm_ops.init_model = vgic_v3_init_model;

@@ -31,6 +31,9 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <trace/events/kvm.h>
#include <asm/kvm.h>
#include <kvm/iodev.h>

/*
 * How the whole thing works (courtesy of Christoffer Dall):
@@ -263,6 +266,13 @@ static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
	return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
}

static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -277,6 +287,20 @@ static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
}

static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -520,6 +544,44 @@ bool vgic_handle_clear_pending_reg(struct kvm *kvm,
	return false;
}

bool vgic_handle_set_active_reg(struct kvm *kvm,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset, int vcpu_id)
{
	u32 *reg;
	struct vgic_dist *dist = &kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);

	if (mmio->is_write) {
		vgic_update_state(kvm);
		return true;
	}

	return false;
}

bool vgic_handle_clear_active_reg(struct kvm *kvm,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset, int vcpu_id)
{
	u32 *reg;
	struct vgic_dist *dist = &kvm->arch.vgic;

	reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);

	if (mmio->is_write) {
		vgic_update_state(kvm);
		return true;
	}

	return false;
}

static u32 vgic_cfg_expand(u16 val)
{
	u32 res = 0;
@@ -588,16 +650,12 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
}

/**
 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
 * vgic_unqueue_irqs - move pending/active IRQs from LRs to the distributor
 * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
 *
 * Move any pending IRQs that have already been assigned to LRs back to the
 * Move any IRQs that have already been assigned to LRs back to the
 * emulated distributor state so that the complete emulated state can be read
 * from the main emulation structures without investigating the LRs.
 *
 * Note that IRQs in the active state in the LRs get their pending state moved
 * to the distributor but the active state stays in the LRs, because we don't
 * track the active state on the distributor side.
 */
void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
@@ -613,12 +671,22 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
		 * 01: pending
		 * 10: active
		 * 11: pending and active
		 *
		 * If the LR holds only an active interrupt (not pending) then
		 * just leave it alone.
		 */
		if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
			continue;
		BUG_ON(!(lr.state & LR_STATE_MASK));

		/* Reestablish SGI source for pending and active IRQs */
		if (lr.irq < VGIC_NR_SGIS)
			add_sgi_source(vcpu, lr.irq, lr.source);

		/*
		 * If the LR holds an active (10) or a pending and active (11)
		 * interrupt then move the active state to the
		 * distributor tracking bit.
		 */
		if (lr.state & LR_STATE_ACTIVE) {
			vgic_irq_set_active(vcpu, lr.irq);
			lr.state &= ~LR_STATE_ACTIVE;
		}

		/*
		 * Reestablish the pending state on the distributor and the
@@ -626,21 +694,19 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
		 * is fine, then we are only setting a few bits that were
		 * already set.
		 */
		vgic_dist_irq_set_pending(vcpu, lr.irq);
		if (lr.irq < VGIC_NR_SGIS)
			add_sgi_source(vcpu, lr.irq, lr.source);
		lr.state &= ~LR_STATE_PENDING;
		if (lr.state & LR_STATE_PENDING) {
			vgic_dist_irq_set_pending(vcpu, lr.irq);
			lr.state &= ~LR_STATE_PENDING;
		}

		vgic_set_lr(vcpu, i, lr);

		/*
		 * If there's no state left on the LR (it could still be
		 * active), then the LR does not hold any useful info and can
		 * be marked as free for other use.
		 * Mark the LR as free for other use.
		 */
		if (!(lr.state & LR_STATE_MASK)) {
			vgic_retire_lr(i, lr.irq, vcpu);
			vgic_irq_clear_queued(vcpu, lr.irq);
		}
		BUG_ON(lr.state & LR_STATE_MASK);
		vgic_retire_lr(i, lr.irq, vcpu);
		vgic_irq_clear_queued(vcpu, lr.irq);

		/* Finally update the VGIC state. */
		vgic_update_state(vcpu->kvm);
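
The two LR state bits drive all of the bookkeeping above. Spelled out as plain C (the bit positions match the usual vgic definitions, but treat them as illustrative here):

#define LR_STATE_PENDING	(1 << 0)	/* 01: pending            */
#define LR_STATE_ACTIVE		(1 << 1)	/* 10: active             */
#define LR_STATE_MASK		(3 << 0)	/* 11: pending and active */

struct unqueue_result {
	int set_dist_pending;	/* dist->irq_pending gets this irq */
	int set_dist_active;	/* dist->irq_active gets this irq  */
};

/*
 * Unqueue rule sketch: the pending half moves to the distributor's
 * pending bitmap, the active half to the new active bitmap, and both
 * bits are then cleared so the LR ends up free (00) and can be retired.
 */
static inline struct unqueue_result unqueue(int lr_state)
{
	struct unqueue_result r;

	r.set_dist_pending = !!(lr_state & LR_STATE_PENDING);
	r.set_dist_active = !!(lr_state & LR_STATE_ACTIVE);
	return r;
}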

@@ -648,24 +714,21 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
}

const
struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges,
				      int len, gpa_t offset)
{
	const struct kvm_mmio_range *r = ranges;

	while (r->len) {
		if (offset >= r->base &&
		    (offset + mmio->len) <= (r->base + r->len))
			return r;
		r++;
	while (ranges->len) {
		if (offset >= ranges->base &&
		    (offset + len) <= (ranges->base + ranges->len))
			return ranges;
		ranges++;
	}

	return NULL;
}

static bool vgic_validate_access(const struct vgic_dist *dist,
				 const struct kvm_mmio_range *range,
				 const struct vgic_io_range *range,
				 unsigned long offset)
{
	int irq;
@@ -693,9 +756,8 @@ static bool vgic_validate_access(const struct vgic_dist *dist,
static bool call_range_handler(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio,
			       unsigned long offset,
			       const struct kvm_mmio_range *range)
			       const struct vgic_io_range *range)
{
	u32 *data32 = (void *)mmio->data;
	struct kvm_exit_mmio mmio32;
	bool ret;

@@ -712,91 +774,142 @@ static bool call_range_handler(struct kvm_vcpu *vcpu,
	mmio32.private = mmio->private;

	mmio32.phys_addr = mmio->phys_addr + 4;
	if (mmio->is_write)
		*(u32 *)mmio32.data = data32[1];
	mmio32.data = &((u32 *)mmio->data)[1];
	ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
	if (!mmio->is_write)
		data32[1] = *(u32 *)mmio32.data;

	mmio32.phys_addr = mmio->phys_addr;
	if (mmio->is_write)
		*(u32 *)mmio32.data = data32[0];
	mmio32.data = &((u32 *)mmio->data)[0];
	ret |= range->handle_mmio(vcpu, &mmio32, offset);
	if (!mmio->is_write)
		data32[0] = *(u32 *)mmio32.data;

	return ret;
}

/**
 * vgic_handle_mmio_range - handle an in-kernel MMIO access
 * vgic_handle_mmio_access - handle an in-kernel MMIO access
 * This is called by the read/write KVM IO device wrappers below.
 * @vcpu: pointer to the vcpu performing the access
 * @run: pointer to the kvm_run structure
 * @mmio: pointer to the data describing the access
 * @ranges: array of MMIO ranges in a given region
 * @mmio_base: base address of that region
 * @this: pointer to the KVM IO device in charge
 * @addr: guest physical address of the access
 * @len: size of the access
 * @val: pointer to the data region
 * @is_write: read or write access
 *
 * returns true if the MMIO access could be performed
 */
bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
			    struct kvm_exit_mmio *mmio,
			    const struct kvm_mmio_range *ranges,
			    unsigned long mmio_base)
static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
				   struct kvm_io_device *this, gpa_t addr,
				   int len, void *val, bool is_write)
{
	const struct kvm_mmio_range *range;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_io_device *iodev = container_of(this,
						    struct vgic_io_device, dev);
	struct kvm_run *run = vcpu->run;
	const struct vgic_io_range *range;
	struct kvm_exit_mmio mmio;
	bool updated_state;
	unsigned long offset;
	gpa_t offset;

	offset = mmio->phys_addr - mmio_base;
	range = vgic_find_range(ranges, mmio, offset);
	offset = addr - iodev->addr;
	range = vgic_find_range(iodev->reg_ranges, len, offset);
	if (unlikely(!range || !range->handle_mmio)) {
		pr_warn("Unhandled access %d %08llx %d\n",
			mmio->is_write, mmio->phys_addr, mmio->len);
		return false;
		pr_warn("Unhandled access %d %08llx %d\n", is_write, addr, len);
		return -ENXIO;
	}

	spin_lock(&vcpu->kvm->arch.vgic.lock);
	mmio.phys_addr = addr;
	mmio.len = len;
	mmio.is_write = is_write;
	mmio.data = val;
	mmio.private = iodev->redist_vcpu;

	spin_lock(&dist->lock);
	offset -= range->base;
	if (vgic_validate_access(dist, range, offset)) {
		updated_state = call_range_handler(vcpu, mmio, offset, range);
		updated_state = call_range_handler(vcpu, &mmio, offset, range);
	} else {
		if (!mmio->is_write)
			memset(mmio->data, 0, mmio->len);
		if (!is_write)
			memset(val, 0, len);
		updated_state = false;
	}
	spin_unlock(&vcpu->kvm->arch.vgic.lock);
	kvm_prepare_mmio(run, mmio);
	spin_unlock(&dist->lock);
	run->mmio.is_write = is_write;
	run->mmio.len = len;
	run->mmio.phys_addr = addr;
	memcpy(run->mmio.data, val, len);

	kvm_handle_mmio_return(vcpu, run);

	if (updated_state)
		vgic_kick_vcpus(vcpu->kvm);

	return true;
	return 0;
}

/**
 * vgic_handle_mmio - handle an in-kernel MMIO access for the GIC emulation
 * @vcpu: pointer to the vcpu performing the access
 * @run: pointer to the kvm_run structure
 * @mmio: pointer to the data describing the access
 *
 * returns true if the MMIO access has been performed in kernel space,
 * and false if it needs to be emulated in user space.
 * Calls the actual handling routine for the selected VGIC model.
 */
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio)
static int vgic_handle_mmio_read(struct kvm_vcpu *vcpu,
				 struct kvm_io_device *this,
				 gpa_t addr, int len, void *val)
{
	if (!irqchip_in_kernel(vcpu->kvm))
		return false;
	return vgic_handle_mmio_access(vcpu, this, addr, len, val, false);
}

	/*
	 * This will currently call either vgic_v2_handle_mmio() or
	 * vgic_v3_handle_mmio(), which in turn will call
	 * vgic_handle_mmio_range() defined above.
	 */
	return vcpu->kvm->arch.vgic.vm_ops.handle_mmio(vcpu, run, mmio);
static int vgic_handle_mmio_write(struct kvm_vcpu *vcpu,
				  struct kvm_io_device *this,
				  gpa_t addr, int len, const void *val)
{
	return vgic_handle_mmio_access(vcpu, this, addr, len, (void *)val,
				       true);
}

struct kvm_io_device_ops vgic_io_ops = {
	.read = vgic_handle_mmio_read,
	.write = vgic_handle_mmio_write,
};

/**
 * vgic_register_kvm_io_dev - register VGIC register frame on the KVM I/O bus
 * @kvm: The VM structure pointer
 * @base: The (guest) base address for the register frame
 * @len: Length of the register frame window
 * @ranges: Describing the handler functions for each register
 * @redist_vcpu_id: The VCPU ID to pass on to the handlers on call
 * @iodev: Points to memory to be passed on to the handler
 *
 * @iodev stores the parameters of this function to be usable by the handler
 * respectively the dispatcher function (since the KVM I/O bus framework lacks
 * an opaque parameter). Initialization is done in this function, but the
 * reference should be valid and unique for the whole VGIC lifetime.
 * If the register frame is not mapped for a specific VCPU, pass -1 to
 * @redist_vcpu_id.
 */
int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len,
			     const struct vgic_io_range *ranges,
			     int redist_vcpu_id,
			     struct vgic_io_device *iodev)
{
	struct kvm_vcpu *vcpu = NULL;
	int ret;

	if (redist_vcpu_id >= 0)
		vcpu = kvm_get_vcpu(kvm, redist_vcpu_id);

	iodev->addr = base;
	iodev->len = len;
	iodev->reg_ranges = ranges;
	iodev->redist_vcpu = vcpu;

	kvm_iodevice_init(&iodev->dev, &vgic_io_ops);

	mutex_lock(&kvm->slots_lock);

	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, len,
				      &iodev->dev);
	mutex_unlock(&kvm->slots_lock);

	/* Mark the iodev as invalid if registration fails. */
	if (ret)
		iodev->dev.ops = NULL;

	return ret;
}
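
Because kvm_io_bus carries no opaque cookie, everything the read/write callbacks need is parked in the vgic_io_device itself and recovered with container_of(). Registration then reduces to one call per register frame; as a sketch modelled on the map_resources callers in this diff ("demo_" is an illustrative name):

static int demo_register_dist(struct kvm *kvm, struct vgic_dist *dist)
{
	/* one distributor frame, not tied to any single vcpu (-1) */
	return vgic_register_kvm_io_dev(kvm, dist->vgic_dist_base,
					KVM_VGIC_V2_DIST_SIZE,
					vgic_dist_ranges, -1,
					&dist->dist_iodev);
}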

static int vgic_nr_shared_irqs(struct vgic_dist *dist)
@@ -804,6 +917,36 @@ static int vgic_nr_shared_irqs(struct vgic_dist *dist)
	return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
}

static int compute_active_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *active, *enabled, *act_percpu, *act_shared;
	unsigned long active_private, active_shared;
	int nr_shared = vgic_nr_shared_irqs(dist);
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	act_percpu = vcpu->arch.vgic_cpu.active_percpu;
	act_shared = vcpu->arch.vgic_cpu.active_shared;

	active = vgic_bitmap_get_cpu_map(&dist->irq_active, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(act_percpu, active, enabled, VGIC_NR_PRIVATE_IRQS);

	active = vgic_bitmap_get_shared_map(&dist->irq_active);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(act_shared, active, enabled, nr_shared);
	bitmap_and(act_shared, act_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   nr_shared);

	active_private = find_first_bit(act_percpu, VGIC_NR_PRIVATE_IRQS);
	active_shared = find_first_bit(act_shared, nr_shared);

	return (active_private < VGIC_NR_PRIVATE_IRQS ||
		active_shared < nr_shared);
}

static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -835,7 +978,7 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)

/*
 * Update the interrupt state and determine which CPUs have pending
 * interrupts. Must be called with distributor lock held.
 * or active interrupts. Must be called with distributor lock held.
 */
void vgic_update_state(struct kvm *kvm)
{
@@ -849,10 +992,13 @@ void vgic_update_state(struct kvm *kvm)
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (compute_pending_for_cpu(vcpu)) {
			pr_debug("CPU%d has pending interrupts\n", c);
		if (compute_pending_for_cpu(vcpu))
			set_bit(c, dist->irq_pending_on_cpu);
		}

		if (compute_active_for_cpu(vcpu))
			set_bit(c, dist->irq_active_on_cpu);
		else
			clear_bit(c, dist->irq_active_on_cpu);
	}
}

@@ -955,6 +1101,26 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
	}
}

static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
				 int lr_nr, struct vgic_lr vlr)
{
	if (vgic_irq_is_active(vcpu, irq)) {
		vlr.state |= LR_STATE_ACTIVE;
		kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
		vgic_irq_clear_active(vcpu, irq);
		vgic_update_state(vcpu->kvm);
	} else if (vgic_dist_irq_is_pending(vcpu, irq)) {
		vlr.state |= LR_STATE_PENDING;
		kvm_debug("Set pending: 0x%x\n", vlr.state);
	}

	if (!vgic_irq_is_edge(vcpu, irq))
		vlr.state |= LR_EOI_INT;

	vgic_set_lr(vcpu, lr_nr, vlr);
	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
}

/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
@@ -982,9 +1148,7 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
		if (vlr.source == sgi_source_id) {
			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
			vlr.state |= LR_STATE_PENDING;
			vgic_set_lr(vcpu, lr, vlr);
			vgic_sync_lr_elrsr(vcpu, lr, vlr);
			vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);
			return true;
		}
	}
@@ -1001,12 +1165,8 @@ bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)

	vlr.irq = irq;
	vlr.source = sgi_source_id;
	vlr.state = LR_STATE_PENDING;
	if (!vgic_irq_is_edge(vcpu, irq))
		vlr.state |= LR_EOI_INT;

	vgic_set_lr(vcpu, lr, vlr);
	vgic_sync_lr_elrsr(vcpu, lr, vlr);
	vlr.state = 0;
	vgic_queue_irq_to_lr(vcpu, irq, lr, vlr);

	return true;
}
@@ -1038,39 +1198,49 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pa_percpu, *pa_shared;
	int i, vcpu_id;
	int overflow = 0;
	int nr_shared = vgic_nr_shared_irqs(dist);

	vcpu_id = vcpu->vcpu_id;

	pa_percpu = vcpu->arch.vgic_cpu.pend_act_percpu;
	pa_shared = vcpu->arch.vgic_cpu.pend_act_shared;

	bitmap_or(pa_percpu, vgic_cpu->pending_percpu, vgic_cpu->active_percpu,
		  VGIC_NR_PRIVATE_IRQS);
	bitmap_or(pa_shared, vgic_cpu->pending_shared, vgic_cpu->active_shared,
		  nr_shared);
	/*
	 * We may not have any pending interrupt, or the interrupts
	 * may have been serviced from another vcpu. In all cases,
	 * move along.
	 */
	if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
		pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
	if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu))
		goto epilog;
	}

	/* SGIs */
	for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
	for_each_set_bit(i, pa_percpu, VGIC_NR_SGIS) {
		if (!queue_sgi(vcpu, i))
			overflow = 1;
	}

	/* PPIs */
	for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
	for_each_set_bit_from(i, pa_percpu, VGIC_NR_PRIVATE_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, vgic_cpu->pending_shared, vgic_nr_shared_irqs(dist)) {
	for_each_set_bit(i, pa_shared, nr_shared) {
		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
			overflow = 1;
	}

epilog:
	if (overflow) {
		vgic_enable_underflow(vcpu);
@@ -1089,7 +1259,9 @@ epilog:
static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
	u32 status = vgic_get_interrupt_status(vcpu);
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool level_pending = false;
	struct kvm *kvm = vcpu->kvm;

	kvm_debug("STATUS = %08x\n", status);

@@ -1106,6 +1278,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
		WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));

		spin_lock(&dist->lock);
		vgic_irq_clear_queued(vcpu, vlr.irq);
		WARN_ON(vlr.state & LR_STATE_MASK);
		vlr.state = 0;
@@ -1124,6 +1297,17 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
			 */
			vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);

			/*
			 * kvm_notify_acked_irq calls kvm_set_irq()
			 * to reset the IRQ level. Need to release the
			 * lock for kvm_set_irq to grab it.
			 */
			spin_unlock(&dist->lock);

			kvm_notify_acked_irq(kvm, 0,
					     vlr.irq - VGIC_NR_PRIVATE_IRQS);
			spin_lock(&dist->lock);

			/* Any additional pending interrupt? */
			if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
				vgic_cpu_irq_set(vcpu, vlr.irq);
@@ -1133,6 +1317,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
				vgic_cpu_irq_clear(vcpu, vlr.irq);
			}

			spin_unlock(&dist->lock);

			/*
			 * Despite being EOIed, the LR may not have
			 * been marked as empty.
@@ -1155,10 +1341,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
	return level_pending;
}

/*
 * Sync back the VGIC state after a guest run. The distributor lock is
 * needed so we don't get preempted in the middle of the state processing.
 */
/* Sync back the VGIC state after a guest run */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -1205,14 +1388,10 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_sync_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
@@ -1225,6 +1404,17 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
}

int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
}

void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
@@ -1397,8 +1587,12 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	kfree(vgic_cpu->pending_shared);
	kfree(vgic_cpu->active_shared);
	kfree(vgic_cpu->pend_act_shared);
	kfree(vgic_cpu->vgic_irq_lr_map);
	vgic_cpu->pending_shared = NULL;
	vgic_cpu->active_shared = NULL;
	vgic_cpu->pend_act_shared = NULL;
	vgic_cpu->vgic_irq_lr_map = NULL;
}

@@ -1408,9 +1602,14 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)

	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
	vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);

	if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
	if (!vgic_cpu->pending_shared
		|| !vgic_cpu->active_shared
		|| !vgic_cpu->pend_act_shared
		|| !vgic_cpu->vgic_irq_lr_map) {
		kvm_vgic_vcpu_destroy(vcpu);
		return -ENOMEM;
	}
@@ -1463,10 +1662,12 @@ void kvm_vgic_destroy(struct kvm *kvm)
	kfree(dist->irq_spi_mpidr);
	kfree(dist->irq_spi_target);
	kfree(dist->irq_pending_on_cpu);
	kfree(dist->irq_active_on_cpu);
	dist->irq_sgi_sources = NULL;
	dist->irq_spi_cpu = NULL;
	dist->irq_spi_target = NULL;
	dist->irq_pending_on_cpu = NULL;
	dist->irq_active_on_cpu = NULL;
	dist->nr_cpus = 0;
}

@@ -1502,6 +1703,7 @@ int vgic_init(struct kvm *kvm)
	ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_active, nr_cpus, nr_irqs);
	ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
	ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);

@@ -1514,10 +1716,13 @@ int vgic_init(struct kvm *kvm)
				       GFP_KERNEL);
	dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
					   GFP_KERNEL);
	dist->irq_active_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
					  GFP_KERNEL);
	if (!dist->irq_sgi_sources ||
	    !dist->irq_spi_cpu ||
	    !dist->irq_spi_target ||
	    !dist->irq_pending_on_cpu) {
	    !dist->irq_pending_on_cpu ||
	    !dist->irq_active_on_cpu) {
		ret = -ENOMEM;
		goto out;
	}
@@ -1845,12 +2050,9 @@ int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
	return r;
}

int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset)
int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset)
{
	struct kvm_exit_mmio dev_attr_mmio;

	dev_attr_mmio.len = 4;
	if (vgic_find_range(ranges, &dev_attr_mmio, offset))
	if (vgic_find_range(ranges, 4, offset))
		return 0;
	else
		return -ENXIO;
@@ -1883,8 +2085,10 @@ static struct notifier_block vgic_cpu_nb = {
};

static const struct of_device_id vgic_ids[] = {
	{ .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
	{ .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
	{ .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, },
	{ .compatible = "arm,cortex-a7-gic", .data = vgic_v2_probe, },
	{ .compatible = "arm,gic-400", .data = vgic_v2_probe, },
	{ .compatible = "arm,gic-v3", .data = vgic_v3_probe, },
	{},
};

@@ -1932,3 +2136,38 @@ out_free_irq:
	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
	return ret;
}

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries,
		    int gsi)
{
	return gsi;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}

int kvm_set_irq(struct kvm *kvm, int irq_source_id,
		u32 irq, int level, bool line_status)
{
	unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS;

	trace_kvm_set_irq(irq, level, irq_source_id);

	BUG_ON(!vgic_initialized(kvm));

	if (spi > kvm->arch.vgic.nr_irqs)
		return -EINVAL;
	return kvm_vgic_inject_irq(kvm, 0, spi, level);

}

/* MSI not implemented yet */
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id,
		int level, bool line_status)
{
	return 0;
}

@@ -20,6 +20,8 @@
#ifndef __KVM_VGIC_H__
#define __KVM_VGIC_H__

#include <kvm/iodev.h>

#define VGIC_ADDR_UNDEF		(-1)
#define IS_VGIC_ADDR_UNDEF(_x)	((_x) == VGIC_ADDR_UNDEF)

@@ -57,6 +59,14 @@ void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq);
void vgic_unqueue_irqs(struct kvm_vcpu *vcpu);

struct kvm_exit_mmio {
	phys_addr_t	phys_addr;
	void		*data;
	u32		len;
	bool		is_write;
	void		*private;
};

void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
		     phys_addr_t offset, int mode);
bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
@@ -74,7 +84,7 @@ void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
	*((u32 *)mmio->data) = cpu_to_le32(value) & mask;
}

struct kvm_mmio_range {
struct vgic_io_range {
	phys_addr_t base;
	unsigned long len;
	int bits_per_irq;
@@ -82,6 +92,11 @@ struct kvm_mmio_range {
		    phys_addr_t offset);
};

int vgic_register_kvm_io_dev(struct kvm *kvm, gpa_t base, int len,
			     const struct vgic_io_range *ranges,
			     int redist_id,
			     struct vgic_io_device *iodev);

static inline bool is_in_range(phys_addr_t addr, unsigned long len,
			       phys_addr_t baseaddr, unsigned long size)
{
@@ -89,14 +104,8 @@ static inline bool is_in_range(phys_addr_t addr, unsigned long len,
}

const
struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset);

bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
			    struct kvm_exit_mmio *mmio,
			    const struct kvm_mmio_range *ranges,
			    unsigned long mmio_base);
struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges,
				      int len, gpa_t offset);

bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
			    phys_addr_t offset, int vcpu_id, int access);
@@ -107,12 +116,20 @@ bool vgic_handle_set_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
bool vgic_handle_clear_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
				   phys_addr_t offset, int vcpu_id);

bool vgic_handle_set_active_reg(struct kvm *kvm,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset, int vcpu_id);

bool vgic_handle_clear_active_reg(struct kvm *kvm,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset, int vcpu_id);

bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
			 phys_addr_t offset);

void vgic_kick_vcpus(struct kvm *kvm);

int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset);
int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset);
int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);
int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr);

@@ -8,7 +8,7 @@
 *
 */

#include "iodev.h"
#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/slab.h>
@@ -60,8 +60,9 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
	return 1;
}

static int coalesced_mmio_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *val)
static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this, gpa_t addr,
				int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

@@ -36,7 +36,7 @@
#include <linux/seqlock.h>
#include <trace/events/kvm.h>

#include "iodev.h"
#include <kvm/iodev.h>

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
@@ -311,6 +311,9 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
	unsigned int events;
	int idx;

	if (!kvm_arch_intc_initialized(kvm))
		return -EAGAIN;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;
@@ -712,8 +715,8 @@ ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_io_device *this, gpa_t addr, int len,
		const void *val)
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
		int len, const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

@@ -105,7 +105,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
	i = kvm_irq_map_gsi(kvm, irq_set, irq);
	srcu_read_unlock(&kvm->irq_srcu, idx);

	while(i--) {
	while (i--) {
		int r;
		r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level,
				   line_status);

@@ -16,7 +16,7 @@
 *
 */

#include "iodev.h"
#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
@@ -66,13 +66,13 @@
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

unsigned int halt_poll_ns = 0;
static unsigned int halt_poll_ns;
module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);

/*
 * Ordering of locks:
 *
 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
@@ -80,7 +80,7 @@ static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static int kvm_usage_count;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
@@ -539,20 +539,12 @@ void *kvm_kvzalloc(unsigned long size)
	return kzalloc(size, GFP_KERNEL);
}

void kvm_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	if (!memslot->dirty_bitmap)
		return;

	kvm_kvfree(memslot->dirty_bitmap);
	kvfree(memslot->dirty_bitmap);
	memslot->dirty_bitmap = NULL;
}

@@ -888,8 +880,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
	 * or moved, memslot will be created.
	 *
	 * validation of sp->gfn happens in:
	 * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
	 * - kvm_is_visible_gfn (mmu_check_roots)
	 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
	 *	- kvm_is_visible_gfn (mmu_check_roots)
	 */
	kvm_arch_flush_shadow_memslot(kvm, slot);

@@ -1061,9 +1053,11 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
		mask = xchg(&dirty_bitmap[i], 0);
		dirty_bitmap_buffer[i] = mask;

		offset = i * BITS_PER_LONG;
		kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset,
							mask);
		if (mask) {
			offset = i * BITS_PER_LONG;
			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
								offset, mask);
		}
	}

	spin_unlock(&kvm->mmu_lock);
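
The reworked loop only calls into the arch hook when a bitmap word is nonzero: each unsigned long covers BITS_PER_LONG guest pages, and an all-zero word has nothing to write-protect. The shape of the scan, as a standalone sketch (the callback stands in for kvm_arch_mmu_enable_log_dirty_pt_masked):

#include <stddef.h>

#define BITS_PER_LONG_DEMO (8 * sizeof(unsigned long))

/* invoked once per nonzero word; stands in for the arch hook */
typedef void (*dirty_cb)(size_t first_gfn, unsigned long mask);

static void scan_dirty_words(const unsigned long *bitmap, size_t nr_words,
			     dirty_cb cb)
{
	for (size_t i = 0; i < nr_words; i++) {
		unsigned long mask = bitmap[i];

		if (!mask)
			continue;	/* whole stride clean: skip the hook */
		cb(i * BITS_PER_LONG_DEMO, mask);
	}
}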

@@ -1193,16 +1187,6 @@ unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
	return gfn_to_hva_memslot_prot(slot, gfn, writable);
}

static int kvm_read_hva(void *data, void __user *hva, int len)
{
	return __copy_from_user(data, hva, len);
}

static int kvm_read_hva_atomic(void *data, void __user *hva, int len)
{
	return __copy_from_user_inatomic(data, hva, len);
}

static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
				unsigned long start, int write, struct page **page)
{
@@ -1481,7 +1465,6 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)

	return kvm_pfn_to_page(pfn);
}

EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
@@ -1517,6 +1500,7 @@ void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_reserved_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);

		if (!PageReserved(page))
			SetPageDirty(page);
	}
@@ -1554,7 +1538,7 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
	addr = gfn_to_hva_prot(kvm, gfn, NULL);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = kvm_read_hva(data, (void __user *)addr + offset, len);
	r = __copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
@@ -1593,7 +1577,7 @@ int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = kvm_read_hva_atomic(data, (void __user *)addr + offset, len);
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
@@ -1653,8 +1637,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
	ghc->generation = slots->generation;
	ghc->len = len;
	ghc->memslot = gfn_to_memslot(kvm, start_gfn);
	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
	if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
	if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
		ghc->hva += offset;
	} else {
		/*
@@ -1742,7 +1726,7 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
@@ -1800,6 +1784,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
	start = cur = ktime_get();
	if (halt_poll_ns) {
		ktime_t stop = ktime_add_ns(ktime_get(), halt_poll_ns);

		do {
			/*
			 * This sets KVM_REQ_UNHALT if an interrupt
@@ -2118,7 +2103,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
	 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
	 * so vcpu_load() would break it.
	 */
	if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
	if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_S390_IRQ || ioctl == KVM_INTERRUPT)
		return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
#endif

@@ -2135,6 +2120,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
		/* The thread running this VCPU changed. */
		struct pid *oldpid = vcpu->pid;
		struct pid *newpid = get_task_pid(current, PIDTYPE_PID);

		rcu_assign_pointer(vcpu->pid, newpid);
		if (oldpid)
			synchronize_rcu();
@@ -2205,7 +2191,7 @@ out_free1:
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
		if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
			goto out;
		r = 0;
		break;
@@ -2214,7 +2200,7 @@ out_free1:
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
		if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		break;
@@ -2223,13 +2209,13 @@ out_free1:
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
		if (copy_from_user(&tr, argp, sizeof(tr)))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
		if (copy_to_user(argp, &tr, sizeof(tr)))
			goto out;
		r = 0;
		break;
@@ -2238,7 +2224,7 @@ out_free1:
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
		if (copy_from_user(&dbg, argp, sizeof(dbg)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		break;
@@ -2252,14 +2238,14 @@ out_free1:
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
					   sizeof(kvm_sigmask)))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
			if (kvm_sigmask.len != sizeof(sigset))
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
					   sizeof(sigset)))
				goto out;
			p = &sigset;
		}
@@ -2321,14 +2307,14 @@ static long kvm_vcpu_compat_ioctl(struct file *filp,
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
					   sizeof(kvm_sigmask)))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof csigset)
||||
if (kvm_sigmask.len != sizeof(csigset))
|
||||
goto out;
|
||||
r = -EFAULT;
|
||||
if (copy_from_user(&csigset, sigmask_arg->sigset,
|
||||
sizeof csigset))
|
||||
sizeof(csigset)))
|
||||
goto out;
|
||||
sigset_from_compat(&sigset, &csigset);
|
||||
r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
|
||||
@ -2525,7 +2511,7 @@ static long kvm_vm_ioctl(struct file *filp,
|
||||
|
||||
r = -EFAULT;
|
||||
if (copy_from_user(&kvm_userspace_mem, argp,
|
||||
sizeof kvm_userspace_mem))
|
||||
sizeof(kvm_userspace_mem)))
|
||||
goto out;
|
||||
|
||||
r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
|
||||
@ -2535,7 +2521,7 @@ static long kvm_vm_ioctl(struct file *filp,
|
||||
struct kvm_dirty_log log;
|
||||
|
||||
r = -EFAULT;
|
||||
if (copy_from_user(&log, argp, sizeof log))
|
||||
if (copy_from_user(&log, argp, sizeof(log)))
|
||||
goto out;
|
||||
r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
|
||||
break;
|
||||
@ -2543,16 +2529,18 @@ static long kvm_vm_ioctl(struct file *filp,
|
||||
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
|
||||
case KVM_REGISTER_COALESCED_MMIO: {
|
||||
struct kvm_coalesced_mmio_zone zone;
|
||||
|
||||
r = -EFAULT;
|
||||
if (copy_from_user(&zone, argp, sizeof zone))
|
||||
if (copy_from_user(&zone, argp, sizeof(zone)))
|
||||
goto out;
|
||||
r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
|
||||
break;
|
||||
}
|
||||
case KVM_UNREGISTER_COALESCED_MMIO: {
|
||||
struct kvm_coalesced_mmio_zone zone;
|
||||
|
||||
r = -EFAULT;
|
||||
if (copy_from_user(&zone, argp, sizeof zone))
|
||||
if (copy_from_user(&zone, argp, sizeof(zone)))
|
||||
goto out;
|
||||
r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
|
||||
break;
|
||||
@ -2562,7 +2550,7 @@ static long kvm_vm_ioctl(struct file *filp,
|
||||
struct kvm_irqfd data;
|
||||
|
||||
r = -EFAULT;
|
||||
if (copy_from_user(&data, argp, sizeof data))
|
||||
if (copy_from_user(&data, argp, sizeof(data)))
|
||||
goto out;
|
||||
r = kvm_irqfd(kvm, &data);
|
||||
break;
|
||||
@ -2571,7 +2559,7 @@ static long kvm_vm_ioctl(struct file *filp,
|
||||
struct kvm_ioeventfd data;
|
||||
|
||||
r = -EFAULT;
|
||||
if (copy_from_user(&data, argp, sizeof data))
|
||||
if (copy_from_user(&data, argp, sizeof(data)))
|
||||
goto out;
|
||||
r = kvm_ioeventfd(kvm, &data);
|
||||
break;
|
||||
@ -2592,7 +2580,7 @@ static long kvm_vm_ioctl(struct file *filp,
|
||||
struct kvm_msi msi;
|
||||
|
||||
r = -EFAULT;
|
||||
if (copy_from_user(&msi, argp, sizeof msi))
|
||||
if (copy_from_user(&msi, argp, sizeof(msi)))
|
||||
goto out;
|
||||
r = kvm_send_userspace_msi(kvm, &msi);
|
||||
break;
|
||||
@ -2604,7 +2592,7 @@ static long kvm_vm_ioctl(struct file *filp,
|
||||
struct kvm_irq_level irq_event;
|
||||
|
||||
r = -EFAULT;
|
||||
if (copy_from_user(&irq_event, argp, sizeof irq_event))
|
||||
if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
|
||||
goto out;
|
||||
|
||||
r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
|
||||
@ -2614,7 +2602,7 @@ static long kvm_vm_ioctl(struct file *filp,
|
||||
|
||||
r = -EFAULT;
|
||||
if (ioctl == KVM_IRQ_LINE_STATUS) {
|
||||
if (copy_to_user(argp, &irq_event, sizeof irq_event))
|
||||
if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -2647,7 +2635,7 @@ static long kvm_vm_ioctl(struct file *filp,
|
||||
goto out_free_irq_routing;
|
||||
r = kvm_set_irq_routing(kvm, entries, routing.nr,
|
||||
routing.flags);
|
||||
out_free_irq_routing:
|
||||
out_free_irq_routing:
|
||||
vfree(entries);
|
||||
break;
|
||||
}
|
||||
@ -2822,8 +2810,7 @@ static void hardware_enable_nolock(void *junk)
|
||||
if (r) {
|
||||
cpumask_clear_cpu(cpu, cpus_hardware_enabled);
|
||||
atomic_inc(&hardware_enable_failed);
|
||||
printk(KERN_INFO "kvm: enabling virtualization on "
|
||||
"CPU%d failed\n", cpu);
|
||||
pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2899,12 +2886,12 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
|
||||
val &= ~CPU_TASKS_FROZEN;
|
||||
switch (val) {
|
||||
case CPU_DYING:
|
||||
printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
|
||||
pr_info("kvm: disabling virtualization on CPU%d\n",
|
||||
cpu);
|
||||
hardware_disable();
|
||||
break;
|
||||
case CPU_STARTING:
|
||||
printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
|
||||
pr_info("kvm: enabling virtualization on CPU%d\n",
|
||||
cpu);
|
||||
hardware_enable();
|
||||
break;
|
||||
@ -2921,7 +2908,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
|
||||
*
|
||||
* And Intel TXT required VMX off for all cpu when system shutdown.
|
||||
*/
|
||||
printk(KERN_INFO "kvm: exiting hardware virtualization\n");
|
||||
pr_info("kvm: exiting hardware virtualization\n");
|
||||
kvm_rebooting = true;
|
||||
on_each_cpu(hardware_disable_nolock, NULL, 1);
|
||||
return NOTIFY_OK;
|
||||
@ -2945,7 +2932,7 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
|
||||
}
|
||||
|
||||
static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
|
||||
const struct kvm_io_range *r2)
|
||||
const struct kvm_io_range *r2)
|
||||
{
|
||||
if (r1->addr < r2->addr)
|
||||
return -1;
|
||||
@ -2998,7 +2985,7 @@ static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
|
||||
return off;
|
||||
}
|
||||
|
||||
static int __kvm_io_bus_write(struct kvm_io_bus *bus,
|
||||
static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
|
||||
struct kvm_io_range *range, const void *val)
|
||||
{
|
||||
int idx;
|
||||
@ -3009,7 +2996,7 @@ static int __kvm_io_bus_write(struct kvm_io_bus *bus,
|
||||
|
||||
while (idx < bus->dev_count &&
|
||||
kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
|
||||
if (!kvm_iodevice_write(bus->range[idx].dev, range->addr,
|
||||
if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
|
||||
range->len, val))
|
||||
return idx;
|
||||
idx++;
|
||||
@ -3019,7 +3006,7 @@ static int __kvm_io_bus_write(struct kvm_io_bus *bus,
|
||||
}
|
||||
|
||||
/* kvm_io_bus_write - called under kvm->slots_lock */
|
||||
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
|
||||
int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
|
||||
int len, const void *val)
|
||||
{
|
||||
struct kvm_io_bus *bus;
|
||||
@ -3031,14 +3018,14 @@ int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
|
||||
.len = len,
|
||||
};
|
||||
|
||||
bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
|
||||
r = __kvm_io_bus_write(bus, &range, val);
|
||||
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
|
||||
r = __kvm_io_bus_write(vcpu, bus, &range, val);
|
||||
return r < 0 ? r : 0;
|
||||
}
|
||||
|
||||
/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
|
||||
int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
|
||||
int len, const void *val, long cookie)
|
||||
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
|
||||
gpa_t addr, int len, const void *val, long cookie)
|
||||
{
|
||||
struct kvm_io_bus *bus;
|
||||
struct kvm_io_range range;
|
||||
@ -3048,12 +3035,12 @@ int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
|
||||
.len = len,
|
||||
};
|
||||
|
||||
bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
|
||||
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
|
||||
|
||||
/* First try the device referenced by cookie. */
|
||||
if ((cookie >= 0) && (cookie < bus->dev_count) &&
|
||||
(kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
|
||||
if (!kvm_iodevice_write(bus->range[cookie].dev, addr, len,
|
||||
if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
|
||||
val))
|
||||
return cookie;
|
||||
|
||||
@ -3061,11 +3048,11 @@ int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
|
||||
* cookie contained garbage; fall back to search and return the
|
||||
* correct cookie value.
|
||||
*/
|
||||
return __kvm_io_bus_write(bus, &range, val);
|
||||
return __kvm_io_bus_write(vcpu, bus, &range, val);
|
||||
}
|
||||
|
||||
static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range,
|
||||
void *val)
|
||||
static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
|
||||
struct kvm_io_range *range, void *val)
|
||||
{
|
||||
int idx;
|
||||
|
||||
@ -3075,7 +3062,7 @@ static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range,
|
||||
|
||||
while (idx < bus->dev_count &&
|
||||
kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
|
||||
if (!kvm_iodevice_read(bus->range[idx].dev, range->addr,
|
||||
if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
|
||||
range->len, val))
|
||||
return idx;
|
||||
idx++;
|
||||
@ -3086,7 +3073,7 @@ static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range,
|
||||
EXPORT_SYMBOL_GPL(kvm_io_bus_write);
|
||||
|
||||
/* kvm_io_bus_read - called under kvm->slots_lock */
|
||||
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
|
||||
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
|
||||
int len, void *val)
|
||||
{
|
||||
struct kvm_io_bus *bus;
|
||||
@ -3098,8 +3085,8 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
|
||||
.len = len,
|
||||
};
|
||||
|
||||
bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
|
||||
r = __kvm_io_bus_read(bus, &range, val);
|
||||
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
|
||||
r = __kvm_io_bus_read(vcpu, bus, &range, val);
|
||||
return r < 0 ? r : 0;
|
||||
}
|
||||
|
||||
@ -3269,6 +3256,7 @@ struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
|
||||
static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
|
||||
|
||||
if (vcpu->preempted)
|
||||
vcpu->preempted = false;
|
||||
|
||||
@ -3350,7 +3338,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
|
||||
|
||||
r = misc_register(&kvm_dev);
|
||||
if (r) {
|
||||
printk(KERN_ERR "kvm: misc device register failed\n");
|
||||
pr_err("kvm: misc device register failed\n");
|
||||
goto out_unreg;
|
||||
}
|
||||
|
||||
@ -3361,7 +3349,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
|
||||
|
||||
r = kvm_init_debug();
|
||||
if (r) {
|
||||
printk(KERN_ERR "kvm: create debugfs files failed\n");
|
||||
pr_err("kvm: create debugfs files failed\n");
|
||||
goto out_undebugfs;
|
||||
}
|
||||
|
||||
|