Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "ARM64:

   - Address a rather annoying bug w.r.t. guest timer offsetting. The
     synchronization of timer offsets between vCPUs was broken, leading
     to inconsistent timer reads within the VM.

  x86:

   - New tests for the slow path of the EVTCHNOP_send Xen hypercall

   - Add missing nVMX consistency checks for CR0 and CR4

   - Fix bug that broke AMD GATag on 512 vCPU machines

  Selftests:

   - Skip hugetlb tests if huge pages are not available

   - Sync KVM exit reasons"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: selftests: Sync KVM exit reasons in selftests
  KVM: selftests: Add macro to generate KVM exit reason strings
  KVM: selftests: Print expected and actual exit reason in KVM exit reason assert
  KVM: selftests: Make vCPU exit reason test assertion common
  KVM: selftests: Add EVTCHNOP_send slow path test to xen_shinfo_test
  KVM: selftests: Use enum for test numbers in xen_shinfo_test
  KVM: selftests: Add helpers to make Xen-style VMCALL/VMMCALL hypercalls
  KVM: selftests: Move the guts of kvm_hypercall() to a separate macro
  KVM: SVM: WARN if GATag generation drops VM or vCPU ID information
  KVM: SVM: Modify AVIC GATag to support max number of 512 vCPUs
  KVM: SVM: Fix a benign off-by-one bug in AVIC physical table mask
  selftests: KVM: skip hugetlb tests if huge pages are not available
  KVM: VMX: Use tabs instead of spaces for indentation
  KVM: VMX: Fix indentation coding style issue
  KVM: nVMX: remove unnecessary #ifdef
  KVM: nVMX: add missing consistency checks for CR0 and CR4
  KVM: arm64: timers: Convert per-vcpu virtual offset to a global value
Merged by Linus Torvalds, 2023-03-16 11:32:12 -07:00
commit 0ddc84d2dd
57 changed files with 354 additions and 511 deletions

View File

@ -193,6 +193,9 @@ struct kvm_arch {
/* Interrupt controller */
struct vgic_dist vgic;
/* Timers */
struct arch_timer_vm_data timer_data;
/* Mandated version of PSCI */
u32 psci_version;

View File

@ -84,14 +84,10 @@ u64 timer_get_cval(struct arch_timer_context *ctxt)
static u64 timer_get_offset(struct arch_timer_context *ctxt)
{
struct kvm_vcpu *vcpu = ctxt->vcpu;
if (ctxt->offset.vm_offset)
return *ctxt->offset.vm_offset;
switch(arch_timer_ctx_index(ctxt)) {
case TIMER_VTIMER:
return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
default:
return 0;
}
return 0;
}
static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
@ -128,15 +124,12 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
{
struct kvm_vcpu *vcpu = ctxt->vcpu;
switch(arch_timer_ctx_index(ctxt)) {
case TIMER_VTIMER:
__vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
break;
default:
if (!ctxt->offset.vm_offset) {
WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
return;
}
WRITE_ONCE(*ctxt->offset.vm_offset, offset);
}
u64 kvm_phys_timer_read(void)
@ -765,25 +758,6 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
return 0;
}
/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
unsigned long i;
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *tmp;
mutex_lock(&kvm->lock);
kvm_for_each_vcpu(i, tmp, kvm)
timer_set_offset(vcpu_vtimer(tmp), cntvoff);
/*
* When called from the vcpu create path, the CPU being created is not
* included in the loop above, so we just set it here as well.
*/
timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
mutex_unlock(&kvm->lock);
}
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
@ -791,10 +765,11 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
vtimer->vcpu = vcpu;
vtimer->offset.vm_offset = &vcpu->kvm->arch.timer_data.voffset;
ptimer->vcpu = vcpu;
/* Synchronize cntvoff across all vtimers of a VM. */
update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
timer_set_offset(vtimer, kvm_phys_timer_read());
timer_set_offset(ptimer, 0);
hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
@ -840,7 +815,7 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
break;
case KVM_REG_ARM_TIMER_CNT:
timer = vcpu_vtimer(vcpu);
update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
timer_set_offset(timer, kvm_phys_timer_read() - value);
break;
case KVM_REG_ARM_TIMER_CVAL:
timer = vcpu_vtimer(vcpu);
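The net effect of the arch_timer changes above: every vtimer context now dereferences a single per-VM offset instead of keeping a private CNTVOFF_EL2 copy, so two vCPUs can no longer observe diverging counter bases. Below is a minimal user-space sketch of that sharing pattern — struct and function names are invented here, this is not the kernel code:

#include <stdint.h>
#include <stdio.h>

struct vm_timer_data {
    uint64_t voffset;            /* single VM-wide virtual counter offset */
};

struct vcpu_timer_ctx {
    uint64_t *vm_offset;         /* points into the shared per-VM data */
};

static uint64_t ctx_get_offset(struct vcpu_timer_ctx *ctx)
{
    /* Every context dereferences the same per-VM field. */
    return *ctx->vm_offset;
}

int main(void)
{
    struct vm_timer_data vm = { .voffset = 12345 };
    struct vcpu_timer_ctx v0 = { .vm_offset = &vm.voffset };
    struct vcpu_timer_ctx v1 = { .vm_offset = &vm.voffset };

    /* An update made through one vCPU is observed by all of them. */
    *v0.vm_offset = 99999;
    printf("vcpu0: %llu, vcpu1: %llu\n",
           (unsigned long long)ctx_get_offset(&v0),
           (unsigned long long)ctx_get_offset(&v1));
    return 0;
}

The real timer_set_offset() additionally writes through WRITE_ONCE() and WARNs if a context has no backing VM offset; the sketch only shows the aliasing that makes the offsets consistent by construction.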

View File

@ -44,7 +44,7 @@ static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val)
feature = smccc_get_arg1(vcpu);
switch (feature) {
case KVM_PTP_VIRT_COUNTER:
cycles = systime_snapshot.cycles - vcpu_read_sys_reg(vcpu, CNTVOFF_EL2);
cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.voffset;
break;
case KVM_PTP_PHYS_COUNTER:
cycles = systime_snapshot.cycles;

View File

@ -261,20 +261,22 @@ enum avic_ipi_failure_cause {
AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
};
#define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(9, 0)
#define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(8, 0)
/*
* For AVIC, the max index allowed for physical APIC ID
* table is 0xff (255).
* For AVIC, the max index allowed for physical APIC ID table is 0xfe (254), as
* 0xff is a broadcast to all CPUs, i.e. can't be targeted individually.
*/
#define AVIC_MAX_PHYSICAL_ID 0XFEULL
/*
* For x2AVIC, the max index allowed for physical APIC ID
* table is 0x1ff (511).
* For x2AVIC, the max index allowed for physical APIC ID table is 0x1ff (511).
*/
#define X2AVIC_MAX_PHYSICAL_ID 0x1FFUL
static_assert((AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == AVIC_MAX_PHYSICAL_ID);
static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_MAX_PHYSICAL_ID);
#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
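As a quick sanity check of the corrected mask: GENMASK_ULL(8, 0) is 0x1ff, wide enough for both AVIC's maximum index 0xfe and x2AVIC's 0x1ff — exactly what the two static_asserts above encode. A standalone re-derivation, with GENMASK_ULL re-implemented locally purely for illustration:

#include <assert.h>
#include <stdint.h>

/* Local stand-in for the kernel's GENMASK_ULL(), for illustration only. */
#define MY_GENMASK_ULL(h, l) \
    ((~0ULL >> (63 - (h))) & ~((1ULL << (l)) - 1))

int main(void)
{
    uint64_t mask = MY_GENMASK_ULL(8, 0);

    assert(mask == 0x1ff);                  /* bits 8..0 */
    assert((0xfeULL & mask) == 0xfeULL);    /* AVIC max index fits */
    assert((0x1ffULL & mask) == 0x1ffULL);  /* x2AVIC max index fits */
    return 0;
}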

View File

@ -27,19 +27,38 @@
#include "irq.h"
#include "svm.h"
/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS 8
#define AVIC_VCPU_ID_MASK ((1 << AVIC_VCPU_ID_BITS) - 1)
/*
* Encode the arbitrary VM ID and the vCPU's default APIC ID, i.e the vCPU ID,
* into the GATag so that KVM can retrieve the correct vCPU from a GALog entry
* if an interrupt can't be delivered, e.g. because the vCPU isn't running.
*
* For the vCPU ID, use however many bits are currently allowed for the max
* guest physical APIC ID (limited by the size of the physical ID table), and
* use whatever bits remain to assign arbitrary AVIC IDs to VMs. Note, the
* size of the GATag is defined by hardware (32 bits), but is an opaque value
* as far as hardware is concerned.
*/
#define AVIC_VCPU_ID_MASK AVIC_PHYSICAL_MAX_INDEX_MASK
#define AVIC_VM_ID_BITS 24
#define AVIC_VM_ID_NR (1 << AVIC_VM_ID_BITS)
#define AVIC_VM_ID_MASK ((1 << AVIC_VM_ID_BITS) - 1)
#define AVIC_VM_ID_SHIFT HWEIGHT32(AVIC_PHYSICAL_MAX_INDEX_MASK)
#define AVIC_VM_ID_MASK (GENMASK(31, AVIC_VM_ID_SHIFT) >> AVIC_VM_ID_SHIFT)
#define AVIC_GATAG(x, y) (((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
(y & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG_TO_VMID(x) ((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
#define AVIC_GATAG_TO_VMID(x) ((x >> AVIC_VM_ID_SHIFT) & AVIC_VM_ID_MASK)
#define AVIC_GATAG_TO_VCPUID(x) (x & AVIC_VCPU_ID_MASK)
#define __AVIC_GATAG(vm_id, vcpu_id) ((((vm_id) & AVIC_VM_ID_MASK) << AVIC_VM_ID_SHIFT) | \
((vcpu_id) & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG(vm_id, vcpu_id) \
({ \
u32 ga_tag = __AVIC_GATAG(vm_id, vcpu_id); \
\
WARN_ON_ONCE(AVIC_GATAG_TO_VCPUID(ga_tag) != (vcpu_id)); \
WARN_ON_ONCE(AVIC_GATAG_TO_VMID(ga_tag) != (vm_id)); \
ga_tag; \
})
static_assert(__AVIC_GATAG(AVIC_VM_ID_MASK, AVIC_VCPU_ID_MASK) == -1u);
static bool force_avic;
module_param_unsafe(force_avic, bool, 0444);
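Putting the reworked layout together: the vCPU ID occupies the low hweight32(AVIC_PHYSICAL_MAX_INDEX_MASK) = 9 bits of the GATag and the VM ID the remaining 23, which is why a vCPU ID of 511 now survives the round trip that the WARN_ON_ONCEs guard. A user-space sketch of the encode/decode, with the masks redefined locally (illustrative, not the kernel's headers):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define VCPU_ID_MASK  0x1ffu                         /* GENMASK(8, 0) */
#define VM_ID_SHIFT   9                              /* hweight32(0x1ff) */
#define VM_ID_MASK    (0xffffffffu >> VM_ID_SHIFT)   /* remaining 23 bits */

static uint32_t make_gatag(uint32_t vm_id, uint32_t vcpu_id)
{
    return ((vm_id & VM_ID_MASK) << VM_ID_SHIFT) | (vcpu_id & VCPU_ID_MASK);
}

int main(void)
{
    uint32_t tag = make_gatag(0x123456, 511);   /* vCPU ID 511 now fits */

    assert((tag >> VM_ID_SHIFT) == 0x123456);   /* VM ID round-trips */
    assert((tag & VCPU_ID_MASK) == 511);        /* vCPU ID round-trips */
    printf("GATag: 0x%08x\n", (unsigned int)tag);
    return 0;
}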

View File

@ -2903,7 +2903,7 @@ static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu,
static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
bool ia32e;
bool ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE);
if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
@ -2923,12 +2923,6 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
vmcs12->host_ia32_perf_global_ctrl)))
return -EINVAL;
#ifdef CONFIG_X86_64
ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE);
#else
ia32e = false;
#endif
if (ia32e) {
if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
return -EINVAL;
@ -3022,7 +3016,7 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12,
enum vm_entry_failure_code *entry_failure_code)
{
bool ia32e;
bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE);
*entry_failure_code = ENTRY_FAIL_DEFAULT;
@ -3048,6 +3042,13 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
vmcs12->guest_ia32_perf_global_ctrl)))
return -EINVAL;
if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG))
return -EINVAL;
if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) ||
CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG)))
return -EINVAL;
/*
* If the load IA32_EFER VM-entry control is 1, the following checks
* are performed on the field for the IA32_EFER MSR:
@ -3059,7 +3060,6 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
*/
if (to_vmx(vcpu)->nested.nested_run_pending &&
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
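The two added CC() checks mirror architectural VM-entry guest-state rules: CR0.PG set without CR0.PE is illegal, and an IA-32e guest requires both CR4.PAE and CR0.PG. A compilable sketch of that predicate, assuming the usual x86 bit positions — the helper is illustrative, not KVM's:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define X86_CR0_PE   (1ULL << 0)
#define X86_CR0_PG   (1ULL << 31)
#define X86_CR4_PAE  (1ULL << 5)

/* Illustrative helper: the two rules the new consistency checks enforce. */
static bool guest_cr_state_valid(uint64_t cr0, uint64_t cr4, bool ia32e)
{
    /* CR0.PG set while CR0.PE is clear is always illegal. */
    if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG)
        return false;
    /* An IA-32e (64-bit) guest additionally needs CR4.PAE and CR0.PG. */
    if (ia32e && (!(cr4 & X86_CR4_PAE) || !(cr0 & X86_CR0_PG)))
        return false;
    return true;
}

int main(void)
{
    assert(guest_cr_state_valid(X86_CR0_PG | X86_CR0_PE, X86_CR4_PAE, true));
    assert(!guest_cr_state_valid(X86_CR0_PG, X86_CR4_PAE, false));
    assert(!guest_cr_state_valid(X86_CR0_PG | X86_CR0_PE, 0, true));
    return 0;
}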

View File

@ -262,7 +262,7 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
* eIBRS has its own protection against poisoned RSB, so it doesn't
* need the RSB filling sequence. But it does need to be enabled, and a
* single call to retire, before the first unbalanced RET.
*/
*/
FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT,\
X86_FEATURE_RSB_VMEXIT_LITE
@ -311,7 +311,7 @@ SYM_FUNC_END(vmx_do_nmi_irqoff)
* vmread_error_trampoline - Trampoline from inline asm to vmread_error()
* @field: VMCS field encoding that failed
* @fault: %true if the VMREAD faulted, %false if it failed
*
* Save and restore volatile registers across a call to vmread_error(). Note,
* all parameters are passed on the stack.
*/

View File

@ -874,7 +874,7 @@ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
*/
if (is_guest_mode(vcpu))
eb |= get_vmcs12(vcpu)->exception_bitmap;
else {
else {
int mask = 0, match = 0;
if (enable_ept && (eb & (1u << PF_VECTOR))) {
@ -1282,7 +1282,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
}
}
if (vmx->nested.need_vmcs12_to_shadow_sync)
if (vmx->nested.need_vmcs12_to_shadow_sync)
nested_sync_vmcs12_to_shadow(vcpu);
if (vmx->guest_state_loaded)
@ -5049,10 +5049,10 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
if (to_vmx(vcpu)->nested.nested_run_pending)
return -EBUSY;
/*
* An IRQ must not be injected into L2 if it's supposed to VM-Exit,
* e.g. if the IRQ arrived asynchronously after checking nested events.
*/
/*
* An IRQ must not be injected into L2 if it's supposed to VM-Exit,
* e.g. if the IRQ arrived asynchronously after checking nested events.
*/
if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
return -EBUSY;

View File

@ -23,6 +23,19 @@ enum kvm_arch_timer_regs {
TIMER_REG_CTL,
};
struct arch_timer_offset {
/*
* If set, pointer to one of the offsets in the kvm's offset
* structure. If NULL, assume a zero offset.
*/
u64 *vm_offset;
};
struct arch_timer_vm_data {
/* Offset applied to the virtual timer/counter */
u64 voffset;
};
struct arch_timer_context {
struct kvm_vcpu *vcpu;
@ -32,6 +45,8 @@ struct arch_timer_context {
/* Emulated Timer (may be unused) */
struct hrtimer hrtimer;
/* Offset for this counter/timer */
struct arch_timer_offset offset;
/*
* We have multiple paths which can save/restore the timer state onto
* the hardware, so we need some way of keeping track of where the

View File

@ -180,9 +180,7 @@ static void host_test_system_suspend(void)
enter_guest(source);
TEST_ASSERT(run->exit_reason == KVM_EXIT_SYSTEM_EVENT,
"Unhandled exit reason: %u (%s)",
run->exit_reason, exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(source, KVM_EXIT_SYSTEM_EVENT);
TEST_ASSERT(run->system_event.type == KVM_SYSTEM_EVENT_SUSPEND,
"Unhandled system event: %u (expected: %u)",
run->system_event.type, KVM_SYSTEM_EVENT_SUSPEND);

View File

@ -63,6 +63,15 @@ void test_assert(bool exp, const char *exp_str,
#a, #b, #a, (unsigned long) __a, #b, (unsigned long) __b); \
} while (0)
#define TEST_ASSERT_KVM_EXIT_REASON(vcpu, expected) do { \
__u32 exit_reason = (vcpu)->run->exit_reason; \
\
TEST_ASSERT(exit_reason == (expected), \
"Wanted KVM exit reason: %u (%s), got: %u (%s)", \
(expected), exit_reason_str((expected)), \
exit_reason, exit_reason_str(exit_reason)); \
} while (0)
#define TEST_FAIL(fmt, ...) do { \
TEST_ASSERT(false, fmt, ##__VA_ARGS__); \
__builtin_unreachable(); \
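TEST_ASSERT_KVM_EXIT_REASON() snapshots run->exit_reason into a local exactly once and then uses the copy in both the comparison and the failure message, so the assertion can neither double-evaluate its argument nor race with a changing run struct. The same pattern in a generic, compilable form (names invented):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical names; the point is the once-only snapshot of 'actual'. */
#define ASSERT_EQ_ONCE(actual_expr, expected) do {                       \
    unsigned int __actual = (actual_expr);  /* evaluated exactly once */ \
    if (__actual != (expected)) {                                        \
        fprintf(stderr, "wanted %u, got %u\n",                           \
                (unsigned int)(expected), __actual);                     \
        exit(1);                                                         \
    }                                                                    \
} while (0)

int main(void)
{
    ASSERT_EQ_ONCE(2 + 2, 4);
    puts("ok");
    return 0;
}

Call sites then collapse to a single line, as the conversions throughout the rest of this series show.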

View File

@ -1063,6 +1063,8 @@ uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr);
uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
uint64_t a3);
uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
void xen_hypercall(uint64_t nr, uint64_t a0, void *a1);
void __vm_xsave_require_permission(int bit, const char *name);

View File

@ -1815,38 +1815,53 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
vcpu_dump(stream, vcpu, indent + 2);
}
#define KVM_EXIT_STRING(x) {KVM_EXIT_##x, #x}
/* Known KVM exit reasons */
static struct exit_reason {
unsigned int reason;
const char *name;
} exit_reasons_known[] = {
{KVM_EXIT_UNKNOWN, "UNKNOWN"},
{KVM_EXIT_EXCEPTION, "EXCEPTION"},
{KVM_EXIT_IO, "IO"},
{KVM_EXIT_HYPERCALL, "HYPERCALL"},
{KVM_EXIT_DEBUG, "DEBUG"},
{KVM_EXIT_HLT, "HLT"},
{KVM_EXIT_MMIO, "MMIO"},
{KVM_EXIT_IRQ_WINDOW_OPEN, "IRQ_WINDOW_OPEN"},
{KVM_EXIT_SHUTDOWN, "SHUTDOWN"},
{KVM_EXIT_FAIL_ENTRY, "FAIL_ENTRY"},
{KVM_EXIT_INTR, "INTR"},
{KVM_EXIT_SET_TPR, "SET_TPR"},
{KVM_EXIT_TPR_ACCESS, "TPR_ACCESS"},
{KVM_EXIT_S390_SIEIC, "S390_SIEIC"},
{KVM_EXIT_S390_RESET, "S390_RESET"},
{KVM_EXIT_DCR, "DCR"},
{KVM_EXIT_NMI, "NMI"},
{KVM_EXIT_INTERNAL_ERROR, "INTERNAL_ERROR"},
{KVM_EXIT_OSI, "OSI"},
{KVM_EXIT_PAPR_HCALL, "PAPR_HCALL"},
{KVM_EXIT_DIRTY_RING_FULL, "DIRTY_RING_FULL"},
{KVM_EXIT_X86_RDMSR, "RDMSR"},
{KVM_EXIT_X86_WRMSR, "WRMSR"},
{KVM_EXIT_XEN, "XEN"},
{KVM_EXIT_HYPERV, "HYPERV"},
KVM_EXIT_STRING(UNKNOWN),
KVM_EXIT_STRING(EXCEPTION),
KVM_EXIT_STRING(IO),
KVM_EXIT_STRING(HYPERCALL),
KVM_EXIT_STRING(DEBUG),
KVM_EXIT_STRING(HLT),
KVM_EXIT_STRING(MMIO),
KVM_EXIT_STRING(IRQ_WINDOW_OPEN),
KVM_EXIT_STRING(SHUTDOWN),
KVM_EXIT_STRING(FAIL_ENTRY),
KVM_EXIT_STRING(INTR),
KVM_EXIT_STRING(SET_TPR),
KVM_EXIT_STRING(TPR_ACCESS),
KVM_EXIT_STRING(S390_SIEIC),
KVM_EXIT_STRING(S390_RESET),
KVM_EXIT_STRING(DCR),
KVM_EXIT_STRING(NMI),
KVM_EXIT_STRING(INTERNAL_ERROR),
KVM_EXIT_STRING(OSI),
KVM_EXIT_STRING(PAPR_HCALL),
KVM_EXIT_STRING(S390_UCONTROL),
KVM_EXIT_STRING(WATCHDOG),
KVM_EXIT_STRING(S390_TSCH),
KVM_EXIT_STRING(EPR),
KVM_EXIT_STRING(SYSTEM_EVENT),
KVM_EXIT_STRING(S390_STSI),
KVM_EXIT_STRING(IOAPIC_EOI),
KVM_EXIT_STRING(HYPERV),
KVM_EXIT_STRING(ARM_NISV),
KVM_EXIT_STRING(X86_RDMSR),
KVM_EXIT_STRING(X86_WRMSR),
KVM_EXIT_STRING(DIRTY_RING_FULL),
KVM_EXIT_STRING(AP_RESET_HOLD),
KVM_EXIT_STRING(X86_BUS_LOCK),
KVM_EXIT_STRING(XEN),
KVM_EXIT_STRING(RISCV_SBI),
KVM_EXIT_STRING(RISCV_CSR),
KVM_EXIT_STRING(NOTIFY),
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
{KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
KVM_EXIT_STRING(MEMORY_NOT_PRESENT),
#endif
};
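KVM_EXIT_STRING(x) pastes the token onto KVM_EXIT_ and stringifies it in one step, so a table entry's numeric value and its printable name cannot drift apart. A toy demonstration of the same token-pasting trick, with an invented enum:

#include <stdio.h>

enum { DEMO_EXIT_IO = 2, DEMO_EXIT_MMIO = 6 };

/* One macro emits both the value and its name, so they stay in sync. */
#define DEMO_EXIT_STRING(x) { DEMO_EXIT_##x, #x }

static const struct {
    unsigned int reason;
    const char *name;
} reasons[] = {
    DEMO_EXIT_STRING(IO),
    DEMO_EXIT_STRING(MMIO),
};

int main(void)
{
    for (unsigned int i = 0; i < sizeof(reasons) / sizeof(reasons[0]); i++)
        printf("%u -> %s\n", reasons[i].reason, reasons[i].name);
    return 0;
}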

View File

@ -35,8 +35,7 @@ static uint64_t diag318_handler(void)
vcpu_run(vcpu);
run = vcpu->run;
TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
"DIAGNOSE 0x0318 instruction was not intercepted");
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
TEST_ASSERT(run->s390_sieic.icptcode == ICPT_INSTRUCTION,
"Unexpected intercept code: 0x%x", run->s390_sieic.icptcode);
TEST_ASSERT((run->s390_sieic.ipa & 0xff00) == IPA0_DIAG,

View File

@ -165,26 +165,33 @@ size_t get_trans_hugepagesz(void)
size_t get_def_hugetlb_pagesz(void)
{
char buf[64];
const char *tag = "Hugepagesize:";
const char *hugepagesize = "Hugepagesize:";
const char *hugepages_total = "HugePages_Total:";
FILE *f;
f = fopen("/proc/meminfo", "r");
TEST_ASSERT(f != NULL, "Error in opening /proc/meminfo");
while (fgets(buf, sizeof(buf), f) != NULL) {
if (strstr(buf, tag) == buf) {
if (strstr(buf, hugepages_total) == buf) {
unsigned long long total = strtoull(buf + strlen(hugepages_total), NULL, 10);
if (!total) {
fprintf(stderr, "HUGETLB is not enabled in /proc/sys/vm/nr_hugepages\n");
exit(KSFT_SKIP);
}
}
if (strstr(buf, hugepagesize) == buf) {
fclose(f);
return strtoull(buf + strlen(tag), NULL, 10) << 10;
return strtoull(buf + strlen(hugepagesize), NULL, 10) << 10;
}
}
if (feof(f))
TEST_FAIL("HUGETLB is not configured in host kernel");
else
TEST_FAIL("Error in reading /proc/meminfo");
if (feof(f)) {
fprintf(stderr, "HUGETLB is not configured in host kernel");
exit(KSFT_SKIP);
}
fclose(f);
return 0;
TEST_FAIL("Error in reading /proc/meminfo");
}
#define ANON_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS)
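The behavioral change above is skip-versus-fail: when huge pages are merely unavailable, the test now exits with KSFT_SKIP (exit code 4 in the kselftest harness) instead of asserting, so test runners can distinguish a missing environment from a broken test. A minimal sketch of the same probe against /proc/meminfo — messages and structure are assumptions here, not copied from the test:

#include <stdio.h>
#include <stdlib.h>

#define KSFT_SKIP 4   /* kselftest's "skipped" exit code */

int main(void)
{
    char buf[128];
    unsigned long long total;
    FILE *f = fopen("/proc/meminfo", "r");

    if (!f)
        exit(KSFT_SKIP);
    while (fgets(buf, sizeof(buf), f)) {
        if (sscanf(buf, "HugePages_Total: %llu", &total) == 1) {
            fclose(f);
            if (!total) {
                fprintf(stderr, "no huge pages reserved, skipping\n");
                exit(KSFT_SKIP);
            }
            return 0;   /* huge pages available, test could proceed */
        }
    }
    fclose(f);
    fprintf(stderr, "HUGETLB not configured in host kernel, skipping\n");
    exit(KSFT_SKIP);
}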

View File

@ -1139,21 +1139,36 @@ const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
return NULL;
}
#define X86_HYPERCALL(inputs...) \
({ \
uint64_t r; \
\
asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t" \
"jnz 1f\n\t" \
"vmcall\n\t" \
"jmp 2f\n\t" \
"1: vmmcall\n\t" \
"2:" \
: "=a"(r) \
: [use_vmmcall] "r" (host_cpu_is_amd), inputs); \
\
r; \
})
uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
uint64_t a3)
{
uint64_t r;
return X86_HYPERCALL("a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
}
asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t"
"jnz 1f\n\t"
"vmcall\n\t"
"jmp 2f\n\t"
"1: vmmcall\n\t"
"2:"
: "=a"(r)
: "a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3),
[use_vmmcall] "r" (host_cpu_is_amd));
return r;
uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
{
return X86_HYPERCALL("a"(nr), "D"(a0), "S"(a1));
}
void xen_hypercall(uint64_t nr, uint64_t a0, void *a1)
{
GUEST_ASSERT(!__xen_hypercall(nr, a0, a1));
}
const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
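X86_HYPERCALL() is a variadic statement-expression macro: each caller supplies its own register constraints, and the macro forwards them into one asm block that selects VMCALL or VMMCALL at run time. A host-runnable sketch of the constraint-forwarding idea — an ADD stands in for the hypercall instruction so it runs outside a guest (assumes x86-64 and GNU inline asm; names invented):

#include <stdint.h>
#include <stdio.h>

/*
 * The macro shape is the part being demonstrated: callers pass raw
 * register constraints, the macro owns the asm template and the result.
 */
#define DEMO_INSN(inputs...)                          \
({                                                    \
    uint64_t r;                                       \
    asm volatile("mov %%rbx, %%rax\n\t"               \
                 "add %%rcx, %%rax"                   \
                 : "=a"(r) : inputs);                 \
    r;                                                \
})

static uint64_t demo_call(uint64_t a0, uint64_t a1)
{
    /* Each call site chooses the registers, as kvm_hypercall() does. */
    return DEMO_INSN("b"(a0), "c"(a1));
}

int main(void)
{
    printf("%llu\n", (unsigned long long)demo_call(40, 2));  /* prints 42 */
    return 0;
}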

View File

@ -126,10 +126,7 @@ void test_req_and_verify_all_valid_regs(struct kvm_vcpu *vcpu)
run->kvm_valid_regs = TEST_SYNC_FIELDS;
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
"Unexpected exit reason: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
TEST_ASSERT(run->s390_sieic.icptcode == 4 &&
(run->s390_sieic.ipa >> 8) == 0x83 &&
(run->s390_sieic.ipb >> 16) == 0x501,
@ -165,10 +162,7 @@ void test_set_and_verify_various_reg_values(struct kvm_vcpu *vcpu)
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
"Unexpected exit reason: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
TEST_ASSERT(run->s.regs.gprs[11] == 0xBAD1DEA + 1,
"r11 sync regs value incorrect 0x%llx.",
run->s.regs.gprs[11]);
@ -200,10 +194,7 @@ void test_clear_kvm_dirty_regs_bits(struct kvm_vcpu *vcpu)
run->s.regs.diag318 = 0x4B1D;
rv = _vcpu_run(vcpu);
TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
"Unexpected exit reason: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
TEST_ASSERT(run->s.regs.gprs[11] != 0xDEADBEEF,
"r11 sync regs value incorrect 0x%llx.",
run->s.regs.gprs[11]);

View File

@ -308,7 +308,6 @@ static void test_delete_memory_region(void)
static void test_zero_memory_regions(void)
{
struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vm *vm;
pr_info("Testing KVM_RUN with zero added memory regions\n");
@ -318,10 +317,7 @@ static void test_zero_memory_regions(void)
vm_ioctl(vm, KVM_SET_NR_MMU_PAGES, (void *)64ul);
vcpu_run(vcpu);
run = vcpu->run;
TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
"Unexpected exit_reason = %u\n", run->exit_reason);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR);
kvm_vm_free(vm);
}

View File

@ -241,7 +241,6 @@ int main(int argc, char *argv[])
struct kvm_regs regs1, regs2;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
struct kvm_x86_state *state;
int xsave_restore_size;
vm_vaddr_t amx_cfg, tiledata, xsavedata;
@ -268,7 +267,6 @@ int main(int argc, char *argv[])
"KVM should enumerate max XSAVE size when XSAVE is supported");
xsave_restore_size = kvm_cpu_property(X86_PROPERTY_XSTATE_MAX_SIZE);
run = vcpu->run;
vcpu_regs_get(vcpu, &regs1);
/* Register #NM handler */
@ -291,10 +289,7 @@ int main(int argc, char *argv[])
for (stage = 1; ; stage++) {
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
@ -350,7 +345,6 @@ int main(int argc, char *argv[])
/* Restore state in a new VM. */
vcpu = vm_recreate_with_one_vcpu(vm);
vcpu_load_state(vcpu, state);
run = vcpu->run;
kvm_x86_state_cleanup(state);
memset(&regs2, 0, sizeof(regs2));

View File

@ -50,7 +50,6 @@ static void guest_code(void)
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vm *vm;
struct kvm_sregs sregs;
struct ucall uc;
@ -58,15 +57,10 @@ int main(int argc, char *argv[])
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVE));
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
run = vcpu->run;
while (1) {
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:

View File

@ -204,7 +204,7 @@ int main(void)
vcpu_guest_debug_set(vcpu, &debug);
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, "KVM_EXIT_IO");
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
cmd = get_ucall(vcpu, &uc);
TEST_ASSERT(cmd == UCALL_DONE, "UCALL_DONE");

View File

@ -24,10 +24,7 @@ static inline void handle_flds_emulation_failure_exit(struct kvm_vcpu *vcpu)
uint8_t *insn_bytes;
uint64_t flags;
TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
"Unexpected exit reason: %u (%s)",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR);
TEST_ASSERT(run->emulation_failure.suberror == KVM_INTERNAL_ERROR_EMULATION,
"Unexpected suberror: %u",

View File

@ -207,13 +207,11 @@ int main(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
struct ucall uc;
vm_vaddr_t tsc_page_gva;
int stage;
vm = vm_create_with_one_vcpu(&vcpu, guest_main);
run = vcpu->run;
vcpu_set_hv_cpuid(vcpu);
@ -227,10 +225,7 @@ int main(void)
for (stage = 1;; stage++) {
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:

View File

@ -237,7 +237,6 @@ int main(int argc, char *argv[])
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
struct ucall uc;
int stage;
@ -266,13 +265,8 @@ int main(int argc, char *argv[])
pr_info("Running L1 which uses EVMCS to run L2\n");
for (stage = 1;; stage++) {
run = vcpu->run;
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:

View File

@ -122,7 +122,6 @@ static void guest_test_msrs_access(void)
{
struct kvm_cpuid2 *prev_cpuid = NULL;
struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vm *vm;
struct ucall uc;
int stage = 0;
@ -151,8 +150,6 @@ static void guest_test_msrs_access(void)
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
run = vcpu->run;
/* TODO: Make this entire test easier to maintain. */
if (stage >= 21)
vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_SYNIC2, 0);
@ -494,9 +491,7 @@ static void guest_test_msrs_access(void)
msr->idx, msr->write ? "write" : "read");
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"unexpected exit reason: %u (%s)",
run->exit_reason, exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
@ -518,7 +513,6 @@ static void guest_test_hcalls_access(void)
{
struct kvm_cpuid2 *prev_cpuid = NULL;
struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vm *vm;
struct ucall uc;
int stage = 0;
@ -550,8 +544,6 @@ static void guest_test_hcalls_access(void)
vcpu_init_cpuid(vcpu, prev_cpuid);
}
run = vcpu->run;
switch (stage) {
case 0:
vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE);
@ -669,9 +661,7 @@ static void guest_test_hcalls_access(void)
pr_debug("Stage %d: testing hcall: 0x%lx\n", stage, hcall->control);
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"unexpected exit reason: %u (%s)",
run->exit_reason, exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:

View File

@ -243,7 +243,6 @@ int main(int argc, char *argv[])
{
struct kvm_vm *vm;
struct kvm_vcpu *vcpu[3];
unsigned int exit_reason;
vm_vaddr_t hcall_page;
pthread_t threads[2];
int stage = 1, r;
@ -283,10 +282,7 @@ int main(int argc, char *argv[])
while (true) {
vcpu_run(vcpu[0]);
exit_reason = vcpu[0]->run->exit_reason;
TEST_ASSERT(exit_reason == KVM_EXIT_IO,
"unexpected exit reason: %u (%s)",
exit_reason, exit_reason_str(exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu[0], KVM_EXIT_IO);
switch (get_ucall(vcpu[0], &uc)) {
case UCALL_SYNC:

View File

@ -156,7 +156,6 @@ int main(int argc, char *argv[])
vm_vaddr_t hcall_page;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
struct ucall uc;
int stage;
@ -165,7 +164,6 @@ int main(int argc, char *argv[])
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vcpu_set_hv_cpuid(vcpu);
run = vcpu->run;
vcpu_alloc_svm(vm, &nested_gva);
vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva);
@ -177,10 +175,7 @@ int main(int argc, char *argv[])
for (stage = 1;; stage++) {
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:

View File

@ -542,18 +542,13 @@ static void *vcpu_thread(void *arg)
struct ucall uc;
int old;
int r;
unsigned int exit_reason;
r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
TEST_ASSERT(!r, "pthread_setcanceltype failed on vcpu_id=%u with errno=%d",
vcpu->id, r);
vcpu_run(vcpu);
exit_reason = vcpu->run->exit_reason;
TEST_ASSERT(exit_reason == KVM_EXIT_IO,
"vCPU %u exited with unexpected exit reason %u-%s, expected KVM_EXIT_IO",
vcpu->id, exit_reason, exit_reason_str(exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
@ -587,7 +582,6 @@ int main(int argc, char *argv[])
{
struct kvm_vm *vm;
struct kvm_vcpu *vcpu[3];
unsigned int exit_reason;
pthread_t threads[2];
vm_vaddr_t test_data_page, gva;
vm_paddr_t gpa;
@ -657,11 +651,7 @@ int main(int argc, char *argv[])
while (true) {
vcpu_run(vcpu[0]);
exit_reason = vcpu[0]->run->exit_reason;
TEST_ASSERT(exit_reason == KVM_EXIT_IO,
"unexpected exit reason: %u (%s)",
exit_reason, exit_reason_str(exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu[0], KVM_EXIT_IO);
switch (get_ucall(vcpu[0], &uc)) {
case UCALL_SYNC:

View File

@ -105,7 +105,6 @@ static void setup_clock(struct kvm_vm *vm, struct test_case *test_case)
static void enter_guest(struct kvm_vcpu *vcpu)
{
struct kvm_clock_data start, end;
struct kvm_run *run = vcpu->run;
struct kvm_vm *vm = vcpu->vm;
struct ucall uc;
int i;
@ -118,9 +117,7 @@ static void enter_guest(struct kvm_vcpu *vcpu)
vcpu_run(vcpu);
vm_ioctl(vm, KVM_GET_CLOCK, &end);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"unexpected exit reason: %u (%s)",
run->exit_reason, exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:

View File

@ -111,14 +111,11 @@ static void pr_hcall(struct ucall *uc)
static void enter_guest(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
struct ucall uc;
while (true) {
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"unexpected exit reason: %u (%s)",
run->exit_reason, exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_PR_MSR:

View File

@ -64,7 +64,6 @@ int main(int argc, char *argv[])
{
uint64_t disabled_quirks;
struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vm *vm;
struct ucall uc;
int testcase;
@ -74,18 +73,12 @@ int main(int argc, char *argv[])
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vcpu_clear_cpuid_feature(vcpu, X86_FEATURE_MWAIT);
run = vcpu->run;
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vcpu);
while (1) {
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:

View File

@ -166,12 +166,9 @@ static void __attribute__((__flatten__)) l1_guest_code(void *test_data)
static void assert_ucall_vector(struct kvm_vcpu *vcpu, int vector)
{
struct kvm_run *run = vcpu->run;
struct ucall uc;
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason, exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:

View File

@ -36,15 +36,12 @@ static void guest_code(void)
static void test_msr_platform_info_enabled(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
struct ucall uc;
vm_enable_cap(vcpu->vm, KVM_CAP_MSR_PLATFORM_INFO, true);
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Exit_reason other than KVM_EXIT_IO: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
get_ucall(vcpu, &uc);
TEST_ASSERT(uc.cmd == UCALL_SYNC,
"Received ucall other than UCALL_SYNC: %lu\n", uc.cmd);
@ -56,14 +53,9 @@ static void test_msr_platform_info_enabled(struct kvm_vcpu *vcpu)
static void test_msr_platform_info_disabled(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
vm_enable_cap(vcpu->vm, KVM_CAP_MSR_PLATFORM_INFO, false);
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
"Exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN);
}
int main(int argc, char *argv[])

View File

@ -151,14 +151,10 @@ static void amd_guest_code(void)
*/
static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
struct ucall uc;
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
get_ucall(vcpu, &uc);
TEST_ASSERT(uc.cmd == UCALL_SYNC,
"Received ucall other than UCALL_SYNC: %lu", uc.cmd);

View File

@ -133,7 +133,6 @@ int main(int argc, char *argv[])
struct kvm_vcpu *vcpu;
struct kvm_regs regs;
struct kvm_vm *vm;
struct kvm_run *run;
struct kvm_x86_state *state;
int stage, stage_reported;
@ -142,8 +141,6 @@ int main(int argc, char *argv[])
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
run = vcpu->run;
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
SMRAM_MEMSLOT, SMRAM_PAGES, 0);
TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
@ -169,10 +166,7 @@ int main(int argc, char *argv[])
for (stage = 1;; stage++) {
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
memset(&regs, 0, sizeof(regs));
vcpu_regs_get(vcpu, &regs);
@ -208,7 +202,6 @@ int main(int argc, char *argv[])
vcpu = vm_recreate_with_one_vcpu(vm);
vcpu_load_state(vcpu, state);
run = vcpu->run;
kvm_x86_state_cleanup(state);
}

View File

@ -158,14 +158,12 @@ int main(int argc, char *argv[])
struct kvm_regs regs1, regs2;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
struct kvm_x86_state *state;
struct ucall uc;
int stage;
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
run = vcpu->run;
vcpu_regs_get(vcpu, &regs1);
@ -183,10 +181,7 @@ int main(int argc, char *argv[])
for (stage = 1;; stage++) {
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
@ -214,7 +209,6 @@ int main(int argc, char *argv[])
/* Restore state in a new VM. */
vcpu = vm_recreate_with_one_vcpu(vm);
vcpu_load_state(vcpu, state);
run = vcpu->run;
kvm_x86_state_cleanup(state);
memset(&regs2, 0, sizeof(regs2));

View File

@ -85,7 +85,6 @@ static void l1_guest_code(struct svm_test_data *svm)
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_run *run;
vm_vaddr_t svm_gva;
struct kvm_vm *vm;
struct ucall uc;
@ -103,13 +102,8 @@ int main(int argc, char *argv[])
vcpu_alloc_svm(vm, &svm_gva);
vcpu_args_set(vcpu, 1, svm_gva);
run = vcpu->run;
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:

View File

@ -42,7 +42,6 @@ static void l1_guest_code(struct svm_test_data *svm, struct idt_entry *idt)
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
struct kvm_run *run;
vm_vaddr_t svm_gva;
struct kvm_vm *vm;
@ -55,13 +54,9 @@ int main(int argc, char *argv[])
vcpu_alloc_svm(vm, &svm_gva);
vcpu_args_set(vcpu, 2, svm_gva, vm->idt);
run = vcpu->run;
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
"Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN);
kvm_vm_free(vm);
}

View File

@ -176,16 +176,12 @@ static void run_test(bool is_nmi)
memset(&debug, 0, sizeof(debug));
vcpu_guest_debug_set(vcpu, &debug);
struct kvm_run *run = vcpu->run;
struct ucall uc;
alarm(2);
vcpu_run(vcpu);
alarm(0);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:

View File

@ -47,14 +47,10 @@ int main(int argc, char *argv[])
vcpu_args_set(vcpu, 1, svm_gva);
for (;;) {
volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:

View File

@ -132,10 +132,7 @@ int main(int argc, char *argv[])
/* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */
run->kvm_valid_regs = TEST_SYNC_FIELDS;
rv = _vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
vcpu_regs_get(vcpu, &regs);
compare_regs(&regs, &run->s.regs.regs);
@ -154,10 +151,7 @@ int main(int argc, char *argv[])
run->kvm_valid_regs = TEST_SYNC_FIELDS;
run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;
rv = _vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(run->s.regs.regs.rbx == 0xBAD1DEA + 1,
"rbx sync regs value incorrect 0x%llx.",
run->s.regs.regs.rbx);
@ -181,10 +175,7 @@ int main(int argc, char *argv[])
run->kvm_dirty_regs = 0;
run->s.regs.regs.rbx = 0xDEADBEEF;
rv = _vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(run->s.regs.regs.rbx != 0xDEADBEEF,
"rbx sync regs value incorrect 0x%llx.",
run->s.regs.regs.rbx);
@ -199,10 +190,7 @@ int main(int argc, char *argv[])
regs.rbx = 0xBAC0;
vcpu_regs_set(vcpu, &regs);
rv = _vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(run->s.regs.regs.rbx == 0xAAAA,
"rbx sync regs value incorrect 0x%llx.",
run->s.regs.regs.rbx);
@ -219,10 +207,7 @@ int main(int argc, char *argv[])
run->kvm_dirty_regs = TEST_SYNC_FIELDS;
run->s.regs.regs.rbx = 0xBBBB;
rv = _vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(run->s.regs.regs.rbx == 0xBBBB,
"rbx sync regs value incorrect 0x%llx.",
run->s.regs.regs.rbx);

View File

@ -89,9 +89,7 @@ int main(void)
run = vcpu->run;
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Expected KVM_EXIT_IO, got: %u (%s)\n",
run->exit_reason, exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(run->io.port == ARBITRARY_IO_PORT,
"Expected IN from port %d from L2, got port %d",
ARBITRARY_IO_PORT, run->io.port);
@ -111,10 +109,7 @@ int main(void)
if (has_svm) {
TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
"Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN);
} else {
switch (get_ucall(vcpu, &uc)) {
case UCALL_DONE:

View File

@ -64,14 +64,10 @@ static void *run_vcpu(void *_cpu_nr)
pthread_spin_unlock(&create_lock);
for (;;) {
volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_DONE:

View File

@ -137,15 +137,11 @@ static void guest_gp_handler(struct ex_regs *regs)
static void run_vcpu_expect_gp(struct kvm_vcpu *vcpu)
{
unsigned int exit_reason;
struct ucall uc;
vcpu_run(vcpu);
exit_reason = vcpu->run->exit_reason;
TEST_ASSERT(exit_reason == KVM_EXIT_IO,
"exited with unexpected exit reason %u-%s, expected KVM_EXIT_IO",
exit_reason, exit_reason_str(exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_SYNC,
"Expect UCALL_SYNC\n");
TEST_ASSERT(uc.args[1] == SYNC_GP, "#GP is expected.");
@ -182,7 +178,6 @@ static void *run_ucna_injection(void *arg)
struct ucall uc;
int old;
int r;
unsigned int exit_reason;
r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
TEST_ASSERT(r == 0,
@ -191,10 +186,7 @@ static void *run_ucna_injection(void *arg)
vcpu_run(params->vcpu);
exit_reason = params->vcpu->run->exit_reason;
TEST_ASSERT(exit_reason == KVM_EXIT_IO,
"unexpected exit reason %u-%s, expected KVM_EXIT_IO",
exit_reason, exit_reason_str(exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(params->vcpu, KVM_EXIT_IO);
TEST_ASSERT(get_ucall(params->vcpu, &uc) == UCALL_SYNC,
"Expect UCALL_SYNC\n");
TEST_ASSERT(uc.args[1] == SYNC_FIRST_UCNA, "Injecting first UCNA.");
@ -204,10 +196,7 @@ static void *run_ucna_injection(void *arg)
inject_ucna(params->vcpu, FIRST_UCNA_ADDR);
vcpu_run(params->vcpu);
exit_reason = params->vcpu->run->exit_reason;
TEST_ASSERT(exit_reason == KVM_EXIT_IO,
"unexpected exit reason %u-%s, expected KVM_EXIT_IO",
exit_reason, exit_reason_str(exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(params->vcpu, KVM_EXIT_IO);
TEST_ASSERT(get_ucall(params->vcpu, &uc) == UCALL_SYNC,
"Expect UCALL_SYNC\n");
TEST_ASSERT(uc.args[1] == SYNC_SECOND_UCNA, "Injecting second UCNA.");
@ -217,10 +206,7 @@ static void *run_ucna_injection(void *arg)
inject_ucna(params->vcpu, SECOND_UCNA_ADDR);
vcpu_run(params->vcpu);
exit_reason = params->vcpu->run->exit_reason;
TEST_ASSERT(exit_reason == KVM_EXIT_IO,
"unexpected exit reason %u-%s, expected KVM_EXIT_IO",
exit_reason, exit_reason_str(exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(params->vcpu, KVM_EXIT_IO);
if (get_ucall(params->vcpu, &uc) == UCALL_ABORT) {
TEST_ASSERT(false, "vCPU assertion failure: %s.\n",
(const char *)uc.args[0]);

View File

@ -63,11 +63,7 @@ int main(int argc, char *argv[])
while (1) {
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
if (get_ucall(vcpu, &uc))
break;

View File

@ -410,10 +410,7 @@ static void process_rdmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)
check_for_guest_assert(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_RDMSR,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_X86_RDMSR);
TEST_ASSERT(run->msr.index == msr_index,
"Unexpected msr (0x%04x), expected 0x%04x",
run->msr.index, msr_index);
@ -445,10 +442,7 @@ static void process_wrmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)
check_for_guest_assert(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_WRMSR,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_X86_WRMSR);
TEST_ASSERT(run->msr.index == msr_index,
"Unexpected msr (0x%04x), expected 0x%04x",
run->msr.index, msr_index);
@ -472,15 +466,11 @@ static void process_wrmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)
static void process_ucall_done(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
struct ucall uc;
check_for_guest_assert(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s)",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_DONE,
"Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
@ -489,15 +479,11 @@ static void process_ucall_done(struct kvm_vcpu *vcpu)
static uint64_t process_ucall(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
struct ucall uc = {};
check_for_guest_assert(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s)",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:

View File

@ -96,21 +96,14 @@ int main(int argc, char *argv[])
vcpu_run(vcpu);
if (apic_access_addr == high_gpa) {
TEST_ASSERT(run->exit_reason ==
KVM_EXIT_INTERNAL_ERROR,
"Got exit reason other than KVM_EXIT_INTERNAL_ERROR: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR);
TEST_ASSERT(run->internal.suberror ==
KVM_INTERNAL_ERROR_EMULATION,
"Got internal suberror other than KVM_INTERNAL_ERROR_EMULATION: %u\n",
run->internal.suberror);
break;
}
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:

View File

@ -64,10 +64,7 @@ int main(int argc, char *argv[])
struct ucall uc;
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
if (run->io.port == PORT_L0_EXIT)
break;

View File

@ -73,7 +73,6 @@ int main(int argc, char *argv[])
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
struct ucall uc;
bool done = false;
@ -84,7 +83,6 @@ int main(int argc, char *argv[])
vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vcpu, 1, vmx_pages_gva);
run = vcpu->run;
/* Add an extra memory slot for testing dirty logging */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
@ -117,10 +115,7 @@ int main(int argc, char *argv[])
while (!done) {
memset(host_test_mem, 0xaa, TEST_MEM_PAGES * 4096);
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:

View File

@ -26,9 +26,7 @@ static void __run_vcpu_with_invalid_state(struct kvm_vcpu *vcpu)
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
"Expected KVM_EXIT_INTERNAL_ERROR, got %d (%s)\n",
run->exit_reason, exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR);
TEST_ASSERT(run->emulation_failure.suberror == KVM_INTERNAL_ERROR_EMULATION,
"Expected emulation failure, got %d\n",
run->emulation_failure.suberror);

View File

@ -74,9 +74,7 @@ int main(int argc, char *argv[])
* The first exit to L0 userspace should be an I/O access from L2.
* Running L1 should launch L2 without triggering an exit to userspace.
*/
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Expected KVM_EXIT_IO, got: %u (%s)\n",
run->exit_reason, exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(run->io.port == ARBITRARY_IO_PORT,
"Expected IN from port %d from L2, got port %d",

View File

@ -183,14 +183,10 @@ int main(int argc, char *argv[])
vcpu_ioctl(vcpu, KVM_SET_TSC_KHZ, (void *) (tsc_khz / l1_scale_factor));
for (;;) {
volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:

View File

@ -157,7 +157,6 @@ int main(int argc, char *argv[])
struct kvm_regs regs1, regs2;
struct kvm_vm *vm;
struct kvm_run *run;
struct kvm_vcpu *vcpu;
struct kvm_x86_state *state;
struct ucall uc;
@ -173,7 +172,6 @@ int main(int argc, char *argv[])
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
run = vcpu->run;
vcpu_regs_get(vcpu, &regs1);
@ -182,10 +180,7 @@ int main(int argc, char *argv[])
for (stage = 1;; stage++) {
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
@ -237,7 +232,6 @@ int main(int argc, char *argv[])
/* Restore state in a new VM. */
vcpu = vm_recreate_with_one_vcpu(vm);
vcpu_load_state(vcpu, state);
run = vcpu->run;
kvm_x86_state_cleanup(state);
memset(&regs2, 0, sizeof(regs2));

View File

@ -131,14 +131,10 @@ int main(int argc, char *argv[])
vcpu_args_set(vcpu, 1, vmx_pages_gva);
for (;;) {
volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:

View File

@ -198,7 +198,6 @@ static void *vcpu_thread(void *arg)
struct ucall uc;
int old;
int r;
unsigned int exit_reason;
r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
TEST_ASSERT(r == 0,
@ -207,11 +206,8 @@ static void *vcpu_thread(void *arg)
fprintf(stderr, "vCPU thread running vCPU %u\n", vcpu->id);
vcpu_run(vcpu);
exit_reason = vcpu->run->exit_reason;
TEST_ASSERT(exit_reason == KVM_EXIT_IO,
"vCPU %u exited with unexpected exit reason %u-%s, expected KVM_EXIT_IO",
vcpu->id, exit_reason, exit_reason_str(exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
if (get_ucall(vcpu, &uc) == UCALL_ABORT) {
TEST_ASSERT(false,

View File

@ -26,6 +26,9 @@
#define DUMMY_REGION_GPA (SHINFO_REGION_GPA + (3 * PAGE_SIZE))
#define DUMMY_REGION_SLOT 11
#define DUMMY_REGION_GPA_2 (SHINFO_REGION_GPA + (4 * PAGE_SIZE))
#define DUMMY_REGION_SLOT_2 12
#define SHINFO_ADDR (SHINFO_REGION_GPA)
#define VCPU_INFO_ADDR (SHINFO_REGION_GPA + 0x40)
#define PVTIME_ADDR (SHINFO_REGION_GPA + PAGE_SIZE)
@ -41,6 +44,37 @@
#define EVTCHN_TEST2 66
#define EVTCHN_TIMER 13
enum {
TEST_INJECT_VECTOR = 0,
TEST_RUNSTATE_runnable,
TEST_RUNSTATE_blocked,
TEST_RUNSTATE_offline,
TEST_RUNSTATE_ADJUST,
TEST_RUNSTATE_DATA,
TEST_STEAL_TIME,
TEST_EVTCHN_MASKED,
TEST_EVTCHN_UNMASKED,
TEST_EVTCHN_SLOWPATH,
TEST_EVTCHN_SEND_IOCTL,
TEST_EVTCHN_HCALL,
TEST_EVTCHN_HCALL_SLOWPATH,
TEST_EVTCHN_HCALL_EVENTFD,
TEST_TIMER_SETUP,
TEST_TIMER_WAIT,
TEST_TIMER_RESTORE,
TEST_POLL_READY,
TEST_POLL_TIMEOUT,
TEST_POLL_MASKED,
TEST_POLL_WAKE,
TEST_TIMER_PAST,
TEST_LOCKING_SEND_RACE,
TEST_LOCKING_POLL_RACE,
TEST_LOCKING_POLL_TIMEOUT,
TEST_DONE,
TEST_GUEST_SAW_IRQ,
};
#define XEN_HYPERCALL_MSR 0x40000000
#define MIN_STEAL_TIME 50000
@ -144,7 +178,7 @@ static void evtchn_handler(struct ex_regs *regs)
vi->evtchn_pending_sel = 0;
guest_saw_irq = true;
GUEST_SYNC(0x20);
GUEST_SYNC(TEST_GUEST_SAW_IRQ);
}
static void guest_wait_for_irq(void)
@ -165,41 +199,41 @@ static void guest_code(void)
);
/* Trigger an interrupt injection */
GUEST_SYNC(0);
GUEST_SYNC(TEST_INJECT_VECTOR);
guest_wait_for_irq();
/* Test having the host set runstates manually */
GUEST_SYNC(RUNSTATE_runnable);
GUEST_SYNC(TEST_RUNSTATE_runnable);
GUEST_ASSERT(rs->time[RUNSTATE_runnable] != 0);
GUEST_ASSERT(rs->state == 0);
GUEST_SYNC(RUNSTATE_blocked);
GUEST_SYNC(TEST_RUNSTATE_blocked);
GUEST_ASSERT(rs->time[RUNSTATE_blocked] != 0);
GUEST_ASSERT(rs->state == 0);
GUEST_SYNC(RUNSTATE_offline);
GUEST_SYNC(TEST_RUNSTATE_offline);
GUEST_ASSERT(rs->time[RUNSTATE_offline] != 0);
GUEST_ASSERT(rs->state == 0);
/* Test runstate time adjust */
GUEST_SYNC(4);
GUEST_SYNC(TEST_RUNSTATE_ADJUST);
GUEST_ASSERT(rs->time[RUNSTATE_blocked] == 0x5a);
GUEST_ASSERT(rs->time[RUNSTATE_offline] == 0x6b6b);
/* Test runstate time set */
GUEST_SYNC(5);
GUEST_SYNC(TEST_RUNSTATE_DATA);
GUEST_ASSERT(rs->state_entry_time >= 0x8000);
GUEST_ASSERT(rs->time[RUNSTATE_runnable] == 0);
GUEST_ASSERT(rs->time[RUNSTATE_blocked] == 0x6b6b);
GUEST_ASSERT(rs->time[RUNSTATE_offline] == 0x5a);
/* sched_yield() should result in some 'runnable' time */
GUEST_SYNC(6);
GUEST_SYNC(TEST_STEAL_TIME);
GUEST_ASSERT(rs->time[RUNSTATE_runnable] >= MIN_STEAL_TIME);
/* Attempt to deliver a *masked* interrupt */
GUEST_SYNC(7);
GUEST_SYNC(TEST_EVTCHN_MASKED);
/* Wait until we see the bit set */
struct shared_info *si = (void *)SHINFO_VADDR;
@ -207,71 +241,65 @@ static void guest_code(void)
__asm__ __volatile__ ("rep nop" : : : "memory");
/* Now deliver an *unmasked* interrupt */
GUEST_SYNC(8);
GUEST_SYNC(TEST_EVTCHN_UNMASKED);
guest_wait_for_irq();
/* Change memslots and deliver an interrupt */
GUEST_SYNC(9);
GUEST_SYNC(TEST_EVTCHN_SLOWPATH);
guest_wait_for_irq();
/* Deliver event channel with KVM_XEN_HVM_EVTCHN_SEND */
GUEST_SYNC(10);
GUEST_SYNC(TEST_EVTCHN_SEND_IOCTL);
guest_wait_for_irq();
GUEST_SYNC(11);
GUEST_SYNC(TEST_EVTCHN_HCALL);
/* Our turn. Deliver event channel (to ourselves) with
* EVTCHNOP_send hypercall. */
unsigned long rax;
struct evtchn_send s = { .port = 127 };
__asm__ __volatile__ ("vmcall" :
"=a" (rax) :
"a" (__HYPERVISOR_event_channel_op),
"D" (EVTCHNOP_send),
"S" (&s));
GUEST_ASSERT(rax == 0);
xen_hypercall(__HYPERVISOR_event_channel_op, EVTCHNOP_send, &s);
guest_wait_for_irq();
GUEST_SYNC(12);
GUEST_SYNC(TEST_EVTCHN_HCALL_SLOWPATH);
/*
* Same again, but this time the host has messed with memslots so it
* should take the slow path in kvm_xen_set_evtchn().
*/
xen_hypercall(__HYPERVISOR_event_channel_op, EVTCHNOP_send, &s);
guest_wait_for_irq();
GUEST_SYNC(TEST_EVTCHN_HCALL_EVENTFD);
/* Deliver "outbound" event channel to an eventfd which
* happens to be one of our own irqfds. */
s.port = 197;
__asm__ __volatile__ ("vmcall" :
"=a" (rax) :
"a" (__HYPERVISOR_event_channel_op),
"D" (EVTCHNOP_send),
"S" (&s));
GUEST_ASSERT(rax == 0);
xen_hypercall(__HYPERVISOR_event_channel_op, EVTCHNOP_send, &s);
guest_wait_for_irq();
GUEST_SYNC(13);
GUEST_SYNC(TEST_TIMER_SETUP);
/* Set a timer 100ms in the future. */
__asm__ __volatile__ ("vmcall" :
"=a" (rax) :
"a" (__HYPERVISOR_set_timer_op),
"D" (rs->state_entry_time + 100000000));
GUEST_ASSERT(rax == 0);
xen_hypercall(__HYPERVISOR_set_timer_op,
rs->state_entry_time + 100000000, NULL);
GUEST_SYNC(14);
GUEST_SYNC(TEST_TIMER_WAIT);
/* Now wait for the timer */
guest_wait_for_irq();
GUEST_SYNC(15);
GUEST_SYNC(TEST_TIMER_RESTORE);
/* The host has 'restored' the timer. Just wait for it. */
guest_wait_for_irq();
GUEST_SYNC(16);
GUEST_SYNC(TEST_POLL_READY);
/* Poll for an event channel port which is already set */
u32 ports[1] = { EVTCHN_TIMER };
@ -281,65 +309,41 @@ static void guest_code(void)
.timeout = 0,
};
__asm__ __volatile__ ("vmcall" :
"=a" (rax) :
"a" (__HYPERVISOR_sched_op),
"D" (SCHEDOP_poll),
"S" (&p));
xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p);
GUEST_ASSERT(rax == 0);
GUEST_SYNC(17);
GUEST_SYNC(TEST_POLL_TIMEOUT);
/* Poll for an unset port and wait for the timeout. */
p.timeout = 100000000;
__asm__ __volatile__ ("vmcall" :
"=a" (rax) :
"a" (__HYPERVISOR_sched_op),
"D" (SCHEDOP_poll),
"S" (&p));
xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p);
GUEST_ASSERT(rax == 0);
GUEST_SYNC(18);
GUEST_SYNC(TEST_POLL_MASKED);
/* A timer will wake the masked port we're waiting on, while we poll */
p.timeout = 0;
__asm__ __volatile__ ("vmcall" :
"=a" (rax) :
"a" (__HYPERVISOR_sched_op),
"D" (SCHEDOP_poll),
"S" (&p));
xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p);
GUEST_ASSERT(rax == 0);
GUEST_SYNC(19);
GUEST_SYNC(TEST_POLL_WAKE);
/* A timer wake an *unmasked* port which should wake us with an
* actual interrupt, while we're polling on a different port. */
ports[0]++;
p.timeout = 0;
__asm__ __volatile__ ("vmcall" :
"=a" (rax) :
"a" (__HYPERVISOR_sched_op),
"D" (SCHEDOP_poll),
"S" (&p));
GUEST_ASSERT(rax == 0);
xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p);
guest_wait_for_irq();
GUEST_SYNC(20);
GUEST_SYNC(TEST_TIMER_PAST);
/* Timer should have fired already */
guest_wait_for_irq();
GUEST_SYNC(21);
GUEST_SYNC(TEST_LOCKING_SEND_RACE);
/* Racing host ioctls */
guest_wait_for_irq();
GUEST_SYNC(22);
GUEST_SYNC(TEST_LOCKING_POLL_RACE);
/* Racing vmcall against host ioctl */
ports[0] = 0;
@ -360,24 +364,19 @@ wait_for_timer:
* timer IRQ is dropped due to an invalid event channel.
*/
for (i = 0; i < 100 && !guest_saw_irq; i++)
asm volatile("vmcall"
: "=a" (rax)
: "a" (__HYPERVISOR_sched_op),
"D" (SCHEDOP_poll),
"S" (&p)
: "memory");
__xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p);
/*
* Re-send the timer IRQ if it was (likely) dropped due to the timer
* expiring while the event channel was invalid.
*/
if (!guest_saw_irq) {
GUEST_SYNC(23);
GUEST_SYNC(TEST_LOCKING_POLL_TIMEOUT);
goto wait_for_timer;
}
guest_saw_irq = false;
GUEST_SYNC(24);
GUEST_SYNC(TEST_DONE);
}
static int cmp_timespec(struct timespec *a, struct timespec *b)
@ -623,15 +622,10 @@ int main(int argc, char *argv[])
bool evtchn_irq_expected = false;
for (;;) {
volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
@ -647,25 +641,26 @@ int main(int argc, char *argv[])
"runstate times don't add up");
switch (uc.args[1]) {
case 0:
case TEST_INJECT_VECTOR:
if (verbose)
printf("Delivering evtchn upcall\n");
evtchn_irq_expected = true;
vinfo->evtchn_upcall_pending = 1;
break;
case RUNSTATE_runnable...RUNSTATE_offline:
case TEST_RUNSTATE_runnable...TEST_RUNSTATE_offline:
TEST_ASSERT(!evtchn_irq_expected, "Event channel IRQ not seen");
if (!do_runstate_tests)
goto done;
if (verbose)
printf("Testing runstate %s\n", runstate_names[uc.args[1]]);
rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT;
rst.u.runstate.state = uc.args[1];
rst.u.runstate.state = uc.args[1] + RUNSTATE_runnable -
TEST_RUNSTATE_runnable;
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
break;
case 4:
case TEST_RUNSTATE_ADJUST:
if (verbose)
printf("Testing RUNSTATE_ADJUST\n");
rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST;
@ -680,7 +675,7 @@ int main(int argc, char *argv[])
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
break;
case 5:
case TEST_RUNSTATE_DATA:
if (verbose)
printf("Testing RUNSTATE_DATA\n");
rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA;
@ -692,7 +687,7 @@ int main(int argc, char *argv[])
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
break;
case 6:
case TEST_STEAL_TIME:
if (verbose)
printf("Testing steal time\n");
/* Yield until scheduler delay exceeds target */
@ -702,7 +697,7 @@ int main(int argc, char *argv[])
} while (get_run_delay() < rundelay);
break;
case 7:
case TEST_EVTCHN_MASKED:
if (!do_eventfd_tests)
goto done;
if (verbose)
@ -712,7 +707,7 @@ int main(int argc, char *argv[])
alarm(1);
break;
case 8:
case TEST_EVTCHN_UNMASKED:
if (verbose)
printf("Testing unmasked event channel\n");
/* Unmask that, but deliver the other one */
@ -723,7 +718,7 @@ int main(int argc, char *argv[])
alarm(1);
break;
case 9:
case TEST_EVTCHN_SLOWPATH:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
shinfo->evtchn_pending[1] = 0;
@ -736,7 +731,7 @@ int main(int argc, char *argv[])
alarm(1);
break;
case 10:
case TEST_EVTCHN_SEND_IOCTL:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
if (!do_evtchn_tests)
@ -756,7 +751,7 @@ int main(int argc, char *argv[])
alarm(1);
break;
case 11:
case TEST_EVTCHN_HCALL:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
shinfo->evtchn_pending[1] = 0;
@ -767,7 +762,20 @@ int main(int argc, char *argv[])
alarm(1);
break;
case 12:
case TEST_EVTCHN_HCALL_SLOWPATH:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
shinfo->evtchn_pending[0] = 0;
if (verbose)
printf("Testing guest EVTCHNOP_send direct to evtchn after memslot change\n");
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
DUMMY_REGION_GPA_2, DUMMY_REGION_SLOT_2, 1, 0);
evtchn_irq_expected = true;
alarm(1);
break;
case TEST_EVTCHN_HCALL_EVENTFD:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
shinfo->evtchn_pending[0] = 0;
@ -778,7 +786,7 @@ int main(int argc, char *argv[])
alarm(1);
break;
case 13:
case TEST_TIMER_SETUP:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
shinfo->evtchn_pending[1] = 0;
@ -787,7 +795,7 @@ int main(int argc, char *argv[])
printf("Testing guest oneshot timer\n");
break;
case 14:
case TEST_TIMER_WAIT:
memset(&tmr, 0, sizeof(tmr));
tmr.type = KVM_XEN_VCPU_ATTR_TYPE_TIMER;
vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
@ -801,7 +809,7 @@ int main(int argc, char *argv[])
alarm(1);
break;
case 15:
case TEST_TIMER_RESTORE:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
shinfo->evtchn_pending[0] = 0;
@ -815,7 +823,7 @@ int main(int argc, char *argv[])
alarm(1);
break;
case 16:
case TEST_POLL_READY:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
@ -825,14 +833,14 @@ int main(int argc, char *argv[])
alarm(1);
break;
case 17:
case TEST_POLL_TIMEOUT:
if (verbose)
printf("Testing SCHEDOP_poll timeout\n");
shinfo->evtchn_pending[0] = 0;
alarm(1);
break;
case 18:
case TEST_POLL_MASKED:
if (verbose)
printf("Testing SCHEDOP_poll wake on masked event\n");
@ -841,7 +849,7 @@ int main(int argc, char *argv[])
alarm(1);
break;
case 19:
case TEST_POLL_WAKE:
shinfo->evtchn_pending[0] = shinfo->evtchn_mask[0] = 0;
if (verbose)
printf("Testing SCHEDOP_poll wake on unmasked event\n");
@ -858,7 +866,7 @@ int main(int argc, char *argv[])
alarm(1);
break;
case 20:
case TEST_TIMER_PAST:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
/* Read timer and check it is no longer pending */
@ -875,7 +883,7 @@ int main(int argc, char *argv[])
alarm(1);
break;
case 21:
case TEST_LOCKING_SEND_RACE:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
alarm(0);
@ -897,7 +905,7 @@ int main(int argc, char *argv[])
__vm_ioctl(vm, KVM_XEN_HVM_EVTCHN_SEND, &uxe);
break;
case 22:
case TEST_LOCKING_POLL_RACE:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
@ -912,7 +920,7 @@ int main(int argc, char *argv[])
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
break;
case 23:
case TEST_LOCKING_POLL_TIMEOUT:
/*
* Optional and possibly repeated sync point.
* Injecting the timer IRQ may fail if the
@ -934,7 +942,7 @@ int main(int argc, char *argv[])
SHINFO_RACE_TIMEOUT * 1000000000ULL;
vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
break;
case 24:
case TEST_DONE:
TEST_ASSERT(!evtchn_irq_expected,
"Expected event channel IRQ but it didn't happen");
@ -945,7 +953,7 @@ int main(int argc, char *argv[])
TEST_ASSERT(ret == 0, "pthread_join() failed: %s", strerror(ret));
goto done;
case 0x20:
case TEST_GUEST_SAW_IRQ:
TEST_ASSERT(evtchn_irq_expected, "Unexpected event channel IRQ");
evtchn_irq_expected = false;
break;
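Replacing the magic GUEST_SYNC() numbers with a shared enum makes each sync point self-describing and lets a new stage such as TEST_EVTCHN_HCALL_SLOWPATH slot into the middle without hand-renumbering every later case. A toy version of the shared-enum handshake (names invented):

#include <stdio.h>

/* One enum shared by "guest" and "host": sync points are self-describing. */
enum {
    STAGE_SETUP,
    STAGE_SEND,
    STAGE_SEND_SLOWPATH,   /* new stage inserted without renumbering */
    STAGE_DONE,
};

static void host_handle(int stage)
{
    switch (stage) {
    case STAGE_SEND:
        puts("host: deliver event (fast path)");
        break;
    case STAGE_SEND_SLOWPATH:
        puts("host: change memslots, then deliver event");
        break;
    case STAGE_DONE:
        puts("host: done");
        break;
    }
}

int main(void)
{
    for (int stage = STAGE_SETUP; stage <= STAGE_DONE; stage++)
        host_handle(stage);
    return 0;
}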

View File

@ -122,10 +122,7 @@ int main(int argc, char *argv[])
continue;
}
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT: