Merge tag 'kvm-x86-selftests-6.9' of https://github.com/kvm-x86/linux into HEAD

KVM selftests changes for 6.9:

 - Add macros to reduce the amount of boilerplate code needed to write
   "simple" selftests, and to utilize selftest TAP infrastructure, which is
   especially beneficial for KVM selftests with multiple testcases.

 - Add basic smoke tests for SEV and SEV-ES, along with a pile of library
   support for handling private/encrypted/protected memory.

 - Fix benign bugs where tests neglect to close() guest_memfd files.

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEKTobbabEP7vbhhN9OlYIJqCjN/0FAmXrUT8ACgkQOlYIJqCj
N/0azBAAkjVan7STJkDkyoSJAfXbGLFtt1SrSi7886siW+IVIwINyHAdqFbJG8h/
OXSfkQ6Mu4GY27qmuPqAbfVksb6ccAd0SdEDNixtErs2qU4BJvAiNfxxJlfx9b0f
IGhN5mNNcxC4LosEIXZJRI9QPfXsxWkiXvShJ7qQmGXx1/oZGMCTyL6L6Bpqz4PV
PDUAgeQDME1G0uw2AbN5pl9yS1Macl1R5Z0FjXs7pHu/Qy05fn3Afb1UsC4LfcW6
BTUgD4NYamaBOjzgiOzjBZCAL6ee3ZUx+Wy0ohfM2Ewm/MSArPt3SRuIck07bmUu
FRuAKvb0q4Mc6uL9mvxP5t5aowP/2IIb1qR1DakXbXqSIVS4+yQzRhJqaVKdIRuD
KXnxUFXqZ0QOLTgoWRK8fRVwMJWT0kFskNaAmDhcIoWVPxlvGjlXLSYncLIYTeic
qC4Da02p+DSatw+GeONh3Eh2LUfyHuET5Wjb6GVsPr12IAx4KREUWJLShjHtF4FZ
cXncKS6DCT3X5EjoruXgxYYKNoYG0S4ied8G0xE8El/i/O8X8IyeJu6sisdYZF/G
SYpdooF+jnJeMq5eivL+WlaThOVcMpPeNp9fmU3g/TUTn/fIGpBtMf+goZG5jFLz
pzLucXYehpsx28duyEC5SckdVJQ36J5EwZ/ybB35hh6NadMm7LM=
=x6+F
-----END PGP SIGNATURE-----

commit 4d4c02852a
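For context on the first bullet: the new KVM_ONE_VCPU_TEST_SUITE()/KVM_ONE_VCPU_TEST() macros (defined in kvm_test_harness.h below) wrap the kselftest harness fixtures so that every testcase runs against a fresh VM with a single vCPU and reports via TAP. A minimal sketch of a selftest built on them follows; the "demo" suite name and trivial guest are illustrative, while the macros, vcpu_run() and test_harness_run() are the real APIs used by the converted sync_regs and fix_hypercall tests in this diff:

    /* Hypothetical usage sketch; mirrors the plumbing in sync_regs_test.c. */
    #include "kvm_test_harness.h"
    #include "test_util.h"
    #include "kvm_util.h"

    static void guest_code(void)
    {
            GUEST_DONE();   /* ucall back to the host: testcase is done */
    }

    /* Declares the fixture: setup creates a VM + vCPU, teardown frees it. */
    KVM_ONE_VCPU_TEST_SUITE(demo);

    /* Each testcase receives "vcpu", with its entry point set to guest_code. */
    KVM_ONE_VCPU_TEST(demo, smoke, guest_code)
    {
            vcpu_run(vcpu);
    }

    int main(int argc, char *argv[])
    {
            /* Runs every KVM_ONE_VCPU_TEST() in the suite and emits TAP output. */
            return test_harness_run(argc, argv);
    }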
tools/testing/selftests/kvm/Makefile

@@ -37,6 +37,7 @@ LIBKVM_x86_64 += lib/x86_64/handlers.S
 LIBKVM_x86_64 += lib/x86_64/hyperv.c
 LIBKVM_x86_64 += lib/x86_64/memstress.c
 LIBKVM_x86_64 += lib/x86_64/processor.c
+LIBKVM_x86_64 += lib/x86_64/sev.c
 LIBKVM_x86_64 += lib/x86_64/svm.c
 LIBKVM_x86_64 += lib/x86_64/ucall.c
 LIBKVM_x86_64 += lib/x86_64/vmx.c
@@ -118,6 +119,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_caps_test
 TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
 TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
 TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests
+TEST_GEN_PROGS_x86_64 += x86_64/sev_smoke_test
 TEST_GEN_PROGS_x86_64 += x86_64/amx_test
 TEST_GEN_PROGS_x86_64 += x86_64/max_vcpuid_cap_test
 TEST_GEN_PROGS_x86_64 += x86_64/triple_fault_event_test
tools/testing/selftests/kvm/guest_memfd_test.c

@@ -167,6 +167,9 @@ static void test_create_guest_memfd_multiple(struct kvm_vm *vm)
 	TEST_ASSERT(ret != -1, "memfd fstat should succeed");
 	TEST_ASSERT(st1.st_size == 4096, "first memfd st_size should still match requested size");
 	TEST_ASSERT(st1.st_ino != st2.st_ino, "different memfd should have different inode numbers");
+
+	close(fd2);
+	close(fd1);
 }
 
 int main(int argc, char *argv[])
tools/testing/selftests/kvm/include/aarch64/kvm_util_arch.h (new file, 7 lines; path inferred from the diff's alphabetical file ordering)

@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UTIL_ARCH_H
+#define SELFTEST_KVM_UTIL_ARCH_H
+
+struct kvm_vm_arch {};
+
+#endif // SELFTEST_KVM_UTIL_ARCH_H
tools/testing/selftests/kvm/include/kvm_test_harness.h (new file, 36 lines)

@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Macros for defining a KVM test
+ *
+ * Copyright (C) 2022, Google LLC.
+ */
+
+#ifndef SELFTEST_KVM_TEST_HARNESS_H
+#define SELFTEST_KVM_TEST_HARNESS_H
+
+#include "kselftest_harness.h"
+
+#define KVM_ONE_VCPU_TEST_SUITE(name)				\
+	FIXTURE(name) {						\
+		struct kvm_vcpu *vcpu;				\
+	};							\
+								\
+	FIXTURE_SETUP(name) {					\
+		(void)vm_create_with_one_vcpu(&self->vcpu, NULL); \
+	}							\
+								\
+	FIXTURE_TEARDOWN(name) {				\
+		kvm_vm_free(self->vcpu->vm);			\
+	}
+
+#define KVM_ONE_VCPU_TEST(suite, test, guestcode)		\
+static void __suite##_##test(struct kvm_vcpu *vcpu);		\
+								\
+TEST_F(suite, test)						\
+{								\
+	vcpu_arch_set_entry_point(self->vcpu, guestcode);	\
+	__suite##_##test(self->vcpu);				\
+}								\
+static void __suite##_##test(struct kvm_vcpu *vcpu)
+
+#endif /* SELFTEST_KVM_TEST_HARNESS_H */
tools/testing/selftests/kvm/include/kvm_util_base.h

@@ -18,9 +18,11 @@
 #include <linux/types.h>
 
 #include <asm/atomic.h>
+#include <asm/kvm.h>
 
 #include <sys/ioctl.h>
 
+#include "kvm_util_arch.h"
 #include "sparsebit.h"
 
 /*
@@ -46,6 +48,7 @@ typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
 struct userspace_mem_region {
 	struct kvm_userspace_memory_region2 region;
 	struct sparsebit *unused_phy_pages;
+	struct sparsebit *protected_phy_pages;
 	int fd;
 	off_t offset;
 	enum vm_mem_backing_src_type backing_src_type;
@@ -90,6 +93,7 @@ enum kvm_mem_region_type {
 struct kvm_vm {
 	int mode;
 	unsigned long type;
+	uint8_t subtype;
 	int kvm_fd;
 	int fd;
 	unsigned int pgtable_levels;
@@ -111,6 +115,9 @@ struct kvm_vm {
 	vm_vaddr_t idt;
 	vm_vaddr_t handlers;
 	uint32_t dirty_ring_size;
+	uint64_t gpa_tag_mask;
+
+	struct kvm_vm_arch arch;
 
 	/* Cache of information for binary stats interface */
 	int stats_fd;
@@ -191,10 +198,14 @@ enum vm_guest_mode {
 };
 
 struct vm_shape {
-	enum vm_guest_mode mode;
-	unsigned int type;
+	uint32_t type;
+	uint8_t  mode;
+	uint8_t  subtype;
+	uint16_t padding;
 };
 
+kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t));
+
 #define VM_TYPE_DEFAULT			0
 
 #define VM_SHAPE(__mode)						\
@@ -564,6 +575,13 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
 		uint64_t guest_paddr, uint32_t slot, uint64_t npages,
 		uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset);
 
+#ifndef vm_arch_has_protected_memory
+static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
+{
+	return false;
+}
+#endif
+
 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
@@ -573,6 +591,9 @@ vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_mi
 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
 vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
 			    enum kvm_mem_region_type type);
+vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
+				 vm_vaddr_t vaddr_min,
+				 enum kvm_mem_region_type type);
 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
 vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
 				 enum kvm_mem_region_type type);
@@ -585,6 +606,12 @@ void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
 
+
+static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
+{
+	return gpa & ~vm->gpa_tag_mask;
+}
+
 void vcpu_run(struct kvm_vcpu *vcpu);
 int _vcpu_run(struct kvm_vcpu *vcpu);
 
@@ -827,10 +854,23 @@ const char *exit_reason_str(unsigned int exit_reason);
 
 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
 			     uint32_t memslot);
-vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-			      vm_paddr_t paddr_min, uint32_t memslot);
+vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+				vm_paddr_t paddr_min, uint32_t memslot,
+				bool protected);
 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
 
+static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+					    vm_paddr_t paddr_min, uint32_t memslot)
+{
+	/*
+	 * By default, allocate memory as protected for VMs that support
+	 * protected memory, as the majority of memory for such VMs is
+	 * protected, i.e. using shared memory is effectively opt-in.
+	 */
+	return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
+				    vm_arch_has_protected_memory(vm));
+}
+
 /*
  * ____vm_create() does KVM_CREATE_VM and little else.  __vm_create() also
  * loads the test binary into guest memory and creates an IRQ chip (x86 only).
@@ -969,15 +1009,18 @@ static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
  * Input Args:
  *   vm - Virtual Machine
  *   vcpu_id - The id of the VCPU to add to the VM.
- *   guest_code - The vCPU's entry point
  */
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-				  void *guest_code);
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);
 
 static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 					   void *guest_code)
 {
-	return vm_arch_vcpu_add(vm, vcpu_id, guest_code);
+	struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);
+
+	vcpu_arch_set_entry_point(vcpu, guest_code);
+
+	return vcpu;
 }
 
 /* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
@@ -1081,6 +1124,8 @@ void kvm_selftest_arch_init(void);
 
 void kvm_arch_vm_post_create(struct kvm_vm *vm);
 
+bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);
+
 uint32_t guest_get_vcpuid(void);
 
 #endif /* SELFTEST_KVM_UTIL_BASE_H */
tools/testing/selftests/kvm/include/riscv/kvm_util_arch.h (new file, 7 lines; path inferred from the diff's alphabetical file ordering)

@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UTIL_ARCH_H
+#define SELFTEST_KVM_UTIL_ARCH_H
+
+struct kvm_vm_arch {};
+
+#endif // SELFTEST_KVM_UTIL_ARCH_H
tools/testing/selftests/kvm/include/s390x/kvm_util_arch.h (new file, 7 lines; path inferred from the diff's alphabetical file ordering)

@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UTIL_ARCH_H
+#define SELFTEST_KVM_UTIL_ARCH_H
+
+struct kvm_vm_arch {};
+
+#endif // SELFTEST_KVM_UTIL_ARCH_H
tools/testing/selftests/kvm/include/sparsebit.h

@@ -30,26 +30,26 @@ typedef uint64_t sparsebit_num_t;
 
 struct sparsebit *sparsebit_alloc(void);
 void sparsebit_free(struct sparsebit **sbitp);
-void sparsebit_copy(struct sparsebit *dstp, struct sparsebit *src);
+void sparsebit_copy(struct sparsebit *dstp, const struct sparsebit *src);
 
-bool sparsebit_is_set(struct sparsebit *sbit, sparsebit_idx_t idx);
-bool sparsebit_is_set_num(struct sparsebit *sbit,
+bool sparsebit_is_set(const struct sparsebit *sbit, sparsebit_idx_t idx);
+bool sparsebit_is_set_num(const struct sparsebit *sbit,
 			  sparsebit_idx_t idx, sparsebit_num_t num);
-bool sparsebit_is_clear(struct sparsebit *sbit, sparsebit_idx_t idx);
-bool sparsebit_is_clear_num(struct sparsebit *sbit,
+bool sparsebit_is_clear(const struct sparsebit *sbit, sparsebit_idx_t idx);
+bool sparsebit_is_clear_num(const struct sparsebit *sbit,
 			    sparsebit_idx_t idx, sparsebit_num_t num);
-sparsebit_num_t sparsebit_num_set(struct sparsebit *sbit);
-bool sparsebit_any_set(struct sparsebit *sbit);
-bool sparsebit_any_clear(struct sparsebit *sbit);
-bool sparsebit_all_set(struct sparsebit *sbit);
-bool sparsebit_all_clear(struct sparsebit *sbit);
-sparsebit_idx_t sparsebit_first_set(struct sparsebit *sbit);
-sparsebit_idx_t sparsebit_first_clear(struct sparsebit *sbit);
-sparsebit_idx_t sparsebit_next_set(struct sparsebit *sbit, sparsebit_idx_t prev);
-sparsebit_idx_t sparsebit_next_clear(struct sparsebit *sbit, sparsebit_idx_t prev);
-sparsebit_idx_t sparsebit_next_set_num(struct sparsebit *sbit,
+sparsebit_num_t sparsebit_num_set(const struct sparsebit *sbit);
+bool sparsebit_any_set(const struct sparsebit *sbit);
+bool sparsebit_any_clear(const struct sparsebit *sbit);
+bool sparsebit_all_set(const struct sparsebit *sbit);
+bool sparsebit_all_clear(const struct sparsebit *sbit);
+sparsebit_idx_t sparsebit_first_set(const struct sparsebit *sbit);
+sparsebit_idx_t sparsebit_first_clear(const struct sparsebit *sbit);
+sparsebit_idx_t sparsebit_next_set(const struct sparsebit *sbit, sparsebit_idx_t prev);
+sparsebit_idx_t sparsebit_next_clear(const struct sparsebit *sbit, sparsebit_idx_t prev);
+sparsebit_idx_t sparsebit_next_set_num(const struct sparsebit *sbit,
 				       sparsebit_idx_t start, sparsebit_num_t num);
-sparsebit_idx_t sparsebit_next_clear_num(struct sparsebit *sbit,
+sparsebit_idx_t sparsebit_next_clear_num(const struct sparsebit *sbit,
 					 sparsebit_idx_t start, sparsebit_num_t num);
 
 void sparsebit_set(struct sparsebit *sbitp, sparsebit_idx_t idx);
@@ -62,9 +62,29 @@ void sparsebit_clear_num(struct sparsebit *sbitp,
 			 sparsebit_idx_t start, sparsebit_num_t num);
 void sparsebit_clear_all(struct sparsebit *sbitp);
 
-void sparsebit_dump(FILE *stream, struct sparsebit *sbit,
+void sparsebit_dump(FILE *stream, const struct sparsebit *sbit,
 		    unsigned int indent);
-void sparsebit_validate_internal(struct sparsebit *sbit);
+void sparsebit_validate_internal(const struct sparsebit *sbit);
+
+/*
+ * Iterate over an inclusive ranges within sparsebit @s. In each iteration,
+ * @range_begin and @range_end will take the beginning and end of the set
+ * range, which are of type sparsebit_idx_t.
+ *
+ * For example, if the range [3, 7] (inclusive) is set, within the
+ * iteration, @range_begin will take the value 3 and @range_end will take
+ * the value 7.
+ *
+ * Ensure that there is at least one bit set before using this macro with
+ * sparsebit_any_set(), because sparsebit_first_set() will abort if none
+ * are set.
+ */
+#define sparsebit_for_each_set_range(s, range_begin, range_end)	\
+	for (range_begin = sparsebit_first_set(s),			\
+	     range_end = sparsebit_next_clear(s, range_begin) - 1;	\
+	     range_begin && range_end;					\
+	     range_begin = sparsebit_next_set(s, range_end),		\
+	     range_end = sparsebit_next_clear(s, range_begin) - 1)
 
 #ifdef __cplusplus
 }
tools/testing/selftests/kvm/include/x86_64/kvm_util_arch.h (new file, 23 lines)

@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UTIL_ARCH_H
+#define SELFTEST_KVM_UTIL_ARCH_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+struct kvm_vm_arch {
+	uint64_t c_bit;
+	uint64_t s_bit;
+	int sev_fd;
+	bool is_pt_protected;
+};
+
+static inline bool __vm_arch_has_protected_memory(struct kvm_vm_arch *arch)
+{
+	return arch->c_bit || arch->s_bit;
+}
+
+#define vm_arch_has_protected_memory(vm)			\
+	__vm_arch_has_protected_memory(&(vm)->arch)
+
+#endif // SELFTEST_KVM_UTIL_ARCH_H
tools/testing/selftests/kvm/include/x86_64/processor.h

@@ -23,6 +23,12 @@
 extern bool host_cpu_is_intel;
 extern bool host_cpu_is_amd;
 
+enum vm_guest_x86_subtype {
+	VM_SUBTYPE_NONE = 0,
+	VM_SUBTYPE_SEV,
+	VM_SUBTYPE_SEV_ES,
+};
+
 #define NMI_VECTOR		0x02
 
 #define X86_EFLAGS_FIXED	 (1u << 1)
@@ -273,6 +279,7 @@ struct kvm_x86_cpu_property {
 #define X86_PROPERTY_MAX_EXT_LEAF		KVM_X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31)
 #define X86_PROPERTY_MAX_PHY_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7)
 #define X86_PROPERTY_MAX_VIRT_ADDR		KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15)
+#define X86_PROPERTY_SEV_C_BIT			KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 0, 5)
 #define X86_PROPERTY_PHYS_ADDR_REDUCTION	KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)
 
 #define X86_PROPERTY_MAX_CENTAUR_LEAF		KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31)
@@ -1059,6 +1066,7 @@ do {									\
 } while (0)
 
 void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
+void kvm_init_vm_address_properties(struct kvm_vm *vm);
 bool vm_is_unrestricted_guest(struct kvm_vm *vm);
 
 struct ex_regs {
tools/testing/selftests/kvm/include/x86_64/sev.h (new file, 107 lines)

@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Helpers used for SEV guests
+ *
+ */
+#ifndef SELFTEST_KVM_SEV_H
+#define SELFTEST_KVM_SEV_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include "linux/psp-sev.h"
+
+#include "kvm_util.h"
+#include "svm_util.h"
+#include "processor.h"
+
+enum sev_guest_state {
+	SEV_GUEST_STATE_UNINITIALIZED = 0,
+	SEV_GUEST_STATE_LAUNCH_UPDATE,
+	SEV_GUEST_STATE_LAUNCH_SECRET,
+	SEV_GUEST_STATE_RUNNING,
+};
+
+#define SEV_POLICY_NO_DBG	(1UL << 0)
+#define SEV_POLICY_ES		(1UL << 2)
+
+#define GHCB_MSR_TERM_REQ	0x100
+
+void sev_vm_launch(struct kvm_vm *vm, uint32_t policy);
+void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement);
+void sev_vm_launch_finish(struct kvm_vm *vm);
+
+struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t policy, void *guest_code,
+					   struct kvm_vcpu **cpu);
+
+kvm_static_assert(SEV_RET_SUCCESS == 0);
+
+/*
+ * The KVM_MEMORY_ENCRYPT_OP uAPI is utter garbage and takes an "unsigned long"
+ * instead of a proper struct.  The size of the parameter is embedded in the
+ * ioctl number, i.e. is ABI and thus immutable.  Hack around the mess by
+ * creating an overlay to pass in an "unsigned long" without a cast (casting
+ * will make the compiler unhappy due to dereferencing an aliased pointer).
+ */
+#define __vm_sev_ioctl(vm, cmd, arg)					\
+({									\
+	int r;								\
+									\
+	union {								\
+		struct kvm_sev_cmd c;					\
+		unsigned long raw;					\
+	} sev_cmd = { .c = {						\
+		.id = (cmd),						\
+		.data = (uint64_t)(arg),				\
+		.sev_fd = (vm)->arch.sev_fd,				\
+	} };								\
+									\
+	r = __vm_ioctl(vm, KVM_MEMORY_ENCRYPT_OP, &sev_cmd.raw);	\
+	r ?: sev_cmd.c.error;						\
+})
+
+#define vm_sev_ioctl(vm, cmd, arg)					\
+({									\
+	int ret = __vm_sev_ioctl(vm, cmd, arg);				\
+									\
+	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm);		\
+})
+
+static inline void sev_vm_init(struct kvm_vm *vm)
+{
+	vm->arch.sev_fd = open_sev_dev_path_or_exit();
+
+	vm_sev_ioctl(vm, KVM_SEV_INIT, NULL);
+}
+
+
+static inline void sev_es_vm_init(struct kvm_vm *vm)
+{
+	vm->arch.sev_fd = open_sev_dev_path_or_exit();
+
+	vm_sev_ioctl(vm, KVM_SEV_ES_INIT, NULL);
+}
+
+static inline void sev_register_encrypted_memory(struct kvm_vm *vm,
+						 struct userspace_mem_region *region)
+{
+	struct kvm_enc_region range = {
+		.addr = region->region.userspace_addr,
+		.size = region->region.memory_size,
+	};
+
+	vm_ioctl(vm, KVM_MEMORY_ENCRYPT_REG_REGION, &range);
+}
+
+static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa,
+					  uint64_t size)
+{
+	struct kvm_sev_launch_update_data update_data = {
+		.uaddr = (unsigned long)addr_gpa2hva(vm, gpa),
+		.len = size,
+	};
+
+	vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_DATA, &update_data);
+}
+
+#endif /* SELFTEST_KVM_SEV_H */
tools/testing/selftests/kvm/lib/aarch64/processor.c

@@ -365,8 +365,13 @@ void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
 		indent, "", pstate, pc);
 }
 
-struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-				  struct kvm_vcpu_init *init, void *guest_code)
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
+{
+	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+}
+
+static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+					   struct kvm_vcpu_init *init)
 {
 	size_t stack_size;
 	uint64_t stack_vaddr;
@@ -381,15 +386,22 @@ struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 	aarch64_vcpu_setup(vcpu, init);
 
 	vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
-	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+	return vcpu;
+}
+
+struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+				  struct kvm_vcpu_init *init, void *guest_code)
+{
+	struct kvm_vcpu *vcpu = __aarch64_vcpu_add(vm, vcpu_id, init);
+
+	vcpu_arch_set_entry_point(vcpu, guest_code);
 
 	return vcpu;
 }
 
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-				  void *guest_code)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 {
-	return aarch64_vcpu_add(vm, vcpu_id, NULL, guest_code);
+	return __aarch64_vcpu_add(vm, vcpu_id, NULL);
 }
 
 void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
tools/testing/selftests/kvm/lib/kvm_util.c

@@ -226,6 +226,7 @@ struct kvm_vm *____vm_create(struct vm_shape shape)
 
 	vm->mode = shape.mode;
 	vm->type = shape.type;
+	vm->subtype = shape.subtype;
 
 	vm->pa_bits = vm_guest_mode_params[vm->mode].pa_bits;
 	vm->va_bits = vm_guest_mode_params[vm->mode].va_bits;
@@ -266,6 +267,7 @@ struct kvm_vm *____vm_create(struct vm_shape shape)
 	case VM_MODE_PXXV48_4K:
 #ifdef __x86_64__
 		kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
+		kvm_init_vm_address_properties(vm);
 		/*
 		 * Ignore KVM support for 5-level paging (vm->va_bits == 57),
 		 * it doesn't take effect unless a CR4.LA57 is set, which it
@@ -666,6 +668,7 @@ static void __vm_mem_region_delete(struct kvm_vm *vm,
 	vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region);
 
 	sparsebit_free(&region->unused_phy_pages);
+	sparsebit_free(&region->protected_phy_pages);
 	ret = munmap(region->mmap_start, region->mmap_size);
 	TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
 	if (region->fd >= 0) {
@@ -1047,6 +1050,8 @@ void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
 	}
 
 	region->unused_phy_pages = sparsebit_alloc();
+	if (vm_arch_has_protected_memory(vm))
+		region->protected_phy_pages = sparsebit_alloc();
 	sparsebit_set_num(region->unused_phy_pages,
 		guest_paddr >> vm->page_shift, npages);
 	region->region.slot = slot;
@@ -1377,15 +1382,17 @@ va_found:
 	return pgidx_start * vm->page_size;
 }
 
-vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
-			    enum kvm_mem_region_type type)
+static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz,
+				     vm_vaddr_t vaddr_min,
+				     enum kvm_mem_region_type type,
+				     bool protected)
 {
 	uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
 
 	virt_pgd_alloc(vm);
-	vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages,
-					      KVM_UTIL_MIN_PFN * vm->page_size,
-					      vm->memslots[type]);
+	vm_paddr_t paddr = __vm_phy_pages_alloc(vm, pages,
+						KVM_UTIL_MIN_PFN * vm->page_size,
+						vm->memslots[type], protected);
 
 	/*
 	 * Find an unused range of virtual page addresses of at least
@@ -1405,6 +1412,20 @@ vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
 	return vaddr_start;
 }
 
+vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
+			    enum kvm_mem_region_type type)
+{
+	return ____vm_vaddr_alloc(vm, sz, vaddr_min, type,
+				  vm_arch_has_protected_memory(vm));
+}
+
+vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
+				 vm_vaddr_t vaddr_min,
+				 enum kvm_mem_region_type type)
+{
+	return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, false);
+}
+
 /*
  * VM Virtual Address Allocate
  *
@@ -1527,6 +1548,8 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
 {
 	struct userspace_mem_region *region;
 
+	gpa = vm_untag_gpa(vm, gpa);
+
 	region = userspace_mem_region_find(vm, gpa, gpa);
 	if (!region) {
 		TEST_FAIL("No vm physical memory at 0x%lx", gpa);
@@ -1873,6 +1896,10 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 			region->host_mem);
 		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
 		sparsebit_dump(stream, region->unused_phy_pages, 0);
+		if (region->protected_phy_pages) {
+			fprintf(stream, "%*sprotected_phy_pages: ", indent + 2, "");
+			sparsebit_dump(stream, region->protected_phy_pages, 0);
+		}
 	}
 	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
 	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
@@ -1974,6 +2001,7 @@ const char *exit_reason_str(unsigned int exit_reason)
 *   num - number of pages
 *   paddr_min - Physical address minimum
 *   memslot - Memory region to allocate page from
+*   protected - True if the pages will be used as protected/private memory
 *
 * Output Args: None
 *
@@ -1985,8 +2013,9 @@ const char *exit_reason_str(unsigned int exit_reason)
 * and their base address is returned. A TEST_ASSERT failure occurs if
 * not enough pages are available at or above paddr_min.
 */
-vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-			      vm_paddr_t paddr_min, uint32_t memslot)
+vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+				vm_paddr_t paddr_min, uint32_t memslot,
+				bool protected)
 {
 	struct userspace_mem_region *region;
 	sparsebit_idx_t pg, base;
@@ -1999,8 +2028,10 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
 		paddr_min, vm->page_size);
 
 	region = memslot2region(vm, memslot);
-	base = pg = paddr_min >> vm->page_shift;
+	TEST_ASSERT(!protected || region->protected_phy_pages,
+		    "Region doesn't support protected memory");
 
+	base = pg = paddr_min >> vm->page_shift;
 	do {
 		for (; pg < base + num; ++pg) {
 			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
@@ -2019,8 +2050,11 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
 		abort();
 	}
 
-	for (pg = base; pg < base + num; ++pg)
+	for (pg = base; pg < base + num; ++pg) {
 		sparsebit_clear(region->unused_phy_pages, pg);
+		if (protected)
+			sparsebit_set(region->protected_phy_pages, pg);
+	}
 
 	return base * vm->page_size;
 }
@@ -2224,3 +2258,18 @@ void __attribute((constructor)) kvm_selftest_init(void)
 
 	kvm_selftest_arch_init();
 }
+
+bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr)
+{
+	sparsebit_idx_t pg = 0;
+	struct userspace_mem_region *region;
+
+	if (!vm_arch_has_protected_memory(vm))
+		return false;
+
+	region = userspace_mem_region_find(vm, paddr, paddr);
+	TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr);
+
+	pg = paddr >> vm->page_shift;
+	return sparsebit_is_set(region->protected_phy_pages, pg);
+}
tools/testing/selftests/kvm/lib/riscv/processor.c

@@ -289,8 +289,12 @@ static void __aligned(16) guest_unexp_trap(void)
 			  0, 0, 0, 0, 0, 0);
 }
 
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-				  void *guest_code)
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
+{
+	vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code);
+}
+
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 {
 	int r;
 	size_t stack_size;
@@ -324,7 +328,6 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 
 	/* Setup stack pointer and program counter of guest */
 	vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size);
-	vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code);
 
 	/* Setup sscratch for guest_get_vcpuid() */
 	vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(sscratch), vcpu_id);
tools/testing/selftests/kvm/lib/s390x/processor.c

@@ -155,15 +155,18 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 	virt_dump_region(stream, vm, indent, vm->pgd);
 }
 
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-				  void *guest_code)
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
+{
+	vcpu->run->psw_addr = (uintptr_t)guest_code;
+}
+
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 {
 	size_t stack_size =  DEFAULT_STACK_PGS * getpagesize();
 	uint64_t stack_vaddr;
 	struct kvm_regs regs;
 	struct kvm_sregs sregs;
 	struct kvm_vcpu *vcpu;
-	struct kvm_run *run;
 
 	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
 		    vm->page_size);
@@ -184,9 +187,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 	sregs.crs[1] = vm->pgd | 0xf;		/* Primary region table */
 	vcpu_sregs_set(vcpu, &sregs);
 
-	run = vcpu->run;
-	run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */
-	run->psw_addr = (uintptr_t)guest_code;
+	vcpu->run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */
 
 	return vcpu;
 }
tools/testing/selftests/kvm/lib/sparsebit.c

@@ -202,7 +202,7 @@ static sparsebit_num_t node_num_set(struct node *nodep)
 /* Returns a pointer to the node that describes the
  * lowest bit index.
  */
-static struct node *node_first(struct sparsebit *s)
+static struct node *node_first(const struct sparsebit *s)
 {
 	struct node *nodep;
 
@@ -216,7 +216,7 @@ static struct node *node_first(const struct sparsebit *s)
  * lowest bit index > the index of the node pointed to by np.
  * Returns NULL if no node with a higher index exists.
  */
-static struct node *node_next(struct sparsebit *s, struct node *np)
+static struct node *node_next(const struct sparsebit *s, struct node *np)
 {
 	struct node *nodep = np;
 
@@ -244,7 +244,7 @@ static struct node *node_next(const struct sparsebit *s, struct node *np)
 * highest index < the index of the node pointed to by np.
 * Returns NULL if no node with a lower index exists.
 */
-static struct node *node_prev(struct sparsebit *s, struct node *np)
+static struct node *node_prev(const struct sparsebit *s, struct node *np)
 {
 	struct node *nodep = np;
 
@@ -273,7 +273,7 @@ static struct node *node_prev(const struct sparsebit *s, struct node *np)
 * subtree and duplicates the bit settings to the newly allocated nodes.
 * Returns the newly allocated copy of subtree.
 */
-static struct node *node_copy_subtree(struct node *subtree)
+static struct node *node_copy_subtree(const struct node *subtree)
 {
 	struct node *root;
 
@@ -307,7 +307,7 @@ static struct node *node_copy_subtree(const struct node *subtree)
 * index is within the bits described by the mask bits or the number of
 * contiguous bits set after the mask.  Returns NULL if there is no such node.
 */
-static struct node *node_find(struct sparsebit *s, sparsebit_idx_t idx)
+static struct node *node_find(const struct sparsebit *s, sparsebit_idx_t idx)
 {
 	struct node *nodep;
 
@@ -393,7 +393,7 @@ static struct node *node_add(struct sparsebit *s, sparsebit_idx_t idx)
 }
 
 /* Returns whether all the bits in the sparsebit array are set. */
-bool sparsebit_all_set(struct sparsebit *s)
+bool sparsebit_all_set(const struct sparsebit *s)
 {
 	/*
 	 * If any nodes there must be at least one bit set.  Only case
@@ -775,7 +775,7 @@ static void node_reduce(struct sparsebit *s, struct node *nodep)
 /* Returns whether the bit at the index given by idx, within the
 * sparsebit array is set or not.
 */
-bool sparsebit_is_set(struct sparsebit *s, sparsebit_idx_t idx)
+bool sparsebit_is_set(const struct sparsebit *s, sparsebit_idx_t idx)
 {
 	struct node *nodep;
 
@@ -921,7 +921,7 @@ static inline sparsebit_idx_t node_first_clear(struct node *nodep, int start)
 * used by test cases after they detect an unexpected condition, as a means
 * to capture diagnostic information.
 */
-static void sparsebit_dump_internal(FILE *stream, struct sparsebit *s,
+static void sparsebit_dump_internal(FILE *stream, const struct sparsebit *s,
 				    unsigned int indent)
 {
 	/* Dump the contents of s */
@@ -969,7 +969,7 @@ void sparsebit_free(struct sparsebit **sbitp)
 * sparsebit_alloc().  It can though already have bits set, which
 * if different from src will be cleared.
 */
-void sparsebit_copy(struct sparsebit *d, struct sparsebit *s)
+void sparsebit_copy(struct sparsebit *d, const struct sparsebit *s)
 {
 	/* First clear any bits already set in the destination */
 	sparsebit_clear_all(d);
@@ -981,7 +981,7 @@ void sparsebit_copy(struct sparsebit *d, const struct sparsebit *s)
 }
 
 /* Returns whether num consecutive bits starting at idx are all set. */
-bool sparsebit_is_set_num(struct sparsebit *s,
+bool sparsebit_is_set_num(const struct sparsebit *s,
 			  sparsebit_idx_t idx, sparsebit_num_t num)
 {
 	sparsebit_idx_t next_cleared;
@@ -1005,14 +1005,14 @@ bool sparsebit_is_set_num(const struct sparsebit *s,
 }
 
 /* Returns whether the bit at the index given by idx. */
-bool sparsebit_is_clear(struct sparsebit *s,
+bool sparsebit_is_clear(const struct sparsebit *s,
 			sparsebit_idx_t idx)
 {
 	return !sparsebit_is_set(s, idx);
 }
 
 /* Returns whether num consecutive bits starting at idx are all cleared. */
-bool sparsebit_is_clear_num(struct sparsebit *s,
+bool sparsebit_is_clear_num(const struct sparsebit *s,
 			    sparsebit_idx_t idx, sparsebit_num_t num)
 {
 	sparsebit_idx_t next_set;
@@ -1041,13 +1041,13 @@ bool sparsebit_is_clear_num(const struct sparsebit *s,
 * value.  Use sparsebit_any_set(), instead of sparsebit_num_set() > 0,
 * to determine if the sparsebit array has any bits set.
 */
-sparsebit_num_t sparsebit_num_set(struct sparsebit *s)
+sparsebit_num_t sparsebit_num_set(const struct sparsebit *s)
 {
 	return s->num_set;
 }
 
 /* Returns whether any bit is set in the sparsebit array. */
-bool sparsebit_any_set(struct sparsebit *s)
+bool sparsebit_any_set(const struct sparsebit *s)
 {
 	/*
 	 * Nodes only describe set bits.  If any nodes then there
@@ -1070,20 +1070,20 @@ bool sparsebit_any_set(const struct sparsebit *s)
 }
 
 /* Returns whether all the bits in the sparsebit array are cleared. */
-bool sparsebit_all_clear(struct sparsebit *s)
+bool sparsebit_all_clear(const struct sparsebit *s)
 {
 	return !sparsebit_any_set(s);
 }
 
 /* Returns whether all the bits in the sparsebit array are set. */
-bool sparsebit_any_clear(struct sparsebit *s)
+bool sparsebit_any_clear(const struct sparsebit *s)
 {
 	return !sparsebit_all_set(s);
 }
 
 /* Returns the index of the first set bit.  Abort if no bits are set.
 */
-sparsebit_idx_t sparsebit_first_set(struct sparsebit *s)
+sparsebit_idx_t sparsebit_first_set(const struct sparsebit *s)
 {
 	struct node *nodep;
 
@@ -1097,7 +1097,7 @@ sparsebit_idx_t sparsebit_first_set(const struct sparsebit *s)
 /* Returns the index of the first cleared bit.  Abort if
 * no bits are cleared.
 */
-sparsebit_idx_t sparsebit_first_clear(struct sparsebit *s)
+sparsebit_idx_t sparsebit_first_clear(const struct sparsebit *s)
 {
 	struct node *nodep1, *nodep2;
 
@@ -1151,7 +1151,7 @@ sparsebit_idx_t sparsebit_first_clear(const struct sparsebit *s)
 /* Returns index of next bit set within s after the index given by prev.
 * Returns 0 if there are no bits after prev that are set.
 */
-sparsebit_idx_t sparsebit_next_set(struct sparsebit *s,
+sparsebit_idx_t sparsebit_next_set(const struct sparsebit *s,
 				   sparsebit_idx_t prev)
 {
 	sparsebit_idx_t lowest_possible = prev + 1;
@@ -1244,7 +1244,7 @@ sparsebit_idx_t sparsebit_next_set(const struct sparsebit *s,
 /* Returns index of next bit cleared within s after the index given by prev.
 * Returns 0 if there are no bits after prev that are cleared.
 */
-sparsebit_idx_t sparsebit_next_clear(struct sparsebit *s,
+sparsebit_idx_t sparsebit_next_clear(const struct sparsebit *s,
 				     sparsebit_idx_t prev)
 {
 	sparsebit_idx_t lowest_possible = prev + 1;
@@ -1300,7 +1300,7 @@ sparsebit_idx_t sparsebit_next_clear(const struct sparsebit *s,
 * and returns the index of the first sequence of num consecutively set
 * bits.  Returns a value of 0 of no such sequence exists.
 */
-sparsebit_idx_t sparsebit_next_set_num(struct sparsebit *s,
+sparsebit_idx_t sparsebit_next_set_num(const struct sparsebit *s,
 				       sparsebit_idx_t start, sparsebit_num_t num)
 {
 	sparsebit_idx_t idx;
@@ -1335,7 +1335,7 @@ sparsebit_idx_t sparsebit_next_set_num(const struct sparsebit *s,
 * and returns the index of the first sequence of num consecutively cleared
 * bits.  Returns a value of 0 of no such sequence exists.
 */
-sparsebit_idx_t sparsebit_next_clear_num(struct sparsebit *s,
+sparsebit_idx_t sparsebit_next_clear_num(const struct sparsebit *s,
 					 sparsebit_idx_t start, sparsebit_num_t num)
 {
 	sparsebit_idx_t idx;
@@ -1583,7 +1583,7 @@ static size_t display_range(FILE *stream, sparsebit_idx_t low,
 * contiguous bits.  This is done because '-' is used to specify command-line
 * options, and sometimes ranges are specified as command-line arguments.
 */
-void sparsebit_dump(FILE *stream, struct sparsebit *s,
+void sparsebit_dump(FILE *stream, const struct sparsebit *s,
 		    unsigned int indent)
 {
 	size_t current_line_len = 0;
@@ -1681,7 +1681,7 @@ void sparsebit_dump(FILE *stream, const struct sparsebit *s,
 * s.  On error, diagnostic information is printed to stderr and
 * abort is called.
 */
-void sparsebit_validate_internal(struct sparsebit *s)
+void sparsebit_validate_internal(const struct sparsebit *s)
 {
 	bool error_detected = false;
 	struct node *nodep, *prev = NULL;
tools/testing/selftests/kvm/lib/ucall_common.c

@@ -29,7 +29,8 @@ void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
 	vm_vaddr_t vaddr;
 	int i;
 
-	vaddr = __vm_vaddr_alloc(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR, MEM_REGION_DATA);
+	vaddr = vm_vaddr_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR,
+				      MEM_REGION_DATA);
 	hdr = (struct ucall_header *)addr_gva2hva(vm, vaddr);
 	memset(hdr, 0, sizeof(*hdr));
 
tools/testing/selftests/kvm/lib/x86_64/processor.c

@@ -9,6 +9,7 @@
 #include "test_util.h"
 #include "kvm_util.h"
 #include "processor.h"
+#include "sev.h"
 
 #ifndef NUM_INTERRUPTS
 #define NUM_INTERRUPTS 256
@@ -157,6 +158,8 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
 {
 	uint64_t *pte = virt_get_pte(vm, parent_pte, vaddr, current_level);
 
+	paddr = vm_untag_gpa(vm, paddr);
+
 	if (!(*pte & PTE_PRESENT_MASK)) {
 		*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;
 		if (current_level == target_level)
@@ -200,6 +203,8 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
 		    "Physical address beyond maximum supported,\n"
 		    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
 		    paddr, vm->max_gfn, vm->page_size);
+	TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr,
+		    "Unexpected bits in paddr: %lx", paddr);
 
 	/*
 	 * Allocate upper level page tables, if not already present.  Return
@@ -222,6 +227,15 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
 	TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
 		    "PTE already present for 4k page at vaddr: 0x%lx", vaddr);
 	*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
+
+	/*
+	 * Neither SEV nor TDX supports shared page tables, so only the final
+	 * leaf PTE needs manually set the C/S-bit.
+	 */
+	if (vm_is_gpa_protected(vm, paddr))
+		*pte |= vm->arch.c_bit;
+	else
+		*pte |= vm->arch.s_bit;
 }
 
 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
@@ -265,6 +279,9 @@ uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
 {
 	uint64_t *pml4e, *pdpe, *pde;
 
+	TEST_ASSERT(!vm->arch.is_pt_protected,
+		    "Walking page tables of protected guests is impossible");
+
 	TEST_ASSERT(*level >= PG_LEVEL_NONE && *level < PG_LEVEL_NUM,
 		    "Invalid PG_LEVEL_* '%d'", *level);
 
@@ -496,7 +513,7 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
 	 * No need for a hugepage mask on the PTE, x86-64 requires the "unused"
 	 * address bits to be zero.
 	 */
-	return PTE_GET_PA(*pte) | (gva & ~HUGEPAGE_MASK(level));
+	return vm_untag_gpa(vm, PTE_GET_PA(*pte)) | (gva & ~HUGEPAGE_MASK(level));
 }
 
 static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt)
@@ -560,10 +577,23 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm)
 	vm_create_irqchip(vm);
 	sync_global_to_guest(vm, host_cpu_is_intel);
 	sync_global_to_guest(vm, host_cpu_is_amd);
+
+	if (vm->subtype == VM_SUBTYPE_SEV)
+		sev_vm_init(vm);
+	else if (vm->subtype == VM_SUBTYPE_SEV_ES)
+		sev_es_vm_init(vm);
 }
 
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-				  void *guest_code)
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
+{
+	struct kvm_regs regs;
+
+	vcpu_regs_get(vcpu, &regs);
+	regs.rip = (unsigned long) guest_code;
+	vcpu_regs_set(vcpu, &regs);
+}
+
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 {
 	struct kvm_mp_state mp_state;
 	struct kvm_regs regs;
@@ -597,7 +627,6 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 	vcpu_regs_get(vcpu, &regs);
 	regs.rflags = regs.rflags | 0x2;
 	regs.rsp = stack_vaddr;
-	regs.rip = (unsigned long) guest_code;
 	vcpu_regs_set(vcpu, &regs);
 
 	/* Setup the MP state */
@@ -1041,6 +1070,14 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
 	}
 }
 
+void kvm_init_vm_address_properties(struct kvm_vm *vm)
+{
+	if (vm->subtype == VM_SUBTYPE_SEV || vm->subtype == VM_SUBTYPE_SEV_ES) {
+		vm->arch.c_bit = BIT_ULL(this_cpu_property(X86_PROPERTY_SEV_C_BIT));
+		vm->gpa_tag_mask = vm->arch.c_bit;
+	}
+}
+
 static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
 			  int dpl, unsigned short selector)
 {
tools/testing/selftests/kvm/lib/x86_64/sev.c (new file, 114 lines)

@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <stdint.h>
+#include <stdbool.h>
+
+#include "sev.h"
+
+/*
+ * sparsebit_next_clear() can return 0 if [x, 2**64-1] are all set, and the
+ * -1 would then cause an underflow back to 2**64 - 1. This is expected and
+ * correct.
+ *
+ * If the last range in the sparsebit is [x, y] and we try to iterate,
+ * sparsebit_next_set() will return 0, and sparsebit_next_clear() will try
+ * and find the first range, but that's correct because the condition
+ * expression would cause us to quit the loop.
+ */
+static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region)
+{
+	const struct sparsebit *protected_phy_pages = region->protected_phy_pages;
+	const vm_paddr_t gpa_base = region->region.guest_phys_addr;
+	const sparsebit_idx_t lowest_page_in_region = gpa_base >> vm->page_shift;
+	sparsebit_idx_t i, j;
+
+	if (!sparsebit_any_set(protected_phy_pages))
+		return;
+
+	sev_register_encrypted_memory(vm, region);
+
+	sparsebit_for_each_set_range(protected_phy_pages, i, j) {
+		const uint64_t size = (j - i + 1) * vm->page_size;
+		const uint64_t offset = (i - lowest_page_in_region) * vm->page_size;
+
+		sev_launch_update_data(vm, gpa_base + offset, size);
+	}
+}
+
+void sev_vm_launch(struct kvm_vm *vm, uint32_t policy)
+{
+	struct kvm_sev_launch_start launch_start = {
+		.policy = policy,
+	};
+	struct userspace_mem_region *region;
+	struct kvm_sev_guest_status status;
+	int ctr;
+
+	vm_sev_ioctl(vm, KVM_SEV_LAUNCH_START, &launch_start);
+	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
+
+	TEST_ASSERT_EQ(status.policy, policy);
+	TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_LAUNCH_UPDATE);
+
+	hash_for_each(vm->regions.slot_hash, ctr, region, slot_node)
+		encrypt_region(vm, region);
+
+	if (policy & SEV_POLICY_ES)
+		vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
+
+	vm->arch.is_pt_protected = true;
+}
+
+void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement)
+{
+	struct kvm_sev_launch_measure launch_measure;
+	struct kvm_sev_guest_status guest_status;
+
+	launch_measure.len = 256;
+	launch_measure.uaddr = (__u64)measurement;
+	vm_sev_ioctl(vm, KVM_SEV_LAUNCH_MEASURE, &launch_measure);
+
+	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &guest_status);
+	TEST_ASSERT_EQ(guest_status.state, SEV_GUEST_STATE_LAUNCH_SECRET);
+}
+
+void sev_vm_launch_finish(struct kvm_vm *vm)
+{
+	struct kvm_sev_guest_status status;
+
+	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
+	TEST_ASSERT(status.state == SEV_GUEST_STATE_LAUNCH_UPDATE ||
+		    status.state == SEV_GUEST_STATE_LAUNCH_SECRET,
+		    "Unexpected guest state: %d", status.state);
+
+	vm_sev_ioctl(vm, KVM_SEV_LAUNCH_FINISH, NULL);
+
+	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
+	TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_RUNNING);
+}
+
+struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t policy, void *guest_code,
+					   struct kvm_vcpu **cpu)
+{
+	struct vm_shape shape = {
+		.type = VM_TYPE_DEFAULT,
+		.mode = VM_MODE_DEFAULT,
+		.subtype = policy & SEV_POLICY_ES ? VM_SUBTYPE_SEV_ES :
+						    VM_SUBTYPE_SEV,
+	};
+	struct kvm_vm *vm;
+	struct kvm_vcpu *cpus[1];
+	uint8_t measurement[512];
+
+	vm = __vm_create_with_vcpus(shape, 1, 0, guest_code, cpus);
+	*cpu = cpus[0];
+
+	sev_vm_launch(vm, policy);
+
+	/* TODO: Validate the measurement is as expected. */
+	sev_vm_launch_measure(vm, measurement);
+
+	sev_vm_launch_finish(vm);
+
+	return vm;
+}
tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c

@@ -9,6 +9,7 @@
 #include <linux/stringify.h>
 #include <stdint.h>
 
+#include "kvm_test_harness.h"
 #include "apic.h"
 #include "test_util.h"
 #include "kvm_util.h"
@@ -83,6 +84,8 @@ static void guest_main(void)
 	GUEST_DONE();
 }
 
+KVM_ONE_VCPU_TEST_SUITE(fix_hypercall);
+
 static void enter_guest(struct kvm_vcpu *vcpu)
 {
 	struct kvm_run *run = vcpu->run;
@@ -103,14 +106,11 @@ static void enter_guest(struct kvm_vcpu *vcpu)
 	}
 }
 
-static void test_fix_hypercall(bool disable_quirk)
+static void test_fix_hypercall(struct kvm_vcpu *vcpu, bool disable_quirk)
 {
-	struct kvm_vcpu *vcpu;
-	struct kvm_vm *vm;
+	struct kvm_vm *vm = vcpu->vm;
 
-	vm = vm_create_with_one_vcpu(&vcpu, guest_main);
-
-	vm_init_descriptor_tables(vcpu->vm);
+	vm_init_descriptor_tables(vm);
 	vcpu_init_descriptor_tables(vcpu);
 	vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler);
@@ -126,10 +126,19 @@ static void test_fix_hypercall(struct kvm_vcpu *vcpu, bool disable_quirk)
 	enter_guest(vcpu);
 }
 
-int main(void)
+KVM_ONE_VCPU_TEST(fix_hypercall, enable_quirk, guest_main)
 {
-	TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN);
+	test_fix_hypercall(vcpu, false);
+}
+
+KVM_ONE_VCPU_TEST(fix_hypercall, disable_quirk, guest_main)
+{
+	test_fix_hypercall(vcpu, true);
+}
+
+int main(int argc, char *argv[])
+{
+	TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN);
 
-	test_fix_hypercall(false);
-	test_fix_hypercall(true);
+	return test_harness_run(argc, argv);
 }
tools/testing/selftests/kvm/x86_64/private_mem_conversions_test.c

@@ -434,6 +434,8 @@ static void test_mem_conversions(enum vm_mem_backing_src_type src_type, uint32_t
 
 	r = fallocate(memfd, FALLOC_FL_KEEP_SIZE, 0, memfd_size);
 	TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r));
+
+	close(memfd);
 }
 
 static void usage(const char *cmd)
tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c

@@ -10,11 +10,9 @@
 #include "test_util.h"
 #include "kvm_util.h"
 #include "processor.h"
-#include "svm_util.h"
+#include "sev.h"
 #include "kselftest.h"
 
-#define SEV_POLICY_ES 0b100
-
 #define NR_MIGRATE_TEST_VCPUS 4
 #define NR_MIGRATE_TEST_VMS 3
 #define NR_LOCK_TESTING_THREADS 3
@@ -22,46 +20,24 @@
 
 bool have_sev_es;
 
-static int __sev_ioctl(int vm_fd, int cmd_id, void *data, __u32 *fw_error)
-{
-	struct kvm_sev_cmd cmd = {
-		.id = cmd_id,
-		.data = (uint64_t)data,
-		.sev_fd = open_sev_dev_path_or_exit(),
-	};
-	int ret;
-
-	ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
-	*fw_error = cmd.error;
-	return ret;
-}
-
-static void sev_ioctl(int vm_fd, int cmd_id, void *data)
-{
-	int ret;
-	__u32 fw_error;
-
-	ret = __sev_ioctl(vm_fd, cmd_id, data, &fw_error);
-	TEST_ASSERT(ret == 0 && fw_error == SEV_RET_SUCCESS,
-		    "%d failed: return code: %d, errno: %d, fw error: %d",
-		    cmd_id, ret, errno, fw_error);
-}
-
 static struct kvm_vm *sev_vm_create(bool es)
 {
 	struct kvm_vm *vm;
-	struct kvm_sev_launch_start start = { 0 };
 	int i;
 
 	vm = vm_create_barebones();
-	sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
+	if (!es)
+		sev_vm_init(vm);
+	else
+		sev_es_vm_init(vm);
+
 	for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
 		__vm_vcpu_add(vm, i);
+
+	sev_vm_launch(vm, es ? SEV_POLICY_ES : 0);
+
 	if (es)
-		start.policy |= SEV_POLICY_ES;
-	sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
-	if (es)
-		sev_ioctl(vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
+		vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
 	return vm;
 }
 
@@ -181,7 +157,7 @@ static void test_sev_migrate_parameters(void)
 	sev_vm = sev_vm_create(/* es= */ false);
 	sev_es_vm = sev_vm_create(/* es= */ true);
 	sev_es_vm_no_vmsa = vm_create_barebones();
-	sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL);
+	sev_es_vm_init(sev_es_vm_no_vmsa);
 	__vm_vcpu_add(sev_es_vm_no_vmsa, 1);
 
 	ret = __sev_migrate_from(sev_vm, sev_es_vm);
@@ -230,13 +206,13 @@ static void sev_mirror_create(struct kvm_vm *dst, struct kvm_vm *src)
 	TEST_ASSERT(!ret, "Copying context failed, ret: %d, errno: %d", ret, errno);
 }
 
-static void verify_mirror_allowed_cmds(int vm_fd)
+static void verify_mirror_allowed_cmds(struct kvm_vm *vm)
 {
 	struct kvm_sev_guest_status status;
+	int cmd_id;
 
-	for (int cmd_id = KVM_SEV_INIT; cmd_id < KVM_SEV_NR_MAX; ++cmd_id) {
+	for (cmd_id = KVM_SEV_INIT; cmd_id < KVM_SEV_NR_MAX; ++cmd_id) {
 		int ret;
 		__u32 fw_error;
 
 		/*
 		 * These commands are allowed for mirror VMs, all others are
@@ -256,14 +232,14 @@ static void verify_mirror_allowed_cmds(struct kvm_vm *vm)
 		 * These commands should be disallowed before the data
 		 * parameter is examined so NULL is OK here.
 		 */
-		ret = __sev_ioctl(vm_fd, cmd_id, NULL, &fw_error);
+		ret = __vm_sev_ioctl(vm, cmd_id, NULL);
 		TEST_ASSERT(
 			ret == -1 && errno == EINVAL,
 			"Should not be able call command: %d. ret: %d, errno: %d",
 			cmd_id, ret, errno);
 	}
 
-	sev_ioctl(vm_fd, KVM_SEV_GUEST_STATUS, &status);
+	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
 }
 
 static void test_sev_mirror(bool es)
@@ -281,9 +257,9 @@ static void test_sev_mirror(bool es)
 		__vm_vcpu_add(dst_vm, i);
 
 	if (es)
-		sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
+		vm_sev_ioctl(dst_vm, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
 
-	verify_mirror_allowed_cmds(dst_vm->fd);
+	verify_mirror_allowed_cmds(dst_vm);
 
 	kvm_vm_free(src_vm);
 	kvm_vm_free(dst_vm);
88
tools/testing/selftests/kvm/x86_64/sev_smoke_test.c
Normal file
88
tools/testing/selftests/kvm/x86_64/sev_smoke_test.c
Normal file
@ -0,0 +1,88 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
#include "linux/psp-sev.h"
#include "sev.h"


static void guest_sev_es_code(void)
{
/* TODO: Check CPUID after GHCB-based hypercall support is added. */
GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ES_ENABLED);

/*
* TODO: Add GHCB and ucall support for SEV-ES guests. For now, simply
* force "termination" to signal "done" via the GHCB MSR protocol.
*/
wrmsr(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_TERM_REQ);
__asm__ __volatile__("rep; vmmcall");
}

static void guest_sev_code(void)
{
GUEST_ASSERT(this_cpu_has(X86_FEATURE_SEV));
GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);

GUEST_DONE();
}

static void test_sev(void *guest_code, uint64_t policy)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct ucall uc;

vm = vm_sev_create_with_one_vcpu(policy, guest_code, &vcpu);

for (;;) {
vcpu_run(vcpu);

if (policy & SEV_POLICY_ES) {
TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_SYSTEM_EVENT,
"Wanted SYSTEM_EVENT, got %s",
exit_reason_str(vcpu->run->exit_reason));
TEST_ASSERT_EQ(vcpu->run->system_event.type, KVM_SYSTEM_EVENT_SEV_TERM);
TEST_ASSERT_EQ(vcpu->run->system_event.ndata, 1);
TEST_ASSERT_EQ(vcpu->run->system_event.data[0], GHCB_MSR_TERM_REQ);
break;
}

switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
continue;
case UCALL_DONE:
return;
case UCALL_ABORT:
REPORT_GUEST_ASSERT(uc);
default:
TEST_FAIL("Unexpected exit: %s",
exit_reason_str(vcpu->run->exit_reason));
}
}

kvm_vm_free(vm);
}

int main(int argc, char *argv[])
{
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SEV));

test_sev(guest_sev_code, SEV_POLICY_NO_DBG);
test_sev(guest_sev_code, 0);

if (kvm_cpu_has(X86_FEATURE_SEV_ES)) {
test_sev(guest_sev_es_code, SEV_POLICY_ES | SEV_POLICY_NO_DBG);
test_sev(guest_sev_es_code, SEV_POLICY_ES);
}

return 0;
}
|
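Two editor's notes on the new smoke test. First, the UCALL_DONE path returns from test_sev() without reaching kvm_vm_free(), so the plain-SEV variants rely on process exit to reclaim the VM; only the SEV-ES break falls through to the free. Second, the SEV-ES case asserts on the raw GHCB MSR value reported back in system_event.data[0]. The sketch below, which is not part of the commit, shows how a host-side consumer could decode that value; the field layout (GHCBInfo in bits 11:0, 0x100 meaning termination request, reason-set in bits 15:12, reason code in bits 23:16) is an assumption taken from the GHCB specification's MSR protocol, not from this series.

/* Editor's sketch: decode a GHCB MSR protocol value (layout assumed
 * from the GHCB spec, see note above). */
#include <stdint.h>
#include <stdio.h>

#define GHCB_MSR_INFO_MASK	0xfffull	/* GHCBInfo, bits 11:0 */
#define GHCB_MSR_TERM_REQ	0x100ull	/* termination request */

static void decode_ghcb_msr(uint64_t val)
{
	if ((val & GHCB_MSR_INFO_MASK) == GHCB_MSR_TERM_REQ)
		printf("termination request: reason set %u, reason code %u\n",
		       (unsigned)((val >> 12) & 0xf),
		       (unsigned)((val >> 16) & 0xff));
	else
		printf("GHCBInfo 0x%x\n", (unsigned)(val & GHCB_MSR_INFO_MASK));
}

int main(void)
{
	decode_ghcb_msr(GHCB_MSR_TERM_REQ);	/* the value the smoke test expects */
	return 0;
}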
@ -17,6 +17,7 @@
#include <sys/ioctl.h>
#include <pthread.h>

#include "kvm_test_harness.h"
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
@ -41,6 +42,8 @@ void guest_code(void)
: "rax", "rbx");
}

KVM_ONE_VCPU_TEST_SUITE(sync_regs_test);

static void compare_regs(struct kvm_regs *left, struct kvm_regs *right)
{
#define REG_COMPARE(reg) \
@ -152,18 +155,15 @@ static noinline void *race_sregs_cr4(void *arg)
return NULL;
}

static void race_sync_regs(void *racer)
static void race_sync_regs(struct kvm_vcpu *vcpu, void *racer)
{
const time_t TIMEOUT = 2; /* seconds, roughly */
struct kvm_x86_state *state;
struct kvm_translation tr;
struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vm *vm;
pthread_t thread;
time_t t;

vm = vm_create_with_one_vcpu(&vcpu, guest_code);
run = vcpu->run;

run->kvm_valid_regs = KVM_SYNC_X86_SREGS;
@ -205,26 +205,12 @@ static void race_sync_regs(void *racer)
TEST_ASSERT_EQ(pthread_join(thread, NULL), 0);

kvm_x86_state_cleanup(state);
kvm_vm_free(vm);
}

int main(int argc, char *argv[])
KVM_ONE_VCPU_TEST(sync_regs_test, read_invalid, guest_code)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
struct kvm_regs regs;
struct kvm_sregs sregs;
struct kvm_vcpu_events events;
int rv, cap;

cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
TEST_REQUIRE((cap & TEST_SYNC_FIELDS) == TEST_SYNC_FIELDS);
TEST_REQUIRE(!(cap & INVALID_SYNC_FIELD));

vm = vm_create_with_one_vcpu(&vcpu, guest_code);

run = vcpu->run;
struct kvm_run *run = vcpu->run;
int rv;

/* Request reading invalid register set from VCPU. */
run->kvm_valid_regs = INVALID_SYNC_FIELD;
@ -240,6 +226,12 @@ int main(int argc, char *argv[])
"Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d",
rv);
run->kvm_valid_regs = 0;
}

KVM_ONE_VCPU_TEST(sync_regs_test, set_invalid, guest_code)
{
struct kvm_run *run = vcpu->run;
int rv;

/* Request setting invalid register set into VCPU. */
run->kvm_dirty_regs = INVALID_SYNC_FIELD;
@ -255,11 +247,19 @@ int main(int argc, char *argv[])
"Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d",
rv);
run->kvm_dirty_regs = 0;
}

KVM_ONE_VCPU_TEST(sync_regs_test, req_and_verify_all_valid, guest_code)
{
struct kvm_run *run = vcpu->run;
struct kvm_vcpu_events events;
struct kvm_sregs sregs;
struct kvm_regs regs;

/* Request and verify all valid register sets. */
/* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */
run->kvm_valid_regs = TEST_SYNC_FIELDS;
rv = _vcpu_run(vcpu);
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

vcpu_regs_get(vcpu, &regs);
@ -270,6 +270,19 @@ int main(int argc, char *argv[])

vcpu_events_get(vcpu, &events);
compare_vcpu_events(&events, &run->s.regs.events);
}

KVM_ONE_VCPU_TEST(sync_regs_test, set_and_verify_various, guest_code)
{
struct kvm_run *run = vcpu->run;
struct kvm_vcpu_events events;
struct kvm_sregs sregs;
struct kvm_regs regs;

/* Run once to get register set */
run->kvm_valid_regs = TEST_SYNC_FIELDS;
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

/* Set and verify various register values. */
run->s.regs.regs.rbx = 0xBAD1DEA;
@ -278,7 +291,7 @@ int main(int argc, char *argv[])

run->kvm_valid_regs = TEST_SYNC_FIELDS;
run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;
rv = _vcpu_run(vcpu);
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(run->s.regs.regs.rbx == 0xBAD1DEA + 1,
"rbx sync regs value incorrect 0x%llx.",
@ -295,6 +308,11 @@ int main(int argc, char *argv[])

vcpu_events_get(vcpu, &events);
compare_vcpu_events(&events, &run->s.regs.events);
}

KVM_ONE_VCPU_TEST(sync_regs_test, clear_kvm_dirty_regs_bits, guest_code)
{
struct kvm_run *run = vcpu->run;

/* Clear kvm_dirty_regs bits, verify new s.regs values are
* overwritten with existing guest values.
@ -302,11 +320,22 @@ int main(int argc, char *argv[])
run->kvm_valid_regs = TEST_SYNC_FIELDS;
run->kvm_dirty_regs = 0;
run->s.regs.regs.rbx = 0xDEADBEEF;
rv = _vcpu_run(vcpu);
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(run->s.regs.regs.rbx != 0xDEADBEEF,
"rbx sync regs value incorrect 0x%llx.",
run->s.regs.regs.rbx);
}

KVM_ONE_VCPU_TEST(sync_regs_test, clear_kvm_valid_and_dirty_regs, guest_code)
{
struct kvm_run *run = vcpu->run;
struct kvm_regs regs;

/* Run once to get register set */
run->kvm_valid_regs = TEST_SYNC_FIELDS;
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

/* Clear kvm_valid_regs bits and kvm_dirty_bits.
* Verify s.regs values are not overwritten with existing guest values
@ -315,9 +344,10 @@ int main(int argc, char *argv[])
run->kvm_valid_regs = 0;
run->kvm_dirty_regs = 0;
run->s.regs.regs.rbx = 0xAAAA;
vcpu_regs_get(vcpu, &regs);
regs.rbx = 0xBAC0;
vcpu_regs_set(vcpu, &regs);
rv = _vcpu_run(vcpu);
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(run->s.regs.regs.rbx == 0xAAAA,
"rbx sync regs value incorrect 0x%llx.",
@ -326,6 +356,17 @@ int main(int argc, char *argv[])

TEST_ASSERT(regs.rbx == 0xBAC0 + 1,
"rbx guest value incorrect 0x%llx.",
regs.rbx);
}

KVM_ONE_VCPU_TEST(sync_regs_test, clear_kvm_valid_regs_bits, guest_code)
{
struct kvm_run *run = vcpu->run;
struct kvm_regs regs;

/* Run once to get register set */
run->kvm_valid_regs = TEST_SYNC_FIELDS;
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

/* Clear kvm_valid_regs bits. Verify s.regs values are not overwritten
* with existing guest values but that guest values are overwritten
@ -334,7 +375,7 @@ int main(int argc, char *argv[])
run->kvm_valid_regs = 0;
run->kvm_dirty_regs = TEST_SYNC_FIELDS;
run->s.regs.regs.rbx = 0xBBBB;
rv = _vcpu_run(vcpu);
vcpu_run(vcpu);
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
TEST_ASSERT(run->s.regs.regs.rbx == 0xBBBB,
"rbx sync regs value incorrect 0x%llx.",
@ -343,12 +384,30 @@ int main(int argc, char *argv[])
TEST_ASSERT(regs.rbx == 0xBBBB + 1,
"rbx guest value incorrect 0x%llx.",
regs.rbx);

kvm_vm_free(vm);

race_sync_regs(race_sregs_cr4);
race_sync_regs(race_events_exc);
race_sync_regs(race_events_inj_pen);

return 0;
}

KVM_ONE_VCPU_TEST(sync_regs_test, race_cr4, guest_code)
{
race_sync_regs(vcpu, race_sregs_cr4);
}

KVM_ONE_VCPU_TEST(sync_regs_test, race_exc, guest_code)
{
race_sync_regs(vcpu, race_events_exc);
}

KVM_ONE_VCPU_TEST(sync_regs_test, race_inj_pen, guest_code)
{
race_sync_regs(vcpu, race_events_inj_pen);
}

int main(int argc, char *argv[])
{
int cap;

cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
TEST_REQUIRE((cap & TEST_SYNC_FIELDS) == TEST_SYNC_FIELDS);
TEST_REQUIRE(!(cap & INVALID_SYNC_FIELD));

return test_harness_run(argc, argv);
}
|
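The sync_regs_test conversion above follows a fixed pattern that repeats in the files below: a single KVM_ONE_VCPU_TEST_SUITE() declaration, one KVM_ONE_VCPU_TEST() fixture per former subtest, and a main() reduced to capability checks plus test_harness_run(). Here is an editor's sketch of a minimal converted test, assuming the semantics implied by the conversions in this series: each fixture receives a fresh one-vCPU VM running the named guest function, exposed to the body as "vcpu". The suite and test names ("example", "smoke") are illustrative only.

/* Editor's sketch of the conversion pattern; not part of the commit. */
#include "kvm_test_harness.h"
#include "test_util.h"
#include "kvm_util.h"

static void guest_code(void)
{
	GUEST_DONE();	/* signal completion via ucall */
}

/* One suite per binary; the name feeds the TAP output. */
KVM_ONE_VCPU_TEST_SUITE(example);

/* The body receives "vcpu", backed by a fresh VM running guest_code. */
KVM_ONE_VCPU_TEST(example, smoke, guest_code)
{
	struct ucall uc;

	vcpu_run(vcpu);
	TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_DONE);
}

int main(int argc, char *argv[])
{
	/* Capability checks stay in main(); the harness runs the tests. */
	return test_harness_run(argc, argv);
}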
@ -8,6 +8,7 @@
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <sys/ioctl.h>

#include "kvm_test_harness.h"
#include "test_util.h"
#include "kvm_util.h"
#include "vmx.h"
@ -527,13 +528,12 @@ static void run_guest_then_process_ucall_done(struct kvm_vcpu *vcpu)
process_ucall_done(vcpu);
}

static void test_msr_filter_allow(void)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
int rc;
KVM_ONE_VCPU_TEST_SUITE(user_msr);

vm = vm_create_with_one_vcpu(&vcpu, guest_code_filter_allow);
KVM_ONE_VCPU_TEST(user_msr, msr_filter_allow, guest_code_filter_allow)
{
struct kvm_vm *vm = vcpu->vm;
int rc;

rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
@ -585,8 +585,6 @@ static void test_msr_filter_allow(void)
} else {
printf("To run the instruction emulated tests set the module parameter 'kvm.force_emulation_prefix=1'\n");
}

kvm_vm_free(vm);
}

static int handle_ucall(struct kvm_vcpu *vcpu)
@ -646,16 +644,12 @@ static void handle_wrmsr(struct kvm_run *run)
}
}

static void test_msr_filter_deny(void)
KVM_ONE_VCPU_TEST(user_msr, msr_filter_deny, guest_code_filter_deny)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
struct kvm_vm *vm = vcpu->vm;
struct kvm_run *run = vcpu->run;
int rc;

vm = vm_create_with_one_vcpu(&vcpu, guest_code_filter_deny);
run = vcpu->run;

rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_INVAL |
@ -689,18 +683,13 @@ static void test_msr_filter_deny(void)
done:
TEST_ASSERT(msr_reads == 4, "Handled 4 rdmsr in user space");
TEST_ASSERT(msr_writes == 3, "Handled 3 wrmsr in user space");

kvm_vm_free(vm);
}

static void test_msr_permission_bitmap(void)
KVM_ONE_VCPU_TEST(user_msr, msr_permission_bitmap, guest_code_permission_bitmap)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_vm *vm = vcpu->vm;
int rc;

vm = vm_create_with_one_vcpu(&vcpu, guest_code_permission_bitmap);

rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER);
@ -715,8 +704,6 @@ static void test_msr_permission_bitmap(void)
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_gs);
run_guest_then_process_rdmsr(vcpu, MSR_GS_BASE);
run_guest_then_process_ucall_done(vcpu);

kvm_vm_free(vm);
}

#define test_user_exit_msr_ioctl(vm, cmd, arg, flag, valid_mask) \
@ -786,31 +773,18 @@ static void run_msr_filter_flag_test(struct kvm_vm *vm)
}

/* Test that attempts to write to the unused bits in a flag fails. */
static void test_user_exit_msr_flags(void)
KVM_ONE_VCPU_TEST(user_msr, user_exit_msr_flags, NULL)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;

vm = vm_create_with_one_vcpu(&vcpu, NULL);
struct kvm_vm *vm = vcpu->vm;

/* Test flags for KVM_CAP_X86_USER_SPACE_MSR. */
run_user_space_msr_flag_test(vm);

/* Test flags and range flags for KVM_X86_SET_MSR_FILTER. */
run_msr_filter_flag_test(vm);

kvm_vm_free(vm);
}

int main(int argc, char *argv[])
{
test_msr_filter_allow();

test_msr_filter_deny();

test_msr_permission_bitmap();

test_user_exit_msr_flags();

return 0;
return test_harness_run(argc, argv);
}
|
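For readers unfamiliar with the MSR-exit machinery this test exercises, the flow is: enable KVM_CAP_X86_USER_SPACE_MSR so filtered accesses exit to userspace, then install a filter with KVM_X86_SET_MSR_FILTER. Below is an editor's sketch of that sequence; the struct layout and flag names are taken from the KVM uapi documentation, not from this diff, and the helper name and MSR_GS_BASE define are illustrative (selftests normally get the latter from processor.h).

/* Editor's sketch: deny-by-default filter that lets the guest read
 * exactly one MSR; every other guest MSR access exits to userspace
 * as KVM_EXIT_X86_RDMSR/KVM_EXIT_X86_WRMSR. */
#include "kvm_util.h"

#define MSR_GS_BASE 0xc0000101	/* assumed here for self-containment */

static void enable_gs_base_read_only(struct kvm_vm *vm)
{
	static __u8 allow_bit = 0x1;	/* bit 0 covers base + 0, i.e. MSR_GS_BASE */
	struct kvm_msr_filter filter = {
		.flags = KVM_MSR_FILTER_DEFAULT_DENY,
		.ranges[0] = {
			.flags = KVM_MSR_FILTER_READ,
			.nmsrs = 1,
			.base = MSR_GS_BASE,
			.bitmap = &allow_bit,
		},
	};

	vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER);
	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter);
}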
@ -15,10 +15,11 @@

#include <linux/bitmap.h>

#include "kvm_test_harness.h"
#include "kvm_util.h"
#include "vmx.h"

union perf_capabilities {
static union perf_capabilities {
struct {
u64 lbr_format:6;
u64 pebs_trap:1;
@ -32,7 +33,7 @@ union perf_capabilities {
u64 anythread_deprecated:1;
};
u64 capabilities;
};
} host_cap;

/*
* The LBR format and most PEBS features are immutable, all other features are
@ -73,19 +74,19 @@ static void guest_code(uint64_t current_val)
GUEST_DONE();
}

KVM_ONE_VCPU_TEST_SUITE(vmx_pmu_caps);

/*
* Verify that guest WRMSRs to PERF_CAPABILITIES #GP regardless of the value
* written, that the guest always sees the userspace controlled value, and that
* PERF_CAPABILITIES is immutable after KVM_RUN.
*/
static void test_guest_wrmsr_perf_capabilities(union perf_capabilities host_cap)
KVM_ONE_VCPU_TEST(vmx_pmu_caps, guest_wrmsr_perf_capabilities, guest_code)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code);
struct ucall uc;
int r, i;

vm_init_descriptor_tables(vm);
vm_init_descriptor_tables(vcpu->vm);
vcpu_init_descriptor_tables(vcpu);

vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
@ -117,31 +118,21 @@ static void test_guest_wrmsr_perf_capabilities(union perf_capabilities host_cap)
TEST_ASSERT(!r, "Post-KVM_RUN write '0x%llx' didn't fail",
host_cap.capabilities ^ BIT_ULL(i));
}

kvm_vm_free(vm);
}

/*
* Verify KVM allows writing PERF_CAPABILITIES with all KVM-supported features
* enabled, as well as '0' (to disable all features).
*/
static void test_basic_perf_capabilities(union perf_capabilities host_cap)
KVM_ONE_VCPU_TEST(vmx_pmu_caps, basic_perf_capabilities, guest_code)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, NULL);

vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0);
vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);

kvm_vm_free(vm);
}

static void test_fungible_perf_capabilities(union perf_capabilities host_cap)
KVM_ONE_VCPU_TEST(vmx_pmu_caps, fungible_perf_capabilities, guest_code)
{
const uint64_t fungible_caps = host_cap.capabilities & ~immutable_caps.capabilities;

struct kvm_vcpu *vcpu;
struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, NULL);
int bit;

for_each_set_bit(bit, &fungible_caps, 64) {
@ -150,8 +141,6 @@ static void test_fungible_perf_capabilities(union perf_capabilities host_cap)
host_cap.capabilities & ~BIT_ULL(bit));
}
vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);

kvm_vm_free(vm);
}

/*
@ -160,14 +149,11 @@ static void test_fungible_perf_capabilities(union perf_capabilities host_cap)
* separately as they are multi-bit values, e.g. toggling or setting a single
* bit can generate a false positive without dedicated safeguards.
*/
static void test_immutable_perf_capabilities(union perf_capabilities host_cap)
KVM_ONE_VCPU_TEST(vmx_pmu_caps, immutable_perf_capabilities, guest_code)
{
const uint64_t reserved_caps = (~host_cap.capabilities |
immutable_caps.capabilities) &
~format_caps.capabilities;

struct kvm_vcpu *vcpu;
struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, NULL);
union perf_capabilities val = host_cap;
int r, bit;

@ -201,8 +187,6 @@ static void test_immutable_perf_capabilities(union perf_capabilities host_cap)
TEST_ASSERT(!r, "Bad PEBS FMT = 0x%x didn't fail, host = 0x%x",
val.pebs_format, host_cap.pebs_format);
}

kvm_vm_free(vm);
}

/*
@ -211,17 +195,13 @@ static void test_immutable_perf_capabilities(union perf_capabilities host_cap)
* LBR_TOS as those bits are writable across all uarch implementations (arch
* LBRs will need to poke a different MSR).
*/
static void test_lbr_perf_capabilities(union perf_capabilities host_cap)
KVM_ONE_VCPU_TEST(vmx_pmu_caps, lbr_perf_capabilities, guest_code)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
int r;

if (!host_cap.lbr_format)
return;

vm = vm_create_with_one_vcpu(&vcpu, NULL);

vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
vcpu_set_msr(vcpu, MSR_LBR_TOS, 7);

@ -229,14 +209,10 @@ static void test_lbr_perf_capabilities(union perf_capabilities host_cap)

r = _vcpu_set_msr(vcpu, MSR_LBR_TOS, 7);
TEST_ASSERT(!r, "Writing LBR_TOS should fail after disabling vPMU");

kvm_vm_free(vm);
}

int main(int argc, char *argv[])
{
union perf_capabilities host_cap;

TEST_REQUIRE(get_kvm_param_bool("enable_pmu"));
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM));

@ -248,9 +224,5 @@ int main(int argc, char *argv[])
TEST_ASSERT(host_cap.full_width_write,
"Full-width writes should always be supported");

test_basic_perf_capabilities(host_cap);
test_fungible_perf_capabilities(host_cap);
test_immutable_perf_capabilities(host_cap);
test_guest_wrmsr_perf_capabilities(host_cap);
test_lbr_perf_capabilities(host_cap);
return test_harness_run(argc, argv);
}
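One structural point worth noting in the vmx_pmu_caps conversion: host_cap changes from a value passed into every test function to a file-scope static. The harness fixtures take no arguments, so state that main() computes before test_harness_run() has to be globally visible. An editor's sketch of that shape, with purely illustrative names and a stand-in value in place of the real PERF_CAPABILITIES probe:

/* Editor's sketch: shared state moves to a file-scope static because
 * harness fixtures cannot take parameters. */
#include <stdint.h>
#include <stdio.h>

static uint64_t host_cap;	/* set once in main(), read by every test */

static void some_test(void)
{
	printf("testing against host caps 0x%llx\n",
	       (unsigned long long)host_cap);
}

int main(void)
{
	host_cap = 0x2030;	/* stand-in for reading MSR_IA32_PERF_CAPABILITIES */
	some_test();		/* stand-in for test_harness_run() dispatch */
	return 0;
}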