selftests: KVM: avoid failures due to reserved HyperTransport region

AMD processors define an address range that is reserved by HyperTransport
and causes a failure if used for guest physical addresses.  Avoid
selftests failures by reserving those guest physical addresses; the
rules are:

- On parts with <40 physical address bits, it is fully hidden from software.

- Before Fam17h, it was always 12G just below 1T, even if there was more
RAM above this location.  In this case we simply do not use any RAM above 1T.

- On Fam17h and later, its location varies based on SME, and it sits either
just below 2^48 (no encryption) or 2^43 (encryption); a plain-C sketch of
these rules follows.
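
A minimal sketch of the three rules above, assuming 4 KiB pages; ht_first_gfn()
and its parameters are illustrative names only, not selftests API, and this is
not part of the patch itself:

/*
 * Sketch only: first GFN of the reserved HyperTransport hole under the
 * rules above, assuming 4 KiB pages.  Names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define HT_NUM_PAGES    (12ULL << (30 - PAGE_SHIFT))    /* 12 GiB of pages */

static uint64_t ht_first_gfn(unsigned int pa_bits, unsigned int family,
                             unsigned int sme_reduction_bits)
{
        uint64_t max_gfn = (1ULL << (pa_bits - PAGE_SHIFT)) - 1;

        /* Rule 1: on parts with <40 physical address bits, nothing is reserved. */
        if (pa_bits < 40)
                return max_gfn + 1;

        /* Rule 2: before Fam17h, the hole is always 12G just below 1T. */
        if (family < 0x17)
                return (1ULL << (40 - PAGE_SHIFT)) - HT_NUM_PAGES;

        /* Rule 3: Fam17h+, just below the (possibly SME-reduced) top of memory. */
        return (((1ULL << (pa_bits - PAGE_SHIFT)) - 1) >> sme_reduction_bits) - HT_NUM_PAGES;
}

int main(void)
{
        /* Example: Fam17h, 48-bit MAXPHYADDR, no SME reduction. */
        printf("HT hole starts at GFN 0x%llx\n",
               (unsigned long long)ht_first_gfn(48, 0x17, 0));
        return 0;
}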

Fixes: ef4c9f4f65 ("KVM: selftests: Fix 32-bit truncation of vm_get_max_gfn()")
Cc: stable@vger.kernel.org
Cc: David Matlack <dmatlack@google.com>
Reported-by: Maxim Levitsky <mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20210805105423.412878-1-pbonzini@redhat.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Tested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>


@@ -71,6 +71,15 @@ enum vm_guest_mode {
 #endif
 
+#if defined(__x86_64__)
+unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
+#else
+static inline unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
+{
+	return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
+}
+#endif
+
 #define MIN_PAGE_SIZE		(1U << MIN_PAGE_SHIFT)
 #define PTES_PER_MIN_PAGE	ptes_per_page(MIN_PAGE_SIZE)


@@ -302,7 +302,7 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
 		(1ULL << (vm->va_bits - 1)) >> vm->page_shift);
 
 	/* Limit physical addresses to PA-bits. */
-	vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
+	vm->max_gfn = vm_compute_max_gfn(vm);
 
 	/* Allocate and setup memory for guest. */
 	vm->vpages_mapped = sparsebit_alloc();


@@ -1431,3 +1431,71 @@ struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpui
 
 	return cpuid;
 }
+
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65
+
+static inline unsigned x86_family(unsigned int eax)
+{
+	unsigned int x86;
+
+	x86 = (eax >> 8) & 0xf;
+
+	if (x86 == 0xf)
+		x86 += (eax >> 20) & 0xff;
+
+	return x86;
+}
+
+unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
+{
+	const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
+	unsigned long ht_gfn, max_gfn, max_pfn;
+	uint32_t eax, ebx, ecx, edx, max_ext_leaf;
+
+	max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1;
+
+	/* Avoid reserved HyperTransport region on AMD processors. */
+	eax = ecx = 0;
+	cpuid(&eax, &ebx, &ecx, &edx);
+	if (ebx != X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx ||
+	    ecx != X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx ||
+	    edx != X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
+		return max_gfn;
+
+	/* On parts with <40 physical address bits, the area is fully hidden */
+	if (vm->pa_bits < 40)
+		return max_gfn;
+
+	/* Before family 17h, the HyperTransport area is just below 1T. */
+	ht_gfn = (1 << 28) - num_ht_pages;
+	eax = 1;
+	cpuid(&eax, &ebx, &ecx, &edx);
+	if (x86_family(eax) < 0x17)
+		goto done;
+
+	/*
+	 * Otherwise it's at the top of the physical address space, possibly
+	 * reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX. Use
+	 * the old conservative value if MAXPHYADDR is not enumerated.
+	 */
+	eax = 0x80000000;
+	cpuid(&eax, &ebx, &ecx, &edx);
+	max_ext_leaf = eax;
+	if (max_ext_leaf < 0x80000008)
+		goto done;
+
+	eax = 0x80000008;
+	cpuid(&eax, &ebx, &ecx, &edx);
+	max_pfn = (1ULL << ((eax & 0xff) - vm->page_shift)) - 1;
+	if (max_ext_leaf >= 0x8000001f) {
+		eax = 0x8000001f;
+		cpuid(&eax, &ebx, &ecx, &edx);
+		max_pfn >>= (ebx >> 6) & 0x3f;
+	}
+
+	ht_gfn = max_pfn - num_ht_pages;
+done:
+	return min(max_gfn, ht_gfn - 1);
+}
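
To make the arithmetic in vm_compute_max_gfn() concrete, here is a small
standalone check (numbers mine, not from the patch) for a family 17h part
with 48 physical address bits, no SME reduction, and 4 KiB pages:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        const unsigned int page_shift = 12;
        const uint64_t num_ht_pages = 12ULL << (30 - page_shift);  /* 0x300000 pages */
        const uint64_t max_gfn = (1ULL << (48 - page_shift)) - 1;  /* 0xfffffffff */
        const uint64_t max_pfn = (1ULL << (48 - page_shift)) - 1;  /* MAXPHYADDR = 48, no SME shift */
        const uint64_t ht_gfn = max_pfn - num_ht_pages;            /* 0xfffcfffff */

        /* min(max_gfn, ht_gfn - 1): the last usable GFN drops to 0xfffcffffe. */
        assert(ht_gfn - 1 < max_gfn);
        assert(ht_gfn - 1 == 0xfffcffffeULL);
        return 0;
}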