Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "As everyone came back from conferences, here are the pending patches
  for Linux 6.0.

  ARM:

   - Fix for kmemleak with pKVM

  s390:

   - Fixes for VFIO with zPCI

   - smatch fix

  x86:

   - Ensure XSAVE-capable hosts always allow FP and SSE state to be
     saved and restored via KVM_{GET,SET}_XSAVE

   - Fix broken max_mmu_rmap_size stat

   - Fix compile error with old glibc that doesn't have gettid()"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: Inject #UD on emulated XSETBV if XSAVES isn't enabled
  KVM: x86: Always enable legacy FP/SSE in allowed user XFEATURES
  KVM: x86: Reinstate kvm_vcpu_arch.guest_supported_xcr0
  KVM: x86/mmu: add missing update to max_mmu_rmap_size
  selftests: kvm: Fix a compile error in selftests/kvm/rseq_test.c
  KVM: s390: pci: register pci hooks without interpretation
  KVM: s390: pci: fix GAIT physical vs virtual pointers usage
  KVM: s390: Pass initialized arg even if unused
  KVM: s390: pci: fix plain integer as NULL pointer warnings
  KVM: arm64: Use kmemleak_free_part_phys() to unregister hyp_mem_base
commit 317fab7ec5
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -2114,7 +2114,7 @@ static int finalize_hyp_mode(void)
 	 * at, which would end badly once inaccessible.
 	 */
 	kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
-	kmemleak_free_part(__va(hyp_mem_base), hyp_mem_size);
+	kmemleak_free_part_phys(hyp_mem_base, hyp_mem_size);
 	return pkvm_drop_host_privileges();
 }
 
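A note on the hunk above: hyp_mem_base holds a physical address from memblock, and since kmemleak began tracking physically allocated objects separately, round-tripping through __va() no longer lands on an object kmemleak knows about; the region has to be unregistered through the _phys API. A minimal sketch of the rule, with a hypothetical helper name:

/*
 * Hypothetical helper (kernel context): memblock-reserved regions are
 * tracked by kmemleak under their physical address on recent kernels,
 * so unregister them with the _phys variant instead of via __va().
 */
#include <linux/kmemleak.h>
#include <linux/types.h>

static void unregister_reserved_region(phys_addr_t base, size_t size)
{
	kmemleak_free_part_phys(base, size);	/* not: kmemleak_free_part(__va(base), size) */
}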
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -489,6 +489,8 @@ enum prot_type {
 	PROT_TYPE_ALC = 2,
 	PROT_TYPE_DAT = 3,
 	PROT_TYPE_IEP = 4,
+	/* Dummy value for passing an initialized value when code != PGM_PROTECTION */
+	PROT_NONE,
 };
 
 static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
@@ -504,6 +506,10 @@ static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva,
 	switch (code) {
 	case PGM_PROTECTION:
 		switch (prot) {
+		case PROT_NONE:
+			/* We should never get here, acts like termination */
+			WARN_ON_ONCE(1);
+			break;
 		case PROT_TYPE_IEP:
 			tec->b61 = 1;
 			fallthrough;
@@ -968,8 +974,10 @@ static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
 			return rc;
 	} else {
 		gpa = kvm_s390_real_to_abs(vcpu, ga);
-		if (kvm_is_error_gpa(vcpu->kvm, gpa))
+		if (kvm_is_error_gpa(vcpu->kvm, gpa)) {
 			rc = PGM_ADDRESSING;
+			prot = PROT_NONE;
+		}
 	}
 	if (rc)
 		return trans_exc(vcpu, rc, ga, ar, mode, prot);
@@ -1112,8 +1120,6 @@ int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
 		if (rc == PGM_PROTECTION && try_storage_prot_override)
 			rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
 							data, fragment_len, PAGE_SPO_ACC);
-		if (rc == PGM_PROTECTION)
-			prot = PROT_TYPE_KEYC;
 		if (rc)
 			break;
 		len -= fragment_len;
@@ -1123,6 +1129,10 @@ int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
 	if (rc > 0) {
 		bool terminate = (mode == GACC_STORE) && (idx > 0);
 
+		if (rc == PGM_PROTECTION)
+			prot = PROT_TYPE_KEYC;
+		else
+			prot = PROT_NONE;
 		rc = trans_exc_ending(vcpu, rc, ga, ar, mode, prot, terminate);
 	}
 out_unlock:
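The PROT_NONE additions above are what the "smatch fix" in the pull message refers to: static analysis cannot prove that rc != 0 implies prot was assigned, so every error path now sets a well-defined dummy value before the exception-reporting call (and the PROT_NONE case WARNs if it is ever actually consumed). The shape of the pattern, with hypothetical names, not code from the patch:

/*
 * Hypothetical sketch: assign a dummy, but defined, value on the paths
 * that don't care, so the reporting call never receives an argument the
 * analyzer can flag as uninitialized.
 */
enum prot_type { PROT_TYPE_LA = 0, PROT_NONE };

static int report_exception(int rc, enum prot_type prot)
{
	return prot == PROT_NONE ? -rc : rc;	/* trivial stand-in body */
}

static int translate(int fail)
{
	enum prot_type prot;
	int rc = 0;

	if (fail) {
		rc = 1;
		prot = PROT_NONE;	/* dummy, but initialized */
	}
	if (rc)
		return report_exception(rc, prot);	/* prot is defined on this path */
	return 0;
}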
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -3324,7 +3324,7 @@ static void aen_host_forward(unsigned long si)
 	if (gaite->count == 0)
 		return;
 	if (gaite->aisb != 0)
-		set_bit_inv(gaite->aisbo, (unsigned long *)gaite->aisb);
+		set_bit_inv(gaite->aisbo, phys_to_virt(gaite->aisb));
 
 	kvm = kvm_s390_pci_si_to_kvm(aift, si);
 	if (!kvm)
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -505,7 +505,7 @@ int kvm_arch_init(void *opaque)
 		goto out;
 	}
 
-	if (kvm_s390_pci_interp_allowed()) {
+	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
 		rc = kvm_s390_pci_init();
 		if (rc) {
 			pr_err("Unable to allocate AIFT for PCI\n");
@@ -527,7 +527,7 @@ out:
 void kvm_arch_exit(void)
 {
 	kvm_s390_gib_destroy();
-	if (kvm_s390_pci_interp_allowed())
+	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
 		kvm_s390_pci_exit();
 	debug_unregister(kvm_s390_dbf);
 	debug_unregister(kvm_s390_dbf_uv);
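The kvm_arch_init()/kvm_arch_exit() change swaps a runtime facility check for IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM): the KVM registration hooks must be installed whenever the zPCI glue is compiled in, even on machines without zPCI interpretation, so the interpretation check moves into pci.c (next file). IS_ENABLED() evaluates to a 0/1 compile-time constant, as this sketch illustrates (pci_glue_init() is a stand-in name):

/*
 * Sketch: IS_ENABLED(CONFIG_FOO) is 1 when the option is =y or =m and 0
 * otherwise, so the dead branch is discarded at compile time while still
 * being parsed and type-checked.
 */
#include <linux/kconfig.h>

static int pci_glue_init(void)
{
	return 0;	/* stand-in for the real init path */
}

static int arch_init_sketch(void)
{
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		return pci_glue_init();
	return 0;
}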
--- a/arch/s390/kvm/pci.c
+++ b/arch/s390/kvm/pci.c
@@ -58,7 +58,7 @@ static int zpci_setup_aipb(u8 nisc)
 	if (!zpci_aipb)
 		return -ENOMEM;
 
-	aift->sbv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC, 0);
+	aift->sbv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC, NULL);
 	if (!aift->sbv) {
 		rc = -ENOMEM;
 		goto free_aipb;
@@ -71,7 +71,7 @@ static int zpci_setup_aipb(u8 nisc)
 		rc = -ENOMEM;
 		goto free_sbv;
 	}
-	aift->gait = (struct zpci_gaite *)page_to_phys(page);
+	aift->gait = (struct zpci_gaite *)page_to_virt(page);
 
 	zpci_aipb->aipb.faisb = virt_to_phys(aift->sbv->vector);
 	zpci_aipb->aipb.gait = virt_to_phys(aift->gait);
@@ -373,7 +373,7 @@ static int kvm_s390_pci_aif_disable(struct zpci_dev *zdev, bool force)
 	gaite->gisc = 0;
 	gaite->aisbo = 0;
 	gaite->gisa = 0;
-	aift->kzdev[zdev->aisb] = 0;
+	aift->kzdev[zdev->aisb] = NULL;
 	/* Clear zdev info */
 	airq_iv_free_bit(aift->sbv, zdev->aisb);
 	airq_iv_release(zdev->aibv);
@@ -672,23 +672,31 @@ out:
 
 int kvm_s390_pci_init(void)
 {
+	zpci_kvm_hook.kvm_register = kvm_s390_pci_register_kvm;
+	zpci_kvm_hook.kvm_unregister = kvm_s390_pci_unregister_kvm;
+
+	if (!kvm_s390_pci_interp_allowed())
+		return 0;
+
 	aift = kzalloc(sizeof(struct zpci_aift), GFP_KERNEL);
 	if (!aift)
 		return -ENOMEM;
 
 	spin_lock_init(&aift->gait_lock);
 	mutex_init(&aift->aift_lock);
-	zpci_kvm_hook.kvm_register = kvm_s390_pci_register_kvm;
-	zpci_kvm_hook.kvm_unregister = kvm_s390_pci_unregister_kvm;
 
 	return 0;
 }
 
 void kvm_s390_pci_exit(void)
 {
-	mutex_destroy(&aift->aift_lock);
 	zpci_kvm_hook.kvm_register = NULL;
 	zpci_kvm_hook.kvm_unregister = NULL;
+
+	if (!kvm_s390_pci_interp_allowed())
+		return;
+
+	mutex_destroy(&aift->aift_lock);
 
 	kfree(aift);
 }
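Both the GAIT fix here and the set_bit_inv() fix in interrupt.c restore the usual convention: pointers the CPU dereferences stay virtual (page_to_virt(), phys_to_virt()), and addresses are converted exactly once, with virt_to_phys(), at the point where they are handed to hardware such as the AIPB. A sketch of that split, with a hypothetical helper:

/*
 * Hypothetical helper (kernel context): keep the CPU-visible pointer
 * virtual and convert only when publishing the address to the device.
 */
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/io.h>

static void publish_table(struct page *page, u64 *hw_slot)
{
	void *table = page_to_virt(page);	/* CPU dereferences this */

	memset(table, 0, PAGE_SIZE);
	*hw_slot = virt_to_phys(table);		/* device gets the physical address */
}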
--- a/arch/s390/kvm/pci.h
+++ b/arch/s390/kvm/pci.h
@@ -46,9 +46,9 @@ extern struct zpci_aift *aift;
 static inline struct kvm *kvm_s390_pci_si_to_kvm(struct zpci_aift *aift,
 						 unsigned long si)
 {
-	if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM) || aift->kzdev == 0 ||
-	    aift->kzdev[si] == 0)
-		return 0;
+	if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM) || !aift->kzdev ||
+	    !aift->kzdev[si])
+		return NULL;
 	return aift->kzdev[si]->kvm;
 };
 
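This inline is one of the "plain integer as NULL pointer" fixes: sparse warns when 0 is used where a pointer is expected, and !ptr together with NULL states the intent directly. A minimal illustration with hypothetical names:

/* Hypothetical lookup: NULL, not 0, for pointer returns and tests. */
struct item;

static struct item *lookup(struct item **table, unsigned long i)
{
	if (!table || !table[i])
		return NULL;	/* "return 0;" would draw a sparse warning */
	return table[i];
}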
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -729,6 +729,7 @@ struct kvm_vcpu_arch {
 	struct fpu_guest guest_fpu;
 
 	u64 xcr0;
+	u64 guest_supported_xcr0;
 
 	struct kvm_pio_request pio;
 	void *pio_data;
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -315,7 +315,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 	struct kvm_cpuid_entry2 *best;
-	u64 guest_supported_xcr0;
 
 	best = kvm_find_cpuid_entry(vcpu, 1);
 	if (best && apic) {
@@ -327,10 +326,16 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 		kvm_apic_set_version(vcpu);
 	}
 
-	guest_supported_xcr0 =
+	vcpu->arch.guest_supported_xcr0 =
 		cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
 
-	vcpu->arch.guest_fpu.fpstate->user_xfeatures = guest_supported_xcr0;
+	/*
+	 * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
+	 * XSAVE/XCR0 are not exposed to the guest, and even if XSAVE isn't
+	 * supported by the host.
+	 */
+	vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0 |
+						       XFEATURE_MASK_FPSSE;
 
 	kvm_update_pv_runtime(vcpu);
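XFEATURE_MASK_FPSSE covers bits 0 (x87) and 1 (SSE: XMM registers plus MXCSR) of the xfeatures bitmap; that state lives in the legacy region of an XSAVE area, which is why KVM_{GET,SET}_XSAVE can always transport it even when XSAVE/XCR0 are hidden from the guest. For reference, the masks being OR'ed in are, restated from the x86 FPU headers to the best of my knowledge:

#define XFEATURE_MASK_FP	(1 << XFEATURE_FP)	/* bit 0: x87 state */
#define XFEATURE_MASK_SSE	(1 << XFEATURE_SSE)	/* bit 1: XMM regs + MXCSR */
#define XFEATURE_MASK_FPSSE	(XFEATURE_MASK_FP | XFEATURE_MASK_SSE)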
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -4132,6 +4132,9 @@ static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
 {
 	u32 eax, ecx, edx;
 
+	if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE))
+		return emulate_ud(ctxt);
+
 	eax = reg_read(ctxt, VCPU_REGS_RAX);
 	edx = reg_read(ctxt, VCPU_REGS_RDX);
 	ecx = reg_read(ctxt, VCPU_REGS_RCX);
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1596,6 +1596,8 @@ static void __rmap_add(struct kvm *kvm,
 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
 	rmap_count = pte_list_add(cache, spte, rmap_head);
 
+	if (rmap_count > kvm->stat.max_mmu_rmap_size)
+		kvm->stat.max_mmu_rmap_size = rmap_count;
 	if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
 		kvm_zap_all_rmap_sptes(kvm, rmap_head);
 		kvm_flush_remote_tlbs_with_address(
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1011,15 +1011,10 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);
 
-static inline u64 kvm_guest_supported_xcr0(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.guest_fpu.fpstate->user_xfeatures;
-}
-
 #ifdef CONFIG_X86_64
 static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
 {
-	return kvm_guest_supported_xcr0(vcpu) & XFEATURE_MASK_USER_DYNAMIC;
+	return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC;
 }
 #endif
 
@@ -1042,7 +1037,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 	 * saving. However, xcr0 bit 0 is always set, even if the
 	 * emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
 	 */
-	valid_bits = kvm_guest_supported_xcr0(vcpu) | XFEATURE_MASK_FP;
+	valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
 	if (xcr0 & ~valid_bits)
 		return 1;
 
@@ -1070,6 +1065,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 
 int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
 {
+	/* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */
 	if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
 	    __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
 		kvm_inject_gp(vcpu, 0);
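With FP+SSE force-set in user_xfeatures, that field can no longer double as "what the guest may write to XCR0", which is why the cached vcpu->arch.guest_supported_xcr0 is reinstated and __kvm_set_xcr() reads it directly. The shape of the validity check, as a simplified standalone sketch (it omits the architectural dependency checks between feature bits):

/*
 * Simplified sketch of XCR0 validation: bit 0 (x87) must always be set,
 * and nothing outside the supported mask may be.
 */
#include <stdbool.h>
#include <stdint.h>

static bool xcr0_valid(uint64_t xcr0, uint64_t supported)
{
	if (!(xcr0 & 1))	/* x87 bit is architecturally always set */
		return false;
	return (xcr0 & ~(supported | 1)) == 0;
}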
--- a/tools/testing/selftests/kvm/rseq_test.c
+++ b/tools/testing/selftests/kvm/rseq_test.c
@@ -227,7 +227,7 @@ int main(int argc, char *argv[])
 	ucall_init(vm, NULL);
 
 	pthread_create(&migration_thread, NULL, migration_worker,
-		       (void *)(unsigned long)gettid());
+		       (void *)(unsigned long)syscall(SYS_gettid));
 
 	for (i = 0; !done; i++) {
 		vcpu_run(vcpu);
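The rseq_test fix is a portability workaround: glibc only gained a gettid() wrapper in version 2.30, while syscall(SYS_gettid) works on any glibc. A standalone sketch of the portable form:

/* Standalone example: fetch the thread id without requiring glibc >= 2.30. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	pid_t tid = syscall(SYS_gettid);	/* raw syscall, always available */

	printf("tid = %ld\n", (long)tid);
	return 0;
}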