mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-28 22:54:05 +08:00
kvm: x86/mmu: Support write protection for nesting in tdp MMU
To support nested virtualization, KVM will sometimes need to write protect pages which are part of a shadowed paging structure or are not writable in the shadowed paging structure. Add a function to write protect GFN mappings for this purpose. Tested by running kvm-unit-tests and KVM selftests on an Intel Haswell machine. This series introduced no new failures. This series can be viewed in Gerrit at: https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2538 Signed-off-by: Ben Gardon <bgardon@google.com> Message-Id: <20201014182700.2888246-18-bgardon@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
1488199856
commit
46044f72c3
@ -1299,6 +1299,10 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
|
||||
write_protected |= __rmap_write_protect(kvm, rmap_head, true);
|
||||
}
|
||||
|
||||
if (kvm->arch.tdp_mmu_enabled)
|
||||
write_protected |=
|
||||
kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);
|
||||
|
||||
return write_protected;
|
||||
}
|
||||
|
||||
|
@ -1078,3 +1078,53 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
|
||||
kvm_mmu_put_root(kvm, root);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * SPTE_MMU_WRITEABLE bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
			      gfn_t gfn)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	/* Walk only the leaf SPTE(s) covering the single-GFN range [gfn, gfn + 1). */
	tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
		/* Already non-writable: nothing to clear, no flush needed. */
		if (!is_writable_pte(iter.old_spte))
			break;

		/*
		 * Clear both the hardware-writable bit and the MMU-writable
		 * tracking bit so future write faults are intercepted.
		 */
		new_spte = iter.old_spte &
			~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);

		tdp_mmu_set_spte(kvm, &iter, new_spte);
		spte_set = true;
	}

	/* True iff at least one SPTE was modified (caller must flush TLBs). */
	return spte_set;
}
|
||||
|
||||
/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * SPTE_MMU_WRITEABLE bit to ensure future writes continue to be intercepted.
 * Iterates over every TDP MMU root in @slot's address space.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn)
{
	struct kvm_mmu_page *root;
	int root_as_id;
	bool spte_set = false;

	/* Caller must hold the MMU lock for the duration of the walk. */
	lockdep_assert_held(&kvm->mmu_lock);
	for_each_tdp_mmu_root(kvm, root) {
		root_as_id = kvm_mmu_page_as_id(root);
		/* Skip roots belonging to a different address space than the slot. */
		if (root_as_id != slot->as_id)
			continue;

		spte_set |= write_protect_gfn(kvm, root, gfn);
	}
	return spte_set;
}
|
||||
|
||||
|
@ -40,4 +40,7 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
|
||||
bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot);

/*
 * Write protect the last level SPTE mapping @gfn in @slot across all TDP MMU
 * roots in the slot's address space; returns true if an SPTE was changed and
 * a TLB flush is needed.
 */
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn);
#endif /* __KVM_X86_MMU_TDP_MMU_H */
|
Loading…
Reference in New Issue
Block a user