KVM: x86/mmu: Fix wrong gfn range of tlb flushing in kvm_set_pte_rmapp()
When the spte of a huge page is dropped in kvm_set_pte_rmapp(), the whole
gfn range covered by the spte should be flushed. However,
rmap_walk_init_level() doesn't align down the gfn for the new level like
the tdp iterator does, so the gfn used in kvm_set_pte_rmapp() is not the
base gfn of the huge page, and the size of the gfn range is wrong as well.
Use the base gfn and the size of the huge page when flushing TLBs for a
huge page. Also introduce a helper function to flush the given page (huge
or not) of guest memory, which helps prevent future buggy use of
kvm_flush_remote_tlbs_with_address() in such cases.
Fixes: c3134ce240 ("KVM: Replace old tlb flush function with new one to flush a specified range.")
Signed-off-by: Hou Wenlong <houwenlong.hwl@antgroup.com>
Link: https://lore.kernel.org/r/0ce24d7078fa5f1f8d64b0c59826c50f32f8065e.1665214747.git.houwenlong.hwl@antgroup.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
commit 9ffe926537
parent c667a3baed
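The bug and the fix come down to alignment arithmetic: the old call flushed a single 4KiB page at an unaligned gfn, while the dropped spte covers KVM_PAGES_PER_HPAGE(level) pages starting at the huge page's base gfn. Below is a minimal standalone sketch of that arithmetic, not kernel code: PAGES_PER_LEVEL() and gfn_base_for_level() are simplified stand-ins for the kernel's KVM_PAGES_PER_HPAGE() and gfn_round_for_level(), and the shift values (4KiB base pages, 9 bits of gfn per page-table level) are assumed x86 values.

/*
 * Standalone sketch of the gfn-range math described in the commit message.
 * PAGES_PER_LEVEL() and gfn_base_for_level() are simplified stand-ins for
 * the kernel's KVM_PAGES_PER_HPAGE() and gfn_round_for_level(); the shifts
 * assume x86 paging (4KiB base pages, 9 bits of gfn per level).
 */
#include <stdint.h>
#include <stdio.h>

#define PG_LEVEL_2M 2

/* Number of 4KiB base pages covered by one mapping at the given level. */
#define PAGES_PER_LEVEL(level) (1ULL << (((level) - 1) * 9))

/* Align a gfn down to the base gfn of its mapping at the given level. */
static uint64_t gfn_base_for_level(uint64_t gfn, int level)
{
	return gfn & ~(PAGES_PER_LEVEL(level) - 1);
}

int main(void)
{
	uint64_t gfn = 0x12345;	/* arbitrary gfn inside a 2MiB mapping */
	int level = PG_LEVEL_2M;

	/* Old, buggy flush: one 4KiB page at an unaligned gfn. */
	printf("old: flush gfn 0x%llx, 1 page\n", (unsigned long long)gfn);

	/* Fixed flush: the whole huge page, starting at its base gfn. */
	printf("new: flush gfn 0x%llx, %llu pages\n",
	       (unsigned long long)gfn_base_for_level(gfn, level),
	       (unsigned long long)PAGES_PER_LEVEL(level));
	return 0;
}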
@@ -1469,7 +1469,7 @@ restart:
 	}
 
 	if (need_flush && kvm_available_flush_tlb_with_range()) {
-		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
+		kvm_flush_remote_tlbs_gfn(kvm, gfn, level);
 		return false;
 	}
 
@@ -169,8 +169,17 @@ void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 				    struct kvm_memory_slot *slot, u64 gfn,
 				    int min_level);
+
 void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 					u64 start_gfn, u64 pages);
+
+/* Flush the given page (huge or not) of guest memory. */
+static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
+{
+	kvm_flush_remote_tlbs_with_address(kvm, gfn_round_for_level(gfn, level),
+					   KVM_PAGES_PER_HPAGE(level));
+}
+
 unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);
 
 extern int nx_huge_pages;
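As a quick sanity check of the rounding the new helper relies on (reusing the same simplified, assumed x86 shifts as the sketch above; not part of this patch), a gfn inside a 2MiB or 1GiB mapping rounds down and flushes as follows:

/* Self-contained check of the rounding behaviour; illustrative only. */
#include <assert.h>
#include <stdint.h>

#define PAGES_PER_LEVEL(level)	(1ULL << (((level) - 1) * 9))
#define BASE_GFN(gfn, level)	((gfn) & ~(PAGES_PER_LEVEL(level) - 1))

int main(void)
{
	/* 2MiB mapping (level 2): 512 base pages, base gfn on a 512-page boundary. */
	assert(BASE_GFN(0x12345ULL, 2) == 0x12200);
	assert(PAGES_PER_LEVEL(2) == 512);

	/* 1GiB mapping (level 3): 262144 base pages, base gfn 0 in this example. */
	assert(BASE_GFN(0x12345ULL, 3) == 0);
	assert(PAGES_PER_LEVEL(3) == 262144);
	return 0;
}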