KVM: Move kvm_arch_flush_remote_tlbs_memslot() to common code
Move kvm_arch_flush_remote_tlbs_memslot() to common code and drop "arch_" from the name. kvm_arch_flush_remote_tlbs_memslot() is just a range-based TLB invalidation where the range is defined by the memslot. Now that kvm_flush_remote_tlbs_range() can be called from common code, we can just use that and drop a bunch of duplicate code from the arch directories.

Note this adds a lockdep assertion for slots_lock being held when calling kvm_flush_remote_tlbs_memslot(), which was previously only asserted on x86. MIPS has calls to kvm_flush_remote_tlbs_memslot(), but they all hold the slots_lock, so the lockdep assertion continues to hold true. Also drop the CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT ifdef gating kvm_flush_remote_tlbs_memslot(), since it is no longer necessary.

Signed-off-by: David Matlack <dmatlack@google.com>
Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Reviewed-by: Shaoqin Huang <shahuang@redhat.com>
Acked-by: Anup Patel <anup@brainfault.org>
Acked-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20230811045127.3308641-7-rananta@google.com
This commit is contained in:
parent d478899605
commit 619b507244
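As the commit message says, a memslot-scoped TLB flush is nothing more than a range-based flush whose bounds come from the memslot. Below is a minimal, self-contained sketch of that reduction in ordinary user-space C; struct memslot, flush_range() and flush_memslot() are hypothetical stand-ins for illustration only, not the kernel's actual types or helpers. The real change is in the hunks that follow.

/*
 * Minimal, self-contained sketch (user-space C, not kernel code).
 * struct memslot, flush_range() and flush_memslot() are hypothetical
 * stand-ins used only to model the idea that flushing a memslot is a
 * range flush over [base_gfn, base_gfn + npages).
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

struct memslot {
        gfn_t base_gfn;   /* first guest frame number covered by the slot */
        uint64_t npages;  /* number of guest pages in the slot */
};

/* Stand-in for a range-based remote TLB invalidation. */
static void flush_range(gfn_t gfn, uint64_t nr_pages)
{
        printf("flush GFNs [%" PRIu64 ", %" PRIu64 ")\n", gfn, gfn + nr_pages);
}

/* A memslot-scoped flush is just a range flush defined by the memslot. */
static void flush_memslot(const struct memslot *slot)
{
        flush_range(slot->base_gfn, slot->npages);
}

int main(void)
{
        struct memslot slot = { .base_gfn = 0x1000, .npages = 512 };

        flush_memslot(&slot);   /* prints: flush GFNs [4096, 4608) */
        return 0;
}

Centralising that one-liner in common code is what lets the per-arch copies in the arm64, MIPS, RISC-V and x86 hunks below be deleted.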
@@ -1532,12 +1532,6 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 
 }
 
-void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
-                                        const struct kvm_memory_slot *memslot)
-{
-        kvm_flush_remote_tlbs(kvm);
-}
-
 static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
                                         struct kvm_arm_device_addr *dev_addr)
 {
@@ -199,7 +199,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
         /* Flush slot from GPA */
         kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
                               slot->base_gfn + slot->npages - 1);
-        kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+        kvm_flush_remote_tlbs_memslot(kvm, slot);
         spin_unlock(&kvm->mmu_lock);
 }
 
@@ -235,7 +235,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
                 needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
                                         new->base_gfn + new->npages - 1);
                 if (needs_flush)
-                        kvm_arch_flush_remote_tlbs_memslot(kvm, new);
+                        kvm_flush_remote_tlbs_memslot(kvm, new);
                 spin_unlock(&kvm->mmu_lock);
         }
 }
@@ -987,12 +987,6 @@ int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
         return 1;
 }
 
-void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
-                                        const struct kvm_memory_slot *memslot)
-{
-        kvm_flush_remote_tlbs(kvm);
-}
-
 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
 {
         int r;
@@ -406,12 +406,6 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
 }
 
-void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
-                                        const struct kvm_memory_slot *memslot)
-{
-        kvm_flush_remote_tlbs(kvm);
-}
-
 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free)
 {
 }
@@ -6666,7 +6666,7 @@ static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
          */
         if (walk_slot_rmaps(kvm, slot, kvm_mmu_zap_collapsible_spte,
                             PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL - 1, true))
-                kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+                kvm_flush_remote_tlbs_memslot(kvm, slot);
 }
 
 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
@@ -6685,20 +6685,6 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
         }
 }
 
-void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
-                                        const struct kvm_memory_slot *memslot)
-{
-        /*
-         * All current use cases for flushing the TLBs for a specific memslot
-         * related to dirty logging, and many do the TLB flush out of mmu_lock.
-         * The interaction between the various operations on memslot must be
-         * serialized by slots_locks to ensure the TLB flush from one operation
-         * is observed by any other operation on the same memslot.
-         */
-        lockdep_assert_held(&kvm->slots_lock);
-        kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
-}
-
 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
                                    const struct kvm_memory_slot *memslot)
 {
@@ -12751,7 +12751,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
                  * See is_writable_pte() for more details (the case involving
                  * access-tracked SPTEs is particularly relevant).
                  */
-                kvm_arch_flush_remote_tlbs_memslot(kvm, new);
+                kvm_flush_remote_tlbs_memslot(kvm, new);
         }
 }
 
@@ -1360,6 +1360,8 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);
 
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
+void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
+                                   const struct kvm_memory_slot *memslot);
 
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
 int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
@@ -1388,10 +1390,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                                              unsigned long mask);
 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
 
-#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
-void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
-                                        const struct kvm_memory_slot *memslot);
-#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
+#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
                       int *is_dirty, struct kvm_memory_slot **memslot);
@@ -379,6 +379,20 @@ void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
                 kvm_flush_remote_tlbs(kvm);
 }
 
+void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
+                                   const struct kvm_memory_slot *memslot)
+{
+        /*
+         * All current use cases for flushing the TLBs for a specific memslot
+         * are related to dirty logging, and many do the TLB flush out of
+         * mmu_lock. The interaction between the various operations on memslot
+         * must be serialized by slots_locks to ensure the TLB flush from one
+         * operation is observed by any other operation on the same memslot.
+         */
+        lockdep_assert_held(&kvm->slots_lock);
+        kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
+}
+
 static void kvm_flush_shadow_all(struct kvm *kvm)
 {
         kvm_arch_flush_shadow_all(kvm);
@@ -2191,7 +2205,7 @@ static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
         }
 
         if (flush)
-                kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
+                kvm_flush_remote_tlbs_memslot(kvm, memslot);
 
         if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
                 return -EFAULT;
@@ -2308,7 +2322,7 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm,
         KVM_MMU_UNLOCK(kvm);
 
         if (flush)
-                kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
+                kvm_flush_remote_tlbs_memslot(kvm, memslot);
 
         return 0;
 }