Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-27 22:24:11 +08:00)
KVM: Kill kvm_map_gfn() / kvm_unmap_gfn() and gfn_to_pfn_cache
In commit 7e2175ebd6 ("KVM: x86: Fix recording of guest steal time /
preempted status") I removed the only user of these functions because
it was basically impossible to use them safely.
There are two stages to the GFN->PFN mapping; first through the KVM
memslots to a userspace HVA and then through the page tables to
translate that HVA to an underlying PFN. Invalidations of the former
were being handled correctly, but no attempt was made to use the MMU
notifiers to invalidate the cache when the HVA->PFN mapping changed.
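
To make the two stages concrete, here is a minimal sketch (not code from
this commit; example_gfn_to_pfn() is a hypothetical helper, only
gfn_to_hva() and gfn_to_pfn() are existing KVM APIs):

#include <linux/kvm_host.h>

/*
 * Hypothetical helper, for illustration only: the two stages of the
 * GFN->PFN translation described above.
 */
static kvm_pfn_t example_gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	/* Stage 1: GFN->HVA through the KVM memslots. */
	unsigned long hva = gfn_to_hva(kvm, gfn);

	if (kvm_is_error_hva(hva))
		return KVM_PFN_NOSLOT;

	/*
	 * Stage 2: HVA->PFN through the host page tables.  gfn_to_pfn()
	 * repeats the memslot lookup internally and then resolves the HVA
	 * to a PFN; caching its result safely would require MMU-notifier
	 * invalidation, which the old gfn_to_pfn_cache never had.
	 */
	return gfn_to_pfn(kvm, gfn);
}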
As a prelude to reinventing the gfn_to_pfn_cache with more usable
semantics, rip it out entirely and untangle the implementation of
the unsafe kvm_vcpu_map()/kvm_vcpu_unmap() functions from it.
All current users of kvm_vcpu_map() also look broken right now, and
will be dealt with separately. They broadly fall into two classes:
* Those which map, access the data and immediately unmap. This is
  mostly gratuitous and could just as well use the existing user
  HVA, and could probably benefit from a gfn_to_hva_cache as they
  do so (see the sketch after this list).
* Those which keep the mapping around for a longer time, perhaps
even using the PFN directly from the guest. These will need to
be converted to the new gfn_to_pfn_cache and then kvm_vcpu_map()
can be removed too.
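
As a rough illustration of the first class, a hedged sketch of the
gfn_to_hva_cache pattern those users could adopt; struct example_state
and example_update() are hypothetical, only kvm_gfn_to_hva_cache_init()
and kvm_write_guest_cached() are existing KVM helpers:

#include <linux/kvm_host.h>

/*
 * Hypothetical example of the first class of users: instead of
 * map/access/unmap, keep a gfn_to_hva_cache and copy through the
 * cached userspace HVA.
 */
struct example_state {
	struct gfn_to_hva_cache ghc;
	u64 value;
};

static int example_update(struct kvm_vcpu *vcpu, struct example_state *s,
			  gpa_t gpa)
{
	/* (Re)initialise the cache whenever the guest address changes. */
	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &s->ghc, gpa, sizeof(s->value)))
		return -EFAULT;

	/* Write through the cached HVA; no kmap()/kunmap() of a guest page. */
	return kvm_write_guest_cached(vcpu->kvm, &s->ghc, &s->value,
				      sizeof(s->value));
}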
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Message-Id: <20211115165030.7422-8-dwmw2@infradead.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 357a18ad23
parent cee66664dc
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
@@ -866,7 +866,7 @@ void kvm_release_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_accessed(kvm_pfn_t pfn);
 
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 			int len);
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
@@ -942,12 +942,8 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
-		struct gfn_to_pfn_cache *cache, bool atomic);
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
@@ -53,13 +53,6 @@ struct gfn_to_hva_cache {
 	struct kvm_memory_slot *memslot;
 };
 
-struct gfn_to_pfn_cache {
-	u64 generation;
-	gfn_t gfn;
-	kvm_pfn_t pfn;
-	bool dirty;
-};
-
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
 /*
  * Memory caches are used to preallocate memory ahead of various MMU flows,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
@@ -2548,72 +2548,36 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
 {
 	if (pfn == 0)
 		return;
 
-	if (cache)
-		cache->pfn = cache->gfn = 0;
-
 	if (dirty)
 		kvm_release_pfn_dirty(pfn);
 	else
 		kvm_release_pfn_clean(pfn);
 }
 
-static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
-				 struct gfn_to_pfn_cache *cache, u64 gen)
-{
-	kvm_release_pfn(cache->pfn, cache->dirty, cache);
-
-	cache->pfn = gfn_to_pfn_memslot(slot, gfn);
-	cache->gfn = gfn;
-	cache->dirty = false;
-	cache->generation = gen;
-}
-
-static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
-			 struct kvm_host_map *map,
-			 struct gfn_to_pfn_cache *cache,
-			 bool atomic)
+int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
 	kvm_pfn_t pfn;
 	void *hva = NULL;
 	struct page *page = KVM_UNMAPPED_PAGE;
-	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
-	u64 gen = slots->generation;
 
 	if (!map)
 		return -EINVAL;
 
-	if (cache) {
-		if (!cache->pfn || cache->gfn != gfn ||
-			cache->generation != gen) {
-			if (atomic)
-				return -EAGAIN;
-			kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
-		}
-		pfn = cache->pfn;
-	} else {
-		if (atomic)
-			return -EAGAIN;
-		pfn = gfn_to_pfn_memslot(slot, gfn);
-	}
+	pfn = gfn_to_pfn(vcpu->kvm, gfn);
 	if (is_error_noslot_pfn(pfn))
 		return -EINVAL;
 
 	if (pfn_valid(pfn)) {
 		page = pfn_to_page(pfn);
-		if (atomic)
-			hva = kmap_atomic(page);
-		else
-			hva = kmap(page);
+		hva = kmap(page);
 #ifdef CONFIG_HAS_IOMEM
-	} else if (!atomic) {
-		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
 	} else {
-		return -EINVAL;
+		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
 #endif
 	}
 
@@ -2627,27 +2591,9 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
 
 	return 0;
 }
-
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
-		struct gfn_to_pfn_cache *cache, bool atomic)
-{
-	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
-			cache, atomic);
-}
-EXPORT_SYMBOL_GPL(kvm_map_gfn);
-
-int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
-{
-	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
-			NULL, false);
-}
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
-static void __kvm_unmap_gfn(struct kvm *kvm,
-			struct kvm_memory_slot *memslot,
-			struct kvm_host_map *map,
-			struct gfn_to_pfn_cache *cache,
-			bool dirty, bool atomic)
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
 {
 	if (!map)
 		return;
@@ -2655,45 +2601,21 @@ static void __kvm_unmap_gfn(struct kvm *kvm,
 	if (!map->hva)
 		return;
 
-	if (map->page != KVM_UNMAPPED_PAGE) {
-		if (atomic)
-			kunmap_atomic(map->hva);
-		else
-			kunmap(map->page);
-	}
+	if (map->page != KVM_UNMAPPED_PAGE)
+		kunmap(map->page);
 #ifdef CONFIG_HAS_IOMEM
-	else if (!atomic)
-		memunmap(map->hva);
 	else
-		WARN_ONCE(1, "Unexpected unmapping in atomic context");
+		memunmap(map->hva);
 #endif
 
 	if (dirty)
-		mark_page_dirty_in_slot(kvm, memslot, map->gfn);
+		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
 
-	if (cache)
-		cache->dirty |= dirty;
-	else
-		kvm_release_pfn(map->pfn, dirty, NULL);
+	kvm_release_pfn(map->pfn, dirty);
 
 	map->hva = NULL;
 	map->page = NULL;
 }
-
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
-{
-	__kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map,
-			cache, dirty, atomic);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
-
-void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
-{
-	__kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn),
-			map, NULL, dirty, false);
-}
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
 
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
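
For context on the simplified API that survives this patch, a hedged
sketch of a caller using kvm_vcpu_map()/kvm_vcpu_unmap();
example_poke_guest_page() is hypothetical, and the commit's caveat still
applies, since nothing invalidates the mapping if the HVA->PFN
translation changes while it is held:

#include <linux/kvm_host.h>

/*
 * Hypothetical caller of the simplified kvm_vcpu_map()/kvm_vcpu_unmap()
 * pair that remains after this commit.
 */
static int example_poke_guest_page(struct kvm_vcpu *vcpu, gpa_t gpa, u32 val)
{
	struct kvm_host_map map;
	int ret;

	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map);
	if (ret)
		return ret;

	/* Access the guest page through the kernel mapping. */
	*(u32 *)(map.hva + offset_in_page(gpa)) = val;

	/* 'true' marks the page dirty before the PFN is released. */
	kvm_vcpu_unmap(vcpu, &map, true);
	return 0;
}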