
KVM: Remove user_alloc from struct kvm_memory_slot

This field was needed to differentiate memory slots created by the new
API, KVM_SET_USER_MEMORY_REGION, from those created by the old equivalent,
KVM_SET_MEMORY_REGION, whose support was dropped long ago:

  commit b74a07beed
  KVM: Remove kernel-allocated memory regions

Although we also have private memory slots, for which KVM allocates
memory with vm_mmap() (in other words, !user_alloc slots), the slot id
should be enough to differentiate them.

Note: corresponding function parameters will be removed later.
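
To make the check concrete, here is a minimal sketch (illustration only; the helper name kvm_is_private_memslot is hypothetical and not part of this patch) of how the slot id now carries the information that user_alloc used to:

/*
 * Hypothetical helper, illustration only (not in this patch): once
 * user_alloc is gone, the slot id alone tells KVM-internal (private)
 * slots, whose memory comes from vm_mmap(), apart from slots created
 * by userspace via KVM_SET_USER_MEMORY_REGION.  Ids below
 * KVM_USER_MEM_SLOTS are reserved for userspace; private slots use
 * the ids at or above that bound.
 */
static inline bool kvm_is_private_memslot(const struct kvm_memory_slot *slot)
{
        return slot->id >= KVM_USER_MEM_SLOTS;
}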

Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Gleb Natapov <gleb@redhat.com>

commit 7a905b1485 (parent 257090f702)
Author:    Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Committer: Gleb Natapov <gleb@redhat.com>
Date:      2013-02-07 18:55:57 +09:00

3 changed files with 16 additions and 23 deletions

arch/x86/kvm/x86.c

@@ -6897,33 +6897,28 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                 bool user_alloc)
 {
         int npages = memslot->npages;
-        int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
-
-        /* Prevent internal slot pages from being moved by fork()/COW. */
-        if (memslot->id >= KVM_USER_MEM_SLOTS)
-                map_flags = MAP_SHARED | MAP_ANONYMOUS;
 
-        /*To keep backward compatibility with older userspace,
-         *x86 needs to handle !user_alloc case.
+        /*
+         * Only private memory slots need to be mapped here since
+         * KVM_SET_MEMORY_REGION ioctl is no longer supported.
          */
-        if (!user_alloc) {
-                if (npages && !old.npages) {
-                        unsigned long userspace_addr;
+        if ((memslot->id >= KVM_USER_MEM_SLOTS) && npages && !old.npages) {
+                unsigned long userspace_addr;
 
-                        userspace_addr = vm_mmap(NULL, 0,
-                                                 npages * PAGE_SIZE,
-                                                 PROT_READ | PROT_WRITE,
-                                                 map_flags,
-                                                 0);
+                /*
+                 * MAP_SHARED to prevent internal slot pages from being moved
+                 * by fork()/COW.
+                 */
+                userspace_addr = vm_mmap(NULL, 0, npages * PAGE_SIZE,
+                                         PROT_READ | PROT_WRITE,
+                                         MAP_SHARED | MAP_ANONYMOUS, 0);
 
-                        if (IS_ERR((void *)userspace_addr))
-                                return PTR_ERR((void *)userspace_addr);
+                if (IS_ERR((void *)userspace_addr))
+                        return PTR_ERR((void *)userspace_addr);
 
-                        memslot->userspace_addr = userspace_addr;
-                }
+                memslot->userspace_addr = userspace_addr;
         }
 
         return 0;
 }
@@ -6935,7 +6930,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 
         int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
 
-        if (!user_alloc && !old.user_alloc && old.npages && !npages) {
+        if ((mem->slot >= KVM_USER_MEM_SLOTS) && old.npages && !npages) {
                 int ret;
 
                 ret = vm_munmap(old.userspace_addr,

include/linux/kvm_host.h

@@ -273,7 +273,6 @@ struct kvm_memory_slot {
         unsigned long userspace_addr;
         u32 flags;
         short id;
-        bool user_alloc;
 };
 
 static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)

virt/kvm/kvm_main.c

@@ -839,7 +839,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
         r = -ENOMEM;
         if (change == KVM_MR_CREATE) {
-                new.user_alloc = user_alloc;
                 new.userspace_addr = mem->userspace_addr;
 
                 if (kvm_arch_create_memslot(&new, npages))