mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-11-18 09:44:18 +08:00
090b7aff27
Today kvm_io_bus_register_dev() returns void and will internally BUG_ON if it fails. We want to create dynamic MMIO/PIO entries driven from userspace later in the series, so we need to make the code more robust with the following changes:

1) Add a return value to the registration function.
2) Fix up all the callsites to check the return code, handle any failures, and percolate the error up to the caller.
3) Add an unregister function that collapses holes in the array.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
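To illustrate points 1) and 3), the reworked bus helpers could look roughly like the sketch below. This is only a sketch of the shape of the interface, not the patch itself; the kvm_io_bus field names (devs[], dev_count) and the NR_IOBUS_DEVS limit are assumptions about the io_bus layout of this era:

/* Sketch only: registration now reports failure instead of BUG_ON. */
int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
			    struct kvm_io_device *dev)
{
	if (bus->dev_count >= NR_IOBUS_DEVS)
		return -ENOSPC;

	bus->devs[bus->dev_count++] = dev;
	return 0;
}

/* Sketch only: unregistration fills the hole with the last entry,
 * collapsing holes in the array. */
void kvm_io_bus_unregister_dev(struct kvm *kvm, struct kvm_io_bus *bus,
			       struct kvm_io_device *dev)
{
	int i;

	for (i = 0; i < bus->dev_count; i++)
		if (bus->devs[i] == dev) {
			bus->devs[i] = bus->devs[--bus->dev_count];
			break;
		}
}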
165 lines · 3.5 KiB · C
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	struct kvm_coalesced_mmio_zone *zone;
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;
	int i;

	/* Are we able to batch it ? */

	/* last is the first free entry
	 * check if we don't meet the first used entry
	 * there is always one unused entry in the buffer
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
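	/*
	 * This headroom check runs before dev->lock is taken in
	 * coalesced_mmio_write(), so several vcpus may pass it at the
	 * same time; requiring KVM_MAX_VCPUS free entries rather than
	 * just one leaves enough room that those racing writers cannot
	 * overflow the ring.
	 */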
	if (avail < KVM_MAX_VCPUS) {
		/* full */
		return 0;
	}

	/* is it in a batchable area ? */

	for (i = 0; i < dev->nb_zones; i++) {
		zone = &dev->zone[i];

		/* (addr,len) is fully included in
		 * (zone->addr, zone->size)
		 */

		if (zone->addr <= addr &&
		    addr + len <= zone->addr + zone->size)
			return 1;
	}
	return 0;
}

static int coalesced_mmio_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->lock);

	/* copy data in first free entry of the ring */

	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
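	/* Make the entry contents visible before advancing ring->last,
	 * which userspace reads to find new entries in the shared page. */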
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->lock);
	return 0;
}

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct kvm_coalesced_mmio_dev *dev;
	int ret;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	spin_lock_init(&dev->lock);
	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	kvm->coalesced_mmio_dev = dev;

	ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &dev->dev);
	if (ret < 0)
		kfree(dev);

	return ret;
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;

	if (dev == NULL)
		return -EINVAL;

	down_write(&kvm->slots_lock);
	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
		up_write(&kvm->slots_lock);
		return -ENOBUFS;
	}

	dev->zone[dev->nb_zones] = *zone;
	dev->nb_zones++;

	up_write(&kvm->slots_lock);
	return 0;
}

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	int i;
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
	struct kvm_coalesced_mmio_zone *z;

	if (dev == NULL)
		return -EINVAL;

	down_write(&kvm->slots_lock);

	i = dev->nb_zones;
	while (i) {
		z = &dev->zone[i - 1];

		/* unregister all zones
		 * included in (zone->addr, zone->size)
		 */

		if (zone->addr <= z->addr &&
		    z->addr + z->size <= zone->addr + zone->size) {
			dev->nb_zones--;
			*z = dev->zone[dev->nb_zones];
		}
		i--;
	}

	up_write(&kvm->slots_lock);

	return 0;
}
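For context, the two ioctl handlers above back the KVM_REGISTER_COALESCED_MMIO and KVM_UNREGISTER_COALESCED_MMIO VM ioctls, and the ring that coalesced_mmio_write() fills lives in a page of the vcpu mmap area, at the page offset reported by KVM_CHECK_EXTENSION(KVM_CAP_COALESCED_MMIO). A minimal userspace sketch follows; the helper names, the handle() callback, and the vm_fd/run/ring_pages/page_size parameters are placeholders assumed to come from the usual KVM setup sequence, and error handling is omitted.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Register a guest-physical range whose MMIO writes may be coalesced
 * (KVM_UNREGISTER_COALESCED_MMIO takes the same structure). */
static int register_zone(int vm_fd, uint64_t addr, uint32_t size)
{
	struct kvm_coalesced_mmio_zone zone = { .addr = addr, .size = size };

	return ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
}

/*
 * Drain pending entries after a vcpu exit.  'run' is the mmap'ed kvm_run
 * area of a vcpu; the ring starts 'ring_pages' pages into that mapping,
 * where ring_pages is the value returned by
 * KVM_CHECK_EXTENSION(KVM_CAP_COALESCED_MMIO).
 */
static void drain_ring(struct kvm_run *run, long ring_pages, long page_size,
		       void (*handle)(uint64_t addr, const void *data, uint32_t len))
{
	struct kvm_coalesced_mmio_ring *ring =
		(struct kvm_coalesced_mmio_ring *)((char *)run + ring_pages * page_size);
	/* Same formula the kernel uses for KVM_COALESCED_MMIO_MAX. */
	long max = (page_size - sizeof(*ring)) / sizeof(struct kvm_coalesced_mmio);

	while (ring->first != ring->last) {
		struct kvm_coalesced_mmio *e = &ring->coalesced_mmio[ring->first];

		handle(e->phys_addr, e->data, e->len);
		/* Free the slot only after its data has been consumed. */
		ring->first = (ring->first + 1) % max;
	}
}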