mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-19 10:14:23 +08:00
b49bdc8602
When releasing the vfio-ccw mdev, we currently do not release any existing channel program and its pinned pages. This can lead to the following warning: [1038876.561565] WARNING: CPU: 2 PID: 144727 at drivers/vfio/vfio_iommu_type1.c:1494 vfio_sanity_check_pfn_list+0x40/0x70 [vfio_iommu_type1] .... 1038876.561921] Call Trace: [1038876.561935] ([<00000009897fb870>] 0x9897fb870) [1038876.561949] [<000003ff8013bf62>] vfio_iommu_type1_detach_group+0xda/0x2f0 [vfio_iommu_type1] [1038876.561965] [<000003ff8007b634>] __vfio_group_unset_container+0x64/0x190 [vfio] [1038876.561978] [<000003ff8007b87e>] vfio_group_put_external_user+0x26/0x38 [vfio] [1038876.562024] [<000003ff806fc608>] kvm_vfio_group_put_external_user+0x40/0x60 [kvm] [1038876.562045] [<000003ff806fcb9e>] kvm_vfio_destroy+0x5e/0xd0 [kvm] [1038876.562065] [<000003ff806f63fc>] kvm_put_kvm+0x2a4/0x3d0 [kvm] [1038876.562083] [<000003ff806f655e>] kvm_vm_release+0x36/0x48 [kvm] [1038876.562098] [<00000000003c2dc4>] __fput+0x144/0x228 [1038876.562113] [<000000000016ee82>] task_work_run+0x8a/0xd8 [1038876.562125] [<000000000014c7a8>] do_exit+0x5d8/0xd90 [1038876.562140] [<000000000014d084>] do_group_exit+0xc4/0xc8 [1038876.562155] [<000000000015c046>] get_signal+0x9ae/0xa68 [1038876.562169] [<0000000000108d66>] do_signal+0x66/0x768 [1038876.562185] [<0000000000b9e37e>] system_call+0x1ea/0x2d8 [1038876.562195] 2 locks held by qemu-system-s39/144727: [1038876.562205] #0: 00000000537abaf9 (&container->group_lock){++++}, at: __vfio_group_unset_container+0x3c/0x190 [vfio] [1038876.562230] #1: 00000000670008b5 (&iommu->lock){+.+.}, at: vfio_iommu_type1_detach_group+0x36/0x2f0 [vfio_iommu_type1] [1038876.562250] Last Breaking-Event-Address: [1038876.562262] [<000003ff8013aa24>] vfio_sanity_check_pfn_list+0x3c/0x70 [vfio_iommu_type1] [1038876.562272] irq event stamp: 4236481 [1038876.562287] hardirqs last enabled at (4236489): [<00000000001cee7a>] console_unlock+0x6d2/0x740 [1038876.562299] hardirqs last disabled at 
(4236496): [<00000000001ce87e>] console_unlock+0xd6/0x740 [1038876.562311] softirqs last enabled at (4234162): [<0000000000b9fa1e>] __do_softirq+0x556/0x598 [1038876.562325] softirqs last disabled at (4234153): [<000000000014e4cc>] irq_exit+0xac/0x108 [1038876.562337] ---[ end trace 6c96d467b1c3ca06 ]--- Similarly we do not free the channel program when we are removing the vfio-ccw device. Let's fix this by resetting the device and freeing the channel program and pinned pages in the release path. For the remove path we can just quiesce the device, since in the remove path the mediated device is going away for good and so we don't need to do a full reset. Signed-off-by: Farhan Ali <alifm@linux.ibm.com> Message-Id: <ae9f20dc8873f2027f7b3c5d2aaa0bdfe06850b8.1554756534.git.alifm@linux.ibm.com> Acked-by: Eric Farman <farman@linux.ibm.com> Signed-off-by: Cornelia Huck <cohuck@redhat.com>
588 lines
14 KiB
C
588 lines
14 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/*
|
|
* Physical device callbacks for vfio_ccw
|
|
*
|
|
* Copyright IBM Corp. 2017
|
|
* Copyright Red Hat, Inc. 2019
|
|
*
|
|
* Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
|
|
* Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
|
|
* Cornelia Huck <cohuck@redhat.com>
|
|
*/
|
|
|
|
#include <linux/vfio.h>
|
|
#include <linux/mdev.h>
|
|
#include <linux/nospec.h>
|
|
#include <linux/slab.h>
|
|
|
|
#include "vfio_ccw_private.h"
|
|
|
|
/*
 * Reset the subchannel backing the mediated device: quiesce any ongoing
 * I/O, then re-enable the subchannel and move the device to IDLE state.
 * Returns 0 on success or a negative errno from quiesce/enable.
 */
static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private;
	struct subchannel *sch;
	int ret;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	sch = private->sch;
	/*
	 * TODO:
	 * In the current stage, some things like "no I/O running" and "no
	 * interrupt pending" are clear, but we are not sure what other state
	 * we need to care about.
	 * There are still a lot more instructions need to be handled. We
	 * should come back here later.
	 */
	ret = vfio_ccw_sch_quiesce(sch);
	if (ret)
		return ret;

	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (!ret)
		private->state = VFIO_CCW_STATE_IDLE;

	return ret;
}
|
|
|
|
static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
|
|
unsigned long action,
|
|
void *data)
|
|
{
|
|
struct vfio_ccw_private *private =
|
|
container_of(nb, struct vfio_ccw_private, nb);
|
|
|
|
/*
|
|
* Vendor drivers MUST unpin pages in response to an
|
|
* invalidation.
|
|
*/
|
|
if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
|
|
struct vfio_iommu_type1_dma_unmap *unmap = data;
|
|
|
|
if (!cp_iova_pinned(&private->cp, unmap->iova))
|
|
return NOTIFY_OK;
|
|
|
|
if (vfio_ccw_mdev_reset(private->mdev))
|
|
return NOTIFY_BAD;
|
|
|
|
cp_free(&private->cp);
|
|
return NOTIFY_OK;
|
|
}
|
|
|
|
return NOTIFY_DONE;
|
|
}
|
|
|
|
/* sysfs 'name' attribute of the mdev type: a human-readable type name. */
static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
{
	return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
}
static MDEV_TYPE_ATTR_RO(name);
|
|
|
|
/* sysfs 'device_api' attribute: which vfio device API this type speaks. */
static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
			       char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
}
static MDEV_TYPE_ATTR_RO(device_api);
|
|
|
|
/*
 * sysfs 'available_instances' attribute: how many more mediated devices
 * can currently be created for this subchannel (tracked in private->avail).
 */
static ssize_t available_instances_show(struct kobject *kobj,
					struct device *dev, char *buf)
{
	struct vfio_ccw_private *private = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", atomic_read(&private->avail));
}
static MDEV_TYPE_ATTR_RO(available_instances);
|
|
|
|
/* Attributes exposed in sysfs for the supported mdev type. */
static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};
|
|
|
|
/* The single supported mdev type, exposed in sysfs as "io". */
static struct attribute_group mdev_type_group = {
	.name = "io",
	.attrs = mdev_types_attrs,
};
|
|
|
|
/* NULL-terminated list of supported mdev type groups (only "io"). */
static struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group,
	NULL,
};
|
|
|
|
static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
|
|
{
|
|
struct vfio_ccw_private *private =
|
|
dev_get_drvdata(mdev_parent_dev(mdev));
|
|
|
|
if (private->state == VFIO_CCW_STATE_NOT_OPER)
|
|
return -ENODEV;
|
|
|
|
if (atomic_dec_if_positive(&private->avail) < 0)
|
|
return -EPERM;
|
|
|
|
private->mdev = mdev;
|
|
private->state = VFIO_CCW_STATE_IDLE;
|
|
|
|
return 0;
|
|
}
|
|
|
|
static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
|
|
{
|
|
struct vfio_ccw_private *private =
|
|
dev_get_drvdata(mdev_parent_dev(mdev));
|
|
|
|
if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
|
|
(private->state != VFIO_CCW_STATE_STANDBY)) {
|
|
if (!vfio_ccw_sch_quiesce(private->sch))
|
|
private->state = VFIO_CCW_STATE_STANDBY;
|
|
/* The state will be NOT_OPER on error. */
|
|
}
|
|
|
|
cp_free(&private->cp);
|
|
private->mdev = NULL;
|
|
atomic_inc(&private->avail);
|
|
|
|
return 0;
|
|
}
|
|
|
|
static int vfio_ccw_mdev_open(struct mdev_device *mdev)
|
|
{
|
|
struct vfio_ccw_private *private =
|
|
dev_get_drvdata(mdev_parent_dev(mdev));
|
|
unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
|
|
int ret;
|
|
|
|
private->nb.notifier_call = vfio_ccw_mdev_notifier;
|
|
|
|
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
|
|
&events, &private->nb);
|
|
if (ret)
|
|
return ret;
|
|
|
|
ret = vfio_ccw_register_async_dev_regions(private);
|
|
if (ret)
|
|
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
|
|
&private->nb);
|
|
return ret;
|
|
}
|
|
|
|
/*
 * Release callback for the mediated device.
 *
 * Per the fix described in the commit message above: reset the device
 * and free the channel program (releasing its pinned pages) here, so
 * no pinned pages remain when the vfio group/container is torn down.
 * Also unregisters the IOMMU notifier and tears down all dynamically
 * registered device regions.
 */
static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));
	int i;

	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_mdev_reset(mdev))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}

	/* Free the channel program and its pinned pages. */
	cp_free(&private->cp);
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				 &private->nb);

	/* Release every additionally registered region, then the array. */
	for (i = 0; i < private->num_regions; i++)
		private->region[i].ops->release(private, &private->region[i]);

	private->num_regions = 0;
	kfree(private->region);
	private->region = NULL;
}
|
|
|
|
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
|
|
char __user *buf, size_t count,
|
|
loff_t *ppos)
|
|
{
|
|
loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
|
|
struct ccw_io_region *region;
|
|
int ret;
|
|
|
|
if (pos + count > sizeof(*region))
|
|
return -EINVAL;
|
|
|
|
mutex_lock(&private->io_mutex);
|
|
region = private->io_region;
|
|
if (copy_to_user(buf, (void *)region + pos, count))
|
|
ret = -EFAULT;
|
|
else
|
|
ret = count;
|
|
mutex_unlock(&private->io_mutex);
|
|
return ret;
|
|
}
|
|
|
|
static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
|
|
char __user *buf,
|
|
size_t count,
|
|
loff_t *ppos)
|
|
{
|
|
unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
|
|
struct vfio_ccw_private *private;
|
|
|
|
private = dev_get_drvdata(mdev_parent_dev(mdev));
|
|
|
|
if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
|
|
return -EINVAL;
|
|
|
|
switch (index) {
|
|
case VFIO_CCW_CONFIG_REGION_INDEX:
|
|
return vfio_ccw_mdev_read_io_region(private, buf, count, ppos);
|
|
default:
|
|
index -= VFIO_CCW_NUM_REGIONS;
|
|
return private->region[index].ops->read(private, buf, count,
|
|
ppos);
|
|
}
|
|
|
|
return -EINVAL;
|
|
}
|
|
|
|
/*
 * Copy an I/O request into the config region and kick the FSM to
 * process it.  Returns the byte count on success, the region's
 * ret_code if the FSM reported an error, -EINVAL for out-of-bounds
 * access, -EFAULT on copy failure, or -EAGAIN when the region is busy.
 */
static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
					     const char __user *buf,
					     size_t count, loff_t *ppos)
{
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_io_region *region;
	int ret;

	if (pos + count > sizeof(*region))
		return -EINVAL;

	/*
	 * Trylock: if another request is being processed, let userspace
	 * retry instead of blocking here.
	 */
	if (!mutex_trylock(&private->io_mutex))
		return -EAGAIN;

	region = private->io_region;
	if (copy_from_user((void *)region + pos, buf, count)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
	/* On an FSM-reported error, drop back to the IDLE state. */
	if (region->ret_code != 0)
		private->state = VFIO_CCW_STATE_IDLE;
	ret = (region->ret_code != 0) ? region->ret_code : count;

out_unlock:
	mutex_unlock(&private->io_mutex);
	return ret;
}
|
|
|
|
static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
|
|
const char __user *buf,
|
|
size_t count,
|
|
loff_t *ppos)
|
|
{
|
|
unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
|
|
struct vfio_ccw_private *private;
|
|
|
|
private = dev_get_drvdata(mdev_parent_dev(mdev));
|
|
|
|
if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
|
|
return -EINVAL;
|
|
|
|
switch (index) {
|
|
case VFIO_CCW_CONFIG_REGION_INDEX:
|
|
return vfio_ccw_mdev_write_io_region(private, buf, count, ppos);
|
|
default:
|
|
index -= VFIO_CCW_NUM_REGIONS;
|
|
return private->region[index].ops->write(private, buf, count,
|
|
ppos);
|
|
}
|
|
|
|
return -EINVAL;
|
|
}
|
|
|
|
static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info,
|
|
struct mdev_device *mdev)
|
|
{
|
|
struct vfio_ccw_private *private;
|
|
|
|
private = dev_get_drvdata(mdev_parent_dev(mdev));
|
|
info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
|
|
info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
|
|
info->num_irqs = VFIO_CCW_NUM_IRQS;
|
|
|
|
return 0;
|
|
}
|
|
|
|
/*
 * Fill in region info for VFIO_DEVICE_GET_REGION_INFO.
 *
 * The config (I/O) region is described directly; every other region is
 * described through a capability chain appended after the fixed info
 * header in the user buffer at @arg.
 */
static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
					 struct mdev_device *mdev,
					 unsigned long arg)
{
	struct vfio_ccw_private *private;
	int i;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	switch (info->index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		info->offset = 0;
		info->size = sizeof(struct ccw_io_region);
		info->flags = VFIO_REGION_INFO_FLAG_READ
			      | VFIO_REGION_INFO_FLAG_WRITE;
		return 0;
	default: /* all other regions are handled via capability chain */
	{
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		struct vfio_region_info_cap_type cap_type = {
			.header.id = VFIO_REGION_INFO_CAP_TYPE,
			.header.version = 1 };
		int ret;

		if (info->index >=
		    VFIO_CCW_NUM_REGIONS + private->num_regions)
			return -EINVAL;

		/* Clamp the index against speculative out-of-bounds use. */
		info->index = array_index_nospec(info->index,
						 VFIO_CCW_NUM_REGIONS +
						 private->num_regions);

		i = info->index - VFIO_CCW_NUM_REGIONS;

		info->offset = VFIO_CCW_INDEX_TO_OFFSET(info->index);
		info->size = private->region[i].size;
		info->flags = private->region[i].flags;

		cap_type.type = private->region[i].type;
		cap_type.subtype = private->region[i].subtype;

		ret = vfio_info_add_capability(&caps, &cap_type.header,
					       sizeof(cap_type));
		if (ret)
			return ret;

		info->flags |= VFIO_REGION_INFO_FLAG_CAPS;
		/*
		 * If the user buffer is too small for the capability
		 * chain, report the required size and a zero cap_offset
		 * instead of copying the chain out.
		 */
		if (info->argsz < sizeof(*info) + caps.size) {
			info->argsz = sizeof(*info) + caps.size;
			info->cap_offset = 0;
		} else {
			vfio_info_cap_shift(&caps, sizeof(*info));
			if (copy_to_user((void __user *)arg + sizeof(*info),
					 caps.buf, caps.size)) {
				/* caps.buf is owned here: free on all exits. */
				kfree(caps.buf);
				return -EFAULT;
			}
			info->cap_offset = sizeof(*info);
		}

		kfree(caps.buf);

	}
	}
	return 0;
}
|
|
|
|
static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
|
|
{
|
|
if (info->index != VFIO_CCW_IO_IRQ_INDEX)
|
|
return -EINVAL;
|
|
|
|
info->count = 1;
|
|
info->flags = VFIO_IRQ_INFO_EVENTFD;
|
|
|
|
return 0;
|
|
}
|
|
|
|
/*
 * Configure the I/O interrupt trigger for VFIO_DEVICE_SET_IRQS.
 *
 * Only ACTION_TRIGGER is supported.  Depending on the data type this
 * either signals the currently installed eventfd (NONE unconditionally,
 * BOOL when the user passed a non-zero value) or installs, replaces or
 * clears the eventfd context used to notify userspace (EVENTFD).
 */
static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
				  uint32_t flags,
				  void __user *data)
{
	struct vfio_ccw_private *private;
	struct eventfd_ctx **ctx;

	if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
		return -EINVAL;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	ctx = &private->io_trigger;

	switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
	{
		/* Fire the trigger if one is installed. */
		if (*ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	}
	case VFIO_IRQ_SET_DATA_BOOL:
	{
		uint8_t trigger;

		if (get_user(trigger, (uint8_t __user *)data))
			return -EFAULT;

		/* Fire only when userspace passed a non-zero value. */
		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	}
	case VFIO_IRQ_SET_DATA_EVENTFD:
	{
		int32_t fd;

		if (get_user(fd, (int32_t __user *)data))
			return -EFAULT;

		if (fd == -1) {
			/* fd == -1 means: tear down the trigger. */
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			/* Drop a previously installed trigger, if any. */
			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		} else
			return -EINVAL;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
|
|
|
|
int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
|
|
unsigned int subtype,
|
|
const struct vfio_ccw_regops *ops,
|
|
size_t size, u32 flags, void *data)
|
|
{
|
|
struct vfio_ccw_region *region;
|
|
|
|
region = krealloc(private->region,
|
|
(private->num_regions + 1) * sizeof(*region),
|
|
GFP_KERNEL);
|
|
if (!region)
|
|
return -ENOMEM;
|
|
|
|
private->region = region;
|
|
private->region[private->num_regions].type = VFIO_REGION_TYPE_CCW;
|
|
private->region[private->num_regions].subtype = subtype;
|
|
private->region[private->num_regions].ops = ops;
|
|
private->region[private->num_regions].size = size;
|
|
private->region[private->num_regions].flags = flags;
|
|
private->region[private->num_regions].data = data;
|
|
|
|
private->num_regions++;
|
|
|
|
return 0;
|
|
}
|
|
|
|
static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
|
|
unsigned int cmd,
|
|
unsigned long arg)
|
|
{
|
|
int ret = 0;
|
|
unsigned long minsz;
|
|
|
|
switch (cmd) {
|
|
case VFIO_DEVICE_GET_INFO:
|
|
{
|
|
struct vfio_device_info info;
|
|
|
|
minsz = offsetofend(struct vfio_device_info, num_irqs);
|
|
|
|
if (copy_from_user(&info, (void __user *)arg, minsz))
|
|
return -EFAULT;
|
|
|
|
if (info.argsz < minsz)
|
|
return -EINVAL;
|
|
|
|
ret = vfio_ccw_mdev_get_device_info(&info, mdev);
|
|
if (ret)
|
|
return ret;
|
|
|
|
return copy_to_user((void __user *)arg, &info, minsz);
|
|
}
|
|
case VFIO_DEVICE_GET_REGION_INFO:
|
|
{
|
|
struct vfio_region_info info;
|
|
|
|
minsz = offsetofend(struct vfio_region_info, offset);
|
|
|
|
if (copy_from_user(&info, (void __user *)arg, minsz))
|
|
return -EFAULT;
|
|
|
|
if (info.argsz < minsz)
|
|
return -EINVAL;
|
|
|
|
ret = vfio_ccw_mdev_get_region_info(&info, mdev, arg);
|
|
if (ret)
|
|
return ret;
|
|
|
|
return copy_to_user((void __user *)arg, &info, minsz);
|
|
}
|
|
case VFIO_DEVICE_GET_IRQ_INFO:
|
|
{
|
|
struct vfio_irq_info info;
|
|
|
|
minsz = offsetofend(struct vfio_irq_info, count);
|
|
|
|
if (copy_from_user(&info, (void __user *)arg, minsz))
|
|
return -EFAULT;
|
|
|
|
if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS)
|
|
return -EINVAL;
|
|
|
|
ret = vfio_ccw_mdev_get_irq_info(&info);
|
|
if (ret)
|
|
return ret;
|
|
|
|
if (info.count == -1)
|
|
return -EINVAL;
|
|
|
|
return copy_to_user((void __user *)arg, &info, minsz);
|
|
}
|
|
case VFIO_DEVICE_SET_IRQS:
|
|
{
|
|
struct vfio_irq_set hdr;
|
|
size_t data_size;
|
|
void __user *data;
|
|
|
|
minsz = offsetofend(struct vfio_irq_set, count);
|
|
|
|
if (copy_from_user(&hdr, (void __user *)arg, minsz))
|
|
return -EFAULT;
|
|
|
|
ret = vfio_set_irqs_validate_and_prepare(&hdr, 1,
|
|
VFIO_CCW_NUM_IRQS,
|
|
&data_size);
|
|
if (ret)
|
|
return ret;
|
|
|
|
data = (void __user *)(arg + minsz);
|
|
return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, data);
|
|
}
|
|
case VFIO_DEVICE_RESET:
|
|
return vfio_ccw_mdev_reset(mdev);
|
|
default:
|
|
return -ENOTTY;
|
|
}
|
|
}
|
|
|
|
/* Callbacks wiring vfio-ccw into the mediated device (mdev) framework. */
static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
	.owner = THIS_MODULE,
	.supported_type_groups = mdev_type_groups,
	.create = vfio_ccw_mdev_create,
	.remove = vfio_ccw_mdev_remove,
	.open = vfio_ccw_mdev_open,
	.release = vfio_ccw_mdev_release,
	.read = vfio_ccw_mdev_read,
	.write = vfio_ccw_mdev_write,
	.ioctl = vfio_ccw_mdev_ioctl,
};
|
|
|
|
/* Register the subchannel as an mdev parent device. */
int vfio_ccw_mdev_reg(struct subchannel *sch)
{
	return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
}
|
|
|
|
/* Unregister the subchannel from the mdev framework. */
void vfio_ccw_mdev_unreg(struct subchannel *sch)
{
	mdev_unregister_device(&sch->dev);
}
|