// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bus.
 *
 * Copyright (c) 2020, Red Hat. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/vdpa.h>
#include <uapi/linux/vdpa.h>
#include <net/genetlink.h>
#include <linux/mod_devicetable.h>
#include <linux/virtio_ids.h>

static LIST_HEAD(mdev_head);
/* A global rwsem that protects vdpa management device and device level operations. */
static DECLARE_RWSEM(vdpa_dev_lock);
static DEFINE_IDA(vdpa_index_ida);

void vdpa_set_status(struct vdpa_device *vdev, u8 status)
{
	down_write(&vdev->cf_lock);
	vdev->config->set_status(vdev, status);
	up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL(vdpa_set_status);
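
/*
 * Example (illustrative sketch, not part of this file): a vDPA bus driver
 * normally drives the status byte through this helper so that cf_lock
 * serializes status changes against config space access, e.g.:
 *
 *	u8 status = vdev->config->get_status(vdev);
 *
 *	vdpa_set_status(vdev, status | VIRTIO_CONFIG_S_FEATURES_OK);
 */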

static struct genl_family vdpa_nl_family;

static int vdpa_dev_probe(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
	const struct vdpa_config_ops *ops = vdev->config;
	u32 max_num, min_num = 1;
	int ret = 0;

	d->dma_mask = &d->coherent_dma_mask;
	ret = dma_set_mask_and_coherent(d, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	max_num = ops->get_vq_num_max(vdev);
	if (ops->get_vq_num_min)
		min_num = ops->get_vq_num_min(vdev);
	if (max_num < min_num)
		return -EINVAL;

	if (drv && drv->probe)
		ret = drv->probe(vdev);

	return ret;
}

static void vdpa_dev_remove(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);

	if (drv && drv->remove)
		drv->remove(vdev);
}

static int vdpa_dev_match(struct device *dev, struct device_driver *drv)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);

	/* Check override first, and if set, only use the named driver */
	if (vdev->driver_override)
		return strcmp(vdev->driver_override, drv->name) == 0;

	/* Currently devices must be supported by all vDPA bus drivers */
	return 1;
}

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);
	int ret;

	ret = driver_set_override(dev, &vdev->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", vdev->driver_override);
	device_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *vdpa_dev_attrs[] = {
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group vdpa_dev_group = {
	.attrs = vdpa_dev_attrs,
};
__ATTRIBUTE_GROUPS(vdpa_dev);

static const struct bus_type vdpa_bus = {
	.name = "vdpa",
	.dev_groups = vdpa_dev_groups,
	.match = vdpa_dev_match,
	.probe = vdpa_dev_probe,
	.remove = vdpa_dev_remove,
};
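
/*
 * Example (illustrative, not part of this file): driver_override above is the
 * standard device-core mechanism for pinning a vDPA device to one bus driver.
 * From userspace this looks roughly like:
 *
 *	# echo vhost_vdpa > /sys/bus/vdpa/devices/vdpa0/driver_override
 *	# echo vdpa0 > /sys/bus/vdpa/drivers_probe
 *
 * The device name "vdpa0" and the target driver name are only examples; the
 * available driver names depend on which vDPA bus drivers are built.
 */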

static void vdpa_release_dev(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	const struct vdpa_config_ops *ops = vdev->config;

	if (ops->free)
		ops->free(vdev);

	ida_free(&vdpa_index_ida, vdev->index);
	kfree(vdev->driver_override);
	kfree(vdev);
}

/**
 * __vdpa_alloc_device - allocate and initialize a vDPA device
 * This allows the driver to do some preparation after the device is
 * initialized but before it is registered.
 * @parent: the parent device
 * @config: the bus operations that are supported by this device
 * @ngroups: number of groups supported by this device
 * @nas: number of address spaces supported by this device
 * @size: size of the parent structure that contains private data
 * @name: name of the vdpa device; optional.
 * @use_va: indicate whether virtual address must be used by this device
 *
 * Drivers should use the vdpa_alloc_device() wrapper macro instead of
 * using this directly.
 *
 * Return: Returns an error when parent/config/dma_dev is not set or an ida
 * allocation fails.
 */
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
					const struct vdpa_config_ops *config,
					unsigned int ngroups, unsigned int nas,
					size_t size, const char *name,
					bool use_va)
{
	struct vdpa_device *vdev;
	int err = -EINVAL;

	if (!config)
		goto err;

	if (!!config->dma_map != !!config->dma_unmap)
		goto err;

	/* It should only work for devices that use an on-chip IOMMU */
	if (use_va && !(config->dma_map || config->set_map))
		goto err;

	err = -ENOMEM;
	vdev = kzalloc(size, GFP_KERNEL);
	if (!vdev)
		goto err;

	err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);
	if (err < 0)
		goto err_ida;

	vdev->dev.bus = &vdpa_bus;
	vdev->dev.parent = parent;
	vdev->dev.release = vdpa_release_dev;
	vdev->index = err;
	vdev->config = config;
	vdev->features_valid = false;
	vdev->use_va = use_va;
	vdev->ngroups = ngroups;
	vdev->nas = nas;

	if (name)
		err = dev_set_name(&vdev->dev, "%s", name);
	else
		err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
	if (err)
		goto err_name;

	init_rwsem(&vdev->cf_lock);
	device_initialize(&vdev->dev);

	return vdev;

err_name:
	ida_free(&vdpa_index_ida, vdev->index);
err_ida:
	kfree(vdev);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
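
/*
 * Illustrative sketch (not part of this file): a parent driver usually embeds
 * struct vdpa_device as the first member of its own state and allocates it
 * through the vdpa_alloc_device() wrapper rather than calling
 * __vdpa_alloc_device() directly. The structure and ops names below are
 * hypothetical:
 *
 *	struct my_vdpa {
 *		struct vdpa_device vdpa;	// must be the first member
 *		...
 *	};
 *
 *	struct my_vdpa *my = vdpa_alloc_device(struct my_vdpa, vdpa, parent,
 *					       &my_config_ops, 1, 1, name, false);
 *	if (IS_ERR(my))
 *		return PTR_ERR(my);
 */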

static int vdpa_name_match(struct device *dev, const void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);

	return (strcmp(dev_name(&vdev->dev), data) == 0);
}

static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	struct device *dev;

	vdev->nvqs = nvqs;

	lockdep_assert_held(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
	if (dev) {
		put_device(dev);
		return -EEXIST;
	}
	return device_add(&vdev->dev);
}

/**
 * _vdpa_register_device - register a vDPA device with the vdpa lock held
 * The caller must have made a successful call to vdpa_alloc_device() before.
 * The caller must invoke this routine in the management device dev_add()
 * callback after setting up a valid mgmtdev for this vdpa device.
 * @vdev: the vdpa device to be registered to the vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when it fails to add the device to the vDPA bus
 */
int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	if (!vdev->mdev)
		return -EINVAL;

	return __vdpa_register_device(vdev, nvqs);
}
EXPORT_SYMBOL_GPL(_vdpa_register_device);

/**
 * vdpa_register_device - register a vDPA device
 * Callers must have made a successful call to vdpa_alloc_device() before.
 * @vdev: the vdpa device to be registered to the vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when it fails to add the device to the vDPA bus
 */
int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	int err;

	down_write(&vdpa_dev_lock);
	err = __vdpa_register_device(vdev, nvqs);
	up_write(&vdpa_dev_lock);
	return err;
}
EXPORT_SYMBOL_GPL(vdpa_register_device);
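
/*
 * Illustrative flow (not part of this file): after a successful
 * vdpa_alloc_device() the device is typically exposed to the bus with
 * vdpa_register_device() and torn down with vdpa_unregister_device().
 * Management-device implementations use the _vdpa_register_device() /
 * _vdpa_unregister_device() variants from their dev_add()/dev_del()
 * callbacks instead, since those run with vdpa_dev_lock already held.
 * "my" below is a hypothetical parent structure embedding struct vdpa_device:
 *
 *	err = vdpa_register_device(&my->vdpa, nvqs);
 *	if (err) {
 *		put_device(&my->vdpa.dev);	// frees via vdpa_release_dev()
 *		return err;
 *	}
 */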

/**
 * _vdpa_unregister_device - unregister a vDPA device
 * The caller must invoke this routine as part of the management device
 * dev_del() callback.
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void _vdpa_unregister_device(struct vdpa_device *vdev)
{
	lockdep_assert_held(&vdpa_dev_lock);
	WARN_ON(!vdev->mdev);
	device_unregister(&vdev->dev);
}
EXPORT_SYMBOL_GPL(_vdpa_unregister_device);

/**
 * vdpa_unregister_device - unregister a vDPA device
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void vdpa_unregister_device(struct vdpa_device *vdev)
{
	down_write(&vdpa_dev_lock);
	device_unregister(&vdev->dev);
	up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);

/**
 * __vdpa_register_driver - register a vDPA device driver
 * @drv: the vdpa device driver to be registered
 * @owner: module owner of the driver
 *
 * Return: Returns an error when the registration fails
 */
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
{
	drv->driver.bus = &vdpa_bus;
	drv->driver.owner = owner;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__vdpa_register_driver);

/**
 * vdpa_unregister_driver - unregister a vDPA device driver
 * @drv: the vdpa device driver to be unregistered
 */
void vdpa_unregister_driver(struct vdpa_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_driver);

/**
 * vdpa_mgmtdev_register - register a vdpa management device
 *
 * @mdev: Pointer to vdpa management device
 * vdpa_mgmtdev_register() registers a vdpa management device which supports
 * vdpa device management.
 * Return: Returns 0 on success, or an error when the required callback ops
 *         are not initialized.
 */
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
{
	if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
		return -EINVAL;

	INIT_LIST_HEAD(&mdev->list);
	down_write(&vdpa_dev_lock);
	list_add_tail(&mdev->list, &mdev_head);
	up_write(&vdpa_dev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);
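
/*
 * Illustrative sketch (not part of this file): a vendor driver exposes a
 * management device by filling in struct vdpa_mgmt_dev and registering it.
 * All names below are hypothetical:
 *
 *	static const struct virtio_device_id my_id_table[] = {
 *		{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
 *		{ 0 },
 *	};
 *
 *	static const struct vdpa_mgmtdev_ops my_mgmtdev_ops = {
 *		.dev_add = my_dev_add,		// ends with _vdpa_register_device()
 *		.dev_del = my_dev_del,		// calls _vdpa_unregister_device()
 *	};
 *
 *	mgmtdev.device = parent_dev;
 *	mgmtdev.ops = &my_mgmtdev_ops;
 *	mgmtdev.id_table = my_id_table;
 *	err = vdpa_mgmtdev_register(&mgmtdev);
 */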

static int vdpa_match_remove(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_mgmt_dev *mdev = vdev->mdev;

	if (mdev == data)
		mdev->ops->dev_del(mdev, vdev);
	return 0;
}

void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
{
	down_write(&vdpa_dev_lock);

	list_del(&mdev->list);

	/* Filter out all the entries that belong to this management device and delete them. */
	bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);

	up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);

static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
				     unsigned int offset,
				     void *buf, unsigned int len)
{
	const struct vdpa_config_ops *ops = vdev->config;

	/*
	 * Config accesses aren't supposed to trigger before features are set.
	 * If it does happen we assume a legacy guest.
	 */
	if (!vdev->features_valid)
		vdpa_set_features_unlocked(vdev, 0);
	ops->get_config(vdev, offset, buf, len);
}

/**
 * vdpa_get_config - Get one or more device configuration fields.
 * @vdev: vdpa device to operate on
 * @offset: starting byte offset of the field
 * @buf: buffer pointer to read to
 * @len: length of the configuration fields in bytes
 */
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
		     void *buf, unsigned int len)
{
	down_read(&vdev->cf_lock);
	vdpa_get_config_unlocked(vdev, offset, buf, len);
	up_read(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_get_config);

/**
 * vdpa_set_config - Set one or more device configuration fields.
 * @vdev: vdpa device to operate on
 * @offset: starting byte offset of the field
 * @buf: buffer pointer to read from
 * @length: length of the configuration fields in bytes
 */
void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
		     const void *buf, unsigned int length)
{
	down_write(&vdev->cf_lock);
	vdev->config->set_config(vdev, offset, buf, length);
	up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_set_config);
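
/*
 * Example (illustrative, not part of this file): reading a virtio-net
 * device's config space through the locked accessor:
 *
 *	struct virtio_net_config cfg;
 *
 *	vdpa_get_config(vdev, 0, &cfg, sizeof(cfg));
 *	// cfg.mac now holds a snapshot taken under cf_lock
 */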

static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev,
				 const char *busname, const char *devname)
{
	/* Bus name is optional for simulated management device, so ignore the
	 * device with bus if bus attribute is provided.
	 */
	if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus))
		return false;

	if (!busname && strcmp(dev_name(mdev->device), devname) == 0)
		return true;

	if (busname && (strcmp(mdev->device->bus->name, busname) == 0) &&
	    (strcmp(dev_name(mdev->device), devname) == 0))
		return true;

	return false;
}

static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs)
{
	struct vdpa_mgmt_dev *mdev;
	const char *busname = NULL;
	const char *devname;

	if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME])
		return ERR_PTR(-EINVAL);
	devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]);
	if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME])
		busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]);

	list_for_each_entry(mdev, &mdev_head, list) {
		if (mgmtdev_handle_match(mdev, busname, devname))
			return mdev;
	}
	return ERR_PTR(-ENODEV);
}

static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev)
{
	if (mdev->device->bus &&
	    nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name))
		return -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device)))
		return -EMSGSIZE;
	return 0;
}

static u64 vdpa_mgmtdev_get_classes(const struct vdpa_mgmt_dev *mdev,
				    unsigned int *nclasses)
{
	u64 supported_classes = 0;
	unsigned int n = 0;

	for (int i = 0; mdev->id_table[i].device; i++) {
		if (mdev->id_table[i].device > 63)
			continue;
		supported_classes |= BIT_ULL(mdev->id_table[i].device);
		n++;
	}
	if (nclasses)
		*nclasses = n;

	return supported_classes;
}

static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg,
			     u32 portid, u32 seq, int flags)
{
	void *hdr;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW);
	if (!hdr)
		return -EMSGSIZE;
	err = vdpa_nl_mgmtdev_handle_fill(msg, mdev);
	if (err)
		goto msg_err;

	if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES,
			      vdpa_mgmtdev_get_classes(mdev, NULL),
			      VDPA_ATTR_UNSPEC)) {
		err = -EMSGSIZE;
		goto msg_err;
	}
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MGMTDEV_MAX_VQS,
			mdev->max_supported_vqs)) {
		err = -EMSGSIZE;
		goto msg_err;
	}
	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_SUPPORTED_FEATURES,
			      mdev->supported_features, VDPA_ATTR_PAD)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}

static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct sk_buff *msg;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	down_read(&vdpa_dev_lock);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		up_read(&vdpa_dev_lock);
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device");
		err = PTR_ERR(mdev);
		goto out;
	}

	err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0);
	up_read(&vdpa_dev_lock);
	if (err)
		goto out;
	err = genlmsg_reply(msg, info);
	return err;

out:
	nlmsg_free(msg);
	return err;
}

static int
vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_mgmt_dev *mdev;
	int start = cb->args[0];
	int idx = 0;
	int err;

	down_read(&vdpa_dev_lock);
	list_for_each_entry(mdev, &mdev_head, list) {
		if (idx < start) {
			idx++;
			continue;
		}
		err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, NLM_F_MULTI);
		if (err)
			goto out;
		idx++;
	}
out:
	up_read(&vdpa_dev_lock);
	cb->args[0] = idx;
	return msg->len;
}

#define VDPA_DEV_NET_ATTRS_MASK (BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
				 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)     | \
				 BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))

/*
 * Bitmask for all per-device features: feature bits VIRTIO_TRANSPORT_F_START
 * through VIRTIO_TRANSPORT_F_END are unset, i.e. 0xfffffc000fffffff for
 * all 64bit features. If the features are extended beyond 64 bits, or new
 * "holes" are reserved for other types of features than per-device, this
 * macro would have to be updated.
 */
#define VIRTIO_DEVICE_F_MASK (~0ULL << (VIRTIO_TRANSPORT_F_END + 1) | \
			      ((1ULL << VIRTIO_TRANSPORT_F_START) - 1))
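
/*
 * Worked example for the mask above, assuming the current definitions of
 * VIRTIO_TRANSPORT_F_START (28) and VIRTIO_TRANSPORT_F_END (41):
 *
 *	(~0ULL << 42)        = 0xfffffc0000000000	// bits 42..63
 *	((1ULL << 28) - 1)   = 0x000000000fffffff	// bits 0..27
 *	VIRTIO_DEVICE_F_MASK = 0xfffffc000fffffff
 *
 * which matches the value quoted in the comment above.
 */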

static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_dev_set_config config = {};
	struct nlattr **nl_attrs = info->attrs;
	struct vdpa_mgmt_dev *mdev;
	unsigned int ncls = 0;
	const u8 *macaddr;
	const char *name;
	u64 classes;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
		macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
		memcpy(config.net.mac, macaddr, sizeof(config.net.mac));
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
	}
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) {
		config.net.mtu =
			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]);
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
	}
	if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]) {
		config.net.max_vq_pairs =
			nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]);
		if (!config.net.max_vq_pairs) {
			NL_SET_ERR_MSG_MOD(info->extack,
					   "At least one pair of VQs is required");
			return -EINVAL;
		}
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
	}
	if (nl_attrs[VDPA_ATTR_DEV_FEATURES]) {
		u64 missing = 0x0ULL;

		config.device_features =
			nla_get_u64(nl_attrs[VDPA_ATTR_DEV_FEATURES]);
		if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR] &&
		    !(config.device_features & BIT_ULL(VIRTIO_NET_F_MAC)))
			missing |= BIT_ULL(VIRTIO_NET_F_MAC);
		if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU] &&
		    !(config.device_features & BIT_ULL(VIRTIO_NET_F_MTU)))
			missing |= BIT_ULL(VIRTIO_NET_F_MTU);
		if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP] &&
		    config.net.max_vq_pairs > 1 &&
		    !(config.device_features & BIT_ULL(VIRTIO_NET_F_MQ)))
			missing |= BIT_ULL(VIRTIO_NET_F_MQ);
		if (missing) {
			NL_SET_ERR_MSG_FMT_MOD(info->extack,
					       "Missing features 0x%llx for provided attributes",
					       missing);
			return -EINVAL;
		}
		config.mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES);
	}

	/* Skip the capability check if the user didn't ask to configure any
	 * device networking attributes. It is likely that the user has used
	 * a device specific method to configure such attributes, or is using
	 * the device default attributes.
	 */
	if ((config.mask & VDPA_DEV_NET_ATTRS_MASK) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	down_write(&vdpa_dev_lock);
	mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
	if (IS_ERR(mdev)) {
		NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device");
		err = PTR_ERR(mdev);
		goto err;
	}

	if ((config.mask & mdev->config_attr_mask) != config.mask) {
		NL_SET_ERR_MSG_FMT_MOD(info->extack,
				       "Some provided attributes are not supported: 0x%llx",
				       config.mask & ~mdev->config_attr_mask);
		err = -EOPNOTSUPP;
		goto err;
	}

	classes = vdpa_mgmtdev_get_classes(mdev, &ncls);
	if (config.mask & VDPA_DEV_NET_ATTRS_MASK &&
	    !(classes & BIT_ULL(VIRTIO_ID_NET))) {
		NL_SET_ERR_MSG_MOD(info->extack,
				   "Network class attributes provided on unsupported management device");
		err = -EINVAL;
		goto err;
	}
	if (!(config.mask & VDPA_DEV_NET_ATTRS_MASK) &&
	    config.mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES) &&
	    classes & BIT_ULL(VIRTIO_ID_NET) && ncls > 1 &&
	    config.device_features & VIRTIO_DEVICE_F_MASK) {
		NL_SET_ERR_MSG_MOD(info->extack,
				   "Management device supports multi-class while device features specified are ambiguous");
		err = -EINVAL;
		goto err;
	}

	err = mdev->ops->dev_add(mdev, name, &config);
err:
	up_write(&vdpa_dev_lock);
	return err;
}
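
/*
 * Userspace view (illustrative): the doit handlers above and below back the
 * iproute2 "vdpa" tool, which drives them over generic netlink, roughly:
 *
 *	$ vdpa mgmtdev show
 *	$ vdpa dev add name vdpa0 mgmtdev vdpasim_net
 *	$ vdpa dev del vdpa0
 *
 * The management device name "vdpasim_net" is only an example; the available
 * names come from vdpa_mgmtdev_register() callers.
 */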

static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_mgmt_dev *mdev;
	struct vdpa_device *vdev;
	struct device *dev;
	const char *name;
	int err = 0;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);

	down_write(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto dev_err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		NL_SET_ERR_MSG_MOD(info->extack, "Only user created device can be deleted by user");
		err = -EINVAL;
		goto mdev_err;
	}
	mdev = vdev->mdev;
	mdev->ops->dev_del(mdev, vdev);
mdev_err:
	put_device(dev);
dev_err:
	up_write(&vdpa_dev_lock);
	return err;
}

static int
vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
	      int flags, struct netlink_ext_ack *extack)
{
	u16 max_vq_size;
	u16 min_vq_size = 1;
	u32 device_id;
	u32 vendor_id;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW);
	if (!hdr)
		return -EMSGSIZE;

	err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev);
	if (err)
		goto msg_err;

	device_id = vdev->config->get_device_id(vdev);
	vendor_id = vdev->config->get_vendor_id(vdev);
	max_vq_size = vdev->config->get_vq_num_max(vdev);
	if (vdev->config->get_vq_num_min)
		min_vq_size = vdev->config->get_vq_num_min(vdev);

	err = -EMSGSIZE;
	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id))
		goto msg_err;
	if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size))
		goto msg_err;
	if (nla_put_u16(msg, VDPA_ATTR_DEV_MIN_VQ_SIZE, min_vq_size))
		goto msg_err;

	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
	return err;
}

static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct vdpa_device *vdev;
	struct sk_buff *msg;
	const char *devname;
	struct device *dev;
	int err;

	if (!info->attrs[VDPA_ATTR_DEV_NAME])
		return -EINVAL;
	devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	down_read(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(info->extack, "device not found");
		err = -ENODEV;
		goto err;
	}
	vdev = container_of(dev, struct vdpa_device, dev);
	if (!vdev->mdev) {
		err = -EINVAL;
		goto mdev_err;
	}
	err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack);
	if (err)
		goto mdev_err;

	err = genlmsg_reply(msg, info);
	put_device(dev);
	up_read(&vdpa_dev_lock);
	return err;

mdev_err:
	put_device(dev);
err:
	up_read(&vdpa_dev_lock);
	nlmsg_free(msg);
	return err;
}

struct vdpa_dev_dump_info {
	struct sk_buff *msg;
	struct netlink_callback *cb;
	int start_idx;
	int idx;
};

static int vdpa_dev_dump(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_dev_dump_info *info = data;
	int err;

	if (!vdev->mdev)
		return 0;
	if (info->idx < info->start_idx) {
		info->idx++;
		return 0;
	}
	err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
			    info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack);
	if (err)
		return err;

	info->idx++;
	return 0;
}

static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct vdpa_dev_dump_info info;

	info.msg = msg;
	info.cb = cb;
	info.start_idx = cb->args[0];
	info.idx = 0;

	down_read(&vdpa_dev_lock);
	bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump);
	up_read(&vdpa_dev_lock);
	cb->args[0] = info.idx;
	return msg->len;
}

static int vdpa_dev_net_mq_config_fill(struct sk_buff *msg, u64 features,
				       const struct virtio_net_config *config)
{
	u16 val_u16;

	if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0 &&
	    (features & BIT_ULL(VIRTIO_NET_F_RSS)) == 0)
		return 0;

	val_u16 = __virtio16_to_cpu(true, config->max_virtqueue_pairs);

	return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, val_u16);
}

static int vdpa_dev_net_mtu_config_fill(struct sk_buff *msg, u64 features,
					const struct virtio_net_config *config)
{
	u16 val_u16;

	if ((features & BIT_ULL(VIRTIO_NET_F_MTU)) == 0)
		return 0;

	val_u16 = __virtio16_to_cpu(true, config->mtu);

	return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16);
}

static int vdpa_dev_net_mac_config_fill(struct sk_buff *msg, u64 features,
					const struct virtio_net_config *config)
{
	if ((features & BIT_ULL(VIRTIO_NET_F_MAC)) == 0)
		return 0;

	return nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR,
		       sizeof(config->mac), config->mac);
}

static int vdpa_dev_net_status_config_fill(struct sk_buff *msg, u64 features,
					   const struct virtio_net_config *config)
{
	u16 val_u16;

	if ((features & BIT_ULL(VIRTIO_NET_F_STATUS)) == 0)
		return 0;

	val_u16 = __virtio16_to_cpu(true, config->status);
	return nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16);
}

static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg)
{
	struct virtio_net_config config = {};
	u64 features_device;

	vdev->config->get_config(vdev, 0, &config, sizeof(config));

	features_device = vdev->config->get_device_features(vdev);

	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_FEATURES, features_device,
			      VDPA_ATTR_PAD))
		return -EMSGSIZE;

	if (vdpa_dev_net_mtu_config_fill(msg, features_device, &config))
		return -EMSGSIZE;

	if (vdpa_dev_net_mac_config_fill(msg, features_device, &config))
		return -EMSGSIZE;

	if (vdpa_dev_net_status_config_fill(msg, features_device, &config))
		return -EMSGSIZE;

	return vdpa_dev_net_mq_config_fill(msg, features_device, &config);
}

static int
vdpa_dev_blk_capacity_config_fill(struct sk_buff *msg,
				  const struct virtio_blk_config *config)
{
	u64 val_u64;

	val_u64 = __virtio64_to_cpu(true, config->capacity);

	return nla_put_u64_64bit(msg, VDPA_ATTR_DEV_BLK_CFG_CAPACITY,
				 val_u64, VDPA_ATTR_PAD);
}

static int
vdpa_dev_blk_seg_size_config_fill(struct sk_buff *msg, u64 features,
				  const struct virtio_blk_config *config)
{
	u32 val_u32;

	if ((features & BIT_ULL(VIRTIO_BLK_F_SIZE_MAX)) == 0)
		return 0;

	val_u32 = __virtio32_to_cpu(true, config->size_max);

	return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_SEG_SIZE, val_u32);
}

/* fill the block size */
static int
vdpa_dev_blk_block_size_config_fill(struct sk_buff *msg, u64 features,
				    const struct virtio_blk_config *config)
{
	u32 val_u32;

	if ((features & BIT_ULL(VIRTIO_BLK_F_BLK_SIZE)) == 0)
		return 0;

	val_u32 = __virtio32_to_cpu(true, config->blk_size);

	return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_BLK_SIZE, val_u32);
}

static int
vdpa_dev_blk_seg_max_config_fill(struct sk_buff *msg, u64 features,
				 const struct virtio_blk_config *config)
{
	u32 val_u32;

	if ((features & BIT_ULL(VIRTIO_BLK_F_SEG_MAX)) == 0)
		return 0;

	val_u32 = __virtio32_to_cpu(true, config->seg_max);

	return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_SEG_MAX, val_u32);
}

static int vdpa_dev_blk_mq_config_fill(struct sk_buff *msg, u64 features,
				       const struct virtio_blk_config *config)
{
	u16 val_u16;

	if ((features & BIT_ULL(VIRTIO_BLK_F_MQ)) == 0)
		return 0;

	val_u16 = __virtio16_to_cpu(true, config->num_queues);

	return nla_put_u16(msg, VDPA_ATTR_DEV_BLK_CFG_NUM_QUEUES, val_u16);
}

static int vdpa_dev_blk_topology_config_fill(struct sk_buff *msg, u64 features,
					     const struct virtio_blk_config *config)
{
	u16 min_io_size;
	u32 opt_io_size;

	if ((features & BIT_ULL(VIRTIO_BLK_F_TOPOLOGY)) == 0)
		return 0;

	min_io_size = __virtio16_to_cpu(true, config->min_io_size);
	opt_io_size = __virtio32_to_cpu(true, config->opt_io_size);

	if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_PHY_BLK_EXP,
		       config->physical_block_exp))
		return -EMSGSIZE;

	if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_ALIGN_OFFSET,
		       config->alignment_offset))
		return -EMSGSIZE;

	if (nla_put_u16(msg, VDPA_ATTR_DEV_BLK_CFG_MIN_IO_SIZE, min_io_size))
		return -EMSGSIZE;

	if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_OPT_IO_SIZE, opt_io_size))
		return -EMSGSIZE;

	return 0;
}

static int vdpa_dev_blk_discard_config_fill(struct sk_buff *msg, u64 features,
					    const struct virtio_blk_config *config)
{
	u32 val_u32;

	if ((features & BIT_ULL(VIRTIO_BLK_F_DISCARD)) == 0)
		return 0;

	val_u32 = __virtio32_to_cpu(true, config->max_discard_sectors);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_MAX_DISCARD_SEC, val_u32))
		return -EMSGSIZE;

	val_u32 = __virtio32_to_cpu(true, config->max_discard_seg);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_MAX_DISCARD_SEG, val_u32))
		return -EMSGSIZE;

	val_u32 = __virtio32_to_cpu(true, config->discard_sector_alignment);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_DISCARD_SEC_ALIGN, val_u32))
		return -EMSGSIZE;

	return 0;
}

static int
vdpa_dev_blk_write_zeroes_config_fill(struct sk_buff *msg, u64 features,
				      const struct virtio_blk_config *config)
{
	u32 val_u32;

	if ((features & BIT_ULL(VIRTIO_BLK_F_WRITE_ZEROES)) == 0)
		return 0;

	val_u32 = __virtio32_to_cpu(true, config->max_write_zeroes_sectors);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_MAX_WRITE_ZEROES_SEC, val_u32))
		return -EMSGSIZE;

	val_u32 = __virtio32_to_cpu(true, config->max_write_zeroes_seg);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_MAX_WRITE_ZEROES_SEG, val_u32))
		return -EMSGSIZE;

	return 0;
}

static int vdpa_dev_blk_ro_config_fill(struct sk_buff *msg, u64 features)
{
	u8 ro;

	ro = ((features & BIT_ULL(VIRTIO_BLK_F_RO)) == 0) ? 0 : 1;
	if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_READ_ONLY, ro))
		return -EMSGSIZE;

	return 0;
}

static int vdpa_dev_blk_flush_config_fill(struct sk_buff *msg, u64 features)
{
	u8 flush;

	flush = ((features & BIT_ULL(VIRTIO_BLK_F_FLUSH)) == 0) ? 0 : 1;
	if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_FLUSH, flush))
		return -EMSGSIZE;

	return 0;
}

static int vdpa_dev_blk_config_fill(struct vdpa_device *vdev,
				    struct sk_buff *msg)
{
	struct virtio_blk_config config = {};
	u64 features_device;

	vdev->config->get_config(vdev, 0, &config, sizeof(config));

	features_device = vdev->config->get_device_features(vdev);

	if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_FEATURES, features_device,
			      VDPA_ATTR_PAD))
		return -EMSGSIZE;

	if (vdpa_dev_blk_capacity_config_fill(msg, &config))
		return -EMSGSIZE;

	if (vdpa_dev_blk_seg_size_config_fill(msg, features_device, &config))
		return -EMSGSIZE;

	if (vdpa_dev_blk_block_size_config_fill(msg, features_device, &config))
		return -EMSGSIZE;

	if (vdpa_dev_blk_seg_max_config_fill(msg, features_device, &config))
		return -EMSGSIZE;

	if (vdpa_dev_blk_mq_config_fill(msg, features_device, &config))
		return -EMSGSIZE;

	if (vdpa_dev_blk_topology_config_fill(msg, features_device, &config))
		return -EMSGSIZE;

	if (vdpa_dev_blk_discard_config_fill(msg, features_device, &config))
		return -EMSGSIZE;

	if (vdpa_dev_blk_write_zeroes_config_fill(msg, features_device, &config))
		return -EMSGSIZE;

	if (vdpa_dev_blk_ro_config_fill(msg, features_device))
		return -EMSGSIZE;

	if (vdpa_dev_blk_flush_config_fill(msg, features_device))
		return -EMSGSIZE;

	return 0;
}

static int
vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
		     int flags, struct netlink_ext_ack *extack)
{
	u64 features_driver;
	u8 status = 0;
	u32 device_id;
	void *hdr;
	int err;

	down_read(&vdev->cf_lock);
	hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
			  VDPA_CMD_DEV_CONFIG_GET);
	if (!hdr) {
		err = -EMSGSIZE;
		goto out;
	}

	if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	device_id = vdev->config->get_device_id(vdev);
	if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
		err = -EMSGSIZE;
		goto msg_err;
	}

	/* only read driver features after the feature negotiation is done */
	status = vdev->config->get_status(vdev);
	if (status & VIRTIO_CONFIG_S_FEATURES_OK) {
		features_driver = vdev->config->get_driver_features(vdev);
		if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features_driver,
				      VDPA_ATTR_PAD)) {
			err = -EMSGSIZE;
			goto msg_err;
		}
	}

	switch (device_id) {
	case VIRTIO_ID_NET:
		err = vdpa_dev_net_config_fill(vdev, msg);
		break;
	case VIRTIO_ID_BLOCK:
		err = vdpa_dev_blk_config_fill(vdev, msg);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	if (err)
		goto msg_err;

	up_read(&vdev->cf_lock);
	genlmsg_end(msg, hdr);
	return 0;

msg_err:
	genlmsg_cancel(msg, hdr);
out:
	up_read(&vdev->cf_lock);
	return err;
}
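
/*
 * Userspace view (illustrative): vdpa_dev_config_fill() above is what backs
 * "vdpa dev config show", which prints roughly:
 *
 *	$ vdpa dev config show vdpa0
 *	vdpa0: mac 00:11:22:33:44:55 link up link_announce false mtu 1500
 *
 * The exact fields depend on the device class (net or block) and on which
 * feature bits the device offers.
 */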
vdpa: Add support for querying vendor statistics
Allows to read vendor statistics of a vdpa device. The specific
statistics data are received from the upstream driver in the form of an
(attribute name, attribute value) pairs.
An example of statistics for mlx5_vdpa device are:
received_desc - number of descriptors received by the virtqueue
completed_desc - number of descriptors completed by the virtqueue
A descriptor using indirect buffers is still counted as 1. In addition,
N chained descriptors are counted correctly N times as one would expect.
A new callback was added to vdpa_config_ops which provides the means for
the vdpa driver to return statistics results.
The interface allows for reading all the supported virtqueues, including
the control virtqueue if it exists.
Below are some examples taken from mlx5_vdpa which are introduced in the
following patch:
1. Read statistics for the virtqueue at index 1
$ vdpa dev vstats show vdpa-a qidx 1
vdpa-a:
queue_type tx queue_index 1 received_desc 3844836 completed_desc 3844836
2. Read statistics for the virtqueue at index 32
$ vdpa dev vstats show vdpa-a qidx 32
vdpa-a:
queue_type control_vq queue_index 32 received_desc 62 completed_desc 62
3. Read statisitics for the virtqueue at index 0 with json output
$ vdpa -j dev vstats show vdpa-a qidx 0
{"vstats":{"vdpa-a":{
"queue_type":"rx","queue_index":0,"name":"received_desc","value":417776,\
"name":"completed_desc","value":417548}}}
4. Read statistics for the virtqueue at index 0 with preety json output
$ vdpa -jp dev vstats show vdpa-a qidx 0
{
"vstats": {
"vdpa-a": {
"queue_type": "rx",
"queue_index": 0,
"name": "received_desc",
"value": 417776,
"name": "completed_desc",
"value": 417548
}
}
}
Signed-off-by: Eli Cohen <elic@nvidia.com>
Message-Id: <20220518133804.1075129-3-elic@nvidia.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2022-05-18 21:38:00 +08:00
|
|
|
static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
|
|
|
|
struct genl_info *info, u32 index)
|
|
|
|
{
|
|
|
|
struct virtio_net_config config = {};
|
|
|
|
u64 features;
|
|
|
|
u8 status;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
status = vdev->config->get_status(vdev);
|
|
|
|
if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
|
|
|
|
NL_SET_ERR_MSG_MOD(info->extack, "feature negotiation not complete");
|
|
|
|
return -EAGAIN;
|
|
|
|
}
|
|
|
|
vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
|
|
|
|
|
|
|
|
features = vdev->config->get_driver_features(vdev);
|
|
|
|
if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
|
|
|
|
features, VDPA_ATTR_PAD))
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
2022-09-07 14:01:10 +08:00
|
|
|
err = vdpa_dev_net_mq_config_fill(msg, features, &config);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
vdpa: Add support for querying vendor statistics
Allows to read vendor statistics of a vdpa device. The specific
statistics data are received from the upstream driver in the form of an
(attribute name, attribute value) pairs.
An example of statistics for mlx5_vdpa device are:
received_desc - number of descriptors received by the virtqueue
completed_desc - number of descriptors completed by the virtqueue
A descriptor using indirect buffers is still counted as 1. In addition,
N chained descriptors are counted correctly N times as one would expect.
A new callback was added to vdpa_config_ops which provides the means for
the vdpa driver to return statistics results.
The interface allows for reading all the supported virtqueues, including
the control virtqueue if it exists.
Below are some examples taken from mlx5_vdpa which are introduced in the
following patch:
1. Read statistics for the virtqueue at index 1
$ vdpa dev vstats show vdpa-a qidx 1
vdpa-a:
queue_type tx queue_index 1 received_desc 3844836 completed_desc 3844836
2. Read statistics for the virtqueue at index 32
$ vdpa dev vstats show vdpa-a qidx 32
vdpa-a:
queue_type control_vq queue_index 32 received_desc 62 completed_desc 62
3. Read statisitics for the virtqueue at index 0 with json output
$ vdpa -j dev vstats show vdpa-a qidx 0
{"vstats":{"vdpa-a":{
"queue_type":"rx","queue_index":0,"name":"received_desc","value":417776,\
"name":"completed_desc","value":417548}}}
4. Read statistics for the virtqueue at index 0 with preety json output
$ vdpa -jp dev vstats show vdpa-a qidx 0
{
"vstats": {
"vdpa-a": {
"queue_type": "rx",
"queue_index": 0,
"name": "received_desc",
"value": 417776,
"name": "completed_desc",
"value": 417548
}
}
}
Signed-off-by: Eli Cohen <elic@nvidia.com>
Message-Id: <20220518133804.1075129-3-elic@nvidia.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2022-05-18 21:38:00 +08:00
|
|
|
if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg,
|
|
|
|
struct genl_info *info, u32 index)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
2022-05-18 21:38:02 +08:00
|
|
|
down_read(&vdev->cf_lock);
|
vdpa: Add support for querying vendor statistics
Allows to read vendor statistics of a vdpa device. The specific
statistics data are received from the upstream driver in the form of an
(attribute name, attribute value) pairs.
An example of statistics for mlx5_vdpa device are:
received_desc - number of descriptors received by the virtqueue
completed_desc - number of descriptors completed by the virtqueue
A descriptor using indirect buffers is still counted as 1. In addition,
N chained descriptors are counted correctly N times as one would expect.
A new callback was added to vdpa_config_ops which provides the means for
the vdpa driver to return statistics results.
The interface allows for reading all the supported virtqueues, including
the control virtqueue if it exists.
Below are some examples taken from mlx5_vdpa which are introduced in the
following patch:
1. Read statistics for the virtqueue at index 1
$ vdpa dev vstats show vdpa-a qidx 1
vdpa-a:
queue_type tx queue_index 1 received_desc 3844836 completed_desc 3844836
2. Read statistics for the virtqueue at index 32
$ vdpa dev vstats show vdpa-a qidx 32
vdpa-a:
queue_type control_vq queue_index 32 received_desc 62 completed_desc 62
3. Read statisitics for the virtqueue at index 0 with json output
$ vdpa -j dev vstats show vdpa-a qidx 0
{"vstats":{"vdpa-a":{
"queue_type":"rx","queue_index":0,"name":"received_desc","value":417776,\
"name":"completed_desc","value":417548}}}
4. Read statistics for the virtqueue at index 0 with preety json output
$ vdpa -jp dev vstats show vdpa-a qidx 0
{
"vstats": {
"vdpa-a": {
"queue_type": "rx",
"queue_index": 0,
"name": "received_desc",
"value": 417776,
"name": "completed_desc",
"value": 417548
}
}
}
Signed-off-by: Eli Cohen <elic@nvidia.com>
Message-Id: <20220518133804.1075129-3-elic@nvidia.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
2022-05-18 21:38:00 +08:00
|
|
|
if (!vdev->config->get_vendor_vq_stats) {
|
|
|
|
err = -EOPNOTSUPP;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = vdpa_fill_stats_rec(vdev, msg, info, index);
|
|
|
|
out:
|
2022-05-18 21:38:02 +08:00
|
|
|
up_read(&vdev->cf_lock);
|
2022-05-18 21:38:00 +08:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
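/*
 * Build a VDPA_CMD_DEV_VSTATS_GET reply: device name and device id,
 * followed by the vendor statistics of the requested queue. Only
 * virtio-net devices are handled; the queue index is checked against
 * VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX before querying the driver.
 */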
static int vdpa_dev_vendor_stats_fill(struct vdpa_device *vdev,
|
|
|
|
struct sk_buff *msg,
|
|
|
|
struct genl_info *info, u32 index)
|
|
|
|
{
|
|
|
|
u32 device_id;
|
|
|
|
void *hdr;
|
|
|
|
int err;
|
|
|
|
u32 portid = info->snd_portid;
|
|
|
|
u32 seq = info->snd_seq;
|
|
|
|
u32 flags = 0;
|
|
|
|
|
|
|
|
hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
|
|
|
|
VDPA_CMD_DEV_VSTATS_GET);
|
|
|
|
if (!hdr)
|
|
|
|
return -EMSGSIZE;
|
|
|
|
|
|
|
|
if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
|
|
|
|
err = -EMSGSIZE;
|
|
|
|
goto undo_msg;
|
|
|
|
}
|
|
|
|
|
|
|
|
device_id = vdev->config->get_device_id(vdev);
|
|
|
|
if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
|
|
|
|
err = -EMSGSIZE;
|
|
|
|
goto undo_msg;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (device_id) {
|
|
|
|
case VIRTIO_ID_NET:
|
|
|
|
if (index > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) {
|
2023-01-30 17:26:44 +08:00
|
|
|
NL_SET_ERR_MSG_MOD(info->extack, "queue index exceeds max value");
|
2022-05-18 21:38:00 +08:00
|
|
|
err = -ERANGE;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = vendor_stats_fill(vdev, msg, info, index);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
err = -EOPNOTSUPP;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
genlmsg_end(msg, hdr);
|
|
|
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
undo_msg:
|
|
|
|
genlmsg_cancel(msg, hdr);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2021-10-27 01:55:13 +08:00
|
|
|
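/*
 * VDPA_CMD_DEV_CONFIG_GET (doit): look up the named device under
 * vdpa_dev_lock, reject unmanaged devices and reply with a single
 * configuration record.
 */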
static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info)
|
|
|
|
{
|
|
|
|
struct vdpa_device *vdev;
|
|
|
|
struct sk_buff *msg;
|
|
|
|
const char *devname;
|
|
|
|
struct device *dev;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (!info->attrs[VDPA_ATTR_DEV_NAME])
|
|
|
|
return -EINVAL;
|
|
|
|
devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
|
|
|
|
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
|
|
|
|
if (!msg)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2022-05-18 21:38:01 +08:00
|
|
|
down_read(&vdpa_dev_lock);
|
2021-10-27 01:55:13 +08:00
|
|
|
dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
|
|
|
|
if (!dev) {
|
|
|
|
NL_SET_ERR_MSG_MOD(info->extack, "device not found");
|
|
|
|
err = -ENODEV;
|
|
|
|
goto dev_err;
|
|
|
|
}
|
|
|
|
vdev = container_of(dev, struct vdpa_device, dev);
|
|
|
|
if (!vdev->mdev) {
|
|
|
|
NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
|
|
|
|
err = -EINVAL;
|
|
|
|
goto mdev_err;
|
|
|
|
}
|
|
|
|
err = vdpa_dev_config_fill(vdev, msg, info->snd_portid, info->snd_seq,
|
|
|
|
0, info->extack);
|
|
|
|
if (!err)
|
|
|
|
err = genlmsg_reply(msg, info);
|
|
|
|
|
|
|
|
mdev_err:
|
|
|
|
put_device(dev);
|
|
|
|
dev_err:
|
2022-05-18 21:38:01 +08:00
|
|
|
up_read(&vdpa_dev_lock);
|
2021-10-27 01:55:13 +08:00
|
|
|
if (err)
|
|
|
|
nlmsg_free(msg);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
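/*
 * Per-device iterator for the config-get dump path: skip unmanaged
 * devices and entries below the resume index, then emit one
 * NLM_F_MULTI config record.
 */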
static int vdpa_dev_config_dump(struct device *dev, void *data)
|
|
|
|
{
|
|
|
|
struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
|
|
|
|
struct vdpa_dev_dump_info *info = data;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (!vdev->mdev)
|
|
|
|
return 0;
|
|
|
|
if (info->idx < info->start_idx) {
|
|
|
|
info->idx++;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
err = vdpa_dev_config_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
|
|
|
|
info->cb->nlh->nlmsg_seq, NLM_F_MULTI,
|
|
|
|
info->cb->extack);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
info->idx++;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
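/*
 * VDPA_CMD_DEV_CONFIG_GET (dumpit): walk every device on the vdpa bus
 * under vdpa_dev_lock and record the resume index in cb->args[0].
 */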
static int
|
|
|
|
vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
|
|
|
|
{
|
|
|
|
struct vdpa_dev_dump_info info;
|
|
|
|
|
|
|
|
info.msg = msg;
|
|
|
|
info.cb = cb;
|
|
|
|
info.start_idx = cb->args[0];
|
|
|
|
info.idx = 0;
|
|
|
|
|
2022-05-18 21:38:01 +08:00
|
|
|
down_read(&vdpa_dev_lock);
|
2021-10-27 01:55:13 +08:00
|
|
|
bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump);
|
2022-05-18 21:38:01 +08:00
|
|
|
up_read(&vdpa_dev_lock);
|
2021-10-27 01:55:13 +08:00
|
|
|
cb->args[0] = info.idx;
|
|
|
|
return msg->len;
|
|
|
|
}
|
|
|
|
|
2022-05-18 21:38:00 +08:00
|
|
|
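/*
 * VDPA_CMD_DEV_VSTATS_GET (doit): validate the device name and queue
 * index attributes, look up the device under vdpa_dev_lock and reply
 * with its vendor statistics.
 */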
static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb,
|
|
|
|
struct genl_info *info)
|
|
|
|
{
|
|
|
|
struct vdpa_device *vdev;
|
|
|
|
struct sk_buff *msg;
|
|
|
|
const char *devname;
|
|
|
|
struct device *dev;
|
|
|
|
u32 index;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (!info->attrs[VDPA_ATTR_DEV_NAME])
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX])
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
|
|
|
|
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
|
|
|
|
if (!msg)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
index = nla_get_u32(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]);
|
2022-05-18 21:38:01 +08:00
|
|
|
down_read(&vdpa_dev_lock);
|
2022-05-18 21:38:00 +08:00
|
|
|
dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
|
|
|
|
if (!dev) {
|
|
|
|
NL_SET_ERR_MSG_MOD(info->extack, "device not found");
|
|
|
|
err = -ENODEV;
|
|
|
|
goto dev_err;
|
|
|
|
}
|
|
|
|
vdev = container_of(dev, struct vdpa_device, dev);
|
|
|
|
if (!vdev->mdev) {
|
|
|
|
NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
|
|
|
|
err = -EINVAL;
|
|
|
|
goto mdev_err;
|
|
|
|
}
|
|
|
|
err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index);
|
|
|
|
if (err)
|
|
|
|
goto mdev_err;
|
|
|
|
|
|
|
|
err = genlmsg_reply(msg, info);
|
|
|
|
|
|
|
|
put_device(dev);
|
2022-05-18 21:38:01 +08:00
|
|
|
up_read(&vdpa_dev_lock);
|
2022-05-18 21:38:00 +08:00
|
|
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
mdev_err:
|
|
|
|
put_device(dev);
|
|
|
|
dev_err:
|
|
|
|
nlmsg_free(msg);
|
2022-05-18 21:38:01 +08:00
|
|
|
up_read(&vdpa_dev_lock);
|
2022-05-18 21:38:00 +08:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2021-01-05 18:32:00 +08:00
|
|
|
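/* Netlink attribute validation policy for the vdpa family. */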
static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
|
|
|
|
[VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
|
|
|
|
[VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
|
2021-01-05 18:32:01 +08:00
|
|
|
[VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
|
2021-10-27 01:55:15 +08:00
|
|
|
[VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR,
|
2023-07-28 01:57:52 +08:00
|
|
|
[VDPA_ATTR_DEV_NET_CFG_MAX_VQP] = { .type = NLA_U16 },
|
2021-10-27 01:55:15 +08:00
|
|
|
/* virtio spec 1.1 section 5.1.4.1 for valid MTU range */
|
|
|
|
[VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68),
|
2023-07-28 01:57:50 +08:00
|
|
|
[VDPA_ATTR_DEV_QUEUE_INDEX] = { .type = NLA_U32 },
|
2023-07-28 01:57:48 +08:00
|
|
|
[VDPA_ATTR_DEV_FEATURES] = { .type = NLA_U64 },
|
2021-01-05 18:32:00 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
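/* Generic netlink operations exposed by the vdpa family. */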
static const struct genl_ops vdpa_nl_ops[] = {
|
|
|
|
{
|
|
|
|
.cmd = VDPA_CMD_MGMTDEV_GET,
|
|
|
|
.doit = vdpa_nl_cmd_mgmtdev_get_doit,
|
|
|
|
.dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
|
|
|
|
},
|
2021-01-05 18:32:01 +08:00
|
|
|
{
|
|
|
|
.cmd = VDPA_CMD_DEV_NEW,
|
|
|
|
.doit = vdpa_nl_cmd_dev_add_set_doit,
|
|
|
|
.flags = GENL_ADMIN_PERM,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.cmd = VDPA_CMD_DEV_DEL,
|
|
|
|
.doit = vdpa_nl_cmd_dev_del_set_doit,
|
|
|
|
.flags = GENL_ADMIN_PERM,
|
|
|
|
},
|
2021-01-05 18:32:02 +08:00
|
|
|
{
|
|
|
|
.cmd = VDPA_CMD_DEV_GET,
|
|
|
|
.doit = vdpa_nl_cmd_dev_get_doit,
|
|
|
|
.dumpit = vdpa_nl_cmd_dev_get_dumpit,
|
|
|
|
},
|
2021-10-27 01:55:13 +08:00
|
|
|
{
|
|
|
|
.cmd = VDPA_CMD_DEV_CONFIG_GET,
|
|
|
|
.doit = vdpa_nl_cmd_dev_config_get_doit,
|
|
|
|
.dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
|
|
|
|
},
|
2022-05-18 21:38:00 +08:00
|
|
|
{
|
|
|
|
.cmd = VDPA_CMD_DEV_VSTATS_GET,
|
|
|
|
.doit = vdpa_nl_cmd_dev_stats_get_doit,
|
|
|
|
.flags = GENL_ADMIN_PERM,
|
|
|
|
},
|
2021-01-05 18:32:00 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
static struct genl_family vdpa_nl_family __ro_after_init = {
|
|
|
|
.name = VDPA_GENL_NAME,
|
|
|
|
.version = VDPA_GENL_VERSION,
|
|
|
|
.maxattr = VDPA_ATTR_MAX,
|
|
|
|
.policy = vdpa_nl_policy,
|
|
|
|
.netnsok = false,
|
|
|
|
.module = THIS_MODULE,
|
|
|
|
.ops = vdpa_nl_ops,
|
|
|
|
.n_ops = ARRAY_SIZE(vdpa_nl_ops),
|
2022-08-25 08:18:30 +08:00
|
|
|
.resv_start_op = VDPA_CMD_DEV_VSTATS_GET + 1,
|
2021-01-05 18:32:00 +08:00
|
|
|
};
|
|
|
|
|
2020-03-26 22:01:21 +08:00
|
|
|
static int vdpa_init(void)
|
|
|
|
{
|
2021-01-05 18:32:00 +08:00
|
|
|
int err;
|
|
|
|
|
|
|
|
err = bus_register(&vdpa_bus);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
err = genl_register_family(&vdpa_nl_family);
|
|
|
|
if (err)
|
|
|
|
goto err;
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err:
|
|
|
|
bus_unregister(&vdpa_bus);
|
|
|
|
return err;
|
2020-03-26 22:01:21 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void __exit vdpa_exit(void)
|
|
|
|
{
|
2021-01-05 18:32:00 +08:00
|
|
|
genl_unregister_family(&vdpa_nl_family);
|
2020-03-26 22:01:21 +08:00
|
|
|
bus_unregister(&vdpa_bus);
|
|
|
|
ida_destroy(&vdpa_index_ida);
|
|
|
|
}
|
|
|
|
core_initcall(vdpa_init);
|
|
|
|
module_exit(vdpa_exit);
|
|
|
|
|
|
|
|
MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
|
|
|
|
MODULE_LICENSE("GPL v2");
|