drm/virtio: implement context init: add virtio_gpu_fence_event

Similar to DRM_VMW_EVENT_FENCE_SIGNALED.  Sends a pollable event
to the DRM file descriptor when a fence on a specific ring is
signaled.

One difference is that the event is not exposed via the UAPI -- this
is because host responses are on a shared memory buffer of type
BLOB_MEM_GUEST [this is the common way to receive responses with
virtgpu].  As such, there is no context-specific read(..)
implementation either -- just a poll(..) implementation.
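
For context, here is a minimal userspace sketch of how the event is
consumed (assuming a fd whose virtgpu context was created with a
non-zero poll rings mask; that setup is not shown here).  Because the
driver dequeues and frees the event inside its poll(..) hook, userspace
only waits for POLLIN and never issues a read(2) -- the function name
below is illustrative, not part of this patch:

#include <poll.h>
#include <stdio.h>

/* Wait up to timeout_ms for a fence on one of the polled rings.
 * Returns 1 if a fence signaled, 0 on timeout, -1 on error. */
static int wait_for_ring_fence(int drm_fd, int timeout_ms)
{
        struct pollfd pfd = { .fd = drm_fd, .events = POLLIN };
        int ret = poll(&pfd, 1, timeout_ms);

        if (ret < 0) {
                perror("poll");
                return -1;
        }
        if (ret == 0)
                return 0;

        /* virtio_gpu_poll() already dequeued and kfree'd the event,
         * so POLLIN is the whole signal; there is nothing to read. */
        return (pfd.revents & POLLIN) ? 1 : 0;
}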

Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
Acked-by: Nicholas Verne <nverne@chromium.org>
Link: http://patchwork.freedesktop.org/patch/msgid/20210921232024.817-12-gurchetansingh@chromium.org
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>

--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -29,6 +29,8 @@
 #include <linux/module.h>
 #include <linux/console.h>
 #include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
 
 #include <drm/drm.h>
 #include <drm/drm_aperture.h>
@@ -155,6 +157,35 @@ static void virtio_gpu_config_changed(struct virtio_device *vdev)
 	schedule_work(&vgdev->config_changed_work);
 }
 
+static __poll_t virtio_gpu_poll(struct file *filp,
+				struct poll_table_struct *wait)
+{
+	struct drm_file *drm_file = filp->private_data;
+	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
+	struct drm_device *dev = drm_file->minor->dev;
+	struct drm_pending_event *e = NULL;
+	__poll_t mask = 0;
+
+	if (!vfpriv->ring_idx_mask)
+		return drm_poll(filp, wait);
+
+	poll_wait(filp, &drm_file->event_wait, wait);
+
+	if (!list_empty(&drm_file->event_list)) {
+		spin_lock_irq(&dev->event_lock);
+		e = list_first_entry(&drm_file->event_list,
+				     struct drm_pending_event, link);
+		drm_file->event_space += e->event->length;
+		list_del(&e->link);
+		spin_unlock_irq(&dev->event_lock);
+
+		kfree(e);
+		mask |= EPOLLIN | EPOLLRDNORM;
+	}
+
+	return mask;
+}
+
 static struct virtio_device_id id_table[] = {
 	{ VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
 	{ 0 },
@@ -194,7 +225,17 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
 MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
 MODULE_AUTHOR("Alon Levy");
 
-DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
+static const struct file_operations virtio_gpu_driver_fops = {
+	.owner          = THIS_MODULE,
+	.open           = drm_open,
+	.release        = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.compat_ioctl   = drm_compat_ioctl,
+	.poll           = virtio_gpu_poll,
+	.read           = drm_read,
+	.llseek         = noop_llseek,
+	.mmap           = drm_gem_mmap
+};
 
 static const struct drm_driver driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,

--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -138,11 +138,18 @@ struct virtio_gpu_fence_driver {
 	spinlock_t lock;
 };
 
+#define VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL 0x10000000
+struct virtio_gpu_fence_event {
+	struct drm_pending_event base;
+	struct drm_event event;
+};
+
 struct virtio_gpu_fence {
 	struct dma_fence f;
 	uint32_t ring_idx;
 	uint64_t fence_id;
 	bool emit_fence_info;
+	struct virtio_gpu_fence_event *e;
 	struct virtio_gpu_fence_driver *drv;
 	struct list_head node;
 };
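
Note that struct virtio_gpu_fence_event embeds the generic UAPI event
header from include/uapi/drm/drm.h, reproduced below for reference.
Since the driver sets event.length to sizeof(e->event) (see
virtgpu_ioctl.c below), the queued event is header-only -- unlike,
say, vmwgfx's fence event, it carries no driver-specific payload:

/* include/uapi/drm/drm.h */
struct drm_event {
        __u32 type;     /* here: VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL */
        __u32 length;   /* total size, including this header */
};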

--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -152,11 +152,21 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
 			continue;
 
 		dma_fence_signal_locked(&curr->f);
+		if (curr->e) {
+			drm_send_event(vgdev->ddev, &curr->e->base);
+			curr->e = NULL;
+		}
+
 		list_del(&curr->node);
 		dma_fence_put(&curr->f);
 	}
 
 	dma_fence_signal_locked(&signaled->f);
+	if (signaled->e) {
+		drm_send_event(vgdev->ddev, &signaled->e->base);
+		signaled->e = NULL;
+	}
+
 	list_del(&signaled->node);
 	dma_fence_put(&signaled->f);
 	break;
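
The drm_send_event() calls above are the delivery half of the generic
DRM pending-event lifecycle.  As a hypothetical, condensed sketch of
the full reserve-then-send pairing this patch relies on (my_event and
example_send_event are illustrative names, not part of the patch):

#include <linux/slab.h>
#include <drm/drm.h>
#include <drm/drm_file.h>

struct my_event {                       /* hypothetical wrapper */
        struct drm_pending_event base;
        struct drm_event event;
};

static int example_send_event(struct drm_device *dev, struct drm_file *file)
{
        struct my_event *e = kzalloc(sizeof(*e), GFP_KERNEL);
        int ret;

        if (!e)
                return -ENOMEM;

        e->event.type = 0x10000000;     /* driver-private event type */
        e->event.length = sizeof(e->event);

        /* Charges file->event_space up front so delivery cannot fail. */
        ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
        if (ret) {
                kfree(e);
                return ret;
        }

        /* Later, once the condition occurs (here: the fence signaled),
         * queue the event on file->event_list and wake any pollers. */
        drm_send_event(dev, &e->base);
        return 0;
}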

--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -38,6 +38,36 @@
 				     VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
 				     VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)
 
+static int virtio_gpu_fence_event_create(struct drm_device *dev,
+					 struct drm_file *file,
+					 struct virtio_gpu_fence *fence,
+					 uint32_t ring_idx)
+{
+	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+	struct virtio_gpu_fence_event *e = NULL;
+	int ret;
+
+	if (!(vfpriv->ring_idx_mask & (1 << ring_idx)))
+		return 0;
+
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+	if (!e)
+		return -ENOMEM;
+
+	e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL;
+	e->event.length = sizeof(e->event);
+
+	ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
+	if (ret)
+		goto free;
+
+	fence->e = e;
+	return 0;
+free:
+	kfree(e);
+	return ret;
+}
+
 /* Must be called with &virtio_gpu_fpriv.struct_mutex held. */
 static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
 					     struct virtio_gpu_fpriv *vfpriv)
@@ -195,6 +225,10 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 		goto out_unresv;
 	}
 
+	ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
+	if (ret)
+		goto out_unresv;
+
 	if (out_fence_fd >= 0) {
 		sync_file = sync_file_create(&out_fence->f);
 		if (!sync_file) {
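
Finally, these events are opt-in per ring: virtio_gpu_fence_event_create()
allocates one only when the ring's bit is set in vfpriv->ring_idx_mask,
which userspace establishes at context-creation time.  A sketch of that
setup, assuming the context-init UAPI added earlier in this series
(header path, capset negotiation, and error handling are elided and may
differ):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/virtgpu_drm.h>    /* UAPI header; install path may vary */

/* Create a virtgpu context with one ring and request fence events on
 * ring 0, so later execbuffers with ring_idx 0 produce a pollable
 * event when their out-fence signals. */
static int init_polled_context(int drm_fd)
{
        struct drm_virtgpu_context_set_param params[] = {
                { .param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS,       .value = 1 },
                { .param = VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK, .value = 1 },
        };
        struct drm_virtgpu_context_init init = {
                .num_params = 2,
                .ctx_set_params = (uintptr_t)params,
        };

        return ioctl(drm_fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
}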