
drm/virtio: fence: pass plain pointer

Since commit 9fdd90c0f4 ("drm/virtio: add virtio_gpu_alloc_fence()"),
fences are no longer allocated by virtio_gpu_fence_emit() itself, so
there is no need to pass down a reference to the fence pointer; a
plain pointer is enough now.

Convert virtio_gpu_fence_emit() and callers.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Robert Foss <robert.foss@collabora.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20181128151021.29565-2-kraxel@redhat.com
Gerd Hoffmann 2018-11-28 16:10:20 +01:00
parent 7cdf33ab02
commit 4d55fd66b4
5 changed files with 27 additions and 27 deletions
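
The net effect for callers: the fence is now allocated up front with
virtio_gpu_fence_alloc() and handed down the command path as a plain
pointer, and virtio_gpu_fence_emit() only fills it in and queues it.
A minimal sketch of the resulting pattern follows; example_submit() is
a made-up wrapper for illustration, while the helpers it calls are the
driver functions converted by the diff below.

/*
 * Sketch only, not a literal excerpt from the patch: shows the calling
 * convention after this change.  example_submit() is hypothetical; the
 * helpers it calls are the real driver functions touched below.
 */
static int example_submit(struct virtio_gpu_device *vgdev,
			  struct virtio_gpu_fpriv *vfpriv,
			  void *buf, uint32_t size)
{
	struct virtio_gpu_fence *fence;

	fence = virtio_gpu_fence_alloc(vgdev);	/* allocation now done by the caller */
	if (!fence)
		return -ENOMEM;

	/* before this patch the call would have been: ..., &fence); */
	virtio_gpu_cmd_submit(vgdev, buf, size, vfpriv->ctx_id, fence);

	/* virtio_gpu_fence_emit() took its own reference (dma_fence_get),
	 * so the caller drops its reference once it is done with the fence. */
	dma_fence_put(&fence->f);
	return 0;
}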

drivers/gpu/drm/virtio/virtgpu_drv.h

@@ -273,7 +273,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 					uint64_t offset,
 					__le32 width, __le32 height,
 					__le32 x, __le32 y,
-					struct virtio_gpu_fence **fence);
+					struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 				   uint32_t resource_id,
 				   uint32_t x, uint32_t y,
@@ -284,7 +284,7 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
 				uint32_t x, uint32_t y);
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object *obj,
-			     struct virtio_gpu_fence **fence);
+			     struct virtio_gpu_fence *fence);
 void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
 			      struct virtio_gpu_object *obj);
 int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
@@ -309,23 +309,23 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
 					    uint32_t resource_id);
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 			   void *data, uint32_t data_size,
-			   uint32_t ctx_id, struct virtio_gpu_fence **fence);
+			   uint32_t ctx_id, struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 					  uint32_t resource_id, uint32_t ctx_id,
 					  uint64_t offset, uint32_t level,
 					  struct virtio_gpu_box *box,
-					  struct virtio_gpu_fence **fence);
+					  struct virtio_gpu_fence *fence);
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 					struct virtio_gpu_object *bo,
 					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
 					struct virtio_gpu_box *box,
-					struct virtio_gpu_fence **fence);
+					struct virtio_gpu_fence *fence);
 void
 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 				  struct virtio_gpu_object *bo,
 				  struct virtio_gpu_resource_create_3d *rc_3d,
-				  struct virtio_gpu_fence **fence);
+				  struct virtio_gpu_fence *fence);
 void virtio_gpu_ctrl_ack(struct virtqueue *vq);
 void virtio_gpu_cursor_ack(struct virtqueue *vq);
 void virtio_gpu_fence_ack(struct virtqueue *vq);
@@ -358,7 +358,7 @@ struct virtio_gpu_fence *virtio_gpu_fence_alloc(
 void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence);
 int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 			  struct virtio_gpu_ctrl_hdr *cmd_hdr,
-			  struct virtio_gpu_fence **fence);
+			  struct virtio_gpu_fence *fence);
 void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
 				    u64 last_seq);

drivers/gpu/drm/virtio/virtgpu_fence.c

@@ -91,19 +91,19 @@ void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence)
 int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
 			  struct virtio_gpu_ctrl_hdr *cmd_hdr,
-			  struct virtio_gpu_fence **fence)
+			  struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
 	unsigned long irq_flags;
 
 	spin_lock_irqsave(&drv->lock, irq_flags);
-	(*fence)->seq = ++drv->sync_seq;
-	dma_fence_get(&(*fence)->f);
-	list_add_tail(&(*fence)->node, &drv->fences);
+	fence->seq = ++drv->sync_seq;
+	dma_fence_get(&fence->f);
+	list_add_tail(&fence->node, &drv->fences);
 	spin_unlock_irqrestore(&drv->lock, irq_flags);
 
 	cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
-	cmd_hdr->fence_id = cpu_to_le64((*fence)->seq);
+	cmd_hdr->fence_id = cpu_to_le64(fence->seq);
 	return 0;
 }

drivers/gpu/drm/virtio/virtgpu_ioctl.c

@@ -221,7 +221,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
 	}
 
 	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
-			      vfpriv->ctx_id, &out_fence);
+			      vfpriv->ctx_id, out_fence);
 
 	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);
@@ -349,7 +349,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 	}
 
 	virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d, NULL);
-	ret = virtio_gpu_object_attach(vgdev, qobj, &fence);
+	ret = virtio_gpu_object_attach(vgdev, qobj, fence);
 	if (ret) {
 		virtio_gpu_fence_cleanup(fence);
 		goto fail_backoff;
@@ -450,7 +450,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	virtio_gpu_cmd_transfer_from_host_3d
 		(vgdev, qobj->hw_res_handle,
 		 vfpriv->ctx_id, offset, args->level,
-		 &box, &fence);
+		 &box, fence);
 	reservation_object_add_excl_fence(qobj->tbo.resv,
 					  &fence->f);
@@ -504,7 +504,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 	virtio_gpu_cmd_transfer_to_host_3d
 		(vgdev, qobj,
 		 vfpriv ? vfpriv->ctx_id : 0, offset,
-		 args->level, &box, &fence);
+		 args->level, &box, fence);
 	reservation_object_add_excl_fence(qobj->tbo.resv,
 					  &fence->f);
 	dma_fence_put(&fence->f);

drivers/gpu/drm/virtio/virtgpu_plane.c

@@ -204,7 +204,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 			(vgdev, bo, 0,
 			 cpu_to_le32(plane->state->crtc_w),
 			 cpu_to_le32(plane->state->crtc_h),
-			 0, 0, &vgfb->fence);
+			 0, 0, vgfb->fence);
 		ret = virtio_gpu_object_reserve(bo, false);
 		if (!ret) {
 			reservation_object_add_excl_fence(bo->tbo.resv,

drivers/gpu/drm/virtio/virtgpu_vq.c

@@ -298,7 +298,7 @@ static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
 static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
 					       struct virtio_gpu_vbuffer *vbuf,
 					       struct virtio_gpu_ctrl_hdr *hdr,
-					       struct virtio_gpu_fence **fence)
+					       struct virtio_gpu_fence *fence)
 {
 	struct virtqueue *vq = vgdev->ctrlq.vq;
 	int rc;
@@ -405,7 +405,7 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
 static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
 						  uint32_t resource_id,
-						  struct virtio_gpu_fence **fence)
+						  struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_resource_detach_backing *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -467,7 +467,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 					uint64_t offset,
 					__le32 width, __le32 height,
 					__le32 x, __le32 y,
-					struct virtio_gpu_fence **fence)
+					struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_transfer_to_host_2d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -497,7 +497,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
 				       uint32_t resource_id,
 				       struct virtio_gpu_mem_entry *ents,
 				       uint32_t nents,
-				       struct virtio_gpu_fence **fence)
+				       struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_resource_attach_backing *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -821,7 +821,7 @@ void
 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 				  struct virtio_gpu_object *bo,
 				  struct virtio_gpu_resource_create_3d *rc_3d,
-				  struct virtio_gpu_fence **fence)
+				  struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_resource_create_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -842,7 +842,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 					uint32_t ctx_id,
 					uint64_t offset, uint32_t level,
 					struct virtio_gpu_box *box,
-					struct virtio_gpu_fence **fence)
+					struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -870,7 +870,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 					  uint32_t resource_id, uint32_t ctx_id,
 					  uint64_t offset, uint32_t level,
 					  struct virtio_gpu_box *box,
-					  struct virtio_gpu_fence **fence)
+					  struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -890,7 +890,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 			   void *data, uint32_t data_size,
-			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
+			   uint32_t ctx_id, struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_cmd_submit *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -910,7 +910,7 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object *obj,
-			     struct virtio_gpu_fence **fence)
+			     struct virtio_gpu_fence *fence)
 {
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 	struct virtio_gpu_mem_entry *ents;
@@ -967,7 +967,7 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
 	if (use_dma_api && obj->mapped) {
 		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
 
 		/* detach backing and wait for the host process it ... */
-		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
+		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
 		dma_fence_wait(&fence->f, true);
 		dma_fence_put(&fence->f);