A few resources-related fixes for qxl, some doc build warnings and ioctl
fixes for dma-buf, an off-by-one fix in edid, and a return code fix in DP-MST

-----BEGIN PGP SIGNATURE-----

iHQEABYIAB0WIQRcEzekXsqa64kGDp7j7w1vZxhRxQUCXqrvVgAKCRDj7w1vZxhR
xazeAPMFYoHj36L2cbr7lrUDER2s6cNdCpUGN0tuQx9fYjmQAP0RRCXpbfyFESvf
MG5BBZvARO7OUtUCujogiPbmAVn1DQ==
=r+Az
-----END PGP SIGNATURE-----

Merge tag 'drm-misc-fixes-2020-04-30' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

A few resources-related fixes for qxl, some doc build warnings and ioctl
fixes for dma-buf, an off-by-one fix in edid, and a return code fix in DP-MST

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20200430153201.wx6of2b2gsoip7bk@gilmour.lan
commit c62098c991
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -388,7 +388,8 @@ static long dma_buf_ioctl(struct file *file,
 
                 return ret;
 
-        case DMA_BUF_SET_NAME:
+        case DMA_BUF_SET_NAME_A:
+        case DMA_BUF_SET_NAME_B:
                 return dma_buf_set_name(dmabuf, (const char __user *)arg);
 
         default:
@@ -655,8 +656,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
  * calls attach() of dma_buf_ops to allow device-specific attach functionality
  * @dmabuf:        [in]    buffer to attach device to.
  * @dev:           [in]    device to be attached.
- * @importer_ops   [in]    importer operations for the attachment
- * @importer_priv  [in]    importer private pointer for the attachment
+ * @importer_ops:  [in]    importer operations for the attachment
+ * @importer_priv: [in]    importer private pointer for the attachment
  *
  * Returns struct dma_buf_attachment pointer for this attachment. Attachments
  * must be cleaned up by calling dma_buf_detach().
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -3442,8 +3442,12 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
         drm_dp_queue_down_tx(mgr, txmsg);
 
         ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
-        if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
-                ret = -EIO;
+        if (ret > 0) {
+                if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+                        ret = -EIO;
+                else
+                        ret = size;
+        }
 
         kfree(txmsg);
 fail_put:
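Note: the positive value returned by drm_dp_mst_wait_tx_reply() only means "a reply arrived"; callers of a DPCD write expect the number of bytes written on success. A minimal sketch of the corrected contract, using hypothetical stand-in functions rather than the kernel API:

#include <errno.h>
#include <stdio.h>

/* stand-in for drm_dp_mst_wait_tx_reply(): >0 means "got a reply",
 * it is not a byte count */
static int wait_for_reply(int *nak)
{
        *nak = 0;
        return 1;
}

static int dpcd_write(int size)
{
        int nak;
        int ret = wait_for_reply(&nak);

        if (ret > 0) {
                if (nak)
                        ret = -EIO;   /* sink rejected the request */
                else
                        ret = size;   /* report bytes written, as callers expect */
        }
        return ret;
}

int main(void)
{
        printf("dpcd_write(4) -> %d\n", dpcd_write(4));   /* prints 4, not 1 */
        return 0;
}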
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -5111,7 +5111,7 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d
         struct drm_display_mode *mode;
         unsigned pixel_clock = (timings->pixel_clock[0] |
                                 (timings->pixel_clock[1] << 8) |
-                                (timings->pixel_clock[2] << 16));
+                                (timings->pixel_clock[2] << 16)) + 1;
         unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
         unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
         unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
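Note: like the hactive/hblank/hsync fields right below it, the DisplayID detailed-timing pixel clock is stored as (value - 1), in 10 kHz units here, so the parser has to add one. A small worked decode with illustrative bytes (example values assumed, not taken from the patch):

#include <stdio.h>

int main(void)
{
        /* example only: 0x003a01 == 14849 == (148500 kHz / 10) - 1 */
        unsigned char pixel_clock[3] = { 0x01, 0x3a, 0x00 };

        unsigned clock = (pixel_clock[0] |
                          (pixel_clock[1] << 8) |
                          (pixel_clock[2] << 16)) + 1;   /* the field stores value - 1 */

        printf("pixel clock: %u kHz\n", clock * 10);     /* 148500 */
        return 0;
}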
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -480,9 +480,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
                 return ret;
 
         ret = qxl_release_reserve_list(release, true);
-        if (ret)
+        if (ret) {
+                qxl_release_free(qdev, release);
                 return ret;
-
+        }
         cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
         cmd->type = QXL_SURFACE_CMD_CREATE;
         cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
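Note: the release allocated earlier in qxl_hw_surface_alloc() was leaked when the reservation failed. A generic sketch of the pattern being fixed, using plain malloc/free stand-ins instead of the qxl release API:

#include <errno.h>
#include <stdlib.h>

struct release { char payload[64]; };

/* hypothetical stand-in for qxl_release_reserve_list() failing */
static int reserve_list(struct release *r)
{
        (void)r;
        return -EBUSY;
}

static int surface_alloc(void)
{
        struct release *release = calloc(1, sizeof(*release));
        int ret;

        if (!release)
                return -ENOMEM;

        ret = reserve_list(release);
        if (ret) {
                free(release);   /* without this the early return leaks the release */
                return ret;
        }

        /* ... map the command, fill it in, push it to the ring ... */
        free(release);
        return 0;
}

int main(void)
{
        return surface_alloc() ? 1 : 0;
}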
@@ -499,8 +500,8 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
         /* no need to add a release to the fence for this surface bo,
            since it is only released when we ask to destroy the surface
            and it would never signal otherwise */
-        qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
         qxl_release_fence_buffer_objects(release);
+        qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
 
         surf->hw_surf_alloc = true;
         spin_lock(&qdev->surf_id_idr_lock);
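Note: this hunk and the remaining qxl hunks below all apply the same ordering rule: once the release has been pushed to the ring, the host side may process and free it immediately, so qxl_release_fence_buffer_objects() must run before the push, never after. A hedged userspace analogy of that publish-last ordering (pthreads, not the qxl ring):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct release { int fenced; };

static struct release *ring_slot;   /* stand-in for the command ring */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* the "host": consumes and frees a release as soon as it becomes visible */
static void *consumer(void *arg)
{
        (void)arg;
        for (;;) {
                pthread_mutex_lock(&lock);
                if (ring_slot) {
                        printf("consumed release, fenced=%d\n", ring_slot->fenced);
                        free(ring_slot);
                        ring_slot = NULL;
                        pthread_mutex_unlock(&lock);
                        return NULL;
                }
                pthread_mutex_unlock(&lock);
        }
}

int main(void)
{
        pthread_t t;
        struct release *r = calloc(1, sizeof(*r));

        if (!r || pthread_create(&t, NULL, consumer, NULL))
                return 1;

        r->fenced = 1;              /* finish all work on the release first ... */
        pthread_mutex_lock(&lock);
        ring_slot = r;              /* ... then publish it; it may be freed at once */
        pthread_mutex_unlock(&lock);
        /* touching r past this point would be the use-after-free being fixed */

        pthread_join(t, NULL);
        return 0;
}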
@@ -542,9 +543,8 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
         cmd->surface_id = id;
         qxl_release_unmap(qdev, release, &cmd->release_info);
 
-        qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
-
         qxl_release_fence_buffer_objects(release);
+        qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
 
         return 0;
 }
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -510,8 +510,8 @@ static int qxl_primary_apply_cursor(struct drm_plane *plane)
         cmd->u.set.visible = 1;
         qxl_release_unmap(qdev, release, &cmd->release_info);
 
-        qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
         qxl_release_fence_buffer_objects(release);
+        qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 
         return ret;
 
@@ -652,8 +652,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
         cmd->u.position.y = plane->state->crtc_y + fb->hot_y;
 
         qxl_release_unmap(qdev, release, &cmd->release_info);
-        qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
         qxl_release_fence_buffer_objects(release);
+        qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 
         if (old_cursor_bo != NULL)
                 qxl_bo_unpin(old_cursor_bo);
@@ -700,8 +700,8 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane,
         cmd->type = QXL_CURSOR_HIDE;
         qxl_release_unmap(qdev, release, &cmd->release_info);
 
-        qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
         qxl_release_fence_buffer_objects(release);
+        qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 }
 
 static void qxl_update_dumb_head(struct qxl_device *qdev,
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -209,9 +209,10 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
                 goto out_release_backoff;
 
         rects = drawable_set_clipping(qdev, num_clips, clips_bo);
-        if (!rects)
+        if (!rects) {
+                ret = -EINVAL;
                 goto out_release_backoff;
-
+        }
         drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
 
         drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
@@ -242,8 +243,8 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
         }
         qxl_bo_kunmap(clips_bo);
 
-        qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
         qxl_release_fence_buffer_objects(release);
+        qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
 
 out_release_backoff:
         if (ret)
--- a/drivers/gpu/drm/qxl/qxl_image.c
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -212,7 +212,8 @@ qxl_image_init_helper(struct qxl_device *qdev,
                 break;
         default:
                 DRM_ERROR("unsupported image bit depth\n");
-                return -EINVAL; /* TODO: cleanup */
+                qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
+                return -EINVAL;
         }
         image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
         image->u.bitmap.x = width;
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -261,11 +261,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
                         apply_surf_reloc(qdev, &reloc_info[i]);
         }
 
+        qxl_release_fence_buffer_objects(release);
         ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
-        if (ret)
-                qxl_release_backoff_reserve_list(release);
-        else
-                qxl_release_fence_buffer_objects(release);
 
 out_free_bos:
 out_free_release:
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -53,14 +53,6 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
                       events_clear, &events_clear);
 }
 
-static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
-                                       uint32_t ctx_id)
-{
-        virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
-        virtio_gpu_notify(vgdev);
-        ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
-}
-
 static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
                                void (*work_func)(struct work_struct *work))
 {
@@ -275,14 +267,17 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
 void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
 {
         struct virtio_gpu_device *vgdev = dev->dev_private;
-        struct virtio_gpu_fpriv *vfpriv;
+        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 
         if (!vgdev->has_virgl_3d)
                 return;
 
-        vfpriv = file->driver_priv;
+        if (vfpriv->context_created) {
+                virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id);
+                virtio_gpu_notify(vgdev);
+        }
 
-        virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
+        ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1);
         mutex_destroy(&vfpriv->context_lock);
         kfree(vfpriv);
         file->driver_priv = NULL;
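Note: with the helper gone, postclose only sends a context-destroy command for contexts that were actually created, while the ID and the per-file state are always released. A self-contained sketch of that shape with generic names (not the driver's API):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fpriv {
        bool context_created;
        unsigned int ctx_id;
};

/* hypothetical teardown mirroring the reworked postclose: destroy only what
 * was created, but always release the id and the per-file state */
static void driver_postclose(struct fpriv *vfpriv)
{
        if (vfpriv->context_created)
                printf("destroy context %u\n", vfpriv->ctx_id);

        printf("free id %u\n", vfpriv->ctx_id);
        free(vfpriv);
}

int main(void)
{
        struct fpriv *vfpriv = calloc(1, sizeof(*vfpriv));

        if (!vfpriv)
                return 1;
        vfpriv->ctx_id = 1;
        vfpriv->context_created = false;  /* file closed before any 3D use */
        driver_postclose(vfpriv);
        return 0;
}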
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -329,13 +329,12 @@ struct dma_buf {
 
 /**
  * struct dma_buf_attach_ops - importer operations for an attachment
- * @move_notify: [optional] notification that the DMA-buf is moving
  *
  * Attachment operations implemented by the importer.
  */
 struct dma_buf_attach_ops {
         /**
-         * @move_notify
+         * @move_notify: [optional] notification that the DMA-buf is moving
          *
          * If this callback is provided the framework can avoid pinning the
         * backing store while mappings exists.
--- a/include/uapi/linux/dma-buf.h
+++ b/include/uapi/linux/dma-buf.h
@@ -39,6 +39,12 @@ struct dma_buf_sync {
 
 #define DMA_BUF_BASE            'b'
 #define DMA_BUF_IOCTL_SYNC      _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)
+
+/* 32/64bitness of this uapi was botched in android, there's no difference
+ * between them in actual uapi, they're just different numbers.
+ */
 #define DMA_BUF_SET_NAME        _IOW(DMA_BUF_BASE, 1, const char *)
+#define DMA_BUF_SET_NAME_A      _IOW(DMA_BUF_BASE, 1, u32)
+#define DMA_BUF_SET_NAME_B      _IOW(DMA_BUF_BASE, 1, u64)
 
 #endif
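Note: _IOW() folds sizeof() of its third argument into the request number, so the original DMA_BUF_SET_NAME value differs between 32-bit and 64-bit userspace; the _A/_B aliases pin both possible encodings, and dma_buf_ioctl() above now accepts either. A small userspace check of the arithmetic (it only prints the computed numbers; __u32/__u64 stand in for the kernel's u32/u64 spellings):

#include <stdio.h>
#include <linux/ioctl.h>
#include <linux/types.h>

#define DMA_BUF_BASE 'b'

int main(void)
{
        /* sizeof(const char *) is 4 or 8 depending on the ABI, so the legacy
         * request number is ABI-dependent ... */
        unsigned long legacy = _IOW(DMA_BUF_BASE, 1, const char *);

        /* ... while the aliases fix the size field explicitly */
        unsigned long name_a = _IOW(DMA_BUF_BASE, 1, __u32);
        unsigned long name_b = _IOW(DMA_BUF_BASE, 1, __u64);

        printf("SET_NAME   = %#lx (pointer size %zu)\n", legacy, sizeof(const char *));
        printf("SET_NAME_A = %#lx\n", name_a);
        printf("SET_NAME_B = %#lx\n", name_b);
        return 0;
}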