
drm/vmwgfx: Use a per-device semaphore for reservation protection

Don't use a per-master semaphore (the ttm lock) for reservation protection; use a
per-device semaphore instead. This is needed because clients connecting through
render nodes are not master-aware.
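
In the ioctl and pinning paths touched below, the pattern changes roughly as follows
(a condensed sketch using the names from the diff, with the surrounding code elided):

    /* Before: the lock hangs off the file's master, which render-node clients do not have. */
    struct vmw_master *vmaster = vmw_master(file_priv->master);

    ret = ttm_read_lock(&vmaster->lock, true);
    if (unlikely(ret != 0))
            return ret;
    /* ... create/pin/dirty resources ... */
    ttm_read_unlock(&vmaster->lock);

    /* After: the lock lives in the per-device vmw_private, so it works for any client. */
    ret = ttm_read_lock(&dev_priv->reservation_sem, true);
    if (unlikely(ret != 0))
            return ret;
    /* ... create/pin/dirty resources ... */
    ttm_read_unlock(&dev_priv->reservation_sem);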

The ttm lock used here should probably be replaced with a read-write semaphore
(rwsem) once down_xx_interruptible() variants become available.
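
For reference only, a minimal sketch of what that future replacement could look like;
it assumes hypothetical down_read_interruptible()/down_write_interruptible() helpers
that do not exist yet, which is exactly why the ttm lock is kept for now:

    /* Hypothetical sketch: assumes an interruptible down_read() variant exists. */
    struct rw_semaphore reservation_sem;    /* would replace struct ttm_lock */

    ret = down_read_interruptible(&dev_priv->reservation_sem);
    if (ret)
            return ret;     /* typically -EINTR */
    /* ... access reservations ... */
    up_read(&dev_priv->reservation_sem);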

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Thomas Hellstrom, 2014-02-27 12:34:51 +01:00
commit 294adf7d86 (parent 4beb6d9fa6)
11 changed files with 46 additions and 58 deletions


@@ -462,7 +462,6 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 struct vmw_resource *tmp;
 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-struct vmw_master *vmaster = vmw_master(file_priv->master);
 int ret;
@@ -474,7 +473,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 if (unlikely(vmw_user_context_size == 0))
 vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
-ret = ttm_read_lock(&vmaster->lock, true);
+ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 if (unlikely(ret != 0))
 return ret;
@@ -521,7 +520,7 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 out_err:
 vmw_resource_unreference(&res);
 out_unlock:
-ttm_read_unlock(&vmaster->lock);
+ttm_read_unlock(&dev_priv->reservation_sem);
 return ret;
 }


@@ -52,11 +52,10 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
 struct ttm_placement *placement,
 bool interruptible)
 {
-struct vmw_master *vmaster = dev_priv->active_master;
 struct ttm_buffer_object *bo = &buf->base;
 int ret;
-ret = ttm_write_lock(&vmaster->lock, interruptible);
+ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
 if (unlikely(ret != 0))
 return ret;
@@ -71,7 +70,7 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
 ttm_bo_unreserve(bo);
 err:
-ttm_write_unlock(&vmaster->lock);
+ttm_write_unlock(&dev_priv->reservation_sem);
 return ret;
 }
@@ -95,12 +94,11 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
 struct vmw_dma_buffer *buf,
 bool pin, bool interruptible)
 {
-struct vmw_master *vmaster = dev_priv->active_master;
 struct ttm_buffer_object *bo = &buf->base;
 struct ttm_placement *placement;
 int ret;
-ret = ttm_write_lock(&vmaster->lock, interruptible);
+ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
 if (unlikely(ret != 0))
 return ret;
@@ -143,7 +141,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
 err_unreserve:
 ttm_bo_unreserve(bo);
 err:
-ttm_write_unlock(&vmaster->lock);
+ttm_write_unlock(&dev_priv->reservation_sem);
 return ret;
 }
@@ -198,7 +196,6 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
 struct vmw_dma_buffer *buf,
 bool pin, bool interruptible)
 {
-struct vmw_master *vmaster = dev_priv->active_master;
 struct ttm_buffer_object *bo = &buf->base;
 struct ttm_placement placement;
 int ret = 0;
@@ -209,7 +206,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
 placement = vmw_vram_placement;
 placement.lpfn = bo->num_pages;
-ret = ttm_write_lock(&vmaster->lock, interruptible);
+ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
 if (unlikely(ret != 0))
 return ret;
@@ -232,7 +229,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
 ttm_bo_unreserve(bo);
 err_unlock:
-ttm_write_unlock(&vmaster->lock);
+ttm_write_unlock(&dev_priv->reservation_sem);
 return ret;
 }


@@ -606,6 +606,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 mutex_init(&dev_priv->release_mutex);
 mutex_init(&dev_priv->binding_mutex);
 rwlock_init(&dev_priv->resource_lock);
+ttm_lock_init(&dev_priv->reservation_sem);
 for (i = vmw_res_context; i < vmw_res_max; ++i) {
 idr_init(&dev_priv->res_idr[i]);
@@ -1175,12 +1176,11 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 {
 struct vmw_private *dev_priv =
 container_of(nb, struct vmw_private, pm_nb);
-struct vmw_master *vmaster = dev_priv->active_master;
 switch (val) {
 case PM_HIBERNATION_PREPARE:
 case PM_SUSPEND_PREPARE:
-ttm_suspend_lock(&vmaster->lock);
+ttm_suspend_lock(&dev_priv->reservation_sem);
 /**
 * This empties VRAM and unbinds all GMR bindings.
@@ -1194,7 +1194,7 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 case PM_POST_HIBERNATION:
 case PM_POST_SUSPEND:
 case PM_POST_RESTORE:
-ttm_suspend_unlock(&vmaster->lock);
+ttm_suspend_unlock(&dev_priv->reservation_sem);
 break;
 case PM_RESTORE_PREPARE:


@@ -486,6 +486,11 @@ struct vmw_private {
 struct mutex release_mutex;
 uint32_t num_3d_resources;
+/*
+* Replace this with an rwsem as soon as we have down_xx_interruptible()
+*/
+struct ttm_lock reservation_sem;
 /*
 * Query processing. These members
 * are protected by the cmdbuf mutex.


@@ -2712,7 +2712,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 {
 struct vmw_private *dev_priv = vmw_priv(dev);
 struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
-struct vmw_master *vmaster = vmw_master(file_priv->master);
 int ret;
 /*
@@ -2729,7 +2728,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 return -EINVAL;
 }
-ret = ttm_read_lock(&vmaster->lock, true);
+ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 if (unlikely(ret != 0))
 return ret;
@@ -2745,6 +2744,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 vmw_kms_cursor_post_execbuf(dev_priv);
 out_unlock:
-ttm_read_unlock(&vmaster->lock);
+ttm_read_unlock(&dev_priv->reservation_sem);
 return ret;
 }


@@ -377,14 +377,13 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
 ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 /* interuptable? */
-ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
-if (unlikely(ret != 0))
-return ret;
+(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
 vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
-if (!vmw_bo)
+if (!vmw_bo) {
+ret = -ENOMEM;
 goto err_unlock;
+}
 ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
 &ne_placement,


@@ -226,7 +226,6 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
 struct drm_vmw_present_arg *arg =
 (struct drm_vmw_present_arg *)data;
 struct vmw_surface *surface;
-struct vmw_master *vmaster = vmw_master(file_priv->master);
 struct drm_vmw_rect __user *clips_ptr;
 struct drm_vmw_rect *clips = NULL;
 struct drm_framebuffer *fb;
@@ -271,7 +270,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
 }
 vfb = vmw_framebuffer_to_vfb(fb);
-ret = ttm_read_lock(&vmaster->lock, true);
+ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 if (unlikely(ret != 0))
 goto out_no_ttm_lock;
@@ -291,7 +290,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
 vmw_surface_unreference(&surface);
 out_no_surface:
-ttm_read_unlock(&vmaster->lock);
+ttm_read_unlock(&dev_priv->reservation_sem);
 out_no_ttm_lock:
 drm_framebuffer_unreference(fb);
 out_no_fb:
@@ -311,7 +310,6 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
 struct drm_vmw_fence_rep __user *user_fence_rep =
 (struct drm_vmw_fence_rep __user *)
 (unsigned long)arg->fence_rep;
-struct vmw_master *vmaster = vmw_master(file_priv->master);
 struct drm_vmw_rect __user *clips_ptr;
 struct drm_vmw_rect *clips = NULL;
 struct drm_framebuffer *fb;
@@ -361,7 +359,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
 goto out_no_ttm_lock;
 }
-ret = ttm_read_lock(&vmaster->lock, true);
+ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 if (unlikely(ret != 0))
 goto out_no_ttm_lock;
@@ -369,7 +367,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
 vfb, user_fence_rep,
 clips, num_clips);
-ttm_read_unlock(&vmaster->lock);
+ttm_read_unlock(&dev_priv->reservation_sem);
 out_no_ttm_lock:
 drm_framebuffer_unreference(fb);
 out_no_fb:


@@ -596,7 +596,6 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 unsigned num_clips)
 {
 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
-struct vmw_master *vmaster = vmw_master(file_priv->master);
 struct vmw_framebuffer_surface *vfbs =
 vmw_framebuffer_to_vfbs(framebuffer);
 struct drm_clip_rect norect;
@@ -611,7 +610,7 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 drm_modeset_lock_all(dev_priv->dev);
-ret = ttm_read_lock(&vmaster->lock, true);
+ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 if (unlikely(ret != 0)) {
 drm_modeset_unlock_all(dev_priv->dev);
 return ret;
@@ -632,7 +631,7 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 flags, color,
 clips, num_clips, inc, NULL);
-ttm_read_unlock(&vmaster->lock);
+ttm_read_unlock(&dev_priv->reservation_sem);
 drm_modeset_unlock_all(dev_priv->dev);
@@ -954,7 +953,6 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 unsigned num_clips)
 {
 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
-struct vmw_master *vmaster = vmw_master(file_priv->master);
 struct vmw_framebuffer_dmabuf *vfbd =
 vmw_framebuffer_to_vfbd(framebuffer);
 struct drm_clip_rect norect;
@@ -962,7 +960,7 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 drm_modeset_lock_all(dev_priv->dev);
-ret = ttm_read_lock(&vmaster->lock, true);
+ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 if (unlikely(ret != 0)) {
 drm_modeset_unlock_all(dev_priv->dev);
 return ret;
@@ -989,7 +987,7 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 clips, num_clips, increment, NULL);
 }
-ttm_read_unlock(&vmaster->lock);
+ttm_read_unlock(&dev_priv->reservation_sem);
 drm_modeset_unlock_all(dev_priv->dev);
@@ -2022,7 +2020,6 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 struct vmw_private *dev_priv = vmw_priv(dev);
 struct drm_vmw_update_layout_arg *arg =
 (struct drm_vmw_update_layout_arg *)data;
-struct vmw_master *vmaster = vmw_master(file_priv->master);
 void __user *user_rects;
 struct drm_vmw_rect *rects;
 unsigned rects_size;
@@ -2030,7 +2027,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 int i;
 struct drm_mode_config *mode_config = &dev->mode_config;
-ret = ttm_read_lock(&vmaster->lock, true);
+ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 if (unlikely(ret != 0))
 return ret;
@@ -2072,6 +2069,6 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 out_free:
 kfree(rects);
 out_unlock:
-ttm_read_unlock(&vmaster->lock);
+ttm_read_unlock(&dev_priv->reservation_sem);
 return ret;
 }


@@ -676,10 +676,9 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 struct drm_vmw_dmabuf_rep *rep = &arg->rep;
 struct vmw_dma_buffer *dma_buf;
 uint32_t handle;
-struct vmw_master *vmaster = vmw_master(file_priv->master);
 int ret;
-ret = ttm_read_lock(&vmaster->lock, true);
+ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 if (unlikely(ret != 0))
 return ret;
@@ -696,7 +695,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 vmw_dmabuf_unreference(&dma_buf);
 out_no_dmabuf:
-ttm_read_unlock(&vmaster->lock);
+ttm_read_unlock(&dev_priv->reservation_sem);
 return ret;
 }
@@ -873,7 +872,6 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 struct vmw_resource *tmp;
 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-struct vmw_master *vmaster = vmw_master(file_priv->master);
 int ret;
 /*
@@ -884,7 +882,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 if (unlikely(vmw_user_stream_size == 0))
 vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
-ret = ttm_read_lock(&vmaster->lock, true);
+ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 if (unlikely(ret != 0))
 return ret;
@@ -932,7 +930,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 out_err:
 vmw_resource_unreference(&res);
 out_unlock:
-ttm_read_unlock(&vmaster->lock);
+ttm_read_unlock(&dev_priv->reservation_sem);
 return ret;
 }
@@ -985,14 +983,13 @@ int vmw_dumb_create(struct drm_file *file_priv,
 struct drm_mode_create_dumb *args)
 {
 struct vmw_private *dev_priv = vmw_priv(dev);
-struct vmw_master *vmaster = vmw_master(file_priv->master);
 struct vmw_dma_buffer *dma_buf;
 int ret;
 args->pitch = args->width * ((args->bpp + 7) / 8);
 args->size = args->pitch * args->height;
-ret = ttm_read_lock(&vmaster->lock, true);
+ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 if (unlikely(ret != 0))
 return ret;
@@ -1004,7 +1001,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
 vmw_dmabuf_unreference(&dma_buf);
 out_no_dmabuf:
-ttm_read_unlock(&vmaster->lock);
+ttm_read_unlock(&dev_priv->reservation_sem);
 return ret;
 }


@@ -449,7 +449,6 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 struct drm_vmw_shader_create_arg *arg =
 (struct drm_vmw_shader_create_arg *)data;
 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-struct vmw_master *vmaster = vmw_master(file_priv->master);
 struct vmw_dma_buffer *buffer = NULL;
 SVGA3dShaderType shader_type;
 int ret;
@@ -487,14 +486,14 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 goto out_bad_arg;
 }
-ret = ttm_read_lock(&vmaster->lock, true);
+ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 if (unlikely(ret != 0))
 goto out_bad_arg;
 ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
 shader_type, tfile, &arg->shader_handle);
-ttm_read_unlock(&vmaster->lock);
+ttm_read_unlock(&dev_priv->reservation_sem);
 out_bad_arg:
 vmw_dmabuf_unreference(&buffer);
 return ret;


@@ -697,7 +697,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 struct vmw_surface_offset *cur_offset;
 uint32_t num_sizes;
 uint32_t size;
-struct vmw_master *vmaster = vmw_master(file_priv->master);
 const struct svga3d_surface_desc *desc;
 if (unlikely(vmw_user_surface_size == 0))
@@ -723,7 +722,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 return -EINVAL;
 }
-ret = ttm_read_lock(&vmaster->lock, true);
+ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 if (unlikely(ret != 0))
 return ret;
@@ -862,7 +861,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 rep->sid = user_srf->prime.base.hash.key;
 vmw_resource_unreference(&res);
-ttm_read_unlock(&vmaster->lock);
+ttm_read_unlock(&dev_priv->reservation_sem);
 return 0;
 out_no_copy:
 kfree(srf->offsets);
@@ -873,7 +872,7 @@ out_no_sizes:
 out_no_user_srf:
 ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 out_unlock:
-ttm_read_unlock(&vmaster->lock);
+ttm_read_unlock(&dev_priv->reservation_sem);
 return ret;
 }
@@ -1173,7 +1172,6 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 int ret;
 uint32_t size;
-struct vmw_master *vmaster = vmw_master(file_priv->master);
 const struct svga3d_surface_desc *desc;
 uint32_t backup_handle;
@@ -1189,7 +1187,7 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 return -EINVAL;
 }
-ret = ttm_read_lock(&vmaster->lock, true);
+ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 if (unlikely(ret != 0))
 return ret;
@@ -1283,12 +1281,12 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 vmw_resource_unreference(&res);
-ttm_read_unlock(&vmaster->lock);
+ttm_read_unlock(&dev_priv->reservation_sem);
 return 0;
 out_no_user_srf:
 ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 out_unlock:
-ttm_read_unlock(&vmaster->lock);
+ttm_read_unlock(&dev_priv->reservation_sem);
 return ret;
 }
}