Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-12-24 21:24:00 +08:00
Merge remote branch 'korg/drm-core-next' into drm-next-stage
* korg/drm-core-next:
  drm/ttm: handle OOM in ttm_tt_swapout
  drm/radeon/kms/atom: fix shr/shl ops
  drm/kms: fix spelling of "CLOCK"
  drm/kms: fix fb_changed = true else statement
  drivers/gpu/drm/drm_fb_helper.c: don't use private implementation of atoi()
  drm: switch all GEM/KMS ioctls to unlocked ioctl status.
  Use drm_gem_object_[handle_]unreference_unlocked where possible
  drm: introduce drm_gem_object_[handle_]unreference_unlocked
commit 79fa9eb739
@@ -836,11 +836,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 			mode_changed = true;
 		} else if (set->fb == NULL) {
 			mode_changed = true;
-		} else if ((set->fb->bits_per_pixel !=
-			    set->crtc->fb->bits_per_pixel) ||
-			    set->fb->depth != set->crtc->fb->depth)
-			fb_changed = true;
-		else
+		} else
 			fb_changed = true;
 	}
@@ -125,28 +125,28 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW)
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
 };
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
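The DRM_UNLOCKED flag added above marks an ioctl handler as safe to run without the Big Kernel Lock. A minimal sketch of how such a flag is typically consumed by the common dispatcher, simplified and abbreviated rather than the verbatim drm_ioctl() of this tree (ioctl, func, dev, kdata and file_priv stand for the descriptor, handler and its usual arguments):

	/* Sketch: dispatch step that honours DRM_UNLOCKED (illustrative). */
	if (ioctl->flags & DRM_UNLOCKED)
		retcode = func(dev, kdata, file_priv);	/* run without the BKL */
	else {
		lock_kernel();
		retcode = func(dev, kdata, file_priv);
		unlock_kernel();
	}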
@@ -27,6 +27,7 @@
  *      Dave Airlie <airlied@linux.ie>
  *      Jesse Barnes <jesse.barnes@intel.com>
  */
+#include <linux/kernel.h>
 #include <linux/sysrq.h>
 #include <linux/fb.h>
 #include "drmP.h"
@@ -50,21 +51,6 @@ int drm_fb_helper_add_connector(struct drm_connector *connector)
 }
 EXPORT_SYMBOL(drm_fb_helper_add_connector);
 
-static int my_atoi(const char *name)
-{
-	int val = 0;
-
-	for (;; name++) {
-		switch (*name) {
-		case '0' ... '9':
-			val = 10*val+(*name-'0');
-			break;
-		default:
-			return val;
-		}
-	}
-}
-
 /**
  * drm_fb_helper_connector_parse_command_line - parse command line for connector
  * @connector - connector to parse line for
@@ -111,7 +97,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
 			namelen = i;
 			if (!refresh_specified && !bpp_specified &&
 			    !yres_specified) {
-				refresh = my_atoi(&name[i+1]);
+				refresh = simple_strtol(&name[i+1], NULL, 10);
 				refresh_specified = 1;
 				if (cvt || rb)
 					cvt = 0;
@@ -121,7 +107,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
 		case '-':
 			namelen = i;
 			if (!bpp_specified && !yres_specified) {
-				bpp = my_atoi(&name[i+1]);
+				bpp = simple_strtol(&name[i+1], NULL, 10);
 				bpp_specified = 1;
 				if (cvt || rb)
 					cvt = 0;
@@ -130,7 +116,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
 			break;
 		case 'x':
 			if (!yres_specified) {
-				yres = my_atoi(&name[i+1]);
+				yres = simple_strtol(&name[i+1], NULL, 10);
 				yres_specified = 1;
 			} else
 				goto done;
@@ -170,7 +156,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
 		}
 	}
 	if (i < 0 && yres_specified) {
-		xres = my_atoi(name);
+		xres = simple_strtol(name, NULL, 10);
 		res_specified = 1;
 	}
 done:
@@ -694,7 +680,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
 	int i;
 
 	if (var->pixclock != 0) {
-		DRM_ERROR("PIXEL CLCOK SET\n");
+		DRM_ERROR("PIXEL CLOCK SET\n");
 		return -EINVAL;
 	}
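The private my_atoi() removed above is replaced by the kernel's simple_strtol(), which <linux/kernel.h> provides. A minimal sketch of equivalent parsing under that assumption; parse_xres_yres is a hypothetical helper for illustration, while the real parser in drm_fb_helper.c walks the whole mode string and handles more suffixes:

	#include <linux/kernel.h>	/* simple_strtol() */

	/* Hypothetical helper: parse "<xres>x<yres>" from a video= style token. */
	static void parse_xres_yres(const char *name, int *xres, int *yres)
	{
		char *end;

		*xres = simple_strtol(name, &end, 10);	/* digits up to the 'x' */
		if (*end == 'x')
			*yres = simple_strtol(end + 1, NULL, 10);
	}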
@@ -192,9 +192,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 	idr_remove(&filp->object_idr, handle);
 	spin_unlock(&filp->table_lock);
 
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_handle_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_handle_unreference_unlocked(obj);
 
 	return 0;
 }
@@ -325,9 +323,7 @@ again:
 	}
 
 err:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
 	return ret;
 }
 
@@ -358,9 +354,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 
 	ret = drm_gem_handle_create(file_priv, obj, &handle);
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
 	if (ret)
 		return ret;
 
@@ -390,7 +384,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 {
 	struct drm_gem_object *obj = ptr;
 
-	drm_gem_object_handle_unreference(obj);
+	drm_gem_object_handle_unreference_unlocked(obj);
 
 	return 0;
 }
@@ -403,16 +397,25 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 void
 drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 {
-	mutex_lock(&dev->struct_mutex);
 	idr_for_each(&file_private->object_idr,
 		     &drm_gem_object_release_handle, NULL);
 
 	idr_destroy(&file_private->object_idr);
-	mutex_unlock(&dev->struct_mutex);
+}
+
+static void
+drm_gem_object_free_common(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	fput(obj->filp);
+	atomic_dec(&dev->object_count);
+	atomic_sub(obj->size, &dev->object_memory);
+	kfree(obj);
 }
 
 /**
  * Called after the last reference to the object has been lost.
  * Must be called holding struct_ mutex
  *
  * Frees the object
  */
@@ -427,13 +430,39 @@ drm_gem_object_free(struct kref *kref)
 	if (dev->driver->gem_free_object != NULL)
 		dev->driver->gem_free_object(obj);
 
-	fput(obj->filp);
-	atomic_dec(&dev->object_count);
-	atomic_sub(obj->size, &dev->object_memory);
-	kfree(obj);
+	drm_gem_object_free_common(obj);
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
+/**
+ * Called after the last reference to the object has been lost.
+ * Must be called without holding struct_mutex
+ *
+ * Frees the object
+ */
+void
+drm_gem_object_free_unlocked(struct kref *kref)
+{
+	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+	struct drm_device *dev = obj->dev;
+
+	if (dev->driver->gem_free_object_unlocked != NULL)
+		dev->driver->gem_free_object_unlocked(obj);
+	else if (dev->driver->gem_free_object != NULL) {
+		mutex_lock(&dev->struct_mutex);
+		dev->driver->gem_free_object(obj);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	drm_gem_object_free_common(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_free_unlocked);
+
+static void drm_gem_object_ref_bug(struct kref *list_kref)
+{
+	BUG();
+}
+
 /**
  * Called after the last handle to the object has been closed
  *
@@ -458,8 +487,10 @@ drm_gem_object_handle_free(struct kref *kref)
 		/*
 		 * The object name held a reference to this object, drop
 		 * that now.
+		 *
+		 * This cannot be the last reference, since the handle holds one too.
 		 */
-		drm_gem_object_unreference(obj);
+		kref_put(&obj->refcount, drm_gem_object_ref_bug);
 	} else
 		spin_unlock(&dev->object_name_lock);
 
@@ -477,11 +508,8 @@ EXPORT_SYMBOL(drm_gem_vm_open);
 void drm_gem_vm_close(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
-	struct drm_device *dev = obj->dev;
 
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
 
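The new gem_free_object_unlocked hook lets a driver opt in to having its objects freed without the caller holding struct_mutex; drm_gem_object_free_unlocked() above falls back to taking the mutex around the old hook otherwise. A purely illustrative sketch of how a driver might wire it up; example_driver and example_gem_free_object_unlocked are hypothetical names, not part of this patch:

	/* Illustrative only: a driver providing the new unlocked free hook. */
	static void example_gem_free_object_unlocked(struct drm_gem_object *obj)
	{
		/* Driver-private teardown that does not rely on struct_mutex. */
		kfree(obj->driver_private);
	}

	static struct drm_driver example_driver = {
		/* ... other hooks elided ... */
		.gem_free_object_unlocked = example_gem_free_object_unlocked,
	};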
@@ -128,9 +128,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 		return -ENOMEM;
 
 	ret = drm_gem_handle_create(file_priv, obj, &handle);
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_handle_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_handle_unreference_unlocked(obj);
 
 	if (ret)
 		return ret;
@@ -488,7 +486,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	 */
 	if (args->offset > obj->size || args->size > obj->size ||
 	    args->offset + args->size > obj->size) {
-		drm_gem_object_unreference(obj);
+		drm_gem_object_unreference_unlocked(obj);
 		return -EINVAL;
 	}
 
@@ -501,7 +499,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 							file_priv);
 	}
 
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference_unlocked(obj);
 
 	return ret;
 }
@@ -961,7 +959,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 */
 	if (args->offset > obj->size || args->size > obj->size ||
 	    args->offset + args->size > obj->size) {
-		drm_gem_object_unreference(obj);
+		drm_gem_object_unreference_unlocked(obj);
 		return -EINVAL;
 	}
 
@@ -995,7 +993,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		DRM_INFO("pwrite failed %d\n", ret);
 #endif
 
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference_unlocked(obj);
 
 	return ret;
 }
@@ -1138,9 +1136,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		       PROT_READ | PROT_WRITE, MAP_SHARED,
 		       args->offset);
 	up_write(&current->mm->mmap_sem);
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
 	if (IS_ERR((void *)addr))
 		return addr;
 
@@ -438,9 +438,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	obj_priv = obj->driver_private;
 
 	if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
-		mutex_lock(&dev->struct_mutex);
-		drm_gem_object_unreference(obj);
-		mutex_unlock(&dev->struct_mutex);
+		drm_gem_object_unreference_unlocked(obj);
 		return -EINVAL;
 	}
 
@@ -3553,11 +3553,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	intel_crtc->cursor_bo = bo;
 
 	return 0;
-fail:
-	mutex_lock(&dev->struct_mutex);
 fail_locked:
-	drm_gem_object_unreference(bo);
 	mutex_unlock(&dev->struct_mutex);
+fail:
+	drm_gem_object_unreference_unlocked(bo);
 	return ret;
 }
 
@@ -4476,9 +4475,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
 		intelfb_remove(dev, fb);
 
 	drm_framebuffer_cleanup(fb);
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(intel_fb->obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(intel_fb->obj);
 
 	kfree(intel_fb);
 }
@@ -4541,9 +4538,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
 
 	ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
 	if (ret) {
-		mutex_lock(&dev->struct_mutex);
-		drm_gem_object_unreference(obj);
-		mutex_unlock(&dev->struct_mutex);
+		drm_gem_object_unreference_unlocked(obj);
 		return NULL;
 	}
 
@@ -1179,7 +1179,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	mutex_unlock(&dev->mode_config.mutex);
-	drm_gem_object_unreference(new_bo);
+	drm_gem_object_unreference_unlocked(new_bo);
 	kfree(params);
 
 	return ret;
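The driver-side hunks from here on repeat the same mechanical conversion; a minimal before/after sketch of the pattern, as an illustrative fragment rather than a quote of any one function (dev and obj stand for the usual drm_device and drm_gem_object pointers):

	/* Before: the caller took struct_mutex only to drop its reference. */
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	/* After: the helper handles locking itself if this was the last
	 * reference and the driver has no unlocked free hook. */
	drm_gem_object_unreference_unlocked(obj);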
@@ -39,11 +39,8 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
 	if (drm_fb->fbdev)
 		nouveau_fbcon_remove(dev, drm_fb);
 
-	if (fb->nvbo) {
-		mutex_lock(&dev->struct_mutex);
-		drm_gem_object_unreference(fb->nvbo->gem);
-		mutex_unlock(&dev->struct_mutex);
-	}
+	if (fb->nvbo)
+		drm_gem_object_unreference_unlocked(fb->nvbo->gem);
 
 	drm_framebuffer_cleanup(drm_fb);
 	kfree(fb);
@@ -401,10 +401,8 @@ nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
 
 	unregister_framebuffer(info);
 	nouveau_bo_unmap(nouveau_fb->nvbo);
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(nouveau_fb->nvbo->gem);
+	drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
 	nouveau_fb->nvbo = NULL;
-	mutex_unlock(&dev->struct_mutex);
 	if (par)
 		drm_fb_helper_free(&par->helper);
 	framebuffer_release(info);
@@ -167,12 +167,10 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 
 	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
 out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_handle_unreference(nvbo->gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_handle_unreference_unlocked(nvbo->gem);
 
 	if (ret)
-		drm_gem_object_unreference(nvbo->gem);
+		drm_gem_object_unreference_unlocked(nvbo->gem);
 	return ret;
 }
 
@@ -865,9 +863,7 @@ nouveau_gem_ioctl_pin(struct drm_device *dev, void *data,
 	req->domain = NOUVEAU_GEM_DOMAIN_VRAM;
 
 out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem);
 
 	return ret;
 }
@@ -891,9 +887,7 @@ nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data,
 
 	ret = nouveau_bo_unpin(nouveau_gem_object(gem));
 
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem);
 
 	return ret;
 }
@@ -935,9 +929,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 	}
 
 out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
 
@@ -965,9 +957,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
 	ret = 0;
 
 out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
 
@@ -986,9 +976,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
 		return -EINVAL;
 
 	ret = nouveau_gem_info(gem, req);
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
 
@@ -61,11 +61,8 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
 
 	chan->notifier_bo = ntfy;
 out_err:
-	if (ret) {
-		mutex_lock(&dev->struct_mutex);
-		drm_gem_object_unreference(ntfy->gem);
-		mutex_unlock(&dev->struct_mutex);
-	}
+	if (ret)
+		drm_gem_object_unreference_unlocked(ntfy->gem);
 
 	return ret;
 }
@@ -81,8 +78,8 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
 	nouveau_bo_unmap(chan->notifier_bo);
 	mutex_lock(&dev->struct_mutex);
 	nouveau_bo_unpin(chan->notifier_bo);
-	drm_gem_object_unreference(chan->notifier_bo->gem);
 	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
 	nouveau_mem_takedown(&chan->notifier_heap);
 }
 
@@ -926,9 +926,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
 	nv_crtc->cursor.show(nv_crtc, true);
 out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
 
@@ -358,9 +358,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 	nv_crtc->cursor.show(nv_crtc, true);
 
 out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
 
@@ -881,8 +881,6 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
 	uint8_t attr = U8((*ptr)++), shift;
 	uint32_t saved, dst;
 	int dptr = *ptr;
-	attr &= 0x38;
-	attr |= atom_def_dst[attr >> 3] << 6;
 	SDEBUG(" dst: ");
 	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 	shift = atom_get_src(ctx, attr, ptr);
@@ -897,8 +895,6 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
 	uint8_t attr = U8((*ptr)++), shift;
 	uint32_t saved, dst;
 	int dptr = *ptr;
-	attr &= 0x38;
-	attr |= atom_def_dst[attr >> 3] << 6;
 	SDEBUG(" dst: ");
 	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 	shift = atom_get_src(ctx, attr, ptr);
@@ -194,11 +194,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 	}
 	radeon_bo_list_unreserve(&parser->validated);
 	for (i = 0; i < parser->nrelocs; i++) {
-		if (parser->relocs[i].gobj) {
-			mutex_lock(&parser->rdev->ddev->struct_mutex);
-			drm_gem_object_unreference(parser->relocs[i].gobj);
-			mutex_unlock(&parser->rdev->ddev->struct_mutex);
-		}
+		if (parser->relocs[i].gobj)
+			drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
 	}
 	kfree(parser->track);
 	kfree(parser->relocs);
@@ -169,17 +169,13 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
 unpin:
 	if (radeon_crtc->cursor_bo) {
 		radeon_gem_object_unpin(radeon_crtc->cursor_bo);
-		mutex_lock(&crtc->dev->struct_mutex);
-		drm_gem_object_unreference(radeon_crtc->cursor_bo);
-		mutex_unlock(&crtc->dev->struct_mutex);
+		drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
 	}
 
 	radeon_crtc->cursor_bo = obj;
 	return 0;
 fail:
-	mutex_lock(&crtc->dev->struct_mutex);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&crtc->dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
 
 	return 0;
 }
@@ -679,11 +679,8 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
 	if (fb->fbdev)
 		radeonfb_remove(dev, fb);
 
-	if (radeon_fb->obj) {
-		mutex_lock(&dev->struct_mutex);
-		drm_gem_object_unreference(radeon_fb->obj);
-		mutex_unlock(&dev->struct_mutex);
-	}
+	if (radeon_fb->obj)
+		drm_gem_object_unreference_unlocked(radeon_fb->obj);
 	drm_framebuffer_cleanup(fb);
 	kfree(radeon_fb);
 }
@@ -69,9 +69,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
 		if (r != -ERESTARTSYS)
 			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
 				  size, initial_domain, alignment, r);
-		mutex_lock(&rdev->ddev->struct_mutex);
-		drm_gem_object_unreference(gobj);
-		mutex_unlock(&rdev->ddev->struct_mutex);
+		drm_gem_object_unreference_unlocked(gobj);
 		return r;
 	}
 	gobj->driver_private = robj;
@@ -202,14 +200,10 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
 	}
 	r = drm_gem_handle_create(filp, gobj, &handle);
 	if (r) {
-		mutex_lock(&dev->struct_mutex);
-		drm_gem_object_unreference(gobj);
-		mutex_unlock(&dev->struct_mutex);
+		drm_gem_object_unreference_unlocked(gobj);
 		return r;
 	}
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_handle_unreference(gobj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_handle_unreference_unlocked(gobj);
 	args->handle = handle;
 	return 0;
 }
@@ -236,9 +230,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 
 	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
 
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gobj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gobj);
 	return r;
 }
 
@@ -255,9 +247,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	}
 	robj = gobj->driver_private;
 	args->addr_ptr = radeon_bo_mmap_offset(robj);
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gobj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gobj);
 	return 0;
 }
 
@@ -288,9 +278,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 	default:
 		break;
 	}
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gobj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gobj);
 	return r;
 }
 
@@ -311,9 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	/* callback hw specific functions if any */
 	if (robj->rdev->asic->ioctl_wait_idle)
 		robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gobj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gobj);
 	return r;
 }
 
@@ -331,9 +317,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	robj = gobj->driver_private;
 	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gobj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gobj);
 	return r;
 }
 
@@ -356,8 +340,6 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
 	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
 	radeon_bo_unreserve(rbo);
 out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gobj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gobj);
 	return r;
 }
 
@@ -480,7 +480,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
 	void *from_virtual;
 	void *to_virtual;
 	int i;
-	int ret;
+	int ret = -ENOMEM;
 
 	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
 		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
@@ -499,8 +499,10 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
 
 	for (i = 0; i < ttm->num_pages; ++i) {
 		from_page = read_mapping_page(swap_space, i, NULL);
-		if (IS_ERR(from_page))
+		if (IS_ERR(from_page)) {
+			ret = PTR_ERR(from_page);
 			goto out_err;
+		}
 		to_page = __ttm_tt_get_page(ttm, i);
 		if (unlikely(to_page == NULL))
 			goto out_err;
@@ -523,7 +525,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
 	return 0;
 out_err:
 	ttm_tt_free_alloced_pages(ttm);
-	return -ENOMEM;
+	return ret;
 }
 
 int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
@@ -535,6 +537,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
 	void *from_virtual;
 	void *to_virtual;
 	int i;
+	int ret = -ENOMEM;
 
 	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
 	BUG_ON(ttm->caching_state != tt_cached);
@@ -557,7 +560,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
 						0);
 		if (unlikely(IS_ERR(swap_storage))) {
 			printk(KERN_ERR "Failed allocating swap storage.\n");
-			return -ENOMEM;
+			return PTR_ERR(swap_storage);
 		}
 	} else
 		swap_storage = persistant_swap_storage;
@@ -569,9 +572,10 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
 		if (unlikely(from_page == NULL))
 			continue;
 		to_page = read_mapping_page(swap_space, i, NULL);
-		if (unlikely(to_page == NULL))
+		if (unlikely(IS_ERR(to_page))) {
+			ret = PTR_ERR(to_page);
 			goto out_err;
+		}
 		preempt_disable();
 		from_virtual = kmap_atomic(from_page, KM_USER0);
 		to_virtual = kmap_atomic(to_page, KM_USER1);
@@ -595,5 +599,5 @@ out_err:
 	if (!persistant_swap_storage)
 		fput(swap_storage);
 
-	return -ENOMEM;
+	return ret;
 }
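The TTM changes above follow the kernel's IS_ERR()/PTR_ERR() convention: read_mapping_page() returns either a valid page pointer or an error encoded in the pointer itself, and the fix propagates that error instead of collapsing everything to -ENOMEM. A minimal sketch of the idiom; fetch_and_use is a hypothetical function written only to illustrate the pattern:

	#include <linux/err.h>
	#include <linux/pagemap.h>
	#include <linux/mm.h>

	/* Hypothetical: fetch a page and propagate the real error code. */
	static int fetch_and_use(struct address_space *mapping, pgoff_t index)
	{
		struct page *page = read_mapping_page(mapping, index, NULL);

		if (IS_ERR(page))
			return PTR_ERR(page);	/* e.g. -EIO or -ENOMEM, not a blanket code */

		/* ... use the page ... */
		put_page(page);
		return 0;
	}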
@@ -801,6 +801,7 @@ struct drm_driver {
 	 */
 	int (*gem_init_object) (struct drm_gem_object *obj);
 	void (*gem_free_object) (struct drm_gem_object *obj);
+	void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
 
 	/* vga arb irq handler */
 	void (*vgaarb_irq)(struct drm_device *dev, bool state);
@@ -1427,6 +1428,7 @@ extern void drm_sysfs_connector_remove(struct drm_connector *connector);
 int drm_gem_init(struct drm_device *dev);
 void drm_gem_destroy(struct drm_device *dev);
 void drm_gem_object_free(struct kref *kref);
+void drm_gem_object_free_unlocked(struct kref *kref);
 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
 					    size_t size);
 void drm_gem_object_handle_free(struct kref *kref);
@@ -1443,10 +1445,15 @@ drm_gem_object_reference(struct drm_gem_object *obj)
 static inline void
 drm_gem_object_unreference(struct drm_gem_object *obj)
 {
-	if (obj == NULL)
-		return;
+	if (obj != NULL)
+		kref_put(&obj->refcount, drm_gem_object_free);
+}
 
-	kref_put(&obj->refcount, drm_gem_object_free);
+static inline void
+drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
+{
+	if (obj != NULL)
+		kref_put(&obj->refcount, drm_gem_object_free_unlocked);
 }
 
 int drm_gem_handle_create(struct drm_file *file_priv,
@@ -1475,6 +1482,21 @@ drm_gem_object_handle_unreference(struct drm_gem_object *obj)
 	drm_gem_object_unreference(obj);
 }
 
+static inline void
+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
+{
+	if (obj == NULL)
+		return;
+
+	/*
+	 * Must bump handle count first as this may be the last
+	 * ref, in which case the object would disappear before we
+	 * checked for a name
+	 */
+	kref_put(&obj->handlecount, drm_gem_object_handle_free);
+	drm_gem_object_unreference_unlocked(obj);
+}
+
 struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
 					     struct drm_file *filp,
 					     u32 handle);
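With the two inline helpers above, the ioctl epilogue that this merge converts throughout the drivers looks roughly like the following; an illustrative fragment (handle, args, obj and file_priv are the usual locals of such an ioctl, not quoted from one specific function):

	/* Create a userspace handle, then drop the function-local reference
	 * without holding struct_mutex; the handle keeps the object alive. */
	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;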