
Merge tag 'topic/drm-misc-2015-10-19' of git://anongit.freedesktop.org/drm-intel into drm-next

More drm-misc for 4.4.
- fb refcount fix in atomic fbdev
- various locking reworks to reduce drm_global_mutex and dev->struct_mutex
- rename docbook to gpu.tmpl and include vga_switcheroo stuff, plus more
  vga_switcheroo (Lukas Wunner)
- viewport check fixes for atomic drivers from Ville
- DRM_DEBUG_VBL from Ville
- non-contentious header fixes from Mikko Rapeli
- small things all over

* tag 'topic/drm-misc-2015-10-19' of git://anongit.freedesktop.org/drm-intel: (31 commits)
  drm/fb-helper: Fix fb refcounting in pan_display_atomic
  drm/fb-helper: Set plane rotation directly
  drm: fix mutex leak in drm_dp_get_mst_branch_device
  drm: Check plane src coordinates correctly during page flip for atomic drivers
  drm: Check crtc viewport correctly with rotated primary plane on atomic drivers
  drm: Refactor plane src coordinate checks
  drm: Swap w/h when converting the mode to src coordinates for a rotated primary plane
  drm: Don't leak fb when plane crtc coordinates are bad
  ALSA: hda - Spell vga_switcheroo consistently
  drm/gem: Use kref_get_unless_zero for the weak mmap references
  drm/vgem: Drop vgem_drm_gem_mmap
  drm: Fix return value of drm_framebuffer_init()
  drm/gem: Use container_of in drm_gem_object_free
  drm/gem: Check locking in drm_gem_object_unreference
  drm/gem: Drop struct_mutex requirement from drm_gem_mmap_obj
  drm/i810_drm.h: include drm/drm.h
  r128_drm.h: include drm/drm.h
  savage_drm.h: include <drm/drm.h>
  gpu/doc: Convert to markdown harder
  gpu/doc: Add vga_switcheroo documentation
  ...
Dave Airlie 2015-10-20 09:01:49 +10:00
commit affa0e033b
41 changed files with 444 additions and 402 deletions


@ -14,7 +14,7 @@ DOCBOOKS := z8530book.xml device-drivers.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
80211.xml debugobjects.xml sh.xml regulator.xml \
alsa-driver-api.xml writing-an-alsa-driver.xml \
tracepoint.xml drm.xml media_api.xml w1.xml \
tracepoint.xml gpu.xml media_api.xml w1.xml \
writing_musb_glue_layer.xml crypto-API.xml iio.xml
include Documentation/DocBook/media/Makefile


@ -2,9 +2,9 @@
<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
<book id="drmDevelopersGuide">
<book id="gpuDevelopersGuide">
<bookinfo>
<title>Linux DRM Developer's Guide</title>
<title>Linux GPU Driver Developer's Guide</title>
<authorgroup>
<author>
@ -40,6 +40,16 @@
</address>
</affiliation>
</author>
<author>
<firstname>Lukas</firstname>
<surname>Wunner</surname>
<contrib>vga_switcheroo documentation</contrib>
<affiliation>
<address>
<email>lukas@wunner.de</email>
</address>
</affiliation>
</author>
</authorgroup>
<copyright>
@ -51,6 +61,10 @@
<year>2012</year>
<holder>Laurent Pinchart</holder>
</copyright>
<copyright>
<year>2015</year>
<holder>Lukas Wunner</holder>
</copyright>
<legalnotice>
<para>
@ -69,6 +83,13 @@
<revremark>Added extensive documentation about driver internals.
</revremark>
</revision>
<revision>
<revnumber>1.1</revnumber>
<date>2015-10-11</date>
<authorinitials>LW</authorinitials>
<revremark>Added vga_switcheroo documentation.
</revremark>
</revision>
</revhistory>
</bookinfo>
@ -78,9 +99,9 @@
<title>DRM Core</title>
<partintro>
<para>
This first part of the DRM Developer's Guide documents core DRM code,
helper libraries for writing drivers and generic userspace interfaces
exposed by DRM drivers.
This first part of the GPU Driver Developer's Guide documents core DRM
code, helper libraries for writing drivers and generic userspace
interfaces exposed by DRM drivers.
</para>
</partintro>
@ -3583,10 +3604,11 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
plane properties to default value, so that a subsequent open of the
device will not inherit state from the previous user. It can also be
used to execute delayed power switching state changes, e.g. in
conjunction with the vga_switcheroo infrastructure. Beyond that KMS
drivers should not do any further cleanup. Only legacy UMS drivers might
need to clean up device state so that the vga console or an independent
fbdev driver could take over.
conjunction with the vga_switcheroo infrastructure (see
<xref linkend="vga_switcheroo"/>). Beyond that KMS drivers should not
do any further cleanup. Only legacy UMS drivers might need to clean up
device state so that the vga console or an independent fbdev driver
could take over.
</para>
</sect2>
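For reference, a hedged sketch of such a lastclose hook in a KMS driver; the foo_ names are illustrative, and drm_fb_helper_restore_fbdev_mode_unlocked() is one helper commonly used for this at the time:

static void foo_lastclose(struct drm_device *dev)
{
	struct foo_device *foo = dev->dev_private;

	/* Restore the fbdev console config; resets fb, cursor and planes. */
	drm_fb_helper_restore_fbdev_mode_unlocked(&foo->fbdev_helper);
}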
<sect2>
@ -3684,7 +3706,9 @@ int num_ioctls;</synopsis>
</para></listitem>
<listitem><para>
DRM_UNLOCKED - The ioctl handler will be called without locking
the DRM global mutex
the DRM global mutex. This is the enforced default for kms drivers
(i.e. using the DRIVER_MODESET flag) and hence shouldn't be used
any more for new drivers.
</para></listitem>
</itemizedlist>
</para>
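In practice a new KMS driver simply omits the flag and the core dispatches its driver-private ioctls without the global mutex. A minimal sketch; the FOO_* ioctls and handlers are hypothetical:

static const struct drm_ioctl_desc foo_ioctls[] = {
	/* No DRM_UNLOCKED needed: implied for DRIVER_MODESET drivers. */
	DRM_IOCTL_DEF_DRV(FOO_GEM_NEW, foo_gem_new_ioctl,
			  DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(FOO_GEM_INFO, foo_gem_info_ioctl,
			  DRM_AUTH | DRM_RENDER_ALLOW),
};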
@ -3887,8 +3911,8 @@ int num_ioctls;</synopsis>
<partintro>
<para>
This second part of the DRM Developer's Guide documents driver code,
implementation details and also all the driver-specific userspace
This second part of the GPU Driver Developer's Guide documents driver
code, implementation details and also all the driver-specific userspace
interfaces. Especially since all hardware-acceleration interfaces to
userspace are driver specific for efficiency and other reasons these
interfaces can be rather substantial. Hence every driver has its own
@ -4213,4 +4237,50 @@ int num_ioctls;</synopsis>
</chapter>
!Cdrivers/gpu/drm/i915/i915_irq.c
</part>
<part id="vga_switcheroo">
<title>vga_switcheroo</title>
<partintro>
!Pdrivers/gpu/vga/vga_switcheroo.c Overview
</partintro>
<chapter id="modes_of_use">
<title>Modes of Use</title>
<sect1>
<title>Manual switching and manual power control</title>
!Pdrivers/gpu/vga/vga_switcheroo.c Manual switching and manual power control
</sect1>
<sect1>
<title>Driver power control</title>
!Pdrivers/gpu/vga/vga_switcheroo.c Driver power control
</sect1>
</chapter>
<chapter id="pubfunctions">
<title>Public functions</title>
!Edrivers/gpu/vga/vga_switcheroo.c
</chapter>
<chapter id="pubstructures">
<title>Public structures</title>
!Finclude/linux/vga_switcheroo.h vga_switcheroo_handler
!Finclude/linux/vga_switcheroo.h vga_switcheroo_client_ops
</chapter>
<chapter id="pubconstants">
<title>Public constants</title>
!Finclude/linux/vga_switcheroo.h vga_switcheroo_client_id
!Finclude/linux/vga_switcheroo.h vga_switcheroo_state
</chapter>
<chapter id="privstructures">
<title>Private structures</title>
!Fdrivers/gpu/vga/vga_switcheroo.c vgasr_priv
!Fdrivers/gpu/vga/vga_switcheroo.c vga_switcheroo_client
</chapter>
!Cdrivers/gpu/vga/vga_switcheroo.c
!Cinclude/linux/vga_switcheroo.h
</part>
</book>


@ -689,18 +689,18 @@ int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
}
const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
/* KMS */
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);


@ -163,12 +163,9 @@ static void armada_drm_disable_vblank(struct drm_device *dev, unsigned int pipe)
}
static struct drm_ioctl_desc armada_ioctls[] = {
DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,
DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl,
DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl,
DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl, 0),
DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl, 0),
DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl, 0),
};
static void armada_drm_lastclose(struct drm_device *dev)


@ -1790,8 +1790,13 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
primary_state->crtc_w = set->mode->hdisplay;
primary_state->src_x = set->x << 16;
primary_state->src_y = set->y << 16;
primary_state->src_h = set->mode->vdisplay << 16;
primary_state->src_w = set->mode->hdisplay << 16;
if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
primary_state->src_h = set->mode->hdisplay << 16;
primary_state->src_w = set->mode->vdisplay << 16;
} else {
primary_state->src_h = set->mode->vdisplay << 16;
primary_state->src_w = set->mode->hdisplay << 16;
}
commit:
ret = update_output_state(state, set);
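The swap is easiest to see with numbers: a 1920x1080 mode scanned out through a 90-degree-rotated primary plane must read a 1080x1920 region from the framebuffer, so src_w/src_h take the mode's vdisplay/hdisplay. A hedged sketch of the selection logic factored out of the hunk above (the helper name is illustrative):

/* Pick the fb source size for a possibly-rotated primary plane. */
static void foo_primary_src_size(const struct drm_display_mode *mode,
				 unsigned int rotation,
				 uint32_t *src_w, uint32_t *src_h)
{
	bool rotated = rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270));

	*src_w = (rotated ? mode->vdisplay : mode->hdisplay) << 16;
	*src_h = (rotated ? mode->hdisplay : mode->vdisplay) << 16;
}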


@ -306,8 +306,7 @@ static int drm_mode_object_get_reg(struct drm_device *dev,
* reference counted modeset objects like framebuffers.
*
* Returns:
* New unique (relative to other objects in @dev) integer identifier for the
* object.
* Zero on success, error code on failure.
*/
int drm_mode_object_get(struct drm_device *dev,
struct drm_mode_object *obj, uint32_t obj_type)
@ -423,7 +422,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
out:
mutex_unlock(&dev->mode_config.fb_lock);
return 0;
return ret;
}
EXPORT_SYMBOL(drm_framebuffer_init);
@ -677,7 +676,6 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
crtc->dev = dev;
crtc->funcs = funcs;
crtc->invert_dimensions = false;
drm_modeset_lock_init(&crtc->mutex);
ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
@ -2286,6 +2284,32 @@ int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
return -EINVAL;
}
static int check_src_coords(uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
const struct drm_framebuffer *fb)
{
unsigned int fb_width, fb_height;
fb_width = fb->width << 16;
fb_height = fb->height << 16;
/* Make sure source coordinates are inside the fb. */
if (src_w > fb_width ||
src_x > fb_width - src_w ||
src_h > fb_height ||
src_y > fb_height - src_h) {
DRM_DEBUG_KMS("Invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
return -ENOSPC;
}
return 0;
}
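The coordinates are 16.16 fixed point, and the debug format prints the fractional part as six decimal digits: (frac * 15625) >> 10 equals frac * 1000000 / 65536, because 1000000/65536 reduces to 15625/1024. A standalone userspace illustration of the conversion:

#include <stdint.h>
#include <stdio.h>

/* Print a 16.16 fixed-point value as "integer.microunits". */
static void print_fixed16(uint32_t v)
{
	/* (frac * 15625) >> 10 == frac * 1000000 / 65536 */
	printf("%u.%06u\n", v >> 16, ((v & 0xffff) * 15625) >> 10);
}

int main(void)
{
	print_fixed16(0x18000);	/* prints "1.500000" */
	return 0;
}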
/*
* setplane_internal - setplane handler for internal callers
*
@ -2305,7 +2329,6 @@ static int __setplane_internal(struct drm_plane *plane,
uint32_t src_w, uint32_t src_h)
{
int ret = 0;
unsigned int fb_width, fb_height;
/* No fb means shut it down */
if (!fb) {
@ -2342,28 +2365,14 @@ static int __setplane_internal(struct drm_plane *plane,
crtc_y > INT_MAX - (int32_t) crtc_h) {
DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
crtc_w, crtc_h, crtc_x, crtc_y);
return -ERANGE;
}
fb_width = fb->width << 16;
fb_height = fb->height << 16;
/* Make sure source coordinates are inside the fb. */
if (src_w > fb_width ||
src_x > fb_width - src_w ||
src_h > fb_height ||
src_y > fb_height - src_h) {
DRM_DEBUG_KMS("Invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
ret = -ENOSPC;
ret = -ERANGE;
goto out;
}
ret = check_src_coords(src_x, src_y, src_w, src_h, fb);
if (ret)
goto out;
plane->old_fb = plane->fb;
ret = plane->funcs->update_plane(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
@ -2553,20 +2562,13 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
if (crtc->invert_dimensions)
if (crtc->state &&
crtc->primary->state->rotation & (BIT(DRM_ROTATE_90) |
BIT(DRM_ROTATE_270)))
swap(hdisplay, vdisplay);
if (hdisplay > fb->width ||
vdisplay > fb->height ||
x > fb->width - hdisplay ||
y > fb->height - vdisplay) {
DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
fb->width, fb->height, hdisplay, vdisplay, x, y,
crtc->invert_dimensions ? " (inverted)" : "");
return -ENOSPC;
}
return 0;
return check_src_coords(x << 16, y << 16,
hdisplay << 16, vdisplay << 16, fb);
}
EXPORT_SYMBOL(drm_crtc_check_viewport);
@ -5181,7 +5183,14 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
goto out;
}
ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
if (crtc->state) {
const struct drm_plane_state *state = crtc->primary->state;
ret = check_src_coords(state->src_x, state->src_y,
state->src_w, state->src_h, fb);
} else {
ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
}
if (ret)
goto out;


@ -1194,17 +1194,18 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
list_for_each_entry(port, &mstb->ports, next) {
if (port->port_num == port_num) {
if (!port->mstb) {
mstb = port->mstb;
if (!mstb) {
DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
return NULL;
goto out;
}
mstb = port->mstb;
break;
}
}
}
kref_get(&mstb->kref);
out:
mutex_unlock(&mgr->lock);
return mstb;
}
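The leak fixed here was an early return NULL with mgr->lock still held; the rewrite funnels every path through a single unlock. A hedged sketch of the pattern, with illustrative names:

static struct thing *thing_lookup(struct thing_mgr *mgr, int key)
{
	struct thing *t;

	mutex_lock(&mgr->lock);
	t = thing_find_locked(mgr, key);	/* hypothetical, may be NULL */
	if (!t)
		goto out;
	kref_get(&t->kref);	/* reference taken while still locked */
out:
	mutex_unlock(&mgr->lock);	/* released on every path */
	return t;
}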


@ -37,11 +37,9 @@
#include "drm_legacy.h"
#include "drm_internal.h"
unsigned int drm_debug = 0; /* 1 to enable debug output */
unsigned int drm_debug = 0; /* bitmask of DRM_UT_x */
EXPORT_SYMBOL(drm_debug);
bool drm_atomic = 0;
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");


@ -360,11 +360,7 @@ retry:
goto fail;
}
ret = drm_atomic_plane_set_property(plane, plane_state,
dev->mode_config.rotation_property,
BIT(DRM_ROTATE_0));
if (ret != 0)
goto fail;
plane_state->rotation = BIT(DRM_ROTATE_0);
/* disable non-primary: */
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
@ -1235,7 +1231,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
EXPORT_SYMBOL(drm_fb_helper_set_par);
static int pan_display_atomic(struct fb_var_screeninfo *var,
struct fb_info *info)
struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
@ -1253,6 +1249,8 @@ retry:
mode_set = &fb_helper->crtc_info[i].mode_set;
mode_set->crtc->primary->old_fb = mode_set->crtc->primary->fb;
mode_set->x = var->xoffset;
mode_set->y = var->yoffset;
@ -1268,13 +1266,34 @@ retry:
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
return 0;
fail:
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set;
struct drm_plane *plane;
mode_set = &fb_helper->crtc_info[i].mode_set;
plane = mode_set->crtc->primary;
if (ret == 0) {
struct drm_framebuffer *new_fb = plane->state->fb;
if (new_fb)
drm_framebuffer_reference(new_fb);
plane->fb = new_fb;
plane->crtc = plane->state->crtc;
if (plane->old_fb)
drm_framebuffer_unreference(plane->old_fb);
}
plane->old_fb = NULL;
}
if (ret == -EDEADLK)
goto backoff;
drm_atomic_state_free(state);
if (ret != 0)
drm_atomic_state_free(state);
return ret;


@ -763,7 +763,8 @@ EXPORT_SYMBOL(drm_gem_object_release);
void
drm_gem_object_free(struct kref *kref)
{
struct drm_gem_object *obj = (struct drm_gem_object *) kref;
struct drm_gem_object *obj =
container_of(kref, struct drm_gem_object, refcount);
struct drm_device *dev = obj->dev;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
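The removed cast only worked because refcount happens to be the first member of struct drm_gem_object; container_of() subtracts the member's offset and stays correct even if the layout changes. A userspace toy making the point, with container_of re-implemented for illustration:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kref { int count; };
struct obj { int id; struct kref ref; };	/* ref is NOT the first member */

int main(void)
{
	struct obj o = { .id = 42 };
	struct obj *back = container_of(&o.ref, struct obj, ref);

	printf("%d\n", back->id);	/* 42; a plain cast would misfire */
	return 0;
}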
@ -810,8 +811,6 @@ EXPORT_SYMBOL(drm_gem_vm_close);
* drm_gem_mmap() prevents unprivileged users from mapping random objects. So
* callers must verify access restrictions before calling this helper.
*
* NOTE: This function has to be protected with dev->struct_mutex
*
* Return 0 on success or -EINVAL if the object size is smaller than the VMA
* size, or if no gem_vm_ops are provided.
*/
@ -820,8 +819,6 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
{
struct drm_device *dev = obj->dev;
lockdep_assert_held(&dev->struct_mutex);
/* Check for valid size. */
if (obj_size < vma->vm_end - vma->vm_start)
return -EINVAL;
@ -865,30 +862,46 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_gem_object *obj;
struct drm_gem_object *obj = NULL;
struct drm_vma_offset_node *node;
int ret;
if (drm_device_is_unplugged(dev))
return -ENODEV;
mutex_lock(&dev->struct_mutex);
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
vma->vm_pgoff,
vma_pages(vma));
if (likely(node)) {
obj = container_of(node, struct drm_gem_object, vma_node);
/*
* When the object is being freed, after it hits 0-refcnt it
* proceeds to tear down the object. In the process it will
* attempt to remove the VMA offset and so acquire this
* mgr->vm_lock. Therefore if we find an object with a 0-refcnt
* that matches our range, we know it is in the process of being
* destroyed and will be freed as soon as we release the lock -
* so we have to check for the 0-refcnted object and treat it as
* invalid.
*/
if (!kref_get_unless_zero(&obj->refcount))
obj = NULL;
}
drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
vma->vm_pgoff,
vma_pages(vma));
if (!node) {
mutex_unlock(&dev->struct_mutex);
if (!obj)
return -EINVAL;
} else if (!drm_vma_node_is_allowed(node, filp)) {
mutex_unlock(&dev->struct_mutex);
if (!drm_vma_node_is_allowed(node, filp)) {
drm_gem_object_unreference_unlocked(obj);
return -EACCES;
}
obj = container_of(node, struct drm_gem_object, vma_node);
ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
vma);
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference_unlocked(obj);
return ret;
}
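The comment block above spells out the idiom; distilled into a hedged sketch that reuses the lookup helpers from this series (struct obj and its members are illustrative):

#include <drm/drm_vma_manager.h>
#include <linux/kref.h>
#include <linux/mm.h>

struct obj {
	struct kref refcount;
	struct drm_vma_offset_node vma_node;
};

static struct obj *obj_weak_lookup(struct drm_vma_offset_manager *mgr,
				   struct vm_area_struct *vma)
{
	struct obj *obj = NULL;
	struct drm_vma_offset_node *node;

	drm_vma_offset_lock_lookup(mgr);
	node = drm_vma_offset_exact_lookup_locked(mgr, vma->vm_pgoff,
						  vma_pages(vma));
	if (node) {
		obj = container_of(node, struct obj, vma_node);
		/* A 0-refcount object is mid-teardown: treat as not found. */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(mgr);

	return obj;
}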


@ -484,9 +484,7 @@ int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
struct drm_device *dev = obj->dev;
int ret;
mutex_lock(&dev->struct_mutex);
ret = drm_gem_mmap_obj(obj, obj->size, vma);
mutex_unlock(&dev->struct_mutex);
if (ret < 0)
return ret;


@ -691,13 +691,16 @@ long drm_ioctl(struct file *filp,
char stack_kdata[128];
char *kdata = NULL;
unsigned int usize, asize, drv_size;
bool is_driver_ioctl;
dev = file_priv->minor->dev;
if (drm_device_is_unplugged(dev))
return -ENODEV;
if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END) {
is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
if (is_driver_ioctl) {
/* driver ioctl */
if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls)
goto err_i1;
@ -756,7 +759,10 @@ long drm_ioctl(struct file *filp,
memset(kdata, 0, usize);
}
if (ioctl->flags & DRM_UNLOCKED)
/* Enforce sane locking for kms driver ioctls. Core ioctls are
* too messy still. */
if ((drm_core_check_feature(dev, DRIVER_MODESET) && is_driver_ioctl) ||
(ioctl->flags & DRM_UNLOCKED))
retcode = func(dev, kdata, file_priv);
else {
mutex_lock(&drm_global_mutex);


@ -213,17 +213,17 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
diff = DIV_ROUND_CLOSEST_ULL(diff_ns, framedur_ns);
if (diff == 0 && flags & DRM_CALLED_FROM_VBLIRQ)
DRM_DEBUG("crtc %u: Redundant vblirq ignored."
" diff_ns = %lld, framedur_ns = %d)\n",
pipe, (long long) diff_ns, framedur_ns);
DRM_DEBUG_VBL("crtc %u: Redundant vblirq ignored."
" diff_ns = %lld, framedur_ns = %d)\n",
pipe, (long long) diff_ns, framedur_ns);
} else {
/* some kind of default for drivers w/o accurate vbl timestamping */
diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
}
DRM_DEBUG("updating vblank count on crtc %u:"
" current=%u, diff=%u, hw=%u hw_last=%u\n",
pipe, vblank->count, diff, cur_vblank, vblank->last);
DRM_DEBUG_VBL("updating vblank count on crtc %u:"
" current=%u, diff=%u, hw=%u hw_last=%u\n",
pipe, vblank->count, diff, cur_vblank, vblank->last);
if (diff == 0) {
WARN_ON_ONCE(cur_vblank != vblank->last);
@ -800,11 +800,11 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
etime = ktime_sub_ns(etime, delta_ns);
*vblank_time = ktime_to_timeval(etime);
DRM_DEBUG("crtc %u : v 0x%x p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
pipe, vbl_status, hpos, vpos,
(long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
(long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
duration_ns/1000, i);
DRM_DEBUG_VBL("crtc %u : v 0x%x p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
pipe, vbl_status, hpos, vpos,
(long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
(long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
duration_ns/1000, i);
return ret;
}
@ -1272,8 +1272,8 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != pipe)
continue;
DRM_DEBUG("Sending premature vblank event on disable: \
wanted %d, current %d\n",
DRM_DEBUG("Sending premature vblank event on disable: "
"wanted %d, current %d\n",
e->event.sequence, seq);
list_del(&e->base.link);
drm_vblank_put(dev, pipe);


@ -112,7 +112,7 @@ void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
/**
* drm_vma_offset_lookup() - Find node in offset space
* drm_vma_offset_lookup_locked() - Find node in offset space
* @mgr: Manager object
* @start: Start address for object (page-based)
* @pages: Size of object (page-based)
@ -122,38 +122,22 @@ EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
* region and the given node will be returned, as long as the node spans the
* whole requested area (given the size in number of pages as @pages).
*
* Note that before lookup the vma offset manager lookup lock must be acquired
* with drm_vma_offset_lock_lookup(). See there for an example. This can then be
* used to implement weakly referenced lookups using kref_get_unless_zero().
*
* Example:
* drm_vma_offset_lock_lookup(mgr);
* node = drm_vma_offset_lookup_locked(mgr);
* if (node)
* kref_get_unless_zero(container_of(node, sth, entr));
* drm_vma_offset_unlock_lookup(mgr);
*
* RETURNS:
* Returns NULL if no suitable node can be found. Otherwise, the best match
* is returned. It's the caller's responsibility to make sure the node doesn't
* get destroyed before the caller can access it.
*/
struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages)
{
struct drm_vma_offset_node *node;
read_lock(&mgr->vm_lock);
node = drm_vma_offset_lookup_locked(mgr, start, pages);
read_unlock(&mgr->vm_lock);
return node;
}
EXPORT_SYMBOL(drm_vma_offset_lookup);
/**
* drm_vma_offset_lookup_locked() - Find node in offset space
* @mgr: Manager object
* @start: Start address for object (page-based)
* @pages: Size of object (page-based)
*
* Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
* manually. See drm_vma_offset_lock_lookup() for an example.
*
* RETURNS:
* Returns NULL if no suitable node can be found. Otherwise, the best match
* is returned.
*/
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages)


@ -405,25 +405,25 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
static const struct drm_ioctl_desc exynos_ioctls[] = {
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
DRM_UNLOCKED | DRM_AUTH),
DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, exynos_g2d_get_ver_ioctl,
DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl,
DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY, exynos_drm_ipp_get_property,
DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY, exynos_drm_ipp_set_property,
DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF, exynos_drm_ipp_queue_buf,
DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL, exynos_drm_ipp_cmd_ctrl,
DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
};
static const struct file_operations exynos_drm_driver_fops = {


@ -1299,41 +1299,41 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
};
int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);


@ -932,13 +932,13 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
}
static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {


@ -68,12 +68,7 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
if (drm_device_is_unplugged(dev))
return -ENODEV;
mutex_lock(&dev->struct_mutex);
ret = drm_gem_mmap_obj(drm_obj, drm_obj->size, vma);
mutex_unlock(&dev->struct_mutex);
if (ret) {
pr_err("%s:drm_gem_mmap_obj fail\n", __func__);
return ret;


@ -45,9 +45,7 @@ int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
int ret;
mutex_lock(&obj->dev->struct_mutex);
ret = drm_gem_mmap_obj(obj, obj->size, vma);
mutex_unlock(&obj->dev->struct_mutex);
if (ret < 0)
return ret;


@ -862,18 +862,18 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
static const struct drm_ioctl_desc
nouveau_ioctls[] = {
DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH|DRM_RENDER_ALLOW),
};
long


@ -412,9 +412,6 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
dispc_mgr_go(omap_crtc->channel);
omap_irq_register(crtc->dev, &omap_crtc->vblank_irq);
}
crtc->invert_dimensions = !!(crtc->primary->state->rotation &
(BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270)));
}
static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,


@ -626,12 +626,12 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
}
static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_AUTH),
DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_AUTH),
DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep, DRM_AUTH),
DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini, DRM_AUTH),
DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info, DRM_AUTH),
};
/*


@ -140,15 +140,12 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
struct vm_area_struct *vma)
{
struct drm_gem_object *obj = buffer->priv;
struct drm_device *dev = obj->dev;
int ret = 0;
if (WARN_ON(!obj->filp))
return -EINVAL;
mutex_lock(&dev->struct_mutex);
ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
mutex_unlock(&dev->struct_mutex);
if (ret < 0)
return ret;


@ -422,21 +422,21 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
}
const struct drm_ioctl_desc qxl_ioctls[] = {
DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
DRM_AUTH|DRM_UNLOCKED),
DRM_AUTH),
DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
DRM_AUTH|DRM_UNLOCKED),
DRM_AUTH),
DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
DRM_AUTH|DRM_UNLOCKED),
DRM_AUTH),
DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
DRM_AUTH|DRM_UNLOCKED),
DRM_AUTH),
DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
DRM_AUTH|DRM_UNLOCKED),
DRM_AUTH),
};
int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls);


@ -876,20 +876,20 @@ const struct drm_ioctl_desc radeon_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, drm_invalid_op, DRM_AUTH),
DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, drm_invalid_op, DRM_AUTH),
/* KMS */
DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);


@ -79,12 +79,9 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
struct drm_device *drm = obj->dev;
int ret;
mutex_lock(&drm->struct_mutex);
ret = drm_gem_mmap_obj(obj, obj->size, vma);
mutex_unlock(&drm->struct_mutex);
if (ret)
return ret;


@ -778,20 +778,20 @@ static int tegra_gem_get_flags(struct drm_device *drm, void *data,
static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, 0),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, 0),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, 0),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, 0),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, 0),
DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, 0),
DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, 0),
DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, 0),
DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, 0),
DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, 0),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, 0),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, 0),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, 0),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, 0),
#endif
};


@ -235,66 +235,13 @@ unlock:
return ret;
}
int vgem_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_vma_offset_node *node;
struct drm_gem_object *obj;
struct drm_vgem_gem_object *vgem_obj;
int ret = 0;
mutex_lock(&dev->struct_mutex);
node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
vma->vm_pgoff,
vma_pages(vma));
if (!node) {
ret = -EINVAL;
goto out_unlock;
} else if (!drm_vma_node_is_allowed(node, filp)) {
ret = -EACCES;
goto out_unlock;
}
obj = container_of(node, struct drm_gem_object, vma_node);
vgem_obj = to_vgem_bo(obj);
if (obj->dma_buf && vgem_obj->use_dma_buf) {
ret = dma_buf_mmap(obj->dma_buf, vma, 0);
goto out_unlock;
}
if (!obj->dev->driver->gem_vm_ops) {
ret = -EINVAL;
goto out_unlock;
}
vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = obj->dev->driver->gem_vm_ops;
vma->vm_private_data = vgem_obj;
vma->vm_page_prot =
pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
mutex_unlock(&dev->struct_mutex);
drm_gem_vm_open(vma);
return ret;
out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
static struct drm_ioctl_desc vgem_ioctls[] = {
};
static const struct file_operations vgem_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.mmap = vgem_drm_gem_mmap,
.mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,


@ -146,73 +146,73 @@
static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
vmw_kms_cursor_bypass_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
DRM_MASTER | DRM_CONTROL_ALLOW),
VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
DRM_MASTER | DRM_CONTROL_ALLOW),
VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
DRM_MASTER | DRM_CONTROL_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
DRM_MASTER | DRM_CONTROL_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_UNLOCKED |
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
vmw_fence_obj_signaled_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
/* these allow direct access to the framebuffers mark as master only */
VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
DRM_MASTER | DRM_AUTH),
VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
vmw_present_readback_ioctl,
DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
DRM_MASTER | DRM_AUTH),
VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
vmw_kms_update_layout_ioctl,
DRM_MASTER | DRM_UNLOCKED),
DRM_MASTER),
VMW_IOCTL_DEF(VMW_CREATE_SHADER,
vmw_shader_define_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_UNREF_SHADER,
vmw_shader_destroy_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
vmw_gb_surface_define_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
vmw_gb_surface_reference_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_SYNCCPU,
vmw_user_dmabuf_synccpu_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
vmw_extended_context_define_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_RENDER_ALLOW),
};
static struct pci_device_id vmw_pci_id_list[] = {


@ -84,9 +84,9 @@
* @fb_info: framebuffer to which console is remapped on switching
* @pwr_state: current power state
* @ops: client callbacks
* @id: client identifier, see enum vga_switcheroo_client_id.
* Determining the id requires the handler, so GPUs are initially
* assigned -1 and later given their true id in vga_switcheroo_enable()
* @id: client identifier. Determining the id requires the handler,
* so gpus are initially assigned VGA_SWITCHEROO_UNKNOWN_ID
* and later given their true id in vga_switcheroo_enable()
* @active: whether the outputs are currently switched to this client
* @driver_power_control: whether power state is controlled by the driver's
* runtime pm. If true, writing ON and OFF to the vga_switcheroo debugfs
@ -100,9 +100,9 @@
struct vga_switcheroo_client {
struct pci_dev *pdev;
struct fb_info *fb_info;
int pwr_state;
enum vga_switcheroo_state pwr_state;
const struct vga_switcheroo_client_ops *ops;
int id;
enum vga_switcheroo_client_id id;
bool active;
bool driver_power_control;
struct list_head list;
@ -145,7 +145,8 @@ struct vgasr_priv {
#define ID_BIT_AUDIO 0x100
#define client_is_audio(c) ((c)->id & ID_BIT_AUDIO)
#define client_is_vga(c) ((c)->id == -1 || !client_is_audio(c))
#define client_is_vga(c) ((c)->id == VGA_SWITCHEROO_UNKNOWN_ID || \
!client_is_audio(c))
#define client_id(c) ((c)->id & ~ID_BIT_AUDIO)
static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
@ -173,7 +174,7 @@ static void vga_switcheroo_enable(void)
vgasr_priv.handler->init();
list_for_each_entry(client, &vgasr_priv.clients, list) {
if (client->id != -1)
if (client->id != VGA_SWITCHEROO_UNKNOWN_ID)
continue;
ret = vgasr_priv.handler->get_client_id(client->pdev);
if (ret < 0)
@ -232,7 +233,8 @@ EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
static int register_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
int id, bool active, bool driver_power_control)
enum vga_switcheroo_client_id id, bool active,
bool driver_power_control)
{
struct vga_switcheroo_client *client;
@ -277,7 +279,7 @@ int vga_switcheroo_register_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
bool driver_power_control)
{
return register_client(pdev, ops, -1,
return register_client(pdev, ops, VGA_SWITCHEROO_UNKNOWN_ID,
pdev == vga_default_device(),
driver_power_control);
}
@ -287,7 +289,7 @@ EXPORT_SYMBOL(vga_switcheroo_register_client);
* vga_switcheroo_register_audio_client - register audio client
* @pdev: client pci device
* @ops: client callbacks
* @id: client identifier, see enum vga_switcheroo_client_id
* @id: client identifier
*
* Register audio client (audio device on a GPU). The power state of the
* client is assumed to be ON.
@ -296,7 +298,7 @@ EXPORT_SYMBOL(vga_switcheroo_register_client);
*/
int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
int id)
enum vga_switcheroo_client_id id)
{
return register_client(pdev, ops, id | ID_BIT_AUDIO, false, false);
}
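With the id now typed as an enum, a GPU driver's registration stays the same shape. A hedged sketch (the ops callbacks are illustrative):

static const struct vga_switcheroo_client_ops foo_switcheroo_ops = {
	.set_gpu_state	= foo_switcheroo_set_state,
	.reprobe	= foo_switcheroo_reprobe,
	.can_switch	= foo_switcheroo_can_switch,
};

/* id starts out as VGA_SWITCHEROO_UNKNOWN_ID until the handler assigns it */
err = vga_switcheroo_register_client(pdev, &foo_switcheroo_ops, false);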
@ -314,7 +316,8 @@ find_client_from_pci(struct list_head *head, struct pci_dev *pdev)
}
static struct vga_switcheroo_client *
find_client_from_id(struct list_head *head, int client_id)
find_client_from_id(struct list_head *head,
enum vga_switcheroo_client_id client_id)
{
struct vga_switcheroo_client *client;
@ -344,7 +347,7 @@ find_active_client(struct list_head *head)
*
* Return: Power state.
*/
int vga_switcheroo_get_client_state(struct pci_dev *pdev)
enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *pdev)
{
struct vga_switcheroo_client *client;
enum vga_switcheroo_state ret;
@ -496,7 +499,8 @@ static int vga_switchoff(struct vga_switcheroo_client *client)
return 0;
}
static void set_audio_state(int id, int state)
static void set_audio_state(enum vga_switcheroo_client_id id,
enum vga_switcheroo_state state)
{
struct vga_switcheroo_client *client;
@ -583,7 +587,7 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
int ret;
bool delay = false, can_switch;
bool just_mux = false;
int client_id = -1;
enum vga_switcheroo_client_id client_id = VGA_SWITCHEROO_UNKNOWN_ID;
struct vga_switcheroo_client *client = NULL;
if (cnt > 63)
@ -652,7 +656,7 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
client_id = VGA_SWITCHEROO_DIS;
}
if (client_id == -1)
if (client_id == VGA_SWITCHEROO_UNKNOWN_ID)
goto out;
client = find_client_from_id(&vgasr_priv.clients, client_id);
if (!client)


@ -107,6 +107,9 @@ struct dma_buf_attachment;
* ATOMIC: used in the atomic code.
* This is the category used by the DRM_DEBUG_ATOMIC() macro.
*
* VBL: used for verbose debug messages in the vblank code
* This is the category used by the DRM_DEBUG_VBL() macro.
*
* Enabling verbose debug messages is done through the drm.debug parameter,
* each category being enabled by a bit.
*
@ -114,7 +117,7 @@ struct dma_buf_attachment;
* drm.debug=0x2 will enable DRIVER messages
* drm.debug=0x3 will enable CORE and DRIVER messages
* ...
* drm.debug=0xf will enable all messages
* drm.debug=0x3f will enable all messages
*
* An interesting feature is that it's possible to enable verbose logging at
* run-time by echoing the debug value in its sysfs node:
@ -125,6 +128,7 @@ struct dma_buf_attachment;
#define DRM_UT_KMS 0x04
#define DRM_UT_PRIME 0x08
#define DRM_UT_ATOMIC 0x10
#define DRM_UT_VBL 0x20
extern __printf(2, 3)
void drm_ut_debug_printk(const char *function_name,
@ -217,6 +221,11 @@ void drm_err(const char *format, ...);
if (unlikely(drm_debug & DRM_UT_ATOMIC)) \
drm_ut_debug_printk(__func__, fmt, ##args); \
} while (0)
#define DRM_DEBUG_VBL(fmt, args...) \
do { \
if (unlikely(drm_debug & DRM_UT_VBL)) \
drm_ut_debug_printk(__func__, fmt, ##args); \
} while (0)
/*@}*/
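
The new DRM_UT_VBL bit (0x20) gates DRM_DEBUG_VBL() exactly like the existing categories gate their macros. A sketch of a driver's vblank interrupt path using it (foo_irq_handler is hypothetical):

/* Hypothetical vblank handler: the message is emitted only when the
 * drm.debug parameter has bit 0x20 (DRM_UT_VBL) set. */
static irqreturn_t foo_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;

	drm_handle_vblank(dev, 0);
	DRM_DEBUG_VBL("crtc 0: vblank count %u\n", drm_vblank_count(dev, 0));

	return IRQ_HANDLED;
}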

View File

@ -407,9 +407,6 @@ struct drm_crtc_funcs {
* @enabled: is this CRTC enabled?
* @mode: current mode timings
* @hwmode: mode timings as programmed to hw regs
* @invert_dimensions: for purposes of error checking crtc vs fb sizes,
* invert the width/height of the crtc. This is used if the driver
* is performing 90 or 270 degree rotated scanout
* @x: x position on screen
* @y: y position on screen
* @funcs: CRTC control functions
@ -458,8 +455,6 @@ struct drm_crtc {
*/
struct drm_display_mode hwmode;
bool invert_dimensions;
int x, y;
const struct drm_crtc_funcs *funcs;
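
The invert_dimensions flag can go because the core now derives the swap from the primary plane's rotation when validating the crtc viewport. A sketch of the replacement logic in drm_crtc_check_viewport():

/* Sketch: swap the mode's dimensions when the primary plane is rotated
 * by 90 or 270 degrees, instead of consulting a per-crtc flag. */
drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);

if (crtc->state &&
    crtc->primary->state->rotation & (BIT(DRM_ROTATE_90) |
				      BIT(DRM_ROTATE_270)))
	swap(hdisplay, vdisplay);

if (hdisplay > fb->width ||
    vdisplay > fb->height ||
    x > fb->width - hdisplay ||
    y > fb->height - vdisplay)
	return -ENOSPC;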

View File

@ -142,8 +142,11 @@ drm_gem_object_reference(struct drm_gem_object *obj)
static inline void
drm_gem_object_unreference(struct drm_gem_object *obj)
{
if (obj != NULL)
if (obj != NULL) {
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
kref_put(&obj->refcount, drm_gem_object_free);
}
}
static inline void
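
The added WARN_ON documents the long-standing rule that the plain unreference is only legal with dev->struct_mutex held; lock-free callers use the _unlocked variant declared just above. Sketch of the two alternatives (use exactly one):

/* With the lock held, the WARN_ON stays quiet: */
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);

/* ...or, from a context that does not hold struct_mutex: */
drm_gem_object_unreference_unlocked(obj);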

View File

@ -54,9 +54,6 @@ void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
unsigned long page_offset, unsigned long size);
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);
struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages);
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages);
@ -71,25 +68,25 @@ bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
struct file *filp);
/**
* drm_vma_offset_exact_lookup() - Look up node by exact address
* drm_vma_offset_exact_lookup_locked() - Look up node by exact address
* @mgr: Manager object
* @start: Start address (page-based, not byte-based)
* @pages: Size of object (page-based)
*
* Same as drm_vma_offset_lookup() but does not allow any offset into the node.
* Same as drm_vma_offset_lookup_locked() but does not allow any offset into the node.
* It only returns the exact object with the given start address.
*
* RETURNS:
* Node at exact start address @start.
*/
static inline struct drm_vma_offset_node *
drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages)
drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr,
unsigned long start,
unsigned long pages)
{
struct drm_vma_offset_node *node;
node = drm_vma_offset_lookup(mgr, start, pages);
node = drm_vma_offset_lookup_locked(mgr, start, pages);
return (node && node->vm_node.start == start) ? node : NULL;
}
@ -97,7 +94,7 @@ drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
* drm_vma_offset_lock_lookup() - Lock lookup for extended private use
* @mgr: Manager object
*
* Lock VMA manager for extended lookups. Only *_locked() VMA function calls
* Lock VMA manager for extended lookups. Only locked VMA function calls
* are allowed while holding this lock. All other contexts are blocked from VMA
* until the lock is released via drm_vma_offset_unlock_lookup().
*
@ -108,13 +105,6 @@ drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
* not call any other VMA helpers while holding this lock.
*
* Note: You're in atomic-context while holding this lock!
*
* Example:
* drm_vma_offset_lock_lookup(mgr);
* node = drm_vma_offset_lookup_locked(mgr);
* if (node)
* kref_get_unless_zero(container_of(node, sth, entr));
* drm_vma_offset_unlock_lookup(mgr);
*/
static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
{
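
The usage example dropped from the kernel-doc above still describes the intended pattern; a hedged sketch, where struct foo and its refcount member are placeholders for any object embedding a drm_vma_offset_node:

/* Look up a node and take a weak-to-strong reference while the manager
 * lock blocks concurrent removal; kref_get_unless_zero() rejects objects
 * already on their way out. */
drm_vma_offset_lock_lookup(mgr);
node = drm_vma_offset_lookup_locked(mgr, start, pages);
if (node) {
	obj = container_of(node, struct foo, vma_node);
	if (!kref_get_unless_zero(&obj->refcount))
		obj = NULL;
}
drm_vma_offset_unlock_lookup(mgr);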

View File

@ -59,6 +59,9 @@ enum vga_switcheroo_state {
/**
* enum vga_switcheroo_client_id - client identifier
* @VGA_SWITCHEROO_UNKNOWN_ID: initial identifier assigned to vga clients.
* Determining the id requires the handler, so GPUs are given their
* true id in a delayed fashion in vga_switcheroo_enable()
* @VGA_SWITCHEROO_IGD: integrated graphics device
* @VGA_SWITCHEROO_DIS: discrete graphics device
* @VGA_SWITCHEROO_MAX_CLIENTS: currently no more than two GPUs are supported
@ -66,6 +69,7 @@ enum vga_switcheroo_state {
* Client identifier. Audio clients use the same identifier & 0x100.
*/
enum vga_switcheroo_client_id {
VGA_SWITCHEROO_UNKNOWN_ID = -1,
VGA_SWITCHEROO_IGD,
VGA_SWITCHEROO_DIS,
VGA_SWITCHEROO_MAX_CLIENTS,
@ -96,7 +100,7 @@ struct vga_switcheroo_handler {
int (*switchto)(enum vga_switcheroo_client_id id);
int (*power_state)(enum vga_switcheroo_client_id id,
enum vga_switcheroo_state state);
int (*get_client_id)(struct pci_dev *pdev);
enum vga_switcheroo_client_id (*get_client_id)(struct pci_dev *pdev);
};
/**
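
Retyping ->get_client_id() pushes the enum all the way into handler implementations. A hypothetical handler might classify clients like this (the vendor check is purely illustrative):

/* Hypothetical handler callback: treat the Intel device in a hybrid
 * laptop as the integrated GPU, everything else as discrete. */
static enum vga_switcheroo_client_id foo_get_client_id(struct pci_dev *pdev)
{
	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
		return VGA_SWITCHEROO_IGD;

	return VGA_SWITCHEROO_DIS;
}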
@ -128,7 +132,7 @@ int vga_switcheroo_register_client(struct pci_dev *dev,
bool driver_power_control);
int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
int id);
enum vga_switcheroo_client_id id);
void vga_switcheroo_client_fb_set(struct pci_dev *dev,
struct fb_info *info);
@ -138,7 +142,7 @@ void vga_switcheroo_unregister_handler(void);
int vga_switcheroo_process_delayed_switch(void);
int vga_switcheroo_get_client_state(struct pci_dev *dev);
enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev);
void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
@ -154,10 +158,10 @@ static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_i
static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; }
static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
int id) { return 0; }
enum vga_switcheroo_client_id id) { return 0; }
static inline void vga_switcheroo_unregister_handler(void) {}
static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}

View File

@ -1,6 +1,8 @@
#ifndef _I810_DRM_H_
#define _I810_DRM_H_
#include <drm/drm.h>
/* WARNING: These defines must be the same as what the Xserver uses.
* if you change them, you must change the defines in the Xserver.
*/

View File

@ -33,6 +33,8 @@
#ifndef __R128_DRM_H__
#define __R128_DRM_H__
#include <drm/drm.h>
/* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (r128_sarea.h)
*/

View File

@ -26,6 +26,8 @@
#ifndef __SAVAGE_DRM_H__
#define __SAVAGE_DRM_H__
#include <drm/drm.h>
#ifndef __SAVAGE_SAREA_DEFINES__
#define __SAVAGE_SAREA_DEFINES__

View File

@ -153,7 +153,7 @@ struct azx {
unsigned int snoop:1;
unsigned int align_buffer_size:1;
unsigned int region_requested:1;
unsigned int disabled:1; /* disabled by VGA-switcher */
unsigned int disabled:1; /* disabled by vga_switcheroo */
#ifdef CONFIG_SND_HDA_DSP_LOADER
struct azx_dev saved_azx_dev;

View File

@ -337,7 +337,7 @@ enum {
AZX_DCAPS_4K_BDLE_BOUNDARY | AZX_DCAPS_SNOOP_OFF)
/*
* VGA-switcher support
* vga_switcheroo support
*/
#ifdef SUPPORT_VGA_SWITCHEROO
#define use_vga_switcheroo(chip) ((chip)->use_vga_switcheroo)
@ -1076,12 +1076,12 @@ static void azx_vs_set_state(struct pci_dev *pci,
}
}
} else {
dev_info(chip->card->dev, "%s via VGA-switcheroo\n",
dev_info(chip->card->dev, "%s via vga_switcheroo\n",
disabled ? "Disabling" : "Enabling");
if (disabled) {
pm_runtime_put_sync_suspend(card->dev);
azx_suspend(card->dev);
/* when we get suspended by vga switcheroo we end up in D3cold,
/* when we get suspended by vga_switcheroo we end up in D3cold,
* however we have no ACPI handle, so pci/acpi can't put us there,
* put ourselves there */
pci->current_state = PCI_D3cold;
@ -1121,7 +1121,7 @@ static void init_vga_switcheroo(struct azx *chip)
struct pci_dev *p = get_bound_vga(chip->pci);
if (p) {
dev_info(chip->card->dev,
"Handle VGA-switcheroo audio client\n");
"Handle vga_switcheroo audio client\n");
hda->use_vga_switcheroo = 1;
pci_dev_put(p);
}
@ -1232,7 +1232,7 @@ static int azx_dev_free(struct snd_device *device)
#ifdef SUPPORT_VGA_SWITCHEROO
/*
* Check of disabled HDMI controller by vga-switcheroo
* Check of disabled HDMI controller by vga_switcheroo
*/
static struct pci_dev *get_bound_vga(struct pci_dev *pci)
{
@ -1917,7 +1917,7 @@ static int azx_probe(struct pci_dev *pci,
err = register_vga_switcheroo(chip);
if (err < 0) {
dev_err(card->dev, "Error registering VGA-switcheroo client\n");
dev_err(card->dev, "Error registering vga_switcheroo client\n");
goto out_free;
}

View File

@ -35,7 +35,7 @@ struct hda_intel {
unsigned int irq_pending_warned:1;
unsigned int probe_continued:1;
/* VGA-switcheroo setup */
/* vga_switcheroo setup */
unsigned int use_vga_switcheroo:1;
unsigned int vga_switcheroo_registered:1;
unsigned int init_failed:1; /* delayed init failed */