
Merge tag 'topic/drm-misc-2016-07-14' of git://anongit.freedesktop.org/drm-intel into drm-next

I've recovered the dri-devel backlog from my vacation, more misc stuff:
- of_node_put fixes from Peter Chen (not all yet)
- more patches from Gustavo to use kms-native drm_crtc_vblank_* funcs
- docs sphinxification from Lukas Wunner
- bunch of fixes all over from Dan Carpenter
- more follow-up work from Chris' register/unregister rework in various
  places
- vgem dma-buf export (for writing testcases)
- small things all over from tons of different people

* tag 'topic/drm-misc-2016-07-14' of git://anongit.freedesktop.org/drm-intel: (52 commits)
  drm: Don't overwrite user ioctl arg unless requested
  dma-buf/sync_file: improve Kconfig description for Sync Files
  MAINTAINERS: add entry for the Sync File Framework
  drm: Resurrect atomic rmfb code
  drm/vgem: Use PAGE_KERNEL in place of x86-specific PAGE_KERNEL_IO
  qxl: silence uninitialized variable warning
  qxl: check for kmap failures
  vga_switcheroo: Sphinxify docs
  drm: Restore double clflush on the last partial cacheline
  gpu: drm: rockchip_drm_drv: add missing of_node_put after calling of_parse_phandle
  gpu: drm: sti_vtg: add missing of_node_put after calling of_parse_phandle
  gpu: drm: sti_hqvdp: add missing of_node_put after calling of_parse_phandle
  gpu: drm: sti_vdo: add missing of_node_put after calling of_parse_phandle
  gpu: drm: sti_compositor: add missing of_node_put after calling of_parse_phandle
  drm/tilcdc: use drm_crtc_handle_vblank()
  drm/rcar-du: use drm_crtc_handle_vblank()
  drm/nouveau: use drm_crtc_handle_vblank()
  drm/atmel: use drm_crtc_handle_vblank()
  drm/armada: use drm_crtc_handle_vblank()
  drm: make drm_vblank_count_and_time() static
  ...
Committed by Dave Airlie, 2016-07-15 11:01:37 +10:00
commit 6c181c8210
49 changed files with 407 additions and 283 deletions


@@ -280,8 +280,8 @@ private data in the open method should free it here.
 The lastclose method should restore CRTC and plane properties to default
 value, so that a subsequent open of the device will not inherit state
 from the previous user. It can also be used to execute delayed power
-switching state changes, e.g. in conjunction with the vga_switcheroo
-infrastructure (see ?). Beyond that KMS drivers should not do any
+switching state changes, e.g. in conjunction with the :ref:`vga_switcheroo`
+infrastructure. Beyond that KMS drivers should not do any
 further cleanup. Only legacy UMS drivers might need to clean up device
 state so that the vga console or an independent fbdev driver could take
 over.
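
The restore duty described in this paragraph is typically delegated to the fbdev helper. A minimal sketch of such a lastclose hook, assuming a driver that uses fbdev emulation (the foo_* names are hypothetical):

static void foo_lastclose(struct drm_device *dev)
{
	struct foo_device *foo = dev->dev_private;	/* hypothetical driver struct */

	/* Restore the fbdev mode so the next open doesn't inherit state. */
	drm_fb_helper_restore_fbdev_mode_unlocked(&foo->fb_helper);
}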


@@ -1,3 +1,5 @@
+.. _vga_switcheroo:
+
 ==============
 VGA Switcheroo
 ==============
@@ -94,9 +96,3 @@ Public functions
 .. kernel-doc:: include/linux/apple-gmux.h
    :internal:
-
-.. WARNING: DOCPROC directive not supported: !Cdrivers/gpu/vga/vga_switcheroo.c
-
-.. WARNING: DOCPROC directive not supported: !Cinclude/linux/vga_switcheroo.h
-
-.. WARNING: DOCPROC directive not supported: !Cdrivers/platform/x86/apple-gmux.c


@@ -3779,6 +3779,17 @@ F: include/linux/*fence.h
 F:	Documentation/dma-buf-sharing.txt
 T:	git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
 
+SYNC FILE FRAMEWORK
+M:	Sumit Semwal <sumit.semwal@linaro.org>
+R:	Gustavo Padovan <gustavo@padovan.org>
+S:	Maintained
+L:	linux-media@vger.kernel.org
+L:	dri-devel@lists.freedesktop.org
+F:	drivers/dma-buf/sync_file.c
+F:	include/linux/sync_file.h
+F:	Documentation/sync_file.txt
+T:	git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
+
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
 M:	Vinod Koul <vinod.koul@intel.com>
 L:	dmaengine@vger.kernel.org


@@ -1,11 +1,20 @@
 menu "DMABUF options"
 
 config SYNC_FILE
-	bool "sync_file support for fences"
+	bool "Explicit Synchronization Framework"
 	default n
 	select ANON_INODES
 	select DMA_SHARED_BUFFER
 	---help---
-	  This option enables the fence framework synchronization to export
-	  sync_files to userspace that can represent one or more fences.
+	  The Sync File Framework adds explicit synchronization via
+	  userspace. It enables sending/receiving 'struct fence' objects
+	  to/from userspace via Sync File fds for synchronization between
+	  drivers via userspace components. It has been ported from Android.
+
+	  The first and main user for this is graphics, in which a fence is
+	  associated with a buffer. When a job is submitted to the GPU a fence
+	  is attached to the buffer and is transferred via userspace, using
+	  Sync File fds, to the DRM driver for example. More details at
+	  Documentation/sync_file.txt.
 
 endmenu
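
To make the userspace side of this concrete: a Sync File fd can be waited on with plain poll(2), which blocks until every fence wrapped by the file has signaled. A minimal sketch, assuming some driver-specific ioctl (not shown) has already returned fence_fd:

#include <poll.h>

static int wait_fence_fd(int fence_fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd = fence_fd,
		.events = POLLIN,	/* readable == all fences signaled */
	};
	int ret = poll(&pfd, 1, timeout_ms);

	if (ret < 0)
		return -1;	/* poll error */
	if (ret == 0)
		return 0;	/* timed out, fences still pending */
	return 1;		/* signaled */
}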


@@ -1712,6 +1712,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	amdgpu_bo_evict_vram(adev);
 	amdgpu_ib_pool_fini(adev);
 	amdgpu_fence_driver_fini(adev);
+	drm_crtc_force_disable_all(adev->ddev);
 	amdgpu_fbdev_fini(adev);
 	r = amdgpu_fini(adev);
 	kfree(adev->ip_block_status);


@@ -60,7 +60,10 @@ int amdgpu_driver_unload_kms(struct drm_device *dev)
 	if (adev->rmmio == NULL)
 		goto done_free;
 
-	pm_runtime_get_sync(dev->dev);
+	if (amdgpu_device_is_px(dev)) {
+		pm_runtime_get_sync(dev->dev);
+		pm_runtime_forbid(dev->dev);
+	}
 
 	amdgpu_amdkfd_device_fini(adev);
@@ -135,9 +138,12 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	}
 
 out:
-	if (r)
+	if (r) {
+		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
+		if (adev->rmmio && amdgpu_device_is_px(dev))
+			pm_runtime_put_noidle(dev->dev);
 		amdgpu_driver_unload_kms(dev);
+	}
 
 	return r;
 }


@@ -410,7 +410,7 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
 		DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num);
 
 	if (stat & VSYNC_IRQ)
-		drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num);
+		drm_crtc_handle_vblank(&dcrtc->crtc);
 
 	spin_lock(&dcrtc->irq_lock);
 	ovl_plane = dcrtc->plane;


@@ -383,7 +383,7 @@ static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc)
 
 void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
 {
-	drm_handle_vblank(c->dev, 0);
+	drm_crtc_handle_vblank(c);
 	atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
 }


@@ -1589,6 +1589,72 @@ void drm_atomic_clean_old_fb(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_atomic_clean_old_fb);
 
+int drm_atomic_remove_fb(struct drm_framebuffer *fb)
+{
+	struct drm_modeset_acquire_ctx ctx;
+	struct drm_device *dev = fb->dev;
+	struct drm_atomic_state *state;
+	struct drm_plane *plane;
+	int ret = 0;
+	unsigned plane_mask;
+
+	state = drm_atomic_state_alloc(dev);
+	if (!state)
+		return -ENOMEM;
+
+	drm_modeset_acquire_init(&ctx, 0);
+	state->acquire_ctx = &ctx;
+
+retry:
+	plane_mask = 0;
+	ret = drm_modeset_lock_all_ctx(dev, &ctx);
+	if (ret)
+		goto unlock;
+
+	drm_for_each_plane(plane, dev) {
+		struct drm_plane_state *plane_state;
+
+		if (plane->state->fb != fb)
+			continue;
+
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state)) {
+			ret = PTR_ERR(plane_state);
+			goto unlock;
+		}
+
+		drm_atomic_set_fb_for_plane(plane_state, NULL);
+		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+		if (ret)
+			goto unlock;
+
+		plane_mask |= BIT(drm_plane_index(plane));
+
+		plane->old_fb = plane->fb;
+		plane->fb = NULL;
+	}
+
+	if (plane_mask)
+		ret = drm_atomic_commit(state);
+
+unlock:
+	if (plane_mask)
+		drm_atomic_clean_old_fb(dev, plane_mask, ret);
+
+	if (ret == -EDEADLK) {
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	}
+
+	if (ret || !plane_mask)
+		drm_atomic_state_free(state);
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+
+	return ret;
+}
+
 int drm_mode_atomic_ioctl(struct drm_device *dev,
 			  void *data, struct drm_file *file_priv)
 {


@@ -136,6 +136,7 @@ drm_clflush_virt_range(void *addr, unsigned long length)
 		mb();
 		for (; addr < end; addr += size)
 			clflushopt(addr);
+		clflushopt(end - 1); /* force serialisation */
 		mb();
 		return;
 	}


@@ -396,6 +396,51 @@ void drm_mode_object_reference(struct drm_mode_object *obj)
 }
 EXPORT_SYMBOL(drm_mode_object_reference);
 
+/**
+ * drm_crtc_force_disable - Forcibly turn off a CRTC
+ * @crtc: CRTC to turn off
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_force_disable(struct drm_crtc *crtc)
+{
+	struct drm_mode_set set = {
+		.crtc = crtc,
+	};
+
+	return drm_mode_set_config_internal(&set);
+}
+EXPORT_SYMBOL(drm_crtc_force_disable);
+
+/**
+ * drm_crtc_force_disable_all - Forcibly turn off all enabled CRTCs
+ * @dev: DRM device whose CRTCs to turn off
+ *
+ * Drivers may want to call this on unload to ensure that all displays are
+ * unlit and the GPU is in a consistent, low power state. Takes modeset locks.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_force_disable_all(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+	int ret = 0;
+
+	drm_modeset_lock_all(dev);
+	drm_for_each_crtc(crtc, dev)
+		if (crtc->enabled) {
+			ret = drm_crtc_force_disable(crtc);
+			if (ret)
+				goto out;
+		}
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
+}
+EXPORT_SYMBOL(drm_crtc_force_disable_all);
+
 static void drm_framebuffer_free(struct kref *kref)
 {
 	struct drm_framebuffer *fb =
@@ -544,8 +589,6 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
 	struct drm_device *dev;
 	struct drm_crtc *crtc;
 	struct drm_plane *plane;
-	struct drm_mode_set set;
-	int ret;
 
 	if (!fb)
 		return;
@@ -570,16 +613,17 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
 	 * in this manner.
 	 */
 	if (drm_framebuffer_read_refcount(fb) > 1) {
+		if (dev->mode_config.funcs->atomic_commit) {
+			drm_atomic_remove_fb(fb);
+			goto out;
+		}
+
 		drm_modeset_lock_all(dev);
 		/* remove from any CRTC */
 		drm_for_each_crtc(crtc, dev) {
 			if (crtc->primary->fb == fb) {
 				/* should turn off the crtc */
-				memset(&set, 0, sizeof(struct drm_mode_set));
-				set.crtc = crtc;
-				set.fb = NULL;
-				ret = drm_mode_set_config_internal(&set);
-				if (ret)
+				if (drm_crtc_force_disable(crtc))
 					DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
 			}
 		}
@@ -591,6 +635,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
 		drm_modeset_unlock_all(dev);
 	}
 
+out:
 	drm_framebuffer_unreference(fb);
 }
 EXPORT_SYMBOL(drm_framebuffer_remove);
@@ -1068,23 +1113,7 @@ void drm_connector_unregister(struct drm_connector *connector)
 }
 EXPORT_SYMBOL(drm_connector_unregister);
 
-/**
- * drm_connector_register_all - register all connectors
- * @dev: drm device
- *
- * This function registers all connectors in sysfs and other places so that
- * userspace can start to access them. drm_connector_register_all() is called
- * automatically from drm_dev_register() to complete the device registration,
- * if they don't call drm_connector_register() on each connector individually.
- *
- * When a device is unplugged and should be removed from userspace access,
- * call drm_connector_unregister_all(), which is the inverse of this
- * function.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_connector_register_all(struct drm_device *dev)
+static int drm_connector_register_all(struct drm_device *dev)
 {
 	struct drm_connector *connector;
 	int ret;
@@ -1106,7 +1135,6 @@ err:
 	drm_connector_unregister_all(dev);
 	return ret;
 }
-EXPORT_SYMBOL(drm_connector_register_all);
 
 /**
  * drm_connector_unregister_all - unregister connector userspace interfaces
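
As the new kernel-doc says, the intended call site for drm_crtc_force_disable_all() is a driver's teardown path; the amdgpu, radeon and nouveau hunks in this pull all follow the same shape. A sketch with a hypothetical driver:

static void foo_unload(struct drm_device *dev)
{
	/* Takes the modeset locks itself, so the caller must not hold them. */
	drm_crtc_force_disable_all(dev);

	drm_mode_config_cleanup(dev);
}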


@@ -125,6 +125,7 @@ int drm_atomic_get_property(struct drm_mode_object *obj,
 		struct drm_property *property, uint64_t *val);
 int drm_mode_atomic_ioctl(struct drm_device *dev,
 			  void *data, struct drm_file *file_priv);
+int drm_atomic_remove_fb(struct drm_framebuffer *fb);
 
 int drm_modeset_register_all(struct drm_device *dev);
 void drm_modeset_unregister_all(struct drm_device *dev);


@@ -355,8 +355,7 @@ int drm_dp_aux_dev_init(void)
 
 	drm_dp_aux_dev_class = class_create(THIS_MODULE, "drm_dp_aux_dev");
 	if (IS_ERR(drm_dp_aux_dev_class)) {
-		res = PTR_ERR(drm_dp_aux_dev_class);
-		goto out;
+		return PTR_ERR(drm_dp_aux_dev_class);
 	}
 	drm_dp_aux_dev_class->dev_groups = drm_dp_aux_groups;


@@ -362,9 +362,7 @@ EXPORT_SYMBOL(drm_put_dev);
 void drm_unplug_dev(struct drm_device *dev)
 {
 	/* for a USB device */
-	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
-	drm_minor_unregister(dev, DRM_MINOR_RENDER);
-	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+	drm_dev_unregister(dev);
 
 	mutex_lock(&drm_global_mutex);


@@ -648,7 +648,7 @@ long drm_ioctl(struct file *filp,
 	int retcode = -EINVAL;
 	char stack_kdata[128];
 	char *kdata = NULL;
-	unsigned int usize, asize, drv_size;
+	unsigned int in_size, out_size, drv_size, ksize;
 	bool is_driver_ioctl;
 
 	dev = file_priv->minor->dev;
@@ -671,9 +671,12 @@ long drm_ioctl(struct file *filp,
 	}
 
 	drv_size = _IOC_SIZE(ioctl->cmd);
-	usize = _IOC_SIZE(cmd);
-	asize = max(usize, drv_size);
-	cmd = ioctl->cmd;
+	out_size = in_size = _IOC_SIZE(cmd);
+	if ((cmd & ioctl->cmd & IOC_IN) == 0)
+		in_size = 0;
+	if ((cmd & ioctl->cmd & IOC_OUT) == 0)
+		out_size = 0;
+	ksize = max(max(in_size, out_size), drv_size);
 
 	DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
 		  task_pid_nr(current),
@@ -693,30 +696,24 @@ long drm_ioctl(struct file *filp,
 	if (unlikely(retcode))
 		goto err_i1;
 
-	if (cmd & (IOC_IN | IOC_OUT)) {
-		if (asize <= sizeof(stack_kdata)) {
-			kdata = stack_kdata;
-		} else {
-			kdata = kmalloc(asize, GFP_KERNEL);
-			if (!kdata) {
-				retcode = -ENOMEM;
-				goto err_i1;
-			}
-		}
-		if (asize > usize)
-			memset(kdata + usize, 0, asize - usize);
-	}
-
-	if (cmd & IOC_IN) {
-		if (copy_from_user(kdata, (void __user *)arg,
-				   usize) != 0) {
-			retcode = -EFAULT;
+	if (ksize <= sizeof(stack_kdata)) {
+		kdata = stack_kdata;
+	} else {
+		kdata = kmalloc(ksize, GFP_KERNEL);
+		if (!kdata) {
+			retcode = -ENOMEM;
 			goto err_i1;
 		}
-	} else if (cmd & IOC_OUT) {
-		memset(kdata, 0, usize);
 	}
+
+	if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
+		retcode = -EFAULT;
+		goto err_i1;
+	}
+
+	if (ksize > in_size)
+		memset(kdata + in_size, 0, ksize - in_size);
 
 	/* Enforce sane locking for kms driver ioctls. Core ioctls are
 	 * too messy still. */
@@ -728,11 +725,8 @@ long drm_ioctl(struct file *filp,
 		mutex_unlock(&drm_global_mutex);
 	}
 
-	if (cmd & IOC_OUT) {
-		if (copy_to_user((void __user *)arg, kdata,
-				 usize) != 0)
-			retcode = -EFAULT;
-	}
+	if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
+		retcode = -EFAULT;
 
 err_i1:
 	if (!ioctl)
@@ -759,7 +753,7 @@ EXPORT_SYMBOL(drm_ioctl);
  * shouldn't be used by any drivers.
  *
  * Returns:
- * True if the @nr corresponds to a DRM core ioctl numer, false otherwise.
+ * True if the @nr corresponds to a DRM core ioctl number, false otherwise.
  */
 bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
 {
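
The behavioural point of the rework ("drm: Don't overwrite user ioctl arg unless requested" above) is that the copy sizes now honour the direction bits of the cmd number userspace actually passed, ANDed with the driver's definition. An illustration with a hypothetical ioctl:

#include <linux/ioctl.h>
#include <linux/types.h>

struct foo_args {
	__u32 value;
};

/* The direction is encoded in the ioctl number itself. */
#define FOO_IOCTL_RW	_IOWR('d', 0x42, struct foo_args)	/* IOC_IN | IOC_OUT */
#define FOO_IOCTL_IN	_IOW('d', 0x42, struct foo_args)	/* IOC_IN only */

/*
 * If the driver table defines the ioctl as _IOWR but a caller issues the
 * _IOW variant, (cmd & ioctl->cmd & IOC_OUT) is 0, so out_size becomes 0
 * and the final copy_to_user() degenerates to a no-op: the caller's
 * argument buffer is no longer overwritten behind its back.
 */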


@@ -532,7 +532,7 @@ int drm_irq_uninstall(struct drm_device *dev)
 
 	/*
 	 * Wake up any waiters so they don't hang. This is just to paper over
-	 * isssues for UMS drivers which aren't in full control of their
+	 * issues for UMS drivers which aren't in full control of their
 	 * vblank/irq handling. KMS drivers must ensure that vblanks are all
 	 * disabled when uninstalling the irq handler.
 	 */
@@ -594,7 +594,7 @@ int drm_control(struct drm_device *dev, void *data,
 		return 0;
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return 0;
-	/* UMS was only ever support on pci devices. */
+	/* UMS was only ever supported on pci devices. */
 	if (WARN_ON(!dev->pdev))
 		return -EINVAL;
@@ -945,8 +945,8 @@ EXPORT_SYMBOL(drm_crtc_vblank_count);
  *
  * This is the legacy version of drm_crtc_vblank_count_and_time().
  */
-u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
-			      struct timeval *vblanktime)
+static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
+				     struct timeval *vblanktime)
 {
 	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	u32 vblank_count;
@@ -963,7 +963,6 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
 
 	return vblank_count;
 }
-EXPORT_SYMBOL(drm_vblank_count_and_time);
 
 /**
  * drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value
@@ -975,8 +974,6 @@ EXPORT_SYMBOL(drm_vblank_count_and_time);
  * vblank events since the system was booted, including lost events due to
 * modesetting activity. Returns corresponding system timestamp of the time
 * of the vblank interval that corresponds to the current vblank counter value.
- *
- * This is the native KMS version of drm_vblank_count_and_time().
 */
 u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
 				   struct timeval *vblanktime)
@@ -1588,12 +1585,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
 
 	seq = drm_vblank_count_and_time(dev, pipe, &now);
 
-	if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
-	    (seq - vblwait->request.sequence) <= (1 << 23)) {
-		vblwait->request.sequence = seq + 1;
-		vblwait->reply.sequence = vblwait->request.sequence;
-	}
-
 	DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n",
 		  vblwait->request.sequence, seq, pipe);
@@ -1690,6 +1681,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 		goto done;
 	}
 
+	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+	    (seq - vblwait->request.sequence) <= (1 << 23)) {
+		vblwait->request.sequence = seq + 1;
+	}
+
 	if (flags & _DRM_VBLANK_EVENT) {
 		/* must hold on to the vblank ref until the event fires
 		 * drm_vblank_put will be called asynchronously
@@ -1697,11 +1693,6 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 		return drm_queue_vblank_event(dev, pipe, vblwait, file_priv);
 	}
 
-	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
-	    (seq - vblwait->request.sequence) <= (1<<23)) {
-		vblwait->request.sequence = seq + 1;
-	}
-
 	DRM_DEBUG("waiting on vblank count %d, crtc %u\n",
 		  vblwait->request.sequence, pipe);
 
 	vblank->last_wait = vblwait->request.sequence;


@@ -44,7 +44,7 @@
 # include <asm/agp.h>
 #else
 # ifdef __powerpc__
-#  define PAGE_AGP	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+#  define PAGE_AGP	pgprot_noncached_wc(PAGE_KERNEL)
 # else
 #  define PAGE_AGP	PAGE_KERNEL
 # endif


@@ -999,17 +999,17 @@ int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
 EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
 
 /**
- * mipi_dsi_set_tear_scanline() - turn on the display module's Tearing Effect
- * output signal on the TE signal line when display module reaches line N
- * defined by STS[n:0].
+ * mipi_dsi_dcs_set_tear_scanline() - set the scanline to use as trigger for
+ * the Tearing Effect output signal of the display module
  * @dsi: DSI peripheral device
- * @param: STS[10:0]
+ * @scanline: scanline to use as trigger
  *
 * Return: 0 on success or a negative error code on failure
 */
-int mipi_dsi_set_tear_scanline(struct mipi_dsi_device *dsi, u16 param)
+int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline)
 {
-	u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, param >> 8,
-			  param & 0xff };
+	u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, scanline >> 8,
+			  scanline & 0xff };
 	ssize_t err;
 
 	err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
@@ -1018,7 +1018,7 @@ int mipi_dsi_set_tear_scanline(struct mipi_dsi_device *dsi, u16 param)
 
 	return 0;
 }
-EXPORT_SYMBOL(mipi_dsi_set_tear_scanline);
+EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline);
 
 /**
  * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
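
A usage sketch for the renamed helper, in a hypothetical panel driver's prepare path (the foo_* name is illustrative):

static int foo_panel_enable_te(struct mipi_dsi_device *dsi)
{
	int ret;

	/* Enable the Tearing Effect signal for vblank events first... */
	ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
	if (ret < 0)
		return ret;

	/* ...then pick the scanline that triggers it. */
	return mipi_dsi_dcs_set_tear_scanline(dsi, 0);
}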


@@ -41,7 +41,7 @@
 static inline void *drm_vmalloc_dma(unsigned long size)
 {
 #if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
-	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
+	return __vmalloc(size, GFP_KERNEL, pgprot_noncached_wc(PAGE_KERNEL));
 #else
 	return vmalloc_32(size);
 #endif


@@ -80,7 +80,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
 	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
 
 #if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
-	tmp |= _PAGE_NO_CACHE;
+	tmp = pgprot_noncached_wc(tmp);
 #endif
 
 	return tmp;
 }
@@ -593,7 +593,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 		 * pages and mappings in fault()
 		 */
 #if defined(__powerpc__)
-		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 #endif
 		vma->vm_ops = &drm_vm_ops;
 		break;


@@ -2,10 +2,6 @@ config DRM_EXYNOS
 	tristate "DRM Support for Samsung SoC EXYNOS Series"
 	depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM)
 	select DRM_KMS_HELPER
-	select DRM_KMS_FB_HELPER
-	select FB_CFB_FILLRECT
-	select FB_CFB_COPYAREA
-	select FB_CFB_IMAGEBLIT
 	select VIDEOMODE_HELPERS
 	help
 	  Choose this option if you have a Samsung SoC EXYNOS chipset.


@@ -361,13 +361,8 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
 
 		/* Disable the crtc to ensure a full modeset is
 		 * performed whenever it's turned on again. */
-		if (crtc) {
-			struct drm_mode_set modeset = {
-				.crtc = crtc,
-			};
-
-			drm_mode_set_config_internal(&modeset);
-		}
+		if (crtc)
+			drm_crtc_force_disable(crtc);
 	}
 
 	return 0;


@@ -431,7 +431,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
 	phy_set_drvdata(phy, mipi_tx);
 
 	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
-	if (IS_ERR(phy)) {
+	if (IS_ERR(phy_provider)) {
 		ret = PTR_ERR(phy_provider);
 		return ret;
 	}


@@ -125,18 +125,8 @@ nv04_display_destroy(struct drm_device *dev)
 	struct nv04_display *disp = nv04_display(dev);
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_encoder *encoder;
-	struct drm_crtc *crtc;
 	struct nouveau_crtc *nv_crtc;
 
-	/* Turn every CRTC off. */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct drm_mode_set modeset = {
-			.crtc = crtc,
-		};
-
-		drm_mode_set_config_internal(&modeset);
-	}
-
 	/* Restore state */
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
 		encoder->enc_restore(&encoder->base.base);


@@ -749,13 +749,8 @@ static int nv17_tv_set_property(struct drm_encoder *encoder,
 
 		/* Disable the crtc to ensure a full modeset is
 		 * performed whenever it's turned on again. */
-		if (crtc) {
-			struct drm_mode_set modeset = {
-				.crtc = crtc,
-			};
-
-			drm_mode_set_config_internal(&modeset);
-		}
+		if (crtc)
+			drm_crtc_force_disable(crtc);
 	}
 
 	return 0;


@@ -47,7 +47,7 @@ nouveau_display_vblank_handler(struct nvif_notify *notify)
 {
 	struct nouveau_crtc *nv_crtc =
 		container_of(notify, typeof(*nv_crtc), vblank);
-	drm_handle_vblank(nv_crtc->base.dev, nv_crtc->index);
+	drm_crtc_handle_vblank(&nv_crtc->base);
 	return NVIF_NOTIFY_KEEP;
 }
@@ -556,6 +556,7 @@ nouveau_display_destroy(struct drm_device *dev)
 	nouveau_display_vblank_fini(dev);
 	drm_kms_helper_poll_fini(dev);
+	drm_crtc_force_disable_all(dev);
 	drm_mode_config_cleanup(dev);
 
 	if (disp->dtor)


@@ -505,7 +505,11 @@ nouveau_drm_unload(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	pm_runtime_get_sync(dev->dev);
+	if (nouveau_runtime_pm != 0) {
+		pm_runtime_get_sync(dev->dev);
+		pm_runtime_forbid(dev->dev);
+	}
+
 	nouveau_fbcon_fini(dev);
 	nouveau_accel_fini(drm);
 	nouveau_hwmon_fini(dev);


@@ -203,7 +203,7 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release
 bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
 {
 	if (!qxl_check_idle(qdev->release_ring)) {
-		queue_work(qdev->gc_queue, &qdev->gc_work);
+		schedule_work(&qdev->gc_work);
 		if (flush)
 			flush_work(&qdev->gc_work);
 		return true;


@@ -37,7 +37,6 @@ static int alloc_clips(struct qxl_device *qdev,
  * the qxl_clip_rects. This is *not* the same as the memory allocated
  * on the device, it is offset to qxl_clip_rects.chunk.data */
 static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
-					      struct qxl_drawable *drawable,
 					      unsigned num_clips,
 					      struct qxl_bo *clips_bo)
 {
@@ -136,6 +135,8 @@ static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
 	 * correctly globaly, since that would require
 	 * tracking all of our palettes. */
 	ret = qxl_bo_kmap(palette_bo, (void **)&pal);
+	if (ret)
+		return ret;
 	pal->num_ents = 2;
 	pal->unique = unique++;
 	if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
@@ -349,7 +350,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
 	if (ret)
 		goto out_release_backoff;
 
-	rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo);
+	rects = drawable_set_clipping(qdev, num_clips, clips_bo);
 	if (!rects)
 		goto out_release_backoff;


@@ -321,7 +321,6 @@ struct qxl_device {
 	struct qxl_bo *current_release_bo[3];
 	int current_release_bo_offset[3];
 
-	struct workqueue_struct *gc_queue;
 	struct work_struct gc_work;
 
 	struct drm_property *hotplug_mode_update_property;


@@ -258,7 +258,6 @@ static int qxl_device_init(struct qxl_device *qdev,
 		 (unsigned long)qdev->surfaceram_size);
 
-	qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
 	INIT_WORK(&qdev->gc_work, qxl_gc_work);
 
 	return 0;
@@ -270,10 +269,7 @@ static void qxl_device_fini(struct qxl_device *qdev)
 		qxl_bo_unref(&qdev->current_release_bo[0]);
 	if (qdev->current_release_bo[1])
 		qxl_bo_unref(&qdev->current_release_bo[1]);
-	flush_workqueue(qdev->gc_queue);
-	destroy_workqueue(qdev->gc_queue);
-	qdev->gc_queue = NULL;
+	flush_work(&qdev->gc_work);
 	qxl_ring_free(qdev->command_ring);
 	qxl_ring_free(qdev->cursor_ring);
 	qxl_ring_free(qdev->release_ring);
@@ -310,10 +306,6 @@ int qxl_driver_load(struct drm_device *dev, unsigned long flags)
 	struct qxl_device *qdev;
 	int r;
 
-	/* require kms */
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return -ENODEV;
-
 	qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
 	if (qdev == NULL)
 		return -ENOMEM;


@@ -30,6 +30,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/radeon_drm.h>
+#include <linux/pm_runtime.h>
 #include <linux/vgaarb.h>
 #include <linux/vga_switcheroo.h>
 #include <linux/efi.h>
@@ -1526,6 +1527,9 @@ int radeon_device_init(struct radeon_device *rdev,
 	return 0;
 
 failed:
+	/* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
+	if (radeon_is_px(ddev))
+		pm_runtime_put_noidle(ddev->dev);
 	if (runtime)
 		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
 	return r;


@@ -1711,6 +1711,7 @@ void radeon_modeset_fini(struct radeon_device *rdev)
 		radeon_afmt_fini(rdev);
 		drm_kms_helper_poll_fini(rdev->ddev);
 		radeon_hpd_fini(rdev);
+		drm_crtc_force_disable_all(rdev->ddev);
 		drm_mode_config_cleanup(rdev->ddev);
 		rdev->mode_info.mode_config_initialized = false;
 	}


@@ -63,7 +63,10 @@ int radeon_driver_unload_kms(struct drm_device *dev)
 	if (rdev->rmmio == NULL)
 		goto done_free;
 
-	pm_runtime_get_sync(dev->dev);
+	if (radeon_is_px(dev)) {
+		pm_runtime_get_sync(dev->dev);
+		pm_runtime_forbid(dev->dev);
+	}
 
 	radeon_kfd_device_fini(rdev);


@@ -552,7 +552,7 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
 	rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
 
 	if (status & DSSR_FRM) {
-		drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
+		drm_crtc_handle_vblank(&rcrtc->crtc);
 		rcar_du_crtc_finish_page_flip(rcrtc);
 		ret = IRQ_HANDLED;
 	}


@@ -433,6 +433,7 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev)
 			is_support_iommu = false;
 		}
 
+		of_node_put(iommu);
 		component_match_add(dev, &match, compare_of, port->parent);
 		of_node_put(port);
 	}


@@ -267,10 +267,12 @@ static int sti_compositor_probe(struct platform_device *pdev)
 	vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
 	if (vtg_np)
 		compo->vtg_main = of_vtg_find(vtg_np);
+	of_node_put(vtg_np);
 
 	vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 1);
 	if (vtg_np)
 		compo->vtg_aux = of_vtg_find(vtg_np);
+	of_node_put(vtg_np);
 
 	platform_set_drvdata(pdev, compo);
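
All of these sti and rockchip fixes enforce the same refcounting rule: of_parse_phandle() returns the node with an elevated refcount, which the caller must drop with of_node_put() once done. Since of_node_put(NULL) is a safe no-op, the put needs no NULL check. The shape, with a hypothetical "foo" phandle and consumer:

static int foo_probe(struct platform_device *pdev)	/* hypothetical driver */
{
	struct foo_priv *priv = platform_get_drvdata(pdev);	/* hypothetical */
	struct device_node *np;

	np = of_parse_phandle(pdev->dev.of_node, "foo", 0);	/* hypothetical phandle */
	if (np)
		priv->foo = foo_find(np);	/* hypothetical consumer */
	of_node_put(np);	/* balances of_parse_phandle(); NULL-safe */

	return 0;
}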


@@ -580,6 +580,7 @@ static int sti_dvo_probe(struct platform_device *pdev)
 	dvo->panel_node = of_parse_phandle(np, "sti,panel", 0);
 	if (!dvo->panel_node)
 		DRM_ERROR("No panel associated to the dvo output\n");
+	of_node_put(dvo->panel_node);
 
 	platform_set_drvdata(pdev, dvo);


@@ -1363,6 +1363,7 @@ static int sti_hqvdp_probe(struct platform_device *pdev)
 	vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
 	if (vtg_np)
 		hqvdp->vtg = of_vtg_find(vtg_np);
+	of_node_put(vtg_np);
 
 	platform_set_drvdata(pdev, hqvdp);


@@ -432,6 +432,7 @@ static int vtg_probe(struct platform_device *pdev)
 	np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0);
 	if (np) {
 		vtg->slave = of_vtg_find(np);
+		of_node_put(np);
 		if (!vtg->slave)
 			return -EPROBE_DEFER;


@@ -697,7 +697,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
 		spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
 
-		drm_handle_vblank(dev, 0);
+		drm_crtc_handle_vblank(crtc);
 
 		if (!skip_event) {
 			struct drm_pending_vblank_event *event;


@@ -94,7 +94,6 @@ static void udl_usb_disconnect(struct usb_interface *interface)
 	struct drm_device *dev = usb_get_intfdata(interface);
 
 	drm_kms_helper_poll_disable(dev);
-	drm_connector_unregister_all(dev);
 	udl_fbdev_unplug(dev);
 	udl_drop_usb(dev);
 	drm_unplug_dev(dev);


@@ -42,81 +42,38 @@
 #define DRIVER_MAJOR	1
 #define DRIVER_MINOR	0
 
-void vgem_gem_put_pages(struct drm_vgem_gem_object *obj)
-{
-	drm_gem_put_pages(&obj->base, obj->pages, false, false);
-	obj->pages = NULL;
-}
-
 static void vgem_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
 
 	drm_gem_free_mmap_offset(obj);
-
-	if (vgem_obj->use_dma_buf && obj->dma_buf) {
-		dma_buf_put(obj->dma_buf);
-		obj->dma_buf = NULL;
-	}
-
 	drm_gem_object_release(obj);
-
-	if (vgem_obj->pages)
-		vgem_gem_put_pages(vgem_obj);
-
-	vgem_obj->pages = NULL;
-
 	kfree(vgem_obj);
 }
 
-int vgem_gem_get_pages(struct drm_vgem_gem_object *obj)
-{
-	struct page **pages;
-
-	if (obj->pages || obj->use_dma_buf)
-		return 0;
-
-	pages = drm_gem_get_pages(&obj->base);
-	if (IS_ERR(pages)) {
-		return PTR_ERR(pages);
-	}
-
-	obj->pages = pages;
-
-	return 0;
-}
-
 static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_vgem_gem_object *obj = vma->vm_private_data;
-	loff_t num_pages;
-	pgoff_t page_offset;
-	int ret;
-
 	/* We don't use vmf->pgoff since that has the fake offset */
-	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
-		PAGE_SHIFT;
-
-	num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
-
-	if (page_offset > num_pages)
-		return VM_FAULT_SIGBUS;
-
-	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
-			     obj->pages[page_offset]);
-	switch (ret) {
-	case 0:
-		return VM_FAULT_NOPAGE;
-	case -ENOMEM:
-		return VM_FAULT_OOM;
-	case -EBUSY:
-		return VM_FAULT_RETRY;
-	case -EFAULT:
-	case -EINVAL:
-		return VM_FAULT_SIGBUS;
-	default:
-		WARN_ON(1);
-		return VM_FAULT_SIGBUS;
+	unsigned long vaddr = (unsigned long)vmf->virtual_address;
+	struct page *page;
+
+	page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
+				       (vaddr - vma->vm_start) >> PAGE_SHIFT);
+	if (!IS_ERR(page)) {
+		vmf->page = page;
+		return 0;
+	} else switch (PTR_ERR(page)) {
+		case -ENOSPC:
+		case -ENOMEM:
+			return VM_FAULT_OOM;
+		case -EBUSY:
+			return VM_FAULT_RETRY;
+		case -EFAULT:
+		case -EINVAL:
+			return VM_FAULT_SIGBUS;
+		default:
+			WARN_ON_ONCE(PTR_ERR(page));
+			return VM_FAULT_SIGBUS;
 	}
 }
@@ -134,57 +91,43 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
 					      unsigned long size)
 {
 	struct drm_vgem_gem_object *obj;
-	struct drm_gem_object *gem_object;
-	int err;
-
-	size = roundup(size, PAGE_SIZE);
+	int ret;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (!obj)
 		return ERR_PTR(-ENOMEM);
 
-	gem_object = &obj->base;
-
-	err = drm_gem_object_init(dev, gem_object, size);
-	if (err)
-		goto out;
-
-	err = vgem_gem_get_pages(obj);
-	if (err)
-		goto out;
-
-	err = drm_gem_handle_create(file, gem_object, handle);
-	if (err)
-		goto handle_out;
+	ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
+	if (ret)
+		goto err_free;
 
-	drm_gem_object_unreference_unlocked(gem_object);
+	ret = drm_gem_handle_create(file, &obj->base, handle);
+	drm_gem_object_unreference_unlocked(&obj->base);
+	if (ret)
+		goto err;
 
-	return gem_object;
+	return &obj->base;
 
-handle_out:
-	drm_gem_object_release(gem_object);
-out:
+err_free:
 	kfree(obj);
-	return ERR_PTR(err);
+err:
+	return ERR_PTR(ret);
 }
 
 static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 				struct drm_mode_create_dumb *args)
 {
 	struct drm_gem_object *gem_object;
-	uint64_t size;
-	uint64_t pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+	u64 pitch, size;
 
+	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
 	size = args->height * pitch;
 	if (size == 0)
 		return -EINVAL;
 
 	gem_object = vgem_gem_create(dev, file, &args->handle, size);
-	if (IS_ERR(gem_object)) {
-		DRM_DEBUG_DRIVER("object creation failed\n");
+	if (IS_ERR(gem_object))
 		return PTR_ERR(gem_object);
-	}
 
 	args->size = gem_object->size;
 	args->pitch = pitch;
@@ -194,26 +137,26 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 	return 0;
 }
 
-int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
-		      uint32_t handle, uint64_t *offset)
+static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
+			     uint32_t handle, uint64_t *offset)
 {
-	int ret = 0;
 	struct drm_gem_object *obj;
+	int ret;
 
 	obj = drm_gem_object_lookup(file, handle);
 	if (!obj)
 		return -ENOENT;
 
+	if (!obj->filp) {
+		ret = -EINVAL;
+		goto unref;
+	}
+
 	ret = drm_gem_create_mmap_offset(obj);
 	if (ret)
 		goto unref;
 
-	BUG_ON(!obj->filp);
-
-	obj->filp->private_data = obj;
-
 	*offset = drm_vma_node_offset_addr(&obj->vma_node);
 
 unref:
 	drm_gem_object_unreference_unlocked(obj);
@@ -223,24 +166,127 @@ unref:
 static struct drm_ioctl_desc vgem_ioctls[] = {
 };
 
+static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	unsigned long flags = vma->vm_flags;
+	int ret;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret)
+		return ret;
+
+	/* Keep the WC mmaping set by drm_gem_mmap() but our pages
+	 * are ordinary and not special.
+	 */
+	vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
+	return 0;
+}
+
 static const struct file_operations vgem_driver_fops = {
 	.owner		= THIS_MODULE,
 	.open		= drm_open,
-	.mmap		= drm_gem_mmap,
+	.mmap		= vgem_mmap,
 	.poll		= drm_poll,
 	.read		= drm_read,
 	.unlocked_ioctl = drm_ioctl,
 	.release	= drm_release,
 };
 
+static int vgem_prime_pin(struct drm_gem_object *obj)
+{
+	long n_pages = obj->size >> PAGE_SHIFT;
+	struct page **pages;
+
+	/* Flush the object from the CPU cache so that importers can rely
+	 * on coherent indirect access via the exported dma-address.
+	 */
+	pages = drm_gem_get_pages(obj);
+	if (IS_ERR(pages))
+		return PTR_ERR(pages);
+
+	drm_clflush_pages(pages, n_pages);
+	drm_gem_put_pages(obj, pages, true, false);
+
+	return 0;
+}
+
+static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct sg_table *st;
+	struct page **pages;
+
+	pages = drm_gem_get_pages(obj);
+	if (IS_ERR(pages))
+		return ERR_CAST(pages);
+
+	st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
+	drm_gem_put_pages(obj, pages, false, false);
+
+	return st;
+}
+
+static void *vgem_prime_vmap(struct drm_gem_object *obj)
+{
+	long n_pages = obj->size >> PAGE_SHIFT;
+	struct page **pages;
+	void *addr;
+
+	pages = drm_gem_get_pages(obj);
+	if (IS_ERR(pages))
+		return NULL;
+
+	addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
+	drm_gem_put_pages(obj, pages, false, false);
+
+	return addr;
+}
+
+static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	vunmap(vaddr);
+}
+
+static int vgem_prime_mmap(struct drm_gem_object *obj,
+			   struct vm_area_struct *vma)
+{
+	int ret;
+
+	if (obj->size < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	if (!obj->filp)
+		return -ENODEV;
+
+	ret = obj->filp->f_op->mmap(obj->filp, vma);
+	if (ret)
+		return ret;
+
+	fput(vma->vm_file);
+	vma->vm_file = get_file(obj->filp);
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+	return 0;
+}
+
 static struct drm_driver vgem_driver = {
-	.driver_features		= DRIVER_GEM,
+	.driver_features		= DRIVER_GEM | DRIVER_PRIME,
 	.gem_free_object_unlocked	= vgem_gem_free_object,
 	.gem_vm_ops			= &vgem_gem_vm_ops,
 	.ioctls				= vgem_ioctls,
 	.fops				= &vgem_driver_fops,
+
 	.dumb_create			= vgem_gem_dumb_create,
 	.dumb_map_offset		= vgem_gem_dumb_map,
+
+	.prime_handle_to_fd		= drm_gem_prime_handle_to_fd,
+	.gem_prime_pin			= vgem_prime_pin,
+	.gem_prime_export		= drm_gem_prime_export,
+	.gem_prime_get_sg_table		= vgem_prime_get_sg_table,
+	.gem_prime_vmap			= vgem_prime_vmap,
+	.gem_prime_vunmap		= vgem_prime_vunmap,
+	.gem_prime_mmap			= vgem_prime_mmap,
+
 	.name	= DRIVER_NAME,
 	.desc	= DRIVER_DESC,
 	.date	= DRIVER_DATE,
@@ -248,7 +294,7 @@ static struct drm_driver vgem_driver = {
 	.minor	= DRIVER_MINOR,
 };
 
-struct drm_device *vgem_device;
+static struct drm_device *vgem_device;
 
 static int __init vgem_init(void)
 {
@@ -261,7 +307,6 @@ static int __init vgem_init(void)
 	}
 
 	ret = drm_dev_register(vgem_device, 0);
-
 	if (ret)
 		goto out_unref;
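
Since the stated purpose of the vgem PRIME support is writing testcases, here is a hedged userspace sketch of the round trip it enables, using libdrm's drmPrimeHandleToFD() (error handling trimmed; the device path is an assumption):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <xf86drm.h>
#include <drm/drm.h>

static int vgem_export_dmabuf(const char *path)	/* e.g. "/dev/dri/card0" */
{
	struct drm_mode_create_dumb create;
	int fd, dmabuf_fd = -1;

	fd = open(path, O_RDWR);
	if (fd < 0)
		return -1;

	memset(&create, 0, sizeof(create));
	create.width = 64;
	create.height = 64;
	create.bpp = 32;
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return -1;

	/* DRIVER_PRIME plus .prime_handle_to_fd is what makes this work. */
	drmPrimeHandleToFD(fd, create.handle, DRM_CLOEXEC, &dmabuf_fd);
	return dmabuf_fd;
}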


@@ -35,12 +35,6 @@
 #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
 
 struct drm_vgem_gem_object {
 	struct drm_gem_object base;
-	struct page **pages;
-	bool use_dma_buf;
 };
 
-/* vgem_drv.c */
-extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj);
-extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj);
-
 #endif


@@ -1041,8 +1041,7 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev,
 	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 	struct vmw_master *vmaster;
 
-	if (file_priv->minor->type != DRM_MINOR_LEGACY ||
-	    !(flags & DRM_AUTH))
+	if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
 		return NULL;
 
 	ret = mutex_lock_interruptible(&dev->master_mutex);


@@ -52,9 +52,9 @@
  *
  * * muxed: Dual GPUs with a multiplexer chip to switch outputs between GPUs.
  * * muxless: Dual GPUs but only one of them is connected to outputs.
- *	The other one is merely used to offload rendering, its results
- *	are copied over PCIe into the framebuffer. On Linux this is
- *	supported with DRI PRIME.
+ *   The other one is merely used to offload rendering, its results
+ *   are copied over PCIe into the framebuffer. On Linux this is
+ *   supported with DRI PRIME.
  *
 * Hybrid graphics started to appear in the late Naughties and were initially
 * all muxed. Newer laptops moved to a muxless architecture for cost reasons.
@@ -560,21 +560,21 @@ EXPORT_SYMBOL(vga_switcheroo_unlock_ddc);
  * * OFF: Power off the device not in use.
  * * ON: Power on the device not in use.
  * * IGD: Switch to the integrated graphics device.
- *	Power on the integrated GPU if necessary, power off the discrete GPU.
- *	Prerequisite is that no user space processes (e.g. Xorg, alsactl)
- *	have opened device files of the GPUs or the audio client. If the
- *	switch fails, the user may invoke lsof(8) or fuser(1) on /dev/dri/
- *	and /dev/snd/controlC1 to identify processes blocking the switch.
+ *   Power on the integrated GPU if necessary, power off the discrete GPU.
+ *   Prerequisite is that no user space processes (e.g. Xorg, alsactl)
+ *   have opened device files of the GPUs or the audio client. If the
+ *   switch fails, the user may invoke lsof(8) or fuser(1) on /dev/dri/
+ *   and /dev/snd/controlC1 to identify processes blocking the switch.
  * * DIS: Switch to the discrete graphics device.
  * * DIGD: Delayed switch to the integrated graphics device.
- *	This will perform the switch once the last user space process has
- *	closed the device files of the GPUs and the audio client.
+ *   This will perform the switch once the last user space process has
+ *   closed the device files of the GPUs and the audio client.
  * * DDIS: Delayed switch to the discrete graphics device.
  * * MIGD: Mux-only switch to the integrated graphics device.
- *	Does not remap console or change the power state of either gpu.
- *	If the integrated GPU is currently off, the screen will turn black.
- *	If it is on, the screen will show whatever happens to be in VRAM.
- *	Either way, the user has to blindly enter the command to switch back.
+ *   Does not remap console or change the power state of either gpu.
+ *   If the integrated GPU is currently off, the screen will turn black.
+ *   If it is on, the screen will show whatever happens to be in VRAM.
+ *   Either way, the user has to blindly enter the command to switch back.
  * * MDIS: Mux-only switch to the discrete graphics device.
  *
  * For GPUs whose power state is controlled by the driver's runtime pm,
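
For reference, these commands are written to the debugfs switch file. A minimal userspace sketch, assuming debugfs is mounted at the usual location:

#include <fcntl.h>
#include <unistd.h>

static int vgaswitcheroo_cmd(const char *cmd, size_t len)
{
	int fd = open("/sys/kernel/debug/vgaswitcheroo/switch", O_WRONLY);
	ssize_t ret;

	if (fd < 0)
		return -1;
	ret = write(fd, cmd, len);	/* e.g. vgaswitcheroo_cmd("DIS", 3) */
	close(fd);
	return ret == (ssize_t)len ? 0 : -1;
}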


@@ -942,8 +942,6 @@ extern int drm_wait_vblank(struct drm_device *dev, void *data,
 			   struct drm_file *filp);
 extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
 extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
-extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
-				     struct timeval *vblanktime);
 extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
 					  struct timeval *vblanktime);
 extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,


@@ -2588,7 +2588,6 @@ static inline unsigned drm_connector_index(struct drm_connector *connector)
 }
 
 /* helpers to {un}register all connectors from sysfs for device */
-extern int drm_connector_register_all(struct drm_device *dev);
 extern void drm_connector_unregister_all(struct drm_device *dev);
 
 extern __printf(5, 6)
@@ -2654,6 +2653,8 @@ extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
 extern void drm_plane_force_disable(struct drm_plane *plane);
 extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
 				   int *hdisplay, int *vdisplay);
+extern int drm_crtc_force_disable(struct drm_crtc *crtc);
+extern int drm_crtc_force_disable_all(struct drm_device *dev);
 
 extern void drm_encoder_cleanup(struct drm_encoder *encoder);


@@ -265,7 +265,7 @@ int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
 				    u16 end);
 int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
 				  u16 end);
-int mipi_dsi_set_tear_scanline(struct mipi_dsi_device *dsi, u16 param);
+int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline);
 int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
 int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
 			     enum mipi_dsi_dcs_tear_mode mode);