2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-23 04:34:11 +08:00

- Add S5PV210 FIMD variant support.

- Add IPP v2 framework.
   . it is a rewritten version of the Exynos mem-to-mem image processing
     framework which supports color space conversion, image up/down-scaling
     and rotation. This new version replaces existing userspace API with
     new easy-to-use and simple ones so we have already applied the use of
     these API to real user, Tizen Platform[1], and also makes existing
     Scaler, FIMC, GScaler and Rotator drivers to use IPP v2 core API.
 
     And below are patch lists we have applied to a real user,
     https://git.tizen.org/cgit/platform/adaptation/samsung_exynos/libtdm-exynos/log/?h=tizen&qt=grep&q=ipp
     https://git.tizen.org/cgit/platform/adaptation/samsung_exynos/libtdm-exynos/commit/?h=tizen&id=b59be207365d10efd489e6f71c8a045b558c44fe
     https://git.tizen.org/cgit/platform/kernel/linux-exynos/log/?h=tizen&qt=grep&q=ipp
 
     TDM(Tizen Display Manager) is a Display HAL for Tizen platform.
     P.S. The only real user of the IPP API is Tizen.
 
     [1] https://www.tizen.org/
 
 - Two cleanups
   . One is to just remove mode_set callback from MIPI-DSI driver
     because drm_display_mode data is already available from crtc
     atomic state.
   . And the other is to just use the new return type, vm_fault_t,
     for page fault handler.
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABAgAGBQJa+Q9ZAAoJEFc4NIkMQxK4i3gQAJywYuOpyg1z8WeHDjizIh3D
 CST10ftIrOnPozBf9TVJwgP3v+5i8md3ZJ6QuWIE1Zl/KZne5KEjsHxQcB/Ktlxd
 bjZk5NtK/K5W4BWEASA1x2unrl1QEcyoOlXwNWWLbHjZidCqtuTgnuNcCdwqYWw8
 e9Hid6w+qnQIyAQnJWv7Ue7IQXw79CNk4dlz5YIvILo2q23HUgHGW0qmz4OHiaF2
 DiDUn/O/246WODagOJADXdjkb/BuksPATwHfyKcjU67wy0kCGnY2WQK7UC5piDi9
 oKwFyzAKWnK10Wq9MElkFwyK6jrygLCmyglsuNs9hJ0HhDjGm15TI0MhQuQzx7H8
 hL1IdgGIgW0CfFdrP6fDFZE7x6vEyW+gir0g0lVkV8Mq5XMaRs93RXDxGPKmSCg/
 7oVcdy9nVMwLzVH9RmJrErQbtGFSIHmWyleg402NNqRMrBdmMKzlmHLSEvXRzp2/
 6NThm1wG7yjh7w8hI2+nfGA4REgBp/rwyuL9JG2aO8cilAkv7jF30Z/B9k1YSaVm
 qWy8H1Xo42dTyCzP0Ys9LXeyfjQ+DqFAa7HeM/9iHK8xE3Kl4fD3AuxtxR0IPqWX
 gcs6CEHh69mnUrHvyNu74FbZY0Dptbfcl+nH9krmNDRwyqjl30zRDfswU9eIpOgS
 /f01g2ztY9ht8EnKZkYU
 =M74Y
 -----END PGP SIGNATURE-----

Merge tag 'exynos-drm-next-for-v4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos into drm-next

- Add S5PV210 FIMD variant support.

- Add IPP v2 framework.
  . it is a rewritten version of the Exynos mem-to-mem image processing
    framework which supports color space conversion, image up/down-scaling
    and rotation. This new version replaces existing userspace API with
    new easy-to-use and simple ones so we have already applied the use of
    these API to real user, Tizen Platform[1], and also makes existing
    Scaler, FIMC, GScaler and Rotator drivers to use IPP v2 core API.

    And below are patch lists we have applied to a real user,
    https://git.tizen.org/cgit/platform/adaptation/samsung_exynos/libtdm-exynos/log/?h=tizen&qt=grep&q=ipp
    https://git.tizen.org/cgit/platform/adaptation/samsung_exynos/libtdm-exynos/commit/?h=tizen&id=b59be207365d10efd489e6f71c8a045b558c44fe
    https://git.tizen.org/cgit/platform/kernel/linux-exynos/log/?h=tizen&qt=grep&q=ipp

    TDM(Tizen Display Manager) is a Display HAL for Tizen platform.
    P.S. The only real user of the IPP API is Tizen.

    [1] https://www.tizen.org/

- Two cleanups
  . One is to just remove mode_set callback from MIPI-DSI driver
    because drm_display_mode data is already available from crtc
    atomic state.
  . And the other is to just use the new return type, vm_fault_t,
    for page fault handler.

Signed-off-by: Dave Airlie <airlied@redhat.com>

# gpg: Signature made Mon 14 May 2018 14:23:53 AEST
# gpg:                using RSA key 573834890C4312B8
# gpg: Can't check signature: public key not found
Link: https://patchwork.freedesktop.org/patch/msgid/1526276453-29879-1-git-send-email-inki.dae@samsung.com
This commit is contained in:
Dave Airlie 2018-05-15 15:37:07 +10:00
commit 444ac87bec
27 changed files with 3535 additions and 2191 deletions

View File

@ -0,0 +1,27 @@
* Samsung Exynos Image Scaler
Required properties:
- compatible : value should be one of the following:
(a) "samsung,exynos5420-scaler" for Scaler IP in Exynos5420
(b) "samsung,exynos5433-scaler" for Scaler IP in Exynos5433
- reg : Physical base address of the IP registers and length of memory
mapped region.
- interrupts : Interrupt specifier for scaler interrupt, according to format
specific to interrupt parent.
- clocks : Clock specifier for scaler clock, according to generic clock
bindings. (See Documentation/devicetree/bindings/clock/exynos*.txt)
- clock-names : Names of clocks. For exynos scaler, it should be "mscl"
on 5420 and "pclk", "aclk" and "aclk_xiu" on 5433.
Example:
scaler@12800000 {
compatible = "samsung,exynos5420-scaler";
reg = <0x12800000 0x1294>;
interrupts = <0 220 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clock CLK_MSCL0>;
clock-names = "mscl";
};

View File

@ -56,7 +56,9 @@ static int dumb_vga_get_modes(struct drm_connector *connector)
}
drm_mode_connector_update_edid_property(connector, edid);
return drm_add_edid_modes(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
return ret;
fallback:
/*

View File

@ -1,6 +1,6 @@
config DRM_EXYNOS
tristate "DRM Support for Samsung SoC EXYNOS Series"
depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM)
depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM)
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select SND_SOC_HDMI_CODEC if SND_SOC
@ -95,21 +95,31 @@ config DRM_EXYNOS_G2D
help
Choose this option if you want to use Exynos G2D for DRM.
config DRM_EXYNOS_IPP
bool
config DRM_EXYNOS_FIMC
bool "FIMC"
depends on BROKEN && MFD_SYSCON
select DRM_EXYNOS_IPP
help
Choose this option if you want to use Exynos FIMC for DRM.
config DRM_EXYNOS_ROTATOR
bool "Rotator"
depends on BROKEN
select DRM_EXYNOS_IPP
help
Choose this option if you want to use Exynos Rotator for DRM.
config DRM_EXYNOS_SCALER
bool "Scaler"
select DRM_EXYNOS_IPP
help
Choose this option if you want to use Exynos Scaler for DRM.
config DRM_EXYNOS_GSC
bool "GScaler"
depends on BROKEN && ARCH_EXYNOS5 && VIDEO_SAMSUNG_EXYNOS_GSC=n
depends on VIDEO_SAMSUNG_EXYNOS_GSC=n
select DRM_EXYNOS_IPP
help
Choose this option if you want to use Exynos GSC for DRM.

View File

@ -18,8 +18,10 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_MIXER) += exynos_mixer.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o
exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o
exynosdrm-$(CONFIG_DRM_EXYNOS_SCALER) += exynos_drm_scaler.o
exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o
exynosdrm-$(CONFIG_DRM_EXYNOS_MIC) += exynos_drm_mic.o

View File

@ -27,15 +27,23 @@
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_vidi.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_iommu.h"
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
#define DRIVER_DATE "20110530"
#define DRIVER_DATE "20180330"
/*
* Interface history:
*
* 1.0 - Original version
* 1.1 - Upgrade IPP driver to version 2.0
*/
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_MINOR 1
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
@ -88,6 +96,16 @@ static const struct drm_ioctl_desc exynos_ioctls[] = {
DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_RESOURCES,
exynos_drm_ipp_get_res_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_CAPS, exynos_drm_ipp_get_caps_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_LIMITS,
exynos_drm_ipp_get_limits_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_IPP_COMMIT, exynos_drm_ipp_commit_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
};
static const struct file_operations exynos_drm_driver_fops = {
@ -184,6 +202,7 @@ struct exynos_drm_driver_info {
#define DRM_COMPONENT_DRIVER BIT(0) /* supports component framework */
#define DRM_VIRTUAL_DEVICE BIT(1) /* create virtual platform device */
#define DRM_DMA_DEVICE BIT(2) /* can be used for dma allocations */
#define DRM_FIMC_DEVICE BIT(3) /* devices shared with V4L2 subsystem */
#define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL)
@ -223,10 +242,16 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = {
DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D),
}, {
DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC),
DRM_COMPONENT_DRIVER | DRM_FIMC_DEVICE,
}, {
DRV_PTR(rotator_driver, CONFIG_DRM_EXYNOS_ROTATOR),
DRM_COMPONENT_DRIVER
}, {
DRV_PTR(scaler_driver, CONFIG_DRM_EXYNOS_SCALER),
DRM_COMPONENT_DRIVER
}, {
DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC),
DRM_COMPONENT_DRIVER
}, {
&exynos_drm_platform_driver,
DRM_VIRTUAL_DEVICE
@ -254,7 +279,11 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
&info->driver->driver,
(void *)platform_bus_type.match))) {
put_device(p);
component_match_add(dev, &match, compare_dev, d);
if (!(info->flags & DRM_FIMC_DEVICE) ||
exynos_drm_check_fimc_device(d) == 0)
component_match_add(dev, &match,
compare_dev, d);
p = d;
}
put_device(p);

View File

@ -273,6 +273,15 @@ static inline int exynos_dpi_bind(struct drm_device *dev,
}
#endif
#ifdef CONFIG_DRM_EXYNOS_FIMC
int exynos_drm_check_fimc_device(struct device *dev);
#else
static inline int exynos_drm_check_fimc_device(struct device *dev)
{
return 0;
}
#endif
int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
bool nonblock);
@ -288,6 +297,7 @@ extern struct platform_driver vidi_driver;
extern struct platform_driver g2d_driver;
extern struct platform_driver fimc_driver;
extern struct platform_driver rotator_driver;
extern struct platform_driver scaler_driver;
extern struct platform_driver gsc_driver;
extern struct platform_driver mic_driver;
#endif

View File

@ -270,7 +270,6 @@ struct exynos_dsi {
u32 lanes;
u32 mode_flags;
u32 format;
struct videomode vm;
int state;
struct drm_property *brightness;
@ -881,30 +880,30 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
static void exynos_dsi_set_display_mode(struct exynos_dsi *dsi)
{
struct videomode *vm = &dsi->vm;
struct drm_display_mode *m = &dsi->encoder.crtc->state->adjusted_mode;
unsigned int num_bits_resol = dsi->driver_data->num_bits_resol;
u32 reg;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
reg = DSIM_CMD_ALLOW(0xf)
| DSIM_STABLE_VFP(vm->vfront_porch)
| DSIM_MAIN_VBP(vm->vback_porch);
| DSIM_STABLE_VFP(m->vsync_start - m->vdisplay)
| DSIM_MAIN_VBP(m->vtotal - m->vsync_end);
exynos_dsi_write(dsi, DSIM_MVPORCH_REG, reg);
reg = DSIM_MAIN_HFP(vm->hfront_porch)
| DSIM_MAIN_HBP(vm->hback_porch);
reg = DSIM_MAIN_HFP(m->hsync_start - m->hdisplay)
| DSIM_MAIN_HBP(m->htotal - m->hsync_end);
exynos_dsi_write(dsi, DSIM_MHPORCH_REG, reg);
reg = DSIM_MAIN_VSA(vm->vsync_len)
| DSIM_MAIN_HSA(vm->hsync_len);
reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start)
| DSIM_MAIN_HSA(m->hsync_end - m->hsync_start);
exynos_dsi_write(dsi, DSIM_MSYNC_REG, reg);
}
reg = DSIM_MAIN_HRESOL(vm->hactive, num_bits_resol) |
DSIM_MAIN_VRESOL(vm->vactive, num_bits_resol);
reg = DSIM_MAIN_HRESOL(m->hdisplay, num_bits_resol) |
DSIM_MAIN_VRESOL(m->vdisplay, num_bits_resol);
exynos_dsi_write(dsi, DSIM_MDRESOL_REG, reg);
dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive);
dev_dbg(dsi->dev, "LCD size = %dx%d\n", m->hdisplay, m->vdisplay);
}
static void exynos_dsi_set_display_enable(struct exynos_dsi *dsi, bool enable)
@ -1485,26 +1484,7 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
return 0;
}
static void exynos_dsi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
struct videomode *vm = &dsi->vm;
struct drm_display_mode *m = adjusted_mode;
vm->hactive = m->hdisplay;
vm->vactive = m->vdisplay;
vm->vfront_porch = m->vsync_start - m->vdisplay;
vm->vback_porch = m->vtotal - m->vsync_end;
vm->vsync_len = m->vsync_end - m->vsync_start;
vm->hfront_porch = m->hsync_start - m->hdisplay;
vm->hback_porch = m->htotal - m->hsync_end;
vm->hsync_len = m->hsync_end - m->hsync_start;
}
static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
.mode_set = exynos_dsi_mode_set,
.enable = exynos_dsi_enable,
.disable = exynos_dsi_disable,
};

File diff suppressed because it is too large Load Diff

View File

@ -1,23 +0,0 @@
/*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
*
* Authors:
* Eunchul Kim <chulspro.kim@samsung.com>
* Jinyoung Jeon <jy0.jeon@samsung.com>
* Sangmin Lee <lsmin.lee@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef _EXYNOS_DRM_FIMC_H_
#define _EXYNOS_DRM_FIMC_H_
/*
* TODO
* FIMD output interface notifier callback.
*/
#endif /* _EXYNOS_DRM_FIMC_H_ */

View File

@ -121,6 +121,12 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = {
.has_limited_fmt = 1,
};
static struct fimd_driver_data s5pv210_fimd_driver_data = {
.timing_base = 0x0,
.has_shadowcon = 1,
.has_clksel = 1,
};
static struct fimd_driver_data exynos3_fimd_driver_data = {
.timing_base = 0x20000,
.lcdblk_offset = 0x210,
@ -193,6 +199,8 @@ struct fimd_context {
static const struct of_device_id fimd_driver_dt_match[] = {
{ .compatible = "samsung,s3c6400-fimd",
.data = &s3c64xx_fimd_driver_data },
{ .compatible = "samsung,s5pv210-fimd",
.data = &s5pv210_fimd_driver_data },
{ .compatible = "samsung,exynos3250-fimd",
.data = &exynos3_fimd_driver_data },
{ .compatible = "samsung,exynos4210-fimd",

View File

@ -431,37 +431,24 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
return 0;
}
int exynos_drm_gem_fault(struct vm_fault *vmf)
vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
unsigned long pfn;
pgoff_t page_offset;
int ret;
page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
DRM_ERROR("invalid page offset\n");
ret = -EINVAL;
goto out;
return VM_FAULT_SIGBUS;
}
pfn = page_to_pfn(exynos_gem->pages[page_offset]);
ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out:
switch (ret) {
case 0:
case -ERESTARTSYS:
case -EINTR:
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
default:
return VM_FAULT_SIGBUS;
}
return vmf_insert_mixed(vma, vmf->address,
__pfn_to_pfn_t(pfn, PFN_DEV));
}
static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,

View File

@ -13,6 +13,7 @@
#define _EXYNOS_DRM_GEM_H_
#include <drm/drm_gem.h>
#include <linux/mm_types.h>
#define to_exynos_gem(x) container_of(x, struct exynos_drm_gem, base)
@ -111,7 +112,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args);
/* page fault handler and mmap fault address(virtual) to physical memory. */
int exynos_drm_gem_fault(struct vm_fault *vmf);
vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf);
/* set vm_flags and we can change the vm attribute to other one at here. */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);

File diff suppressed because it is too large Load Diff

View File

@ -1,24 +0,0 @@
/*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
*
* Authors:
* Eunchul Kim <chulspro.kim@samsung.com>
* Jinyoung Jeon <jy0.jeon@samsung.com>
* Sangmin Lee <lsmin.lee@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef _EXYNOS_DRM_GSC_H_
#define _EXYNOS_DRM_GSC_H_
/*
* TODO
* FIMD output interface notifier callback.
* Mixer output interface notifier callback.
*/
#endif /* _EXYNOS_DRM_GSC_H_ */

View File

@ -0,0 +1,916 @@
/*
* Copyright (C) 2017 Samsung Electronics Co.Ltd
* Authors:
* Marek Szyprowski <m.szyprowski@samsung.com>
*
* Exynos DRM Image Post Processing (IPP) related functions
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*/
#include <drm/drmP.h>
#include <drm/drm_mode.h>
#include <uapi/drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
static int num_ipp;
static LIST_HEAD(ipp_list);
/**
* exynos_drm_ipp_register - Register a new picture processor hardware module
* @dev: DRM device
* @ipp: ipp module to init
* @funcs: callbacks for the new ipp object
* @caps: bitmask of ipp capabilities (%DRM_EXYNOS_IPP_CAP_*)
* @formats: array of supported formats
* @num_formats: size of the supported formats array
* @name: name (for debugging purposes)
*
* Initializes a ipp module.
*
* Returns:
* Zero on success, error code on failure.
*/
int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp,
const struct exynos_drm_ipp_funcs *funcs, unsigned int caps,
const struct exynos_drm_ipp_formats *formats,
unsigned int num_formats, const char *name)
{
WARN_ON(!ipp);
WARN_ON(!funcs);
WARN_ON(!formats);
WARN_ON(!num_formats);
spin_lock_init(&ipp->lock);
INIT_LIST_HEAD(&ipp->todo_list);
init_waitqueue_head(&ipp->done_wq);
ipp->dev = dev;
ipp->funcs = funcs;
ipp->capabilities = caps;
ipp->name = name;
ipp->formats = formats;
ipp->num_formats = num_formats;
/* ipp_list modification is serialized by component framework */
list_add_tail(&ipp->head, &ipp_list);
ipp->id = num_ipp++;
DRM_DEBUG_DRIVER("Registered ipp %d\n", ipp->id);
return 0;
}
/**
* exynos_drm_ipp_unregister - Unregister the picture processor module
* @dev: DRM device
* @ipp: ipp module
*/
void exynos_drm_ipp_unregister(struct drm_device *dev,
struct exynos_drm_ipp *ipp)
{
WARN_ON(ipp->task);
WARN_ON(!list_empty(&ipp->todo_list));
list_del(&ipp->head);
}
/**
* exynos_drm_ipp_ioctl_get_res_ioctl - enumerate all ipp modules
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* Construct a list of ipp ids.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_exynos_ioctl_ipp_get_res *resp = data;
struct exynos_drm_ipp *ipp;
uint32_t __user *ipp_ptr = (uint32_t __user *)
(unsigned long)resp->ipp_id_ptr;
unsigned int count = num_ipp, copied = 0;
/*
* This ioctl is called twice, once to determine how much space is
* needed, and the 2nd time to fill it.
*/
if (count && resp->count_ipps >= count) {
list_for_each_entry(ipp, &ipp_list, head) {
if (put_user(ipp->id, ipp_ptr + copied))
return -EFAULT;
copied++;
}
}
resp->count_ipps = count;
return 0;
}
/*
 * Look up a registered ipp module by its numeric id.
 * Walks the global ipp_list (modifications are serialized by the
 * component framework); returns NULL when no module has that id.
 */
static inline struct exynos_drm_ipp *__ipp_get(uint32_t id)
{
struct exynos_drm_ipp *ipp;
list_for_each_entry(ipp, &ipp_list, head)
if (ipp->id == id)
return ipp;
/* no registered module with this id */
return NULL;
}
/**
* exynos_drm_ipp_ioctl_get_caps - get ipp module capabilities and formats
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* Construct a structure describing ipp module capabilities.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_exynos_ioctl_ipp_get_caps *resp = data;
void __user *ptr = (void __user *)(unsigned long)resp->formats_ptr;
struct exynos_drm_ipp *ipp;
int i;
ipp = __ipp_get(resp->ipp_id);
if (!ipp)
return -ENOENT;
resp->ipp_id = ipp->id;
resp->capabilities = ipp->capabilities;
/*
* This ioctl is called twice, once to determine how much space is
* needed, and the 2nd time to fill it.
*/
if (resp->formats_count >= ipp->num_formats) {
for (i = 0; i < ipp->num_formats; i++) {
struct drm_exynos_ipp_format tmp = {
.fourcc = ipp->formats[i].fourcc,
.type = ipp->formats[i].type,
.modifier = ipp->formats[i].modifier,
};
if (copy_to_user(ptr, &tmp, sizeof(tmp)))
return -EFAULT;
ptr += sizeof(tmp);
}
}
resp->formats_count = ipp->num_formats;
return 0;
}
/*
 * Find the format descriptor in @ipp's supported-format table that matches
 * the given @fourcc and @mod (format modifier) for direction @type
 * (source/destination bit tested against formats[i].type).
 * Returns NULL when the combination is not supported by this module.
 */
static inline const struct exynos_drm_ipp_formats *__ipp_format_get(
struct exynos_drm_ipp *ipp, uint32_t fourcc,
uint64_t mod, unsigned int type)
{
int i;
for (i = 0; i < ipp->num_formats; i++) {
if ((ipp->formats[i].type & type) &&
ipp->formats[i].fourcc == fourcc &&
ipp->formats[i].modifier == mod)
return &ipp->formats[i];
}
return NULL;
}
/**
* exynos_drm_ipp_get_limits_ioctl - get ipp module limits
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* Construct a structure describing ipp module limitations for provided
* picture format.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_exynos_ioctl_ipp_get_limits *resp = data;
void __user *ptr = (void __user *)(unsigned long)resp->limits_ptr;
const struct exynos_drm_ipp_formats *format;
struct exynos_drm_ipp *ipp;
if (resp->type != DRM_EXYNOS_IPP_FORMAT_SOURCE &&
resp->type != DRM_EXYNOS_IPP_FORMAT_DESTINATION)
return -EINVAL;
ipp = __ipp_get(resp->ipp_id);
if (!ipp)
return -ENOENT;
format = __ipp_format_get(ipp, resp->fourcc, resp->modifier,
resp->type);
if (!format)
return -EINVAL;
/*
* This ioctl is called twice, once to determine how much space is
* needed, and the 2nd time to fill it.
*/
if (format->num_limits && resp->limits_count >= format->num_limits)
if (copy_to_user((void __user *)ptr, format->limits,
sizeof(*format->limits) * format->num_limits))
return -EFAULT;
resp->limits_count = format->num_limits;
return 0;
}
struct drm_pending_exynos_ipp_event {
struct drm_pending_event base;
struct drm_exynos_ipp_event event;
};
/*
 * Allocate and initialize a new ipp task bound to @ipp.
 * Rectangle sizes default to UINT_MAX as a "not set by userspace" sentinel
 * (resolved to the full buffer size later, during task checking) and the
 * transform defaults to no rotation. Returns NULL on allocation failure.
 */
static inline struct exynos_drm_ipp_task *
exynos_drm_ipp_task_alloc(struct exynos_drm_ipp *ipp)
{
struct exynos_drm_ipp_task *task;
task = kzalloc(sizeof(*task), GFP_KERNEL);
if (!task)
return NULL;
task->dev = ipp->dev;
task->ipp = ipp;
/* some defaults */
task->src.rect.w = task->dst.rect.w = UINT_MAX;
task->src.rect.h = task->dst.rect.h = UINT_MAX;
task->transform.rotation = DRM_MODE_ROTATE_0;
DRM_DEBUG_DRIVER("Allocated task %pK\n", task);
return task;
}
static const struct exynos_drm_param_map {
unsigned int id;
unsigned int size;
unsigned int offset;
} exynos_drm_ipp_params_maps[] = {
{
DRM_EXYNOS_IPP_TASK_BUFFER | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
sizeof(struct drm_exynos_ipp_task_buffer),
offsetof(struct exynos_drm_ipp_task, src.buf),
}, {
DRM_EXYNOS_IPP_TASK_BUFFER |
DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
sizeof(struct drm_exynos_ipp_task_buffer),
offsetof(struct exynos_drm_ipp_task, dst.buf),
}, {
DRM_EXYNOS_IPP_TASK_RECTANGLE | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
sizeof(struct drm_exynos_ipp_task_rect),
offsetof(struct exynos_drm_ipp_task, src.rect),
}, {
DRM_EXYNOS_IPP_TASK_RECTANGLE |
DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
sizeof(struct drm_exynos_ipp_task_rect),
offsetof(struct exynos_drm_ipp_task, dst.rect),
}, {
DRM_EXYNOS_IPP_TASK_TRANSFORM,
sizeof(struct drm_exynos_ipp_task_transform),
offsetof(struct exynos_drm_ipp_task, transform),
}, {
DRM_EXYNOS_IPP_TASK_ALPHA,
sizeof(struct drm_exynos_ipp_task_alpha),
offsetof(struct exynos_drm_ipp_task, alpha),
},
};
static int exynos_drm_ipp_task_set(struct exynos_drm_ipp_task *task,
struct drm_exynos_ioctl_ipp_commit *arg)
{
const struct exynos_drm_param_map *map = exynos_drm_ipp_params_maps;
void __user *params = (void __user *)(unsigned long)arg->params_ptr;
unsigned int size = arg->params_size;
uint32_t id;
int i;
while (size) {
if (get_user(id, (uint32_t __user *)params))
return -EFAULT;
for (i = 0; i < ARRAY_SIZE(exynos_drm_ipp_params_maps); i++)
if (map[i].id == id)
break;
if (i == ARRAY_SIZE(exynos_drm_ipp_params_maps) ||
map[i].size > size)
return -EINVAL;
if (copy_from_user((void *)task + map[i].offset, params,
map[i].size))
return -EFAULT;
params += map[i].size;
size -= map[i].size;
}
DRM_DEBUG_DRIVER("Got task %pK configuration from userspace\n", task);
return 0;
}
static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
struct drm_file *filp)
{
int ret = 0;
int i;
/* basic checks */
if (buf->buf.width == 0 || buf->buf.height == 0)
return -EINVAL;
buf->format = drm_format_info(buf->buf.fourcc);
for (i = 0; i < buf->format->num_planes; i++) {
unsigned int width = (i == 0) ? buf->buf.width :
DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
if (buf->buf.pitch[i] == 0)
buf->buf.pitch[i] = width * buf->format->cpp[i];
if (buf->buf.pitch[i] < width * buf->format->cpp[i])
return -EINVAL;
if (!buf->buf.gem_id[i])
return -ENOENT;
}
/* pitch for additional planes must match */
if (buf->format->num_planes > 2 &&
buf->buf.pitch[1] != buf->buf.pitch[2])
return -EINVAL;
/* get GEM buffers and check their size */
for (i = 0; i < buf->format->num_planes; i++) {
unsigned int height = (i == 0) ? buf->buf.height :
DIV_ROUND_UP(buf->buf.height, buf->format->vsub);
unsigned long size = height * buf->buf.pitch[i];
struct drm_gem_object *obj = drm_gem_object_lookup(filp,
buf->buf.gem_id[i]);
if (!obj) {
ret = -ENOENT;
goto gem_free;
}
buf->exynos_gem[i] = to_exynos_gem(obj);
if (size + buf->buf.offset[i] > buf->exynos_gem[i]->size) {
i++;
ret = -EINVAL;
goto gem_free;
}
buf->dma_addr[i] = buf->exynos_gem[i]->dma_addr +
buf->buf.offset[i];
}
return 0;
gem_free:
while (i--) {
drm_gem_object_put_unlocked(&buf->exynos_gem[i]->base);
buf->exynos_gem[i] = NULL;
}
return ret;
}
static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf)
{
int i;
if (!buf->exynos_gem[0])
return;
for (i = 0; i < buf->format->num_planes; i++)
drm_gem_object_put_unlocked(&buf->exynos_gem[i]->base);
}
static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp,
struct exynos_drm_ipp_task *task)
{
DRM_DEBUG_DRIVER("Freeing task %pK\n", task);
exynos_drm_ipp_task_release_buf(&task->src);
exynos_drm_ipp_task_release_buf(&task->dst);
if (task->event)
drm_event_cancel_free(ipp->dev, &task->event->base);
kfree(task);
}
struct drm_ipp_limit {
struct drm_exynos_ipp_limit_val h;
struct drm_exynos_ipp_limit_val v;
};
enum drm_ipp_size_id {
IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX
};
static const enum drm_ipp_size_id limit_id_fallback[IPP_LIMIT_MAX][4] = {
[IPP_LIMIT_BUFFER] = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
[IPP_LIMIT_AREA] = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
[IPP_LIMIT_ROTATED] = { DRM_EXYNOS_IPP_LIMIT_SIZE_ROTATED,
DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
};
/*
 * Store @fallback into *@dst only if *@dst is still unset (zero);
 * an already-assigned (non-zero) limit value is left untouched.
 */
static inline void __limit_set_val(unsigned int *dst, unsigned int fallback)
{
	if (*dst == 0)
		*dst = fallback;
}
static void __get_size_limit(const struct drm_exynos_ipp_limit *limits,
unsigned int num_limits, enum drm_ipp_size_id id,
struct drm_ipp_limit *res)
{
const struct drm_exynos_ipp_limit *l = limits;
int i = 0;
memset(res, 0, sizeof(*res));
for (i = 0; limit_id_fallback[id][i]; i++)
for (l = limits; l - limits < num_limits; l++) {
if (((l->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) !=
DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE) ||
((l->type & DRM_EXYNOS_IPP_LIMIT_SIZE_MASK) !=
limit_id_fallback[id][i]))
continue;
__limit_set_val(&res->h.min, l->h.min);
__limit_set_val(&res->h.max, l->h.max);
__limit_set_val(&res->h.align, l->h.align);
__limit_set_val(&res->v.min, l->v.min);
__limit_set_val(&res->v.max, l->v.max);
__limit_set_val(&res->v.align, l->v.align);
}
}
/*
 * Return true when @val satisfies the hardware alignment @align.
 * An @align of 0 means "no alignment constraint". The (align - 1) mask
 * assumes @align is a power of two -- NOTE(review): true for the limit
 * tables this is used with, but not enforced here; confirm at call sites.
 */
static inline bool __align_check(unsigned int val, unsigned int align)
{
if (align && (val & (align - 1))) {
DRM_DEBUG_DRIVER("Value %d exceeds HW limits (align %d)\n",
val, align);
return false;
}
return true;
}
/*
 * Check @val against the min/max/alignment constraints in @l.
 * A zero min or max means that bound is unlimited and is skipped.
 * Returns true when the value fits all constraints; emits a debug
 * message and returns false otherwise.
 */
static inline bool __size_limit_check(unsigned int val,
struct drm_exynos_ipp_limit_val *l)
{
if ((l->min && val < l->min) || (l->max && val > l->max)) {
DRM_DEBUG_DRIVER("Value %d exceeds HW limits (min %d, max %d)\n",
val, l->min, l->max);
return false;
}
/* min/max ok (or unlimited); alignment is checked last */
return __align_check(val, l->align);
}
static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf,
const struct drm_exynos_ipp_limit *limits, unsigned int num_limits,
bool rotate, bool swap)
{
enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA;
struct drm_ipp_limit l;
struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v;
if (!limits)
return 0;
__get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l);
if (!__size_limit_check(buf->buf.width, &l.h) ||
!__size_limit_check(buf->buf.height, &l.v))
return -EINVAL;
if (swap) {
lv = &l.h;
lh = &l.v;
}
__get_size_limit(limits, num_limits, id, &l);
if (!__size_limit_check(buf->rect.w, lh) ||
!__align_check(buf->rect.x, lh->align) ||
!__size_limit_check(buf->rect.h, lv) ||
!__align_check(buf->rect.y, lv->align))
return -EINVAL;
return 0;
}
/*
 * Verify the @src -> @dst scaling ratio against hardware limits.
 * @min and @max are ratios in 16.16 fixed point (hence the << 16 when
 * comparing and the >> 16 / & 0xffff split in the debug print); a zero
 * bound means that direction of scaling is unlimited.
 * Returns true when the ratio is within limits.
 */
static inline bool __scale_limit_check(unsigned int src, unsigned int dst,
unsigned int min, unsigned int max)
{
if ((max && (dst << 16) > src * max) ||
(min && (dst << 16) < src * min)) {
DRM_DEBUG_DRIVER("Scale from %d to %d exceeds HW limits (ratio min %d.%05d, max %d.%05d)\n",
src, dst,
min >> 16, 100000 * (min & 0xffff) / (1 << 16),
max >> 16, 100000 * (max & 0xffff) / (1 << 16));
return false;
}
return true;
}
static int exynos_drm_ipp_check_scale_limits(
struct drm_exynos_ipp_task_rect *src,
struct drm_exynos_ipp_task_rect *dst,
const struct drm_exynos_ipp_limit *limits,
unsigned int num_limits, bool swap)
{
const struct drm_exynos_ipp_limit_val *lh, *lv;
int dw, dh;
for (; num_limits; limits++, num_limits--)
if ((limits->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) ==
DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE)
break;
if (!num_limits)
return 0;
lh = (!swap) ? &limits->h : &limits->v;
lv = (!swap) ? &limits->v : &limits->h;
dw = (!swap) ? dst->w : dst->h;
dh = (!swap) ? dst->h : dst->w;
if (!__scale_limit_check(src->w, dw, lh->min, lh->max) ||
!__scale_limit_check(src->h, dh, lv->min, lv->max))
return -EINVAL;
return 0;
}
/*
 * exynos_drm_ipp_task_check - validate a fully-described task against the
 * picture processor's capabilities and per-format limits
 * @task: ipp task to validate
 *
 * Returns 0 when the requested operation can be performed by the hardware,
 * -EINVAL otherwise.
 */
static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
{
	struct exynos_drm_ipp *ipp = task->ipp;
	const struct exynos_drm_ipp_formats *src_fmt, *dst_fmt;
	struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
	unsigned int rotation = task->transform.rotation;
	int ret = 0;
	/* 90/270 degree rotation exchanges width and height on the output */
	bool swap = drm_rotation_90_or_270(rotation);
	bool rotate = (rotation != DRM_MODE_ROTATE_0);
	bool scale = false;

	DRM_DEBUG_DRIVER("Checking task %pK\n", task);

	/* a rect dimension of UINT_MAX selects the full buffer extent */
	if (src->rect.w == UINT_MAX)
		src->rect.w = src->buf.width;
	if (src->rect.h == UINT_MAX)
		src->rect.h = src->buf.height;
	if (dst->rect.w == UINT_MAX)
		dst->rect.w = dst->buf.width;
	if (dst->rect.h == UINT_MAX)
		dst->rect.h = dst->buf.height;

	/* crop rectangles must fit inside their buffers */
	if (src->rect.x + src->rect.w > (src->buf.width) ||
	    src->rect.y + src->rect.h > (src->buf.height) ||
	    dst->rect.x + dst->rect.w > (dst->buf.width) ||
	    dst->rect.y + dst->rect.h > (dst->buf.height)) {
		DRM_DEBUG_DRIVER("Task %pK: defined area is outside provided buffers\n",
				 task);
		return -EINVAL;
	}

	/* any src/dst size mismatch (after rotation) implies scaling */
	if ((!swap && (src->rect.w != dst->rect.w ||
		       src->rect.h != dst->rect.h)) ||
	    (swap && (src->rect.w != dst->rect.h ||
		      src->rect.h != dst->rect.w)))
		scale = true;

	/* reject operations the hardware did not advertise */
	if ((!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CROP) &&
	     (src->rect.x || src->rect.y || dst->rect.x || dst->rect.y)) ||
	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_ROTATE) && rotate) ||
	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_SCALE) && scale) ||
	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CONVERT) &&
	     src->buf.fourcc != dst->buf.fourcc)) {
		DRM_DEBUG_DRIVER("Task %pK: hw capabilities exceeded\n", task);
		return -EINVAL;
	}

	/* source format: size limits then scaling-ratio limits */
	src_fmt = __ipp_format_get(ipp, src->buf.fourcc, src->buf.modifier,
				   DRM_EXYNOS_IPP_FORMAT_SOURCE);
	if (!src_fmt) {
		DRM_DEBUG_DRIVER("Task %pK: src format not supported\n", task);
		return -EINVAL;
	}
	ret = exynos_drm_ipp_check_size_limits(src, src_fmt->limits,
					       src_fmt->num_limits,
					       rotate, false);
	if (ret)
		return ret;
	ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
						src_fmt->limits,
						src_fmt->num_limits, swap);
	if (ret)
		return ret;

	/* destination format: same checks against its own limit table */
	dst_fmt = __ipp_format_get(ipp, dst->buf.fourcc, dst->buf.modifier,
				   DRM_EXYNOS_IPP_FORMAT_DESTINATION);
	if (!dst_fmt) {
		DRM_DEBUG_DRIVER("Task %pK: dst format not supported\n", task);
		return -EINVAL;
	}
	ret = exynos_drm_ipp_check_size_limits(dst, dst_fmt->limits,
					       dst_fmt->num_limits,
					       false, swap);
	if (ret)
		return ret;
	ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
						dst_fmt->limits,
						dst_fmt->num_limits, swap);
	if (ret)
		return ret;

	DRM_DEBUG_DRIVER("Task %pK: all checks done.\n", task);

	return ret;
}
/*
 * exynos_drm_ipp_task_setup_buffers - prepare the source and destination
 * buffers of a task for hardware processing
 * @task: ipp task whose buffers should be set up
 * @filp: DRM file the buffer handles belong to
 *
 * Returns 0 on success or the error reported by the per-buffer setup.
 */
static int exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task *task,
					     struct drm_file *filp)
{
	int ret;

	DRM_DEBUG_DRIVER("Setting buffer for task %pK\n", task);

	ret = exynos_drm_ipp_task_setup_buffer(&task->src, filp);
	if (ret) {
		DRM_DEBUG_DRIVER("Task %pK: src buffer setup failed\n", task);
		return ret;
	}

	ret = exynos_drm_ipp_task_setup_buffer(&task->dst, filp);
	if (ret) {
		DRM_DEBUG_DRIVER("Task %pK: dst buffer setup failed\n", task);
		return ret;
	}

	DRM_DEBUG_DRIVER("Task %pK: buffers prepared.\n", task);

	return 0;
}
/*
 * exynos_drm_ipp_event_create - allocate and reserve a completion event
 * for the given task
 * @task: ipp task the event belongs to
 * @file_priv: DRM file the event will be delivered to
 * @user_data: opaque cookie copied back to userspace with the event
 *
 * Returns 0 on success, -ENOMEM on allocation failure or the error
 * returned by drm_event_reserve_init().
 */
static int exynos_drm_ipp_event_create(struct exynos_drm_ipp_task *task,
				 struct drm_file *file_priv, uint64_t user_data)
{
	struct drm_pending_exynos_ipp_event *e;
	int ret;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = user_data;

	ret = drm_event_reserve_init(task->dev, file_priv, &e->base,
				     &e->event.base);
	if (ret) {
		kfree(e);
		return ret;
	}

	task->event = e;

	return 0;
}
static void exynos_drm_ipp_event_send(struct exynos_drm_ipp_task *task)
{
struct timespec64 now;
ktime_get_ts64(&now);
task->event->event.tv_sec = now.tv_sec;
task->event->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
task->event->event.sequence = atomic_inc_return(&task->ipp->sequence);
drm_send_event(task->dev, &task->event->base);
}
/*
 * exynos_drm_ipp_task_cleanup - deliver the completion event (on success)
 * and free the task
 * @task: finished ipp task
 *
 * Returns the task's stored result code.
 */
static int exynos_drm_ipp_task_cleanup(struct exynos_drm_ipp_task *task)
{
	int ret = task->ret;

	if (!ret && task->event) {
		exynos_drm_ipp_event_send(task);
		/* ensure event won't be canceled on task free */
		task->event = NULL;
	}

	exynos_drm_ipp_task_free(task->ipp, task);

	return ret;
}
/* Deferred cleanup for asynchronous tasks, scheduled from task_done(). */
static void exynos_drm_ipp_cleanup_work(struct work_struct *work)
{
	struct exynos_drm_ipp_task *task;

	task = container_of(work, struct exynos_drm_ipp_task, cleanup_work);
	exynos_drm_ipp_task_cleanup(task);
}
static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp);
/**
 * exynos_drm_ipp_task_done - finish given task and set return code
 * @task: ipp task to finish
 * @ret: error code or 0 if operation has been performed successfully
 */
void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret)
{
	struct exynos_drm_ipp *ipp = task->ipp;
	unsigned long flags;

	DRM_DEBUG_DRIVER("ipp: %d, task %pK done: %d\n", ipp->id, task, ret);

	spin_lock_irqsave(&ipp->lock, flags);
	/* detach the task from the device if it is the one being processed */
	if (ipp->task == task)
		ipp->task = NULL;
	task->flags |= DRM_EXYNOS_IPP_TASK_DONE;
	task->ret = ret;
	spin_unlock_irqrestore(&ipp->lock, flags);

	/* the hardware is idle now; try to start the next queued task */
	exynos_drm_ipp_next_task(ipp);
	/* wake up a synchronous waiter in exynos_drm_ipp_commit_ioctl() */
	wake_up(&ipp->done_wq);

	/* asynchronous tasks have no waiter; free them from a worker */
	if (task->flags & DRM_EXYNOS_IPP_TASK_ASYNC) {
		INIT_WORK(&task->cleanup_work, exynos_drm_ipp_cleanup_work);
		schedule_work(&task->cleanup_work);
	}
}
/*
 * exynos_drm_ipp_next_task - commit the next queued task to the hardware
 * if the device is idle
 * @ipp: picture processor module
 */
static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp)
{
	struct exynos_drm_ipp_task *task;
	unsigned long flags;
	int ret;

	DRM_DEBUG_DRIVER("ipp: %d, try to run new task\n", ipp->id);

	spin_lock_irqsave(&ipp->lock, flags);
	/* nothing to do while a task is running or the queue is empty */
	if (ipp->task || list_empty(&ipp->todo_list)) {
		spin_unlock_irqrestore(&ipp->lock, flags);
		return;
	}

	/* dequeue and mark the task as the one being processed */
	task = list_first_entry(&ipp->todo_list, struct exynos_drm_ipp_task,
				head);
	list_del_init(&task->head);
	ipp->task = task;
	spin_unlock_irqrestore(&ipp->lock, flags);

	DRM_DEBUG_DRIVER("ipp: %d, selected task %pK to run\n", ipp->id, task);

	/* a failed commit completes the task immediately with the error */
	ret = ipp->funcs->commit(ipp, task);
	if (ret)
		exynos_drm_ipp_task_done(task, ret);
}
/*
 * exynos_drm_ipp_schedule_task - queue a task for processing and try to
 * start it right away
 * @ipp: picture processor module
 * @task: validated ipp task to queue
 */
static void exynos_drm_ipp_schedule_task(struct exynos_drm_ipp *ipp,
					 struct exynos_drm_ipp_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&ipp->lock, flags);
	list_add(&task->head, &ipp->todo_list);
	spin_unlock_irqrestore(&ipp->lock, flags);

	exynos_drm_ipp_next_task(ipp);
}
/*
 * exynos_drm_ipp_task_abort - cancel a queued or running task
 * @ipp: picture processor module
 * @task: task to abort
 *
 * NOTE(review): in the first two branches the cleanup (event delivery and
 * task free) runs with ipp->lock held — confirm none of the callees may
 * sleep in this configuration.
 */
static void exynos_drm_ipp_task_abort(struct exynos_drm_ipp *ipp,
				      struct exynos_drm_ipp_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&ipp->lock, flags);
	if (task->flags & DRM_EXYNOS_IPP_TASK_DONE) {
		/* already completed task */
		exynos_drm_ipp_task_cleanup(task);
	} else if (ipp->task != task) {
		/* task has not been scheduled for execution yet */
		list_del_init(&task->head);
		exynos_drm_ipp_task_cleanup(task);
	} else {
		/*
		 * currently processed task, call abort() and perform
		 * cleanup with async worker
		 */
		task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
		spin_unlock_irqrestore(&ipp->lock, flags);
		if (ipp->funcs->abort)
			ipp->funcs->abort(ipp, task);
		return;
	}
	spin_unlock_irqrestore(&ipp->lock, flags);
}
/**
 * exynos_drm_ipp_commit_ioctl - perform image processing operation
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Construct a ipp task from the set of properties provided from the user
 * and try to schedule it to framebuffer processor hardware.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_ioctl_ipp_commit *arg = data;
	struct exynos_drm_ipp *ipp;
	struct exynos_drm_ipp_task *task;
	int ret = 0;

	if ((arg->flags & ~DRM_EXYNOS_IPP_FLAGS) || arg->reserved)
		return -EINVAL;

	/* can't test and expect an event at the same time */
	if ((arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY) &&
	    (arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT))
		return -EINVAL;

	ipp = __ipp_get(arg->ipp_id);
	if (!ipp)
		return -ENOENT;

	task = exynos_drm_ipp_task_alloc(ipp);
	if (!task)
		return -ENOMEM;

	/* translate userspace properties into the task description */
	ret = exynos_drm_ipp_task_set(task, arg);
	if (ret)
		goto free;

	ret = exynos_drm_ipp_task_check(task);
	if (ret)
		goto free;

	/* TEST_ONLY stops after validation/buffer setup and frees the task */
	ret = exynos_drm_ipp_task_setup_buffers(task, file_priv);
	if (ret || arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY)
		goto free;

	if (arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT) {
		ret = exynos_drm_ipp_event_create(task, file_priv,
						  arg->user_data);
		if (ret)
			goto free;
	}

	/*
	 * Queue task for processing on the hardware. task object will be
	 * then freed after exynos_drm_ipp_task_done()
	 */
	if (arg->flags & DRM_EXYNOS_IPP_FLAG_NONBLOCK) {
		DRM_DEBUG_DRIVER("ipp: %d, nonblocking processing task %pK\n",
				 ipp->id, task);
		/* cleanup will happen from a worker once the task finishes */
		task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
		exynos_drm_ipp_schedule_task(task->ipp, task);
		ret = 0;
	} else {
		DRM_DEBUG_DRIVER("ipp: %d, processing task %pK\n", ipp->id,
				 task);
		exynos_drm_ipp_schedule_task(ipp, task);
		/* wait until exynos_drm_ipp_task_done() marks the task done */
		ret = wait_event_interruptible(ipp->done_wq,
					       task->flags & DRM_EXYNOS_IPP_TASK_DONE);
		/* on signal interruption, abort; otherwise report the result */
		if (ret)
			exynos_drm_ipp_task_abort(ipp, task);
		else
			ret = exynos_drm_ipp_task_cleanup(task);
	}

	return ret;

free:
	exynos_drm_ipp_task_free(ipp, task);

	return ret;
}

View File

@ -0,0 +1,175 @@
/*
* Copyright (c) 2017 Samsung Electronics Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef _EXYNOS_DRM_IPP_H_
#define _EXYNOS_DRM_IPP_H_
#include <drm/drmP.h>
struct exynos_drm_ipp;
struct exynos_drm_ipp_task;
/**
 * struct exynos_drm_ipp_funcs - exynos_drm_ipp control functions
 */
struct exynos_drm_ipp_funcs {
	/**
	 * @commit:
	 *
	 * This is the main entry point to start framebuffer processing
	 * in the hardware. The exynos_drm_ipp_task has been already validated.
	 * This function must not wait until the device finishes processing.
	 * When the driver finishes processing, it has to call
	 * exynos_drm_ipp_task_done() function.
	 *
	 * RETURNS:
	 *
	 * 0 on success or negative error codes in case of failure.
	 */
	int (*commit)(struct exynos_drm_ipp *ipp,
		      struct exynos_drm_ipp_task *task);

	/**
	 * @abort:
	 *
	 * Informs the driver that it has to abort the currently running
	 * task as soon as possible (i.e. as soon as it can stop the device
	 * safely), even if the task would not have been finished by then.
	 * After the driver performs the necessary steps, it has to call
	 * exynos_drm_ipp_task_done() (as if the task ended normally).
	 * This function does not have to (and will usually not) wait
	 * until the device enters a state when it can be stopped.
	 */
	void (*abort)(struct exynos_drm_ipp *ipp,
		      struct exynos_drm_ipp_task *task);
};
/**
 * struct exynos_drm_ipp - central picture processor module structure
 * @dev: DRM device this processor is registered with
 * @head: list link — presumably chains the module into a driver-wide list
 *        of registered processors (registration code not visible here)
 * @id: numeric id of the module, used in debug messages
 * @name: name of the module (e.g. "rotator")
 * @funcs: commit/abort hardware callbacks
 * @capabilities: DRM_EXYNOS_IPP_CAP_* flags checked during task validation
 * @formats: array of supported formats and their limits
 * @num_formats: number of entries in @formats
 * @sequence: counter used to number userspace completion events
 * @lock: protects @task and @todo_list
 * @task: task currently processed by the hardware (NULL when idle)
 * @todo_list: tasks queued for processing
 * @done_wq: wait queue woken up when a task finishes
 */
struct exynos_drm_ipp {
	struct drm_device *dev;
	struct list_head head;
	unsigned int id;

	const char *name;
	const struct exynos_drm_ipp_funcs *funcs;
	unsigned int capabilities;
	const struct exynos_drm_ipp_formats *formats;
	unsigned int num_formats;
	atomic_t sequence;

	spinlock_t lock;
	struct exynos_drm_ipp_task *task;
	struct list_head todo_list;
	wait_queue_head_t done_wq;
};
/**
 * struct exynos_drm_ipp_buffer - description of one side (src or dst) of
 * an image processing operation
 * @buf: userspace-provided buffer description (fourcc, modifier, size,
 *       pitches)
 * @rect: crop rectangle inside the buffer
 * @exynos_gem: GEM objects backing each plane
 * @format: DRM format info resolved from @buf's fourcc
 * @dma_addr: DMA address of each plane
 */
struct exynos_drm_ipp_buffer {
	struct drm_exynos_ipp_task_buffer buf;
	struct drm_exynos_ipp_task_rect rect;

	struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
	const struct drm_format_info *format;
	dma_addr_t dma_addr[MAX_FB_BUFFER];
};
/**
 * struct exynos_drm_ipp_task - a structure describing transformation that
 * has to be performed by the picture processor hardware module
 * @dev: DRM device
 * @ipp: picture processor module this task is assigned to
 * @head: link in the processor's todo_list
 * @src: source buffer
 * @dst: destination buffer
 * @transform: rotation/flip to apply
 * @alpha: alpha parameters — NOTE(review): not referenced by the code
 *         visible in this file; confirm semantics against the uapi header
 * @cleanup_work: worker used to free asynchronous tasks after completion
 * @flags: DRM_EXYNOS_IPP_TASK_* state flags
 * @ret: result code of the operation
 * @event: optional completion event delivered to userspace
 */
struct exynos_drm_ipp_task {
	struct drm_device *dev;
	struct exynos_drm_ipp *ipp;
	struct list_head head;

	struct exynos_drm_ipp_buffer src;
	struct exynos_drm_ipp_buffer dst;

	struct drm_exynos_ipp_task_transform transform;
	struct drm_exynos_ipp_task_alpha alpha;

	struct work_struct cleanup_work;
	unsigned int flags;
	int ret;

	struct drm_pending_exynos_ipp_event *event;
};
/* task state flags (struct exynos_drm_ipp_task.flags) */
#define DRM_EXYNOS_IPP_TASK_DONE	(1 << 0) /* processing finished */
#define DRM_EXYNOS_IPP_TASK_ASYNC	(1 << 1) /* cleanup via worker */

/**
 * struct exynos_drm_ipp_formats - description of a format supported by
 * the hardware module
 * @fourcc: DRM fourcc code of the format
 * @type: DRM_EXYNOS_IPP_FORMAT_SOURCE and/or _DESTINATION usage flags
 * @modifier: DRM format modifier
 * @limits: array of size/scale limits valid for this format
 * @num_limits: number of entries in @limits
 */
struct exynos_drm_ipp_formats {
	uint32_t fourcc;
	uint32_t type;
	uint64_t modifier;
	const struct drm_exynos_ipp_limit *limits;
	unsigned int num_limits;
};
/* helper macros to set exynos_drm_ipp_formats structure and limits */

/*
 * Initializer for an exynos_drm_ipp_formats entry usable both as source
 * and destination, with an explicit format modifier.
 */
#define IPP_SRCDST_MFORMAT(f, m, l) \
	.fourcc = DRM_FORMAT_##f, .modifier = m, .limits = l, \
	.num_limits = ARRAY_SIZE(l), \
	.type = (DRM_EXYNOS_IPP_FORMAT_SOURCE | \
		 DRM_EXYNOS_IPP_FORMAT_DESTINATION)

/* Same as IPP_SRCDST_MFORMAT() with a zero (linear) modifier. */
#define IPP_SRCDST_FORMAT(f, l) IPP_SRCDST_MFORMAT(f, 0, l)

/* Initializer for a size limit entry of the given kind (e.g. BUFFER). */
#define IPP_SIZE_LIMIT(l, val...) \
	.type = (DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE | \
		 DRM_EXYNOS_IPP_LIMIT_SIZE_##l), val

/* Initializer for a scaling-ratio limit entry. */
#define IPP_SCALE_LIMIT(val...) \
	.type = (DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE), val
int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp,
const struct exynos_drm_ipp_funcs *funcs, unsigned int caps,
const struct exynos_drm_ipp_formats *formats,
unsigned int num_formats, const char *name);
void exynos_drm_ipp_unregister(struct drm_device *dev,
struct exynos_drm_ipp *ipp);
void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret);
#ifdef CONFIG_DRM_EXYNOS_IPP
/* Real ioctl handlers, provided by the IPP core. */
int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);
int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int exynos_drm_ipp_commit_ioctl(struct drm_device *dev,
				void *data, struct drm_file *file_priv);
#else
/*
 * Stubs used when the IPP core is not built: report zero available
 * processors and fail the remaining ioctls with -ENODEV.
 */
static inline int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev,
	 void *data, struct drm_file *file_priv)
{
	struct drm_exynos_ioctl_ipp_get_res *resp = data;

	resp->count_ipps = 0;
	return 0;
}
static inline int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev,
	 void *data, struct drm_file *file_priv)
{
	return -ENODEV;
}
static inline int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev,
						  void *data, struct drm_file *file_priv)
{
	return -ENODEV;
}
static inline int exynos_drm_ipp_commit_ioctl(struct drm_device *dev,
					      void *data, struct drm_file *file_priv)
{
	return -ENODEV;
}
#endif
#endif

View File

@ -10,6 +10,7 @@
*/
#include <linux/kernel.h>
#include <linux/component.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@ -22,29 +23,18 @@
#include <drm/exynos_drm.h>
#include "regs-rotator.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_iommu.h"
#include "exynos_drm_ipp.h"
/*
* Rotator supports image crop/rotator and input/output DMA operations.
* input DMA reads image data from the memory.
* output DMA writes image data to memory.
*
* M2M operation : supports crop/scale/rotation/csc so on.
* Memory ----> Rotator H/W ----> Memory.
*/
/*
* TODO
* 1. check suspend/resume api if needed.
* 2. need to check use case platform_device_id.
* 3. check src/dst size with, height.
* 4. need to add supported list in prop_list.
*/
#define ROTATOR_AUTOSUSPEND_DELAY 2000
#define get_rot_context(dev) platform_get_drvdata(to_platform_device(dev))
#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
struct rot_context, ippdrv);
#define rot_read(offset) readl(rot->regs + (offset))
#define rot_read(offset) readl(rot->regs + (offset))
#define rot_write(cfg, offset) writel(cfg, rot->regs + (offset))
enum rot_irq_status {
@ -52,54 +42,28 @@ enum rot_irq_status {
ROT_IRQ_STATUS_ILLEGAL = 9,
};
/*
* A structure of limitation.
*
* @min_w: minimum width.
* @min_h: minimum height.
* @max_w: maximum width.
* @max_h: maximum height.
* @align: align size.
*/
struct rot_limit {
u32 min_w;
u32 min_h;
u32 max_w;
u32 max_h;
u32 align;
};
/*
* A structure of limitation table.
*
* @ycbcr420_2p: case of YUV.
* @rgb888: case of RGB.
*/
struct rot_limit_table {
struct rot_limit ycbcr420_2p;
struct rot_limit rgb888;
struct rot_variant {
const struct exynos_drm_ipp_formats *formats;
unsigned int num_formats;
};
/*
* A structure of rotator context.
* @ippdrv: prepare initialization using ippdrv.
* @regs_res: register resources.
* @regs: memory mapped io registers.
* @clock: rotator gate clock.
* @limit_tbl: limitation of rotator.
* @irq: irq number.
* @cur_buf_id: current operation buffer id.
* @suspended: suspended state.
*/
struct rot_context {
struct exynos_drm_ippdrv ippdrv;
struct resource *regs_res;
struct exynos_drm_ipp ipp;
struct drm_device *drm_dev;
struct device *dev;
void __iomem *regs;
struct clk *clock;
struct rot_limit_table *limit_tbl;
int irq;
int cur_buf_id[EXYNOS_DRM_OPS_MAX];
bool suspended;
const struct exynos_drm_ipp_formats *formats;
unsigned int num_formats;
struct exynos_drm_ipp_task *task;
};
static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
@ -114,15 +78,6 @@ static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
rot_write(val, ROT_CONFIG);
}
static u32 rotator_reg_get_fmt(struct rot_context *rot)
{
u32 val = rot_read(ROT_CONTROL);
val &= ROT_CONTROL_FMT_MASK;
return val;
}
static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
{
u32 val = rot_read(ROT_STATUS);
@ -138,9 +93,6 @@ static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
static irqreturn_t rotator_irq_handler(int irq, void *arg)
{
struct rot_context *rot = arg;
struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
struct drm_exynos_ipp_event_work *event_work = c_node->event_work;
enum rot_irq_status irq_status;
u32 val;
@ -152,56 +104,21 @@ static irqreturn_t rotator_irq_handler(int irq, void *arg)
val |= ROT_STATUS_IRQ_PENDING((u32)irq_status);
rot_write(val, ROT_STATUS);
if (irq_status == ROT_IRQ_STATUS_COMPLETE) {
event_work->ippdrv = ippdrv;
event_work->buf_id[EXYNOS_DRM_OPS_DST] =
rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
queue_work(ippdrv->event_workq, &event_work->work);
} else {
DRM_ERROR("the SFR is set illegally\n");
if (rot->task) {
struct exynos_drm_ipp_task *task = rot->task;
rot->task = NULL;
pm_runtime_mark_last_busy(rot->dev);
pm_runtime_put_autosuspend(rot->dev);
exynos_drm_ipp_task_done(task,
irq_status == ROT_IRQ_STATUS_COMPLETE ? 0 : -EINVAL);
}
return IRQ_HANDLED;
}
static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize,
u32 *vsize)
static void rotator_src_set_fmt(struct rot_context *rot, u32 fmt)
{
struct rot_limit_table *limit_tbl = rot->limit_tbl;
struct rot_limit *limit;
u32 mask, val;
/* Get size limit */
if (fmt == ROT_CONTROL_FMT_RGB888)
limit = &limit_tbl->rgb888;
else
limit = &limit_tbl->ycbcr420_2p;
/* Get mask for rounding to nearest aligned val */
mask = ~((1 << limit->align) - 1);
/* Set aligned width */
val = ROT_ALIGN(*hsize, limit->align, mask);
if (val < limit->min_w)
*hsize = ROT_MIN(limit->min_w, mask);
else if (val > limit->max_w)
*hsize = ROT_MAX(limit->max_w, mask);
else
*hsize = val;
/* Set aligned height */
val = ROT_ALIGN(*vsize, limit->align, mask);
if (val < limit->min_h)
*vsize = ROT_MIN(limit->min_h, mask);
else if (val > limit->max_h)
*vsize = ROT_MAX(limit->max_h, mask);
else
*vsize = val;
}
static int rotator_src_set_fmt(struct device *dev, u32 fmt)
{
struct rot_context *rot = dev_get_drvdata(dev);
u32 val;
val = rot_read(ROT_CONTROL);
@ -214,515 +131,176 @@ static int rotator_src_set_fmt(struct device *dev, u32 fmt)
case DRM_FORMAT_XRGB8888:
val |= ROT_CONTROL_FMT_RGB888;
break;
default:
DRM_ERROR("invalid image format\n");
return -EINVAL;
}
rot_write(val, ROT_CONTROL);
return 0;
}
static inline bool rotator_check_reg_fmt(u32 fmt)
static void rotator_src_set_buf(struct rot_context *rot,
struct exynos_drm_ipp_buffer *buf)
{
if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) ||
(fmt == ROT_CONTROL_FMT_RGB888))
return true;
return false;
}
static int rotator_src_set_size(struct device *dev, int swap,
struct drm_exynos_pos *pos,
struct drm_exynos_sz *sz)
{
struct rot_context *rot = dev_get_drvdata(dev);
u32 fmt, hsize, vsize;
u32 val;
/* Get format */
fmt = rotator_reg_get_fmt(rot);
if (!rotator_check_reg_fmt(fmt)) {
DRM_ERROR("invalid format.\n");
return -EINVAL;
}
/* Align buffer size */
hsize = sz->hsize;
vsize = sz->vsize;
rotator_align_size(rot, fmt, &hsize, &vsize);
/* Set buffer size configuration */
val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
val = ROT_SET_BUF_SIZE_H(buf->buf.height) |
ROT_SET_BUF_SIZE_W(buf->buf.pitch[0] / buf->format->cpp[0]);
rot_write(val, ROT_SRC_BUF_SIZE);
/* Set crop image position configuration */
val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
val = ROT_CROP_POS_Y(buf->rect.y) | ROT_CROP_POS_X(buf->rect.x);
rot_write(val, ROT_SRC_CROP_POS);
val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w);
val = ROT_SRC_CROP_SIZE_H(buf->rect.h) |
ROT_SRC_CROP_SIZE_W(buf->rect.w);
rot_write(val, ROT_SRC_CROP_SIZE);
return 0;
/* Set buffer DMA address */
rot_write(buf->dma_addr[0], ROT_SRC_BUF_ADDR(0));
rot_write(buf->dma_addr[1], ROT_SRC_BUF_ADDR(1));
}
static int rotator_src_set_addr(struct device *dev,
struct drm_exynos_ipp_buf_info *buf_info,
u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
static void rotator_dst_set_transf(struct rot_context *rot,
unsigned int rotation)
{
struct rot_context *rot = dev_get_drvdata(dev);
dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
u32 val, fmt, hsize, vsize;
int i;
/* Set current buf_id */
rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id;
switch (buf_type) {
case IPP_BUF_ENQUEUE:
/* Set address configuration */
for_each_ipp_planar(i)
addr[i] = buf_info->base[i];
/* Get format */
fmt = rotator_reg_get_fmt(rot);
if (!rotator_check_reg_fmt(fmt)) {
DRM_ERROR("invalid format.\n");
return -EINVAL;
}
/* Re-set cb planar for NV12 format */
if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
!addr[EXYNOS_DRM_PLANAR_CB]) {
val = rot_read(ROT_SRC_BUF_SIZE);
hsize = ROT_GET_BUF_SIZE_W(val);
vsize = ROT_GET_BUF_SIZE_H(val);
/* Set cb planar */
addr[EXYNOS_DRM_PLANAR_CB] =
addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
}
for_each_ipp_planar(i)
rot_write(addr[i], ROT_SRC_BUF_ADDR(i));
break;
case IPP_BUF_DEQUEUE:
for_each_ipp_planar(i)
rot_write(0x0, ROT_SRC_BUF_ADDR(i));
break;
default:
/* Nothing to do */
break;
}
return 0;
}
static int rotator_dst_set_transf(struct device *dev,
enum drm_exynos_degree degree,
enum drm_exynos_flip flip, bool *swap)
{
struct rot_context *rot = dev_get_drvdata(dev);
u32 val;
/* Set transform configuration */
val = rot_read(ROT_CONTROL);
val &= ~ROT_CONTROL_FLIP_MASK;
switch (flip) {
case EXYNOS_DRM_FLIP_VERTICAL:
val |= ROT_CONTROL_FLIP_VERTICAL;
break;
case EXYNOS_DRM_FLIP_HORIZONTAL:
if (rotation & DRM_MODE_REFLECT_X)
val |= ROT_CONTROL_FLIP_HORIZONTAL;
break;
default:
/* Flip None */
break;
}
if (rotation & DRM_MODE_REFLECT_Y)
val |= ROT_CONTROL_FLIP_VERTICAL;
val &= ~ROT_CONTROL_ROT_MASK;
switch (degree) {
case EXYNOS_DRM_DEGREE_90:
if (rotation & DRM_MODE_ROTATE_90)
val |= ROT_CONTROL_ROT_90;
break;
case EXYNOS_DRM_DEGREE_180:
else if (rotation & DRM_MODE_ROTATE_180)
val |= ROT_CONTROL_ROT_180;
break;
case EXYNOS_DRM_DEGREE_270:
else if (rotation & DRM_MODE_ROTATE_270)
val |= ROT_CONTROL_ROT_270;
break;
default:
/* Rotation 0 Degree */
break;
}
rot_write(val, ROT_CONTROL);
/* Check degree for setting buffer size swap */
if ((degree == EXYNOS_DRM_DEGREE_90) ||
(degree == EXYNOS_DRM_DEGREE_270))
*swap = true;
else
*swap = false;
return 0;
}
static int rotator_dst_set_size(struct device *dev, int swap,
struct drm_exynos_pos *pos,
struct drm_exynos_sz *sz)
static void rotator_dst_set_buf(struct rot_context *rot,
struct exynos_drm_ipp_buffer *buf)
{
struct rot_context *rot = dev_get_drvdata(dev);
u32 val, fmt, hsize, vsize;
/* Get format */
fmt = rotator_reg_get_fmt(rot);
if (!rotator_check_reg_fmt(fmt)) {
DRM_ERROR("invalid format.\n");
return -EINVAL;
}
/* Align buffer size */
hsize = sz->hsize;
vsize = sz->vsize;
rotator_align_size(rot, fmt, &hsize, &vsize);
u32 val;
/* Set buffer size configuration */
val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
val = ROT_SET_BUF_SIZE_H(buf->buf.height) |
ROT_SET_BUF_SIZE_W(buf->buf.pitch[0] / buf->format->cpp[0]);
rot_write(val, ROT_DST_BUF_SIZE);
/* Set crop image position configuration */
val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
val = ROT_CROP_POS_Y(buf->rect.y) | ROT_CROP_POS_X(buf->rect.x);
rot_write(val, ROT_DST_CROP_POS);
return 0;
/* Set buffer DMA address */
rot_write(buf->dma_addr[0], ROT_DST_BUF_ADDR(0));
rot_write(buf->dma_addr[1], ROT_DST_BUF_ADDR(1));
}
static int rotator_dst_set_addr(struct device *dev,
struct drm_exynos_ipp_buf_info *buf_info,
u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
static void rotator_start(struct rot_context *rot)
{
struct rot_context *rot = dev_get_drvdata(dev);
dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
u32 val, fmt, hsize, vsize;
int i;
/* Set current buf_id */
rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
switch (buf_type) {
case IPP_BUF_ENQUEUE:
/* Set address configuration */
for_each_ipp_planar(i)
addr[i] = buf_info->base[i];
/* Get format */
fmt = rotator_reg_get_fmt(rot);
if (!rotator_check_reg_fmt(fmt)) {
DRM_ERROR("invalid format.\n");
return -EINVAL;
}
/* Re-set cb planar for NV12 format */
if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
!addr[EXYNOS_DRM_PLANAR_CB]) {
/* Get buf size */
val = rot_read(ROT_DST_BUF_SIZE);
hsize = ROT_GET_BUF_SIZE_W(val);
vsize = ROT_GET_BUF_SIZE_H(val);
/* Set cb planar */
addr[EXYNOS_DRM_PLANAR_CB] =
addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
}
for_each_ipp_planar(i)
rot_write(addr[i], ROT_DST_BUF_ADDR(i));
break;
case IPP_BUF_DEQUEUE:
for_each_ipp_planar(i)
rot_write(0x0, ROT_DST_BUF_ADDR(i));
break;
default:
/* Nothing to do */
break;
}
return 0;
}
static struct exynos_drm_ipp_ops rot_src_ops = {
.set_fmt = rotator_src_set_fmt,
.set_size = rotator_src_set_size,
.set_addr = rotator_src_set_addr,
};
static struct exynos_drm_ipp_ops rot_dst_ops = {
.set_transf = rotator_dst_set_transf,
.set_size = rotator_dst_set_size,
.set_addr = rotator_dst_set_addr,
};
static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
{
struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list;
prop_list->version = 1;
prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
(1 << EXYNOS_DRM_FLIP_HORIZONTAL);
prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
(1 << EXYNOS_DRM_DEGREE_90) |
(1 << EXYNOS_DRM_DEGREE_180) |
(1 << EXYNOS_DRM_DEGREE_270);
prop_list->csc = 0;
prop_list->crop = 0;
prop_list->scale = 0;
return 0;
}
static inline bool rotator_check_drm_fmt(u32 fmt)
{
switch (fmt) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_NV12:
return true;
default:
DRM_DEBUG_KMS("not support format\n");
return false;
}
}
static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
{
switch (flip) {
case EXYNOS_DRM_FLIP_NONE:
case EXYNOS_DRM_FLIP_VERTICAL:
case EXYNOS_DRM_FLIP_HORIZONTAL:
case EXYNOS_DRM_FLIP_BOTH:
return true;
default:
DRM_DEBUG_KMS("invalid flip\n");
return false;
}
}
static int rotator_ippdrv_check_property(struct device *dev,
struct drm_exynos_ipp_property *property)
{
struct drm_exynos_ipp_config *src_config =
&property->config[EXYNOS_DRM_OPS_SRC];
struct drm_exynos_ipp_config *dst_config =
&property->config[EXYNOS_DRM_OPS_DST];
struct drm_exynos_pos *src_pos = &src_config->pos;
struct drm_exynos_pos *dst_pos = &dst_config->pos;
struct drm_exynos_sz *src_sz = &src_config->sz;
struct drm_exynos_sz *dst_sz = &dst_config->sz;
bool swap = false;
/* Check format configuration */
if (src_config->fmt != dst_config->fmt) {
DRM_DEBUG_KMS("not support csc feature\n");
return -EINVAL;
}
if (!rotator_check_drm_fmt(dst_config->fmt)) {
DRM_DEBUG_KMS("invalid format\n");
return -EINVAL;
}
/* Check transform configuration */
if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
DRM_DEBUG_KMS("not support source-side rotation\n");
return -EINVAL;
}
switch (dst_config->degree) {
case EXYNOS_DRM_DEGREE_90:
case EXYNOS_DRM_DEGREE_270:
swap = true;
case EXYNOS_DRM_DEGREE_0:
case EXYNOS_DRM_DEGREE_180:
/* No problem */
break;
default:
DRM_DEBUG_KMS("invalid degree\n");
return -EINVAL;
}
if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
DRM_DEBUG_KMS("not support source-side flip\n");
return -EINVAL;
}
if (!rotator_check_drm_flip(dst_config->flip)) {
DRM_DEBUG_KMS("invalid flip\n");
return -EINVAL;
}
/* Check size configuration */
if ((src_pos->x + src_pos->w > src_sz->hsize) ||
(src_pos->y + src_pos->h > src_sz->vsize)) {
DRM_DEBUG_KMS("out of source buffer bound\n");
return -EINVAL;
}
if (swap) {
if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
(dst_pos->y + dst_pos->w > dst_sz->hsize)) {
DRM_DEBUG_KMS("out of destination buffer bound\n");
return -EINVAL;
}
if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
DRM_DEBUG_KMS("not support scale feature\n");
return -EINVAL;
}
} else {
if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
(dst_pos->y + dst_pos->h > dst_sz->vsize)) {
DRM_DEBUG_KMS("out of destination buffer bound\n");
return -EINVAL;
}
if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
DRM_DEBUG_KMS("not support scale feature\n");
return -EINVAL;
}
}
return 0;
}
static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
{
struct rot_context *rot = dev_get_drvdata(dev);
u32 val;
if (rot->suspended) {
DRM_ERROR("suspended state\n");
return -EPERM;
}
if (cmd != IPP_CMD_M2M) {
DRM_ERROR("not support cmd: %d\n", cmd);
return -EINVAL;
}
/* Set interrupt enable */
rotator_reg_set_irq(rot, true);
val = rot_read(ROT_CONTROL);
val |= ROT_CONTROL_START;
rot_write(val, ROT_CONTROL);
}
/*
 * rotator_commit - start processing of a validated IPP task
 * @ipp: picture processor module (embedded in struct rot_context)
 * @task: validated task to execute
 *
 * Powers up the rotator block, programs the source/destination buffers
 * and the rotation mode, then kicks off the hardware. Completion is
 * signalled asynchronously via exynos_drm_ipp_task_done().
 *
 * Returns 0 on success or a negative errno when the device could not be
 * powered up.
 */
static int rotator_commit(struct exynos_drm_ipp *ipp,
			  struct exynos_drm_ipp_task *task)
{
	struct rot_context *rot =
			container_of(ipp, struct rot_context, ipp);
	int ret;

	/*
	 * pm_runtime_get_sync() raises the usage count even on failure,
	 * so the reference must be dropped before bailing out; otherwise
	 * the device would never runtime-suspend again.
	 */
	ret = pm_runtime_get_sync(rot->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(rot->dev);
		return ret;
	}

	rot->task = task;
	rotator_src_set_fmt(rot, task->src.buf.fourcc);
	rotator_src_set_buf(rot, &task->src);
	rotator_dst_set_transf(rot, task->transform.rotation);
	rotator_dst_set_buf(rot, &task->dst);
	rotator_start(rot);

	return 0;
}
static struct rot_limit_table rot_limit_tbl_4210 = {
.ycbcr420_2p = {
.min_w = 32,
.min_h = 32,
.max_w = SZ_64K,
.max_h = SZ_64K,
.align = 3,
},
.rgb888 = {
.min_w = 8,
.min_h = 8,
.max_w = SZ_16K,
.max_h = SZ_16K,
.align = 2,
},
/* the rotator implements only commit; the optional abort is not provided */
static const struct exynos_drm_ipp_funcs ipp_funcs = {
	.commit = rotator_commit,
};
static struct rot_limit_table rot_limit_tbl_4x12 = {
.ycbcr420_2p = {
.min_w = 32,
.min_h = 32,
.max_w = SZ_32K,
.max_h = SZ_32K,
.align = 3,
},
.rgb888 = {
.min_w = 8,
.min_h = 8,
.max_w = SZ_8K,
.max_h = SZ_8K,
.align = 2,
},
};
static int rotator_bind(struct device *dev, struct device *master, void *data)
{
struct rot_context *rot = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct exynos_drm_ipp *ipp = &rot->ipp;
static struct rot_limit_table rot_limit_tbl_5250 = {
.ycbcr420_2p = {
.min_w = 32,
.min_h = 32,
.max_w = SZ_32K,
.max_h = SZ_32K,
.align = 3,
},
.rgb888 = {
.min_w = 8,
.min_h = 8,
.max_w = SZ_8K,
.max_h = SZ_8K,
.align = 1,
},
};
rot->drm_dev = drm_dev;
drm_iommu_attach_device(drm_dev, dev);
static const struct of_device_id exynos_rotator_match[] = {
{
.compatible = "samsung,exynos4210-rotator",
.data = &rot_limit_tbl_4210,
},
{
.compatible = "samsung,exynos4212-rotator",
.data = &rot_limit_tbl_4x12,
},
{
.compatible = "samsung,exynos5250-rotator",
.data = &rot_limit_tbl_5250,
},
{},
exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
rot->formats, rot->num_formats, "rotator");
dev_info(dev, "The exynos rotator has been probed successfully\n");
return 0;
}
static void rotator_unbind(struct device *dev, struct device *master,
void *data)
{
struct rot_context *rot = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
struct exynos_drm_ipp *ipp = &rot->ipp;
exynos_drm_ipp_unregister(drm_dev, ipp);
drm_iommu_detach_device(rot->drm_dev, rot->dev);
}
static const struct component_ops rotator_component_ops = {
.bind = rotator_bind,
.unbind = rotator_unbind,
};
MODULE_DEVICE_TABLE(of, exynos_rotator_match);
static int rotator_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *regs_res;
struct rot_context *rot;
struct exynos_drm_ippdrv *ippdrv;
const struct rot_variant *variant;
int irq;
int ret;
if (!dev->of_node) {
dev_err(dev, "cannot find of_node.\n");
return -ENODEV;
}
rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
if (!rot)
return -ENOMEM;
rot->limit_tbl = (struct rot_limit_table *)
of_device_get_match_data(dev);
rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rot->regs = devm_ioremap_resource(dev, rot->regs_res);
variant = of_device_get_match_data(dev);
rot->formats = variant->formats;
rot->num_formats = variant->num_formats;
rot->dev = dev;
regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rot->regs = devm_ioremap_resource(dev, regs_res);
if (IS_ERR(rot->regs))
return PTR_ERR(rot->regs);
rot->irq = platform_get_irq(pdev, 0);
if (rot->irq < 0) {
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "failed to get irq\n");
return rot->irq;
return irq;
}
ret = devm_request_threaded_irq(dev, rot->irq, NULL,
rotator_irq_handler, IRQF_ONESHOT, "drm_rotator", rot);
ret = devm_request_irq(dev, irq, rotator_irq_handler, 0, dev_name(dev),
rot);
if (ret < 0) {
dev_err(dev, "failed to request irq\n");
return ret;
@ -734,35 +312,19 @@ static int rotator_probe(struct platform_device *pdev)
return PTR_ERR(rot->clock);
}
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, ROTATOR_AUTOSUSPEND_DELAY);
pm_runtime_enable(dev);
ippdrv = &rot->ippdrv;
ippdrv->dev = dev;
ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
ippdrv->check_property = rotator_ippdrv_check_property;
ippdrv->start = rotator_ippdrv_start;
ret = rotator_init_prop_list(ippdrv);
if (ret < 0) {
dev_err(dev, "failed to init property list.\n");
goto err_ippdrv_register;
}
DRM_DEBUG_KMS("ippdrv[%pK]\n", ippdrv);
platform_set_drvdata(pdev, rot);
ret = exynos_drm_ippdrv_register(ippdrv);
if (ret < 0) {
dev_err(dev, "failed to register drm rotator device\n");
goto err_ippdrv_register;
}
dev_info(dev, "The exynos rotator is probed successfully\n");
ret = component_add(dev, &rotator_component_ops);
if (ret)
goto err_component;
return 0;
err_ippdrv_register:
err_component:
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
return ret;
}
@ -770,45 +332,101 @@ err_ippdrv_register:
/*
 * rotator_remove() - tear down what probe set up: unregister from the
 * component framework and disable runtime PM.
 *
 * The original retained legacy lines fetching &rot->ippdrv and calling
 * exynos_drm_ippdrv_unregister() — both removed with the IPP v1 API.
 */
static int rotator_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	component_del(dev, &rotator_component_ops);
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);
	return 0;
}
#ifdef CONFIG_PM
/*
 * Runtime PM: gate the rotator clock while the device is idle.
 *
 * The original kept the dead rotator_clk_crtl() helper (it wrote the
 * removed rot->suspended member) and each handler carried two return
 * statements from a botched merge; only the new bodies remain.
 */
static int rotator_runtime_suspend(struct device *dev)
{
	struct rot_context *rot = dev_get_drvdata(dev);

	clk_disable_unprepare(rot->clock);
	return 0;
}

static int rotator_runtime_resume(struct device *dev)
{
	struct rot_context *rot = dev_get_drvdata(dev);

	return clk_prepare_enable(rot->clock);
}
#endif
/*
 * Per-SoC size and alignment constraints reported to the IPP core.
 * BUFFER limits bound the whole framebuffer; AREA limits give the
 * required pixel alignment of the processed rectangle.
 */
static const struct drm_exynos_ipp_limit rotator_4210_rbg888_limits[] = {
	{ IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_16K }, .v = { 8, SZ_16K }) },
	{ IPP_SIZE_LIMIT(AREA, .h.align = 4, .v.align = 4) },
};

static const struct drm_exynos_ipp_limit rotator_4412_rbg888_limits[] = {
	{ IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_8K }, .v = { 8, SZ_8K }) },
	{ IPP_SIZE_LIMIT(AREA, .h.align = 4, .v.align = 4) },
};

static const struct drm_exynos_ipp_limit rotator_5250_rbg888_limits[] = {
	{ IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_8K }, .v = { 8, SZ_8K }) },
	{ IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 2) },
};

static const struct drm_exynos_ipp_limit rotator_4210_yuv_limits[] = {
	{ IPP_SIZE_LIMIT(BUFFER, .h = { 32, SZ_64K }, .v = { 32, SZ_64K }) },
	{ IPP_SIZE_LIMIT(AREA, .h.align = 8, .v.align = 8) },
};

static const struct drm_exynos_ipp_limit rotator_4412_yuv_limits[] = {
	{ IPP_SIZE_LIMIT(BUFFER, .h = { 32, SZ_32K }, .v = { 32, SZ_32K }) },
	{ IPP_SIZE_LIMIT(AREA, .h.align = 8, .v.align = 8) },
};

/* Format lists per SoC generation ("rbg888" is a pre-existing identifier typo). */
static const struct exynos_drm_ipp_formats rotator_4210_formats[] = {
	{ IPP_SRCDST_FORMAT(XRGB8888, rotator_4210_rbg888_limits) },
	{ IPP_SRCDST_FORMAT(NV12, rotator_4210_yuv_limits) },
};

static const struct exynos_drm_ipp_formats rotator_4412_formats[] = {
	{ IPP_SRCDST_FORMAT(XRGB8888, rotator_4412_rbg888_limits) },
	{ IPP_SRCDST_FORMAT(NV12, rotator_4412_yuv_limits) },
};

static const struct exynos_drm_ipp_formats rotator_5250_formats[] = {
	{ IPP_SRCDST_FORMAT(XRGB8888, rotator_5250_rbg888_limits) },
	/* 5250 reuses the 4412 YUV limits */
	{ IPP_SRCDST_FORMAT(NV12, rotator_4412_yuv_limits) },
};

static const struct rot_variant rotator_4210_data = {
	.formats = rotator_4210_formats,
	.num_formats = ARRAY_SIZE(rotator_4210_formats),
};

static const struct rot_variant rotator_4412_data = {
	.formats = rotator_4412_formats,
	.num_formats = ARRAY_SIZE(rotator_4412_formats),
};

static const struct rot_variant rotator_5250_data = {
	.formats = rotator_5250_formats,
	.num_formats = ARRAY_SIZE(rotator_5250_formats),
};

/* OF match table; .data selects the per-SoC variant above. */
static const struct of_device_id exynos_rotator_match[] = {
	{
		.compatible = "samsung,exynos4210-rotator",
		.data = &rotator_4210_data,
	}, {
		.compatible = "samsung,exynos4212-rotator",
		.data = &rotator_4412_data,
	}, {
		.compatible = "samsung,exynos5250-rotator",
		.data = &rotator_5250_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, exynos_rotator_match);
static const struct dev_pm_ops rotator_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
@ -820,7 +438,7 @@ struct platform_driver rotator_driver = {
.probe = rotator_probe,
.remove = rotator_remove,
.driver = {
.name = "exynos-rot",
.name = "exynos-rotator",
.owner = THIS_MODULE,
.pm = &rotator_pm_ops,
.of_match_table = exynos_rotator_match,

View File

@ -0,0 +1,694 @@
/*
* Copyright (C) 2017 Samsung Electronics Co.Ltd
* Author:
* Andrzej Pietrasiewicz <andrzej.p@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/component.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "regs-scaler.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_iommu.h"
#include "exynos_drm_ipp.h"
#define scaler_read(offset) readl(scaler->regs + (offset))
#define scaler_write(cfg, offset) writel(cfg, scaler->regs + (offset))
#define SCALER_MAX_CLK 4
#define SCALER_AUTOSUSPEND_DELAY 2000
/*
 * Per-SoC description of a scaler instance: the clocks to acquire and
 * the format/limit table advertised to the IPP core.
 */
struct scaler_data {
	const char	*clk_name[SCALER_MAX_CLK];	/* names passed to devm_clk_get() */
	unsigned int	num_clk;			/* valid entries in clk_name[] */
	const struct exynos_drm_ipp_formats *formats;
	unsigned int	num_formats;
};
/* Driver state for one scaler device instance. */
struct scaler_context {
	struct exynos_drm_ipp		ipp;		/* IPP entity registered with the core */
	struct drm_device		*drm_dev;	/* DRM device we are bound to */
	struct device			*dev;
	void __iomem			*regs;		/* mapped register window */
	struct clk			*clock[SCALER_MAX_CLK];
	struct exynos_drm_ipp_task	*task;		/* in-flight task, NULL when idle */
	const struct scaler_data	*scaler_data;	/* per-SoC OF match data */
};
/*
 * Map a DRM fourcc to the matching SCALER_SRC/DST_CFG color-format code.
 * Unknown fourccs fall through to 0 — which is also the valid code for
 * DRM_FORMAT_NV21 — so callers must only pass formats taken from the
 * driver's advertised format list.
 */
static u32 scaler_get_format(u32 drm_fmt)
{
	switch (drm_fmt) {
	case DRM_FORMAT_NV21:
		return SCALER_YUV420_2P_UV;
	case DRM_FORMAT_NV12:
		return SCALER_YUV420_2P_VU;
	case DRM_FORMAT_YUV420:
		return SCALER_YUV420_3P;
	case DRM_FORMAT_YUYV:
		return SCALER_YUV422_1P_YUYV;
	case DRM_FORMAT_UYVY:
		return SCALER_YUV422_1P_UYVY;
	case DRM_FORMAT_YVYU:
		return SCALER_YUV422_1P_YVYU;
	case DRM_FORMAT_NV61:
		return SCALER_YUV422_2P_UV;
	case DRM_FORMAT_NV16:
		return SCALER_YUV422_2P_VU;
	case DRM_FORMAT_YUV422:
		return SCALER_YUV422_3P;
	case DRM_FORMAT_NV42:
		return SCALER_YUV444_2P_UV;
	case DRM_FORMAT_NV24:
		return SCALER_YUV444_2P_VU;
	case DRM_FORMAT_YUV444:
		return SCALER_YUV444_3P;
	case DRM_FORMAT_RGB565:
		return SCALER_RGB_565;
	/* X variants share the hardware code with their alpha siblings */
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		return SCALER_ARGB1555;
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_ARGB4444:
		return SCALER_ARGB4444;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return SCALER_ARGB8888;
	case DRM_FORMAT_RGBX8888:
	case DRM_FORMAT_RGBA8888:
		return SCALER_RGBA8888;
	default:
		return 0;
	}
}
/*
 * scaler_enable_int() - unmask scaler interrupts.
 *
 * Enables the frame-end interrupt plus every "illegal configuration"
 * error source, so a bad job terminates with an error irq instead of
 * hanging. Called right before each job is started.
 */
static inline void scaler_enable_int(struct scaler_context *scaler)
{
	u32 val;

	val = SCALER_INT_EN_TIMEOUT |
		SCALER_INT_EN_ILLEGAL_BLEND |
		SCALER_INT_EN_ILLEGAL_RATIO |
		SCALER_INT_EN_ILLEGAL_DST_HEIGHT |
		SCALER_INT_EN_ILLEGAL_DST_WIDTH |
		SCALER_INT_EN_ILLEGAL_DST_V_POS |
		SCALER_INT_EN_ILLEGAL_DST_H_POS |
		SCALER_INT_EN_ILLEGAL_DST_C_SPAN |
		SCALER_INT_EN_ILLEGAL_DST_Y_SPAN |
		SCALER_INT_EN_ILLEGAL_DST_CR_BASE |
		SCALER_INT_EN_ILLEGAL_DST_CB_BASE |
		SCALER_INT_EN_ILLEGAL_DST_Y_BASE |
		SCALER_INT_EN_ILLEGAL_DST_COLOR |
		SCALER_INT_EN_ILLEGAL_SRC_HEIGHT |
		SCALER_INT_EN_ILLEGAL_SRC_WIDTH |
		SCALER_INT_EN_ILLEGAL_SRC_CV_POS |
		SCALER_INT_EN_ILLEGAL_SRC_CH_POS |
		SCALER_INT_EN_ILLEGAL_SRC_YV_POS |
		SCALER_INT_EN_ILLEGAL_SRC_YH_POS |
		SCALER_INT_EN_ILLEGAL_DST_SPAN |
		SCALER_INT_EN_ILLEGAL_SRC_Y_SPAN |
		SCALER_INT_EN_ILLEGAL_SRC_CR_BASE |
		SCALER_INT_EN_ILLEGAL_SRC_CB_BASE |
		SCALER_INT_EN_ILLEGAL_SRC_Y_BASE |
		SCALER_INT_EN_ILLEGAL_SRC_COLOR |
		SCALER_INT_EN_FRAME_END;
	scaler_write(val, SCALER_INT_EN);
}
/* Program the source pixel-format code (see scaler_get_format()). */
static inline void scaler_set_src_fmt(struct scaler_context *scaler,
				      u32 src_fmt)
{
	u32 val;

	val = SCALER_SRC_CFG_SET_COLOR_FORMAT(src_fmt);
	scaler_write(val, SCALER_SRC_CFG);
}

/* Write the DMA address of each source plane (Y/CB/CR as applicable). */
static inline void scaler_set_src_base(struct scaler_context *scaler,
				       struct exynos_drm_ipp_buffer *src_buf)
{
	static unsigned int bases[] = {
		SCALER_SRC_Y_BASE,
		SCALER_SRC_CB_BASE,
		SCALER_SRC_CR_BASE,
	};
	int i;

	for (i = 0; i < src_buf->format->num_planes; ++i)
		scaler_write(src_buf->dma_addr[i], bases[i]);
}

/* Program line pitches: luma span in pixels, chroma span from plane 1. */
static inline void scaler_set_src_span(struct scaler_context *scaler,
				       struct exynos_drm_ipp_buffer *src_buf)
{
	u32 val;

	val = SCALER_SRC_SPAN_SET_Y_SPAN(src_buf->buf.pitch[0] /
		src_buf->format->cpp[0]);

	if (src_buf->format->num_planes > 1)
		val |= SCALER_SRC_SPAN_SET_C_SPAN(src_buf->buf.pitch[1]);

	scaler_write(val, SCALER_SRC_SPAN);
}

/* Source crop origin; hardware expects 1/4-pixel units (hence << 2). */
static inline void scaler_set_src_luma_pos(struct scaler_context *scaler,
					   struct drm_exynos_ipp_task_rect *src_pos)
{
	u32 val;

	val = SCALER_SRC_Y_POS_SET_YH_POS(src_pos->x << 2);
	val |= SCALER_SRC_Y_POS_SET_YV_POS(src_pos->y << 2);
	scaler_write(val, SCALER_SRC_Y_POS);
	/*
	 * NOTE(review): the luma position value is written verbatim to the
	 * chroma position register as well (original comment: "ATTENTION!").
	 * Presumably valid for the supported subsampling modes — confirm.
	 */
	scaler_write(val, SCALER_SRC_C_POS);
}

/* Source crop width/height in pixels. */
static inline void scaler_set_src_wh(struct scaler_context *scaler,
				     struct drm_exynos_ipp_task_rect *src_pos)
{
	u32 val;

	val = SCALER_SRC_WH_SET_WIDTH(src_pos->w);
	val |= SCALER_SRC_WH_SET_HEIGHT(src_pos->h);
	scaler_write(val, SCALER_SRC_WH);
}
/* Program the destination pixel-format code (see scaler_get_format()). */
static inline void scaler_set_dst_fmt(struct scaler_context *scaler,
				      u32 dst_fmt)
{
	u32 val;

	val = SCALER_DST_CFG_SET_COLOR_FORMAT(dst_fmt);
	scaler_write(val, SCALER_DST_CFG);
}

/* Write the DMA address of each destination plane. */
static inline void scaler_set_dst_base(struct scaler_context *scaler,
				       struct exynos_drm_ipp_buffer *dst_buf)
{
	static unsigned int bases[] = {
		SCALER_DST_Y_BASE,
		SCALER_DST_CB_BASE,
		SCALER_DST_CR_BASE,
	};
	int i;

	for (i = 0; i < dst_buf->format->num_planes; ++i)
		scaler_write(dst_buf->dma_addr[i], bases[i]);
}

/* Destination pitches: luma span in pixels, chroma span from plane 1. */
static inline void scaler_set_dst_span(struct scaler_context *scaler,
				       struct exynos_drm_ipp_buffer *dst_buf)
{
	u32 val;

	val = SCALER_DST_SPAN_SET_Y_SPAN(dst_buf->buf.pitch[0] /
		dst_buf->format->cpp[0]);

	if (dst_buf->format->num_planes > 1)
		val |= SCALER_DST_SPAN_SET_C_SPAN(dst_buf->buf.pitch[1]);

	scaler_write(val, SCALER_DST_SPAN);
}

/*
 * NOTE(review): despite its name this helper programs the destination
 * WIDTH/HEIGHT register (SCALER_DST_WH), while scaler_set_dst_wh()
 * below programs the POSITION register — the two names appear swapped.
 * Both are always called back-to-back from scaler_commit(), so the
 * resulting hardware state is correct; renaming is a cosmetic follow-up.
 */
static inline void scaler_set_dst_luma_pos(struct scaler_context *scaler,
					   struct drm_exynos_ipp_task_rect *dst_pos)
{
	u32 val;

	val = SCALER_DST_WH_SET_WIDTH(dst_pos->w);
	val |= SCALER_DST_WH_SET_HEIGHT(dst_pos->h);
	scaler_write(val, SCALER_DST_WH);
}

/* See NOTE above: writes the destination x/y offset (SCALER_DST_POS). */
static inline void scaler_set_dst_wh(struct scaler_context *scaler,
				     struct drm_exynos_ipp_task_rect *dst_pos)
{
	u32 val;

	val = SCALER_DST_POS_SET_H_POS(dst_pos->x);
	val |= SCALER_DST_POS_SET_V_POS(dst_pos->y);
	scaler_write(val, SCALER_DST_POS);
}
/*
 * Program the 16.16 fixed-point scaling ratios (src/dst, so values
 * above 1.0 mean down-scaling). For 90/270-degree rotation the source
 * axes are swapped relative to the destination rectangle.
 */
static inline void scaler_set_hv_ratio(struct scaler_context *scaler,
				       unsigned int rotation,
				       struct drm_exynos_ipp_task_rect *src_pos,
				       struct drm_exynos_ipp_task_rect *dst_pos)
{
	u32 h_ratio, v_ratio;

	if (drm_rotation_90_or_270(rotation)) {
		h_ratio = (src_pos->h << 16) / dst_pos->w;
		v_ratio = (src_pos->w << 16) / dst_pos->h;
	} else {
		h_ratio = (src_pos->w << 16) / dst_pos->w;
		v_ratio = (src_pos->h << 16) / dst_pos->h;
	}

	scaler_write(SCALER_H_RATIO_SET(h_ratio), SCALER_H_RATIO);
	scaler_write(SCALER_V_RATIO_SET(v_ratio), SCALER_V_RATIO);
}
/* Translate DRM rotation/reflection bits into SCALER_ROT_CFG. */
static inline void scaler_set_rotation(struct scaler_context *scaler,
				       unsigned int rotation)
{
	u32 cfg = 0;

	/* rotation angles are mutually exclusive; test in fixed order */
	if (rotation & DRM_MODE_ROTATE_90)
		cfg = SCALER_ROT_CFG_SET_ROTMODE(SCALER_ROT_MODE_90);
	else if (rotation & DRM_MODE_ROTATE_180)
		cfg = SCALER_ROT_CFG_SET_ROTMODE(SCALER_ROT_MODE_180);
	else if (rotation & DRM_MODE_ROTATE_270)
		cfg = SCALER_ROT_CFG_SET_ROTMODE(SCALER_ROT_MODE_270);

	if (rotation & DRM_MODE_REFLECT_X)
		cfg |= SCALER_ROT_CFG_FLIP_X_EN;
	if (rotation & DRM_MODE_REFLECT_Y)
		cfg |= SCALER_ROT_CFG_FLIP_Y_EN;

	scaler_write(cfg, SCALER_ROT_CFG);
}
/*
 * Load the 3x3 color-space-conversion matrix. Direction is chosen by
 * the *source* format: RGB sources select the RGB->YCbCr matrix, all
 * other (YUV) sources the YCbCr->RGB one.
 */
static inline void scaler_set_csc(struct scaler_context *scaler,
				  const struct drm_format_info *fmt)
{
	static const u32 csc_mtx[2][3][3] = {
		{ /* YCbCr to RGB */
			{0x254, 0x000, 0x331},
			{0x254, 0xf38, 0xe60},
			{0x254, 0x409, 0x000},
		},
		{ /* RGB to YCbCr */
			{0x084, 0x102, 0x032},
			{0xfb4, 0xf6b, 0x0e1},
			{0x0e1, 0xf44, 0xfdc},
		},
	};
	int i, j, dir;

	switch (fmt->format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_ARGB4444:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_RGBX8888:
	case DRM_FORMAT_RGBA8888:
		dir = 1;
		break;
	default:
		/* every other supported fourcc is a YUV variant */
		dir = 0;
	}

	/* note the transposed indexing: coefficient (j, i) gets row i, col j */
	for (i = 0; i < 3; i++)
		for (j = 0; j < 3; j++)
			scaler_write(csc_mtx[dir][i][j], SCALER_CSC_COEF(j, i));
}
/* Arm the hardware timeout counter with the given tick value and divider. */
static inline void scaler_set_timer(struct scaler_context *scaler,
				    unsigned int timer, unsigned int divider)
{
	u32 cfg = SCALER_TIMEOUT_CTRL_TIMER_ENABLE |
		  SCALER_TIMEOUT_CTRL_SET_TIMER_VALUE(timer) |
		  SCALER_TIMEOUT_CTRL_SET_TIMER_DIV(divider);

	scaler_write(cfg, SCALER_TIMEOUT_CTRL);
}

/* Kick off the programmed job; completion is signalled via interrupt. */
static inline void scaler_start_hw(struct scaler_context *scaler)
{
	scaler_write(SCALER_CFG_START_CMD, SCALER_CFG);
}
/*
 * scaler_commit() - program and start a single conversion task.
 *
 * Called by the IPP core with an already-validated task, so both
 * fourccs are guaranteed to come from the driver's format list. Takes
 * a runtime-PM reference that scaler_irq_handler() drops when the job
 * completes. Always returns 0.
 */
static int scaler_commit(struct exynos_drm_ipp *ipp,
			 struct exynos_drm_ipp_task *task)
{
	struct scaler_context *scaler =
			container_of(ipp, struct scaler_context, ipp);

	u32 src_fmt = scaler_get_format(task->src.buf.fourcc);
	struct drm_exynos_ipp_task_rect *src_pos = &task->src.rect;

	u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc);
	struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect;

	/* stashed for completion handling in the irq handler */
	scaler->task = task;

	pm_runtime_get_sync(scaler->dev);

	scaler_set_src_fmt(scaler, src_fmt);
	scaler_set_src_base(scaler, &task->src);
	scaler_set_src_span(scaler, &task->src);
	scaler_set_src_luma_pos(scaler, src_pos);
	scaler_set_src_wh(scaler, src_pos);

	scaler_set_dst_fmt(scaler, dst_fmt);
	scaler_set_dst_base(scaler, &task->dst);
	scaler_set_dst_span(scaler, &task->dst);
	scaler_set_dst_luma_pos(scaler, dst_pos);
	scaler_set_dst_wh(scaler, dst_pos);

	scaler_set_hv_ratio(scaler, task->transform.rotation, src_pos, dst_pos);
	scaler_set_rotation(scaler, task->transform.rotation);

	/* CSC direction is derived from the source format */
	scaler_set_csc(scaler, task->src.format);

	scaler_set_timer(scaler, 0xffff, 0xf);

	scaler_enable_int(scaler);
	scaler_start_hw(scaler);

	return 0;
}
/* IPP entity operations: the scaler only needs commit. */
static struct exynos_drm_ipp_funcs ipp_funcs = {
	.commit = scaler_commit,
};

/* Mask all scaler interrupts; done from the irq handler on completion. */
static inline void scaler_disable_int(struct scaler_context *scaler)
{
	scaler_write(0, SCALER_INT_EN);
}

static inline u32 scaler_get_int_status(struct scaler_context *scaler)
{
	return scaler_read(SCALER_INT_STATUS);
}
/*
 * Convert the interrupt status into a task completion code: 0 on
 * FRAME_END, -EINVAL for any error interrupt.
 *
 * Fix: this helper was declared 'bool' while returning 0/-EINVAL, so
 * the -EINVAL error code was silently converted to 'true' (1) before
 * reaching exynos_drm_ipp_task_done(); it must return int.
 */
static inline int scaler_task_done(u32 val)
{
	return val & SCALER_INT_STATUS_FRAME_END ? 0 : -EINVAL;
}
/*
 * Threaded irq handler: read and clear the interrupt state, complete
 * the in-flight task (if any) and drop the runtime-PM reference taken
 * in scaler_commit().
 */
static irqreturn_t scaler_irq_handler(int irq, void *arg)
{
	struct scaler_context *scaler = arg;

	u32 val = scaler_get_int_status(scaler);

	scaler_disable_int(scaler);

	if (scaler->task) {
		struct exynos_drm_ipp_task *task = scaler->task;

		/* clear before completion so a new commit can be accepted */
		scaler->task = NULL;
		pm_runtime_mark_last_busy(scaler->dev);
		pm_runtime_put_autosuspend(scaler->dev);
		exynos_drm_ipp_task_done(task, scaler_task_done(val));
	}

	return IRQ_HANDLED;
}
/*
 * Component bind: attach to the DRM device's IOMMU and register the
 * scaler as an IPP entity with crop/rotate/scale/convert capabilities.
 *
 * NOTE(review): the result of drm_iommu_attach_device() is ignored
 * here — confirm whether an attach failure should be propagated.
 */
static int scaler_bind(struct device *dev, struct device *master, void *data)
{
	struct scaler_context *scaler = dev_get_drvdata(dev);
	struct drm_device *drm_dev = data;
	struct exynos_drm_ipp *ipp = &scaler->ipp;

	scaler->drm_dev = drm_dev;
	drm_iommu_attach_device(drm_dev, dev);

	exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
			DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
			DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT,
			scaler->scaler_data->formats,
			scaler->scaler_data->num_formats, "scaler");

	dev_info(dev, "The exynos scaler has been probed successfully\n");

	return 0;
}
/* Component unbind: drop the IPP entity and detach from the IOMMU. */
static void scaler_unbind(struct device *dev, struct device *master,
			  void *data)
{
	struct scaler_context *scaler = dev_get_drvdata(dev);
	struct drm_device *drm_dev = data;
	struct exynos_drm_ipp *ipp = &scaler->ipp;

	exynos_drm_ipp_unregister(drm_dev, ipp);
	drm_iommu_detach_device(scaler->drm_dev, scaler->dev);
}

/* Component-framework glue for the exynos DRM master device. */
static const struct component_ops scaler_component_ops = {
	.bind = scaler_bind,
	.unbind = scaler_unbind,
};
/*
 * scaler_probe() - acquire device resources (regs, irq, per-SoC clocks),
 * enable runtime PM with autosuspend and register with the component
 * framework; DRM-side setup happens later in scaler_bind().
 *
 * Fixes: the error label was still named 'err_ippdrv_register' — a
 * leftover from the removed IPP v1 driver — and the clock-loop index
 * was a signed int compared against the unsigned num_clk.
 */
static int scaler_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *regs_res;
	struct scaler_context *scaler;
	unsigned int i;
	int irq;
	int ret;

	scaler = devm_kzalloc(dev, sizeof(*scaler), GFP_KERNEL);
	if (!scaler)
		return -ENOMEM;

	/* per-SoC clock names and format tables from the OF match data */
	scaler->scaler_data =
		(struct scaler_data *)of_device_get_match_data(dev);

	scaler->dev = dev;
	regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	scaler->regs = devm_ioremap_resource(dev, regs_res);
	if (IS_ERR(scaler->regs))
		return PTR_ERR(scaler->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "failed to get irq\n");
		return irq;
	}

	ret = devm_request_threaded_irq(dev, irq, NULL, scaler_irq_handler,
					IRQF_ONESHOT, "drm_scaler", scaler);
	if (ret < 0) {
		dev_err(dev, "failed to request irq\n");
		return ret;
	}

	for (i = 0; i < scaler->scaler_data->num_clk; ++i) {
		scaler->clock[i] = devm_clk_get(dev,
					scaler->scaler_data->clk_name[i]);
		if (IS_ERR(scaler->clock[i])) {
			dev_err(dev, "failed to get clock\n");
			return PTR_ERR(scaler->clock[i]);
		}
	}

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, SCALER_AUTOSUSPEND_DELAY);
	pm_runtime_enable(dev);
	platform_set_drvdata(pdev, scaler);

	ret = component_add(dev, &scaler_component_ops);
	if (ret)
		goto err_component;

	return 0;

err_component:
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);
	return ret;
}
/* Undo scaler_probe(): remove the component and disable runtime PM. */
static int scaler_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &scaler_component_ops);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}
#ifdef CONFIG_PM
/*
 * Runtime PM: gate all per-SoC clocks while the device is idle.
 *
 * The original funneled both directions through a function-pointer
 * trampoline and discarded clk_prepare_enable() failures; resume now
 * propagates the first failure and unwinds already-enabled clocks.
 * Suspend disables in reverse acquisition order.
 */
static int scaler_runtime_suspend(struct device *dev)
{
	struct scaler_context *scaler = dev_get_drvdata(dev);
	int i;

	for (i = scaler->scaler_data->num_clk - 1; i >= 0; --i)
		clk_disable_unprepare(scaler->clock[i]);

	return 0;
}

static int scaler_runtime_resume(struct device *dev)
{
	struct scaler_context *scaler = dev_get_drvdata(dev);
	unsigned int i;
	int ret;

	for (i = 0; i < scaler->scaler_data->num_clk; ++i) {
		ret = clk_prepare_enable(scaler->clock[i]);
		if (ret) {
			/* roll back the clocks enabled so far */
			while (i-- > 0)
				clk_disable_unprepare(scaler->clock[i]);
			return ret;
		}
	}

	return 0;
}
#endif
/* System sleep is routed through the runtime-PM clock handlers. */
static const struct dev_pm_ops scaler_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(scaler_runtime_suspend, scaler_runtime_resume, NULL)
};
/*
 * Hardware limits advertised to the IPP core: 16..8192 pixel buffers,
 * 1/4x..16x scaling in 16.16 fixed point. Subsampled YUV formats
 * additionally need 2-pixel alignment on the subsampled axes.
 */
static const struct drm_exynos_ipp_limit scaler_5420_two_pixel_hv_limits[] = {
	{ IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) },
	{ IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 2) },
	{ IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 },
			  .v = { 65536 * 1 / 4, 65536 * 16 }) },
};

static const struct drm_exynos_ipp_limit scaler_5420_two_pixel_h_limits[] = {
	{ IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) },
	{ IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 1) },
	{ IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 },
			  .v = { 65536 * 1 / 4, 65536 * 16 }) },
};

static const struct drm_exynos_ipp_limit scaler_5420_one_pixel_limits[] = {
	{ IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) },
	{ IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 },
			  .v = { 65536 * 1 / 4, 65536 * 16 }) },
};
/*
 * Supported fourccs; each entry is annotated with the hardware
 * color-format code it maps to in scaler_get_format().
 */
static const struct exynos_drm_ipp_formats exynos5420_formats[] = {
	/* SCALER_YUV420_2P_UV */
	{ IPP_SRCDST_FORMAT(NV21, scaler_5420_two_pixel_hv_limits) },

	/* SCALER_YUV420_2P_VU */
	{ IPP_SRCDST_FORMAT(NV12, scaler_5420_two_pixel_hv_limits) },

	/* SCALER_YUV420_3P */
	{ IPP_SRCDST_FORMAT(YUV420, scaler_5420_two_pixel_hv_limits) },

	/* SCALER_YUV422_1P_YUYV */
	{ IPP_SRCDST_FORMAT(YUYV, scaler_5420_two_pixel_h_limits) },

	/* SCALER_YUV422_1P_UYVY */
	{ IPP_SRCDST_FORMAT(UYVY, scaler_5420_two_pixel_h_limits) },

	/* SCALER_YUV422_1P_YVYU */
	{ IPP_SRCDST_FORMAT(YVYU, scaler_5420_two_pixel_h_limits) },

	/* SCALER_YUV422_2P_UV */
	{ IPP_SRCDST_FORMAT(NV61, scaler_5420_two_pixel_h_limits) },

	/* SCALER_YUV422_2P_VU */
	{ IPP_SRCDST_FORMAT(NV16, scaler_5420_two_pixel_h_limits) },

	/* SCALER_YUV422_3P */
	{ IPP_SRCDST_FORMAT(YUV422, scaler_5420_two_pixel_h_limits) },

	/* SCALER_YUV444_2P_UV */
	{ IPP_SRCDST_FORMAT(NV42, scaler_5420_one_pixel_limits) },

	/* SCALER_YUV444_2P_VU */
	{ IPP_SRCDST_FORMAT(NV24, scaler_5420_one_pixel_limits) },

	/* SCALER_YUV444_3P */
	{ IPP_SRCDST_FORMAT(YUV444, scaler_5420_one_pixel_limits) },

	/* SCALER_RGB_565 */
	{ IPP_SRCDST_FORMAT(RGB565, scaler_5420_one_pixel_limits) },

	/* SCALER_ARGB1555 */
	{ IPP_SRCDST_FORMAT(XRGB1555, scaler_5420_one_pixel_limits) },

	/* SCALER_ARGB1555 */
	{ IPP_SRCDST_FORMAT(ARGB1555, scaler_5420_one_pixel_limits) },

	/* SCALER_ARGB4444 */
	{ IPP_SRCDST_FORMAT(XRGB4444, scaler_5420_one_pixel_limits) },

	/* SCALER_ARGB4444 */
	{ IPP_SRCDST_FORMAT(ARGB4444, scaler_5420_one_pixel_limits) },

	/* SCALER_ARGB8888 */
	{ IPP_SRCDST_FORMAT(XRGB8888, scaler_5420_one_pixel_limits) },

	/* SCALER_ARGB8888 */
	{ IPP_SRCDST_FORMAT(ARGB8888, scaler_5420_one_pixel_limits) },

	/* SCALER_RGBA8888 */
	{ IPP_SRCDST_FORMAT(RGBX8888, scaler_5420_one_pixel_limits) },

	/* SCALER_RGBA8888 */
	{ IPP_SRCDST_FORMAT(RGBA8888, scaler_5420_one_pixel_limits) },
};
/* Exynos5420: a single gate clock. */
static const struct scaler_data exynos5420_data = {
	.clk_name	= {"mscl"},
	.num_clk	= 1,
	.formats	= exynos5420_formats,
	.num_formats	= ARRAY_SIZE(exynos5420_formats),
};

/* Exynos5433: adds bus/interface clocks; format support is identical. */
static const struct scaler_data exynos5433_data = {
	.clk_name	= {"pclk", "aclk", "aclk_xiu"},
	.num_clk	= 3,
	.formats	= exynos5420_formats, /* intentional */
	.num_formats	= ARRAY_SIZE(exynos5420_formats),
};
/* OF match table; .data selects the per-SoC clock list and formats. */
static const struct of_device_id exynos_scaler_match[] = {
	{
		.compatible = "samsung,exynos5420-scaler",
		.data = &exynos5420_data,
	}, {
		.compatible = "samsung,exynos5433-scaler",
		.data = &exynos5433_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, exynos_scaler_match);
/* Platform-driver glue; the device becomes usable once the component binds. */
struct platform_driver scaler_driver = {
	.probe		= scaler_probe,
	.remove		= scaler_remove,
	.driver		= {
		.name	= "exynos-scaler",
		.owner	= THIS_MODULE,
		.pm	= &scaler_pm_ops,
		.of_match_table = exynos_scaler_match,
	},
};

View File

@ -954,8 +954,6 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
drm_mode_connector_attach_encoder(connector, encoder);
if (hdata->bridge) {
encoder->bridge = hdata->bridge;
hdata->bridge->encoder = encoder;
ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
if (ret)
DRM_ERROR("Failed to attach bridge\n");

View File

@ -473,7 +473,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
chroma_addr[1] = chroma_addr[0] + 0x40;
} else {
luma_addr[1] = luma_addr[0] + fb->pitches[0];
chroma_addr[1] = chroma_addr[0] + fb->pitches[0];
chroma_addr[1] = chroma_addr[0] + fb->pitches[1];
}
} else {
luma_addr[1] = 0;
@ -482,6 +482,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
spin_lock_irqsave(&ctx->reg_slock, flags);
vp_reg_write(ctx, VP_SHADOW_UPDATE, 1);
/* interlace or progressive scan mode */
val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP);
@ -495,21 +496,23 @@ static void vp_video_buffer(struct mixer_context *ctx,
vp_reg_write(ctx, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
VP_IMG_VSIZE(fb->height));
/* chroma plane for NV12/NV21 is half the height of the luma plane */
vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[1]) |
VP_IMG_VSIZE(fb->height / 2));
vp_reg_write(ctx, VP_SRC_WIDTH, state->src.w);
vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h);
vp_reg_write(ctx, VP_SRC_H_POSITION,
VP_SRC_H_POSITION_VAL(state->src.x));
vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y);
vp_reg_write(ctx, VP_DST_WIDTH, state->crtc.w);
vp_reg_write(ctx, VP_DST_H_POSITION, state->crtc.x);
if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h / 2);
vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y / 2);
vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h / 2);
vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y / 2);
} else {
vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h);
vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y);
vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h);
vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y);
}
@ -699,6 +702,15 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
/* interlace scan need to check shadow register */
if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
vp_reg_read(ctx, VP_SHADOW_UPDATE))
goto out;
base = mixer_reg_read(ctx, MXR_CFG);
shadow = mixer_reg_read(ctx, MXR_CFG_S);
if (base != shadow)
goto out;
base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
if (base != shadow)

View File

@ -47,6 +47,7 @@
#define MXR_MO 0x0304
#define MXR_RESOLUTION 0x0310
#define MXR_CFG_S 0x2004
#define MXR_GRAPHIC0_BASE_S 0x2024
#define MXR_GRAPHIC1_BASE_S 0x2044

View File

@ -0,0 +1,426 @@
/* drivers/gpu/drm/exynos/regs-scaler.h
*
* Copyright (c) 2017 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
*
* Register definition file for Samsung scaler driver
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef EXYNOS_REGS_SCALER_H
#define EXYNOS_REGS_SCALER_H
/* Register part */
/* Global setting */
#define SCALER_STATUS 0x0 /* no shadow */
#define SCALER_CFG 0x4
/* Interrupt */
#define SCALER_INT_EN 0x8 /* no shadow */
#define SCALER_INT_STATUS 0xc /* no shadow */
/* SRC */
#define SCALER_SRC_CFG 0x10
#define SCALER_SRC_Y_BASE 0x14
#define SCALER_SRC_CB_BASE 0x18
#define SCALER_SRC_CR_BASE 0x294
#define SCALER_SRC_SPAN 0x1c
#define SCALER_SRC_Y_POS 0x20
#define SCALER_SRC_WH 0x24
#define SCALER_SRC_C_POS 0x28
/* DST */
#define SCALER_DST_CFG 0x30
#define SCALER_DST_Y_BASE 0x34
#define SCALER_DST_CB_BASE 0x38
#define SCALER_DST_CR_BASE 0x298
#define SCALER_DST_SPAN 0x3c
#define SCALER_DST_WH 0x40
#define SCALER_DST_POS 0x44
/* Ratio */
#define SCALER_H_RATIO 0x50
#define SCALER_V_RATIO 0x54
/* Rotation */
#define SCALER_ROT_CFG 0x58
/* Coefficient */
/*
* YHCOEF_{x}{A|B|C|D} CHCOEF_{x}{A|B|C|D}
*
* A B C D A B C D
* 0 60 64 68 6c 140 144 148 14c
* 1 70 74 78 7c 150 154 158 15c
* 2 80 84 88 8c 160 164 168 16c
* 3 90 94 98 9c 170 174 178 17c
* 4 a0 a4 a8 ac 180 184 188 18c
* 5 b0 b4 b8 bc 190 194 198 19c
* 6 c0 c4 c8 cc 1a0 1a4 1a8 1ac
* 7 d0 d4 d8 dc 1b0 1b4 1b8 1bc
* 8 e0 e4 e8 ec 1c0 1c4 1c8 1cc
*
*
* YVCOEF_{x}{A|B} CVCOEF_{x}{A|B}
*
* A B A B
* 0 f0 f4 1d0 1d4
* 1 f8 fc 1d8 1dc
* 2 100 104 1e0 1e4
* 3 108 10c 1e8 1ec
* 4 110 114 1f0 1f4
* 5 118 11c 1f8 1fc
* 6 120 124 200 204
* 7 128 12c 208 20c
* 8 130 134 210 214
*/
#define _SCALER_HCOEF_DELTA(r, c) ((r) * 0x10 + (c) * 0x4)
#define _SCALER_VCOEF_DELTA(r, c) ((r) * 0x8 + (c) * 0x4)
#define SCALER_YHCOEF(r, c) (0x60 + _SCALER_HCOEF_DELTA((r), (c)))
#define SCALER_YVCOEF(r, c) (0xf0 + _SCALER_VCOEF_DELTA((r), (c)))
#define SCALER_CHCOEF(r, c) (0x140 + _SCALER_HCOEF_DELTA((r), (c)))
#define SCALER_CVCOEF(r, c) (0x1d0 + _SCALER_VCOEF_DELTA((r), (c)))
/* Color Space Conversion */
#define SCALER_CSC_COEF(x, y) (0x220 + (y) * 0xc + (x) * 0x4)
/* Dithering */
#define SCALER_DITH_CFG 0x250
/* Version Number */
#define SCALER_VER 0x260 /* no shadow */
/* Cycle count and Timeout */
#define SCALER_CYCLE_COUNT 0x278 /* no shadow */
#define SCALER_TIMEOUT_CTRL 0x2c0 /* no shadow */
#define SCALER_TIMEOUT_CNT 0x2c4 /* no shadow */
/* Blending */
#define SCALER_SRC_BLEND_COLOR 0x280
#define SCALER_SRC_BLEND_ALPHA 0x284
#define SCALER_DST_BLEND_COLOR 0x288
#define SCALER_DST_BLEND_ALPHA 0x28c
/* Color Fill */
#define SCALER_FILL_COLOR 0x290
/* Multiple Command Queue */
#define SCALER_ADDR_Q_CONFIG 0x2a0 /* no shadow */
#define SCALER_SRC_ADDR_Q_STATUS 0x2a4 /* no shadow */
#define SCALER_SRC_ADDR_Q 0x2a8 /* no shadow */
/* CRC */
#define SCALER_CRC_COLOR00_10 0x2b0 /* no shadow */
#define SCALER_CRC_COLOR20_30 0x2b4 /* no shadow */
#define SCALER_CRC_COLOR01_11 0x2b8 /* no shadow */
#define SCALER_CRC_COLOR21_31 0x2bc /* no shadow */
/* Shadow Registers */
#define SCALER_SHADOW_OFFSET 0x1000
/* Bit definition part */
/*
 * Bit-field helpers: MASK builds an inclusive (hi_b..lo_b)-wide mask
 * (field width must be at most 31 bits), GET extracts a field from a
 * register value, SET positions a value into a field.
 *
 * Fixes: the shifts are done on unsigned constants — with the original
 * signed '1', SCALER_SET(v, 31, 16) could shift a set bit into the
 * sign bit of int (undefined behavior) — and 'lo_b' in SET is now
 * parenthesized for macro hygiene.
 */
#define SCALER_MASK(hi_b, lo_b)	((1u << ((hi_b) - (lo_b) + 1)) - 1)
#define SCALER_GET(reg, hi_b, lo_b)	\
	(((reg) >> (lo_b)) & SCALER_MASK(hi_b, lo_b))
#define SCALER_SET(val, hi_b, lo_b) \
	(((val) & SCALER_MASK(hi_b, lo_b)) << (lo_b))
/* SCALER_STATUS */
#define SCALER_STATUS_SCALER_RUNNING (1 << 1)
#define SCALER_STATUS_SCALER_READY_CLK_DOWN (1 << 0)
/* SCALER_CFG */
#define SCALER_CFG_FILL_EN (1 << 24)
#define SCALER_CFG_BLEND_COLOR_DIVIDE_ALPHA_EN (1 << 17)
#define SCALER_CFG_BLEND_EN (1 << 16)
#define SCALER_CFG_CSC_Y_OFFSET_SRC_EN (1 << 10)
#define SCALER_CFG_CSC_Y_OFFSET_DST_EN (1 << 9)
#define SCALER_CFG_16_BURST_MODE (1 << 8)
#define SCALER_CFG_SOFT_RESET (1 << 1)
#define SCALER_CFG_START_CMD (1 << 0)
/* SCALER_INT_EN */
#define SCALER_INT_EN_TIMEOUT (1 << 31)
#define SCALER_INT_EN_ILLEGAL_BLEND (1 << 24)
#define SCALER_INT_EN_ILLEGAL_RATIO (1 << 23)
#define SCALER_INT_EN_ILLEGAL_DST_HEIGHT (1 << 22)
#define SCALER_INT_EN_ILLEGAL_DST_WIDTH (1 << 21)
#define SCALER_INT_EN_ILLEGAL_DST_V_POS (1 << 20)
#define SCALER_INT_EN_ILLEGAL_DST_H_POS (1 << 19)
#define SCALER_INT_EN_ILLEGAL_DST_C_SPAN (1 << 18)
#define SCALER_INT_EN_ILLEGAL_DST_Y_SPAN (1 << 17)
#define SCALER_INT_EN_ILLEGAL_DST_CR_BASE (1 << 16)
#define SCALER_INT_EN_ILLEGAL_DST_CB_BASE (1 << 15)
#define SCALER_INT_EN_ILLEGAL_DST_Y_BASE (1 << 14)
#define SCALER_INT_EN_ILLEGAL_DST_COLOR (1 << 13)
#define SCALER_INT_EN_ILLEGAL_SRC_HEIGHT (1 << 12)
#define SCALER_INT_EN_ILLEGAL_SRC_WIDTH (1 << 11)
#define SCALER_INT_EN_ILLEGAL_SRC_CV_POS (1 << 10)
#define SCALER_INT_EN_ILLEGAL_SRC_CH_POS (1 << 9)
#define SCALER_INT_EN_ILLEGAL_SRC_YV_POS (1 << 8)
#define SCALER_INT_EN_ILLEGAL_SRC_YH_POS (1 << 7)
#define SCALER_INT_EN_ILLEGAL_DST_SPAN (1 << 6)
#define SCALER_INT_EN_ILLEGAL_SRC_Y_SPAN (1 << 5)
#define SCALER_INT_EN_ILLEGAL_SRC_CR_BASE (1 << 4)
#define SCALER_INT_EN_ILLEGAL_SRC_CB_BASE (1 << 3)
#define SCALER_INT_EN_ILLEGAL_SRC_Y_BASE (1 << 2)
#define SCALER_INT_EN_ILLEGAL_SRC_COLOR (1 << 1)
#define SCALER_INT_EN_FRAME_END (1 << 0)
/* SCALER_INT_STATUS */
#define SCALER_INT_STATUS_TIMEOUT (1 << 31)
#define SCALER_INT_STATUS_ILLEGAL_BLEND (1 << 24)
#define SCALER_INT_STATUS_ILLEGAL_RATIO (1 << 23)
#define SCALER_INT_STATUS_ILLEGAL_DST_HEIGHT (1 << 22)
#define SCALER_INT_STATUS_ILLEGAL_DST_WIDTH (1 << 21)
#define SCALER_INT_STATUS_ILLEGAL_DST_V_POS (1 << 20)
#define SCALER_INT_STATUS_ILLEGAL_DST_H_POS (1 << 19)
#define SCALER_INT_STATUS_ILLEGAL_DST_C_SPAN (1 << 18)
#define SCALER_INT_STATUS_ILLEGAL_DST_Y_SPAN (1 << 17)
#define SCALER_INT_STATUS_ILLEGAL_DST_CR_BASE (1 << 16)
#define SCALER_INT_STATUS_ILLEGAL_DST_CB_BASE (1 << 15)
#define SCALER_INT_STATUS_ILLEGAL_DST_Y_BASE (1 << 14)
#define SCALER_INT_STATUS_ILLEGAL_DST_COLOR (1 << 13)
#define SCALER_INT_STATUS_ILLEGAL_SRC_HEIGHT (1 << 12)
#define SCALER_INT_STATUS_ILLEGAL_SRC_WIDTH (1 << 11)
#define SCALER_INT_STATUS_ILLEGAL_SRC_CV_POS (1 << 10)
#define SCALER_INT_STATUS_ILLEGAL_SRC_CH_POS (1 << 9)
#define SCALER_INT_STATUS_ILLEGAL_SRC_YV_POS (1 << 8)
#define SCALER_INT_STATUS_ILLEGAL_SRC_YH_POS (1 << 7)
#define SCALER_INT_STATUS_ILLEGAL_DST_SPAN (1 << 6)
#define SCALER_INT_STATUS_ILLEGAL_SRC_Y_SPAN (1 << 5)
#define SCALER_INT_STATUS_ILLEGAL_SRC_CR_BASE (1 << 4)
#define SCALER_INT_STATUS_ILLEGAL_SRC_CB_BASE (1 << 3)
#define SCALER_INT_STATUS_ILLEGAL_SRC_Y_BASE (1 << 2)
#define SCALER_INT_STATUS_ILLEGAL_SRC_COLOR (1 << 1)
#define SCALER_INT_STATUS_FRAME_END (1 << 0)
/* SCALER_SRC_CFG */
#define SCALER_SRC_CFG_TILE_EN (1 << 10)
#define SCALER_SRC_CFG_GET_BYTE_SWAP(r) SCALER_GET(r, 6, 5)
#define SCALER_SRC_CFG_SET_BYTE_SWAP(v) SCALER_SET(v, 6, 5)
#define SCALER_SRC_CFG_GET_COLOR_FORMAT(r) SCALER_GET(r, 4, 0)
#define SCALER_SRC_CFG_SET_COLOR_FORMAT(v) SCALER_SET(v, 4, 0)
/* Hardware color-format codes for the 5-bit COLOR_FORMAT field above (the
 * same field layout exists in SCALER_DST_CFG). Note the code numbering has
 * gaps: values 1, 8, 17 and 21 are not defined. */
#define SCALER_YUV420_2P_UV 0
#define SCALER_YUV422_2P_UV 2
#define SCALER_YUV444_2P_UV 3
#define SCALER_RGB_565 4
#define SCALER_ARGB1555 5
#define SCALER_ARGB8888 6
#define SCALER_ARGB8888_PRE 7
#define SCALER_YUV422_1P_YVYU 9
#define SCALER_YUV422_1P_YUYV 10
#define SCALER_YUV422_1P_UYVY 11
#define SCALER_ARGB4444 12
#define SCALER_L8A8 13
#define SCALER_RGBA8888 14
#define SCALER_L8 15
#define SCALER_YUV420_2P_VU 16
#define SCALER_YUV422_2P_VU 18
#define SCALER_YUV444_2P_VU 19
#define SCALER_YUV420_3P 20
#define SCALER_YUV422_3P 22
#define SCALER_YUV444_3P 23
/* SCALER_SRC_SPAN */
#define SCALER_SRC_SPAN_GET_C_SPAN(r) SCALER_GET(r, 29, 16)
#define SCALER_SRC_SPAN_SET_C_SPAN(v) SCALER_SET(v, 29, 16)
#define SCALER_SRC_SPAN_GET_Y_SPAN(r) SCALER_GET(r, 13, 0)
#define SCALER_SRC_SPAN_SET_Y_SPAN(v) SCALER_SET(v, 13, 0)
/* SCALER_SRC_Y_POS */
#define SCALER_SRC_Y_POS_GET_YH_POS(r) SCALER_GET(r, 31, 16)
#define SCALER_SRC_Y_POS_SET_YH_POS(v) SCALER_SET(v, 31, 16)
#define SCALER_SRC_Y_POS_GET_YV_POS(r) SCALER_GET(r, 15, 0)
#define SCALER_SRC_Y_POS_SET_YV_POS(v) SCALER_SET(v, 15, 0)
/* SCALER_SRC_WH */
#define SCALER_SRC_WH_GET_WIDTH(r) SCALER_GET(r, 29, 16)
#define SCALER_SRC_WH_SET_WIDTH(v) SCALER_SET(v, 29, 16)
#define SCALER_SRC_WH_GET_HEIGHT(r) SCALER_GET(r, 13, 0)
#define SCALER_SRC_WH_SET_HEIGHT(v) SCALER_SET(v, 13, 0)
/* SCALER_SRC_C_POS */
#define SCALER_SRC_C_POS_GET_CH_POS(r) SCALER_GET(r, 31, 16)
#define SCALER_SRC_C_POS_SET_CH_POS(v) SCALER_SET(v, 31, 16)
#define SCALER_SRC_C_POS_GET_CV_POS(r) SCALER_GET(r, 15, 0)
#define SCALER_SRC_C_POS_SET_CV_POS(v) SCALER_SET(v, 15, 0)
/* SCALER_DST_CFG */
#define SCALER_DST_CFG_GET_BYTE_SWAP(r) SCALER_GET(r, 6, 5)
#define SCALER_DST_CFG_SET_BYTE_SWAP(v) SCALER_SET(v, 6, 5)
#define SCALER_DST_CFG_GET_COLOR_FORMAT(r) SCALER_GET(r, 4, 0)
#define SCALER_DST_CFG_SET_COLOR_FORMAT(v) SCALER_SET(v, 4, 0)
/* SCALER_DST_SPAN */
#define SCALER_DST_SPAN_GET_C_SPAN(r) SCALER_GET(r, 29, 16)
#define SCALER_DST_SPAN_SET_C_SPAN(v) SCALER_SET(v, 29, 16)
#define SCALER_DST_SPAN_GET_Y_SPAN(r) SCALER_GET(r, 13, 0)
#define SCALER_DST_SPAN_SET_Y_SPAN(v) SCALER_SET(v, 13, 0)
/* SCALER_DST_WH */
#define SCALER_DST_WH_GET_WIDTH(r) SCALER_GET(r, 29, 16)
#define SCALER_DST_WH_SET_WIDTH(v) SCALER_SET(v, 29, 16)
#define SCALER_DST_WH_GET_HEIGHT(r) SCALER_GET(r, 13, 0)
#define SCALER_DST_WH_SET_HEIGHT(v) SCALER_SET(v, 13, 0)
/* SCALER_DST_POS */
#define SCALER_DST_POS_GET_H_POS(r) SCALER_GET(r, 29, 16)
#define SCALER_DST_POS_SET_H_POS(v) SCALER_SET(v, 29, 16)
#define SCALER_DST_POS_GET_V_POS(r) SCALER_GET(r, 13, 0)
#define SCALER_DST_POS_SET_V_POS(v) SCALER_SET(v, 13, 0)
/* SCALER_H_RATIO: 19-bit (18:0) horizontal scale ratio */
#define SCALER_H_RATIO_GET(r) SCALER_GET(r, 18, 0)
#define SCALER_H_RATIO_SET(v) SCALER_SET(v, 18, 0)
/* SCALER_V_RATIO: 19-bit (18:0) vertical scale ratio */
#define SCALER_V_RATIO_GET(r) SCALER_GET(r, 18, 0)
#define SCALER_V_RATIO_SET(v) SCALER_SET(v, 18, 0)
/* SCALER_ROT_CFG */
#define SCALER_ROT_CFG_FLIP_X_EN (1 << 3)
#define SCALER_ROT_CFG_FLIP_Y_EN (1 << 2)
#define SCALER_ROT_CFG_GET_ROTMODE(r) SCALER_GET(r, 1, 0)
#define SCALER_ROT_CFG_SET_ROTMODE(v) SCALER_SET(v, 1, 0)
#define SCALER_ROT_MODE_90 1
#define SCALER_ROT_MODE_180 2
#define SCALER_ROT_MODE_270 3
/* SCALER_HCOEF, SCALER_VCOEF */
/*
 * Two filter coefficients are packed per 32-bit register: even-indexed
 * coefficients (i % 2 == 0) occupy bits 31:16, odd-indexed ones bits 15:0;
 * each coefficient is 9 bits wide (mask 0x1ff).
 */
#define SCALER_COEF_SHIFT(i) (16 * (1 - (i) % 2))
#define SCALER_COEF_GET(r, i) \
(((r) >> SCALER_COEF_SHIFT(i)) & 0x1ff)
#define SCALER_COEF_SET(v, i) \
(((v) & 0x1ff) << SCALER_COEF_SHIFT(i))
/* SCALER_CSC_COEFxy: 12-bit color-space-conversion coefficient */
#define SCALER_CSC_COEF_GET(r) SCALER_GET(r, 11, 0)
#define SCALER_CSC_COEF_SET(v) SCALER_SET(v, 11, 0)
/* SCALER_DITH_CFG: 3-bit dithering type per R/G/B channel */
#define SCALER_DITH_CFG_GET_R_TYPE(r) SCALER_GET(r, 8, 6)
#define SCALER_DITH_CFG_SET_R_TYPE(v) SCALER_SET(v, 8, 6)
#define SCALER_DITH_CFG_GET_G_TYPE(r) SCALER_GET(r, 5, 3)
#define SCALER_DITH_CFG_SET_G_TYPE(v) SCALER_SET(v, 5, 3)
#define SCALER_DITH_CFG_GET_B_TYPE(r) SCALER_GET(r, 2, 0)
#define SCALER_DITH_CFG_SET_B_TYPE(v) SCALER_SET(v, 2, 0)
/* SCALER_TIMEOUT_CTRL */
#define SCALER_TIMEOUT_CTRL_GET_TIMER_VALUE(r) SCALER_GET(r, 31, 16)
#define SCALER_TIMEOUT_CTRL_SET_TIMER_VALUE(v) SCALER_SET(v, 31, 16)
#define SCALER_TIMEOUT_CTRL_GET_TIMER_DIV(r) SCALER_GET(r, 7, 4)
#define SCALER_TIMEOUT_CTRL_SET_TIMER_DIV(v) SCALER_SET(v, 7, 4)
#define SCALER_TIMEOUT_CTRL_TIMER_ENABLE (1 << 0)
/* SCALER_TIMEOUT_CNT */
#define SCALER_TIMEOUT_CTRL_GET_TIMER_COUNT(r) SCALER_GET(r, 31, 16)
/* SCALER_SRC_BLEND_COLOR */
/* 1U: shifting a signed 1 into bit 31 is undefined behavior in C */
#define SCALER_SRC_BLEND_COLOR_SEL_INV (1U << 31)
#define SCALER_SRC_BLEND_COLOR_GET_SEL(r) SCALER_GET(r, 30, 29)
#define SCALER_SRC_BLEND_COLOR_SET_SEL(v) SCALER_SET(v, 30, 29)
#define SCALER_SRC_BLEND_COLOR_OP_SEL_INV (1U << 28)
#define SCALER_SRC_BLEND_COLOR_GET_OP_SEL(r) SCALER_GET(r, 27, 24)
#define SCALER_SRC_BLEND_COLOR_SET_OP_SEL(v) SCALER_SET(v, 27, 24)
#define SCALER_SRC_BLEND_COLOR_GET_COLOR0(r) SCALER_GET(r, 23, 16)
#define SCALER_SRC_BLEND_COLOR_SET_COLOR0(v) SCALER_SET(v, 23, 16)
#define SCALER_SRC_BLEND_COLOR_GET_COLOR1(r) SCALER_GET(r, 15, 8)
#define SCALER_SRC_BLEND_COLOR_SET_COLOR1(v) SCALER_SET(v, 15, 8)
#define SCALER_SRC_BLEND_COLOR_GET_COLOR2(r) SCALER_GET(r, 7, 0)
#define SCALER_SRC_BLEND_COLOR_SET_COLOR2(v) SCALER_SET(v, 7, 0)
/* SCALER_SRC_BLEND_ALPHA */
/* 1U: shifting a signed 1 into bit 31 is undefined behavior in C */
#define SCALER_SRC_BLEND_ALPHA_SEL_INV (1U << 31)
#define SCALER_SRC_BLEND_ALPHA_GET_SEL(r) SCALER_GET(r, 30, 29)
#define SCALER_SRC_BLEND_ALPHA_SET_SEL(v) SCALER_SET(v, 30, 29)
#define SCALER_SRC_BLEND_ALPHA_OP_SEL_INV (1U << 28)
#define SCALER_SRC_BLEND_ALPHA_GET_OP_SEL(r) SCALER_GET(r, 27, 24)
#define SCALER_SRC_BLEND_ALPHA_SET_OP_SEL(v) SCALER_SET(v, 27, 24)
#define SCALER_SRC_BLEND_ALPHA_GET_ALPHA(r) SCALER_GET(r, 7, 0)
#define SCALER_SRC_BLEND_ALPHA_SET_ALPHA(v) SCALER_SET(v, 7, 0)
/* SCALER_DST_BLEND_COLOR */
/* 1U: shifting a signed 1 into bit 31 is undefined behavior in C */
#define SCALER_DST_BLEND_COLOR_SEL_INV (1U << 31)
#define SCALER_DST_BLEND_COLOR_GET_SEL(r) SCALER_GET(r, 30, 29)
#define SCALER_DST_BLEND_COLOR_SET_SEL(v) SCALER_SET(v, 30, 29)
#define SCALER_DST_BLEND_COLOR_OP_SEL_INV (1U << 28)
#define SCALER_DST_BLEND_COLOR_GET_OP_SEL(r) SCALER_GET(r, 27, 24)
#define SCALER_DST_BLEND_COLOR_SET_OP_SEL(v) SCALER_SET(v, 27, 24)
#define SCALER_DST_BLEND_COLOR_GET_COLOR0(r) SCALER_GET(r, 23, 16)
#define SCALER_DST_BLEND_COLOR_SET_COLOR0(v) SCALER_SET(v, 23, 16)
#define SCALER_DST_BLEND_COLOR_GET_COLOR1(r) SCALER_GET(r, 15, 8)
#define SCALER_DST_BLEND_COLOR_SET_COLOR1(v) SCALER_SET(v, 15, 8)
#define SCALER_DST_BLEND_COLOR_GET_COLOR2(r) SCALER_GET(r, 7, 0)
#define SCALER_DST_BLEND_COLOR_SET_COLOR2(v) SCALER_SET(v, 7, 0)
/* SCALER_DST_BLEND_ALPHA */
/* 1U: shifting a signed 1 into bit 31 is undefined behavior in C */
#define SCALER_DST_BLEND_ALPHA_SEL_INV (1U << 31)
#define SCALER_DST_BLEND_ALPHA_GET_SEL(r) SCALER_GET(r, 30, 29)
#define SCALER_DST_BLEND_ALPHA_SET_SEL(v) SCALER_SET(v, 30, 29)
#define SCALER_DST_BLEND_ALPHA_OP_SEL_INV (1U << 28)
#define SCALER_DST_BLEND_ALPHA_GET_OP_SEL(r) SCALER_GET(r, 27, 24)
#define SCALER_DST_BLEND_ALPHA_SET_OP_SEL(v) SCALER_SET(v, 27, 24)
#define SCALER_DST_BLEND_ALPHA_GET_ALPHA(r) SCALER_GET(r, 7, 0)
#define SCALER_DST_BLEND_ALPHA_SET_ALPHA(v) SCALER_SET(v, 7, 0)
/* SCALER_FILL_COLOR */
#define SCALER_FILL_COLOR_GET_ALPHA(r) SCALER_GET(r, 31, 24)
#define SCALER_FILL_COLOR_SET_ALPHA(v) SCALER_SET(v, 31, 24)
#define SCALER_FILL_COLOR_GET_FILL_COLOR0(r) SCALER_GET(r, 23, 16)
#define SCALER_FILL_COLOR_SET_FILL_COLOR0(v) SCALER_SET(v, 23, 16)
#define SCALER_FILL_COLOR_GET_FILL_COLOR1(r) SCALER_GET(r, 15, 8)
#define SCALER_FILL_COLOR_SET_FILL_COLOR1(v) SCALER_SET(v, 15, 8)
#define SCALER_FILL_COLOR_GET_FILL_COLOR2(r) SCALER_GET(r, 7, 0)
#define SCALER_FILL_COLOR_SET_FILL_COLOR2(v) SCALER_SET(v, 7, 0)
/* SCALER_ADDR_Q_CONFIG */
#define SCALER_ADDR_Q_CONFIG_RST (1 << 0)
/* SCALER_SRC_ADDR_Q_STATUS */
/* Per-plane (Y/Cb/Cr) address-queue status: FULL/EMPTY flags plus a 6-bit
 * write index for each plane. */
#define SCALER_SRC_ADDR_Q_STATUS_Y_FULL (1 << 23)
#define SCALER_SRC_ADDR_Q_STATUS_Y_EMPTY (1 << 22)
#define SCALER_SRC_ADDR_Q_STATUS_GET_Y_WR_IDX(r) SCALER_GET(r, 21, 16)
#define SCALER_SRC_ADDR_Q_STATUS_CB_FULL (1 << 15)
#define SCALER_SRC_ADDR_Q_STATUS_CB_EMPTY (1 << 14)
#define SCALER_SRC_ADDR_Q_STATUS_GET_CB_WR_IDX(r) SCALER_GET(r, 13, 8)
#define SCALER_SRC_ADDR_Q_STATUS_CR_FULL (1 << 7)
#define SCALER_SRC_ADDR_Q_STATUS_CR_EMPTY (1 << 6)
#define SCALER_SRC_ADDR_Q_STATUS_GET_CR_WR_IDX(r) SCALER_GET(r, 5, 0)
/* SCALER_DST_ADDR_Q_STATUS: same layout as the SRC queue status above */
#define SCALER_DST_ADDR_Q_STATUS_Y_FULL (1 << 23)
#define SCALER_DST_ADDR_Q_STATUS_Y_EMPTY (1 << 22)
#define SCALER_DST_ADDR_Q_STATUS_GET_Y_WR_IDX(r) SCALER_GET(r, 21, 16)
#define SCALER_DST_ADDR_Q_STATUS_CB_FULL (1 << 15)
#define SCALER_DST_ADDR_Q_STATUS_CB_EMPTY (1 << 14)
#define SCALER_DST_ADDR_Q_STATUS_GET_CB_WR_IDX(r) SCALER_GET(r, 13, 8)
#define SCALER_DST_ADDR_Q_STATUS_CR_FULL (1 << 7)
#define SCALER_DST_ADDR_Q_STATUS_CR_EMPTY (1 << 6)
#define SCALER_DST_ADDR_Q_STATUS_GET_CR_WR_IDX(r) SCALER_GET(r, 5, 0)
/* CRC color registers: only GET accessors are defined (hardware-produced
 * values); two 16-bit samples are packed per 32-bit register. */
/* SCALER_CRC_COLOR00_10 */
#define SCALER_CRC_COLOR00_10_GET_00(r) SCALER_GET(r, 31, 16)
#define SCALER_CRC_COLOR00_10_GET_10(r) SCALER_GET(r, 15, 0)
/* SCALER_CRC_COLOR20_30 */
#define SCALER_CRC_COLOR20_30_GET_20(r) SCALER_GET(r, 31, 16)
#define SCALER_CRC_COLOR20_30_GET_30(r) SCALER_GET(r, 15, 0)
/* SCALER_CRC_COLOR01_11 */
#define SCALER_CRC_COLOR01_11_GET_01(r) SCALER_GET(r, 31, 16)
#define SCALER_CRC_COLOR01_11_GET_11(r) SCALER_GET(r, 15, 0)
/* SCALER_CRC_COLOR21_31 */
#define SCALER_CRC_COLOR21_31_GET_21(r) SCALER_GET(r, 31, 16)
#define SCALER_CRC_COLOR21_31_GET_31(r) SCALER_GET(r, 15, 0)
#endif /* EXYNOS_REGS_SCALER_H */

View File

@ -35,6 +35,7 @@
*/
#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
MODULE_FIRMWARE(I915_CSR_GLK);
#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
#define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin"

View File

@ -741,6 +741,7 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
struct vc4_async_flip_state {
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
struct drm_framebuffer *old_fb;
struct drm_pending_vblank_event *event;
struct vc4_seqno_cb cb;
@ -770,6 +771,23 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
drm_crtc_vblank_put(crtc);
drm_framebuffer_put(flip_state->fb);
/* Decrement the BO usecnt in order to keep the inc/dec calls balanced
* when the planes are updated through the async update path.
* FIXME: we should move to generic async-page-flip when it's
* available, so that we can get rid of this hand-made cleanup_fb()
* logic.
*/
if (flip_state->old_fb) {
struct drm_gem_cma_object *cma_bo;
struct vc4_bo *bo;
cma_bo = drm_fb_cma_get_gem_obj(flip_state->old_fb, 0);
bo = to_vc4_bo(&cma_bo->base);
vc4_bo_dec_usecnt(bo);
drm_framebuffer_put(flip_state->old_fb);
}
kfree(flip_state);
up(&vc4->async_modeset);
@ -794,9 +812,22 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
/* Increment the BO usecnt here, so that we never end up with an
* unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the
* plane is later updated through the non-async path.
* FIXME: we should move to generic async-page-flip when it's
* available, so that we can get rid of this hand-made prepare_fb()
* logic.
*/
ret = vc4_bo_inc_usecnt(bo);
if (ret)
return ret;
flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
if (!flip_state)
if (!flip_state) {
vc4_bo_dec_usecnt(bo);
return -ENOMEM;
}
drm_framebuffer_get(fb);
flip_state->fb = fb;
@ -807,10 +838,23 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
ret = down_interruptible(&vc4->async_modeset);
if (ret) {
drm_framebuffer_put(fb);
vc4_bo_dec_usecnt(bo);
kfree(flip_state);
return ret;
}
/* Save the current FB before it's replaced by the new one in
* drm_atomic_set_fb_for_plane(). We'll need the old FB in
* vc4_async_page_flip_complete() to decrement the BO usecnt and keep
* it consistent.
* FIXME: we should move to generic async-page-flip when it's
* available, so that we can get rid of this hand-made cleanup_fb()
* logic.
*/
flip_state->old_fb = plane->state->fb;
if (flip_state->old_fb)
drm_framebuffer_get(flip_state->old_fb);
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
/* Immediately update the plane's legacy fb pointer, so that later

View File

@ -441,11 +441,11 @@ static int vmwgfx_set_config_internal(struct drm_mode_set *set)
struct drm_crtc *crtc = set->crtc;
struct drm_framebuffer *fb;
struct drm_crtc *tmp;
struct drm_modeset_acquire_ctx *ctx;
struct drm_device *dev = set->crtc->dev;
struct drm_modeset_acquire_ctx ctx;
int ret;
ctx = dev->mode_config.acquire_ctx;
drm_modeset_acquire_init(&ctx, 0);
restart:
/*
@ -458,7 +458,7 @@ restart:
fb = set->fb;
ret = crtc->funcs->set_config(set, ctx);
ret = crtc->funcs->set_config(set, &ctx);
if (ret == 0) {
crtc->primary->crtc = crtc;
crtc->primary->fb = fb;
@ -473,20 +473,13 @@ restart:
}
if (ret == -EDEADLK) {
dev->mode_config.acquire_ctx = NULL;
retry_locking:
drm_modeset_backoff(ctx);
ret = drm_modeset_lock_all_ctx(dev, ctx);
if (ret)
goto retry_locking;
dev->mode_config.acquire_ctx = ctx;
drm_modeset_backoff(&ctx);
goto restart;
}
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
}
@ -624,7 +617,6 @@ static int vmw_fb_set_par(struct fb_info *info)
}
mutex_lock(&par->bo_mutex);
drm_modeset_lock_all(vmw_priv->dev);
ret = vmw_fb_kms_framebuffer(info);
if (ret)
goto out_unlock;
@ -657,7 +649,6 @@ out_unlock:
drm_mode_destroy(vmw_priv->dev, old_mode);
par->set_mode = mode;
drm_modeset_unlock_all(vmw_priv->dev);
mutex_unlock(&par->bo_mutex);
return ret;
@ -713,18 +704,14 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
par->max_width = fb_width;
par->max_height = fb_height;
drm_modeset_lock_all(vmw_priv->dev);
ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
par->max_height, &par->con,
&par->crtc, &init_mode);
if (ret) {
drm_modeset_unlock_all(vmw_priv->dev);
if (ret)
goto err_kms;
}
info->var.xres = init_mode->hdisplay;
info->var.yres = init_mode->vdisplay;
drm_modeset_unlock_all(vmw_priv->dev);
/*
* Create buffers and alloc memory
@ -832,7 +819,9 @@ int vmw_fb_close(struct vmw_private *vmw_priv)
cancel_delayed_work_sync(&par->local_work);
unregister_framebuffer(info);
mutex_lock(&par->bo_mutex);
(void) vmw_fb_kms_detach(par, true, true);
mutex_unlock(&par->bo_mutex);
vfree(par->vmalloc);
framebuffer_release(info);

View File

@ -2595,6 +2595,7 @@ void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
out_fence, NULL);
vmw_dmabuf_unreference(&ctx->buf);
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
@ -2680,7 +2681,9 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
struct vmw_display_unit *du;
struct drm_display_mode *mode;
int i = 0;
int ret = 0;
mutex_lock(&dev_priv->dev->mode_config.mutex);
list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
head) {
if (i == unit)
@ -2691,7 +2694,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
if (i != unit) {
DRM_ERROR("Could not find initial display unit.\n");
return -EINVAL;
ret = -EINVAL;
goto out_unlock;
}
if (list_empty(&con->modes))
@ -2699,7 +2703,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
if (list_empty(&con->modes)) {
DRM_ERROR("Could not find initial display mode.\n");
return -EINVAL;
ret = -EINVAL;
goto out_unlock;
}
du = vmw_connector_to_du(con);
@ -2720,7 +2725,10 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
head);
}
return 0;
out_unlock:
mutex_unlock(&dev_priv->dev->mode_config.mutex);
return ret;
}
/**

View File

@ -135,6 +135,219 @@ struct drm_exynos_g2d_exec {
__u64 async;
};
/* Exynos DRM IPP v2 API */
/**
* Enumerate available IPP hardware modules.
*
* @count_ipps: size of ipp_id array / number of ipp modules (set by driver)
* @reserved: padding
* @ipp_id_ptr: pointer to ipp_id array or NULL
*/
struct drm_exynos_ioctl_ipp_get_res {
__u32 count_ipps;
__u32 reserved;
__u64 ipp_id_ptr;
};
enum drm_exynos_ipp_format_type {
DRM_EXYNOS_IPP_FORMAT_SOURCE = 0x01,
DRM_EXYNOS_IPP_FORMAT_DESTINATION = 0x02,
};
struct drm_exynos_ipp_format {
__u32 fourcc;
__u32 type;
__u64 modifier;
};
enum drm_exynos_ipp_capability {
DRM_EXYNOS_IPP_CAP_CROP = 0x01,
DRM_EXYNOS_IPP_CAP_ROTATE = 0x02,
DRM_EXYNOS_IPP_CAP_SCALE = 0x04,
DRM_EXYNOS_IPP_CAP_CONVERT = 0x08,
};
/**
* Get IPP hardware capabilities and supported image formats.
*
* @ipp_id: id of IPP module to query
* @capabilities: bitmask of drm_exynos_ipp_capability (set by driver)
* @reserved: padding
* @formats_count: size of formats array (in entries) / number of filled
* formats (set by driver)
* @formats_ptr: pointer to formats array or NULL
*/
struct drm_exynos_ioctl_ipp_get_caps {
__u32 ipp_id;
__u32 capabilities;
__u32 reserved;
__u32 formats_count;
__u64 formats_ptr;
};
enum drm_exynos_ipp_limit_type {
/* size (horizontal/vertical) limits, in pixels (min, max, alignment) */
DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE = 0x0001,
/* scale ratio (horizontal/vertical), 16.16 fixed point (min, max) */
DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE = 0x0002,
/* image buffer area */
DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER = 0x0001 << 16,
/* src/dst rectangle area */
DRM_EXYNOS_IPP_LIMIT_SIZE_AREA = 0x0002 << 16,
/* src/dst rectangle area when rotation enabled */
DRM_EXYNOS_IPP_LIMIT_SIZE_ROTATED = 0x0003 << 16,
DRM_EXYNOS_IPP_LIMIT_TYPE_MASK = 0x000f,
DRM_EXYNOS_IPP_LIMIT_SIZE_MASK = 0x000f << 16,
};
struct drm_exynos_ipp_limit_val {
__u32 min;
__u32 max;
__u32 align;
__u32 reserved;
};
/**
* IPP module limitation.
*
* @type: limit type (see drm_exynos_ipp_limit_type enum)
* @reserved: padding
* @h: horizontal limits
* @v: vertical limits
*/
struct drm_exynos_ipp_limit {
__u32 type;
__u32 reserved;
struct drm_exynos_ipp_limit_val h;
struct drm_exynos_ipp_limit_val v;
};
/**
* Get IPP limits for given image format.
*
* @ipp_id: id of IPP module to query
* @fourcc: image format code (see DRM_FORMAT_* in drm_fourcc.h)
* @modifier: image format modifier (see DRM_FORMAT_MOD_* in drm_fourcc.h)
* @type: source/destination identifier (drm_exynos_ipp_format_flag enum)
* @limits_count: size of limits array (in entries) / number of filled entries
* (set by driver)
* @limits_ptr: pointer to limits array or NULL
*/
struct drm_exynos_ioctl_ipp_get_limits {
__u32 ipp_id;
__u32 fourcc;
__u64 modifier;
__u32 type;
__u32 limits_count;
__u64 limits_ptr;
};
enum drm_exynos_ipp_task_id {
/* buffer described by struct drm_exynos_ipp_task_buffer */
DRM_EXYNOS_IPP_TASK_BUFFER = 0x0001,
/* rectangle described by struct drm_exynos_ipp_task_rect */
DRM_EXYNOS_IPP_TASK_RECTANGLE = 0x0002,
/* transformation described by struct drm_exynos_ipp_task_transform */
DRM_EXYNOS_IPP_TASK_TRANSFORM = 0x0003,
/* alpha configuration described by struct drm_exynos_ipp_task_alpha */
DRM_EXYNOS_IPP_TASK_ALPHA = 0x0004,
/* source image data (for buffer and rectangle chunks) */
DRM_EXYNOS_IPP_TASK_TYPE_SOURCE = 0x0001 << 16,
/* destination image data (for buffer and rectangle chunks) */
DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION = 0x0002 << 16,
};
/**
* Memory buffer with image data.
*
* @id: must be DRM_EXYNOS_IPP_TASK_BUFFER
* other parameters are same as for AddFB2 generic DRM ioctl
*/
struct drm_exynos_ipp_task_buffer {
__u32 id;
__u32 fourcc;
__u32 width, height;
__u32 gem_id[4];
__u32 offset[4];
__u32 pitch[4];
__u64 modifier;
};
/**
* Rectangle for processing.
*
* @id: must be DRM_EXYNOS_IPP_TASK_RECTANGLE
* @reserved: padding
* @x,@y: left corner in pixels
* @w,@h: width/height in pixels
*/
struct drm_exynos_ipp_task_rect {
__u32 id;
__u32 reserved;
__u32 x;
__u32 y;
__u32 w;
__u32 h;
};
/**
* Image transformation description.
*
* @id: must be DRM_EXYNOS_IPP_TASK_TRANSFORM
* @rotation: DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* values
*/
struct drm_exynos_ipp_task_transform {
__u32 id;
__u32 rotation;
};
/**
* Image global alpha configuration for formats without alpha values.
*
* @id: must be DRM_EXYNOS_IPP_TASK_ALPHA
* @value: global alpha value (0-255)
*/
struct drm_exynos_ipp_task_alpha {
__u32 id;
__u32 value;
};
enum drm_exynos_ipp_flag {
/* generate DRM event after processing */
DRM_EXYNOS_IPP_FLAG_EVENT = 0x01,
/* dry run, only check task parameters */
DRM_EXYNOS_IPP_FLAG_TEST_ONLY = 0x02,
/* non-blocking processing */
DRM_EXYNOS_IPP_FLAG_NONBLOCK = 0x04,
};
#define DRM_EXYNOS_IPP_FLAGS (DRM_EXYNOS_IPP_FLAG_EVENT |\
DRM_EXYNOS_IPP_FLAG_TEST_ONLY | DRM_EXYNOS_IPP_FLAG_NONBLOCK)
/**
* Perform image processing described by array of drm_exynos_ipp_task_*
* structures (parameters array).
*
* @ipp_id: id of IPP module to run the task
* @flags: bitmask of drm_exynos_ipp_flag values
* @reserved: padding
* @params_size: size of parameters array (in bytes)
* @params_ptr: pointer to parameters array or NULL
* @user_data: (optional) data for drm event
*/
struct drm_exynos_ioctl_ipp_commit {
__u32 ipp_id;
__u32 flags;
__u32 reserved;
__u32 params_size;
__u64 params_ptr;
__u64 user_data;
};
#define DRM_EXYNOS_GEM_CREATE 0x00
#define DRM_EXYNOS_GEM_MAP 0x01
/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
@ -147,6 +360,11 @@ struct drm_exynos_g2d_exec {
#define DRM_EXYNOS_G2D_EXEC 0x22
/* Reserved 0x30 ~ 0x33 for obsolete Exynos IPP ioctls */
/* IPP - Image Post Processing */
#define DRM_EXYNOS_IPP_GET_RESOURCES 0x40
#define DRM_EXYNOS_IPP_GET_CAPS 0x41
#define DRM_EXYNOS_IPP_GET_LIMITS 0x42
#define DRM_EXYNOS_IPP_COMMIT 0x43
#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
@ -165,8 +383,20 @@ struct drm_exynos_g2d_exec {
#define DRM_IOCTL_EXYNOS_G2D_EXEC DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec)
#define DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_IPP_GET_RESOURCES, \
struct drm_exynos_ioctl_ipp_get_res)
#define DRM_IOCTL_EXYNOS_IPP_GET_CAPS DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_IPP_GET_CAPS, struct drm_exynos_ioctl_ipp_get_caps)
#define DRM_IOCTL_EXYNOS_IPP_GET_LIMITS DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_IPP_GET_LIMITS, \
struct drm_exynos_ioctl_ipp_get_limits)
#define DRM_IOCTL_EXYNOS_IPP_COMMIT DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_IPP_COMMIT, struct drm_exynos_ioctl_ipp_commit)
/* EXYNOS specific events */
#define DRM_EXYNOS_G2D_EVENT 0x80000000
#define DRM_EXYNOS_IPP_EVENT 0x80000002
struct drm_exynos_g2d_event {
struct drm_event base;
@ -177,6 +407,16 @@ struct drm_exynos_g2d_event {
__u32 reserved;
};
struct drm_exynos_ipp_event {
struct drm_event base;
__u64 user_data;
__u32 tv_sec;
__u32 tv_usec;
__u32 ipp_id;
__u32 sequence;
__u64 reserved;
};
#if defined(__cplusplus)
}
#endif