commit 60f2f74978
Merge tag 'drm-msm-next-2020-12-07' of https://gitlab.freedesktop.org/drm/msm into drm-next

* Shutdown hook for GPU (to ensure GPU is idle before iommu goes away)
* GPU cooling device support
* DSI 7nm and 10nm phy/pll updates
* Additional sm8150/sm8250 DPU support (merge_3d and DSPP color processing)
* Various DP fixes
* A whole bunch of W=1 fixes from Lee Jones
* GEM locking re-work (no more trylock_recursive in shrinker!)
* LLCC (system cache) support
* Various other fixes/cleanups

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGt0G=H3_RbF_GAQv838z5uujSmFd+7fYhL6Yg=23LwZ=g@mail.gmail.com
@@ -39,6 +39,10 @@ Required properties:
a4xx Snapdragon SoCs. See
Documentation/devicetree/bindings/sram/qcom,ocmem.yaml.

Optional properties:
- #cooling-cells: The value must be 2. For details, please refer
Documentation/devicetree/bindings/thermal/thermal-cooling-devices.yaml.

Example 3xx/4xx:

/ {
@@ -61,6 +65,7 @@ Example 3xx/4xx:
power-domains = <&mmcc OXILICX_GDSC>;
operating-points-v2 = <&gpu_opp_table>;
iommus = <&gpu_iommu 0>;
#cooling-cells = <2>;
};

gpu_sram: ocmem@fdd00000 {
@@ -98,6 +103,8 @@ Example a6xx (with GMU):
reg = <0x5000000 0x40000>, <0x509e000 0x10>;
reg-names = "kgsl_3d0_reg_memory", "cx_mem";

#cooling-cells = <2>;

/*
* Look ma, no clocks! The GPU clocks and power are
* controlled entirely by the GMU
@@ -4,8 +4,8 @@ config DRM_MSM
tristate "MSM DRM"
depends on DRM
depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST)
depends on IOMMU_SUPPORT
depends on OF && COMMON_CLK
depends on MMU
depends on QCOM_OCMEM || QCOM_OCMEM=n
select IOMMU_IO_PGTABLE
select QCOM_MDT_LOADER if ARCH_QCOM
@@ -67,6 +67,7 @@ msm-y := \
disp/dpu1/dpu_hw_pingpong.o \
disp/dpu1/dpu_hw_sspp.o \
disp/dpu1/dpu_hw_dspp.o \
disp/dpu1/dpu_hw_merge3d.o \
disp/dpu1/dpu_hw_top.o \
disp/dpu1/dpu_hw_util.o \
disp/dpu1/dpu_hw_vbif.o \
@@ -519,6 +519,8 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct icc_path *ocmem_icc_path;
struct icc_path *icc_path;
int ret;

if (!pdev) {
@@ -566,13 +568,28 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
goto fail;
}

icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
ret = IS_ERR(icc_path);
if (ret)
goto fail;

ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
ret = IS_ERR(ocmem_icc_path);
if (ret) {
/* allow -ENODATA, ocmem icc is optional */
if (ret != -ENODATA)
goto fail;
ocmem_icc_path = NULL;
}

/*
* Set the ICC path to maximum speed for now by multiplying the fastest
* frequency by the bus width (8). We'll want to scale this later on to
* improve battery life.
*/
icc_set_bw(gpu->icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
icc_set_bw(gpu->ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
icc_set_bw(icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
icc_set_bw(ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);

return gpu;
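The comment in the hunk above caps the interconnect request by multiplying the fastest GPU clock by an assumed bus width of 8 bytes. A standalone sketch of that arithmetic (not from the patch itself) is below, using a hypothetical fast_rate and a userspace re-definition of Bps_to_icc(), which converts bytes/s to the kBps units the interconnect framework uses.

```c
#include <stdio.h>
#include <stdint.h>

/* Mirrors include/linux/interconnect.h: icc bandwidth is expressed in kBps. */
#define Bps_to_icc(x) ((x) / 1000)

int main(void)
{
	/* Hypothetical fastest GPU clock from the OPP table, in Hz. */
	uint64_t fast_rate = 450000000;

	/* One byte per clock per lane, assumed 8-byte bus width -> peak request. */
	uint64_t peak_kbps = Bps_to_icc(fast_rate) * 8;

	printf("peak request: %llu kBps (~%.1f GB/s)\n",
	       (unsigned long long)peak_kbps, peak_kbps / 1e6);
	return 0;
}
```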
@@ -648,6 +648,8 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct icc_path *ocmem_icc_path;
struct icc_path *icc_path;
int ret;

if (!pdev) {
@@ -694,13 +696,27 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
goto fail;
}

icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
ret = IS_ERR(icc_path);
if (ret)
goto fail;

ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
ret = IS_ERR(ocmem_icc_path);
if (ret) {
/* allow -ENODATA, ocmem icc is optional */
if (ret != -ENODATA)
goto fail;
ocmem_icc_path = NULL;
}

/*
* Set the ICC path to maximum speed for now by multiplying the fastest
* frequency by the bus width (8). We'll want to scale this later on to
* improve battery life.
*/
icc_set_bw(gpu->icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
icc_set_bw(gpu->ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
icc_set_bw(icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
icc_set_bw(ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);

return gpu;
@@ -36,7 +36,7 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
}

spin_lock_irqsave(&ring->lock, flags);
spin_lock_irqsave(&ring->preempt_lock, flags);

/* Copy the shadow to the actual register */
ring->cur = ring->next;
@@ -44,7 +44,7 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
/* Make sure to wrap wptr if we need to */
wptr = get_wptr(ring);

spin_unlock_irqrestore(&ring->lock, flags);
spin_unlock_irqrestore(&ring->preempt_lock, flags);

/* Make sure everything is posted before making a decision */
mb();
@@ -426,7 +426,7 @@ static int a5xx_preempt_start(struct msm_gpu *gpu)
static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu,
struct drm_gem_object *obj)
{
u32 *buf = msm_gem_get_vaddr_active(obj);
u32 *buf = msm_gem_get_vaddr(obj);

if (IS_ERR(buf))
return;
@@ -755,12 +755,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);

/* Disable preemption if WHERE_AM_I isn't available */
if (!a5xx_gpu->has_whereami && gpu->nr_rings > 1) {
a5xx_preempt_fini(gpu);
gpu->nr_rings = 1;
} else {
/* Create a privileged buffer for the RPTR shadow */
/* Create a privileged buffer for the RPTR shadow */
if (a5xx_gpu->has_whereami) {
if (!a5xx_gpu->shadow_bo) {
a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
sizeof(u32) * gpu->nr_rings,
@@ -774,6 +770,10 @@ static int a5xx_hw_init(struct msm_gpu *gpu)

gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
REG_A5XX_CP_RB_RPTR_ADDR_HI, shadowptr(a5xx_gpu, gpu->rb[0]));
} else if (gpu->nr_rings > 1) {
/* Disable preemption if WHERE_AM_I isn't available */
a5xx_preempt_fini(gpu);
gpu->nr_rings = 1;
}

a5xx_preempt_hw_init(gpu);
@@ -1056,7 +1056,6 @@ static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);

DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
@@ -1072,7 +1071,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
/* Turn off the hangcheck timer to keep it from bothering us */
del_timer(&gpu->hangcheck_timer);

queue_work(priv->wq, &gpu->recover_work);
kthread_queue_work(gpu->worker, &gpu->recover_work);
}

#define RBBM_ERROR_MASK \
@@ -1207,7 +1206,9 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
static int a5xx_pm_suspend(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
u32 mask = 0xf;
int i, ret;

/* A510 has 3 XIN ports in VBIF */
if (adreno_is_a510(adreno_gpu))
@@ -1227,7 +1228,15 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);

return msm_gpu_pm_suspend(gpu);
ret = msm_gpu_pm_suspend(gpu);
if (ret)
return ret;

if (a5xx_gpu->has_whereami)
for (i = 0; i < gpu->nr_rings; i++)
a5xx_gpu->shadow[i] = 0;

return 0;
}

static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
@@ -45,9 +45,9 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
if (!ring)
return;

spin_lock_irqsave(&ring->lock, flags);
spin_lock_irqsave(&ring->preempt_lock, flags);
wptr = get_wptr(ring);
spin_unlock_irqrestore(&ring->lock, flags);
spin_unlock_irqrestore(&ring->preempt_lock, flags);

gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
}
@@ -62,9 +62,9 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
bool empty;
struct msm_ringbuffer *ring = gpu->rb[i];

spin_lock_irqsave(&ring->lock, flags);
spin_lock_irqsave(&ring->preempt_lock, flags);
empty = (get_wptr(ring) == ring->memptrs->rptr);
spin_unlock_irqrestore(&ring->lock, flags);
spin_unlock_irqrestore(&ring->preempt_lock, flags);

if (!empty)
return ring;
@@ -78,13 +78,12 @@ static void a5xx_preempt_timer(struct timer_list *t)
struct a5xx_gpu *a5xx_gpu = from_timer(a5xx_gpu, t, preempt_timer);
struct msm_gpu *gpu = &a5xx_gpu->base.base;
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;

if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
return;

DRM_DEV_ERROR(dev->dev, "%s: preemption timed out\n", gpu->name);
queue_work(priv->wq, &gpu->recover_work);
kthread_queue_work(gpu->worker, &gpu->recover_work);
}

/* Try to trigger a preemption switch */
@@ -132,9 +131,9 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
}

/* Make sure the wptr doesn't update while we're in motion */
spin_lock_irqsave(&ring->lock, flags);
spin_lock_irqsave(&ring->preempt_lock, flags);
a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
spin_unlock_irqrestore(&ring->lock, flags);
spin_unlock_irqrestore(&ring->preempt_lock, flags);

/* Set the address of the incoming preemption record */
gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
@@ -162,7 +161,6 @@ void a5xx_preempt_irq(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;

if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING))
return;
@@ -181,7 +179,7 @@ void a5xx_preempt_irq(struct msm_gpu *gpu)
set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
DRM_DEV_ERROR(dev->dev, "%s: Preemption failed to complete\n",
gpu->name);
queue_work(priv->wq, &gpu->recover_work);
kthread_queue_work(gpu->worker, &gpu->recover_work);
return;
}
@@ -19,8 +19,6 @@ static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;

/* FIXME: add a banner here */
gmu->hung = true;
@@ -29,7 +27,7 @@ static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
del_timer(&gpu->hangcheck_timer);

/* Queue the GPU handler because we need to treat this as a recovery */
queue_work(priv->wq, &gpu->recover_work);
kthread_queue_work(gpu->worker, &gpu->recover_work);
}

static irqreturn_t a6xx_gmu_irq(int irq, void *data)
@@ -8,7 +8,9 @@
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"

#include <linux/bitfield.h>
#include <linux/devfreq.h>
#include <linux/soc/qcom/llcc-qcom.h>

#define GPU_PAS_ID 13

@@ -30,7 +32,7 @@ static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
}

bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
/* wait for CP to drain ringbuffer: */
if (!adreno_idle(gpu, ring))
@@ -65,7 +67,7 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
}

spin_lock_irqsave(&ring->lock, flags);
spin_lock_irqsave(&ring->preempt_lock, flags);

/* Copy the shadow to the actual register */
ring->cur = ring->next;
@@ -73,7 +75,7 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
/* Make sure to wrap wptr if we need to */
wptr = get_wptr(ring);

spin_unlock_irqrestore(&ring->lock, flags);
spin_unlock_irqrestore(&ring->preempt_lock, flags);

/* Make sure everything is posted before making a decision */
mb();
@@ -522,7 +524,7 @@ static int a6xx_cp_init(struct msm_gpu *gpu)
static void a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
struct drm_gem_object *obj)
{
u32 *buf = msm_gem_get_vaddr_active(obj);
u32 *buf = msm_gem_get_vaddr(obj);

if (IS_ERR(buf))
return;
@@ -965,8 +967,6 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);

/*
@@ -989,7 +989,7 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
/* Turn off the hangcheck timer to keep it from bothering us */
del_timer(&gpu->hangcheck_timer);

queue_work(priv->wq, &gpu->recover_work);
kthread_queue_work(gpu->worker, &gpu->recover_work);
}

static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
@@ -1022,6 +1022,105 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
return IRQ_HANDLED;
}

static void a6xx_llc_rmw(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 mask, u32 or)
{
return msm_rmw(a6xx_gpu->llc_mmio + (reg << 2), mask, or);
}

static void a6xx_llc_write(struct a6xx_gpu *a6xx_gpu, u32 reg, u32 value)
{
return msm_writel(value, a6xx_gpu->llc_mmio + (reg << 2));
}

static void a6xx_llc_deactivate(struct a6xx_gpu *a6xx_gpu)
{
llcc_slice_deactivate(a6xx_gpu->llc_slice);
llcc_slice_deactivate(a6xx_gpu->htw_llc_slice);
}

static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
u32 cntl1_regval = 0;

if (IS_ERR(a6xx_gpu->llc_mmio))
return;

if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);

gpu_scid &= 0x1f;
cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
(gpu_scid << 15) | (gpu_scid << 20);
}

/*
* For targets with a MMU500, activate the slice but don't program the
* register. The XBL will take care of that.
*/
if (!llcc_slice_activate(a6xx_gpu->htw_llc_slice)) {
if (!a6xx_gpu->have_mmu500) {
u32 gpuhtw_scid = llcc_get_slice_id(a6xx_gpu->htw_llc_slice);

gpuhtw_scid &= 0x1f;
cntl1_regval |= FIELD_PREP(GENMASK(29, 25), gpuhtw_scid);
}
}

if (cntl1_regval) {
/*
* Program the slice IDs for the various GPU blocks and GPU MMU
* pagetables
*/
if (a6xx_gpu->have_mmu500)
gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0),
cntl1_regval);
else {
a6xx_llc_write(a6xx_gpu,
REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1, cntl1_regval);

/*
* Program cacheability overrides to not allocate cache
* lines on a write miss
*/
a6xx_llc_rmw(a6xx_gpu,
REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0, 0xF, 0x03);
}
}
}

static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
{
llcc_slice_putd(a6xx_gpu->llc_slice);
llcc_slice_putd(a6xx_gpu->htw_llc_slice);
}

static void a6xx_llc_slices_init(struct platform_device *pdev,
struct a6xx_gpu *a6xx_gpu)
{
struct device_node *phandle;

a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
if (IS_ERR(a6xx_gpu->llc_mmio))
return;

/*
* There is a different programming path for targets with an mmu500
* attached, so detect if that is the case
*/
phandle = of_parse_phandle(pdev->dev.of_node, "iommus", 0);
a6xx_gpu->have_mmu500 = (phandle &&
of_device_is_compatible(phandle, "arm,mmu-500"));
of_node_put(phandle);

a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);

if (IS_ERR(a6xx_gpu->llc_slice) && IS_ERR(a6xx_gpu->htw_llc_slice))
a6xx_gpu->llc_mmio = ERR_PTR(-EINVAL);
}

static int a6xx_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -1038,6 +1137,8 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)

msm_gpu_resume_devfreq(gpu);

a6xx_llc_activate(a6xx_gpu);

return 0;
}

@@ -1045,12 +1146,23 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
int i, ret;

trace_msm_gpu_suspend(0);

a6xx_llc_deactivate(a6xx_gpu);

devfreq_suspend_device(gpu->devfreq.devfreq);

return a6xx_gmu_stop(a6xx_gpu);
ret = a6xx_gmu_stop(a6xx_gpu);
if (ret)
return ret;

if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
for (i = 0; i < gpu->nr_rings; i++)
a6xx_gpu->shadow[i] = 0;

return 0;
}

static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
@@ -1091,6 +1203,8 @@ static void a6xx_destroy(struct msm_gpu *gpu)
drm_gem_object_put(a6xx_gpu->shadow_bo);
}

a6xx_llc_slices_destroy(a6xx_gpu);

a6xx_gmu_remove(a6xx_gpu);

adreno_gpu_cleanup(adreno_gpu);
@@ -1209,6 +1323,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
if (info && info->revn == 650)
adreno_gpu->base.hw_apriv = true;

a6xx_llc_slices_init(pdev, a6xx_gpu);

ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
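a6xx_llc_activate() in the hunk above packs one 5-bit LLCC slice ID (SCID) into five GPU sub-block fields of SYSTEM_CACHE_CNTL_1 and, when no MMU-500 is present, adds the pagetable-walker SCID in bits 29:25. A standalone sketch of that bit-packing (not from the patch itself), with hypothetical slice IDs and userspace stand-ins for GENMASK()/FIELD_PREP():

```c
#include <stdio.h>
#include <stdint.h>

/* Userspace stand-ins for the kernel's GENMASK()/FIELD_PREP() helpers. */
#define GENMASK(h, l)    (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v) (((v) << __builtin_ctz(m)) & (m))

int main(void)
{
	uint32_t gpu_scid = 9;     /* hypothetical LLCC_GPU slice ID */
	uint32_t gpuhtw_scid = 10; /* hypothetical LLCC_GPUHTW slice ID */
	uint32_t cntl1 = 0;

	/* The same 5-bit SCID is replicated into five GPU block fields. */
	gpu_scid &= 0x1f;
	cntl1 = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
		(gpu_scid << 15) | (gpu_scid << 20);

	/* Without an MMU-500, the pagetable-walker SCID lands in bits 29:25. */
	gpuhtw_scid &= 0x1f;
	cntl1 |= FIELD_PREP(GENMASK(29, 25), gpuhtw_scid);

	printf("SYSTEM_CACHE_CNTL_1 = 0x%08x\n", cntl1);
	return 0;
}
```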
@@ -28,6 +28,11 @@ struct a6xx_gpu {
uint32_t *shadow;

bool has_whereami;

void __iomem *llc_mmio;
void *llc_slice;
void *htw_llc_slice;
bool have_mmu500;
};

#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
@@ -80,7 +80,7 @@ struct a6xx_state_memobj {
unsigned long long data[];
};

void *state_kcalloc(struct a6xx_gpu_state *a6xx_state, int nr, size_t objsize)
static void *state_kcalloc(struct a6xx_gpu_state *a6xx_state, int nr, size_t objsize)
{
struct a6xx_state_memobj *obj =
kzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL);
@@ -92,7 +92,7 @@ void *state_kcalloc(struct a6xx_gpu_state *a6xx_state, int nr, size_t objsize)
return &obj->data;
}

void *state_kmemdup(struct a6xx_gpu_state *a6xx_state, void *src,
static void *state_kmemdup(struct a6xx_gpu_state *a6xx_state, void *src,
size_t size)
{
void *dst = state_kcalloc(a6xx_state, 1, size);
@@ -944,7 +944,7 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
return &a6xx_state->base;
}

void a6xx_gpu_state_destroy(struct kref *kref)
static void a6xx_gpu_state_destroy(struct kref *kref)
{
struct a6xx_state_memobj *obj, *tmp;
struct msm_gpu_state *state = container_of(kref,
@@ -475,6 +475,11 @@ static int adreno_remove(struct platform_device *pdev)
return 0;
}

static void adreno_shutdown(struct platform_device *pdev)
{
pm_runtime_force_suspend(&pdev->dev);
}

static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,adreno" },
{ .compatible = "qcom,adreno-3xx" },
@@ -509,6 +514,7 @@ static const struct dev_pm_ops adreno_pm_ops = {
static struct platform_driver adreno_driver = {
.probe = adreno_probe,
.remove = adreno_remove,
.shutdown = adreno_shutdown,
.driver = {
.name = "adreno",
.of_match_table = dt_match,
@@ -16,6 +16,7 @@
#include <linux/soc/qcom/mdt_loader.h>
#include <soc/qcom/ocmem.h>
#include "adreno_gpu.h"
#include "a6xx_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"

@@ -189,6 +190,9 @@ struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct io_pgtable_domain_attr pgtbl_cfg;
struct iommu_domain *iommu;
struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
@@ -198,7 +202,20 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
if (!iommu)
return NULL;

/*
* This allows GPU to set the bus attributes required to use system
* cache on behalf of the iommu page table walker.
*/
if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
}

mmu = msm_iommu_new(&pdev->dev, iommu);
if (IS_ERR(mmu)) {
iommu_domain_free(iommu);
return ERR_CAST(mmu);
}

/*
* Use the aperture start or SZ_16M, whichever is greater. This will
@@ -899,7 +916,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_platform_config *config = dev->platform_data;
struct msm_gpu_config adreno_gpu_config = { 0 };
struct msm_gpu *gpu = &adreno_gpu->base;
int ret;

adreno_gpu->funcs = funcs;
adreno_gpu->info = adreno_info(config->rev);
@@ -918,37 +934,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);

ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
adreno_gpu->info->name, &adreno_gpu_config);
if (ret)
return ret;

/*
* The legacy case, before "interconnect-names", only has a
* single interconnect path which is equivalent to "gfx-mem"
*/
if (!of_find_property(dev->of_node, "interconnect-names", NULL)) {
gpu->icc_path = of_icc_get(dev, NULL);
} else {
gpu->icc_path = of_icc_get(dev, "gfx-mem");
gpu->ocmem_icc_path = of_icc_get(dev, "ocmem");
}

if (IS_ERR(gpu->icc_path)) {
ret = PTR_ERR(gpu->icc_path);
gpu->icc_path = NULL;
return ret;
}

if (IS_ERR(gpu->ocmem_icc_path)) {
ret = PTR_ERR(gpu->ocmem_icc_path);
gpu->ocmem_icc_path = NULL;
/* allow -ENODATA, ocmem icc is optional */
if (ret != -ENODATA)
return ret;
}

return 0;
}

void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
@@ -22,6 +22,7 @@
* @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
* @DPU_PERF_MODE_MINIMUM: performance bounded by minimum setting
* @DPU_PERF_MODE_FIXED: performance bounded by fixed setting
* @DPU_PERF_MODE_MAX: maximum value, used for error checking
*/
enum dpu_perf_mode {
DPU_PERF_MODE_NORMAL,
@@ -31,9 +32,9 @@ enum dpu_perf_mode {
};

/**
* @_dpu_core_perf_calc_bw() - to calculate BW per crtc
* @kms - pointer to the dpu_kms
* @crtc - pointer to a crtc
* _dpu_core_perf_calc_bw() - to calculate BW per crtc
* @kms: pointer to the dpu_kms
* @crtc: pointer to a crtc
* Return: returns aggregated BW for all planes in crtc.
*/
static u64 _dpu_core_perf_calc_bw(struct dpu_kms *kms,
@@ -63,9 +64,9 @@ static u64 _dpu_core_perf_calc_bw(struct dpu_kms *kms,

/**
* _dpu_core_perf_calc_clk() - to calculate clock per crtc
* @kms - pointer to the dpu_kms
* @crtc - pointer to a crtc
* @state - pointer to a crtc state
* @kms: pointer to the dpu_kms
* @crtc: pointer to a crtc
* @state: pointer to a crtc state
* Return: returns max clk for all planes in crtc.
*/
static u64 _dpu_core_perf_calc_clk(struct dpu_kms *kms,
@@ -110,14 +111,11 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
struct drm_crtc_state *state,
struct dpu_core_perf_params *perf)
{
struct dpu_crtc_state *dpu_cstate;

if (!kms || !kms->catalog || !crtc || !state || !perf) {
DPU_ERROR("invalid parameters\n");
return;
}

dpu_cstate = to_dpu_crtc_state(state);
memset(perf, 0, sizeof(struct dpu_core_perf_params));

if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
@@ -219,9 +217,6 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
int i, ret = 0;
u64 avg_bw;

if (!kms->num_paths)
return -EINVAL;

drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (tmp_crtc->enabled &&
curr_client_type ==
@@ -239,6 +234,9 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
}
}

if (!kms->num_paths)
return 0;

avg_bw = perf.bw_ctl;
do_div(avg_bw, (kms->num_paths * 1000)); /*Bps_to_icc*/

@@ -249,8 +247,8 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
}

/**
* @dpu_core_perf_crtc_release_bw() - request zero bandwidth
* @crtc - pointer to a crtc
* dpu_core_perf_crtc_release_bw() - request zero bandwidth
* @crtc: pointer to a crtc
*
* Function checks a state variable for the crtc, if all pending commit
* requests are done, meaning no more bandwidth is needed, release
@@ -845,7 +845,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
}

mode = &crtc_state->adjusted_mode;
DPU_DEBUG("%s: check", dpu_crtc->name);
DPU_DEBUG("%s: check\n", dpu_crtc->name);

/* force a full mode set if active state changed */
if (crtc_state->active_changed)
@@ -953,7 +953,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
}

pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
DPU_DEBUG("%s: zpos %d", dpu_crtc->name, z_pos);
DPU_DEBUG("%s: zpos %d\n", dpu_crtc->name, z_pos);
}

for (i = 0; i < multirect_count; i++) {
@@ -132,9 +132,10 @@ enum dpu_enc_rc_states {
* @phys_encs: Container of physical encoders managed.
* @cur_master: Pointer to the current master in this mode. Optimization
* Only valid after enable. Cleared as disable.
* @hw_pp Handle to the pingpong blocks used for the display. No.
* @cur_slave: As above but for the slave encoder.
* @hw_pp: Handle to the pingpong blocks used for the display. No.
* pingpong blocks can be different than num_phys_encs.
* @intfs_swapped Whether or not the phys_enc interfaces have been swapped
* @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
* for partial update right-only cases, such as pingpong
* split where virtual pingpong does not generate IRQs
* @crtc: Pointer to the currently assigned crtc. Normally you
@@ -973,12 +974,11 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct drm_crtc *drm_crtc;
struct dpu_crtc_state *cstate;
struct dpu_global_state *global_state;
struct msm_display_topology topology;
struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
int num_lm, num_ctl, num_pp, num_dspp;
int num_lm, num_ctl, num_pp;
int i, j;

if (!drm_enc) {
@@ -1020,8 +1020,6 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
if (drm_crtc->state->encoder_mask & drm_encoder_mask(drm_enc))
break;

topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);

/* Query resource that have been reserved in atomic check step. */
num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
@@ -1030,7 +1028,7 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
ARRAY_SIZE(hw_dspp));

@@ -1096,7 +1094,6 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = NULL;
struct msm_drm_private *priv;
int i;

if (!drm_enc || !drm_enc->dev) {
@@ -1104,8 +1101,6 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
return;
}

priv = drm_enc->dev->dev_private;

dpu_enc = to_dpu_encoder_virt(drm_enc);
if (!dpu_enc || !dpu_enc->cur_master) {
DPU_ERROR("invalid dpu encoder/master\n");
@@ -1207,7 +1202,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = NULL;
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
int i = 0;

if (!drm_enc) {
@@ -1225,7 +1219,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
dpu_enc->enabled = false;

priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);

trace_dpu_enc_disable(DRMID(drm_enc));

@@ -1444,9 +1437,9 @@ static void dpu_encoder_off_work(struct work_struct *work)

/**
* _dpu_encoder_trigger_flush - trigger flush for a physical encoder
* drm_enc: Pointer to drm encoder structure
* phys: Pointer to physical encoder structure
* extra_flush_bits: Additional bit mask to include in flush trigger
* @drm_enc: Pointer to drm encoder structure
* @phys: Pointer to physical encoder structure
* @extra_flush_bits: Additional bit mask to include in flush trigger
*/
static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
@@ -1483,7 +1476,7 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,

/**
* _dpu_encoder_trigger_start - trigger start for a physical encoder
* phys: Pointer to physical encoder structure
* @phys: Pointer to physical encoder structure
*/
static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
@@ -1566,7 +1559,7 @@ static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
* encoder rather than the individual physical ones in order to handle
* use cases that require visibility into multiple physical encoders at
* a time.
* dpu_enc: Pointer to virtual encoder structure
* @dpu_enc: Pointer to virtual encoder structure
*/
static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
{
@@ -437,7 +437,6 @@ static void dpu_encoder_phys_cmd_enable_helper(
struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl;
u32 flush_mask = 0;

if (!phys_enc->hw_pp) {
DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
@@ -452,8 +451,7 @@ static void dpu_encoder_phys_cmd_enable_helper(
return;

ctl = phys_enc->hw_ctl;
ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
ctl->ops.update_pending_flush_intf(ctl, phys_enc->intf_idx);
}

static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
@@ -5,6 +5,7 @@
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_merge3d.h"
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_trace.h"
@@ -282,6 +283,8 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
if (phys_enc->hw_pp->merge_3d)
intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->id;

spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
@@ -295,6 +298,12 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
true,
phys_enc->hw_pp->idx);

if (phys_enc->hw_pp->merge_3d) {
struct dpu_hw_merge_3d *merge_3d = to_dpu_hw_merge_3d(phys_enc->hw_pp->merge_3d);

merge_3d->ops.setup_3d_mode(merge_3d, intf_cfg.mode_3d);
}

spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);

programmable_fetch_config(phys_enc, &timing_params);
@@ -429,8 +438,6 @@ end:
static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_ctl *ctl;
u32 flush_mask = 0;
u32 intf_flush_mask = 0;

ctl = phys_enc->hw_ctl;

@@ -452,20 +459,14 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
!dpu_encoder_phys_vid_is_master(phys_enc))
goto skip_flush;

ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->hw_intf->idx);
ctl->ops.update_pending_flush(ctl, flush_mask);

if (ctl->ops.get_bitmask_active_intf)
ctl->ops.get_bitmask_active_intf(ctl, &intf_flush_mask,
phys_enc->hw_intf->idx);

if (ctl->ops.update_pending_intf_flush)
ctl->ops.update_pending_intf_flush(ctl, intf_flush_mask);
ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
if (ctl->ops.update_pending_flush_merge_3d && phys_enc->hw_pp->merge_3d)
ctl->ops.update_pending_flush_merge_3d(ctl, phys_enc->hw_pp->merge_3d->id);

skip_flush:
DPU_DEBUG_VIDENC(phys_enc,
"update pending flush ctl %d flush_mask 0%x intf_mask 0x%x\n",
ctl->idx - CTL_0, flush_mask, intf_flush_mask);
"update pending flush ctl %d intf %d\n",
ctl->idx - CTL_0, phys_enc->hw_intf->idx);

/* ctl_flush & timing engine enable will be triggered by framework */
@@ -22,7 +22,7 @@
#define DPU_MAX_IMG_WIDTH 0x3FFF
#define DPU_MAX_IMG_HEIGHT 0x3FFF

/**
/*
* DPU supported format packing, bpp, and other format
* information.
* DPU currently only supports interleaved RGB formats
@@ -19,6 +19,7 @@ static LIST_HEAD(dpu_hw_blk_list);

/**
* dpu_hw_blk_init - initialize hw block object
* @hw_blk: pointer to hw block object
* @type: hw block type - enum dpu_hw_blk_type
* @id: instance id of the hw block
* @ops: Pointer to block operations
@@ -114,7 +115,6 @@ error_start:
/**
* dpu_hw_blk_put - put hw_blk to free pool if decremented refcount is zero
* @hw_blk: hw block to be freed
* @free_blk: function to be called when reference count goes to zero
*/
void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk)
{
@@ -8,7 +8,6 @@
#include <linux/platform_device.h>
#include "dpu_hw_mdss.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_catalog_format.h"
#include "dpu_kms.h"

#define VIG_MASK \
@@ -41,6 +40,8 @@
#define PINGPONG_SDM845_SPLIT_MASK \
(PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))

#define MERGE_3D_SM8150_MASK (0)

#define DSPP_SC7180_MASK BIT(DPU_DSPP_PCC)

#define INTF_SDM845_MASK (0)
@@ -60,6 +61,79 @@

#define STRCAT(X, Y) (X Y)

static const uint32_t plane_formats[] = {
DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888, DRM_FORMAT_RGBA8888, DRM_FORMAT_BGRA8888,
DRM_FORMAT_XRGB8888, DRM_FORMAT_RGBX8888, DRM_FORMAT_BGRX8888, DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGB888, DRM_FORMAT_BGR888, DRM_FORMAT_RGB565, DRM_FORMAT_BGR565,
DRM_FORMAT_ARGB1555, DRM_FORMAT_ABGR1555, DRM_FORMAT_RGBA5551, DRM_FORMAT_BGRA5551,
DRM_FORMAT_XRGB1555, DRM_FORMAT_XBGR1555, DRM_FORMAT_RGBX5551, DRM_FORMAT_BGRX5551,
DRM_FORMAT_ARGB4444, DRM_FORMAT_ABGR4444, DRM_FORMAT_RGBA4444, DRM_FORMAT_BGRA4444,
DRM_FORMAT_XRGB4444, DRM_FORMAT_XBGR4444, DRM_FORMAT_RGBX4444, DRM_FORMAT_BGRX4444,
};

static const uint32_t plane_formats_yuv[] = {
DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888, DRM_FORMAT_RGBA8888, DRM_FORMAT_BGRX8888,
DRM_FORMAT_BGRA8888, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_RGBX8888,
DRM_FORMAT_RGB888, DRM_FORMAT_BGR888, DRM_FORMAT_RGB565, DRM_FORMAT_BGR565,
DRM_FORMAT_ARGB1555, DRM_FORMAT_ABGR1555, DRM_FORMAT_RGBA5551, DRM_FORMAT_BGRA5551,
DRM_FORMAT_XRGB1555, DRM_FORMAT_XBGR1555, DRM_FORMAT_RGBX5551, DRM_FORMAT_BGRX5551,
DRM_FORMAT_ARGB4444, DRM_FORMAT_ABGR4444, DRM_FORMAT_RGBA4444, DRM_FORMAT_BGRA4444,
DRM_FORMAT_XRGB4444, DRM_FORMAT_XBGR4444, DRM_FORMAT_RGBX4444, DRM_FORMAT_BGRX4444,

DRM_FORMAT_NV12, DRM_FORMAT_NV21, DRM_FORMAT_NV16, DRM_FORMAT_NV61,
DRM_FORMAT_VYUY, DRM_FORMAT_UYVY, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU,
DRM_FORMAT_YUV420, DRM_FORMAT_YVU420,
};

/*************************************************************
* DPU sub blocks config
*************************************************************/
@@ -111,7 +185,6 @@ static const struct dpu_caps sm8150_dpu_caps = {
static const struct dpu_caps sm8250_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
.max_linewidth = 4096,
.qseed_type = DPU_SSPP_SCALER_QSEED3, /* TODO: qseed3 lite */
.smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
.ubwc_version = DPU_HW_UBWC_VER_40,
@@ -433,9 +506,9 @@ static const struct dpu_lm_cfg sc7180_lm[] = {

static const struct dpu_lm_cfg sm8150_lm[] = {
LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
&sdm845_lm_sblk, PINGPONG_0, LM_1, 0),
&sdm845_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
&sdm845_lm_sblk, PINGPONG_1, LM_0, 0),
&sdm845_lm_sblk, PINGPONG_1, LM_0, DSPP_1),
LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
&sdm845_lm_sblk, PINGPONG_2, LM_3, 0),
LM_BLK("lm_3", LM_3, 0x47000, MIXER_SDM845_MASK,
@@ -454,16 +527,28 @@ static const struct dpu_dspp_sub_blks sc7180_dspp_sblk = {
.len = 0x90, .version = 0x10000},
};

#define DSPP_BLK(_name, _id, _base) \
static const struct dpu_dspp_sub_blks sm8150_dspp_sblk = {
.pcc = {.id = DPU_DSPP_PCC, .base = 0x1700,
.len = 0x90, .version = 0x40000},
};

#define DSPP_BLK(_name, _id, _base, _sblk) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0x1800, \
.features = DSPP_SC7180_MASK, \
.sblk = &sc7180_dspp_sblk \
.sblk = _sblk \
}

static const struct dpu_dspp_cfg sc7180_dspp[] = {
DSPP_BLK("dspp_0", DSPP_0, 0x54000),
DSPP_BLK("dspp_0", DSPP_0, 0x54000, &sc7180_dspp_sblk),
};

static const struct dpu_dspp_cfg sm8150_dspp[] = {
DSPP_BLK("dspp_0", DSPP_0, 0x54000, &sm8150_dspp_sblk),
DSPP_BLK("dspp_1", DSPP_1, 0x56000, &sm8150_dspp_sblk),
DSPP_BLK("dspp_2", DSPP_2, 0x58000, &sm8150_dspp_sblk),
DSPP_BLK("dspp_3", DSPP_3, 0x5a000, &sm8150_dspp_sblk),
};

/*************************************************************
@@ -481,40 +566,59 @@ static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
.len = 0x20, .version = 0x10000},
};

#define PP_BLK_TE(_name, _id, _base) \
#define PP_BLK_TE(_name, _id, _base, _merge_3d) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0xd4, \
.features = PINGPONG_SDM845_SPLIT_MASK, \
.merge_3d = _merge_3d, \
.sblk = &sdm845_pp_sblk_te \
}
#define PP_BLK(_name, _id, _base) \
#define PP_BLK(_name, _id, _base, _merge_3d) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0xd4, \
.features = PINGPONG_SDM845_MASK, \
.merge_3d = _merge_3d, \
.sblk = &sdm845_pp_sblk \
}

static const struct dpu_pingpong_cfg sdm845_pp[] = {
PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
PP_BLK("pingpong_2", PINGPONG_2, 0x71000),
PP_BLK("pingpong_3", PINGPONG_3, 0x71800),
PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0),
PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0),
PP_BLK("pingpong_2", PINGPONG_2, 0x71000, 0),
PP_BLK("pingpong_3", PINGPONG_3, 0x71800, 0),
};

static struct dpu_pingpong_cfg sc7180_pp[] = {
PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0),
PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0),
};

static const struct dpu_pingpong_cfg sm8150_pp[] = {
PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
PP_BLK("pingpong_2", PINGPONG_2, 0x71000),
PP_BLK("pingpong_3", PINGPONG_3, 0x71800),
PP_BLK("pingpong_4", PINGPONG_4, 0x72000),
PP_BLK("pingpong_5", PINGPONG_5, 0x72800),
PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0),
PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0),
PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1),
PP_BLK("pingpong_3", PINGPONG_3, 0x71800, MERGE_3D_1),
PP_BLK("pingpong_4", PINGPONG_4, 0x72000, MERGE_3D_2),
PP_BLK("pingpong_5", PINGPONG_5, 0x72800, MERGE_3D_2),
};

/*************************************************************
* MERGE_3D sub blocks config
*************************************************************/
#define MERGE_3D_BLK(_name, _id, _base) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0x100, \
.features = MERGE_3D_SM8150_MASK, \
.sblk = NULL \
}

static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = {
MERGE_3D_BLK("merge_3d_0", MERGE_3D_0, 0x83000),
MERGE_3D_BLK("merge_3d_1", MERGE_3D_1, 0x83100),
MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x83200),
};

/*************************************************************
@@ -836,8 +940,12 @@ static void sm8150_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.sspp = sdm845_sspp,
.mixer_count = ARRAY_SIZE(sm8150_lm),
.mixer = sm8150_lm,
.dspp_count = ARRAY_SIZE(sm8150_dspp),
.dspp = sm8150_dspp,
.pingpong_count = ARRAY_SIZE(sm8150_pp),
.pingpong = sm8150_pp,
.merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
.merge_3d = sm8150_merge_3d,
.intf_count = ARRAY_SIZE(sm8150_intf),
.intf = sm8150_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
@@ -866,8 +974,12 @@ static void sm8250_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.sspp = sdm845_sspp,
.mixer_count = ARRAY_SIZE(sm8150_lm),
.mixer = sm8150_lm,
.dspp_count = ARRAY_SIZE(sm8150_dspp),
.dspp = sm8150_dspp,
.pingpong_count = ARRAY_SIZE(sm8150_pp),
.pingpong = sm8150_pp,
.merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
.merge_3d = sm8150_merge_3d,
.intf_count = ARRAY_SIZE(sm8150_intf),
.intf = sm8150_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
@@ -524,9 +524,23 @@ struct dpu_dspp_cfg {
*/
struct dpu_pingpong_cfg {
DPU_HW_BLK_INFO;
u32 merge_3d;
const struct dpu_pingpong_sub_blks *sblk;
};

/**
* struct dpu_merge_3d_cfg - information of DSPP blocks
* @id enum identifying this block
* @base register offset of this block
* @features bit mask identifying sub-blocks/features
* supported by this block
* @sblk sub-blocks information
*/
struct dpu_merge_3d_cfg {
DPU_HW_BLK_INFO;
const struct dpu_merge_3d_sub_blks *sblk;
};

/**
* struct dpu_intf_cfg - information of timing engine blocks
* @id enum identifying this block
@@ -724,6 +738,9 @@ struct dpu_mdss_cfg {
u32 pingpong_count;
const struct dpu_pingpong_cfg *pingpong;

u32 merge_3d_count;
const struct dpu_merge_3d_cfg *merge_3d;

u32 intf_count;
const struct dpu_intf_cfg *intf;

@@ -767,6 +784,7 @@ struct dpu_mdss_hw_cfg_handler {
#define BLK_INTF(s) ((s)->intf)
#define BLK_AD(s) ((s)->ad)
#define BLK_DSPP(s) ((s)->dspp)
#define BLK_MERGE3d(s) ((s)->merge_3d)

/**
* dpu_hw_catalog_init - dpu hardware catalog init API retrieves
@@ -1,88 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/

#include "dpu_hw_mdss.h"

static const uint32_t qcom_compressed_supported_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_BGR565,

DRM_FORMAT_NV12,
};

static const uint32_t plane_formats[] = {
DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888, DRM_FORMAT_RGBA8888, DRM_FORMAT_BGRA8888,
DRM_FORMAT_XRGB8888, DRM_FORMAT_RGBX8888, DRM_FORMAT_BGRX8888, DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGB888, DRM_FORMAT_BGR888, DRM_FORMAT_RGB565, DRM_FORMAT_BGR565,
DRM_FORMAT_ARGB1555, DRM_FORMAT_ABGR1555, DRM_FORMAT_RGBA5551, DRM_FORMAT_BGRA5551,
DRM_FORMAT_XRGB1555, DRM_FORMAT_XBGR1555, DRM_FORMAT_RGBX5551, DRM_FORMAT_BGRX5551,
DRM_FORMAT_ARGB4444, DRM_FORMAT_ABGR4444, DRM_FORMAT_RGBA4444, DRM_FORMAT_BGRA4444,
DRM_FORMAT_XRGB4444, DRM_FORMAT_XBGR4444, DRM_FORMAT_RGBX4444, DRM_FORMAT_BGRX4444,
};

static const uint32_t plane_formats_yuv[] = {
DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888, DRM_FORMAT_RGBA8888, DRM_FORMAT_BGRX8888,
DRM_FORMAT_BGRA8888, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_RGBX8888,
DRM_FORMAT_RGB888, DRM_FORMAT_BGR888, DRM_FORMAT_RGB565, DRM_FORMAT_BGR565,
DRM_FORMAT_ARGB1555, DRM_FORMAT_ABGR1555, DRM_FORMAT_RGBA5551, DRM_FORMAT_BGRA5551,
DRM_FORMAT_XRGB1555, DRM_FORMAT_XBGR1555, DRM_FORMAT_RGBX5551, DRM_FORMAT_BGRX5551,
DRM_FORMAT_ARGB4444, DRM_FORMAT_ABGR4444, DRM_FORMAT_RGBA4444, DRM_FORMAT_BGRA4444,
DRM_FORMAT_XRGB4444, DRM_FORMAT_XBGR4444, DRM_FORMAT_RGBX4444, DRM_FORMAT_BGRX4444,

DRM_FORMAT_NV12, DRM_FORMAT_NV21, DRM_FORMAT_NV16, DRM_FORMAT_NV61,
DRM_FORMAT_VYUY, DRM_FORMAT_UYVY, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU,
DRM_FORMAT_YUV420, DRM_FORMAT_YVU420,
};
@@ -22,7 +22,9 @@
#define CTL_PREPARE 0x0d0
#define CTL_SW_RESET 0x030
#define CTL_LAYER_EXTN_OFFSET 0x40
#define CTL_MERGE_3D_ACTIVE 0x0E4
#define CTL_INTF_ACTIVE 0x0F4
#define CTL_MERGE_3D_FLUSH 0x100
#define CTL_INTF_FLUSH 0x110
#define CTL_INTF_MASTER 0x134

@@ -30,6 +32,7 @@
#define CTL_FLUSH_MASK_CTL BIT(17)

#define DPU_REG_RESET_TIMEOUT_US 2000
#define MERGE_3D_IDX 23
#define INTF_IDX 31

static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
@@ -104,12 +107,6 @@ static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
ctx->pending_flush_mask |= flushbits;
}

static inline void dpu_hw_ctl_update_pending_intf_flush(struct dpu_hw_ctl *ctx,
u32 flushbits)
{
ctx->pending_intf_flush_mask |= flushbits;
}

static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
return ctx->pending_flush_mask;
@@ -118,6 +115,9 @@ static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{

if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
ctx->pending_merge_3d_flush_mask);
if (ctx->pending_flush_mask & BIT(INTF_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
ctx->pending_intf_flush_mask);
@@ -220,40 +220,39 @@ static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
return flushbits;
}

static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
u32 *flushbits, enum dpu_intf intf)
static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
enum dpu_intf intf)
{
switch (intf) {
case INTF_0:
*flushbits |= BIT(31);
ctx->pending_flush_mask |= BIT(31);
break;
case INTF_1:
*flushbits |= BIT(30);
ctx->pending_flush_mask |= BIT(30);
break;
case INTF_2:
*flushbits |= BIT(29);
ctx->pending_flush_mask |= BIT(29);
break;
case INTF_3:
*flushbits |= BIT(28);
ctx->pending_flush_mask |= BIT(28);
break;
default:
return -EINVAL;
break;
}
return 0;
}

static int dpu_hw_ctl_get_bitmask_intf_v1(struct dpu_hw_ctl *ctx,
u32 *flushbits, enum dpu_intf intf)
static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
enum dpu_intf intf)
{
*flushbits |= BIT(31);
return 0;
ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
ctx->pending_flush_mask |= BIT(INTF_IDX);
}

static int dpu_hw_ctl_active_get_bitmask_intf(struct dpu_hw_ctl *ctx,
u32 *flushbits, enum dpu_intf intf)
static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
enum dpu_merge_3d merge_3d)
{
*flushbits |= BIT(intf - INTF_0);
return 0;
ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}

static uint32_t dpu_hw_ctl_get_bitmask_dspp(struct dpu_hw_ctl *ctx,
@@ -497,6 +496,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,

DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, BIT(cfg->merge_3d - MERGE_3D_0));
}

static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
@@ -535,15 +535,15 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf_v1;
ops->get_bitmask_active_intf =
dpu_hw_ctl_active_get_bitmask_intf;
ops->update_pending_intf_flush =
dpu_hw_ctl_update_pending_intf_flush;
ops->update_pending_flush_intf =
dpu_hw_ctl_update_pending_flush_intf_v1;
ops->update_pending_flush_merge_3d =
dpu_hw_ctl_update_pending_flush_merge_3d_v1;
} else {
ops->trigger_flush = dpu_hw_ctl_trigger_flush;
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
ops->update_pending_flush_intf =
dpu_hw_ctl_update_pending_flush_intf;
}
ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
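The ctl rework above replaces the get_bitmask_intf()/update_pending_flush() pairs with update_pending_flush_intf() and update_pending_flush_merge_3d() callbacks that latch bits directly in the ctl context; trigger_flush_v1() then writes the sub-masks to CTL_INTF_FLUSH/CTL_MERGE_3D_FLUSH before the main flush register. A simplified standalone model of that bookkeeping (hypothetical struct and values, not the driver's real types):

```c
#include <stdio.h>
#include <stdint.h>

/* Simplified model of the reworked v1 flush bookkeeping. */
#define MERGE_3D_IDX 23
#define INTF_IDX     31

enum dpu_intf     { INTF_0, INTF_1 };
enum dpu_merge_3d { MERGE_3D_0, MERGE_3D_1 };

struct ctl {
	uint32_t pending_flush_mask;
	uint32_t pending_intf_flush_mask;
	uint32_t pending_merge_3d_flush_mask;
};

/* Like update_pending_flush_intf_v1(): latch the interface bit and the group bit. */
static void flush_intf(struct ctl *c, enum dpu_intf intf)
{
	c->pending_intf_flush_mask |= 1u << (intf - INTF_0);
	c->pending_flush_mask |= 1u << INTF_IDX;
}

/* Like update_pending_flush_merge_3d_v1(): same pattern for the merge_3d block. */
static void flush_merge_3d(struct ctl *c, enum dpu_merge_3d blk)
{
	c->pending_merge_3d_flush_mask |= 1u << (blk - MERGE_3D_0);
	c->pending_flush_mask |= 1u << MERGE_3D_IDX;
}

int main(void)
{
	struct ctl c = {0};

	/* A caller such as the video-mode encoder asking for INTF_1 + MERGE_3D_0. */
	flush_intf(&c, INTF_1);
	flush_merge_3d(&c, MERGE_3D_0);

	/* trigger_flush_v1() would write the sub-masks, then the main flush mask. */
	printf("CTL_INTF_FLUSH=0x%x CTL_MERGE_3D_FLUSH=0x%x CTL_FLUSH=0x%08x\n",
	       c.pending_intf_flush_mask, c.pending_merge_3d_flush_mask,
	       c.pending_flush_mask);
	return 0;
}
```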
@@ -37,12 +37,14 @@ struct dpu_hw_stage_cfg {
* struct dpu_hw_intf_cfg :Describes how the DPU writes data to output interface
* @intf : Interface id
* @mode_3d: 3d mux configuration
* @merge_3d: 3d merge block used
* @intf_mode_sel: Interface mode, cmd / vid
* @stream_sel: Stream selection for multi-stream interfaces
*/
struct dpu_hw_intf_cfg {
enum dpu_intf intf;
enum dpu_3d_blend_mode mode_3d;
enum dpu_merge_3d merge_3d;
enum dpu_ctl_mode_sel intf_mode_sel;
int stream_sel;
};
@@ -91,13 +93,22 @@ struct dpu_hw_ctl_ops {
u32 flushbits);

/**
* OR in the given flushbits to the cached pending_intf_flush_mask
* OR in the given flushbits to the cached pending_(intf_)flush_mask
* No effect on hardware
* @ctx : ctl path ctx pointer
* @flushbits : module flushmask
* @blk : interface block index
*/
void (*update_pending_intf_flush)(struct dpu_hw_ctl *ctx,
u32 flushbits);
void (*update_pending_flush_intf)(struct dpu_hw_ctl *ctx,
enum dpu_intf blk);

/**
* OR in the given flushbits to the cached pending_(merge_3d_)flush_mask
* No effect on hardware
* @ctx : ctl path ctx pointer
* @blk : interface block index
*/
void (*update_pending_flush_merge_3d)(struct dpu_hw_ctl *ctx,
enum dpu_merge_3d blk);

/**
* Write the value of the pending_flush_mask to hardware
@@ -142,23 +153,6 @@ struct dpu_hw_ctl_ops {
uint32_t (*get_bitmask_dspp)(struct dpu_hw_ctl *ctx,
enum dpu_dspp blk);

/**
* Query the value of the intf flush mask
* No effect on hardware
* @ctx : ctl path ctx pointer
*/
int (*get_bitmask_intf)(struct dpu_hw_ctl *ctx,
u32 *flushbits,
enum dpu_intf blk);

/**
* Query the value of the intf active flush mask
* No effect on hardware
* @ctx : ctl path ctx pointer
*/
int (*get_bitmask_active_intf)(struct dpu_hw_ctl *ctx,
u32 *flushbits, enum dpu_intf blk);

/**
* Set all blend stages to disabled
* @ctx : ctl path ctx pointer
@@ -198,6 +192,7 @@ struct dpu_hw_ctl {
const struct dpu_lm_cfg *mixer_hw_caps;
u32 pending_flush_mask;
u32 pending_intf_flush_mask;
u32 pending_merge_3d_flush_mask;

/* ops */
struct dpu_hw_ctl_ops ops;
@ -57,8 +57,7 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
|
||||
static void _setup_dspp_ops(struct dpu_hw_dspp *c,
|
||||
unsigned long features)
|
||||
{
|
||||
if (test_bit(DPU_DSPP_PCC, &features) &&
|
||||
IS_SC7180_TARGET(c->hw.hwversion))
|
||||
if (test_bit(DPU_DSPP_PCC, &features))
|
||||
c->ops.setup_pcc = dpu_setup_dspp_pcc;
|
||||
}
|
||||
|
||||
|
@ -189,8 +189,8 @@ struct dpu_irq_type {
|
||||
u32 reg_idx;
|
||||
};
|
||||
|
||||
/**
|
||||
* List of DPU interrupt registers
|
||||
/*
|
||||
* struct dpu_intr_reg - List of DPU interrupt registers
|
||||
*/
|
||||
static const struct dpu_intr_reg dpu_intr_set[] = {
|
||||
{
|
||||
@ -245,9 +245,10 @@ static const struct dpu_intr_reg dpu_intr_set[] = {
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* IRQ mapping table - use for lookup an irq_idx in this table that have
|
||||
* a matching interface type and instance index.
|
||||
/*
|
||||
* struct dpu_irq_type - IRQ mapping table use for lookup an irq_idx in this
|
||||
* table that have a matching interface type and
|
||||
* instance index.
|
||||
*/
|
||||
static const struct dpu_irq_type dpu_irq_map[] = {
|
||||
/* BEGIN MAP_RANGE: 0-31, INTR */
|
||||
|
@ -48,7 +48,7 @@ static const struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
|
||||
/**
|
||||
* _stage_offset(): returns the relative offset of the blend registers
|
||||
* for the stage to be setup
|
||||
* @c: mixer ctx contains the mixer to be programmed
|
||||
* @ctx: mixer ctx contains the mixer to be programmed
|
||||
* @stage: stage index to setup
|
||||
*/
|
||||
static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
|
||||
|
@ -96,6 +96,7 @@ enum dpu_hw_blk_type {
|
||||
DPU_HW_BLK_INTF,
|
||||
DPU_HW_BLK_WB,
|
||||
DPU_HW_BLK_DSPP,
|
||||
DPU_HW_BLK_MERGE_3D,
|
||||
DPU_HW_BLK_MAX,
|
||||
};
|
||||
|
||||
@ -186,6 +187,13 @@ enum dpu_pingpong {
|
||||
PINGPONG_MAX
|
||||
};
|
||||
|
||||
enum dpu_merge_3d {
|
||||
MERGE_3D_0 = 1,
|
||||
MERGE_3D_1,
|
||||
MERGE_3D_2,
|
||||
MERGE_3D_MAX
|
||||
};
|
||||
|
||||
enum dpu_intf {
|
||||
INTF_0 = 1,
|
||||
INTF_1,
|
||||
|
94
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
Normal file
@ -0,0 +1,94 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/iopoll.h>

#include "dpu_hw_mdss.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_merge3d.h"
#include "dpu_kms.h"
#include "dpu_trace.h"

#define MERGE_3D_MUX 0x000
#define MERGE_3D_MODE 0x004

static const struct dpu_merge_3d_cfg *_merge_3d_offset(enum dpu_merge_3d idx,
		const struct dpu_mdss_cfg *m,
		void __iomem *addr,
		struct dpu_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->merge_3d_count; i++) {
		if (idx == m->merge_3d[i].id) {
			b->base_off = addr;
			b->blk_off = m->merge_3d[i].base;
			b->length = m->merge_3d[i].len;
			b->hwversion = m->hwversion;
			b->log_mask = DPU_DBG_MASK_PINGPONG;
			return &m->merge_3d[i];
		}
	}

	return ERR_PTR(-EINVAL);
}

static void dpu_hw_merge_3d_setup_3d_mode(struct dpu_hw_merge_3d *merge_3d,
		enum dpu_3d_blend_mode mode_3d)
{
	struct dpu_hw_blk_reg_map *c;
	u32 data;


	c = &merge_3d->hw;
	if (mode_3d == BLEND_3D_NONE) {
		DPU_REG_WRITE(c, MERGE_3D_MODE, 0);
		DPU_REG_WRITE(c, MERGE_3D_MUX, 0);
	} else {
		data = BIT(0) | ((mode_3d - 1) << 1);
		DPU_REG_WRITE(c, MERGE_3D_MODE, data);
	}
}

static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c,
		unsigned long features)
{
	c->ops.setup_3d_mode = dpu_hw_merge_3d_setup_3d_mode;
};

static struct dpu_hw_blk_ops dpu_hw_ops;

struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(enum dpu_merge_3d idx,
		void __iomem *addr,
		const struct dpu_mdss_cfg *m)
{
	struct dpu_hw_merge_3d *c;
	const struct dpu_merge_3d_cfg *cfg;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _merge_3d_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		return ERR_PTR(-EINVAL);
	}

	c->idx = idx;
	c->caps = cfg;
	_setup_merge_3d_ops(c, c->caps->features);

	dpu_hw_blk_init(&c->base, DPU_HW_BLK_MERGE_3D, idx, &dpu_hw_ops);

	return c;
}

void dpu_hw_merge_3d_destroy(struct dpu_hw_merge_3d *hw)
{
	if (hw)
		dpu_hw_blk_destroy(&hw->base);
	kfree(hw);
}
68
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
Normal file
@ -0,0 +1,68 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 */

#ifndef _DPU_HW_MERGE3D_H
#define _DPU_HW_MERGE3D_H

#include "dpu_hw_catalog.h"
#include "dpu_hw_mdss.h"
#include "dpu_hw_util.h"
#include "dpu_hw_blk.h"

struct dpu_hw_merge_3d;

/**
 *
 * struct dpu_hw_merge_3d_ops : Interface to the merge_3d Hw driver functions
 *  Assumption is these functions will be called after clocks are enabled
 *  @setup_3d_mode : enable 3D merge
 */
struct dpu_hw_merge_3d_ops {
	void (*setup_3d_mode)(struct dpu_hw_merge_3d *merge_3d,
			enum dpu_3d_blend_mode mode_3d);

};

struct dpu_hw_merge_3d {
	struct dpu_hw_blk base;
	struct dpu_hw_blk_reg_map hw;

	/* merge_3d */
	enum dpu_merge_3d idx;
	const struct dpu_merge_3d_cfg *caps;

	/* ops */
	struct dpu_hw_merge_3d_ops ops;
};

/**
 * to_dpu_hw_merge_3d - convert base object dpu_hw_base to container
 * @hw: Pointer to base hardware block
 * return: Pointer to hardware block container
 */
static inline struct dpu_hw_merge_3d *to_dpu_hw_merge_3d(struct dpu_hw_blk *hw)
{
	return container_of(hw, struct dpu_hw_merge_3d, base);
}

/**
 * dpu_hw_merge_3d_init - initializes the merge_3d driver for the passed
 *	merge_3d idx.
 * @idx: Pingpong index for which driver object is required
 * @addr: Mapped register io address of MDP
 * @m: Pointer to mdss catalog data
 * Returns: Error code or allocated dpu_hw_merge_3d context
 */
struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(enum dpu_merge_3d idx,
		void __iomem *addr,
		const struct dpu_mdss_cfg *m);

/**
 * dpu_hw_merge_3d_destroy - destroys merge_3d driver context
 *	should be called to free the context
 * @pp: Pointer to PP driver context returned by dpu_hw_merge_3d_init
 */
void dpu_hw_merge_3d_destroy(struct dpu_hw_merge_3d *pp);

#endif /*_DPU_HW_MERGE3D_H */
@ -119,6 +119,7 @@ struct dpu_hw_pingpong {
|
||||
/* pingpong */
|
||||
enum dpu_pingpong idx;
|
||||
const struct dpu_pingpong_cfg *caps;
|
||||
struct dpu_hw_blk *merge_3d;
|
||||
|
||||
/* ops */
|
||||
struct dpu_hw_pingpong_ops ops;
|
||||
|
@ -231,7 +231,7 @@ static void _sspp_setup_csc10_opmode(struct dpu_hw_pipe *ctx,
|
||||
DPU_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx, opmode);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* Setup source pixel format, flip,
|
||||
*/
|
||||
static void dpu_hw_sspp_setup_format(struct dpu_hw_pipe *ctx,
|
||||
@ -437,7 +437,7 @@ static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_pipe *ctx)
|
||||
return dpu_hw_get_scaler3_ver(&ctx->hw, idx);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* dpu_hw_sspp_setup_rects()
|
||||
*/
|
||||
static void dpu_hw_sspp_setup_rects(struct dpu_hw_pipe *ctx,
|
||||
|
@ -718,6 +718,8 @@ static void dpu_kms_destroy(struct msm_kms *kms)
|
||||
dpu_kms = to_dpu_kms(kms);
|
||||
|
||||
_dpu_kms_hw_destroy(dpu_kms);
|
||||
|
||||
msm_kms_destroy(&dpu_kms->base);
|
||||
}
|
||||
|
||||
static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
|
||||
@ -1091,12 +1093,9 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
|
||||
return PTR_ERR(dpu_kms->opp_table);
|
||||
/* OPP table is optional */
|
||||
ret = dev_pm_opp_of_add_table(dev);
|
||||
if (!ret) {
|
||||
dpu_kms->has_opp_table = true;
|
||||
} else if (ret != -ENODEV) {
|
||||
if (ret && ret != -ENODEV) {
|
||||
dev_err(dev, "invalid OPP table in device tree\n");
|
||||
dev_pm_opp_put_clkname(dpu_kms->opp_table);
|
||||
return ret;
|
||||
goto put_clkname;
|
||||
}
|
||||
|
||||
mp = &dpu_kms->mp;
|
||||
@ -1108,7 +1107,11 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
|
||||
|
||||
platform_set_drvdata(pdev, dpu_kms);
|
||||
|
||||
msm_kms_init(&dpu_kms->base, &kms_funcs);
|
||||
ret = msm_kms_init(&dpu_kms->base, &kms_funcs);
|
||||
if (ret) {
|
||||
DPU_ERROR("failed to init kms, ret=%d\n", ret);
|
||||
goto err;
|
||||
}
|
||||
dpu_kms->dev = ddev;
|
||||
dpu_kms->pdev = pdev;
|
||||
|
||||
@ -1118,8 +1121,8 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
|
||||
priv->kms = &dpu_kms->base;
|
||||
return ret;
|
||||
err:
|
||||
if (dpu_kms->has_opp_table)
|
||||
dev_pm_opp_of_remove_table(dev);
|
||||
dev_pm_opp_of_remove_table(dev);
|
||||
put_clkname:
|
||||
dev_pm_opp_put_clkname(dpu_kms->opp_table);
|
||||
return ret;
|
||||
}
|
||||
@ -1137,8 +1140,7 @@ static void dpu_unbind(struct device *dev, struct device *master, void *data)
|
||||
if (dpu_kms->rpm_enabled)
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
|
||||
if (dpu_kms->has_opp_table)
|
||||
dev_pm_opp_of_remove_table(dev);
|
||||
dev_pm_opp_of_remove_table(dev);
|
||||
dev_pm_opp_put_clkname(dpu_kms->opp_table);
|
||||
}
|
||||
|
||||
|
@ -131,7 +131,6 @@ struct dpu_kms {
|
||||
bool rpm_enabled;
|
||||
|
||||
struct opp_table *opp_table;
|
||||
bool has_opp_table;
|
||||
|
||||
struct dss_module_power mp;
|
||||
|
||||
|
@ -19,7 +19,6 @@
|
||||
#include "dpu_kms.h"
|
||||
#include "dpu_formats.h"
|
||||
#include "dpu_hw_sspp.h"
|
||||
#include "dpu_hw_catalog_format.h"
|
||||
#include "dpu_trace.h"
|
||||
#include "dpu_crtc.h"
|
||||
#include "dpu_vbif.h"
|
||||
@ -63,6 +62,16 @@ enum {
|
||||
|
||||
#define DEFAULT_REFRESH_RATE 60
|
||||
|
||||
static const uint32_t qcom_compressed_supported_formats[] = {
|
||||
DRM_FORMAT_ABGR8888,
|
||||
DRM_FORMAT_ARGB8888,
|
||||
DRM_FORMAT_XBGR8888,
|
||||
DRM_FORMAT_XRGB8888,
|
||||
DRM_FORMAT_BGR565,
|
||||
|
||||
DRM_FORMAT_NV12,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum dpu_plane_qos - Different qos configurations for each pipe
|
||||
*
|
||||
@ -133,7 +142,8 @@ static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
|
||||
|
||||
/**
|
||||
* _dpu_plane_calc_bw - calculate bandwidth required for a plane
|
||||
* @Plane: Pointer to drm plane.
|
||||
* @plane: Pointer to drm plane.
|
||||
* @fb: Pointer to framebuffer associated with the given plane
|
||||
* Result: Updates calculated bandwidth in the plane state.
|
||||
* BW Equation: src_w * src_h * bpp * fps * (v_total / v_dest)
|
||||
* Prefill BW Equation: line src bytes * line_time
|
||||
@ -151,7 +161,7 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane,
|
||||
u64 plane_bw;
|
||||
u32 hw_latency_lines;
|
||||
u64 scale_factor;
|
||||
int vbp, vpw;
|
||||
int vbp, vpw, vfp;
|
||||
|
||||
pstate = to_dpu_plane_state(plane->state);
|
||||
mode = &plane->state->crtc->mode;
|
||||
@ -164,6 +174,7 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane,
|
||||
fps = drm_mode_vrefresh(mode);
|
||||
vbp = mode->vtotal - mode->vsync_end;
|
||||
vpw = mode->vsync_end - mode->vsync_start;
|
||||
vfp = mode->vsync_start - mode->vdisplay;
|
||||
hw_latency_lines = dpu_kms->catalog->perf.min_prefill_lines;
|
||||
scale_factor = src_height > dst_height ?
|
||||
mult_frac(src_height, 1, dst_height) : 1;
|
||||
@ -176,14 +187,20 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane,
|
||||
src_width * hw_latency_lines * fps * fmt->bpp *
|
||||
scale_factor * mode->vtotal;
|
||||
|
||||
do_div(plane_prefill_bw, (vbp+vpw));
|
||||
if ((vbp+vpw) > hw_latency_lines)
|
||||
do_div(plane_prefill_bw, (vbp+vpw));
|
||||
else if ((vbp+vpw+vfp) < hw_latency_lines)
|
||||
do_div(plane_prefill_bw, (vbp+vpw+vfp));
|
||||
else
|
||||
do_div(plane_prefill_bw, hw_latency_lines);
|
||||
|
||||
|
||||
pstate->plane_fetch_bw = max(plane_bw, plane_prefill_bw);
|
||||
}
|
||||
|
||||
/**
|
||||
* _dpu_plane_calc_clk - calculate clock required for a plane
|
||||
* @Plane: Pointer to drm plane.
|
||||
* @plane: Pointer to drm plane.
|
||||
* Result: Updates calculated clock in the plane state.
|
||||
* Clock equation: dst_w * v_total * fps * (src_h / dst_h)
|
||||
*/
|
||||
@ -215,7 +232,7 @@ static void _dpu_plane_calc_clk(struct drm_plane *plane)
|
||||
* _dpu_plane_calc_fill_level - calculate fill level of the given source format
|
||||
* @plane: Pointer to drm plane
|
||||
* @fmt: Pointer to source buffer format
|
||||
* @src_wdith: width of source buffer
|
||||
* @src_width: width of source buffer
|
||||
* Return: fill level corresponding to the source buffer/format or 0 if error
|
||||
*/
|
||||
static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
|
||||
@ -937,6 +954,7 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
|
||||
{
|
||||
int ret = 0, min_scale;
|
||||
struct dpu_plane *pdpu = to_dpu_plane(plane);
|
||||
struct dpu_plane_state *pstate = to_dpu_plane_state(state);
|
||||
const struct drm_crtc_state *crtc_state = NULL;
|
||||
const struct dpu_format *fmt;
|
||||
struct drm_rect src, dst, fb_rect = { 0 };
|
||||
@ -1009,6 +1027,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
|
||||
return -E2BIG;
|
||||
}
|
||||
|
||||
pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1046,6 +1066,7 @@ void dpu_plane_flush(struct drm_plane *plane)
|
||||
/**
|
||||
* dpu_plane_set_error: enable/disable error condition
|
||||
* @plane: pointer to drm_plane structure
|
||||
* @error: error value to set
|
||||
*/
|
||||
void dpu_plane_set_error(struct drm_plane *plane, bool error)
|
||||
{
|
||||
@ -1066,6 +1087,7 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
|
||||
struct dpu_plane_state *pstate = to_dpu_plane_state(state);
|
||||
struct drm_crtc *crtc = state->crtc;
|
||||
struct drm_framebuffer *fb = state->fb;
|
||||
bool is_rt_pipe, update_qos_remap;
|
||||
const struct dpu_format *fmt =
|
||||
to_dpu_format(msm_framebuffer_format(fb));
|
||||
|
||||
@ -1075,7 +1097,7 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
|
||||
|
||||
pstate->pending = true;
|
||||
|
||||
pdpu->is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
|
||||
is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
|
||||
_dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
|
||||
|
||||
DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT
|
||||
@ -1181,7 +1203,16 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
|
||||
_dpu_plane_set_ot_limit(plane, crtc);
|
||||
}
|
||||
|
||||
_dpu_plane_set_qos_remap(plane);
|
||||
update_qos_remap = (is_rt_pipe != pdpu->is_rt_pipe) ||
|
||||
pstate->needs_qos_remap;
|
||||
|
||||
if (update_qos_remap) {
|
||||
if (is_rt_pipe != pdpu->is_rt_pipe)
|
||||
pdpu->is_rt_pipe = is_rt_pipe;
|
||||
else if (pstate->needs_qos_remap)
|
||||
pstate->needs_qos_remap = false;
|
||||
_dpu_plane_set_qos_remap(plane);
|
||||
}
|
||||
|
||||
_dpu_plane_calc_bw(plane, fb);
|
||||
|
||||
|
@ -19,6 +19,7 @@
|
||||
* @base: base drm plane state object
|
||||
* @aspace: pointer to address space for input/output buffers
|
||||
* @stage: assigned by crtc blender
|
||||
* @needs_qos_remap: qos remap settings need to be updated
|
||||
* @multirect_index: index of the rectangle of SSPP
|
||||
* @multirect_mode: parallel or time multiplex multirect mode
|
||||
* @pending: whether the current update is still pending
|
||||
@ -32,6 +33,7 @@ struct dpu_plane_state {
|
||||
struct drm_plane_state base;
|
||||
struct msm_gem_address_space *aspace;
|
||||
enum dpu_stage stage;
|
||||
bool needs_qos_remap;
|
||||
uint32_t multirect_index;
|
||||
uint32_t multirect_mode;
|
||||
bool pending;
|
||||
|
@ -10,6 +10,7 @@
|
||||
#include "dpu_hw_pingpong.h"
|
||||
#include "dpu_hw_intf.h"
|
||||
#include "dpu_hw_dspp.h"
|
||||
#include "dpu_hw_merge3d.h"
|
||||
#include "dpu_encoder.h"
|
||||
#include "dpu_trace.h"
|
||||
|
||||
@ -42,6 +43,14 @@ int dpu_rm_destroy(struct dpu_rm *rm)
|
||||
dpu_hw_pingpong_destroy(hw);
|
||||
}
|
||||
}
|
||||
for (i = 0; i < ARRAY_SIZE(rm->merge_3d_blks); i++) {
|
||||
struct dpu_hw_merge_3d *hw;
|
||||
|
||||
if (rm->merge_3d_blks[i]) {
|
||||
hw = to_dpu_hw_merge_3d(rm->merge_3d_blks[i]);
|
||||
dpu_hw_merge_3d_destroy(hw);
|
||||
}
|
||||
}
|
||||
for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
|
||||
struct dpu_hw_mixer *hw;
|
||||
|
||||
@ -119,6 +128,24 @@ int dpu_rm_init(struct dpu_rm *rm,
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < cat->merge_3d_count; i++) {
|
||||
struct dpu_hw_merge_3d *hw;
|
||||
const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i];
|
||||
|
||||
if (merge_3d->id < MERGE_3D_0 || merge_3d->id >= MERGE_3D_MAX) {
|
||||
DPU_ERROR("skip merge_3d %d with invalid id\n", merge_3d->id);
|
||||
continue;
|
||||
}
|
||||
hw = dpu_hw_merge_3d_init(merge_3d->id, mmio, cat);
|
||||
if (IS_ERR_OR_NULL(hw)) {
|
||||
rc = PTR_ERR(hw);
|
||||
DPU_ERROR("failed merge_3d object creation: err %d\n",
|
||||
rc);
|
||||
goto fail;
|
||||
}
|
||||
rm->merge_3d_blks[merge_3d->id - MERGE_3D_0] = &hw->base;
|
||||
}
|
||||
|
||||
for (i = 0; i < cat->pingpong_count; i++) {
|
||||
struct dpu_hw_pingpong *hw;
|
||||
const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];
|
||||
@ -134,6 +161,8 @@ int dpu_rm_init(struct dpu_rm *rm,
|
||||
rc);
|
||||
goto fail;
|
||||
}
|
||||
if (pp->merge_3d && pp->merge_3d < MERGE_3D_MAX)
|
||||
hw->merge_3d = rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0];
|
||||
rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
|
||||
}
|
||||
|
||||
@ -210,7 +239,7 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
|
||||
* @rm: dpu resource manager handle
|
||||
* @primary_idx: index of primary mixer in rm->mixer_blks[]
|
||||
* @peer_idx: index of other mixer in rm->mixer_blks[]
|
||||
* @Return: true if rm->mixer_blks[peer_idx] is a peer of
|
||||
* Return: true if rm->mixer_blks[peer_idx] is a peer of
|
||||
* rm->mixer_blks[primary_idx]
|
||||
*/
|
||||
static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
|
||||
@ -235,6 +264,7 @@ static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
|
||||
* proposed use case requirements, incl. hardwired dependent blocks like
|
||||
* pingpong
|
||||
* @rm: dpu resource manager handle
|
||||
* @global_state: resources shared across multiple kms objects
|
||||
* @enc_id: encoder id requesting for allocation
|
||||
* @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
|
||||
* if lm, and all other hardwired blocks connected to the lm (pp) is
|
||||
@ -245,7 +275,7 @@ static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
|
||||
* mixer in rm->dspp_blks[].
|
||||
* @reqs: input parameter, rm requirements for HW blocks needed in the
|
||||
* datapath.
|
||||
* @Return: true if lm matches all requirements, false otherwise
|
||||
* Return: true if lm matches all requirements, false otherwise
|
||||
*/
|
||||
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
|
||||
struct dpu_global_state *global_state,
|
||||
|
@ -29,6 +29,7 @@ struct dpu_rm {
|
||||
struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
|
||||
struct dpu_hw_blk *intf_blks[INTF_MAX - INTF_0];
|
||||
struct dpu_hw_blk *dspp_blks[DSPP_MAX - DSPP_0];
|
||||
struct dpu_hw_blk *merge_3d_blks[MERGE_3D_MAX - MERGE_3D_0];
|
||||
|
||||
uint32_t lm_max_width;
|
||||
};
|
||||
|
@ -140,7 +140,7 @@ exit:
|
||||
|
||||
/**
|
||||
* dpu_vbif_set_ot_limit - set OT based on usecase & configuration parameters
|
||||
* @vbif: Pointer to hardware vbif driver
|
||||
* @dpu_kms: DPU handler
|
||||
* @params: Pointer to usecase parameters
|
||||
*
|
||||
* Note this function would block waiting for bus halt.
|
||||
|
@ -11,6 +11,7 @@
|
||||
#include <drm/drm_vblank.h>
|
||||
|
||||
#include "mdp4_kms.h"
|
||||
#include "msm_gem.h"
|
||||
|
||||
struct mdp4_crtc {
|
||||
struct drm_crtc base;
|
||||
|
@ -175,6 +175,8 @@ static void mdp4_destroy(struct msm_kms *kms)
|
||||
if (mdp4_kms->rpm_enabled)
|
||||
pm_runtime_disable(dev);
|
||||
|
||||
mdp_kms_destroy(&mdp4_kms->base);
|
||||
|
||||
kfree(mdp4_kms);
|
||||
}
|
||||
|
||||
@ -427,7 +429,11 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
mdp_kms_init(&mdp4_kms->base, &kms_funcs);
|
||||
ret = mdp_kms_init(&mdp4_kms->base, &kms_funcs);
|
||||
if (ret) {
|
||||
DRM_DEV_ERROR(dev->dev, "failed to init kms\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
kms = &mdp4_kms->base.base;
|
||||
|
||||
|
@ -16,6 +16,7 @@
|
||||
#include <drm/drm_vblank.h>
|
||||
|
||||
#include "mdp5_kms.h"
|
||||
#include "msm_gem.h"
|
||||
|
||||
#define CURSOR_WIDTH 64
|
||||
#define CURSOR_HEIGHT 64
|
||||
@ -577,9 +578,9 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
|
||||
mdp5_crtc->enabled = true;
|
||||
}
|
||||
|
||||
int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
|
||||
struct drm_crtc_state *new_crtc_state,
|
||||
bool need_right_mixer)
|
||||
static int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
|
||||
struct drm_crtc_state *new_crtc_state,
|
||||
bool need_right_mixer)
|
||||
{
|
||||
struct mdp5_crtc_state *mdp5_cstate =
|
||||
to_mdp5_crtc_state(new_crtc_state);
|
||||
|
@ -216,7 +216,9 @@ static void send_start_signal(struct mdp5_ctl *ctl)
|
||||
/**
|
||||
* mdp5_ctl_set_encoder_state() - set the encoder state
|
||||
*
|
||||
* @enable: true, when encoder is ready for data streaming; false, otherwise.
|
||||
* @ctl: the CTL instance
|
||||
* @pipeline: the encoder's INTF + MIXER configuration
|
||||
* @enabled: true, when encoder is ready for data streaming; false, otherwise.
|
||||
*
|
||||
* Note:
|
||||
* This encoder state is needed to trigger START signal (data path kickoff).
|
||||
@ -510,6 +512,13 @@ static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
|
||||
/**
|
||||
* mdp5_ctl_commit() - Register Flush
|
||||
*
|
||||
* @ctl: the CTL instance
|
||||
* @pipeline: the encoder's INTF + MIXER configuration
|
||||
* @flush_mask: bitmask of display controller hw blocks to flush
|
||||
* @start: if true, immediately update flush registers and set START
|
||||
* bit, otherwise accumulate flush_mask bits until we are
|
||||
* ready to START
|
||||
*
|
||||
* The flush register is used to indicate several registers are all
|
||||
* programmed, and are safe to update to the back copy of the double
|
||||
* buffered registers.
|
||||
|
@ -232,6 +232,8 @@ static void mdp5_kms_destroy(struct msm_kms *kms)
|
||||
aspace->mmu->funcs->detach(aspace->mmu);
|
||||
msm_gem_address_space_put(aspace);
|
||||
}
|
||||
|
||||
mdp_kms_destroy(&mdp5_kms->base);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
@ -294,7 +296,7 @@ static const struct mdp_kms_funcs kms_funcs = {
|
||||
.set_irqmask = mdp5_set_irqmask,
|
||||
};
|
||||
|
||||
int mdp5_disable(struct mdp5_kms *mdp5_kms)
|
||||
static int mdp5_disable(struct mdp5_kms *mdp5_kms)
|
||||
{
|
||||
DBG("");
|
||||
|
||||
@ -314,7 +316,7 @@ int mdp5_disable(struct mdp5_kms *mdp5_kms)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mdp5_enable(struct mdp5_kms *mdp5_kms)
|
||||
static int mdp5_enable(struct mdp5_kms *mdp5_kms)
|
||||
{
|
||||
DBG("");
|
||||
|
||||
@ -592,11 +594,14 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
|
||||
return NULL;
|
||||
|
||||
mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
|
||||
|
||||
mdp_kms_init(&mdp5_kms->base, &kms_funcs);
|
||||
|
||||
pdev = mdp5_kms->pdev;
|
||||
|
||||
ret = mdp_kms_init(&mdp5_kms->base, &kms_funcs);
|
||||
if (ret) {
|
||||
DRM_DEV_ERROR(&pdev->dev, "failed to init kms\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
|
||||
if (irq < 0) {
|
||||
ret = irq;
|
||||
|
@ -36,12 +36,17 @@ struct mdp_kms {
|
||||
};
|
||||
#define to_mdp_kms(x) container_of(x, struct mdp_kms, base)
|
||||
|
||||
static inline void mdp_kms_init(struct mdp_kms *mdp_kms,
|
||||
static inline int mdp_kms_init(struct mdp_kms *mdp_kms,
|
||||
const struct mdp_kms_funcs *funcs)
|
||||
{
|
||||
mdp_kms->funcs = funcs;
|
||||
INIT_LIST_HEAD(&mdp_kms->irq_list);
|
||||
msm_kms_init(&mdp_kms->base, &funcs->base);
|
||||
return msm_kms_init(&mdp_kms->base, &funcs->base);
|
||||
}
|
||||
|
||||
static inline void mdp_kms_destroy(struct mdp_kms *mdp_kms)
|
||||
{
|
||||
msm_kms_destroy(&mdp_kms->base);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -5,7 +5,6 @@
|
||||
|
||||
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
|
||||
|
||||
#include <linux/rational.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/phy/phy.h>
|
||||
@ -572,6 +571,19 @@ void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog)
|
||||
dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
|
||||
}
|
||||
|
||||
u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog)
|
||||
{
|
||||
struct dp_catalog_private *catalog = container_of(dp_catalog,
|
||||
struct dp_catalog_private, dp_catalog);
|
||||
u32 status;
|
||||
|
||||
status = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
|
||||
status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT;
|
||||
status &= DP_DP_HPD_STATE_STATUS_BITS_MASK;
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog)
|
||||
{
|
||||
struct dp_catalog_private *catalog = container_of(dp_catalog,
|
||||
|
@ -97,6 +97,7 @@ void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable);
|
||||
void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
|
||||
u32 intr_mask, bool en);
|
||||
void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog);
|
||||
u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog);
|
||||
u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog);
|
||||
void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog);
|
||||
int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog, u8 v_level,
|
||||
|
@ -10,6 +10,7 @@
|
||||
#include <linux/delay.h>
|
||||
#include <linux/phy/phy.h>
|
||||
#include <linux/phy/phy-dp.h>
|
||||
#include <linux/pm_opp.h>
|
||||
#include <drm/drm_fixed.h>
|
||||
#include <drm/drm_dp_helper.h>
|
||||
#include <drm/drm_print.h>
|
||||
@ -76,6 +77,8 @@ struct dp_ctrl_private {
|
||||
struct dp_parser *parser;
|
||||
struct dp_catalog *catalog;
|
||||
|
||||
struct opp_table *opp_table;
|
||||
|
||||
struct completion idle_comp;
|
||||
struct completion video_comp;
|
||||
};
|
||||
@ -611,7 +614,7 @@ static void _tu_valid_boundary_calc(struct tu_algo_data *tu)
|
||||
static void _dp_ctrl_calc_tu(struct dp_tu_calc_input *in,
|
||||
struct dp_vc_tu_mapping_table *tu_table)
|
||||
{
|
||||
struct tu_algo_data tu;
|
||||
struct tu_algo_data *tu;
|
||||
int compare_result_1, compare_result_2;
|
||||
u64 temp = 0;
|
||||
s64 temp_fp = 0, temp1_fp = 0, temp2_fp = 0;
|
||||
@ -626,298 +629,300 @@ static void _dp_ctrl_calc_tu(struct dp_tu_calc_input *in,
|
||||
uint EXTRA_PIXCLK_CYCLE_DELAY = 4;
|
||||
uint HBLANK_MARGIN = 4;

	memset(&tu, 0, sizeof(tu));
	tu = kzalloc(sizeof(*tu), GFP_KERNEL);
	if (!tu)
		return;

	dp_panel_update_tu_timings(in, &tu);
	dp_panel_update_tu_timings(in, tu);
|
||||
tu.err_fp = drm_fixp_from_fraction(1000, 1); /* 1000 */
|
||||
tu->err_fp = drm_fixp_from_fraction(1000, 1); /* 1000 */
|
||||
|
||||
temp1_fp = drm_fixp_from_fraction(4, 1);
|
||||
temp2_fp = drm_fixp_mul(temp1_fp, tu.lclk_fp);
|
||||
temp_fp = drm_fixp_div(temp2_fp, tu.pclk_fp);
|
||||
tu.extra_buffer_margin = drm_fixp2int_ceil(temp_fp);
|
||||
temp2_fp = drm_fixp_mul(temp1_fp, tu->lclk_fp);
|
||||
temp_fp = drm_fixp_div(temp2_fp, tu->pclk_fp);
|
||||
tu->extra_buffer_margin = drm_fixp2int_ceil(temp_fp);
|
||||
|
||||
temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
|
||||
temp2_fp = drm_fixp_mul(tu.pclk_fp, temp1_fp);
|
||||
temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1);
|
||||
temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
|
||||
temp2_fp = drm_fixp_mul(tu->pclk_fp, temp1_fp);
|
||||
temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1);
|
||||
temp2_fp = drm_fixp_div(temp2_fp, temp1_fp);
|
||||
tu.ratio_fp = drm_fixp_div(temp2_fp, tu.lclk_fp);
|
||||
tu->ratio_fp = drm_fixp_div(temp2_fp, tu->lclk_fp);
|
||||
|
||||
tu.original_ratio_fp = tu.ratio_fp;
|
||||
tu.boundary_moderation_en = false;
|
||||
tu.upper_boundary_count = 0;
|
||||
tu.lower_boundary_count = 0;
|
||||
tu.i_upper_boundary_count = 0;
|
||||
tu.i_lower_boundary_count = 0;
|
||||
tu.valid_lower_boundary_link = 0;
|
||||
tu.even_distribution_BF = 0;
|
||||
tu.even_distribution_legacy = 0;
|
||||
tu.even_distribution = 0;
|
||||
tu.delay_start_time_fp = 0;
|
||||
tu->original_ratio_fp = tu->ratio_fp;
|
||||
tu->boundary_moderation_en = false;
|
||||
tu->upper_boundary_count = 0;
|
||||
tu->lower_boundary_count = 0;
|
||||
tu->i_upper_boundary_count = 0;
|
||||
tu->i_lower_boundary_count = 0;
|
||||
tu->valid_lower_boundary_link = 0;
|
||||
tu->even_distribution_BF = 0;
|
||||
tu->even_distribution_legacy = 0;
|
||||
tu->even_distribution = 0;
|
||||
tu->delay_start_time_fp = 0;
|
||||
|
||||
tu.err_fp = drm_fixp_from_fraction(1000, 1);
|
||||
tu.n_err_fp = 0;
|
||||
tu.n_n_err_fp = 0;
|
||||
tu->err_fp = drm_fixp_from_fraction(1000, 1);
|
||||
tu->n_err_fp = 0;
|
||||
tu->n_n_err_fp = 0;
|
||||
|
||||
tu.ratio = drm_fixp2int(tu.ratio_fp);
|
||||
temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1);
|
||||
div64_u64_rem(tu.lwidth_fp, temp1_fp, &temp2_fp);
|
||||
tu->ratio = drm_fixp2int(tu->ratio_fp);
|
||||
temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1);
|
||||
div64_u64_rem(tu->lwidth_fp, temp1_fp, &temp2_fp);
|
||||
if (temp2_fp != 0 &&
|
||||
!tu.ratio && tu.dsc_en == 0) {
|
||||
tu.ratio_fp = drm_fixp_mul(tu.ratio_fp, RATIO_SCALE_fp);
|
||||
tu.ratio = drm_fixp2int(tu.ratio_fp);
|
||||
if (tu.ratio)
|
||||
tu.ratio_fp = drm_fixp_from_fraction(1, 1);
|
||||
!tu->ratio && tu->dsc_en == 0) {
|
||||
tu->ratio_fp = drm_fixp_mul(tu->ratio_fp, RATIO_SCALE_fp);
|
||||
tu->ratio = drm_fixp2int(tu->ratio_fp);
|
||||
if (tu->ratio)
|
||||
tu->ratio_fp = drm_fixp_from_fraction(1, 1);
|
||||
}
|
||||
|
||||
if (tu.ratio > 1)
|
||||
tu.ratio = 1;
|
||||
if (tu->ratio > 1)
|
||||
tu->ratio = 1;
|
||||
|
||||
if (tu.ratio == 1)
|
||||
if (tu->ratio == 1)
|
||||
goto tu_size_calc;
|
||||
|
||||
compare_result_1 = _tu_param_compare(tu.ratio_fp, const_p49_fp);
|
||||
compare_result_1 = _tu_param_compare(tu->ratio_fp, const_p49_fp);
|
||||
if (!compare_result_1 || compare_result_1 == 1)
|
||||
compare_result_1 = 1;
|
||||
else
|
||||
compare_result_1 = 0;
|
||||
|
||||
compare_result_2 = _tu_param_compare(tu.ratio_fp, const_p56_fp);
|
||||
compare_result_2 = _tu_param_compare(tu->ratio_fp, const_p56_fp);
|
||||
if (!compare_result_2 || compare_result_2 == 2)
|
||||
compare_result_2 = 1;
|
||||
else
|
||||
compare_result_2 = 0;
|
||||
|
||||
if (tu.dsc_en && compare_result_1 && compare_result_2) {
|
||||
if (tu->dsc_en && compare_result_1 && compare_result_2) {
|
||||
HBLANK_MARGIN += 4;
|
||||
DRM_DEBUG_DP("Info: increase HBLANK_MARGIN to %d\n",
|
||||
HBLANK_MARGIN);
|
||||
}
|
||||
|
||||
tu_size_calc:
|
||||
for (tu.tu_size = 32; tu.tu_size <= 64; tu.tu_size++) {
|
||||
temp1_fp = drm_fixp_from_fraction(tu.tu_size, 1);
|
||||
temp2_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp);
|
||||
for (tu->tu_size = 32; tu->tu_size <= 64; tu->tu_size++) {
|
||||
temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
|
||||
temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
|
||||
temp = drm_fixp2int_ceil(temp2_fp);
|
||||
temp1_fp = drm_fixp_from_fraction(temp, 1);
|
||||
tu.n_err_fp = temp1_fp - temp2_fp;
|
||||
tu->n_err_fp = temp1_fp - temp2_fp;
|
||||
|
||||
if (tu.n_err_fp < tu.err_fp) {
|
||||
tu.err_fp = tu.n_err_fp;
|
||||
tu.tu_size_desired = tu.tu_size;
|
||||
if (tu->n_err_fp < tu->err_fp) {
|
||||
tu->err_fp = tu->n_err_fp;
|
||||
tu->tu_size_desired = tu->tu_size;
|
||||
}
|
||||
}
|
||||
|
||||
tu.tu_size_minus1 = tu.tu_size_desired - 1;
|
||||
tu->tu_size_minus1 = tu->tu_size_desired - 1;
|
||||
|
||||
temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1);
|
||||
temp2_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp);
|
||||
tu.valid_boundary_link = drm_fixp2int_ceil(temp2_fp);
|
||||
temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1);
|
||||
temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
|
||||
tu->valid_boundary_link = drm_fixp2int_ceil(temp2_fp);
|
||||
|
||||
temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
|
||||
temp2_fp = tu.lwidth_fp;
|
||||
temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
|
||||
temp2_fp = tu->lwidth_fp;
|
||||
temp2_fp = drm_fixp_mul(temp2_fp, temp1_fp);
|
||||
|
||||
temp1_fp = drm_fixp_from_fraction(tu.valid_boundary_link, 1);
|
||||
temp1_fp = drm_fixp_from_fraction(tu->valid_boundary_link, 1);
|
||||
temp2_fp = drm_fixp_div(temp2_fp, temp1_fp);
|
||||
tu.n_tus = drm_fixp2int(temp2_fp);
|
||||
tu->n_tus = drm_fixp2int(temp2_fp);
|
||||
if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000)
|
||||
tu.n_tus += 1;
|
||||
tu->n_tus += 1;
|
||||
|
||||
tu.even_distribution_legacy = tu.n_tus % tu.nlanes == 0 ? 1 : 0;
|
||||
tu->even_distribution_legacy = tu->n_tus % tu->nlanes == 0 ? 1 : 0;
|
||||
DRM_DEBUG_DP("Info: n_sym = %d, num_of_tus = %d\n",
|
||||
tu.valid_boundary_link, tu.n_tus);
|
||||
tu->valid_boundary_link, tu->n_tus);
|
||||
|
||||
temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1);
|
||||
temp2_fp = drm_fixp_mul(tu.original_ratio_fp, temp1_fp);
|
||||
temp1_fp = drm_fixp_from_fraction(tu.valid_boundary_link, 1);
|
||||
temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1);
|
||||
temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
|
||||
temp1_fp = drm_fixp_from_fraction(tu->valid_boundary_link, 1);
|
||||
temp2_fp = temp1_fp - temp2_fp;
|
||||
temp1_fp = drm_fixp_from_fraction(tu.n_tus + 1, 1);
|
||||
temp1_fp = drm_fixp_from_fraction(tu->n_tus + 1, 1);
|
||||
temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp);
|
||||
|
||||
temp = drm_fixp2int(temp2_fp);
|
||||
if (temp && temp2_fp)
|
||||
tu.extra_bytes = drm_fixp2int_ceil(temp2_fp);
|
||||
tu->extra_bytes = drm_fixp2int_ceil(temp2_fp);
|
||||
else
|
||||
tu.extra_bytes = 0;
|
||||
tu->extra_bytes = 0;
|
||||
|
||||
temp1_fp = drm_fixp_from_fraction(tu.extra_bytes, 1);
|
||||
temp2_fp = drm_fixp_from_fraction(8, tu.bpp);
|
||||
temp1_fp = drm_fixp_from_fraction(tu->extra_bytes, 1);
|
||||
temp2_fp = drm_fixp_from_fraction(8, tu->bpp);
|
||||
temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp);
|
||||
|
||||
if (temp && temp1_fp)
|
||||
tu.extra_pclk_cycles = drm_fixp2int_ceil(temp1_fp);
|
||||
tu->extra_pclk_cycles = drm_fixp2int_ceil(temp1_fp);
|
||||
else
|
||||
tu.extra_pclk_cycles = drm_fixp2int(temp1_fp);
|
||||
tu->extra_pclk_cycles = drm_fixp2int(temp1_fp);
|
||||
|
||||
temp1_fp = drm_fixp_div(tu.lclk_fp, tu.pclk_fp);
|
||||
temp2_fp = drm_fixp_from_fraction(tu.extra_pclk_cycles, 1);
|
||||
temp1_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp);
|
||||
temp2_fp = drm_fixp_from_fraction(tu->extra_pclk_cycles, 1);
|
||||
temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
|
||||
|
||||
if (temp1_fp)
|
||||
tu.extra_pclk_cycles_in_link_clk = drm_fixp2int_ceil(temp1_fp);
|
||||
tu->extra_pclk_cycles_in_link_clk = drm_fixp2int_ceil(temp1_fp);
|
||||
else
|
||||
tu.extra_pclk_cycles_in_link_clk = drm_fixp2int(temp1_fp);
|
||||
tu->extra_pclk_cycles_in_link_clk = drm_fixp2int(temp1_fp);
|
||||
|
||||
tu.filler_size = tu.tu_size_desired - tu.valid_boundary_link;
|
||||
tu->filler_size = tu->tu_size_desired - tu->valid_boundary_link;
|
||||
|
||||
temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1);
|
||||
tu.ratio_by_tu_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp);
|
||||
temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1);
|
||||
tu->ratio_by_tu_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
|
||||
|
||||
tu.delay_start_link = tu.extra_pclk_cycles_in_link_clk +
|
||||
tu.filler_size + tu.extra_buffer_margin;
|
||||
tu->delay_start_link = tu->extra_pclk_cycles_in_link_clk +
|
||||
tu->filler_size + tu->extra_buffer_margin;
|
||||
|
||||
tu.resulting_valid_fp =
|
||||
drm_fixp_from_fraction(tu.valid_boundary_link, 1);
|
||||
tu->resulting_valid_fp =
|
||||
drm_fixp_from_fraction(tu->valid_boundary_link, 1);
|
||||
|
||||
temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1);
|
||||
temp2_fp = drm_fixp_div(tu.resulting_valid_fp, temp1_fp);
|
||||
tu.TU_ratio_err_fp = temp2_fp - tu.original_ratio_fp;
|
||||
temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1);
|
||||
temp2_fp = drm_fixp_div(tu->resulting_valid_fp, temp1_fp);
|
||||
tu->TU_ratio_err_fp = temp2_fp - tu->original_ratio_fp;
|
||||
|
||||
temp1_fp = drm_fixp_from_fraction(HBLANK_MARGIN, 1);
|
||||
temp1_fp = tu.hbp_relative_to_pclk_fp - temp1_fp;
|
||||
tu.hbp_time_fp = drm_fixp_div(temp1_fp, tu.pclk_fp);
|
||||
temp1_fp = tu->hbp_relative_to_pclk_fp - temp1_fp;
|
||||
tu->hbp_time_fp = drm_fixp_div(temp1_fp, tu->pclk_fp);
|
||||
|
||||
temp1_fp = drm_fixp_from_fraction(tu.delay_start_link, 1);
|
||||
tu.delay_start_time_fp = drm_fixp_div(temp1_fp, tu.lclk_fp);
|
||||
temp1_fp = drm_fixp_from_fraction(tu->delay_start_link, 1);
|
||||
tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp);
|
||||
|
||||
compare_result_1 = _tu_param_compare(tu.hbp_time_fp,
|
||||
tu.delay_start_time_fp);
|
||||
compare_result_1 = _tu_param_compare(tu->hbp_time_fp,
|
||||
tu->delay_start_time_fp);
|
||||
if (compare_result_1 == 2) /* if (hbp_time_fp < delay_start_time_fp) */
|
||||
tu.min_hblank_violated = 1;
|
||||
tu->min_hblank_violated = 1;
|
||||
|
||||
tu.hactive_time_fp = drm_fixp_div(tu.lwidth_fp, tu.pclk_fp);
|
||||
tu->hactive_time_fp = drm_fixp_div(tu->lwidth_fp, tu->pclk_fp);
|
||||
|
||||
compare_result_2 = _tu_param_compare(tu.hactive_time_fp,
|
||||
tu.delay_start_time_fp);
|
||||
compare_result_2 = _tu_param_compare(tu->hactive_time_fp,
|
||||
tu->delay_start_time_fp);
|
||||
if (compare_result_2 == 2)
|
||||
tu.min_hblank_violated = 1;
|
||||
tu->min_hblank_violated = 1;
|
||||
|
||||
tu.delay_start_time_fp = 0;
|
||||
tu->delay_start_time_fp = 0;
|
||||
|
||||
/* brute force */
|
||||
|
||||
tu.delay_start_link_extra_pixclk = EXTRA_PIXCLK_CYCLE_DELAY;
|
||||
tu.diff_abs_fp = tu.resulting_valid_fp - tu.ratio_by_tu_fp;
|
||||
tu->delay_start_link_extra_pixclk = EXTRA_PIXCLK_CYCLE_DELAY;
|
||||
tu->diff_abs_fp = tu->resulting_valid_fp - tu->ratio_by_tu_fp;
|
||||
|
||||
temp = drm_fixp2int(tu.diff_abs_fp);
|
||||
if (!temp && tu.diff_abs_fp <= 0xffff)
|
||||
tu.diff_abs_fp = 0;
|
||||
temp = drm_fixp2int(tu->diff_abs_fp);
|
||||
if (!temp && tu->diff_abs_fp <= 0xffff)
|
||||
tu->diff_abs_fp = 0;
|
||||
|
||||
/* if(diff_abs < 0) diff_abs *= -1 */
|
||||
if (tu.diff_abs_fp < 0)
|
||||
tu.diff_abs_fp = drm_fixp_mul(tu.diff_abs_fp, -1);
|
||||
if (tu->diff_abs_fp < 0)
|
||||
tu->diff_abs_fp = drm_fixp_mul(tu->diff_abs_fp, -1);
|
||||
|
||||
tu.boundary_mod_lower_err = 0;
|
||||
if ((tu.diff_abs_fp != 0 &&
|
||||
((tu.diff_abs_fp > BRUTE_FORCE_THRESHOLD_fp) ||
|
||||
(tu.even_distribution_legacy == 0) ||
|
||||
tu->boundary_mod_lower_err = 0;
|
||||
if ((tu->diff_abs_fp != 0 &&
|
||||
((tu->diff_abs_fp > BRUTE_FORCE_THRESHOLD_fp) ||
|
||||
(tu->even_distribution_legacy == 0) ||
|
||||
(DP_BRUTE_FORCE == 1))) ||
|
||||
(tu.min_hblank_violated == 1)) {
|
||||
(tu->min_hblank_violated == 1)) {
|
||||
do {
|
||||
tu.err_fp = drm_fixp_from_fraction(1000, 1);
|
||||
tu->err_fp = drm_fixp_from_fraction(1000, 1);
|
||||
|
||||
temp1_fp = drm_fixp_div(tu.lclk_fp, tu.pclk_fp);
|
||||
temp1_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp);
|
||||
temp2_fp = drm_fixp_from_fraction(
|
||||
tu.delay_start_link_extra_pixclk, 1);
|
||||
tu->delay_start_link_extra_pixclk, 1);
|
||||
temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
|
||||
|
||||
if (temp1_fp)
|
||||
tu.extra_buffer_margin =
|
||||
tu->extra_buffer_margin =
|
||||
drm_fixp2int_ceil(temp1_fp);
|
||||
else
|
||||
tu.extra_buffer_margin = 0;
|
||||
tu->extra_buffer_margin = 0;
|
||||
|
||||
temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
|
||||
temp1_fp = drm_fixp_mul(tu.lwidth_fp, temp1_fp);
|
||||
temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
|
||||
temp1_fp = drm_fixp_mul(tu->lwidth_fp, temp1_fp);
|
||||
|
||||
if (temp1_fp)
|
||||
tu.n_symbols = drm_fixp2int_ceil(temp1_fp);
|
||||
tu->n_symbols = drm_fixp2int_ceil(temp1_fp);
|
||||
else
|
||||
tu.n_symbols = 0;
|
||||
tu->n_symbols = 0;
|
||||
|
||||
for (tu.tu_size = 32; tu.tu_size <= 64; tu.tu_size++) {
|
||||
for (tu.i_upper_boundary_count = 1;
|
||||
tu.i_upper_boundary_count <= 15;
|
||||
tu.i_upper_boundary_count++) {
|
||||
for (tu.i_lower_boundary_count = 1;
|
||||
tu.i_lower_boundary_count <= 15;
|
||||
tu.i_lower_boundary_count++) {
|
||||
_tu_valid_boundary_calc(&tu);
|
||||
for (tu->tu_size = 32; tu->tu_size <= 64; tu->tu_size++) {
|
||||
for (tu->i_upper_boundary_count = 1;
|
||||
tu->i_upper_boundary_count <= 15;
|
||||
tu->i_upper_boundary_count++) {
|
||||
for (tu->i_lower_boundary_count = 1;
|
||||
tu->i_lower_boundary_count <= 15;
|
||||
tu->i_lower_boundary_count++) {
|
||||
_tu_valid_boundary_calc(tu);
|
||||
}
|
||||
}
|
||||
}
|
||||
tu.delay_start_link_extra_pixclk--;
|
||||
} while (tu.boundary_moderation_en != true &&
|
||||
tu.boundary_mod_lower_err == 1 &&
|
||||
tu.delay_start_link_extra_pixclk != 0);
|
||||
tu->delay_start_link_extra_pixclk--;
|
||||
} while (tu->boundary_moderation_en != true &&
|
||||
tu->boundary_mod_lower_err == 1 &&
|
||||
tu->delay_start_link_extra_pixclk != 0);
|
||||
|
||||
if (tu.boundary_moderation_en == true) {
|
||||
if (tu->boundary_moderation_en == true) {
|
||||
temp1_fp = drm_fixp_from_fraction(
|
||||
(tu.upper_boundary_count *
|
||||
tu.valid_boundary_link +
|
||||
tu.lower_boundary_count *
|
||||
(tu.valid_boundary_link - 1)), 1);
|
||||
(tu->upper_boundary_count *
|
||||
tu->valid_boundary_link +
|
||||
tu->lower_boundary_count *
|
||||
(tu->valid_boundary_link - 1)), 1);
|
||||
temp2_fp = drm_fixp_from_fraction(
|
||||
(tu.upper_boundary_count +
|
||||
tu.lower_boundary_count), 1);
|
||||
tu.resulting_valid_fp =
|
||||
(tu->upper_boundary_count +
|
||||
tu->lower_boundary_count), 1);
|
||||
tu->resulting_valid_fp =
|
||||
drm_fixp_div(temp1_fp, temp2_fp);
|
||||
|
||||
temp1_fp = drm_fixp_from_fraction(
|
||||
tu.tu_size_desired, 1);
|
||||
tu.ratio_by_tu_fp =
|
||||
drm_fixp_mul(tu.original_ratio_fp, temp1_fp);
|
||||
tu->tu_size_desired, 1);
|
||||
tu->ratio_by_tu_fp =
|
||||
drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
|
||||
|
||||
tu.valid_lower_boundary_link =
|
||||
tu.valid_boundary_link - 1;
|
||||
tu->valid_lower_boundary_link =
|
||||
tu->valid_boundary_link - 1;
|
||||
|
||||
temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
|
||||
temp1_fp = drm_fixp_mul(tu.lwidth_fp, temp1_fp);
|
||||
temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
|
||||
temp1_fp = drm_fixp_mul(tu->lwidth_fp, temp1_fp);
|
||||
temp2_fp = drm_fixp_div(temp1_fp,
|
||||
tu.resulting_valid_fp);
|
||||
tu.n_tus = drm_fixp2int(temp2_fp);
|
||||
tu->resulting_valid_fp);
|
||||
tu->n_tus = drm_fixp2int(temp2_fp);
|
||||
|
||||
tu.tu_size_minus1 = tu.tu_size_desired - 1;
|
||||
tu.even_distribution_BF = 1;
|
||||
tu->tu_size_minus1 = tu->tu_size_desired - 1;
|
||||
tu->even_distribution_BF = 1;
|
||||
|
||||
temp1_fp =
|
||||
drm_fixp_from_fraction(tu.tu_size_desired, 1);
|
||||
drm_fixp_from_fraction(tu->tu_size_desired, 1);
|
||||
temp2_fp =
|
||||
drm_fixp_div(tu.resulting_valid_fp, temp1_fp);
|
||||
tu.TU_ratio_err_fp = temp2_fp - tu.original_ratio_fp;
|
||||
drm_fixp_div(tu->resulting_valid_fp, temp1_fp);
|
||||
tu->TU_ratio_err_fp = temp2_fp - tu->original_ratio_fp;
|
||||
}
|
||||
}
|
||||
|
||||
temp2_fp = drm_fixp_mul(LCLK_FAST_SKEW_fp, tu.lwidth_fp);
|
||||
temp2_fp = drm_fixp_mul(LCLK_FAST_SKEW_fp, tu->lwidth_fp);
|
||||
|
||||
if (temp2_fp)
|
||||
temp = drm_fixp2int_ceil(temp2_fp);
|
||||
else
|
||||
temp = 0;
|
||||
|
||||
temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1);
|
||||
temp2_fp = drm_fixp_mul(tu.original_ratio_fp, temp1_fp);
|
||||
temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
|
||||
temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1);
|
||||
temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
|
||||
temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
|
||||
temp2_fp = drm_fixp_div(temp1_fp, temp2_fp);
|
||||
temp1_fp = drm_fixp_from_fraction(temp, 1);
|
||||
temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp);
|
||||
temp = drm_fixp2int(temp2_fp);
|
||||
|
||||
if (tu.async_en)
|
||||
tu.delay_start_link += (int)temp;
|
||||
if (tu->async_en)
|
||||
tu->delay_start_link += (int)temp;
|
||||
|
||||
temp1_fp = drm_fixp_from_fraction(tu.delay_start_link, 1);
|
||||
tu.delay_start_time_fp = drm_fixp_div(temp1_fp, tu.lclk_fp);
|
||||
temp1_fp = drm_fixp_from_fraction(tu->delay_start_link, 1);
|
||||
tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp);
|
||||
|
||||
/* OUTPUTS */
|
||||
tu_table->valid_boundary_link = tu.valid_boundary_link;
|
||||
tu_table->delay_start_link = tu.delay_start_link;
|
||||
tu_table->boundary_moderation_en = tu.boundary_moderation_en;
|
||||
tu_table->valid_lower_boundary_link = tu.valid_lower_boundary_link;
|
||||
tu_table->upper_boundary_count = tu.upper_boundary_count;
|
||||
tu_table->lower_boundary_count = tu.lower_boundary_count;
|
||||
tu_table->tu_size_minus1 = tu.tu_size_minus1;
|
||||
tu_table->valid_boundary_link = tu->valid_boundary_link;
|
||||
tu_table->delay_start_link = tu->delay_start_link;
|
||||
tu_table->boundary_moderation_en = tu->boundary_moderation_en;
|
||||
tu_table->valid_lower_boundary_link = tu->valid_lower_boundary_link;
|
||||
tu_table->upper_boundary_count = tu->upper_boundary_count;
|
||||
tu_table->lower_boundary_count = tu->lower_boundary_count;
|
||||
tu_table->tu_size_minus1 = tu->tu_size_minus1;
|
||||
|
||||
DRM_DEBUG_DP("TU: valid_boundary_link: %d\n",
|
||||
tu_table->valid_boundary_link);
|
||||
@ -932,6 +937,8 @@ tu_size_calc:
|
||||
DRM_DEBUG_DP("TU: lower_boundary_count: %d\n",
|
||||
tu_table->lower_boundary_count);
|
||||
DRM_DEBUG_DP("TU: tu_size_minus1: %d\n", tu_table->tu_size_minus1);
|
||||
|
||||
kfree(tu);
|
||||
}
|
||||
|
||||
static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl,
|
||||
@ -1061,23 +1068,15 @@ static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
|
||||
static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl,
|
||||
u8 *link_status)
|
||||
{
|
||||
int len = 0;
|
||||
u32 const offset = DP_LANE_ALIGN_STATUS_UPDATED - DP_LANE0_1_STATUS;
|
||||
u32 link_status_read_max_retries = 100;
|
||||
int ret = 0, len;
|
||||
|
||||
while (--link_status_read_max_retries) {
|
||||
len = drm_dp_dpcd_read_link_status(ctrl->aux,
|
||||
link_status);
|
||||
if (len != DP_LINK_STATUS_SIZE) {
|
||||
DRM_ERROR("DP link status read failed, err: %d\n", len);
|
||||
return len;
|
||||
}
|
||||
|
||||
if (!(link_status[offset] & DP_LINK_STATUS_UPDATED))
|
||||
return 0;
|
||||
len = drm_dp_dpcd_read_link_status(ctrl->aux, link_status);
|
||||
if (len != DP_LINK_STATUS_SIZE) {
|
||||
DRM_ERROR("DP link status read failed, err: %d\n", len);
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
return -ETIMEDOUT;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
|
||||
@ -1400,6 +1399,8 @@ int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip)
|
||||
void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl)
|
||||
{
|
||||
struct dp_ctrl_private *ctrl;
|
||||
struct dp_io *dp_io;
|
||||
struct phy *phy;
|
||||
|
||||
if (!dp_ctrl) {
|
||||
DRM_ERROR("Invalid input data\n");
|
||||
@ -1407,8 +1408,11 @@ void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl)
|
||||
}
|
||||
|
||||
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
|
||||
dp_io = &ctrl->parser->io;
|
||||
phy = dp_io->phy;
|
||||
|
||||
dp_catalog_ctrl_enable_irq(ctrl->catalog, false);
|
||||
phy_exit(phy);
|
||||
|
||||
DRM_DEBUG_DP("Host deinitialized successfully\n");
|
||||
}
|
||||
@ -1463,6 +1467,30 @@ static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
|
||||
{
|
||||
struct dp_io *dp_io;
|
||||
struct phy *phy;
|
||||
int ret;
|
||||
|
||||
dp_io = &ctrl->parser->io;
|
||||
phy = dp_io->phy;
|
||||
|
||||
dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
|
||||
|
||||
dp_catalog_ctrl_reset(ctrl->catalog);
|
||||
|
||||
ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
|
||||
}
|
||||
|
||||
phy_power_off(phy);
|
||||
phy_exit(phy);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
|
||||
{
|
||||
int ret = 0;
|
||||
@ -1643,11 +1671,7 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
ctrl->link->phy_params.p_level = 0;
|
||||
ctrl->link->phy_params.v_level = 0;
|
||||
|
||||
while (--link_train_max_retries &&
|
||||
!atomic_read(&ctrl->dp_ctrl.aborted)) {
|
||||
while (--link_train_max_retries) {
|
||||
rc = dp_ctrl_reinitialize_mainlink(ctrl);
|
||||
if (rc) {
|
||||
DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n",
|
||||
@ -1662,6 +1686,10 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
|
||||
break;
|
||||
} else if (training_step == DP_TRAINING_1) {
|
||||
/* link train_1 failed */
|
||||
if (!dp_catalog_link_is_connected(ctrl->catalog)) {
|
||||
break;
|
||||
}
|
||||
|
||||
rc = dp_ctrl_link_rate_down_shift(ctrl);
|
||||
if (rc < 0) { /* already in RBR = 1.6G */
|
||||
if (cr.lane_0_1 & DP_LANE0_1_CR_DONE) {
|
||||
@ -1681,6 +1709,10 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
|
||||
}
|
||||
} else if (training_step == DP_TRAINING_2) {
|
||||
/* link train_2 failed, lower lane rate */
|
||||
if (!dp_catalog_link_is_connected(ctrl->catalog)) {
|
||||
break;
|
||||
}
|
||||
|
||||
rc = dp_ctrl_link_lane_down_shift(ctrl);
|
||||
if (rc < 0) {
|
||||
/* end with failure */
|
||||
@ -1701,6 +1733,11 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
|
||||
*/
|
||||
if (rc == 0) /* link train successfully */
|
||||
dp_ctrl_push_idle(dp_ctrl);
|
||||
else {
|
||||
/* link training failed */
|
||||
dp_ctrl_deinitialize_mainlink(ctrl);
|
||||
rc = -ECONNRESET;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
@ -1836,6 +1873,7 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
|
||||
struct dp_parser *parser)
|
||||
{
|
||||
struct dp_ctrl_private *ctrl;
|
||||
int ret;
|
||||
|
||||
if (!dev || !panel || !aux ||
|
||||
!link || !catalog) {
|
||||
@ -1849,6 +1887,21 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
ctrl->opp_table = dev_pm_opp_set_clkname(dev, "ctrl_link");
|
||||
if (IS_ERR(ctrl->opp_table)) {
|
||||
dev_err(dev, "invalid DP OPP table in device tree\n");
|
||||
/* caller do PTR_ERR(ctrl->opp_table) */
|
||||
return (struct dp_ctrl *)ctrl->opp_table;
|
||||
}
|
||||
|
||||
/* OPP table is optional */
|
||||
ret = dev_pm_opp_of_add_table(dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to add DP OPP table\n");
|
||||
dev_pm_opp_put_clkname(ctrl->opp_table);
|
||||
ctrl->opp_table = NULL;
|
||||
}
|
||||
|
||||
init_completion(&ctrl->idle_comp);
|
||||
init_completion(&ctrl->video_comp);
|
||||
|
||||
@ -1866,4 +1919,13 @@ struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
|
||||
|
||||
void dp_ctrl_put(struct dp_ctrl *dp_ctrl)
|
||||
{
|
||||
struct dp_ctrl_private *ctrl;
|
||||
|
||||
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
|
||||
|
||||
if (ctrl->opp_table) {
|
||||
dev_pm_opp_of_remove_table(ctrl->dev);
|
||||
dev_pm_opp_put_clkname(ctrl->opp_table);
|
||||
ctrl->opp_table = NULL;
|
||||
}
|
||||
}
|
||||
|
@ -45,7 +45,7 @@ enum {
	ST_CONNECT_PENDING,
	ST_CONNECTED,
	ST_DISCONNECT_PENDING,
	ST_SUSPEND_PENDING,
	ST_DISPLAY_OFF,
	ST_SUSPENDED,
};
@ -102,20 +102,20 @@ struct dp_display_private {
|
||||
struct dp_display_mode dp_mode;
|
||||
struct msm_dp dp_display;
|
||||
|
||||
bool encoder_mode_set;
|
||||
|
||||
/* wait for audio signaling */
|
||||
struct completion audio_comp;
|
||||
|
||||
/* event related only access by event thread */
|
||||
struct mutex event_mutex;
|
||||
wait_queue_head_t event_q;
|
||||
atomic_t hpd_state;
|
||||
u32 hpd_state;
|
||||
u32 event_pndx;
|
||||
u32 event_gndx;
|
||||
struct dp_event event_list[DP_EVENT_Q_MAX];
|
||||
spinlock_t event_lock;
|
||||
|
||||
struct completion resume_comp;
|
||||
|
||||
struct dp_audio *audio;
|
||||
};
|
||||
|
||||
@ -281,13 +281,24 @@ static void dp_display_send_hpd_event(struct msm_dp *dp_display)
|
||||
drm_helper_hpd_irq_event(connector->dev);
|
||||
}
|
||||
|
||||
static int dp_display_send_hpd_notification(struct dp_display_private *dp,
|
||||
bool hpd)
|
||||
|
||||
static void dp_display_set_encoder_mode(struct dp_display_private *dp)
|
||||
{
|
||||
static bool encoder_mode_set;
|
||||
struct msm_drm_private *priv = dp->dp_display.drm_dev->dev_private;
|
||||
struct msm_kms *kms = priv->kms;
|
||||
|
||||
if (!dp->encoder_mode_set && dp->dp_display.encoder &&
|
||||
kms->funcs->set_encoder_mode) {
|
||||
kms->funcs->set_encoder_mode(kms,
|
||||
dp->dp_display.encoder, false);
|
||||
|
||||
dp->encoder_mode_set = true;
|
||||
}
|
||||
}
|
||||
|
||||
static int dp_display_send_hpd_notification(struct dp_display_private *dp,
|
||||
bool hpd)
|
||||
{
|
||||
if ((hpd && dp->dp_display.is_connected) ||
|
||||
(!hpd && !dp->dp_display.is_connected)) {
|
||||
DRM_DEBUG_DP("HPD already %s\n", (hpd ? "on" : "off"));
|
||||
@ -300,15 +311,6 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
|
||||
|
||||
dp->dp_display.is_connected = hpd;
|
||||
|
||||
if (dp->dp_display.is_connected && dp->dp_display.encoder
|
||||
&& !encoder_mode_set
|
||||
&& kms->funcs->set_encoder_mode) {
|
||||
kms->funcs->set_encoder_mode(kms,
|
||||
dp->dp_display.encoder, false);
|
||||
DRM_DEBUG_DP("set_encoder_mode() Completed\n");
|
||||
encoder_mode_set = true;
|
||||
}
|
||||
|
||||
dp_display_send_hpd_event(&dp->dp_display);
|
||||
|
||||
return 0;
|
||||
@ -335,6 +337,7 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
|
||||
dp->dp_display.max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ;
|
||||
dp->dp_display.max_dp_lanes = dp->parser->max_dp_lanes;
|
||||
|
||||
dp_link_reset_phy_params_vx_px(dp->link);
|
||||
rc = dp_ctrl_on_link(dp->ctrl);
|
||||
if (rc) {
|
||||
DRM_ERROR("failed to complete DP link training\n");
|
||||
@ -343,7 +346,6 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
|
||||
|
||||
dp_add_event(dp, EV_USER_NOTIFICATION, true, 0);
|
||||
|
||||
|
||||
end:
|
||||
return rc;
|
||||
}
|
||||
@ -360,12 +362,28 @@ static void dp_display_host_init(struct dp_display_private *dp)
|
||||
if (dp->usbpd->orientation == ORIENTATION_CC2)
|
||||
flip = true;
|
||||
|
||||
dp_display_set_encoder_mode(dp);
|
||||
|
||||
dp_power_init(dp->power, flip);
|
||||
dp_ctrl_host_init(dp->ctrl, flip);
|
||||
dp_aux_init(dp->aux);
|
||||
dp->core_initialized = true;
|
||||
}
|
||||
|
||||
static void dp_display_host_deinit(struct dp_display_private *dp)
|
||||
{
|
||||
if (!dp->core_initialized) {
|
||||
DRM_DEBUG_DP("DP core not initialized\n");
|
||||
return;
|
||||
}
|
||||
|
||||
dp_ctrl_host_deinit(dp->ctrl);
|
||||
dp_aux_deinit(dp->aux);
|
||||
dp_power_deinit(dp->power);
|
||||
|
||||
dp->core_initialized = false;
|
||||
}
|
||||
|
||||
static int dp_display_usbpd_configure_cb(struct device *dev)
|
||||
{
|
||||
int rc = 0;
|
||||
@ -429,25 +447,42 @@ static void dp_display_handle_video_request(struct dp_display_private *dp)
|
||||
}
|
||||
}
|
||||
|
||||
static int dp_display_handle_port_ststus_changed(struct dp_display_private *dp)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
if (dp_display_is_sink_count_zero(dp)) {
|
||||
DRM_DEBUG_DP("sink count is zero, nothing to do\n");
|
||||
if (dp->hpd_state != ST_DISCONNECTED) {
|
||||
dp->hpd_state = ST_DISCONNECT_PENDING;
|
||||
dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
|
||||
}
|
||||
} else {
|
||||
if (dp->hpd_state == ST_DISCONNECTED) {
|
||||
dp->hpd_state = ST_CONNECT_PENDING;
|
||||
rc = dp_display_process_hpd_high(dp);
|
||||
if (rc)
|
||||
dp->hpd_state = ST_DISCONNECTED;
|
||||
}
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
|
||||
{
|
||||
u32 sink_request;
|
||||
u32 sink_request = dp->link->sink_request;
|
||||
|
||||
sink_request = dp->link->sink_request;
|
||||
|
||||
if (sink_request & DS_PORT_STATUS_CHANGED) {
|
||||
dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
|
||||
if (dp_display_is_sink_count_zero(dp)) {
|
||||
DRM_DEBUG_DP("sink count is zero, nothing to do\n");
|
||||
return 0;
|
||||
if (dp->hpd_state == ST_DISCONNECTED) {
|
||||
if (sink_request & DP_LINK_STATUS_UPDATED) {
|
||||
DRM_ERROR("Disconnected, no DP_LINK_STATUS_UPDATED\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return dp_display_process_hpd_high(dp);
|
||||
}
|
||||
|
||||
dp_ctrl_handle_sink_request(dp->ctrl);
|
||||
|
||||
if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN)
|
||||
if (sink_request & DP_TEST_LINK_VIDEO_PATTERN)
|
||||
dp_display_handle_video_request(dp);
|
||||
|
||||
return 0;
|
||||
@ -456,7 +491,9 @@ static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
|
||||
static int dp_display_usbpd_attention_cb(struct device *dev)
|
||||
{
|
||||
int rc = 0;
|
||||
u32 sink_request;
|
||||
struct dp_display_private *dp;
|
||||
struct dp_usbpd *hpd;
|
||||
|
||||
if (!dev) {
|
||||
DRM_ERROR("invalid dev\n");
|
||||
@ -470,10 +507,17 @@ static int dp_display_usbpd_attention_cb(struct device *dev)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
hpd = dp->usbpd;
|
||||
|
||||
/* check for any test request issued by sink */
|
||||
rc = dp_link_process_request(dp->link);
|
||||
if (!rc)
|
||||
dp_display_handle_irq_hpd(dp);
|
||||
if (!rc) {
|
||||
sink_request = dp->link->sink_request;
|
||||
if (sink_request & DS_PORT_STATUS_CHANGED)
|
||||
rc = dp_display_handle_port_ststus_changed(dp);
|
||||
else
|
||||
rc = dp_display_handle_irq_hpd(dp);
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
@ -490,8 +534,8 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
|
||||
|
||||
mutex_lock(&dp->event_mutex);
|
||||
|
||||
state = atomic_read(&dp->hpd_state);
|
||||
if (state == ST_SUSPEND_PENDING) {
|
||||
state = dp->hpd_state;
|
||||
if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
|
||||
mutex_unlock(&dp->event_mutex);
|
||||
return 0;
|
||||
}
|
||||
@ -508,21 +552,23 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (state == ST_SUSPENDED)
|
||||
tout = DP_TIMEOUT_NONE;
|
||||
|
||||
atomic_set(&dp->hpd_state, ST_CONNECT_PENDING);
|
||||
dp->hpd_state = ST_CONNECT_PENDING;
|
||||
|
||||
hpd->hpd_high = 1;
|
||||
|
||||
ret = dp_display_usbpd_configure_cb(&dp->pdev->dev);
|
||||
if (ret) { /* failed */
|
||||
if (ret) { /* link train failed */
|
||||
hpd->hpd_high = 0;
|
||||
atomic_set(&dp->hpd_state, ST_DISCONNECTED);
|
||||
}
|
||||
dp->hpd_state = ST_DISCONNECTED;
|
||||
|
||||
/* start sanity checking */
|
||||
dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout);
|
||||
if (ret == -ECONNRESET) { /* cable unplugged */
|
||||
dp->core_initialized = false;
|
||||
}
|
||||
|
||||
} else {
|
||||
/* start sentinel checking in case of missing uevent */
|
||||
dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout);
|
||||
}
|
||||
|
||||
mutex_unlock(&dp->event_mutex);
|
||||
|
||||
@ -539,10 +585,10 @@ static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data)
|
||||
|
||||
mutex_lock(&dp->event_mutex);
|
||||
|
||||
state = atomic_read(&dp->hpd_state);
|
||||
state = dp->hpd_state;
|
||||
if (state == ST_CONNECT_PENDING) {
|
||||
dp_display_enable(dp, 0);
|
||||
atomic_set(&dp->hpd_state, ST_CONNECTED);
|
||||
dp->hpd_state = ST_CONNECTED;
|
||||
}
|
||||
|
||||
mutex_unlock(&dp->event_mutex);
|
||||
@ -553,7 +599,14 @@ static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data)
|
||||
static void dp_display_handle_plugged_change(struct msm_dp *dp_display,
|
||||
bool plugged)
|
||||
{
|
||||
if (dp_display->plugged_cb && dp_display->codec_dev)
|
||||
struct dp_display_private *dp;
|
||||
|
||||
dp = container_of(dp_display,
|
||||
struct dp_display_private, dp_display);
|
||||
|
||||
/* notify audio subsystem only if sink supports audio */
|
||||
if (dp_display->plugged_cb && dp_display->codec_dev &&
|
||||
dp->audio_supported)
|
||||
dp_display->plugged_cb(dp_display->codec_dev, plugged);
|
||||
}
|
||||
|
||||
@ -567,12 +620,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
|
||||
|
||||
mutex_lock(&dp->event_mutex);
|
||||
|
||||
state = atomic_read(&dp->hpd_state);
|
||||
if (state == ST_SUSPEND_PENDING) {
|
||||
mutex_unlock(&dp->event_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
state = dp->hpd_state;
|
||||
if (state == ST_DISCONNECT_PENDING || state == ST_DISCONNECTED) {
|
||||
mutex_unlock(&dp->event_mutex);
|
||||
return 0;
|
||||
@ -585,7 +633,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
|
||||
return 0;
|
||||
}
|
||||
|
||||
atomic_set(&dp->hpd_state, ST_DISCONNECT_PENDING);
|
||||
dp->hpd_state = ST_DISCONNECT_PENDING;
|
||||
|
||||
/* disable HPD plug interrupt until disconnect is done */
|
||||
dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK
|
||||
@ -599,7 +647,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
|
||||
*/
|
||||
dp_display_usbpd_disconnect_cb(&dp->pdev->dev);
|
||||
|
||||
/* start sanity checking */
|
||||
/* start sentinel checking in case of missing uevent */
|
||||
dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
|
||||
|
||||
/* signal the disconnect event early to ensure proper teardown */
|
||||
@ -620,10 +668,10 @@ static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data
|
||||
|
||||
mutex_lock(&dp->event_mutex);
|
||||
|
||||
state = atomic_read(&dp->hpd_state);
|
||||
state = dp->hpd_state;
|
||||
if (state == ST_DISCONNECT_PENDING) {
|
||||
dp_display_disable(dp, 0);
|
||||
atomic_set(&dp->hpd_state, ST_DISCONNECTED);
|
||||
dp->hpd_state = ST_DISCONNECTED;
|
||||
}
|
||||
|
||||
mutex_unlock(&dp->event_mutex);
|
||||
@ -634,17 +682,21 @@ static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data
|
||||
static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
|
||||
{
|
||||
u32 state;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&dp->event_mutex);
|
||||
|
||||
/* irq_hpd can happen at either connected or disconnected state */
|
||||
state = atomic_read(&dp->hpd_state);
|
||||
if (state == ST_SUSPEND_PENDING) {
|
||||
state = dp->hpd_state;
|
||||
if (state == ST_DISPLAY_OFF) {
|
||||
mutex_unlock(&dp->event_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
dp_display_usbpd_attention_cb(&dp->pdev->dev);
|
||||
ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
|
||||
if (ret == -ECONNRESET) { /* cable unplugged */
|
||||
dp->core_initialized = false;
|
||||
}
|
||||
|
||||
mutex_unlock(&dp->event_mutex);
|
||||
|
||||
@ -698,7 +750,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
|
||||
goto error;
|
||||
}
|
||||
|
||||
dp->power = dp_power_get(dp->parser);
|
||||
dp->power = dp_power_get(dev, dp->parser);
|
||||
if (IS_ERR(dp->power)) {
|
||||
rc = PTR_ERR(dp->power);
|
||||
DRM_ERROR("failed to initialize power, rc = %d\n", rc);
|
||||
@ -798,8 +850,6 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data)
|
||||
if (!rc)
|
||||
dp_display->power_on = true;
|
||||
|
||||
/* complete resume_comp regardless it is armed or not */
|
||||
complete(&dp->resume_comp);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -829,7 +879,7 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
|
||||
dp_display = g_dp_display;
|
||||
|
||||
if (!dp_display->power_on)
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
|
||||
/* wait only if audio was enabled */
|
||||
if (dp_display->audio_enabled) {
|
||||
@ -1074,7 +1124,7 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
|
||||
}
|
||||
|
||||
if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) {
|
||||
/* delete connect pending event first */
|
||||
/* stop sentinel connect pending checking */
|
||||
dp_del_event(dp, EV_CONNECT_PENDING_TIMEOUT);
|
||||
dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0);
|
||||
}
|
||||
@ -1151,9 +1201,6 @@ static int dp_display_probe(struct platform_device *pdev)
|
||||
}
|
||||
|
||||
mutex_init(&dp->event_mutex);
|
||||
|
||||
init_completion(&dp->resume_comp);
|
||||
|
||||
g_dp_display = &dp->dp_display;
|
||||
|
||||
/* Store DP audio handle inside DP display */
|
||||
@ -1189,20 +1236,54 @@ static int dp_display_remove(struct platform_device *pdev)
|
||||
|
||||
static int dp_pm_resume(struct device *dev)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
struct msm_dp *dp_display = platform_get_drvdata(pdev);
|
||||
struct dp_display_private *dp;
|
||||
u32 status;
|
||||
|
||||
dp = container_of(dp_display, struct dp_display_private, dp_display);
|
||||
|
||||
mutex_lock(&dp->event_mutex);
|
||||
|
||||
/* start from disconnected state */
|
||||
dp->hpd_state = ST_DISCONNECTED;
|
||||
|
||||
/* turn on dp ctrl/phy */
|
||||
dp_display_host_init(dp);
|
||||
|
||||
dp_catalog_ctrl_hpd_config(dp->catalog);
|
||||
|
||||
status = dp_catalog_link_is_connected(dp->catalog);
|
||||
|
||||
if (status)
|
||||
dp->dp_display.is_connected = true;
|
||||
else
|
||||
dp->dp_display.is_connected = false;
|
||||
|
||||
mutex_unlock(&dp->event_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dp_pm_suspend(struct device *dev)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
struct dp_display_private *dp = platform_get_drvdata(pdev);
|
||||
struct msm_dp *dp_display = platform_get_drvdata(pdev);
|
||||
struct dp_display_private *dp;
|
||||
|
||||
if (!dp) {
|
||||
DRM_ERROR("DP driver bind failed. Invalid driver data\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
dp = container_of(dp_display, struct dp_display_private, dp_display);
|
||||
|
||||
atomic_set(&dp->hpd_state, ST_SUSPENDED);
|
||||
mutex_lock(&dp->event_mutex);
|
||||
|
||||
if (dp->core_initialized == true)
|
||||
dp_display_host_deinit(dp);
|
||||
|
||||
dp->hpd_state = ST_SUSPENDED;
|
||||
|
||||
/* host_init will be called at pm_resume */
|
||||
dp->core_initialized = false;
|
||||
|
||||
mutex_unlock(&dp->event_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1317,19 +1398,6 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dp_display_wait4resume_done(struct dp_display_private *dp)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
reinit_completion(&dp->resume_comp);
|
||||
if (!wait_for_completion_timeout(&dp->resume_comp,
|
||||
WAIT_FOR_RESUME_TIMEOUT_JIFFIES)) {
|
||||
DRM_ERROR("wait4resume_done timedout\n");
|
||||
ret = -ETIMEDOUT;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
|
||||
{
|
||||
int rc = 0;
|
||||
@ -1344,6 +1412,9 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
|
||||
|
||||
mutex_lock(&dp_display->event_mutex);
|
||||
|
||||
/* stop sentinel checking */
|
||||
dp_del_event(dp_display, EV_CONNECT_PENDING_TIMEOUT);
|
||||
|
||||
rc = dp_display_set_mode(dp, &dp_display->dp_mode);
|
||||
if (rc) {
|
||||
DRM_ERROR("Failed to perform a mode set, rc=%d\n", rc);
|
||||
@ -1358,15 +1429,10 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
|
||||
return rc;
|
||||
}
|
||||
|
||||
state = atomic_read(&dp_display->hpd_state);
|
||||
if (state == ST_SUSPENDED) {
|
||||
/* start link training */
|
||||
dp_add_event(dp_display, EV_HPD_PLUG_INT, 0, 0);
|
||||
mutex_unlock(&dp_display->event_mutex);
|
||||
state = dp_display->hpd_state;
|
||||
|
||||
/* wait until dp interface is up */
|
||||
goto resume_done;
|
||||
}
|
||||
if (state == ST_DISPLAY_OFF)
|
||||
dp_display_host_init(dp_display);
|
||||
|
||||
dp_display_enable(dp_display, 0);
|
||||
|
||||
@ -1377,21 +1443,16 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
|
||||
dp_display_unprepare(dp);
|
||||
}
|
||||
|
||||
dp_del_event(dp_display, EV_CONNECT_PENDING_TIMEOUT);
|
||||
|
||||
if (state == ST_SUSPEND_PENDING)
|
||||
/* manual kick off plug event to train link */
|
||||
if (state == ST_DISPLAY_OFF)
|
||||
dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0);
|
||||
|
||||
/* completed connection */
|
||||
atomic_set(&dp_display->hpd_state, ST_CONNECTED);
|
||||
dp_display->hpd_state = ST_CONNECTED;
|
||||
|
||||
mutex_unlock(&dp_display->event_mutex);
|
||||
|
||||
return rc;
|
||||
|
||||
resume_done:
|
||||
dp_display_wait4resume_done(dp_display);
|
||||
return rc;
|
||||
}
|
||||
|
||||
int msm_dp_display_pre_disable(struct msm_dp *dp, struct drm_encoder *encoder)
|
||||
@ -1415,20 +1476,21 @@ int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder)
|
||||
|
||||
mutex_lock(&dp_display->event_mutex);
|
||||
|
||||
/* stop sentinel checking */
|
||||
dp_del_event(dp_display, EV_DISCONNECT_PENDING_TIMEOUT);
|
||||
|
||||
dp_display_disable(dp_display, 0);
|
||||
|
||||
rc = dp_display_unprepare(dp);
|
||||
if (rc)
|
||||
DRM_ERROR("DP display unprepare failed, rc=%d\n", rc);
|
||||
|
||||
dp_del_event(dp_display, EV_DISCONNECT_PENDING_TIMEOUT);
|
||||
|
||||
state = atomic_read(&dp_display->hpd_state);
|
||||
state = dp_display->hpd_state;
|
||||
if (state == ST_DISCONNECT_PENDING) {
|
||||
/* completed disconnection */
|
||||
atomic_set(&dp_display->hpd_state, ST_DISCONNECTED);
|
||||
dp_display->hpd_state = ST_DISCONNECTED;
|
||||
} else {
|
||||
atomic_set(&dp_display->hpd_state, ST_SUSPEND_PENDING);
|
||||
dp_display->hpd_state = ST_DISPLAY_OFF;
|
||||
}
|
||||
|
||||
mutex_unlock(&dp_display->event_mutex);
|
||||
|
@ -773,7 +773,8 @@ static int dp_link_process_link_training_request(struct dp_link_private *link)
|
||||
link->request.test_lane_count);
|
||||
|
||||
link->dp_link.link_params.num_lanes = link->request.test_lane_count;
|
||||
link->dp_link.link_params.rate = link->request.test_link_rate;
|
||||
link->dp_link.link_params.rate =
|
||||
drm_dp_bw_code_to_link_rate(link->request.test_link_rate);
|
||||
|
||||
return 0;
|
||||
}
|
||||
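
The fix above stores a real link rate instead of the raw DPCD bandwidth code. drm_dp_bw_code_to_link_rate() converts the code into the DRM convention of a link symbol rate in kHz; a rough round-trip sketch:

#include <drm/drm_dp_helper.h>

/* Illustrative round trip: DPCD bandwidth code <-> link rate in kHz. */
static void example_bw_code_conversion(void)
{
        int rate_khz = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7); /* 270000 */
        u8 code = drm_dp_link_rate_to_bw_code(rate_khz);            /* 0x0a   */

        (void)code;
}
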
@ -869,6 +870,9 @@ static int dp_link_parse_vx_px(struct dp_link_private *link)
|
||||
drm_dp_get_adjust_request_voltage(link->link_status, 0);
|
||||
link->dp_link.phy_params.p_level =
|
||||
drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0);
|
||||
|
||||
link->dp_link.phy_params.p_level >>= DP_TRAIN_PRE_EMPHASIS_SHIFT;
|
||||
|
||||
DRM_DEBUG_DP("Requested: v_level = 0x%x, p_level = 0x%x\n",
|
||||
link->dp_link.phy_params.v_level,
|
||||
link->dp_link.phy_params.p_level);
|
||||
@ -911,7 +915,8 @@ static int dp_link_process_phy_test_pattern_request(
|
||||
link->request.test_lane_count);
|
||||
|
||||
link->dp_link.link_params.num_lanes = link->request.test_lane_count;
|
||||
link->dp_link.link_params.rate = link->request.test_link_rate;
|
||||
link->dp_link.link_params.rate =
|
||||
drm_dp_bw_code_to_link_rate(link->request.test_link_rate);
|
||||
|
||||
ret = dp_link_parse_vx_px(link);
|
||||
|
||||
@ -939,22 +944,20 @@ static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
|
||||
*/
|
||||
static int dp_link_process_link_status_update(struct dp_link_private *link)
|
||||
{
|
||||
if (!(get_link_status(link->link_status,
|
||||
DP_LANE_ALIGN_STATUS_UPDATED) &
|
||||
DP_LINK_STATUS_UPDATED) ||
|
||||
(drm_dp_clock_recovery_ok(link->link_status,
|
||||
link->dp_link.link_params.num_lanes) &&
|
||||
drm_dp_channel_eq_ok(link->link_status,
|
||||
link->dp_link.link_params.num_lanes)))
|
||||
return -EINVAL;
|
||||
bool channel_eq_done = drm_dp_channel_eq_ok(link->link_status,
|
||||
link->dp_link.link_params.num_lanes);
|
||||
|
||||
DRM_DEBUG_DP("channel_eq_done = %d, clock_recovery_done = %d\n",
|
||||
drm_dp_clock_recovery_ok(link->link_status,
|
||||
link->dp_link.link_params.num_lanes),
|
||||
drm_dp_clock_recovery_ok(link->link_status,
|
||||
link->dp_link.link_params.num_lanes));
|
||||
bool clock_recovery_done = drm_dp_clock_recovery_ok(link->link_status,
|
||||
link->dp_link.link_params.num_lanes);
|
||||
|
||||
return 0;
|
||||
DRM_DEBUG_DP("channel_eq_done = %d, clock_recovery_done = %d\n",
|
||||
channel_eq_done, clock_recovery_done);
|
||||
|
||||
if (channel_eq_done && clock_recovery_done)
|
||||
return -EINVAL;
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1156,6 +1159,12 @@ int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link)
|
||||
{
|
||||
dp_link->phy_params.v_level = 0;
|
||||
dp_link->phy_params.p_level = 0;
|
||||
}
|
||||
|
||||
u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
|
||||
{
|
||||
u32 tbd;
|
||||
|
@ -135,6 +135,7 @@ static inline u32 dp_link_bit_depth_to_bpc(u32 tbd)
|
||||
}
|
||||
}
|
||||
|
||||
void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link);
|
||||
u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp);
|
||||
int dp_link_process_request(struct dp_link *dp_link);
|
||||
int dp_link_get_colorimetry_config(struct dp_link *dp_link);
|
||||
|
@ -196,6 +196,11 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
|
||||
&panel->aux->ddc);
|
||||
if (!dp_panel->edid) {
|
||||
DRM_ERROR("panel edid read failed\n");
|
||||
/* check edid read fail is due to unplug */
|
||||
if (!dp_catalog_link_is_connected(panel->catalog)) {
|
||||
rc = -ETIMEDOUT;
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* fail safe edid */
|
||||
mutex_lock(&connector->dev->mode_config.mutex);
|
||||
|
@ -8,12 +8,14 @@
|
||||
#include <linux/clk.h>
|
||||
#include <linux/clk-provider.h>
|
||||
#include <linux/regulator/consumer.h>
|
||||
#include <linux/pm_opp.h>
|
||||
#include "dp_power.h"
|
||||
#include "msm_drv.h"
|
||||
|
||||
struct dp_power_private {
|
||||
struct dp_parser *parser;
|
||||
struct platform_device *pdev;
|
||||
struct device *dev;
|
||||
struct clk *link_clk_src;
|
||||
struct clk *pixel_provider;
|
||||
struct clk *link_provider;
|
||||
@ -148,18 +150,51 @@ static int dp_power_clk_deinit(struct dp_power_private *power)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dp_power_clk_set_link_rate(struct dp_power_private *power,
struct dss_clk *clk_arry, int num_clk, int enable)
{
u32 rate;
int i, rc = 0;

for (i = 0; i < num_clk; i++) {
if (clk_arry[i].clk) {
if (clk_arry[i].type == DSS_CLK_PCLK) {
if (enable)
rate = clk_arry[i].rate;
else
rate = 0;

rc = dev_pm_opp_set_rate(power->dev, rate);
if (rc)
break;
}

}
}
return rc;
}
|
||||
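
dev_pm_opp_set_rate() above replaces a plain clk_set_rate() so the OPP core can also adjust whatever else the OPP entry carries (performance state, regulator vote), and passing a rate of 0 on disable drops that vote. A minimal sketch, assuming a device that registered an OPP table for its link clock:

#include <linux/pm_opp.h>

/* Illustrative helper: scale the link clock through the OPP core. */
static int example_scale_link(struct device *dev, unsigned long rate_hz, bool enable)
{
        /* rate 0 releases the OPP vote instead of programming a frequency */
        return dev_pm_opp_set_rate(dev, enable ? rate_hz : 0);
}
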
|
||||
static int dp_power_clk_set_rate(struct dp_power_private *power,
|
||||
enum dp_pm_type module, bool enable)
|
||||
{
|
||||
int rc = 0;
|
||||
struct dss_module_power *mp = &power->parser->mp[module];
|
||||
|
||||
if (enable) {
|
||||
rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
|
||||
if (module == DP_CTRL_PM) {
|
||||
rc = dp_power_clk_set_link_rate(power, mp->clk_config, mp->num_clk, enable);
|
||||
if (rc) {
|
||||
DRM_ERROR("failed to set clks rate.\n");
|
||||
DRM_ERROR("failed to set link clks rate\n");
|
||||
return rc;
|
||||
}
|
||||
} else {
|
||||
|
||||
if (enable) {
|
||||
rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
|
||||
if (rc) {
|
||||
DRM_ERROR("failed to set clks rate\n");
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
|
||||
@ -349,7 +384,7 @@ int dp_power_deinit(struct dp_power *dp_power)
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct dp_power *dp_power_get(struct dp_parser *parser)
|
||||
struct dp_power *dp_power_get(struct device *dev, struct dp_parser *parser)
|
||||
{
|
||||
struct dp_power_private *power;
|
||||
struct dp_power *dp_power;
|
||||
@ -365,6 +400,7 @@ struct dp_power *dp_power_get(struct dp_parser *parser)
|
||||
|
||||
power->parser = parser;
|
||||
power->pdev = parser->pdev;
|
||||
power->dev = dev;
|
||||
|
||||
dp_power = &power->dp_power;
|
||||
|
||||
|
@ -102,6 +102,6 @@ void dp_power_client_deinit(struct dp_power *power);
|
||||
* methods to be called by the client to configure the power related
|
||||
* modules.
|
||||
*/
|
||||
struct dp_power *dp_power_get(struct dp_parser *parser);
|
||||
struct dp_power *dp_power_get(struct device *dev, struct dp_parser *parser);
|
||||
|
||||
#endif /* _DP_POWER_H_ */
|
||||
|
@ -32,6 +32,8 @@
|
||||
#define DP_DP_IRQ_HPD_INT_ACK (0x00000002)
|
||||
#define DP_DP_HPD_REPLUG_INT_ACK (0x00000004)
|
||||
#define DP_DP_HPD_UNPLUG_INT_ACK (0x00000008)
|
||||
#define DP_DP_HPD_STATE_STATUS_BITS_MASK (0x0000000F)
|
||||
#define DP_DP_HPD_STATE_STATUS_BITS_SHIFT (0x1C)
|
||||
|
||||
#define REG_DP_DP_HPD_INT_MASK (0x0000000C)
|
||||
#define DP_DP_HPD_PLUG_INT_MASK (0x00000001)
|
||||
|
@ -26,6 +26,7 @@
|
||||
#include "sfpb.xml.h"
|
||||
#include "dsi_cfg.h"
|
||||
#include "msm_kms.h"
|
||||
#include "msm_gem.h"
|
||||
|
||||
#define DSI_RESET_TOGGLE_DELAY_MS 20
|
||||
|
||||
@ -113,7 +114,6 @@ struct msm_dsi_host {
|
||||
struct clk *byte_intf_clk;
|
||||
|
||||
struct opp_table *opp_table;
|
||||
bool has_opp_table;
|
||||
|
||||
u32 byte_clk_rate;
|
||||
u32 pixel_clk_rate;
|
||||
@ -1657,7 +1657,7 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct mipi_dsi_host_ops dsi_host_ops = {
|
||||
static const struct mipi_dsi_host_ops dsi_host_ops = {
|
||||
.attach = dsi_host_attach,
|
||||
.detach = dsi_host_detach,
|
||||
.transfer = dsi_host_transfer,
|
||||
@ -1891,9 +1891,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
|
||||
return PTR_ERR(msm_host->opp_table);
|
||||
/* OPP table is optional */
|
||||
ret = dev_pm_opp_of_add_table(&pdev->dev);
|
||||
if (!ret) {
|
||||
msm_host->has_opp_table = true;
|
||||
} else if (ret != -ENODEV) {
|
||||
if (ret && ret != -ENODEV) {
|
||||
dev_err(&pdev->dev, "invalid OPP table in device tree\n");
|
||||
dev_pm_opp_put_clkname(msm_host->opp_table);
|
||||
return ret;
|
||||
@ -1934,8 +1932,7 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host)
|
||||
mutex_destroy(&msm_host->cmd_mutex);
|
||||
mutex_destroy(&msm_host->dev_mutex);
|
||||
|
||||
if (msm_host->has_opp_table)
|
||||
dev_pm_opp_of_remove_table(&msm_host->pdev->dev);
|
||||
dev_pm_opp_of_remove_table(&msm_host->pdev->dev);
|
||||
dev_pm_opp_put_clkname(msm_host->opp_table);
|
||||
pm_runtime_disable(&msm_host->pdev->dev);
|
||||
}
|
||||
|
@ -192,6 +192,28 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
|
||||
|
||||
static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
|
||||
{
|
||||
void __iomem *base = phy->base;
|
||||
u32 data;
|
||||
|
||||
DBG("");
|
||||
|
||||
if (dsi_phy_hw_v3_0_is_pll_on(phy))
|
||||
pr_warn("Turning OFF PHY while PLL is on\n");
|
||||
|
||||
dsi_phy_hw_v3_0_config_lpcdrx(phy, false);
|
||||
data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);
|
||||
|
||||
/* disable all lanes */
|
||||
data &= ~0x1F;
|
||||
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
|
||||
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0);
|
||||
|
||||
/* Turn off all PHY blocks */
|
||||
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x00);
|
||||
/* make sure phy is turned off */
|
||||
wmb();
|
||||
|
||||
DBG("DSI%d PHY disabled", phy->id);
|
||||
}
|
||||
|
||||
static int dsi_10nm_phy_init(struct msm_dsi_phy *phy)
|
||||
|
@ -200,7 +200,28 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
|
||||
|
||||
static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
|
||||
{
|
||||
/* TODO */
|
||||
void __iomem *base = phy->base;
|
||||
u32 data;
|
||||
|
||||
DBG("");
|
||||
|
||||
if (dsi_phy_hw_v4_0_is_pll_on(phy))
|
||||
pr_warn("Turning OFF PHY while PLL is on\n");
|
||||
|
||||
dsi_phy_hw_v4_0_config_lpcdrx(phy, false);
|
||||
data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_CTRL_0);
|
||||
|
||||
/* disable all lanes */
|
||||
data &= ~0x1F;
|
||||
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data);
|
||||
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, 0);
|
||||
|
||||
/* Turn off all PHY blocks */
|
||||
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x00);
|
||||
/* make sure phy is turned off */
|
||||
wmb();
|
||||
|
||||
DBG("DSI%d PHY disabled", phy->id);
|
||||
}
|
||||
|
||||
static int dsi_7nm_phy_init(struct msm_dsi_phy *phy)
|
||||
|
@ -559,6 +559,7 @@ static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll)
|
||||
struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
|
||||
void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
|
||||
u32 val;
|
||||
int ret;
|
||||
|
||||
val = pll_read(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
|
||||
val &= ~0x3;
|
||||
@ -573,6 +574,13 @@ static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll)
|
||||
val |= cached->pll_mux;
|
||||
pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);
|
||||
|
||||
ret = dsi_pll_10nm_vco_set_rate(&pll->clk_hw, pll_10nm->vco_current_rate, pll_10nm->vco_ref_clk_rate);
|
||||
if (ret) {
|
||||
DRM_DEV_ERROR(&pll_10nm->pdev->dev,
|
||||
"restore vco rate failed. ret=%d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
DBG("DSI PLL%d", pll_10nm->id);
|
||||
|
||||
return 0;
|
||||
|
@ -447,7 +447,10 @@ static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)
|
||||
cached_state->postdiv1 =
|
||||
pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
|
||||
cached_state->byte_mux = pll_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
|
||||
cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
|
||||
if (dsi_pll_28nm_clk_is_enabled(&pll->clk_hw))
|
||||
cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
|
||||
else
|
||||
cached_state->vco_rate = 0;
|
||||
}
|
||||
|
||||
static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
|
||||
|
@ -585,6 +585,7 @@ static int dsi_pll_7nm_restore_state(struct msm_dsi_pll *pll)
|
||||
struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
|
||||
void __iomem *phy_base = pll_7nm->phy_cmn_mmio;
|
||||
u32 val;
|
||||
int ret;
|
||||
|
||||
val = pll_read(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
|
||||
val &= ~0x3;
|
||||
@ -599,6 +600,13 @@ static int dsi_pll_7nm_restore_state(struct msm_dsi_pll *pll)
|
||||
val |= cached->pll_mux;
|
||||
pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, val);
|
||||
|
||||
ret = dsi_pll_7nm_vco_set_rate(&pll->clk_hw, pll_7nm->vco_current_rate, pll_7nm->vco_ref_clk_rate);
|
||||
if (ret) {
|
||||
DRM_DEV_ERROR(&pll_7nm->pdev->dev,
|
||||
"restore vco rate failed. ret=%d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
DBG("DSI PLL%d", pll_7nm->id);
|
||||
|
||||
return 0;
|
||||
|
@ -55,16 +55,32 @@ static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
|
||||
}
|
||||
}
|
||||
|
||||
static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
struct drm_crtc *crtc;

for_each_crtc_mask(kms->dev, crtc, crtc_mask)
mutex_lock(&kms->commit_lock[drm_crtc_index(crtc)]);
}

static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
struct drm_crtc *crtc;

for_each_crtc_mask_reverse(kms->dev, crtc, crtc_mask)
mutex_unlock(&kms->commit_lock[drm_crtc_index(crtc)]);
}
|
||||
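
commit_lock becomes a per-CRTC array here; taking the locks in CRTC-index order and releasing them in reverse keeps the nesting consistently ordered, so two commits touching overlapping CRTC masks cannot deadlock. A small sketch of the same idea, with made-up names and a fixed-size array:

#include <linux/bits.h>
#include <linux/mutex.h>

#define EXAMPLE_MAX_CRTCS 8

static struct mutex example_commit_lock[EXAMPLE_MAX_CRTCS];

/* Illustrative: lock ascending, unlock descending, driven by a bitmask. */
static void example_lock_crtcs(unsigned int crtc_mask)
{
        int i;

        for (i = 0; i < EXAMPLE_MAX_CRTCS; i++)
                if (crtc_mask & BIT(i))
                        mutex_lock(&example_commit_lock[i]);
}

static void example_unlock_crtcs(unsigned int crtc_mask)
{
        int i;

        for (i = EXAMPLE_MAX_CRTCS - 1; i >= 0; i--)
                if (crtc_mask & BIT(i))
                        mutex_unlock(&example_commit_lock[i]);
}
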
|
||||
static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
|
||||
{
|
||||
unsigned crtc_mask = BIT(crtc_idx);
|
||||
|
||||
trace_msm_atomic_async_commit_start(crtc_mask);
|
||||
|
||||
mutex_lock(&kms->commit_lock);
|
||||
lock_crtcs(kms, crtc_mask);
|
||||
|
||||
if (!(kms->pending_crtc_mask & crtc_mask)) {
|
||||
mutex_unlock(&kms->commit_lock);
|
||||
unlock_crtcs(kms, crtc_mask);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -79,7 +95,6 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
|
||||
*/
|
||||
trace_msm_atomic_flush_commit(crtc_mask);
|
||||
kms->funcs->flush_commit(kms, crtc_mask);
|
||||
mutex_unlock(&kms->commit_lock);
|
||||
|
||||
/*
|
||||
* Wait for flush to complete:
|
||||
@ -90,9 +105,8 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
|
||||
|
||||
vblank_put(kms, crtc_mask);
|
||||
|
||||
mutex_lock(&kms->commit_lock);
|
||||
kms->funcs->complete_commit(kms, crtc_mask);
|
||||
mutex_unlock(&kms->commit_lock);
|
||||
unlock_crtcs(kms, crtc_mask);
|
||||
kms->funcs->disable_commit(kms);
|
||||
|
||||
out:
|
||||
@ -103,14 +117,13 @@ static enum hrtimer_restart msm_atomic_pending_timer(struct hrtimer *t)
|
||||
{
|
||||
struct msm_pending_timer *timer = container_of(t,
|
||||
struct msm_pending_timer, timer);
|
||||
struct msm_drm_private *priv = timer->kms->dev->dev_private;
|
||||
|
||||
queue_work(priv->wq, &timer->work);
|
||||
kthread_queue_work(timer->worker, &timer->work);
|
||||
|
||||
return HRTIMER_NORESTART;
|
||||
}
|
||||
|
||||
static void msm_atomic_pending_work(struct work_struct *work)
|
||||
static void msm_atomic_pending_work(struct kthread_work *work)
|
||||
{
|
||||
struct msm_pending_timer *timer = container_of(work,
|
||||
struct msm_pending_timer, work);
|
||||
@ -118,14 +131,30 @@ static void msm_atomic_pending_work(struct work_struct *work)
|
||||
msm_atomic_async_commit(timer->kms, timer->crtc_idx);
|
||||
}
|
||||
|
||||
void msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
struct msm_kms *kms, int crtc_idx)
{
timer->kms = kms;
timer->crtc_idx = crtc_idx;
hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
timer->timer.function = msm_atomic_pending_timer;
INIT_WORK(&timer->work, msm_atomic_pending_work);

timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx);
if (IS_ERR(timer->worker)) {
int ret = PTR_ERR(timer->worker);
timer->worker = NULL;
return ret;
}
sched_set_fifo(timer->worker->task);
kthread_init_work(&timer->work, msm_atomic_pending_work);

return 0;
}

void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer)
{
if (timer->worker)
kthread_destroy_worker(timer->worker);
}
|
||||
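
Moving the async-commit work from the shared priv->wq onto a dedicated kthread_worker lets the driver give that thread real-time priority via sched_set_fifo(). The create/queue/destroy lifecycle looks roughly like this (names are illustrative):

#include <linux/kthread.h>
#include <linux/sched.h>

struct example_ctx {
        struct kthread_worker *worker;
        struct kthread_work work;
};

static void example_work_fn(struct kthread_work *work)
{
        /* runs in the dedicated worker thread */
}

static int example_start(struct example_ctx *ctx)
{
        ctx->worker = kthread_create_worker(0, "example-worker");
        if (IS_ERR(ctx->worker))
                return PTR_ERR(ctx->worker);

        sched_set_fifo(ctx->worker->task);      /* SCHED_FIFO for low latency */
        kthread_init_work(&ctx->work, example_work_fn);
        kthread_queue_work(ctx->worker, &ctx->work);
        return 0;
}

static void example_stop(struct example_ctx *ctx)
{
        kthread_flush_worker(ctx->worker);
        kthread_destroy_worker(ctx->worker);
}
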
|
||||
static bool can_do_async(struct drm_atomic_state *state,
|
||||
@ -189,12 +218,11 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
|
||||
* Ensure any previous (potentially async) commit has
|
||||
* completed:
|
||||
*/
|
||||
lock_crtcs(kms, crtc_mask);
|
||||
trace_msm_atomic_wait_flush_start(crtc_mask);
|
||||
kms->funcs->wait_flush(kms, crtc_mask);
|
||||
trace_msm_atomic_wait_flush_finish(crtc_mask);
|
||||
|
||||
mutex_lock(&kms->commit_lock);
|
||||
|
||||
/*
|
||||
* Now that there is no in-progress flush, prepare the
|
||||
* current update:
|
||||
@ -232,8 +260,7 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
|
||||
}
|
||||
|
||||
kms->funcs->disable_commit(kms);
|
||||
mutex_unlock(&kms->commit_lock);
|
||||
|
||||
unlock_crtcs(kms, crtc_mask);
|
||||
/*
|
||||
* At this point, from drm core's perspective, we
|
||||
* are done with the atomic update, so we can just
|
||||
@ -260,8 +287,7 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
|
||||
*/
|
||||
trace_msm_atomic_flush_commit(crtc_mask);
|
||||
kms->funcs->flush_commit(kms, crtc_mask);
|
||||
mutex_unlock(&kms->commit_lock);
|
||||
|
||||
unlock_crtcs(kms, crtc_mask);
|
||||
/*
|
||||
* Wait for flush to complete:
|
||||
*/
|
||||
@ -271,9 +297,9 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
|
||||
|
||||
vblank_put(kms, crtc_mask);
|
||||
|
||||
mutex_lock(&kms->commit_lock);
|
||||
lock_crtcs(kms, crtc_mask);
|
||||
kms->funcs->complete_commit(kms, crtc_mask);
|
||||
mutex_unlock(&kms->commit_lock);
|
||||
unlock_crtcs(kms, crtc_mask);
|
||||
kms->funcs->disable_commit(kms);
|
||||
|
||||
drm_atomic_helper_commit_hw_done(state);
|
||||
|
@ -112,6 +112,11 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
|
||||
{
|
||||
struct msm_drm_private *priv = dev->dev_private;
|
||||
struct msm_gpu *gpu = priv->gpu;
|
||||
int ret;
|
||||
|
||||
ret = mutex_lock_interruptible(&priv->mm_lock);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (gpu) {
|
||||
seq_printf(m, "Active Objects (%s):\n", gpu->name);
|
||||
@ -119,7 +124,10 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
|
||||
}
|
||||
|
||||
seq_printf(m, "Inactive Objects:\n");
|
||||
msm_gem_describe_objects(&priv->inactive_list, m);
|
||||
msm_gem_describe_objects(&priv->inactive_dontneed, m);
|
||||
msm_gem_describe_objects(&priv->inactive_willneed, m);
|
||||
|
||||
mutex_unlock(&priv->mm_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -7,6 +7,7 @@
|
||||
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/sched/mm.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <uapi/linux/sched/types.h>
|
||||
|
||||
@ -120,8 +121,8 @@ struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
|
||||
return clk;
|
||||
}
|
||||
|
||||
void __iomem *_msm_ioremap(struct platform_device *pdev, const char *name,
|
||||
const char *dbgname, bool quiet)
|
||||
static void __iomem *_msm_ioremap(struct platform_device *pdev, const char *name,
|
||||
const char *dbgname, bool quiet)
|
||||
{
|
||||
struct resource *res;
|
||||
unsigned long size;
|
||||
@ -180,6 +181,14 @@ u32 msm_readl(const void __iomem *addr)
|
||||
return val;
|
||||
}
|
||||
|
||||
void msm_rmw(void __iomem *addr, u32 mask, u32 or)
|
||||
{
|
||||
u32 val = msm_readl(addr);
|
||||
|
||||
val &= ~mask;
|
||||
msm_writel(val | or, addr);
|
||||
}
|
||||
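
msm_rmw() is a small read-modify-write helper: it clears the bits in the mask and ORs in the new value in one call. Typical use looks like this (the register offset and field are made up for the example):

/* Illustrative: set bits [3:0] of a control register to 0x5. */
static void example_update_field(void __iomem *base)
{
        msm_rmw(base + 0x10, 0xf, 0x5);
}
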
|
||||
struct msm_vblank_work {
|
||||
struct work_struct work;
|
||||
int crtc_id;
|
||||
@ -437,10 +446,14 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
|
||||
|
||||
priv->wq = alloc_ordered_workqueue("msm", 0);

INIT_WORK(&priv->free_work, msm_gem_free_work);
init_llist_head(&priv->free_list);
INIT_LIST_HEAD(&priv->inactive_willneed);
INIT_LIST_HEAD(&priv->inactive_dontneed);
mutex_init(&priv->mm_lock);

INIT_LIST_HEAD(&priv->inactive_list);
/* Teach lockdep about lock ordering wrt. shrinker: */
fs_reclaim_acquire(GFP_KERNEL);
might_lock(&priv->mm_lock);
fs_reclaim_release(GFP_KERNEL);
|
||||
|
||||
drm_mode_config_init(ddev);
|
||||
|
||||
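
The fs_reclaim_acquire()/might_lock() pair above primes lockdep at init time: it records that mm_lock can be taken inside reclaim (i.e. from the shrinker), so any later attempt to allocate memory while holding mm_lock is flagged immediately rather than only when the shrinker actually runs under memory pressure. The same trick for an arbitrary lock, as a sketch:

#include <linux/mutex.h>
#include <linux/sched/mm.h>

static struct mutex example_lock;

/* Illustrative: teach lockdep "example_lock may be taken under reclaim". */
static void example_prime_lockdep(void)
{
        mutex_init(&example_lock);

        fs_reclaim_acquire(GFP_KERNEL);
        might_lock(&example_lock);
        fs_reclaim_release(GFP_KERNEL);
}
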
@ -908,14 +921,9 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = mutex_lock_interruptible(&dev->struct_mutex);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
obj = drm_gem_object_lookup(file, args->handle);
|
||||
if (!obj) {
|
||||
ret = -ENOENT;
|
||||
goto unlock;
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
ret = msm_gem_madvise(obj, args->madv);
|
||||
@ -924,10 +932,8 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
drm_gem_object_put_locked(obj);
|
||||
drm_gem_object_put(obj);
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -174,12 +174,21 @@ struct msm_drm_private {
|
||||
struct msm_rd_state *hangrd; /* debugfs to dump hanging submits */
|
||||
struct msm_perf_state *perf;
|
||||
|
||||
/* list of GEM objects: */
|
||||
struct list_head inactive_list;
|
||||
|
||||
/* worker for delayed free of objects: */
|
||||
struct work_struct free_work;
|
||||
struct llist_head free_list;
|
||||
/*
|
||||
* Lists of inactive GEM objects. Every bo is either in one of the
|
||||
* inactive lists (depending on whether or not it is shrinkable) or
|
||||
* gpu->active_list (for the gpu it is active on[1])
|
||||
*
|
||||
* These lists are protected by mm_lock. If struct_mutex is involved, it
|
||||
* should be acquired prior to mm_lock. One should *not* hold mm_lock in
|
||||
* get_pages()/vmap()/etc paths, as they can trigger the shrinker.
|
||||
*
|
||||
* [1] if someone ever added support for the old 2d cores, there could be
|
||||
* more than one gpu object
|
||||
*/
|
||||
struct list_head inactive_willneed; /* inactive + !shrinkable */
|
||||
struct list_head inactive_dontneed; /* inactive + shrinkable */
|
||||
struct mutex mm_lock;
|
||||
|
||||
struct workqueue_struct *wq;
|
||||
|
||||
@ -228,8 +237,9 @@ struct msm_pending_timer;
|
||||
|
||||
int msm_atomic_prepare_fb(struct drm_plane *plane,
|
||||
struct drm_plane_state *new_state);
|
||||
void msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
|
||||
int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
|
||||
struct msm_kms *kms, int crtc_idx);
|
||||
void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer);
|
||||
void msm_atomic_commit_tail(struct drm_atomic_state *state);
|
||||
struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
|
||||
void msm_atomic_state_clear(struct drm_atomic_state *state);
|
||||
@ -266,34 +276,12 @@ void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
|
||||
|
||||
bool msm_use_mmu(struct drm_device *dev);
|
||||
|
||||
void msm_gem_submit_free(struct msm_gem_submit *submit);
|
||||
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
|
||||
struct drm_file *file);
|
||||
|
||||
void msm_gem_shrinker_init(struct drm_device *dev);
|
||||
void msm_gem_shrinker_cleanup(struct drm_device *dev);
|
||||
|
||||
int msm_gem_mmap_obj(struct drm_gem_object *obj,
|
||||
struct vm_area_struct *vma);
|
||||
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
|
||||
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
|
||||
int msm_gem_get_iova(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace, uint64_t *iova);
|
||||
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace, uint64_t *iova,
|
||||
u64 range_start, u64 range_end);
|
||||
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace, uint64_t *iova);
|
||||
uint64_t msm_gem_iova(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace);
|
||||
void msm_gem_unpin_iova(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace);
|
||||
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
|
||||
void msm_gem_put_pages(struct drm_gem_object *obj);
|
||||
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
|
||||
struct drm_mode_create_dumb *args);
|
||||
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
|
||||
uint32_t handle, uint64_t *offset);
|
||||
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
|
||||
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
|
||||
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);
|
||||
@ -302,37 +290,6 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
|
||||
struct dma_buf_attachment *attach, struct sg_table *sg);
|
||||
int msm_gem_prime_pin(struct drm_gem_object *obj);
|
||||
void msm_gem_prime_unpin(struct drm_gem_object *obj);
|
||||
void *msm_gem_get_vaddr(struct drm_gem_object *obj);
|
||||
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
|
||||
void msm_gem_put_vaddr(struct drm_gem_object *obj);
|
||||
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
|
||||
int msm_gem_sync_object(struct drm_gem_object *obj,
|
||||
struct msm_fence_context *fctx, bool exclusive);
|
||||
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
|
||||
void msm_gem_active_put(struct drm_gem_object *obj);
|
||||
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
|
||||
int msm_gem_cpu_fini(struct drm_gem_object *obj);
|
||||
void msm_gem_free_object(struct drm_gem_object *obj);
|
||||
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
|
||||
uint32_t size, uint32_t flags, uint32_t *handle, char *name);
|
||||
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
|
||||
uint32_t size, uint32_t flags);
|
||||
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
|
||||
uint32_t size, uint32_t flags);
|
||||
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
|
||||
uint32_t flags, struct msm_gem_address_space *aspace,
|
||||
struct drm_gem_object **bo, uint64_t *iova);
|
||||
void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
|
||||
uint32_t flags, struct msm_gem_address_space *aspace,
|
||||
struct drm_gem_object **bo, uint64_t *iova);
|
||||
void msm_gem_kernel_put(struct drm_gem_object *bo,
|
||||
struct msm_gem_address_space *aspace, bool locked);
|
||||
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
|
||||
struct dma_buf *dmabuf, struct sg_table *sgt);
|
||||
void msm_gem_free_work(struct work_struct *work);
|
||||
|
||||
__printf(2, 3)
|
||||
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
|
||||
|
||||
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
|
||||
struct msm_gem_address_space *aspace);
|
||||
@ -422,6 +379,11 @@ static inline int msm_dp_display_disable(struct msm_dp *dp,
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
static inline int msm_dp_display_pre_disable(struct msm_dp *dp,
|
||||
struct drm_encoder *encoder)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
static inline void msm_dp_display_mode_set(struct msm_dp *dp,
|
||||
struct drm_encoder *encoder,
|
||||
struct drm_display_mode *mode,
|
||||
@ -446,8 +408,6 @@ void __init msm_dpu_register(void);
|
||||
void __exit msm_dpu_unregister(void);
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
|
||||
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
|
||||
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
|
||||
int msm_debugfs_late_init(struct drm_device *dev);
|
||||
int msm_rd_debugfs_init(struct drm_minor *minor);
|
||||
@ -477,6 +437,7 @@ void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name,
|
||||
const char *dbgname);
|
||||
void msm_writel(u32 data, void __iomem *addr);
|
||||
u32 msm_readl(const void __iomem *addr);
|
||||
void msm_rmw(void __iomem *addr, u32 mask, u32 or);
|
||||
|
||||
struct msm_gpu_submitqueue;
|
||||
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
|
||||
|
@ -9,6 +9,7 @@
|
||||
#include <drm/drm_fourcc.h>
|
||||
|
||||
#include "msm_drv.h"
|
||||
#include "msm_gem.h"
|
||||
#include "msm_kms.h"
|
||||
|
||||
extern int msm_gem_mmap_obj(struct drm_gem_object *obj,
|
||||
|
@ -18,8 +18,7 @@
|
||||
#include "msm_gpu.h"
|
||||
#include "msm_mmu.h"
|
||||
|
||||
static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
|
||||
|
||||
static void update_inactive(struct msm_gem_object *msm_obj);
|
||||
|
||||
static dma_addr_t physaddr(struct drm_gem_object *obj)
|
||||
{
|
||||
@ -177,15 +176,15 @@ struct page **msm_gem_get_pages(struct drm_gem_object *obj)
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
struct page **p;
|
||||
|
||||
mutex_lock(&msm_obj->lock);
|
||||
msm_gem_lock(obj);
|
||||
|
||||
if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
msm_gem_unlock(obj);
|
||||
return ERR_PTR(-EBUSY);
|
||||
}
|
||||
|
||||
p = get_pages(obj);
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
msm_gem_unlock(obj);
|
||||
return p;
|
||||
}
|
||||
|
||||
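
The mutex_lock(&msm_obj->lock) calls above become msm_gem_lock()/msm_gem_unlock() helpers. Since this series reworks GEM locking around the object's reservation lock, those helpers are presumably thin wrappers over dma_resv_lock()/dma_resv_unlock(), roughly as sketched below; this is an inference from the surrounding rework, not something shown in this hunk:

#include <linux/dma-resv.h>
#include <drm/drm_gem.h>

/* Assumed shape of the new helpers: serialize on the GEM object's resv. */
static inline void example_gem_lock(struct drm_gem_object *obj)
{
        dma_resv_lock(obj->resv, NULL);
}

static inline int example_gem_lock_interruptible(struct drm_gem_object *obj)
{
        return dma_resv_lock_interruptible(obj->resv, NULL);
}

static inline void example_gem_unlock(struct drm_gem_object *obj)
{
        dma_resv_unlock(obj->resv);
}

static inline bool example_gem_is_locked(struct drm_gem_object *obj)
{
        return dma_resv_is_locked(obj->resv);
}
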
@ -251,14 +250,14 @@ static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
|
||||
* vm_ops.open/drm_gem_mmap_obj and close get and put
|
||||
* a reference on obj. So, we dont need to hold one here.
|
||||
*/
|
||||
err = mutex_lock_interruptible(&msm_obj->lock);
|
||||
err = msm_gem_lock_interruptible(obj);
|
||||
if (err) {
|
||||
ret = VM_FAULT_NOPAGE;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
msm_gem_unlock(obj);
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
@ -279,7 +278,7 @@ static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
|
||||
|
||||
ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
|
||||
out_unlock:
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
msm_gem_unlock(obj);
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
@ -288,10 +287,9 @@ out:
|
||||
static uint64_t mmap_offset(struct drm_gem_object *obj)
|
||||
{
|
||||
struct drm_device *dev = obj->dev;
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
int ret;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&msm_obj->lock));
|
||||
WARN_ON(!msm_gem_is_locked(obj));
|
||||
|
||||
/* Make it mmapable */
|
||||
ret = drm_gem_create_mmap_offset(obj);
|
||||
@ -307,11 +305,10 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
|
||||
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
|
||||
{
|
||||
uint64_t offset;
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
|
||||
mutex_lock(&msm_obj->lock);
|
||||
msm_gem_lock(obj);
|
||||
offset = mmap_offset(obj);
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
msm_gem_unlock(obj);
|
||||
return offset;
|
||||
}
|
||||
|
||||
@ -321,7 +318,7 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
struct msm_gem_vma *vma;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&msm_obj->lock));
|
||||
WARN_ON(!msm_gem_is_locked(obj));
|
||||
|
||||
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
|
||||
if (!vma)
|
||||
@ -340,7 +337,7 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
struct msm_gem_vma *vma;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&msm_obj->lock));
|
||||
WARN_ON(!msm_gem_is_locked(obj));
|
||||
|
||||
list_for_each_entry(vma, &msm_obj->vmas, list) {
|
||||
if (vma->aspace == aspace)
|
||||
@ -359,33 +356,45 @@ static void del_vma(struct msm_gem_vma *vma)
|
||||
kfree(vma);
|
||||
}
|
||||
|
||||
/* Called with msm_obj->lock locked */
|
||||
/* Called with msm_obj locked */
|
||||
static void
|
||||
put_iova(struct drm_gem_object *obj)
|
||||
put_iova_spaces(struct drm_gem_object *obj)
|
||||
{
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
struct msm_gem_vma *vma, *tmp;
|
||||
struct msm_gem_vma *vma;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&msm_obj->lock));
|
||||
WARN_ON(!msm_gem_is_locked(obj));
|
||||
|
||||
list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
|
||||
list_for_each_entry(vma, &msm_obj->vmas, list) {
|
||||
if (vma->aspace) {
|
||||
msm_gem_purge_vma(vma->aspace, vma);
|
||||
msm_gem_close_vma(vma->aspace, vma);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Called with msm_obj locked */
|
||||
static void
|
||||
put_iova_vmas(struct drm_gem_object *obj)
|
||||
{
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
struct msm_gem_vma *vma, *tmp;
|
||||
|
||||
WARN_ON(!msm_gem_is_locked(obj));
|
||||
|
||||
list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
|
||||
del_vma(vma);
|
||||
}
|
||||
}
|
||||
|
||||
static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
|
||||
static int get_iova_locked(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace, uint64_t *iova,
|
||||
u64 range_start, u64 range_end)
|
||||
{
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
struct msm_gem_vma *vma;
|
||||
int ret = 0;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&msm_obj->lock));
|
||||
WARN_ON(!msm_gem_is_locked(obj));
|
||||
|
||||
vma = lookup_vma(obj, aspace);
|
||||
|
||||
@ -420,7 +429,7 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
|
||||
if (msm_obj->flags & MSM_BO_MAP_PRIV)
|
||||
prot |= IOMMU_PRIV;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&msm_obj->lock));
|
||||
WARN_ON(!msm_gem_is_locked(obj));
|
||||
|
||||
if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
|
||||
return -EBUSY;
|
||||
@ -437,21 +446,16 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
|
||||
msm_obj->sgt, obj->size >> PAGE_SHIFT);
|
||||
}
|
||||
|
||||
/*
|
||||
* get iova and pin it. Should have a matching put
|
||||
* limits iova to specified range (in pages)
|
||||
*/
|
||||
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
|
||||
static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace, uint64_t *iova,
|
||||
u64 range_start, u64 range_end)
|
||||
{
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
u64 local;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&msm_obj->lock);
|
||||
WARN_ON(!msm_gem_is_locked(obj));
|
||||
|
||||
ret = msm_gem_get_iova_locked(obj, aspace, &local,
|
||||
ret = get_iova_locked(obj, aspace, &local,
|
||||
range_start, range_end);
|
||||
|
||||
if (!ret)
|
||||
@ -460,10 +464,32 @@ int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
|
||||
if (!ret)
|
||||
*iova = local;
|
||||
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* get iova and pin it. Should have a matching put
|
||||
* limits iova to specified range (in pages)
|
||||
*/
|
||||
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace, uint64_t *iova,
|
||||
u64 range_start, u64 range_end)
|
||||
{
|
||||
int ret;
|
||||
|
||||
msm_gem_lock(obj);
|
||||
ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
|
||||
msm_gem_unlock(obj);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace, uint64_t *iova)
|
||||
{
|
||||
return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
|
||||
}
|
||||
|
||||
/* get iova and pin it. Should have a matching put */
|
||||
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace, uint64_t *iova)
|
||||
@ -478,12 +504,11 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
|
||||
int msm_gem_get_iova(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace, uint64_t *iova)
|
||||
{
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
int ret;
|
||||
|
||||
mutex_lock(&msm_obj->lock);
|
||||
ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX);
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
msm_gem_lock(obj);
|
||||
ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
|
||||
msm_gem_unlock(obj);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -494,17 +519,32 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
|
||||
uint64_t msm_gem_iova(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace)
|
||||
{
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
struct msm_gem_vma *vma;
|
||||
|
||||
mutex_lock(&msm_obj->lock);
|
||||
msm_gem_lock(obj);
|
||||
vma = lookup_vma(obj, aspace);
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
msm_gem_unlock(obj);
|
||||
WARN_ON(!vma);
|
||||
|
||||
return vma ? vma->iova : 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Locked variant of msm_gem_unpin_iova()
|
||||
*/
|
||||
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace)
|
||||
{
|
||||
struct msm_gem_vma *vma;
|
||||
|
||||
WARN_ON(!msm_gem_is_locked(obj));
|
||||
|
||||
vma = lookup_vma(obj, aspace);
|
||||
|
||||
if (!WARN_ON(!vma))
|
||||
msm_gem_unmap_vma(aspace, vma);
|
||||
}
|
||||
|
||||
/*
|
||||
* Unpin a iova by updating the reference counts. The memory isn't actually
|
||||
* purged until something else (shrinker, mm_notifier, destroy, etc) decides
|
||||
@ -513,16 +553,9 @@ uint64_t msm_gem_iova(struct drm_gem_object *obj,
|
||||
void msm_gem_unpin_iova(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace)
|
||||
{
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
struct msm_gem_vma *vma;
|
||||
|
||||
mutex_lock(&msm_obj->lock);
|
||||
vma = lookup_vma(obj, aspace);
|
||||
|
||||
if (!WARN_ON(!vma))
|
||||
msm_gem_unmap_vma(aspace, vma);
|
||||
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
msm_gem_lock(obj);
|
||||
msm_gem_unpin_iova_locked(obj, aspace);
|
||||
msm_gem_unlock(obj);
|
||||
}
|
||||
|
||||
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
|
||||
@ -560,23 +593,22 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
int ret = 0;
|
||||
|
||||
WARN_ON(!msm_gem_is_locked(obj));
|
||||
|
||||
if (obj->import_attach)
|
||||
return ERR_PTR(-ENODEV);
|
||||
|
||||
mutex_lock(&msm_obj->lock);
|
||||
|
||||
if (WARN_ON(msm_obj->madv > madv)) {
|
||||
DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
|
||||
msm_obj->madv, madv);
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
return ERR_PTR(-EBUSY);
|
||||
}
|
||||
|
||||
/* increment vmap_count *before* vmap() call, so shrinker can
|
||||
* check vmap_count (is_vunmapable()) outside of msm_obj->lock.
|
||||
* check vmap_count (is_vunmapable()) outside of msm_obj lock.
|
||||
* This guarantees that we won't try to msm_gem_vunmap() this
|
||||
* same object from within the vmap() call (while we already
|
||||
* hold msm_obj->lock)
|
||||
* hold msm_obj lock)
|
||||
*/
|
||||
msm_obj->vmap_count++;
|
||||
|
||||
@ -594,18 +626,27 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
return msm_obj->vaddr;
|
||||
|
||||
fail:
|
||||
msm_obj->vmap_count--;
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
|
||||
{
|
||||
return get_vaddr(obj, MSM_MADV_WILLNEED);
|
||||
}
|
||||
|
||||
void *msm_gem_get_vaddr(struct drm_gem_object *obj)
|
||||
{
|
||||
return get_vaddr(obj, MSM_MADV_WILLNEED);
|
||||
void *ret;
|
||||
|
||||
msm_gem_lock(obj);
|
||||
ret = msm_gem_get_vaddr_locked(obj);
|
||||
msm_gem_unlock(obj);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -619,14 +660,21 @@ void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
|
||||
return get_vaddr(obj, __MSM_MADV_PURGED);
|
||||
}
|
||||
|
||||
void msm_gem_put_vaddr(struct drm_gem_object *obj)
|
||||
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
|
||||
{
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
|
||||
mutex_lock(&msm_obj->lock);
|
||||
WARN_ON(!msm_gem_is_locked(obj));
|
||||
WARN_ON(msm_obj->vmap_count < 1);
|
||||
|
||||
msm_obj->vmap_count--;
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
}
|
||||
|
||||
void msm_gem_put_vaddr(struct drm_gem_object *obj)
|
||||
{
|
||||
msm_gem_lock(obj);
|
||||
msm_gem_put_vaddr_locked(obj);
|
||||
msm_gem_unlock(obj);
|
||||
}
|
||||
|
||||
/* Update madvise status, returns true if not purged, else
|
||||
@ -636,37 +684,40 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
|
||||
{
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
|
||||
mutex_lock(&msm_obj->lock);
|
||||
|
||||
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
|
||||
msm_gem_lock(obj);
|
||||
|
||||
if (msm_obj->madv != __MSM_MADV_PURGED)
|
||||
msm_obj->madv = madv;
|
||||
|
||||
madv = msm_obj->madv;
|
||||
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
/* If the obj is inactive, we might need to move it
|
||||
* between inactive lists
|
||||
*/
|
||||
if (msm_obj->active_count == 0)
|
||||
update_inactive(msm_obj);
|
||||
|
||||
msm_gem_unlock(obj);
|
||||
|
||||
return (madv != __MSM_MADV_PURGED);
|
||||
}
|
||||
|
||||
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
|
||||
void msm_gem_purge(struct drm_gem_object *obj)
|
||||
{
|
||||
struct drm_device *dev = obj->dev;
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
|
||||
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
|
||||
WARN_ON(!is_purgeable(msm_obj));
|
||||
WARN_ON(obj->import_attach);
|
||||
|
||||
mutex_lock_nested(&msm_obj->lock, subclass);
|
||||
put_iova_spaces(obj);
|
||||
|
||||
put_iova(obj);
|
||||
|
||||
msm_gem_vunmap_locked(obj);
|
||||
msm_gem_vunmap(obj);
|
||||
|
||||
put_pages(obj);
|
||||
|
||||
put_iova_vmas(obj);
|
||||
|
||||
msm_obj->madv = __MSM_MADV_PURGED;
|
||||
|
||||
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
|
||||
@ -681,15 +732,13 @@ void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
|
||||
|
||||
invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
|
||||
0, (loff_t)-1);
|
||||
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
}
|
||||
|
||||
static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
|
||||
void msm_gem_vunmap(struct drm_gem_object *obj)
|
||||
{
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
|
||||
WARN_ON(!mutex_is_locked(&msm_obj->lock));
|
||||
WARN_ON(!msm_gem_is_locked(obj));
|
||||
|
||||
if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
|
||||
return;
|
||||
@ -698,15 +747,6 @@ static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
|
||||
msm_obj->vaddr = NULL;
|
||||
}
|
||||
|
||||
void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
|
||||
{
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
|
||||
mutex_lock_nested(&msm_obj->lock, subclass);
|
||||
msm_gem_vunmap_locked(obj);
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
}
|
||||
|
||||
/* must be called before _move_to_active().. */
|
||||
int msm_gem_sync_object(struct drm_gem_object *obj,
|
||||
struct msm_fence_context *fctx, bool exclusive)
|
||||
@ -745,30 +785,48 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
|
||||
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
|
||||
{
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
|
||||
struct msm_drm_private *priv = obj->dev->dev_private;
|
||||
|
||||
might_sleep();
|
||||
WARN_ON(!msm_gem_is_locked(obj));
|
||||
WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
|
||||
|
||||
if (!atomic_fetch_inc(&msm_obj->active_count)) {
|
||||
msm_obj->gpu = gpu;
|
||||
if (msm_obj->active_count++ == 0) {
|
||||
mutex_lock(&priv->mm_lock);
|
||||
list_del_init(&msm_obj->mm_list);
|
||||
list_add_tail(&msm_obj->mm_list, &gpu->active_list);
|
||||
mutex_unlock(&priv->mm_lock);
|
||||
}
|
||||
}
|
||||
|
||||
void msm_gem_active_put(struct drm_gem_object *obj)
|
||||
{
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
struct msm_drm_private *priv = obj->dev->dev_private;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
|
||||
might_sleep();
|
||||
WARN_ON(!msm_gem_is_locked(obj));
|
||||
|
||||
if (!atomic_dec_return(&msm_obj->active_count)) {
|
||||
msm_obj->gpu = NULL;
|
||||
list_del_init(&msm_obj->mm_list);
|
||||
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
|
||||
if (--msm_obj->active_count == 0) {
|
||||
update_inactive(msm_obj);
|
||||
}
|
||||
}
|
||||
|
||||
static void update_inactive(struct msm_gem_object *msm_obj)
|
||||
{
|
||||
struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
|
||||
|
||||
mutex_lock(&priv->mm_lock);
|
||||
WARN_ON(msm_obj->active_count != 0);
|
||||
|
||||
list_del_init(&msm_obj->mm_list);
|
||||
if (msm_obj->madv == MSM_MADV_WILLNEED)
|
||||
list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
|
||||
else
|
||||
list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
|
||||
|
||||
mutex_unlock(&priv->mm_lock);
|
||||
}
|
||||
|
||||
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
|
||||
{
|
||||
bool write = !!(op & MSM_PREP_WRITE);
|
||||
@ -815,7 +873,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
|
||||
uint64_t off = drm_vma_node_start(&obj->vma_node);
|
||||
const char *madv;
|
||||
|
||||
mutex_lock(&msm_obj->lock);
|
||||
msm_gem_lock(obj);
|
||||
|
||||
switch (msm_obj->madv) {
|
||||
case __MSM_MADV_PURGED:
|
||||
@ -883,7 +941,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
|
||||
describe_fence(fence, "Exclusive", m);
|
||||
rcu_read_unlock();
|
||||
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
msm_gem_unlock(obj);
|
||||
}
|
||||
|
||||
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
|
||||
@ -912,25 +970,16 @@ void msm_gem_free_object(struct drm_gem_object *obj)
|
||||
struct drm_device *dev = obj->dev;
|
||||
struct msm_drm_private *priv = dev->dev_private;
|
||||
|
||||
if (llist_add(&msm_obj->freed, &priv->free_list))
|
||||
queue_work(priv->wq, &priv->free_work);
|
||||
}
|
||||
mutex_lock(&priv->mm_lock);
|
||||
list_del(&msm_obj->mm_list);
|
||||
mutex_unlock(&priv->mm_lock);
|
||||
|
||||
static void free_object(struct msm_gem_object *msm_obj)
|
||||
{
|
||||
struct drm_gem_object *obj = &msm_obj->base;
|
||||
struct drm_device *dev = obj->dev;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
|
||||
msm_gem_lock(obj);
|
||||
|
||||
/* object should not be on active list: */
|
||||
WARN_ON(is_active(msm_obj));
|
||||
|
||||
list_del(&msm_obj->mm_list);
|
||||
|
||||
mutex_lock(&msm_obj->lock);
|
||||
|
||||
put_iova(obj);
|
||||
put_iova_spaces(obj);
|
||||
|
||||
if (obj->import_attach) {
|
||||
WARN_ON(msm_obj->vaddr);
|
||||
@ -941,41 +990,25 @@ static void free_object(struct msm_gem_object *msm_obj)
|
||||
if (msm_obj->pages)
|
||||
kvfree(msm_obj->pages);
|
||||
|
||||
/* dma_buf_detach() grabs resv lock, so we need to unlock
|
||||
* prior to drm_prime_gem_destroy
|
||||
*/
|
||||
msm_gem_unlock(obj);
|
||||
|
||||
drm_prime_gem_destroy(obj, msm_obj->sgt);
|
||||
} else {
|
||||
msm_gem_vunmap_locked(obj);
|
||||
msm_gem_vunmap(obj);
|
||||
put_pages(obj);
|
||||
msm_gem_unlock(obj);
|
||||
}
|
||||
|
||||
put_iova_vmas(obj);
|
||||
|
||||
drm_gem_object_release(obj);
|
||||
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
kfree(msm_obj);
|
||||
}
|
||||
|
||||
void msm_gem_free_work(struct work_struct *work)
|
||||
{
|
||||
struct msm_drm_private *priv =
|
||||
container_of(work, struct msm_drm_private, free_work);
|
||||
struct drm_device *dev = priv->dev;
|
||||
struct llist_node *freed;
|
||||
struct msm_gem_object *msm_obj, *next;
|
||||
|
||||
while ((freed = llist_del_all(&priv->free_list))) {
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
||||
llist_for_each_entry_safe(msm_obj, next,
|
||||
freed, freed)
|
||||
free_object(msm_obj);
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
if (need_resched())
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* convenience method to construct a GEM buffer object, and userspace handle */
|
||||
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
|
||||
uint32_t size, uint32_t flags, uint32_t *handle,
|
||||
@ -1037,8 +1070,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
|
||||
if (!msm_obj)
|
||||
return -ENOMEM;
|
||||
|
||||
mutex_init(&msm_obj->lock);
|
||||
|
||||
msm_obj->flags = flags;
|
||||
msm_obj->madv = MSM_MADV_WILLNEED;
|
||||
|
||||
@ -1086,10 +1117,10 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
|
||||
struct msm_gem_vma *vma;
|
||||
struct page **pages;
|
||||
|
||||
mutex_lock(&msm_obj->lock);
|
||||
msm_gem_lock(obj);
|
||||
|
||||
vma = add_vma(obj, NULL);
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
msm_gem_unlock(obj);
|
||||
if (IS_ERR(vma)) {
|
||||
ret = PTR_ERR(vma);
|
||||
goto fail;
|
||||
@ -1119,19 +1150,19 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
|
||||
mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
|
||||
}
|
||||
|
||||
if (struct_mutex_locked) {
|
||||
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
|
||||
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
|
||||
} else {
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
}
|
||||
mutex_lock(&priv->mm_lock);
|
||||
/* Initially obj is idle, obj->madv == WILLNEED: */
|
||||
list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
|
||||
mutex_unlock(&priv->mm_lock);
|
||||
|
||||
return obj;
|
||||
|
||||
fail:
|
||||
drm_gem_object_put(obj);
|
||||
if (struct_mutex_locked) {
|
||||
drm_gem_object_put_locked(obj);
|
||||
} else {
|
||||
drm_gem_object_put(obj);
|
||||
}
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
@ -1173,26 +1204,26 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
|
||||
npages = size / PAGE_SIZE;
|
||||
|
||||
msm_obj = to_msm_bo(obj);
|
||||
mutex_lock(&msm_obj->lock);
|
||||
msm_gem_lock(obj);
|
||||
msm_obj->sgt = sgt;
|
||||
msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
|
||||
if (!msm_obj->pages) {
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
msm_gem_unlock(obj);
|
||||
ret = -ENOMEM;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
|
||||
if (ret) {
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
msm_gem_unlock(obj);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
mutex_unlock(&msm_obj->lock);
|
||||
msm_gem_unlock(obj);
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
mutex_lock(&priv->mm_lock);
|
||||
list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
|
||||
mutex_unlock(&priv->mm_lock);
|
||||
|
||||
return obj;
|
||||
|
||||
|
@ -64,7 +64,6 @@ struct msm_gem_object {
|
||||
*
|
||||
*/
|
||||
struct list_head mm_list;
|
||||
struct msm_gpu *gpu; /* non-null if active */
|
||||
|
||||
/* Transiently in the process of submit ioctl, objects associated
|
||||
* with the submit are on submit->bo_list.. this only lasts for
|
||||
@ -85,50 +84,124 @@ struct msm_gem_object {
|
||||
* an IOMMU. Also used for stolen/splashscreen buffer.
|
||||
*/
|
||||
struct drm_mm_node *vram_node;
|
||||
struct mutex lock; /* Protects resources associated with bo */
|
||||
|
||||
char name[32]; /* Identifier to print for the debugfs files */
|
||||
|
||||
atomic_t active_count;
|
||||
int active_count;
|
||||
};
|
||||
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
|
||||
|
||||
int msm_gem_mmap_obj(struct drm_gem_object *obj,
|
||||
struct vm_area_struct *vma);
|
||||
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
|
||||
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
|
||||
int msm_gem_get_iova(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace, uint64_t *iova);
|
||||
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace, uint64_t *iova,
|
||||
u64 range_start, u64 range_end);
|
||||
int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace, uint64_t *iova);
|
||||
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace, uint64_t *iova);
|
||||
uint64_t msm_gem_iova(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace);
|
||||
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace);
|
||||
void msm_gem_unpin_iova(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace);
|
||||
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
|
||||
void msm_gem_put_pages(struct drm_gem_object *obj);
|
||||
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
|
||||
struct drm_mode_create_dumb *args);
|
||||
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
|
||||
uint32_t handle, uint64_t *offset);
|
||||
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
|
||||
void *msm_gem_get_vaddr(struct drm_gem_object *obj);
|
||||
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
|
||||
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
|
||||
void msm_gem_put_vaddr(struct drm_gem_object *obj);
|
||||
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
|
||||
int msm_gem_sync_object(struct drm_gem_object *obj,
|
||||
struct msm_fence_context *fctx, bool exclusive);
|
||||
void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
|
||||
void msm_gem_active_put(struct drm_gem_object *obj);
|
||||
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
|
||||
int msm_gem_cpu_fini(struct drm_gem_object *obj);
|
||||
void msm_gem_free_object(struct drm_gem_object *obj);
|
||||
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
|
||||
uint32_t size, uint32_t flags, uint32_t *handle, char *name);
|
||||
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
|
||||
uint32_t size, uint32_t flags);
|
||||
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
|
||||
uint32_t size, uint32_t flags);
|
||||
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
|
||||
uint32_t flags, struct msm_gem_address_space *aspace,
|
||||
struct drm_gem_object **bo, uint64_t *iova);
|
||||
void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
|
||||
uint32_t flags, struct msm_gem_address_space *aspace,
|
||||
struct drm_gem_object **bo, uint64_t *iova);
|
||||
void msm_gem_kernel_put(struct drm_gem_object *bo,
|
||||
struct msm_gem_address_space *aspace, bool locked);
|
||||
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
|
||||
struct dma_buf *dmabuf, struct sg_table *sgt);
|
||||
__printf(2, 3)
|
||||
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
|
||||
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
#endif

static inline void
msm_gem_lock(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
}

static inline bool __must_check
msm_gem_trylock(struct drm_gem_object *obj)
{
	return dma_resv_trylock(obj->resv);
}

static inline int
msm_gem_lock_interruptible(struct drm_gem_object *obj)
{
	return dma_resv_lock_interruptible(obj->resv, NULL);
}

static inline void
msm_gem_unlock(struct drm_gem_object *obj)
{
	dma_resv_unlock(obj->resv);
}

static inline bool
msm_gem_is_locked(struct drm_gem_object *obj)
{
	return dma_resv_is_locked(obj->resv);
}
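These helpers make the per-object lock the GEM object's dma_resv ww-mutex rather than a driver-private mutex. One consequence used later in the series: paths like the shrinker can msm_gem_trylock() and simply skip contended objects instead of relying on mutex_trylock_recursive(). A condensed sketch of that pattern, mirroring the shrinker hunks further down (the list being walked is illustrative):

	struct msm_gem_object *msm_obj;

	list_for_each_entry(msm_obj, &some_list, mm_list) {	/* some_list: illustrative */
		if (!msm_gem_trylock(&msm_obj->base))
			continue;		/* contended: leave it for the next pass */

		if (is_purgeable(msm_obj))
			msm_gem_purge(&msm_obj->base);

		msm_gem_unlock(&msm_obj->base);
	}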

static inline bool is_active(struct msm_gem_object *msm_obj)
{
	return atomic_read(&msm_obj->active_count);
	WARN_ON(!msm_gem_is_locked(&msm_obj->base));
	return msm_obj->active_count;
}

static inline bool is_purgeable(struct msm_gem_object *msm_obj)
{
	WARN_ON(!mutex_is_locked(&msm_obj->base.dev->struct_mutex));
	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
			!msm_obj->base.dma_buf && !msm_obj->base.import_attach;
}

static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
{
	WARN_ON(!msm_gem_is_locked(&msm_obj->base));
	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
}

/* The shrinker can be triggered while we hold objA->lock, and need
 * to grab objB->lock to purge it. Lockdep just sees these as a single
 * class of lock, so we use subclasses to teach it the difference.
 *
 * OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and
 * OBJ_LOCK_SHRINKER is used by shrinker.
 *
 * It is *essential* that we never go down paths that could trigger the
 * shrinker for a purgable object. This is ensured by checking that
 * msm_obj->madv == MSM_MADV_WILLNEED.
 */
enum msm_gem_lock {
	OBJ_LOCK_NORMAL,
	OBJ_LOCK_SHRINKER,
};

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass);
void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
void msm_gem_free_work(struct work_struct *work);
void msm_gem_purge(struct drm_gem_object *obj);
void msm_gem_vunmap(struct drm_gem_object *obj);

/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
 * associated with the cmdstream submission for synchronization (and
@@ -136,6 +209,7 @@ void msm_gem_free_work(struct work_struct *work);
 * lasts for the duration of the submit-ioctl.
 */
struct msm_gem_submit {
	struct kref ref;
	struct drm_device *dev;
	struct msm_gpu *gpu;
	struct msm_gem_address_space *aspace;
@@ -157,7 +231,10 @@ struct msm_gem_submit {
		uint32_t type;
		uint32_t size;  /* in dwords */
		uint64_t iova;
		uint32_t offset;/* in dwords */
		uint32_t idx;   /* cmdstream buffer idx in bos[] */
		uint32_t nr_relocs;
		struct drm_msm_gem_submit_reloc *relocs;
	} *cmd;  /* array of size nr_cmds */
	struct {
		uint32_t flags;
@@ -169,6 +246,18 @@ struct msm_gem_submit {
	} bos[];
};

void __msm_gem_submit_destroy(struct kref *kref);

static inline void msm_gem_submit_get(struct msm_gem_submit *submit)
{
	kref_get(&submit->ref);
}

static inline void msm_gem_submit_put(struct msm_gem_submit *submit)
{
	kref_put(&submit->ref, __msm_gem_submit_destroy);
}
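With ring->submits now manipulated outside struct_mutex, the submit needs reference counting: msm_gem_submit_get()/put() wrap the new kref so that the submitting ioctl and the retire path can each hold the object independently. A rough sketch of the intended lifetime, pieced together from the msm_gpu.c hunks later in this diff:

	/* msm_gpu_submit(): the ring's submit list takes its own reference */
	msm_gem_submit_get(submit);
	spin_lock(&ring->submit_lock);
	list_add_tail(&submit->node, &ring->submits);
	spin_unlock(&ring->submit_lock);

	/* retire_submit(): drop the list's reference once the fence has signalled */
	spin_lock(&ring->submit_lock);
	list_del(&submit->node);
	spin_unlock(&ring->submit_lock);
	msm_gem_submit_put(submit);	/* may call __msm_gem_submit_destroy() */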
|
||||
/* helper to determine of a buffer in submit should be dumped, used for both
|
||||
* devcoredump and debugfs cmdstream dumping:
|
||||
*/
|
||||
|
@ -6,58 +6,28 @@
|
||||
|
||||
#include "msm_drv.h"
|
||||
#include "msm_gem.h"
|
||||
#include "msm_gpu.h"
|
||||
#include "msm_gpu_trace.h"
|
||||
|
||||
static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
|
||||
{
|
||||
/* NOTE: we are *closer* to being able to get rid of
|
||||
* mutex_trylock_recursive().. the msm_gem code itself does
|
||||
* not need struct_mutex, although codepaths that can trigger
|
||||
* shrinker are still called in code-paths that hold the
|
||||
* struct_mutex.
|
||||
*
|
||||
* Also, msm_obj->madv is protected by struct_mutex.
|
||||
*
|
||||
* The next step is probably split out a seperate lock for
|
||||
* protecting inactive_list, so that shrinker does not need
|
||||
* struct_mutex.
|
||||
*/
|
||||
switch (mutex_trylock_recursive(&dev->struct_mutex)) {
|
||||
case MUTEX_TRYLOCK_FAILED:
|
||||
return false;
|
||||
|
||||
case MUTEX_TRYLOCK_SUCCESS:
|
||||
*unlock = true;
|
||||
return true;
|
||||
|
||||
case MUTEX_TRYLOCK_RECURSIVE:
|
||||
*unlock = false;
|
||||
return true;
|
||||
}
|
||||
|
||||
BUG();
|
||||
}
|
||||
|
||||
static unsigned long
|
||||
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
|
||||
{
|
||||
struct msm_drm_private *priv =
|
||||
container_of(shrinker, struct msm_drm_private, shrinker);
|
||||
struct drm_device *dev = priv->dev;
|
||||
struct msm_gem_object *msm_obj;
|
||||
unsigned long count = 0;
|
||||
bool unlock;
|
||||
|
||||
if (!msm_gem_shrinker_lock(dev, &unlock))
|
||||
return 0;
|
||||
mutex_lock(&priv->mm_lock);
|
||||
|
||||
list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
|
||||
list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) {
|
||||
if (!msm_gem_trylock(&msm_obj->base))
|
||||
continue;
|
||||
if (is_purgeable(msm_obj))
|
||||
count += msm_obj->base.size >> PAGE_SHIFT;
|
||||
msm_gem_unlock(&msm_obj->base);
|
||||
}
|
||||
|
||||
if (unlock)
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
mutex_unlock(&priv->mm_lock);
|
||||
|
||||
return count;
|
||||
}
|
||||
@ -67,25 +37,24 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
|
||||
{
|
||||
struct msm_drm_private *priv =
|
||||
container_of(shrinker, struct msm_drm_private, shrinker);
|
||||
struct drm_device *dev = priv->dev;
|
||||
struct msm_gem_object *msm_obj;
|
||||
unsigned long freed = 0;
|
||||
bool unlock;
|
||||
|
||||
if (!msm_gem_shrinker_lock(dev, &unlock))
|
||||
return SHRINK_STOP;
|
||||
mutex_lock(&priv->mm_lock);
|
||||
|
||||
list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
|
||||
list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) {
|
||||
if (freed >= sc->nr_to_scan)
|
||||
break;
|
||||
if (!msm_gem_trylock(&msm_obj->base))
|
||||
continue;
|
||||
if (is_purgeable(msm_obj)) {
|
||||
msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
|
||||
msm_gem_purge(&msm_obj->base);
|
||||
freed += msm_obj->base.size >> PAGE_SHIFT;
|
||||
}
|
||||
msm_gem_unlock(&msm_obj->base);
|
||||
}
|
||||
|
||||
if (unlock)
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
mutex_unlock(&priv->mm_lock);
|
||||
|
||||
if (freed > 0)
|
||||
trace_msm_gem_purge(freed << PAGE_SHIFT);
|
||||
@ -93,33 +62,57 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
|
||||
return freed;
|
||||
}
|
||||
|
||||
/* since we don't know any better, lets bail after a few
|
||||
* and if necessary the shrinker will be invoked again.
|
||||
* Seems better than unmapping *everything*
|
||||
*/
|
||||
static const int vmap_shrink_limit = 15;
|
||||
|
||||
static unsigned
|
||||
vmap_shrink(struct list_head *mm_list)
|
||||
{
|
||||
struct msm_gem_object *msm_obj;
|
||||
unsigned unmapped = 0;
|
||||
|
||||
list_for_each_entry(msm_obj, mm_list, mm_list) {
|
||||
if (!msm_gem_trylock(&msm_obj->base))
|
||||
continue;
|
||||
if (is_vunmapable(msm_obj)) {
|
||||
msm_gem_vunmap(&msm_obj->base);
|
||||
unmapped++;
|
||||
}
|
||||
msm_gem_unlock(&msm_obj->base);
|
||||
|
||||
if (++unmapped >= vmap_shrink_limit)
|
||||
break;
|
||||
}
|
||||
|
||||
return unmapped;
|
||||
}
|
||||
|
||||
static int
|
||||
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
|
||||
{
|
||||
struct msm_drm_private *priv =
|
||||
container_of(nb, struct msm_drm_private, vmap_notifier);
|
||||
struct drm_device *dev = priv->dev;
|
||||
struct msm_gem_object *msm_obj;
|
||||
unsigned unmapped = 0;
|
||||
bool unlock;
|
||||
struct list_head *mm_lists[] = {
|
||||
&priv->inactive_dontneed,
|
||||
&priv->inactive_willneed,
|
||||
priv->gpu ? &priv->gpu->active_list : NULL,
|
||||
NULL,
|
||||
};
|
||||
unsigned idx, unmapped = 0;
|
||||
|
||||
if (!msm_gem_shrinker_lock(dev, &unlock))
|
||||
return NOTIFY_DONE;
|
||||
mutex_lock(&priv->mm_lock);
|
||||
|
||||
list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
|
||||
if (is_vunmapable(msm_obj)) {
|
||||
msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
|
||||
/* since we don't know any better, lets bail after a few
|
||||
* and if necessary the shrinker will be invoked again.
|
||||
* Seems better than unmapping *everything*
|
||||
*/
|
||||
if (++unmapped >= 15)
|
||||
break;
|
||||
}
|
||||
for (idx = 0; mm_lists[idx]; idx++) {
|
||||
unmapped += vmap_shrink(mm_lists[idx]);
|
||||
|
||||
if (unmapped >= vmap_shrink_limit)
|
||||
break;
|
||||
}
|
||||
|
||||
if (unlock)
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
mutex_unlock(&priv->mm_lock);
|
||||
|
||||
*(unsigned long *)ptr += unmapped;
|
||||
|
||||
@ -131,7 +124,7 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
|
||||
|
||||
/**
|
||||
* msm_gem_shrinker_init - Initialize msm shrinker
|
||||
* @dev_priv: msm device
|
||||
* @dev: drm device
|
||||
*
|
||||
* This function registers and sets up the msm shrinker.
|
||||
*/
|
||||
@ -149,7 +142,7 @@ void msm_gem_shrinker_init(struct drm_device *dev)
|
||||
|
||||
/**
|
||||
* msm_gem_shrinker_cleanup - Clean up msm shrinker
|
||||
* @dev_priv: msm device
|
||||
* @dev: drm device
|
||||
*
|
||||
* This function unregisters the msm shrinker.
|
||||
*/
|
||||
|
@ -42,6 +42,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
|
||||
if (!submit)
|
||||
return NULL;
|
||||
|
||||
kref_init(&submit->ref);
|
||||
submit->dev = dev;
|
||||
submit->aspace = queue->ctx->aspace;
|
||||
submit->gpu = gpu;
|
||||
@ -60,13 +61,19 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
|
||||
return submit;
|
||||
}
|
||||
|
||||
void msm_gem_submit_free(struct msm_gem_submit *submit)
|
||||
void __msm_gem_submit_destroy(struct kref *kref)
|
||||
{
|
||||
struct msm_gem_submit *submit =
|
||||
container_of(kref, struct msm_gem_submit, ref);
|
||||
unsigned i;
|
||||
|
||||
dma_fence_put(submit->fence);
|
||||
list_del(&submit->node);
|
||||
put_pid(submit->pid);
|
||||
msm_submitqueue_put(submit->queue);
|
||||
|
||||
for (i = 0; i < submit->nr_cmds; i++)
|
||||
kfree(submit->cmd[i].relocs);
|
||||
|
||||
kfree(submit);
|
||||
}
|
||||
|
||||
@ -150,13 +157,73 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int submit_lookup_cmds(struct msm_gem_submit *submit,
|
||||
struct drm_msm_gem_submit *args, struct drm_file *file)
|
||||
{
|
||||
unsigned i, sz;
|
||||
int ret = 0;
|
||||
|
||||
for (i = 0; i < args->nr_cmds; i++) {
|
||||
struct drm_msm_gem_submit_cmd submit_cmd;
|
||||
void __user *userptr =
|
||||
u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
|
||||
|
||||
ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
|
||||
if (ret) {
|
||||
ret = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* validate input from userspace: */
|
||||
switch (submit_cmd.type) {
|
||||
case MSM_SUBMIT_CMD_BUF:
|
||||
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
|
||||
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (submit_cmd.size % 4) {
|
||||
DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
|
||||
submit_cmd.size);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
submit->cmd[i].type = submit_cmd.type;
|
||||
submit->cmd[i].size = submit_cmd.size / 4;
|
||||
submit->cmd[i].offset = submit_cmd.submit_offset / 4;
|
||||
submit->cmd[i].idx = submit_cmd.submit_idx;
|
||||
submit->cmd[i].nr_relocs = submit_cmd.nr_relocs;
|
||||
|
||||
sz = array_size(submit_cmd.nr_relocs,
|
||||
sizeof(struct drm_msm_gem_submit_reloc));
|
||||
/* check for overflow: */
|
||||
if (sz == SIZE_MAX) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL);
|
||||
ret = copy_from_user(submit->cmd[i].relocs, userptr, sz);
|
||||
if (ret) {
|
||||
ret = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
|
||||
int i, bool backoff)
|
||||
{
|
||||
struct msm_gem_object *msm_obj = submit->bos[i].obj;
|
||||
|
||||
if (submit->bos[i].flags & BO_PINNED)
|
||||
msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
|
||||
msm_gem_unpin_iova_locked(&msm_obj->base, submit->aspace);
|
||||
|
||||
if (submit->bos[i].flags & BO_LOCKED)
|
||||
dma_resv_unlock(msm_obj->base.resv);
|
||||
@ -259,7 +326,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
|
||||
uint64_t iova;
|
||||
|
||||
/* if locking succeeded, pin bo: */
|
||||
ret = msm_gem_get_and_pin_iova(&msm_obj->base,
|
||||
ret = msm_gem_get_and_pin_iova_locked(&msm_obj->base,
|
||||
submit->aspace, &iova);
|
||||
|
||||
if (ret)
|
||||
@ -301,7 +368,7 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
|
||||
|
||||
/* process the reloc's and patch up the cmdstream as needed: */
|
||||
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
|
||||
uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
|
||||
uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs)
|
||||
{
|
||||
uint32_t i, last_offset = 0;
|
||||
uint32_t *ptr;
|
||||
@ -318,7 +385,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
|
||||
/* For now, just map the entire thing. Eventually we probably
|
||||
* to do it page-by-page, w/ kmap() if not vmap()d..
|
||||
*/
|
||||
ptr = msm_gem_get_vaddr(&obj->base);
|
||||
ptr = msm_gem_get_vaddr_locked(&obj->base);
|
||||
|
||||
if (IS_ERR(ptr)) {
|
||||
ret = PTR_ERR(ptr);
|
||||
@ -327,18 +394,11 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
|
||||
}
|
||||
|
||||
for (i = 0; i < nr_relocs; i++) {
|
||||
struct drm_msm_gem_submit_reloc submit_reloc;
|
||||
void __user *userptr =
|
||||
u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
|
||||
struct drm_msm_gem_submit_reloc submit_reloc = relocs[i];
|
||||
uint32_t off;
|
||||
uint64_t iova;
|
||||
bool valid;
|
||||
|
||||
if (copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc))) {
|
||||
ret = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (submit_reloc.submit_offset % 4) {
|
||||
DRM_ERROR("non-aligned reloc offset: %u\n",
|
||||
submit_reloc.submit_offset);
|
||||
@ -376,7 +436,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
|
||||
}
|
||||
|
||||
out:
|
||||
msm_gem_put_vaddr(&obj->base);
|
||||
msm_gem_put_vaddr_locked(&obj->base);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -692,7 +752,20 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
|
||||
|
||||
ret = submit_lookup_objects(submit, args, file);
|
||||
if (ret)
|
||||
goto out;
|
||||
goto out_pre_pm;
|
||||
|
||||
ret = submit_lookup_cmds(submit, args, file);
|
||||
if (ret)
|
||||
goto out_pre_pm;
|
||||
|
||||
/*
|
||||
* Thanks to dev_pm_opp opp_table_lock interactions with mm->mmap_sem
|
||||
* in the resume path, we need to to rpm get before we lock objs.
|
||||
* Which unfortunately might involve powering up the GPU sooner than
|
||||
* is necessary. But at least in the explicit fencing case, we will
|
||||
* have already done all the fence waiting.
|
||||
*/
|
||||
pm_runtime_get_sync(&gpu->pdev->dev);
|
||||
|
||||
/* copy_*_user while holding a ww ticket upsets lockdep */
|
||||
ww_acquire_init(&submit->ticket, &reservation_ww_class);
|
||||
@ -710,60 +783,29 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
|
||||
goto out;
|
||||
|
||||
for (i = 0; i < args->nr_cmds; i++) {
|
||||
struct drm_msm_gem_submit_cmd submit_cmd;
|
||||
void __user *userptr =
|
||||
u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
|
||||
struct msm_gem_object *msm_obj;
|
||||
uint64_t iova;
|
||||
|
||||
ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
|
||||
if (ret) {
|
||||
ret = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* validate input from userspace: */
|
||||
switch (submit_cmd.type) {
|
||||
case MSM_SUBMIT_CMD_BUF:
|
||||
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
|
||||
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = submit_bo(submit, submit_cmd.submit_idx,
|
||||
ret = submit_bo(submit, submit->cmd[i].idx,
|
||||
&msm_obj, &iova, NULL);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (submit_cmd.size % 4) {
|
||||
DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
|
||||
submit_cmd.size);
|
||||
if (!submit->cmd[i].size ||
|
||||
((submit->cmd[i].size + submit->cmd[i].offset) >
|
||||
msm_obj->base.size / 4)) {
|
||||
DRM_ERROR("invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!submit_cmd.size ||
|
||||
((submit_cmd.size + submit_cmd.submit_offset) >
|
||||
msm_obj->base.size)) {
|
||||
DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
submit->cmd[i].type = submit_cmd.type;
|
||||
submit->cmd[i].size = submit_cmd.size / 4;
|
||||
submit->cmd[i].iova = iova + submit_cmd.submit_offset;
|
||||
submit->cmd[i].idx = submit_cmd.submit_idx;
|
||||
submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4);
|
||||
|
||||
if (submit->valid)
|
||||
continue;
|
||||
|
||||
ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
|
||||
submit_cmd.nr_relocs, submit_cmd.relocs);
|
||||
ret = submit_reloc(submit, msm_obj, submit->cmd[i].offset * 4,
|
||||
submit->cmd[i].nr_relocs, submit->cmd[i].relocs);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
@ -800,11 +842,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
|
||||
|
||||
|
||||
out:
|
||||
pm_runtime_put(&gpu->pdev->dev);
|
||||
out_pre_pm:
|
||||
submit_cleanup(submit);
|
||||
if (has_ww_ticket)
|
||||
ww_acquire_fini(&submit->ticket);
|
||||
if (ret)
|
||||
msm_gem_submit_free(submit);
|
||||
msm_gem_submit_put(submit);
|
||||
out_unlock:
|
||||
if (ret && (out_fence_fd >= 0))
|
||||
put_unused_fd(out_fence_fd);
|
||||
|
@ -14,6 +14,7 @@
|
||||
#include <generated/utsrelease.h>
|
||||
#include <linux/string_helpers.h>
|
||||
#include <linux/devfreq.h>
|
||||
#include <linux/devfreq_cooling.h>
|
||||
#include <linux/devcoredump.h>
|
||||
#include <linux/sched/task.h>
|
||||
|
||||
@ -107,9 +108,18 @@ static void msm_devfreq_init(struct msm_gpu *gpu)
|
||||
if (IS_ERR(gpu->devfreq.devfreq)) {
|
||||
DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
|
||||
gpu->devfreq.devfreq = NULL;
|
||||
return;
|
||||
}
|
||||
|
||||
devfreq_suspend_device(gpu->devfreq.devfreq);
|
||||
|
||||
gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node,
|
||||
gpu->devfreq.devfreq);
|
||||
if (IS_ERR(gpu->cooling)) {
|
||||
DRM_DEV_ERROR(&gpu->pdev->dev,
|
||||
"Couldn't register GPU cooling device\n");
|
||||
gpu->cooling = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int enable_pwrrail(struct msm_gpu *gpu)
|
||||
@ -177,15 +187,12 @@ static int disable_clk(struct msm_gpu *gpu)
|
||||
|
||||
static int enable_axi(struct msm_gpu *gpu)
|
||||
{
|
||||
if (gpu->ebi1_clk)
|
||||
clk_prepare_enable(gpu->ebi1_clk);
|
||||
return 0;
|
||||
return clk_prepare_enable(gpu->ebi1_clk);
|
||||
}
|
||||
|
||||
static int disable_axi(struct msm_gpu *gpu)
|
||||
{
|
||||
if (gpu->ebi1_clk)
|
||||
clk_disable_unprepare(gpu->ebi1_clk);
|
||||
clk_disable_unprepare(gpu->ebi1_clk);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -265,6 +272,22 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
|
||||
uint32_t fence)
|
||||
{
|
||||
struct msm_gem_submit *submit;
|
||||
|
||||
spin_lock(&ring->submit_lock);
|
||||
list_for_each_entry(submit, &ring->submits, node) {
|
||||
if (submit->seqno > fence)
|
||||
break;
|
||||
|
||||
msm_update_fence(submit->ring->fctx,
|
||||
submit->fence->seqno);
|
||||
}
|
||||
spin_unlock(&ring->submit_lock);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEV_COREDUMP
|
||||
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
|
||||
size_t count, void *data, size_t datalen)
|
||||
@ -326,7 +349,9 @@ static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
|
||||
if (!state_bo->data)
|
||||
goto out;
|
||||
|
||||
msm_gem_lock(&obj->base);
|
||||
ptr = msm_gem_get_vaddr_active(&obj->base);
|
||||
msm_gem_unlock(&obj->base);
|
||||
if (IS_ERR(ptr)) {
|
||||
kvfree(state_bo->data);
|
||||
state_bo->data = NULL;
|
||||
@ -411,37 +436,26 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
|
||||
* Hangcheck detection for locked gpu:
|
||||
*/
|
||||
|
||||
static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
|
||||
uint32_t fence)
|
||||
{
|
||||
struct msm_gem_submit *submit;
|
||||
|
||||
list_for_each_entry(submit, &ring->submits, node) {
|
||||
if (submit->seqno > fence)
|
||||
break;
|
||||
|
||||
msm_update_fence(submit->ring->fctx,
|
||||
submit->fence->seqno);
|
||||
}
|
||||
}
|
||||
|
||||
static struct msm_gem_submit *
|
||||
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
|
||||
{
|
||||
struct msm_gem_submit *submit;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex));
|
||||
|
||||
list_for_each_entry(submit, &ring->submits, node)
|
||||
if (submit->seqno == fence)
|
||||
spin_lock(&ring->submit_lock);
|
||||
list_for_each_entry(submit, &ring->submits, node) {
|
||||
if (submit->seqno == fence) {
|
||||
spin_unlock(&ring->submit_lock);
|
||||
return submit;
|
||||
}
|
||||
}
|
||||
spin_unlock(&ring->submit_lock);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void retire_submits(struct msm_gpu *gpu);
|
||||
|
||||
static void recover_worker(struct work_struct *work)
|
||||
static void recover_worker(struct kthread_work *work)
|
||||
{
|
||||
struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
|
||||
struct drm_device *dev = gpu->dev;
|
||||
@ -470,14 +484,22 @@ static void recover_worker(struct work_struct *work)
|
||||
put_task_struct(task);
|
||||
}
|
||||
|
||||
/* msm_rd_dump_submit() needs bo locked to dump: */
|
||||
for (i = 0; i < submit->nr_bos; i++)
|
||||
msm_gem_lock(&submit->bos[i].obj->base);
|
||||
|
||||
if (comm && cmd) {
|
||||
DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
|
||||
gpu->name, comm, cmd);
|
||||
|
||||
msm_rd_dump_submit(priv->hangrd, submit,
|
||||
"offending task: %s (%s)", comm, cmd);
|
||||
} else
|
||||
} else {
|
||||
msm_rd_dump_submit(priv->hangrd, submit, NULL);
|
||||
}
|
||||
|
||||
for (i = 0; i < submit->nr_bos; i++)
|
||||
msm_gem_unlock(&submit->bos[i].obj->base);
|
||||
}
|
||||
|
||||
/* Record the crash state */
|
||||
@ -523,8 +545,10 @@ static void recover_worker(struct work_struct *work)
|
||||
for (i = 0; i < gpu->nr_rings; i++) {
|
||||
struct msm_ringbuffer *ring = gpu->rb[i];
|
||||
|
||||
spin_lock(&ring->submit_lock);
|
||||
list_for_each_entry(submit, &ring->submits, node)
|
||||
gpu->funcs->submit(gpu, submit);
|
||||
spin_unlock(&ring->submit_lock);
|
||||
}
|
||||
}
|
||||
|
||||
@ -535,7 +559,6 @@ static void recover_worker(struct work_struct *work)
|
||||
|
||||
static void hangcheck_timer_reset(struct msm_gpu *gpu)
|
||||
{
|
||||
DBG("%s", gpu->name);
|
||||
mod_timer(&gpu->hangcheck_timer,
|
||||
round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
|
||||
}
|
||||
@ -544,7 +567,6 @@ static void hangcheck_handler(struct timer_list *t)
|
||||
{
|
||||
struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
|
||||
struct drm_device *dev = gpu->dev;
|
||||
struct msm_drm_private *priv = dev->dev_private;
|
||||
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
|
||||
uint32_t fence = ring->memptrs->fence;
|
||||
|
||||
@ -561,7 +583,7 @@ static void hangcheck_handler(struct timer_list *t)
|
||||
DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
|
||||
gpu->name, ring->seqno);
|
||||
|
||||
queue_work(priv->wq, &gpu->recover_work);
|
||||
kthread_queue_work(gpu->worker, &gpu->recover_work);
|
||||
}
|
||||
|
||||
/* if still more pending work, reset the hangcheck timer: */
|
||||
@ -569,7 +591,7 @@ static void hangcheck_handler(struct timer_list *t)
|
||||
hangcheck_timer_reset(gpu);
|
||||
|
||||
/* workaround for missing irq: */
|
||||
queue_work(priv->wq, &gpu->retire_work);
|
||||
kthread_queue_work(gpu->worker, &gpu->retire_work);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -697,56 +719,70 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
|
||||
stats->alwayson_start, stats->alwayson_end);
|
||||
|
||||
for (i = 0; i < submit->nr_bos; i++) {
|
||||
struct msm_gem_object *msm_obj = submit->bos[i].obj;
|
||||
struct drm_gem_object *obj = &submit->bos[i].obj->base;
|
||||
|
||||
msm_gem_active_put(&msm_obj->base);
|
||||
msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
|
||||
drm_gem_object_put_locked(&msm_obj->base);
|
||||
msm_gem_lock(obj);
|
||||
msm_gem_active_put(obj);
|
||||
msm_gem_unpin_iova_locked(obj, submit->aspace);
|
||||
msm_gem_unlock(obj);
|
||||
drm_gem_object_put(obj);
|
||||
}
|
||||
|
||||
pm_runtime_mark_last_busy(&gpu->pdev->dev);
|
||||
pm_runtime_put_autosuspend(&gpu->pdev->dev);
|
||||
msm_gem_submit_free(submit);
|
||||
|
||||
spin_lock(&ring->submit_lock);
|
||||
list_del(&submit->node);
|
||||
spin_unlock(&ring->submit_lock);
|
||||
|
||||
msm_gem_submit_put(submit);
|
||||
}
|
||||
|
||||
static void retire_submits(struct msm_gpu *gpu)
|
||||
{
|
||||
struct drm_device *dev = gpu->dev;
|
||||
struct msm_gem_submit *submit, *tmp;
|
||||
int i;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
|
||||
|
||||
/* Retire the commits starting with highest priority */
|
||||
for (i = 0; i < gpu->nr_rings; i++) {
|
||||
struct msm_ringbuffer *ring = gpu->rb[i];
|
||||
|
||||
list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
|
||||
if (dma_fence_is_signaled(submit->fence))
|
||||
while (true) {
|
||||
struct msm_gem_submit *submit = NULL;
|
||||
|
||||
spin_lock(&ring->submit_lock);
|
||||
submit = list_first_entry_or_null(&ring->submits,
|
||||
struct msm_gem_submit, node);
|
||||
spin_unlock(&ring->submit_lock);
|
||||
|
||||
/*
|
||||
* If no submit, we are done. If submit->fence hasn't
|
||||
* been signalled, then later submits are not signalled
|
||||
* either, so we are also done.
|
||||
*/
|
||||
if (submit && dma_fence_is_signaled(submit->fence)) {
|
||||
retire_submit(gpu, ring, submit);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void retire_worker(struct work_struct *work)
|
||||
static void retire_worker(struct kthread_work *work)
|
||||
{
|
||||
struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
|
||||
struct drm_device *dev = gpu->dev;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < gpu->nr_rings; i++)
|
||||
update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
retire_submits(gpu);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
}
|
||||
|
||||
/* call from irq handler to schedule work to retire bo's */
|
||||
void msm_gpu_retire(struct msm_gpu *gpu)
|
||||
{
|
||||
struct msm_drm_private *priv = gpu->dev->dev_private;
|
||||
queue_work(priv->wq, &gpu->retire_work);
|
||||
kthread_queue_work(gpu->worker, &gpu->retire_work);
|
||||
update_sw_cntrs(gpu);
|
||||
}
|
||||
|
||||
@ -766,8 +802,6 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
||||
|
||||
submit->seqno = ++ring->seqno;
|
||||
|
||||
list_add_tail(&submit->node, &ring->submits);
|
||||
|
||||
msm_rd_dump_submit(priv->rd, submit, NULL);
|
||||
|
||||
update_sw_cntrs(gpu);
|
||||
@ -777,14 +811,9 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
||||
struct drm_gem_object *drm_obj = &msm_obj->base;
|
||||
uint64_t iova;
|
||||
|
||||
/* can't happen yet.. but when we add 2d support we'll have
|
||||
* to deal w/ cross-ring synchronization:
|
||||
*/
|
||||
WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
|
||||
|
||||
/* submit takes a reference to the bo and iova until retired: */
|
||||
drm_gem_object_get(&msm_obj->base);
|
||||
msm_gem_get_and_pin_iova(&msm_obj->base, submit->aspace, &iova);
|
||||
msm_gem_get_and_pin_iova_locked(&msm_obj->base, submit->aspace, &iova);
|
||||
|
||||
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
|
||||
dma_resv_add_excl_fence(drm_obj->resv, submit->fence);
|
||||
@ -794,6 +823,16 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
|
||||
msm_gem_active_get(drm_obj, gpu);
|
||||
}
|
||||
|
||||
/*
|
||||
* ring->submits holds a ref to the submit, to deal with the case
|
||||
* that a submit completes before msm_ioctl_gem_submit() returns.
|
||||
*/
|
||||
msm_gem_submit_get(submit);
|
||||
|
||||
spin_lock(&ring->submit_lock);
|
||||
list_add_tail(&submit->node, &ring->submits);
|
||||
spin_unlock(&ring->submit_lock);
|
||||
|
||||
gpu->funcs->submit(gpu, submit);
|
||||
priv->lastctx = submit->queue->ctx;
|
||||
|
||||
@ -869,10 +908,18 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
|
||||
gpu->funcs = funcs;
|
||||
gpu->name = name;
|
||||
|
||||
INIT_LIST_HEAD(&gpu->active_list);
|
||||
INIT_WORK(&gpu->retire_work, retire_worker);
|
||||
INIT_WORK(&gpu->recover_work, recover_worker);
|
||||
gpu->worker = kthread_create_worker(0, "%s-worker", gpu->name);
|
||||
if (IS_ERR(gpu->worker)) {
|
||||
ret = PTR_ERR(gpu->worker);
|
||||
gpu->worker = NULL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
sched_set_fifo_low(gpu->worker->task);
|
||||
|
||||
INIT_LIST_HEAD(&gpu->active_list);
|
||||
kthread_init_work(&gpu->retire_work, retire_worker);
|
||||
kthread_init_work(&gpu->recover_work, recover_worker);
|
||||
|
||||
timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
|
||||
|
||||
@ -1005,4 +1052,10 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
|
||||
gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
|
||||
msm_gem_address_space_put(gpu->aspace);
|
||||
}
|
||||
|
||||
if (gpu->worker) {
|
||||
kthread_destroy_worker(gpu->worker);
|
||||
}
|
||||
|
||||
devfreq_cooling_unregister(gpu->cooling);
|
||||
}
|
||||

@@ -94,7 +94,10 @@ struct msm_gpu {
	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
	int nr_rings;

	/* list of GEM active objects: */
	/*
	 * List of GEM active objects on this gpu. Protected by
	 * msm_drm_private::mm_lock
	 */
	struct list_head active_list;

	/* does gpu need hw_init? */
@@ -103,9 +106,6 @@ struct msm_gpu {
	/* number of GPU hangs (for all contexts) */
	int global_faults;

	/* worker for handling active-list retiring: */
	struct work_struct retire_work;

	void __iomem *mmio;
	int irq;

@@ -134,7 +134,15 @@ struct msm_gpu {
#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
	struct timer_list hangcheck_timer;
	struct work_struct recover_work;

	/* work for handling GPU recovery: */
	struct kthread_work recover_work;

	/* work for handling active-list retiring: */
	struct kthread_work retire_work;

	/* worker for retire/recover: */
	struct kthread_worker *worker;

	struct drm_gem_object *memptrs_bo;

@@ -147,6 +155,8 @@ struct msm_gpu {
	struct msm_gpu_state *crashstate;
	/* True if the hardware supports expanded apriv (a650 and newer) */
	bool hw_apriv;

	struct thermal_cooling_device *cooling;
};

static inline struct msm_gpu *dev_to_gpu(struct device *dev)
@@ -246,10 +256,7 @@ static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)

static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
	uint32_t val = gpu_read(gpu, reg);

	val &= ~mask;
	gpu_write(gpu, reg, val | or);
	msm_rmw(gpu->mmio + (reg << 2), mask, or);
}

static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
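The retire and recover handlers move from the shared priv->wq workqueue to a dedicated kthread_worker per GPU, which also lets msm_gpu_init() raise the worker's scheduling priority. A condensed sketch of the pattern, matching the msm_gpu.c hunks earlier in this diff:

	/* per-GPU kthread worker for retire/recover, as set up in msm_gpu_init() */
	gpu->worker = kthread_create_worker(0, "%s-worker", gpu->name);
	if (IS_ERR(gpu->worker))
		return PTR_ERR(gpu->worker);

	sched_set_fifo_low(gpu->worker->task);

	kthread_init_work(&gpu->retire_work, retire_worker);
	kthread_init_work(&gpu->recover_work, recover_worker);

	/* later, e.g. from the IRQ or hangcheck path: */
	kthread_queue_work(gpu->worker, &gpu->retire_work);

	/* and on teardown: */
	kthread_destroy_worker(gpu->worker);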
@@ -136,7 +136,8 @@ struct msm_kms;
 */
struct msm_pending_timer {
	struct hrtimer timer;
	struct work_struct work;
	struct kthread_work work;
	struct kthread_worker *worker;
	struct msm_kms *kms;
	unsigned crtc_idx;
};
@@ -155,21 +156,37 @@ struct msm_kms {
	 * For async commit, where ->flush_commit() and later happens
	 * from the crtc's pending_timer close to end of the frame:
	 */
	struct mutex commit_lock;
	struct mutex commit_lock[MAX_CRTCS];
	unsigned pending_crtc_mask;
	struct msm_pending_timer pending_timers[MAX_CRTCS];
};

static inline void msm_kms_init(struct msm_kms *kms,
static inline int msm_kms_init(struct msm_kms *kms,
		const struct msm_kms_funcs *funcs)
{
	unsigned i, ret;

	for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
		mutex_init(&kms->commit_lock[i]);

	kms->funcs = funcs;

	for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++) {
		ret = msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i);
		if (ret) {
			return ret;
		}
	}

	return 0;
}

static inline void msm_kms_destroy(struct msm_kms *kms)
{
	unsigned i;

	mutex_init(&kms->commit_lock);
	kms->funcs = funcs;

	for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++)
		msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i);
		msm_atomic_destroy_pending_timer(&kms->pending_timers[i]);
}

struct msm_kms *mdp4_kms_init(struct drm_device *dev);
@@ -194,4 +211,8 @@ int dpu_mdss_init(struct drm_device *dev);
	drm_for_each_crtc(crtc, dev) \
		for_each_if (drm_crtc_mask(crtc) & (crtc_mask))

#define for_each_crtc_mask_reverse(dev, crtc, crtc_mask) \
	drm_for_each_crtc_reverse(crtc, dev) \
		for_each_if (drm_crtc_mask(crtc) & (crtc_mask))

#endif /* __MSM_KMS_H__ */
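Because pending-timer setup can now fail (it creates a kthread worker per CRTC), msm_kms_init() returns an int and callers are expected to check it. A hedged example of the call-site shape; the mdp5 names are used purely for illustration and are not copied from the patch:

	ret = msm_kms_init(&mdp5_kms->base.base, &kms_funcs);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "failed to init kms\n");
		goto fail;
	}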
@@ -333,7 +333,7 @@ static void snapshot_buf(struct msm_rd_state *rd,

	rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);

	msm_gem_put_vaddr(&obj->base);
	msm_gem_put_vaddr_locked(&obj->base);
}

/* called under struct_mutex */

@@ -46,7 +46,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
	ring->memptrs_iova = memptrs_iova;

	INIT_LIST_HEAD(&ring->submits);
	spin_lock_init(&ring->lock);
	spin_lock_init(&ring->submit_lock);
	spin_lock_init(&ring->preempt_lock);

	snprintf(name, sizeof(name), "gpu-ring-%d", ring->id);

@@ -39,14 +39,25 @@ struct msm_ringbuffer {
	int id;
	struct drm_gem_object *bo;
	uint32_t *start, *end, *cur, *next;

	/*
	 * List of in-flight submits on this ring. Protected by submit_lock.
	 */
	struct list_head submits;
	spinlock_t submit_lock;

	uint64_t iova;
	uint32_t seqno;
	uint32_t hangcheck_fence;
	struct msm_rbmemptrs *memptrs;
	uint64_t memptrs_iova;
	struct msm_fence_context *fctx;
	spinlock_t lock;

	/*
	 * preempt_lock protects preemption and serializes wptr updates against
	 * preemption. Can be aquired from irq context.
	 */
	spinlock_t preempt_lock;
};

struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
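The submit_lock documented above is what update_fences() and find_submit() in the msm_gpu.c hunks take while walking ring->submits, so the walk no longer depends on struct_mutex. A condensed sketch of that access pattern, taken from the update_fences() hunk earlier in this diff:

	struct msm_gem_submit *submit;

	spin_lock(&ring->submit_lock);
	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno > fence)
			break;
		msm_update_fence(submit->ring->fctx, submit->fence->seqno);
	}
	spin_unlock(&ring->submit_lock);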
@@ -767,7 +767,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_NON_STRICT |
			    IO_PGTABLE_QUIRK_ARM_TTBR1))
			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
@@ -779,10 +780,15 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
			goto out_free_data;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		tcr->orgn = ARM_LPAE_TCR_RGN_NC;
		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
		else
			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;

@@ -1279,6 +1279,16 @@ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
#define drm_for_each_crtc(crtc, dev) \
	list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)

/**
 * drm_for_each_crtc_reverse - iterate over all CRTCs in reverse order
 * @crtc: a &struct drm_crtc as the loop cursor
 * @dev: the &struct drm_device
 *
 * Iterate over all CRTCs of @dev.
 */
#define drm_for_each_crtc_reverse(crtc, dev) \
	list_for_each_entry_reverse(crtc, &(dev)->mode_config.crtc_list, head)

int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc,
					    unsigned int supported_filters);

@@ -86,6 +86,9 @@ struct io_pgtable_cfg {
	 *
	 * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
	 *	for use in the upper half of a split address space.
	 *
	 * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability
	 *	attributes set in the TCR for a non-coherent page-table walker.
	 */
	#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
@@ -93,6 +96,7 @@ struct io_pgtable_cfg {
	#define IO_PGTABLE_QUIRK_ARM_MTK_EXT	BIT(3)
	#define IO_PGTABLE_QUIRK_NON_STRICT	BIT(4)
	#define IO_PGTABLE_QUIRK_ARM_TTBR1	BIT(5)
	#define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA	BIT(6)
	unsigned long quirks;
	unsigned long pgsize_bitmap;
	unsigned int ias;
@@ -208,6 +212,10 @@ struct io_pgtable {

#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)

struct io_pgtable_domain_attr {
	unsigned long quirks;
};

static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
	iop->cfg.tlb->tlb_flush_all(iop->cookie);

@@ -118,6 +118,7 @@ enum iommu_attr {
	DOMAIN_ATTR_FSL_PAMUV1,
	DOMAIN_ATTR_NESTING,	/* two stages of translation */
	DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
	DOMAIN_ATTR_IO_PGTABLE_CFG,
	DOMAIN_ATTR_MAX,
};
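The io-pgtable and iommu changes above are the plumbing for the LLCC (system cache) support mentioned in the merge description: a GPU driver can ask the SMMU to use write-back/write-allocate outer cacheability so its page-table walks can hit the system cache. A hedged sketch of how a caller might request the quirk through the new domain attribute; the adreno code in this series does something along these lines, but this exact call site is illustrative:

	struct io_pgtable_domain_attr pgtbl_cfg = {
		.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA,
	};

	ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
	if (ret)
		dev_warn(dev, "could not set io-pgtable quirks: %d\n", ret);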