Merge tag 'drm-msm-fixes-2020-02-16' of https://gitlab.freedesktop.org/drm/msm into drm-fixes
+ fix UBWC on GPU and display side for sc7180
+ fix DSI suspend/resume issue encountered on sc7180
+ fix some breakage on so called "linux-android" devices (fallout from
  sc7180/a618 support, not seen earlier due to bootloader/firmware
  differences)
+ couple other misc fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/<CAF6AEGshz5K3tJd=NsBSHq6HGT-ZRa67qt+iN=U2ZFO2oD8kuw@mail.gmail.com>
commit ec0bd60a47
drivers/gpu/drm/msm/adreno/a6xx_gmu.c

@@ -796,12 +796,41 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
 	return true;
 }
 
+#define GBIF_CLIENT_HALT_MASK BIT(0)
+#define GBIF_ARB_HALT_MASK BIT(1)
+
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+{
+	struct msm_gpu *gpu = &adreno_gpu->base;
+
+	if (!a6xx_has_gbif(adreno_gpu)) {
+		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
+								0xf) == 0xf);
+		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+		return;
+	}
+
+	/* Halt new client requests on GBIF */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+			(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+	/* Halt all AXI requests on GBIF */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+			(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+	/* The GBIF halt needs to be explicitly cleared */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
+
 /* Gracefully try to shut down the GMU and by extension the GPU */
 static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
 {
 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
-	struct msm_gpu *gpu = &adreno_gpu->base;
 	u32 val;
 
 	/*
@@ -819,11 +848,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
 		return;
 	}
 
-	/* Clear the VBIF pipe before shutting down */
-	gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
-	spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf)
-		== 0xf);
-	gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+	a6xx_bus_clear_pending_transactions(adreno_gpu);
 
 	/* tell the GMU we want to slumber */
 	a6xx_gmu_notify_slumber(gmu);
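The new helper above is a request/ack handshake: write a halt bit, poll the ack register until the hardware reflects the same bit, then clear the request when the bus is quiesced. A minimal standalone C sketch of that pattern follows; the registers and accessors are mocks for illustration only, not the driver's gpu_write()/gpu_read()/spin_until() API.

#include <stdint.h>
#include <stdio.h>

#define GBIF_CLIENT_HALT_MASK (1u << 0)
#define GBIF_ARB_HALT_MASK    (1u << 1)

static uint32_t halt_req;	/* mock of REG_A6XX_GBIF_HALT */
static uint32_t halt_ack;	/* mock of REG_A6XX_GBIF_HALT_ACK */

static void mock_write(uint32_t *reg, uint32_t val)
{
	*reg = val;
	/* pretend the hardware immediately acks whatever was requested */
	halt_ack = halt_req;
}

static void halt_and_wait(uint32_t mask)
{
	mock_write(&halt_req, mask);
	/* spin_until() in the kernel: poll until the ack bits match */
	while ((halt_ack & mask) != mask)
		;
}

int main(void)
{
	halt_and_wait(GBIF_CLIENT_HALT_MASK);	/* stop new client requests */
	halt_and_wait(GBIF_ARB_HALT_MASK);	/* drain AXI requests */
	mock_write(&halt_req, 0);		/* the halt must be cleared explicitly */
	printf("bus quiesced, halt cleared\n");
	return 0;
}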
drivers/gpu/drm/msm/adreno/a6xx_gpu.c

@@ -378,18 +378,6 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 	int ret;
 
-	/*
-	 * During a previous slumber, GBIF halt is asserted to ensure
-	 * no further transaction can go through GPU before GPU
-	 * headswitch is turned off.
-	 *
-	 * This halt is deasserted once headswitch goes off but
-	 * incase headswitch doesn't goes off clear GBIF halt
-	 * here to ensure GPU wake-up doesn't fail because of
-	 * halted GPU transactions.
-	 */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
-
 	/* Make sure the GMU keeps the GPU on while we set it up */
 	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
 
@@ -470,10 +458,12 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	/* Select CP0 to always count cycles */
 	gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
 
-	gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
-	gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
-	gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
-	gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
+	if (adreno_is_a630(adreno_gpu)) {
+		gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
+		gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
+		gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
+		gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
+	}
 
 	/* Enable fault detection */
 	gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
@@ -748,39 +738,6 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
 	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
 };
 
-#define GBIF_CLIENT_HALT_MASK BIT(0)
-#define GBIF_ARB_HALT_MASK BIT(1)
-
-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
-{
-	struct msm_gpu *gpu = &adreno_gpu->base;
-
-	if(!a6xx_has_gbif(adreno_gpu)){
-		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
-		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
-								0xf) == 0xf);
-		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
-
-		return;
-	}
-
-	/* Halt new client requests on GBIF */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
-	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
-			(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
-
-	/* Halt all AXI requests on GBIF */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
-	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
-			(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
-
-	/*
-	 * GMU needs DDR access in slumber path. Deassert GBIF halt now
-	 * to allow for GMU to access system memory.
-	 */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
-}
-
 static int a6xx_pm_resume(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -805,16 +762,6 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
 
 	devfreq_suspend_device(gpu->devfreq.devfreq);
 
-	/*
-	 * Make sure the GMU is idle before continuing (because some transitions
-	 * may use VBIF
-	 */
-	a6xx_gmu_wait_for_idle(&a6xx_gpu->gmu);
-
-	/* Clear the VBIF pipe before shutting down */
-	/* FIXME: This accesses the GPU - do we need to make sure it is on? */
-	a6xx_bus_clear_pending_transactions(adreno_gpu);
-
 	return a6xx_gmu_stop(a6xx_gpu);
 }
 
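Two things happen in this file: the open-coded bus-clear helper moves into a6xx_gmu.c (so the suspend path no longer pokes the bus from here), and the NC_MODE/UCHE UBWC registers are now programmed only on a630, leaving a618's reset defaults untouched. A small sketch of that per-target gating, with stubbed types for illustration only (not the driver's adreno_is_a630()):

#include <stdbool.h>
#include <stdio.h>

struct gpu { int revn; };

static bool is_a630(const struct gpu *gpu) { return gpu->revn == 630; }

static void reg_write(const char *name, unsigned val)
{
	printf("write %s = 0x%x\n", name, val);
}

static void hw_init_nc_mode(const struct gpu *gpu)
{
	if (!is_a630(gpu))
		return;	/* leave the reset defaults on a618 and friends */

	reg_write("RB_NC_MODE_CNTL", 2 << 1);
	reg_write("TPL1_NC_MODE_CNTL", 2 << 1);
	reg_write("SP_NC_MODE_CNTL", 2 << 1);
	reg_write("UCHE_MODE_CNTL", 2 << 21);
}

int main(void)
{
	struct gpu a618 = { .revn = 618 }, a630 = { .revn = 630 };

	hw_init_nc_mode(&a618);	/* no writes */
	hw_init_nc_mode(&a630);	/* programs the a630 UBWC settings */
	return 0;
}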
drivers/gpu/drm/msm/adreno/a6xx_hfi.c

@@ -7,6 +7,7 @@
 
 #include "a6xx_gmu.h"
 #include "a6xx_gmu.xml.h"
+#include "a6xx_gpu.h"
 
 #define HFI_MSG_ID(val) [val] = #val
 
@@ -216,48 +217,82 @@ static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
 		NULL, 0);
 }
 
-static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
-{
-	struct a6xx_hfi_msg_bw_table msg = { 0 };
-
-	/*
-	 * The sdm845 GMU doesn't do bus frequency scaling on its own but it
-	 * does need at least one entry in the list because it might be accessed
-	 * when the GMU is shutting down. Send a single "off" entry.
-	 */
-
-	msg.bw_level_num = 1;
-
-	msg.ddr_cmds_num = 3;
-	msg.ddr_wait_bitmask = 0x07;
-
-	msg.ddr_cmds_addrs[0] = 0x50000;
-	msg.ddr_cmds_addrs[1] = 0x5005c;
-	msg.ddr_cmds_addrs[2] = 0x5000c;
-
-	msg.ddr_cmds_data[0][0] = 0x40000000;
-	msg.ddr_cmds_data[0][1] = 0x40000000;
-	msg.ddr_cmds_data[0][2] = 0x40000000;
-
-	/*
-	 * These are the CX (CNOC) votes. This is used but the values for the
-	 * sdm845 GMU are known and fixed so we can hard code them.
-	 */
-
-	msg.cnoc_cmds_num = 3;
-	msg.cnoc_wait_bitmask = 0x05;
-
-	msg.cnoc_cmds_addrs[0] = 0x50034;
-	msg.cnoc_cmds_addrs[1] = 0x5007c;
-	msg.cnoc_cmds_addrs[2] = 0x5004c;
-
-	msg.cnoc_cmds_data[0][0] = 0x40000000;
-	msg.cnoc_cmds_data[0][1] = 0x00000000;
-	msg.cnoc_cmds_data[0][2] = 0x40000000;
-
-	msg.cnoc_cmds_data[1][0] = 0x60000001;
-	msg.cnoc_cmds_data[1][1] = 0x20000001;
-	msg.cnoc_cmds_data[1][2] = 0x60000001;
+static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+	/* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
+	msg->bw_level_num = 1;
+
+	msg->ddr_cmds_num = 3;
+	msg->ddr_wait_bitmask = 0x01;
+
+	msg->ddr_cmds_addrs[0] = 0x50000;
+	msg->ddr_cmds_addrs[1] = 0x5003c;
+	msg->ddr_cmds_addrs[2] = 0x5000c;
+
+	msg->ddr_cmds_data[0][0] = 0x40000000;
+	msg->ddr_cmds_data[0][1] = 0x40000000;
+	msg->ddr_cmds_data[0][2] = 0x40000000;
+
+	/*
+	 * These are the CX (CNOC) votes - these are used by the GMU but the
+	 * votes are known and fixed for the target
+	 */
+	msg->cnoc_cmds_num = 1;
+	msg->cnoc_wait_bitmask = 0x01;
+
+	msg->cnoc_cmds_addrs[0] = 0x5007c;
+	msg->cnoc_cmds_data[0][0] = 0x40000000;
+	msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
+
+static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+	/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
+	msg->bw_level_num = 1;
+
+	msg->ddr_cmds_num = 3;
+	msg->ddr_wait_bitmask = 0x07;
+
+	msg->ddr_cmds_addrs[0] = 0x50000;
+	msg->ddr_cmds_addrs[1] = 0x5005c;
+	msg->ddr_cmds_addrs[2] = 0x5000c;
+
+	msg->ddr_cmds_data[0][0] = 0x40000000;
+	msg->ddr_cmds_data[0][1] = 0x40000000;
+	msg->ddr_cmds_data[0][2] = 0x40000000;
+
+	/*
+	 * These are the CX (CNOC) votes. This is used but the values for the
+	 * sdm845 GMU are known and fixed so we can hard code them.
+	 */
+	msg->cnoc_cmds_num = 3;
+	msg->cnoc_wait_bitmask = 0x05;
+
+	msg->cnoc_cmds_addrs[0] = 0x50034;
+	msg->cnoc_cmds_addrs[1] = 0x5007c;
+	msg->cnoc_cmds_addrs[2] = 0x5004c;
+
+	msg->cnoc_cmds_data[0][0] = 0x40000000;
+	msg->cnoc_cmds_data[0][1] = 0x00000000;
+	msg->cnoc_cmds_data[0][2] = 0x40000000;
+
+	msg->cnoc_cmds_data[1][0] = 0x60000001;
+	msg->cnoc_cmds_data[1][1] = 0x20000001;
+	msg->cnoc_cmds_data[1][2] = 0x60000001;
+}
+
+static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
+{
+	struct a6xx_hfi_msg_bw_table msg = { 0 };
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+
+	if (adreno_is_a618(adreno_gpu))
+		a618_build_bw_table(&msg);
+	else
+		a6xx_build_bw_table(&msg);
 
 	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
 		NULL, 0);
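The refactor replaces one hard-coded bandwidth table with per-target builder functions plus a thin dispatcher keyed on adreno_is_a618(). A condensed sketch of that shape, with the fields reduced for illustration (not the real a6xx_hfi_msg_bw_table layout):

#include <stdio.h>

struct bw_table {
	int bw_level_num;
	int ddr_wait_bitmask;
};

/* per-target builders: only the values differ, the shape is shared */
static void a618_build(struct bw_table *m) { m->bw_level_num = 1; m->ddr_wait_bitmask = 0x01; }
static void a6xx_build(struct bw_table *m) { m->bw_level_num = 1; m->ddr_wait_bitmask = 0x07; }

static void send_bw_table(int is_a618)
{
	struct bw_table msg = { 0 };

	if (is_a618)
		a618_build(&msg);
	else
		a6xx_build(&msg);

	printf("a618=%d wait_bitmask=0x%02x\n", is_a618, msg.ddr_wait_bitmask);
}

int main(void)
{
	send_bw_table(1);	/* 0x01 */
	send_bw_table(0);	/* 0x07 */
	return 0;
}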
drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c

@@ -255,13 +255,13 @@ static const struct dpu_format dpu_format_map[] = {
 
 	INTERLEAVED_RGB_FMT(RGB565,
 		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
-		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
 		false, 2, 0,
 		DPU_FETCH_LINEAR, 1),
 
 	INTERLEAVED_RGB_FMT(BGR565,
 		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
-		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
 		false, 2, 0,
 		DPU_FETCH_LINEAR, 1),
 
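These two hunks swap the C2_R_Cr / C1_B_Cb fetch order, i.e. red and blue had been exchanged for the 16-bit 5:6:5 formats. A quick standalone check of why component order matters in 5:6:5 packing (plain C, independent of the driver's macros):

#include <stdint.h>
#include <stdio.h>

static uint16_t pack565(uint8_t first, uint8_t mid, uint8_t last)
{
	/* [15:11] = first component, [10:5] = second, [4:0] = third */
	return (uint16_t)(((first & 0x1f) << 11) | ((mid & 0x3f) << 5) | (last & 0x1f));
}

int main(void)
{
	/* pure red: the wrong component order makes it come out blue */
	printf("R in high bits: 0x%04x\n", pack565(0x1f, 0, 0)); /* 0xf800 */
	printf("R in low bits:  0x%04x\n", pack565(0, 0, 0x1f)); /* 0x001f */
	return 0;
}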
drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c

@@ -12,6 +12,7 @@
 
 #define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
 
+#define HW_REV				0x0
 #define HW_INTR_STATUS			0x0010
 
 /* Max BW defined in KBps */
@@ -22,6 +23,17 @@ struct dpu_irq_controller {
 	struct irq_domain *domain;
 };
 
+struct dpu_hw_cfg {
+	u32 val;
+	u32 offset;
+};
+
+struct dpu_mdss_hw_init_handler {
+	u32 hw_rev;
+	u32 hw_reg_count;
+	struct dpu_hw_cfg* hw_cfg;
+};
+
 struct dpu_mdss {
 	struct msm_mdss base;
 	void __iomem *mmio;
@@ -32,6 +44,44 @@ struct dpu_mdss {
 	u32 num_paths;
 };
 
+static struct dpu_hw_cfg hw_cfg[] = {
+	{
+		/* UBWC global settings */
+		.val = 0x1E,
+		.offset = 0x144,
+	}
+};
+
+static struct dpu_mdss_hw_init_handler cfg_handler[] = {
+	{ .hw_rev = DPU_HW_VER_620,
+	  .hw_reg_count = ARRAY_SIZE(hw_cfg),
+	  .hw_cfg = hw_cfg
+	},
+};
+
+static void dpu_mdss_hw_init(struct dpu_mdss *dpu_mdss, u32 hw_rev)
+{
+	int i;
+	u32 count = 0;
+	struct dpu_hw_cfg *hw_cfg = NULL;
+
+	for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
+		if (cfg_handler[i].hw_rev == hw_rev) {
+			hw_cfg = cfg_handler[i].hw_cfg;
+			count = cfg_handler[i].hw_reg_count;
+			break;
+		}
+	}
+
+	for (i = 0; i < count; i++) {
+		writel_relaxed(hw_cfg->val,
+				dpu_mdss->mmio + hw_cfg->offset);
+		hw_cfg++;
+	}
+
+	return;
+}
+
 static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev,
 		struct dpu_mdss *dpu_mdss)
 {
@@ -174,12 +224,18 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
 	struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
 	struct dss_module_power *mp = &dpu_mdss->mp;
 	int ret;
+	u32 mdss_rev;
 
 	dpu_mdss_icc_request_bw(mdss);
 
 	ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
-	if (ret)
+	if (ret) {
 		DPU_ERROR("clock enable failed, ret:%d\n", ret);
+		return ret;
+	}
+
+	mdss_rev = readl_relaxed(dpu_mdss->mmio + HW_REV);
+	dpu_mdss_hw_init(dpu_mdss, mdss_rev);
 
 	return ret;
 }
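dpu_mdss_hw_init() is a revision-table walk: find the entry matching the MDSS HW_REV that dpu_mdss_enable() just read, then apply that entry's register writes (here, the DPU 6.2 / sc7180 UBWC global setting). A compact sketch with mocked MMIO; the DPU_HW_VER_620 value below is a stand-in, not the real constant:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct hw_cfg { uint32_t val, offset; };
struct init_handler { uint32_t hw_rev; uint32_t count; const struct hw_cfg *cfg; };

static const struct hw_cfg dpu_620_cfg[] = {
	{ .val = 0x1e, .offset = 0x144 },	/* UBWC global settings */
};

static const struct init_handler handlers[] = {
	{ .hw_rev = 0x62000000,	/* stand-in for DPU_HW_VER_620 */
	  .count = ARRAY_SIZE(dpu_620_cfg), .cfg = dpu_620_cfg },
};

static void hw_init(uint32_t hw_rev)
{
	for (size_t i = 0; i < ARRAY_SIZE(handlers); i++) {
		if (handlers[i].hw_rev != hw_rev)
			continue;
		for (uint32_t j = 0; j < handlers[i].count; j++)
			printf("writel(0x%x, mmio + 0x%x)\n",
			       (unsigned)handlers[i].cfg[j].val,
			       (unsigned)handlers[i].cfg[j].offset);
		return;
	}
	/* unknown revision: no extra programming */
}

int main(void)
{
	hw_init(0x62000000);	/* sc7180 DPU: programs the UBWC register */
	hw_init(0);		/* any other revision is a no-op */
	return 0;
}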
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c

@@ -1109,8 +1109,8 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
 	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
 						msecs_to_jiffies(50));
 	if (ret == 0)
-		dev_warn(dev->dev, "pp done time out, lm=%d\n",
+		dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n",
 			mdp5_cstate->pipeline.mixer->lm);
 }
 
 static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
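Swapping dev_warn() for dev_warn_ratelimited() keeps a wedged pingpong-done interrupt from flooding the log every 50 ms. The kernel implements this with ratelimit state attached to the call site; a userspace sketch of the same idea, for illustration only:

#include <stdio.h>
#include <time.h>

static void warn_ratelimited(const char *msg)
{
	static time_t last;
	time_t now = time(NULL);

	if (now - last < 5)	/* drop repeats within a 5 second window */
		return;
	last = now;
	fprintf(stderr, "warning: %s\n", msg);
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		warn_ratelimited("pp done time out");	/* printed once */
	return 0;
}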
drivers/gpu/drm/msm/dsi/dsi_manager.c

@@ -336,7 +336,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
 	return num;
 }
 
-static int dsi_mgr_connector_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector,
 				struct drm_display_mode *mode)
 {
 	int id = dsi_mgr_connector_get_id(connector);
@@ -506,6 +506,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
 	struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
 	struct mipi_dsi_host *host = msm_dsi->host;
 	struct drm_panel *panel = msm_dsi->panel;
+	struct msm_dsi_pll *src_pll;
 	bool is_dual_dsi = IS_DUAL_DSI();
 	int ret;
 
@@ -539,6 +540,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
 			id, ret);
 	}
 
+	/* Save PLL status if it is a clock source */
+	src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
+	msm_dsi_pll_save_state(src_pll);
+
 	ret = msm_dsi_host_power_off(host);
 	if (ret)
 		pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c

@@ -724,10 +724,6 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
 	if (!phy || !phy->cfg->ops.disable)
 		return;
 
-	/* Save PLL status if it is a clock source */
-	if (phy->usecase != MSM_DSI_PHY_SLAVE)
-		msm_dsi_pll_save_state(phy->pll);
-
 	phy->cfg->ops.disable(phy);
 
 	dsi_phy_regulator_disable(phy);
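Taken together, the dsi_manager.c and dsi_phy.c hunks move msm_dsi_pll_save_state() out of PHY disable and into the bridge post_disable path, so PLL registers are captured before the host powers off rather than after. A sketch of the ordering change with stub functions (not the driver API):

#include <stdio.h>

static void pll_save_state(void) { puts("save PLL state"); }
static void host_power_off(void) { puts("host power off"); }
static void phy_disable(void)    { puts("PHY disable"); }

int main(void)
{
	/* new order in dsi_mgr_bridge_post_disable(): save state first */
	pll_save_state();
	host_power_off();
	phy_disable();	/* no longer saves PLL state here */
	return 0;
}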
drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c

@@ -411,6 +411,12 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
 	if (pll_10nm->slave)
 		dsi_pll_enable_pll_bias(pll_10nm->slave);
 
+	rc = dsi_pll_10nm_vco_set_rate(hw, pll_10nm->vco_current_rate, 0);
+	if (rc) {
+		pr_err("vco_set_rate failed, rc=%d\n", rc);
+		return rc;
+	}
+
 	/* Start PLL */
 	pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
 		0x01);
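Here vco_prepare() re-applies the cached VCO rate before starting the PLL, guarding against registers that were reset while the PHY was powered down (the sc7180 DSI suspend/resume case from the commit message). Shape of the change with clk_hw details elided; the types and functions below are illustrative stubs:

#include <stdio.h>

struct pll { unsigned long vco_current_rate; };

static int vco_set_rate(struct pll *pll, unsigned long rate)
{
	printf("program VCO to %lu Hz\n", rate);
	return 0;
}

static int vco_prepare(struct pll *pll)
{
	int rc;

	/* re-program the cached rate; registers may have been reset */
	rc = vco_set_rate(pll, pll->vco_current_rate);
	if (rc) {
		fprintf(stderr, "vco_set_rate failed, rc=%d\n", rc);
		return rc;
	}

	puts("start PLL");
	return 0;
}

int main(void)
{
	struct pll p = { .vco_current_rate = 1500000000UL };
	return vco_prepare(&p);
}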
Loading…
Reference in New Issue
Block a user