drm/msm: Fix a6xx GMU shutdown sequence

Commit e812744c5f ("drm: msm: a6xx: Add support for A618") missed
updating the VBIF flush in a6xx_gmu_shutdown and instead inserted the
new sequence into a6xx_pm_suspend along with a redundant GMU idle wait.
Move a6xx_bus_clear_pending_transactions to a6xx_gmu.c, call it at the
appropriate point in the shutdown routine, and remove the redundant
idle call.

v2: Remove the newly unused variable that was triggering a warning

Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Reviewed-by: Rob Clark <robdclark@gmail.com>
Fixes: e812744c5f ("drm: msm: a6xx: Add support for A618")
Tested-by: Douglas Anderson <dianders@chromium.org>
Signed-off-by: Rob Clark <robdclark@chromium.org>
commit 9cc68ee1d9
parent 1636295a9f
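For orientation, here is a condensed sketch of a6xx_gmu_shutdown() as it looks after this patch, reconstructed from the hunks below (unrelated parts of the function are elided as comments, so this is not the complete body): the bus drain now sits inside the GMU shutdown routine, after the idle check and immediately before the slumber request.

/*
 * Condensed sketch of the post-patch ordering, reconstructed from the
 * hunks below; elided sections are marked and not shown verbatim.
 */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
        struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;

        /* ... check GMU state and wait for it to go idle, returning
         * early if that fails ... */

        /* Drain pending VBIF/GBIF traffic while the GPU side is still up */
        a6xx_bus_clear_pending_transactions(adreno_gpu);

        /* tell the GMU we want to slumber */
        a6xx_gmu_notify_slumber(gmu);

        /* ... remainder of the shutdown sequence ... */
}

The point of the fix is this ordering: pending bus transactions are cleared before a6xx_gmu_notify_slumber(), and the logic lives next to the rest of the GMU shutdown instead of in a6xx_pm_suspend().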
drivers/gpu/drm/msm/adreno/a6xx_gmu.c

@@ -796,12 +796,41 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
 	return true;
 }
 
+#define GBIF_CLIENT_HALT_MASK             BIT(0)
+#define GBIF_ARB_HALT_MASK                BIT(1)
+
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+{
+	struct msm_gpu *gpu = &adreno_gpu->base;
+
+	if (!a6xx_has_gbif(adreno_gpu)) {
+		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
+								0xf) == 0xf);
+		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+		return;
+	}
+
+	/* Halt new client requests on GBIF */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+			(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+	/* Halt all AXI requests on GBIF */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+			(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+	/* The GBIF halt needs to be explicitly cleared */
+	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
+
 /* Gracefully try to shut down the GMU and by extension the GPU */
 static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
 {
 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
-	struct msm_gpu *gpu = &adreno_gpu->base;
 	u32 val;
 
 	/*
@@ -819,11 +848,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
 		return;
 	}
 
-	/* Clear the VBIF pipe before shutting down */
-	gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
-	spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf)
-		== 0xf);
-	gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+	a6xx_bus_clear_pending_transactions(adreno_gpu);
 
 	/* tell the GMU we want to slumber */
 	a6xx_gmu_notify_slumber(gmu);
drivers/gpu/drm/msm/adreno/a6xx_gpu.c

@@ -738,39 +738,6 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
 	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
 };
 
-#define GBIF_CLIENT_HALT_MASK             BIT(0)
-#define GBIF_ARB_HALT_MASK                BIT(1)
-
-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
-{
-	struct msm_gpu *gpu = &adreno_gpu->base;
-
-	if(!a6xx_has_gbif(adreno_gpu)){
-		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
-		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
-								0xf) == 0xf);
-		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
-
-		return;
-	}
-
-	/* Halt new client requests on GBIF */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
-	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
-			(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
-
-	/* Halt all AXI requests on GBIF */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
-	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
-			(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
-
-	/*
-	 * GMU needs DDR access in slumber path. Deassert GBIF halt now
-	 * to allow for GMU to access system memory.
-	 */
-	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
-}
-
 static int a6xx_pm_resume(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -795,16 +762,6 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
 
 	devfreq_suspend_device(gpu->devfreq.devfreq);
 
-	/*
-	 * Make sure the GMU is idle before continuing (because some transitions
-	 * may use VBIF
-	 */
-	a6xx_gmu_wait_for_idle(&a6xx_gpu->gmu);
-
-	/* Clear the VBIF pipe before shutting down */
-	/* FIXME: This accesses the GPU - do we need to make sure it is on? */
-	a6xx_bus_clear_pending_transactions(adreno_gpu);
-
 	return a6xx_gmu_stop(a6xx_gpu);
 }
 
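Net effect on the suspend path, read off the removal hunk above: a6xx_pm_suspend() no longer touches the GPU directly and simply hands off to a6xx_gmu_stop(); as the commit message notes, the explicit idle wait here was redundant. A sketch of the resulting tail of the function follows (the local declarations via to_adreno_gpu()/to_a6xx_gpu() and the earlier, unchanged body are assumed from the surrounding driver code rather than shown in the hunk):

static int a6xx_pm_suspend(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

        /* ... earlier, unchanged parts of the function ... */

        devfreq_suspend_device(gpu->devfreq.devfreq);

        /* The idle wait and bus drain now happen in the GMU shutdown
         * path reached via a6xx_gmu_stop(), not here. */
        return a6xx_gmu_stop(a6xx_gpu);
}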