- DisplayPort LTTPR fixes around link training and limiting it
  according to supported spec version. (Imre)
- Fix enabled_planes bitmask to really represent only logically
  enabled planes (Ville).
- Fix DSS CTL registers for ICL DSI transcoders (Jani)
- Fix the GT fence revocation runtime PM logic. (Imre)
-----BEGIN PGP SIGNATURE-----
iQEzBAABCAAdFiEEbSBwaO7dZQkcLOKj+mJfZA7rE8oFAmBcWEIACgkQ+mJfZA7r
E8qFygf+Om4G3aUAPYyXyGa9K5dCkUHSpevUEurPGZfEQQgwYPtMqub41McudI76
gIJqvGytA2FtC47OyVOfmaauA4DX7Tm3F8ddKuUYgEeh7sQy0pCijXOq7Ozfv1tJ
Le91jdYGHAFh0q6JJ8w+SAladEWrcmxN1Wd459IHYs6D3F0qyialT2Z9c4lj8/kZ
1dFfJDH7/x6fJMfkS+wpfjsPj2NDI8mzihYUS7ySmFVsn3CF3cXpVDrkWG2BTvCA
pqTxvVEcrG1njjRtOmSEMck1JsrtzxO2bbJH7ZP0TEOJWwBV8EDeko8AkGrObOd3
1EtwIYB5zLF8GoUnlzhTJJ5weCXUEw==
=P/6h
-----END PGP SIGNATURE-----

Merge tag 'drm-intel-fixes-2021-03-25-1' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- DisplayPort LTTPR fixes around link training and limiting it
  according to supported spec version. (Imre)
- Fix enabled_planes bitmask to really represent only logically
  enabled planes (Ville).
- Fix DSS CTL registers for ICL DSI transcoders (Jani)
- Fix the GT fence revocation runtime PM logic. (Imre)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/YFxYdrjqeUtSu+3p@intel.com
commit 5165fe0bd1
@@ -317,12 +317,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
         if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
                 return 0;
 
-        new_crtc_state->enabled_planes |= BIT(plane->id);
-
         ret = plane->check_plane(new_crtc_state, new_plane_state);
         if (ret)
                 return ret;
 
+        if (fb)
+                new_crtc_state->enabled_planes |= BIT(plane->id);
+
         /* FIXME pre-g4x don't work like this */
         if (new_plane_state->uapi.visible)
                 new_crtc_state->active_planes |= BIT(plane->id);
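A minimal userspace sketch of the distinction the hunk above restores (the struct, plane ids and BIT macro here are illustrative stand-ins, not the driver's types): a plane with a framebuffer attached counts as logically enabled even when it is not visible, so it sets a bit in enabled_planes but not in active_planes.

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

enum plane_id { PLANE_PRIMARY, PLANE_SPRITE0 };

struct crtc_state_sketch {
        uint32_t enabled_planes;        /* planes with a framebuffer attached */
        uint32_t active_planes;         /* planes that are actually visible */
};

int main(void)
{
        struct crtc_state_sketch s = { 0, 0 };

        /* primary plane: has an fb and is visible -> set in both bitmasks */
        s.enabled_planes |= BIT(PLANE_PRIMARY);
        s.active_planes |= BIT(PLANE_PRIMARY);

        /* sprite plane: fb attached but fully clipped -> enabled only */
        s.enabled_planes |= BIT(PLANE_SPRITE0);

        printf("enabled=0x%x active=0x%x\n", s.enabled_planes, s.active_planes);
        return 0;
}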
@@ -3619,9 +3619,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 {
         int ret;
 
-        intel_dp_lttpr_init(intel_dp);
-
-        if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd))
+        if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0)
                 return false;
 
         /*
@@ -133,6 +133,7 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
         else
                 precharge = 5;
 
+        /* Max timeout value on G4x-BDW: 1.6ms */
         if (IS_BROADWELL(dev_priv))
                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
         else
@@ -159,6 +160,12 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
         enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
         u32 ret;
 
+        /*
+         * Max timeout values:
+         * SKL-GLK: 1.6ms
+         * CNL: 3.2ms
+         * ICL+: 4ms
+         */
         ret = DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
               DP_AUX_CH_CTL_INTERRUPT |
@@ -34,6 +34,11 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
                     link_status[3], link_status[4], link_status[5]);
 }
 
+static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
+{
+        memset(&intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
+}
+
 static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
 {
         intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
@@ -81,19 +86,36 @@ static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
 
 static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
 {
-        if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
-                                          intel_dp->lttpr_common_caps) < 0) {
-                memset(intel_dp->lttpr_common_caps, 0,
-                       sizeof(intel_dp->lttpr_common_caps));
+        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+        if (intel_dp_is_edp(intel_dp))
                 return false;
-        }
+
+        /*
+         * Detecting LTTPRs must be avoided on platforms with an AUX timeout
+         * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
+         */
+        if (INTEL_GEN(i915) < 10)
+                return false;
+
+        if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
+                                          intel_dp->lttpr_common_caps) < 0)
+                goto reset_caps;
 
         drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
                     "LTTPR common capabilities: %*ph\n",
                     (int)sizeof(intel_dp->lttpr_common_caps),
                     intel_dp->lttpr_common_caps);
 
+        /* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */
+        if (intel_dp->lttpr_common_caps[0] < 0x14)
+                goto reset_caps;
+
         return true;
+
+reset_caps:
+        intel_dp_reset_lttpr_common_caps(intel_dp);
+        return false;
 }
 
 static bool
@@ -106,33 +128,49 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
 }
 
 /**
- * intel_dp_lttpr_init - detect LTTPRs and init the LTTPR link training mode
+ * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
  * @intel_dp: Intel DP struct
  *
- * Read the LTTPR common capabilities, switch to non-transparent link training
- * mode if any is detected and read the PHY capabilities for all detected
- * LTTPRs. In case of an LTTPR detection error or if the number of
+ * Read the LTTPR common and DPRX capabilities and switch to non-transparent
+ * link training mode if any is detected and read the PHY capabilities for all
+ * detected LTTPRs. In case of an LTTPR detection error or if the number of
  * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
  * transparent mode link training mode.
  *
  * Returns:
- *   >0 if LTTPRs were detected and the non-transparent LT mode was set
+ *   >0 if LTTPRs were detected and the non-transparent LT mode was set. The
+ *      DPRX capabilities are read out.
  *   0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a
- *   detection failure and the transparent LT mode was set
+ *   detection failure and the transparent LT mode was set. The DPRX
+ *   capabilities are read out.
+ *   <0 Reading out the DPRX capabilities failed.
  */
-int intel_dp_lttpr_init(struct intel_dp *intel_dp)
+int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
 {
         int lttpr_count;
         bool ret;
         int i;
 
-        if (intel_dp_is_edp(intel_dp))
-                return 0;
-
         ret = intel_dp_read_lttpr_common_caps(intel_dp);
+
+        /* The DPTX shall read the DPRX caps after LTTPR detection. */
+        if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
+                intel_dp_reset_lttpr_common_caps(intel_dp);
+                return -EIO;
+        }
+
         if (!ret)
                 return 0;
 
+        /*
+         * The 0xF0000-0xF02FF range is only valid if the DPCD revision is
+         * at least 1.4.
+         */
+        if (intel_dp->dpcd[DP_DPCD_REV] < 0x14) {
+                intel_dp_reset_lttpr_common_caps(intel_dp);
+                return 0;
+        }
+
         lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
         /*
          * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
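The kernel-doc above defines a three-way return contract; the following is a hedged caller sketch in which only the <0 / 0 / >0 meaning is taken from the patch (it is also visible in the intel_dp_start_link_train() hunk further down), while the surrounding function example_link_train() is hypothetical.

/* Illustrative caller of the documented contract, not part of the patch. */
static void example_link_train(struct intel_dp *intel_dp)
{
        int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);

        if (lttpr_count < 0)
                return;         /* DPRX caps could not be read: nothing to train against */

        if (lttpr_count == 0) {
                /* transparent mode: train the DPRX directly */
        } else {
                /* non-transparent mode: train each of the lttpr_count PHYs, then the DPRX */
        }
}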
@@ -172,7 +210,7 @@ int intel_dp_lttpr_init(struct intel_dp *intel_dp)
 
         return lttpr_count;
 }
-EXPORT_SYMBOL(intel_dp_lttpr_init);
+EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);
 
 static u8 dp_voltage_max(u8 preemph)
 {
@@ -807,7 +845,10 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp,
          * TODO: Reiniting LTTPRs here won't be needed once proper connector
          * HW state readout is added.
          */
-        int lttpr_count = intel_dp_lttpr_init(intel_dp);
+        int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
+
+        if (lttpr_count < 0)
+                return;
 
         if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count))
                 intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
@@ -11,7 +11,7 @@
 struct intel_crtc_state;
 struct intel_dp;
 
-int intel_dp_lttpr_init(struct intel_dp *intel_dp);
+int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp);
 
 void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
                                const struct intel_crtc_state *crtc_state,
@@ -1014,20 +1014,14 @@ static i915_reg_t dss_ctl1_reg(const struct intel_crtc_state *crtc_state)
 {
         enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
 
-        if (crtc_state->cpu_transcoder == TRANSCODER_EDP)
-                return DSS_CTL1;
-
-        return ICL_PIPE_DSS_CTL1(pipe);
+        return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL1(pipe) : DSS_CTL1;
 }
 
 static i915_reg_t dss_ctl2_reg(const struct intel_crtc_state *crtc_state)
 {
         enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
 
-        if (crtc_state->cpu_transcoder == TRANSCODER_EDP)
-                return DSS_CTL2;
-
-        return ICL_PIPE_DSS_CTL2(pipe);
+        return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL2(pipe) : DSS_CTL2;
 }
 
 void intel_dsc_enable(struct intel_encoder *encoder,
@@ -316,7 +316,18 @@ void i915_vma_revoke_fence(struct i915_vma *vma)
         WRITE_ONCE(fence->vma, NULL);
         vma->fence = NULL;
 
-        with_intel_runtime_pm_if_in_use(fence_to_uncore(fence)->rpm, wakeref)
+        /*
+         * Skip the write to HW if and only if the device is currently
+         * suspended.
+         *
+         * If the driver does not currently hold a wakeref (if_in_use == 0),
+         * the device may currently be runtime suspended, or it may be woken
+         * up before the suspend takes place. If the device is not suspended
+         * (powered down) and we skip clearing the fence register, the HW is
+         * left in an undefined state where we may end up with multiple
+         * registers overlapping.
+         */
+        with_intel_runtime_pm_if_active(fence_to_uncore(fence)->rpm, wakeref)
                 fence_write(fence);
 }
 
@@ -412,12 +412,20 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
 }
 
 /**
- * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
+ * __intel_runtime_pm_get_if_active - grab a runtime pm reference if device is active
  * @rpm: the intel_runtime_pm structure
+ * @ignore_usecount: get a ref even if dev->power.usage_count is 0
  *
  * This function grabs a device-level runtime pm reference if the device is
- * already in use and ensures that it is powered up. It is illegal to try
- * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
+ * already active and ensures that it is powered up. It is illegal to try
+ * and access the HW should intel_runtime_pm_get_if_active() report failure.
+ *
+ * If @ignore_usecount=true, a reference will be acquired even if there is no
+ * user requiring the device to be powered up (dev->power.usage_count == 0).
+ * If the function returns false in this case then it's guaranteed that the
+ * device's runtime suspend hook has been called already or that it will be
+ * called (and hence it's also guaranteed that the device's runtime resume
+ * hook will be called eventually).
  *
  * Any runtime pm reference obtained by this function must have a symmetric
  * call to intel_runtime_pm_put() to release the reference again.
@@ -425,7 +433,8 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
  * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
  * as True if the wakeref was acquired, or False otherwise.
  */
-intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
+static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm,
+                                                        bool ignore_usecount)
 {
         if (IS_ENABLED(CONFIG_PM)) {
                 /*
@@ -434,7 +443,7 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
                  * function, since the power state is undefined. This applies
                  * atm to the late/early system suspend/resume handlers.
                  */
-                if (pm_runtime_get_if_in_use(rpm->kdev) <= 0)
+                if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0)
                         return 0;
         }
 
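For reference, a small sketch of the PM-core distinction the replacement above relies on; the helper example_device_is_active() is hypothetical, and pm_runtime_get_if_active() took the ign_usage_count bool in this kernel era.

#include <linux/pm_runtime.h>

/* Hypothetical helper, for illustration only. */
static bool example_device_is_active(struct device *dev)
{
        /*
         * pm_runtime_get_if_in_use() only succeeds when the usage count is
         * already non-zero; pm_runtime_get_if_active(dev, true) succeeds
         * whenever the device is RPM_ACTIVE, even with usage_count == 0,
         * which is what the fence revocation path needs.
         */
        if (pm_runtime_get_if_active(dev, true) <= 0)
                return false;

        pm_runtime_put(dev);    /* drop the reference taken above */
        return true;
}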
@@ -443,6 +452,16 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
         return track_intel_runtime_pm_wakeref(rpm);
 }
 
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
+{
+        return __intel_runtime_pm_get_if_active(rpm, false);
+}
+
+intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
+{
+        return __intel_runtime_pm_get_if_active(rpm, true);
+}
+
 /**
  * intel_runtime_pm_get_noresume - grab a runtime pm reference
  * @rpm: the intel_runtime_pm structure
@@ -177,6 +177,7 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);
 
 intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
 intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
+intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm);
 intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
 intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
 
@@ -188,6 +189,10 @@ intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
         for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \
              intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
 
+#define with_intel_runtime_pm_if_active(rpm, wf) \
+        for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \
+             intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
+
 void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
 void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
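A short usage sketch for the new macro; the function example_clear_reg() and its body are illustrative only, the pattern mirrors the i915_vma_revoke_fence() hunk above.

/* Illustrative use of with_intel_runtime_pm_if_active(), not part of the patch. */
static void example_clear_reg(struct intel_runtime_pm *rpm)
{
        intel_wakeref_t wakeref;

        /*
         * The body runs only if a wakeref could be taken, i.e. the device
         * is currently active; the wakeref is released automatically when
         * the block is left.
         */
        with_intel_runtime_pm_if_active(rpm, wakeref) {
                /* safe to touch hardware registers here */
        }
}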