Merge tag 'drm-intel-next-2021-07-08' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
drm/i915 changes for v5.15:

Features:
- Enable pipe DMC loading on XE-LPD and ADL-P (Anusha)
- Finally remove JSH and EHL force probe requirement (Tejas)

Refactoring and cleanups:
- Refactor and fix DDI buffer translations (Ville)
- Clean up FBC CFB allocation code (Ville, with a fix from Matthew)
- Finish INTEL_GEN() and friends macro conversions (Lucas)
- Misc display cleanups (Ville)

Fixes:
- PSR fixes and ADL-P workarounds (José)
- Fix drm infoframe state mismatch (Bhanuprakash)
- Force Type-C PHY disconnect during suspend/shutdown (Imre)
- Fix power sequence violation on some Chromebook models (Shawn)
- Fix VGA workaround to avoid screen flicker at boot (Emil)
- Fix display 12+ watermark workaround adjustment (Lucas)

Misc:
- Backmerge drm-next (Jani)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/878s2h6t5o.fsf@intel.com
This commit is contained in: commit b4d7049ace
@ -729,8 +729,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum pipe pipe = intel_crtc->pipe;
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum pipe pipe = crtc->pipe;
u32 tmp;
enum port port;
enum transcoder dsi_trans;
@ -1253,15 +1253,36 @@ static void gen11_dsi_pre_enable(struct intel_atomic_state *state,
gen11_dsi_set_transcoder_timings(encoder, pipe_config);
}

/*
* Wa_1409054076:icl,jsl,ehl
* When pipe A is disabled and MIPI DSI is enabled on pipe B,
* the AMT KVMR feature will incorrectly see pipe A as enabled.
* Set 0x42080 bit 23=1 before enabling DSI on pipe B and leave
* it set while DSI is enabled on pipe B
*/
static void icl_apply_kvmr_pipe_a_wa(struct intel_encoder *encoder,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B)
intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
IGNORE_KVMR_PIPE_A,
enable ? IGNORE_KVMR_PIPE_A : 0);
}
static void gen11_dsi_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);

drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);

/* Wa_1409054076:icl,jsl,ehl */
icl_apply_kvmr_pipe_a_wa(encoder, crtc->pipe, true);

/* step6d: enable dsi transcoder */
gen11_dsi_enable_transcoder(encoder);

@ -1415,6 +1436,7 @@ static void gen11_dsi_disable(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_conn_state->crtc);

/* step1: turn off backlight */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
@ -1423,6 +1445,9 @@ static void gen11_dsi_disable(struct intel_atomic_state *state,
/* step2d,e: disable transcoder and wait */
gen11_dsi_disable_transcoder(encoder);

/* Wa_1409054076:icl,jsl,ehl */
icl_apply_kvmr_pipe_a_wa(encoder, crtc->pipe, false);

/* step2f,g: powerdown panel */
gen11_dsi_powerdown_panel(encoder);

@ -1548,6 +1573,22 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
pipe_config->mode_flags |= I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
}

static void gen11_dsi_sync_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = intel_crtc->pipe;

/* wa verify 1409054076:icl,jsl,ehl */
if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B &&
!(intel_de_read(dev_priv, CHICKEN_PAR1_1) & IGNORE_KVMR_PIPE_A))
drm_dbg_kms(&dev_priv->drm,
"[ENCODER:%d:%s] BIOS left IGNORE_KVMR_PIPE_A cleared with pipe B enabled\n",
encoder->base.base.id,
encoder->base.name);
}

static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
@ -1966,6 +2007,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->post_disable = gen11_dsi_post_disable;
encoder->port = port;
encoder->get_config = gen11_dsi_get_config;
encoder->sync_state = gen11_dsi_sync_state;
encoder->update_pipe = intel_panel_update_backlight;
encoder->compute_config = gen11_dsi_compute_config;
encoder->get_hw_state = gen11_dsi_get_hw_state;
@ -38,6 +38,7 @@
#include "intel_crt.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
@ -1081,6 +1082,8 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
crt->base.enable_clock = hsw_ddi_enable_clock;
crt->base.disable_clock = hsw_ddi_disable_clock;
crt->base.is_clock_enabled = hsw_ddi_is_clock_enabled;

intel_ddi_buf_trans_init(&crt->base);
} else {
if (HAS_PCH_SPLIT(dev_priv)) {
crt->base.compute_config = pch_crt_compute_config;
@ -163,12 +163,12 @@ static void intel_crtc_free(struct intel_crtc *crtc)
kfree(crtc);
}

static void intel_crtc_destroy(struct drm_crtc *crtc)
static void intel_crtc_destroy(struct drm_crtc *_crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc *crtc = to_intel_crtc(_crtc);

drm_crtc_cleanup(crtc);
kfree(intel_crtc);
drm_crtc_cleanup(&crtc->base);
kfree(crtc);
}

static int intel_crtc_late_register(struct drm_crtc *crtc)
@ -95,24 +95,18 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder,
* values in advance. This function programs the correct values for
* DP/eDP/FDI use cases.
*/
void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
int i, n_entries;
enum port port = encoder->port;
const struct ddi_buf_trans *ddi_translations;
const struct intel_ddi_buf_trans *ddi_translations;

if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
ddi_translations = intel_ddi_get_buf_trans_fdi(dev_priv,
&n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
ddi_translations = intel_ddi_get_buf_trans_edp(encoder,
&n_entries);
else
ddi_translations = intel_ddi_get_buf_trans_dp(encoder,
&n_entries);
ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;

/* If we're boosting the current, set bit 31 of trans1 */
if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
@ -121,9 +115,9 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,

for (i = 0; i < n_entries; i++) {
intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i),
ddi_translations[i].trans1 | iboost_bit);
ddi_translations->entries[i].hsw.trans1 | iboost_bit);
intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i),
ddi_translations[i].trans2);
ddi_translations->entries[i].hsw.trans2);
}
}

@ -132,17 +126,17 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
* values in advance. This function programs the correct values for
* HDMI/DVI use cases.
*/
static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int level)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
int n_entries;
enum port port = encoder->port;
const struct ddi_buf_trans *ddi_translations;

ddi_translations = intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
const struct intel_ddi_buf_trans *ddi_translations;

ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
@ -155,9 +149,9 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,

/* Entry 9 is for HDMI: */
intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9),
ddi_translations[level].trans1 | iboost_bit);
ddi_translations->entries[level].hsw.trans1 | iboost_bit);
intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9),
ddi_translations[level].trans2);
ddi_translations->entries[level].hsw.trans2);
}

void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
@ -948,22 +942,16 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
iboost = intel_bios_encoder_dp_boost_level(encoder->devdata);

if (iboost == 0) {
const struct ddi_buf_trans *ddi_translations;
const struct intel_ddi_buf_trans *ddi_translations;
int n_entries;

if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
ddi_translations = intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
ddi_translations = intel_ddi_get_buf_trans_edp(encoder, &n_entries);
else
ddi_translations = intel_ddi_get_buf_trans_dp(encoder, &n_entries);

ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;

iboost = ddi_translations[level].i_boost;
iboost = ddi_translations->entries[level].hsw.i_boost;
}

/* Make sure that the requested I_boost is valid */
@ -983,21 +971,21 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
int level)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct bxt_ddi_buf_trans *ddi_translations;
const struct intel_ddi_buf_trans *ddi_translations;
enum port port = encoder->port;
int n_entries;

ddi_translations = bxt_get_buf_trans(encoder, crtc_state, &n_entries);
ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;

bxt_ddi_phy_set_signal_level(dev_priv, port,
ddi_translations[level].margin,
ddi_translations[level].scale,
ddi_translations[level].enable,
ddi_translations[level].deemphasis);
ddi_translations->entries[level].bxt.margin,
ddi_translations->entries[level].bxt.scale,
ddi_translations->entries[level].bxt.enable,
ddi_translations->entries[level].bxt.deemphasis);
}

static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp,
@ -1005,36 +993,9 @@ static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp,
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
enum phy phy = intel_port_to_phy(dev_priv, port);
int n_entries;

if (DISPLAY_VER(dev_priv) >= 12) {
if (intel_phy_is_combo(dev_priv, phy))
tgl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
else if (IS_ALDERLAKE_P(dev_priv))
adlp_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
else
tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
} else if (DISPLAY_VER(dev_priv) == 11) {
if (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE))
jsl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
else if (IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
ehl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
else if (intel_phy_is_combo(dev_priv, phy))
icl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
else
icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
} else if (IS_CANNONLAKE(dev_priv)) {
cnl_get_buf_trans(encoder, crtc_state, &n_entries);
} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
bxt_get_buf_trans(encoder, crtc_state, &n_entries);
} else {
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
intel_ddi_get_buf_trans_edp(encoder, &n_entries);
else
intel_ddi_get_buf_trans_dp(encoder, &n_entries);
}
encoder->get_buf_trans(encoder, crtc_state, &n_entries);

if (drm_WARN_ON(&dev_priv->drm, n_entries < 1))
n_entries = 1;
@ -1061,13 +1022,12 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
int level)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct cnl_ddi_buf_trans *ddi_translations;
const struct intel_ddi_buf_trans *ddi_translations;
enum port port = encoder->port;
int n_entries, ln;
u32 val;

ddi_translations = cnl_get_buf_trans(encoder, crtc_state, &n_entries);

ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
@ -1083,8 +1043,8 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
val = intel_de_read(dev_priv, CNL_PORT_TX_DW2_LN0(port));
val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
val |= SWING_SEL_UPPER(ddi_translations->entries[level].cnl.dw2_swing_sel);
val |= SWING_SEL_LOWER(ddi_translations->entries[level].cnl.dw2_swing_sel);
/* Rcomp scalar is fixed as 0x98 for every table entry */
val |= RCOMP_SCALAR(0x98);
intel_de_write(dev_priv, CNL_PORT_TX_DW2_GRP(port), val);
@ -1095,9 +1055,9 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
val = intel_de_read(dev_priv, CNL_PORT_TX_DW4_LN(ln, port));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
val |= POST_CURSOR_1(ddi_translations->entries[level].cnl.dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations->entries[level].cnl.dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations->entries[level].cnl.dw4_cursor_coeff);
intel_de_write(dev_priv, CNL_PORT_TX_DW4_LN(ln, port), val);
}

@ -1112,7 +1072,7 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
/* Program PORT_TX_DW7 */
val = intel_de_read(dev_priv, CNL_PORT_TX_DW7_LN0(port));
val &= ~N_SCALAR_MASK;
val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
val |= N_SCALAR(ddi_translations->entries[level].cnl.dw7_n_scalar);
intel_de_write(dev_priv, CNL_PORT_TX_DW7_GRP(port), val);
}

@ -1182,20 +1142,12 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
int level)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct cnl_ddi_buf_trans *ddi_translations;
const struct intel_ddi_buf_trans *ddi_translations;
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
int n_entries, ln;
u32 val;

if (DISPLAY_VER(dev_priv) >= 12)
ddi_translations = tgl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
else if (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE))
ddi_translations = jsl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
else if (IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
ddi_translations = ehl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
else
ddi_translations = icl_get_combo_buf_trans(encoder, crtc_state, &n_entries);

ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
@ -1223,8 +1175,8 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
val |= SWING_SEL_UPPER(ddi_translations->entries[level].cnl.dw2_swing_sel);
val |= SWING_SEL_LOWER(ddi_translations->entries[level].cnl.dw2_swing_sel);
/* Program Rcomp scalar for every table entry */
val |= RCOMP_SCALAR(0x98);
intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), val);
@ -1235,16 +1187,16 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
val |= POST_CURSOR_1(ddi_translations->entries[level].cnl.dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations->entries[level].cnl.dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations->entries[level].cnl.dw4_cursor_coeff);
intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
}

/* Program PORT_TX_DW7 */
val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN0(phy));
val &= ~N_SCALAR_MASK;
val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
val |= N_SCALAR(ddi_translations->entries[level].cnl.dw7_n_scalar);
intel_de_write(dev_priv, ICL_PORT_TX_DW7_GRP(phy), val);
}

@ -1315,15 +1267,14 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
const struct icl_mg_phy_ddi_buf_trans *ddi_translations;
const struct intel_ddi_buf_trans *ddi_translations;
int n_entries, ln;
u32 val;

if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
return;

ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);

ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
@ -1345,13 +1296,13 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val = intel_de_read(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
ddi_translations->entries[level].mg.cri_txdeemph_override_17_12);
intel_de_write(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), val);

val = intel_de_read(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
ddi_translations->entries[level].mg.cri_txdeemph_override_17_12);
intel_de_write(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), val);
}

@ -1361,9 +1312,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
ddi_translations[level].cri_txdeemph_override_5_0) |
ddi_translations->entries[level].mg.cri_txdeemph_override_5_0) |
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
ddi_translations->entries[level].mg.cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
intel_de_write(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), val);

@ -1371,9 +1322,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
ddi_translations[level].cri_txdeemph_override_5_0) |
ddi_translations->entries[level].mg.cri_txdeemph_override_5_0) |
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
ddi_translations->entries[level].mg.cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
intel_de_write(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), val);

@ -1453,18 +1404,14 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations;
const struct intel_ddi_buf_trans *ddi_translations;
u32 val, dpcnt_mask, dpcnt_val;
int n_entries, ln;

if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
return;

if (IS_ALDERLAKE_P(dev_priv))
ddi_translations = adlp_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
else
ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);

ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
@ -1473,9 +1420,9 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK |
DKL_TX_DE_EMPAHSIS_COEFF_MASK |
DKL_TX_VSWING_CONTROL_MASK);
dpcnt_val = DKL_TX_VSWING_CONTROL(ddi_translations[level].dkl_vswing_control);
dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(ddi_translations[level].dkl_de_emphasis_control);
dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations[level].dkl_preshoot_control);
dpcnt_val = DKL_TX_VSWING_CONTROL(ddi_translations->entries[level].dkl.dkl_vswing_control);
dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(ddi_translations->entries[level].dkl.dkl_de_emphasis_control);
dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations->entries[level].dkl.dkl_preshoot_control);

for (ln = 0; ln < 2; ln++) {
intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
@ -2715,7 +2662,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
bxt_ddi_vswing_sequence(encoder, crtc_state, level);
else
intel_prepare_dp_ddi_buffers(encoder, crtc_state);
hsw_prepare_dp_ddi_buffers(encoder, crtc_state);

intel_ddi_power_up_lanes(encoder, crtc_state);

@ -2823,6 +2770,7 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
conn_state);

/* FIXME precompute everything properly */
/* FIXME how do we turn infoframes off again? */
if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink)
dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
@ -3162,7 +3110,7 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
bxt_ddi_vswing_sequence(encoder, crtc_state, level);
else
intel_prepare_hdmi_ddi_buffers(encoder, level);
hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state, level);

if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
skl_ddi_set_iboost(encoder, crtc_state, level);
@ -3590,7 +3538,7 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u32 temp, flags = 0;
@ -3653,7 +3601,7 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
intel_dp_get_m_n(intel_crtc, pipe_config);
intel_dp_get_m_n(crtc, pipe_config);

if (DISPLAY_VER(dev_priv) >= 11) {
i915_reg_t dp_tp_ctl = dp_tp_ctl_reg(encoder, pipe_config);
@ -3683,7 +3631,7 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
pipe_config->mst_master_transcoder =
REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp);

intel_dp_get_m_n(intel_crtc, pipe_config);
intel_dp_get_m_n(crtc, pipe_config);

pipe_config->infoframes.enable |=
intel_hdmi_infoframes_enabled(encoder, pipe_config);
@ -4509,6 +4457,36 @@ static bool intel_ddi_is_tc(struct drm_i915_private *i915, enum port port)
return false;
}

static void intel_ddi_encoder_suspend(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum phy phy = intel_port_to_phy(i915, encoder->port);

intel_dp_encoder_suspend(encoder);

if (!intel_phy_is_tc(i915, phy))
return;

intel_tc_port_disconnect_phy(dig_port);
}

static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum phy phy = intel_port_to_phy(i915, encoder->port);

intel_dp_encoder_shutdown(encoder);

if (!intel_phy_is_tc(i915, phy))
return;

intel_tc_port_disconnect_phy(dig_port);
}

#define port_tc_name(port) ((port) - PORT_TC1 + '1')
#define tc_port_name(tc_port) ((tc_port) - TC_PORT_1 + '1')

@ -4618,8 +4596,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->get_hw_state = intel_ddi_get_hw_state;
encoder->sync_state = intel_ddi_sync_state;
encoder->initial_fastset_check = intel_ddi_initial_fastset_check;
encoder->suspend = intel_dp_encoder_suspend;
encoder->shutdown = intel_dp_encoder_shutdown;
encoder->suspend = intel_ddi_encoder_suspend;
encoder->shutdown = intel_ddi_encoder_shutdown;
encoder->get_power_domains = intel_ddi_get_power_domains;

encoder->type = INTEL_OUTPUT_DDI;
@ -4687,6 +4665,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->get_config = hsw_ddi_get_config;
}

intel_ddi_buf_trans_init(encoder);

if (DISPLAY_VER(dev_priv) >= 13)
encoder->hpd_pin = xelpd_hpd_pin(dev_priv, port);
else if (IS_DG1(dev_priv))
@ -40,7 +40,7 @@ bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder);
void hsw_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state);
struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);
void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port);

File diff suppressed because it is too large
@ -12,7 +12,7 @@ struct drm_i915_private;
struct intel_encoder;
struct intel_crtc_state;

struct ddi_buf_trans {
struct hsw_ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
u32 trans2; /* vref sel, vswing */
u8 i_boost; /* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */
@ -45,60 +45,26 @@ struct tgl_dkl_phy_ddi_buf_trans {
u32 dkl_de_emphasis_control;
};

bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table);
union intel_ddi_buf_trans_entry {
struct hsw_ddi_buf_trans hsw;
struct bxt_ddi_buf_trans bxt;
struct cnl_ddi_buf_trans cnl;
struct icl_mg_phy_ddi_buf_trans mg;
struct tgl_dkl_phy_ddi_buf_trans dkl;
};

struct intel_ddi_buf_trans {
const union intel_ddi_buf_trans_entry *entries;
u8 num_entries;
u8 hdmi_default_entry;
};

bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table);

int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *default_entry);

const struct ddi_buf_trans *
intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries);
const struct ddi_buf_trans *
intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
int *n_entries);
const struct ddi_buf_trans *
intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder,
int *n_entries);
const struct ddi_buf_trans *
intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries);

const struct bxt_ddi_buf_trans *
bxt_get_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);

const struct tgl_dkl_phy_ddi_buf_trans *
adlp_get_dkl_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
const struct cnl_ddi_buf_trans *
tgl_get_combo_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
const struct tgl_dkl_phy_ddi_buf_trans *
tgl_get_dkl_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
const struct cnl_ddi_buf_trans *
jsl_get_combo_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
const struct cnl_ddi_buf_trans *
ehl_get_combo_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
const struct cnl_ddi_buf_trans *
icl_get_combo_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
const struct icl_mg_phy_ddi_buf_trans *
icl_get_mg_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);

const struct cnl_ddi_buf_trans *
cnl_get_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
void intel_ddi_buf_trans_init(struct intel_encoder *encoder);

#endif
@ -1914,20 +1914,50 @@ static void intel_dpt_unpin(struct i915_address_space *vm)
i915_vma_put(dpt->vma);
}

static bool
intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
const struct intel_initial_plane_config *plane_config,
struct drm_framebuffer **fb,
struct i915_vma **vma)
{
struct intel_crtc *crtc;

for_each_intel_crtc(&i915->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);

if (!crtc_state->uapi.active)
continue;

if (!plane_state->ggtt_vma)
continue;

if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
*fb = plane_state->hw.fb;
*vma = plane_state->ggtt_vma;
return true;
}
}

return false;
}

static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
intel_find_initial_plane_obj(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *c;
struct drm_plane *primary = intel_crtc->base.primary;
struct drm_plane_state *plane_state = primary->state;
struct intel_plane *intel_plane = to_intel_plane(primary);
struct intel_plane_state *intel_state =
to_intel_plane_state(plane_state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(intel_crtc->base.state);
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane =
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
struct drm_framebuffer *fb;
struct i915_vma *vma;

@ -1939,7 +1969,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
if (!plane_config->fb)
return;

if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
if (intel_alloc_initial_plane_obj(crtc, plane_config)) {
fb = &plane_config->fb->base;
vma = plane_config->vma;
goto valid_fb;
@ -1949,25 +1979,8 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
* Failed to alloc the obj, check to see if we should share
* an fb with another CRTC instead
*/
for_each_crtc(dev, c) {
struct intel_plane_state *state;

if (c == &intel_crtc->base)
continue;

if (!to_intel_crtc_state(c->state)->uapi.active)
continue;

state = to_intel_plane_state(c->primary->state);
if (!state->ggtt_vma)
continue;

if (intel_plane_ggtt_offset(state) == plane_config->base) {
fb = state->hw.fb;
vma = state->ggtt_vma;
if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
goto valid_fb;
}
}

/*
* We've failed to reconstruct the BIOS FB. Current display state
@ -1976,7 +1989,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
* simplest solution is to just disable the primary plane now and
* pretend the BIOS never had it enabled.
*/
intel_plane_disable_noatomic(intel_crtc, intel_plane);
intel_plane_disable_noatomic(crtc, plane);
if (crtc_state->bigjoiner) {
struct intel_crtc *slave =
crtc_state->bigjoiner_linked_crtc;
@ -1986,40 +1999,38 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
return;

valid_fb:
plane_state->rotation = plane_config->rotation;
intel_fb_fill_view(to_intel_framebuffer(fb), plane_state->rotation,
&intel_state->view);
plane_state->uapi.rotation = plane_config->rotation;
intel_fb_fill_view(to_intel_framebuffer(fb),
plane_state->uapi.rotation, &plane_state->view);

__i915_vma_pin(vma);
intel_state->ggtt_vma = i915_vma_get(vma);
if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
if (vma->fence)
intel_state->flags |= PLANE_HAS_FENCE;
plane_state->ggtt_vma = i915_vma_get(vma);
if (intel_plane_uses_fence(plane_state) &&
i915_vma_pin_fence(vma) == 0 && vma->fence)
plane_state->flags |= PLANE_HAS_FENCE;

plane_state->src_x = 0;
plane_state->src_y = 0;
plane_state->src_w = fb->width << 16;
plane_state->src_h = fb->height << 16;
plane_state->uapi.src_x = 0;
plane_state->uapi.src_y = 0;
plane_state->uapi.src_w = fb->width << 16;
plane_state->uapi.src_h = fb->height << 16;

plane_state->crtc_x = 0;
plane_state->crtc_y = 0;
plane_state->crtc_w = fb->width;
plane_state->crtc_h = fb->height;
plane_state->uapi.crtc_x = 0;
plane_state->uapi.crtc_y = 0;
plane_state->uapi.crtc_w = fb->width;
plane_state->uapi.crtc_h = fb->height;

if (plane_config->tiling)
dev_priv->preserve_bios_swizzle = true;

plane_state->fb = fb;
plane_state->uapi.fb = fb;
drm_framebuffer_get(fb);

plane_state->crtc = &intel_crtc->base;
intel_plane_copy_uapi_to_hw_state(intel_state, intel_state,
intel_crtc);
plane_state->uapi.crtc = &crtc->base;
intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);

intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

atomic_or(to_intel_plane(primary)->frontbuffer_bit,
&to_intel_frontbuffer(fb)->bits);
atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
}

unsigned int
@ -2706,10 +2717,10 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
intel_wait_for_vblank(dev_priv, crtc->pipe);
}

static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
{
if (intel_crtc->overlay)
(void) intel_overlay_switch_off(intel_crtc->overlay);
if (crtc->overlay)
(void) intel_overlay_switch_off(crtc->overlay);

/* Let userspace switch the overlay on again. In most cases userspace
* has to recompute where to put it anyway.
@ -6473,23 +6484,21 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx)
{
struct intel_crtc *intel_crtc;
struct intel_encoder *intel_encoder =
struct intel_encoder *encoder =
intel_attached_encoder(to_intel_connector(connector));
struct drm_crtc *possible_crtc;
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
struct intel_crtc *possible_crtc;
struct intel_crtc *crtc = NULL;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_config *config = &dev->mode_config;
struct drm_atomic_state *state = NULL, *restore_state = NULL;
struct drm_connector_state *connector_state;
struct intel_crtc_state *crtc_state;
int ret, i = -1;
int ret;

drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, connector->name,
encoder->base.id, encoder->name);
encoder->base.base.id, encoder->base.name);

old->restore_state = NULL;

@ -6507,9 +6516,9 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,

/* See if we already have a CRTC for this connector */
if (connector->state->crtc) {
crtc = connector->state->crtc;
crtc = to_intel_crtc(connector->state->crtc);

ret = drm_modeset_lock(&crtc->mutex, ctx);
ret = drm_modeset_lock(&crtc->base.mutex, ctx);
if (ret)
goto fail;

@ -6518,17 +6527,17 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
}

/* Find an unused one (if possible) */
for_each_crtc(dev, possible_crtc) {
i++;
if (!(encoder->possible_crtcs & (1 << i)))
for_each_intel_crtc(dev, possible_crtc) {
if (!(encoder->base.possible_crtcs &
drm_crtc_mask(&possible_crtc->base)))
continue;

ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
if (ret)
goto fail;

if (possible_crtc->state->enable) {
drm_modeset_unlock(&possible_crtc->mutex);
if (possible_crtc->base.state->enable) {
drm_modeset_unlock(&possible_crtc->base.mutex);
continue;
}

@ -6547,8 +6556,6 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
}

found:
intel_crtc = to_intel_crtc(crtc);

state = drm_atomic_state_alloc(dev);
restore_state = drm_atomic_state_alloc(dev);
if (!state || !restore_state) {
@ -6565,11 +6572,11 @@ found:
goto fail;
}

ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
if (ret)
goto fail;

crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
crtc_state = intel_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
goto fail;
@ -6582,15 +6589,15 @@ found:
if (ret)
goto fail;

ret = intel_modeset_disable_planes(state, crtc);
ret = intel_modeset_disable_planes(state, &crtc->base);
if (ret)
goto fail;

ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
if (!ret)
ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
if (!ret)
ret = drm_atomic_add_affected_planes(restore_state, crtc);
ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
if (ret) {
drm_dbg_kms(&dev_priv->drm,
"Failed to create a copy of old state to restore: %i\n",
@ -6609,7 +6616,7 @@ found:
drm_atomic_state_put(state);

/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
intel_wait_for_vblank(dev_priv, crtc->pipe);
return true;

fail:
@ -7281,12 +7288,13 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
}

if (dev_priv->display.compute_pipe_wm) {
ret = dev_priv->display.compute_pipe_wm(crtc_state);
ret = dev_priv->display.compute_pipe_wm(state, crtc);
if (ret) {
drm_dbg_kms(&dev_priv->drm,
"Target pipe watermarks are invalid\n");
return ret;
}

}

if (dev_priv->display.compute_intermediate_wm) {
@ -7299,7 +7307,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
* old state and the new state. We can program these
* immediately.
*/
ret = dev_priv->display.compute_intermediate_wm(crtc_state);
ret = dev_priv->display.compute_intermediate_wm(state, crtc);
if (ret) {
drm_dbg_kms(&dev_priv->drm,
"No valid intermediate pipe watermarks are possible\n");
@ -544,6 +544,11 @@ static int i915_dmc_info(struct seq_file *m, void *unused)

seq_printf(m, "fw loaded: %s\n", yesno(intel_dmc_has_payload(dev_priv)));
seq_printf(m, "path: %s\n", dmc->fw_path);
seq_printf(m, "Pipe A fw support: %s\n",
yesno(GRAPHICS_VER(dev_priv) >= 12));
seq_printf(m, "Pipe A fw loaded: %s\n", yesno(dmc->dmc_info[DMC_FW_PIPEA].payload));
seq_printf(m, "Pipe B fw support: %s\n", yesno(IS_ALDERLAKE_P(dev_priv)));
seq_printf(m, "Pipe B fw loaded: %s\n", yesno(dmc->dmc_info[DMC_FW_PIPEB].payload));

if (!intel_dmc_has_payload(dev_priv))
goto out;
@ -582,7 +587,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)

out:
seq_printf(m, "program base: 0x%08x\n",
intel_de_read(dev_priv, DMC_PROGRAM(0)));
intel_de_read(dev_priv, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
seq_printf(m, "ssp base: 0x%08x\n",
intel_de_read(dev_priv, DMC_SSP_BASE));
seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, DMC_HTP_SKL));
@ -1225,7 +1230,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)

static void drrs_status_per_crtc(struct seq_file *m,
struct drm_device *dev,
struct intel_crtc *intel_crtc)
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_drrs *drrs = &dev_priv->drrs;
@ -1237,7 +1242,7 @@ static void drrs_status_per_crtc(struct seq_file *m,
drm_for_each_connector_iter(connector, &conn_iter) {
bool supported = false;

if (connector->state->crtc != &intel_crtc->base)
if (connector->state->crtc != &crtc->base)
continue;

seq_printf(m, "%s:\n", connector->name);
@ -1252,7 +1257,7 @@ static void drrs_status_per_crtc(struct seq_file *m,

seq_puts(m, "\n");

if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
if (to_intel_crtc_state(crtc->base.state)->has_drrs) {
struct intel_panel *panel;

mutex_lock(&drrs->mutex);
@ -1298,16 +1303,16 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *intel_crtc;
struct intel_crtc *crtc;
int active_crtc_cnt = 0;

drm_modeset_lock_all(dev);
for_each_intel_crtc(dev, intel_crtc) {
if (intel_crtc->base.state->active) {
for_each_intel_crtc(dev, crtc) {
if (crtc->base.state->active) {
active_crtc_cnt++;
seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);

drrs_status_per_crtc(m, dev, intel_crtc);
drrs_status_per_crtc(m, dev, crtc);
}
}
drm_modeset_unlock_all(dev);
@ -2064,7 +2069,7 @@ i915_fifo_underrun_reset_write(struct file *filp,
size_t cnt, loff_t *ppos)
{
struct drm_i915_private *dev_priv = filp->private_data;
struct intel_crtc *intel_crtc;
struct intel_crtc *crtc;
struct drm_device *dev = &dev_priv->drm;
int ret;
bool reset;
@ -2076,15 +2081,15 @@ i915_fifo_underrun_reset_write(struct file *filp,
if (!reset)
return cnt;

for_each_intel_crtc(dev, intel_crtc) {
for_each_intel_crtc(dev, crtc) {
struct drm_crtc_commit *commit;
struct intel_crtc_state *crtc_state;

ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
if (ret)
return ret;

crtc_state = to_intel_crtc_state(intel_crtc->base.state);
crtc_state = to_intel_crtc_state(crtc->base.state);
commit = crtc_state->uapi.commit;
if (commit) {
ret = wait_for_completion_interruptible(&commit->hw_done);
@ -2095,12 +2100,12 @@ i915_fifo_underrun_reset_write(struct file *filp,
if (!ret && crtc_state->hw.active) {
drm_dbg_kms(&dev_priv->drm,
"Re-arming FIFO underruns on pipe %c\n",
pipe_name(intel_crtc->pipe));
pipe_name(crtc->pipe));

intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
intel_crtc_arm_fifo_underrun(crtc, crtc_state);
}

drm_modeset_unlock(&intel_crtc->base.mutex);
drm_modeset_unlock(&crtc->base.mutex);

if (ret)
return ret;
@ -961,7 +961,8 @@ static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
static void assert_dmc_loaded(struct drm_i915_private *dev_priv)
{
drm_WARN_ONCE(&dev_priv->drm,
!intel_de_read(dev_priv, DMC_PROGRAM(0)),
!intel_de_read(dev_priv,
DMC_PROGRAM(dev_priv->dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
"DMC program storage start is NULL\n");
drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_SSP_BASE),
"DMC SSP Base Not fine\n");
@ -48,6 +48,7 @@

struct drm_printer;
struct __intel_global_objs_state;
struct intel_ddi_buf_trans;

/*
* Display related stuff
@ -263,6 +264,9 @@ struct intel_encoder {
* Returns whether the port clock is enabled or not.
*/
bool (*is_clock_enabled)(struct intel_encoder *encoder);
const struct intel_ddi_buf_trans *(*get_buf_trans)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
enum hpd_pin hpd_pin;
enum intel_display_power_domain power_domain;
/* for communication with audio component; protected by av_mutex */
@ -1040,7 +1044,9 @@ struct intel_crtc_state {
bool has_psr;
bool has_psr2;
bool enable_psr2_sel_fetch;
bool req_psr2_sdp_prior_scanline;
u32 dc3co_exitline;
u16 su_y_granularity;

/*
* Frequence the dpll for the port should run at. Differs from the
@ -1493,12 +1499,14 @@ struct intel_psr {
bool colorimetry_support;
bool psr2_enabled;
bool psr2_sel_fetch_enabled;
bool req_psr2_sdp_prior_scanline;
u8 sink_sync_latency;
ktime_t last_entry_attempt;
ktime_t last_exit;
bool sink_not_reliable;
bool irq_aux_error;
u16 su_x_granularity;
u16 su_w_granularity;
u16 su_y_granularity;
u32 dc3co_exitline;
u32 dc3co_exit_delay;
struct delayed_work dc3co_work;
@ -45,6 +45,10 @@
|
||||
|
||||
#define GEN12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
|
||||
|
||||
#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 10)
|
||||
#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 10)
|
||||
MODULE_FIRMWARE(ADLP_DMC_PATH);
|
||||
|
||||
#define ADLS_DMC_PATH DMC_PATH(adls, 2, 01)
|
||||
#define ADLS_DMC_VERSION_REQUIRED DMC_VERSION(2, 1)
|
||||
MODULE_FIRMWARE(ADLS_DMC_PATH);
|
||||
@ -96,6 +100,7 @@ MODULE_FIRMWARE(BXT_DMC_PATH);
|
||||
#define PACKAGE_V2_MAX_FW_INFO_ENTRIES 32
|
||||
#define DMC_V1_MAX_MMIO_COUNT 8
|
||||
#define DMC_V3_MAX_MMIO_COUNT 20
|
||||
#define DMC_V1_MMIO_START_RANGE 0x80000
|
||||
|
||||
struct intel_css_header {
|
||||
/* 0x09 for DMC */
|
||||
@ -239,7 +244,7 @@ struct stepping_info {
|
||||
|
||||
bool intel_dmc_has_payload(struct drm_i915_private *i915)
|
||||
{
|
||||
return i915->dmc.dmc_payload;
|
||||
return i915->dmc.dmc_info[DMC_FW_MAIN].payload;
|
||||
}
|
||||
|
||||
static const struct stepping_info skl_stepping_info[] = {
|
||||
@ -316,8 +321,8 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
void intel_dmc_load_program(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
u32 *payload = dev_priv->dmc.dmc_payload;
|
||||
u32 i, fw_size;
|
||||
struct intel_dmc *dmc = &dev_priv->dmc;
|
||||
u32 id, i;
|
||||
|
||||
if (!HAS_DMC(dev_priv)) {
|
||||
drm_err(&dev_priv->drm,
|
||||
@ -325,26 +330,31 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv)
|
||||
return;
|
||||
}
|
||||
|
||||
if (!intel_dmc_has_payload(dev_priv)) {
|
||||
if (!dev_priv->dmc.dmc_info[DMC_FW_MAIN].payload) {
|
||||
drm_err(&dev_priv->drm,
|
||||
"Tried to program CSR with empty payload\n");
|
||||
return;
|
||||
}
|
||||
|
||||
fw_size = dev_priv->dmc.dmc_fw_size;
|
||||
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
|
||||
|
||||
preempt_disable();
|
||||
|
||||
for (i = 0; i < fw_size; i++)
|
||||
intel_uncore_write_fw(&dev_priv->uncore, DMC_PROGRAM(i),
|
||||
payload[i]);
|
||||
for (id = 0; id < DMC_FW_MAX; id++) {
|
||||
for (i = 0; i < dmc->dmc_info[id].dmc_fw_size; i++) {
|
||||
intel_uncore_write_fw(&dev_priv->uncore,
|
||||
DMC_PROGRAM(dmc->dmc_info[id].start_mmioaddr, i),
|
||||
dmc->dmc_info[id].payload[i]);
|
||||
}
|
||||
}
|
||||
|
||||
preempt_enable();
|
||||
|
||||
for (i = 0; i < dev_priv->dmc.mmio_count; i++) {
|
||||
intel_de_write(dev_priv, dev_priv->dmc.mmioaddr[i],
|
||||
dev_priv->dmc.mmiodata[i]);
|
||||
for (id = 0; id < DMC_FW_MAX; id++) {
|
||||
for (i = 0; i < dmc->dmc_info[id].mmio_count; i++) {
|
||||
intel_de_write(dev_priv, dmc->dmc_info[id].mmioaddr[i],
|
||||
dmc->dmc_info[id].mmiodata[i]);
|
||||
}
|
||||
}
|
||||
|
||||
dev_priv->dmc.dc_state = 0;
|
||||
@ -352,62 +362,72 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv)
|
||||
gen9_set_dc_state_debugmask(dev_priv);
|
||||
}

static bool fw_info_matches_stepping(const struct intel_fw_info *fw_info,
const struct stepping_info *si)
{
if ((fw_info->substepping == '*' && si->stepping == fw_info->stepping) ||
(si->stepping == fw_info->stepping && si->substepping == fw_info->substepping) ||
/*
* If we don't find a more specific one from above two checks, we
* then check for the generic one to be sure to work even with
* "broken firmware"
*/
(si->stepping == '*' && si->substepping == fw_info->substepping) ||
(fw_info->stepping == '*' && fw_info->substepping == '*'))
return true;

return false;
}

/*
* Search fw_info table for dmc_offset to find firmware binary: num_entries is
* already sanitized.
*/
static u32 find_dmc_fw_offset(const struct intel_fw_info *fw_info,
static void dmc_set_fw_offset(struct intel_dmc *dmc,
const struct intel_fw_info *fw_info,
unsigned int num_entries,
const struct stepping_info *si,
u8 package_ver)
{
u32 dmc_offset = DMC_DEFAULT_FW_OFFSET;
unsigned int i;
unsigned int i, id;

struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);

for (i = 0; i < num_entries; i++) {
if (package_ver > 1 && fw_info[i].dmc_id != 0)
id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;

if (id >= DMC_FW_MAX) {
drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", id);
continue;
}

/* More specific versions come first, so we don't even have to
* check for the stepping since we already found a previous FW
* for this id.
*/
if (dmc->dmc_info[id].present)
continue;

if (fw_info[i].substepping == '*' &&
si->stepping == fw_info[i].stepping) {
dmc_offset = fw_info[i].offset;
break;
}

if (si->stepping == fw_info[i].stepping &&
si->substepping == fw_info[i].substepping) {
dmc_offset = fw_info[i].offset;
break;
}

if (fw_info[i].stepping == '*' &&
fw_info[i].substepping == '*') {
/*
* In theory we should stop the search as generic
* entries should always come after the more specific
* ones, but let's continue to make sure to work even
* with "broken" firmwares. If we don't find a more
* specific one, then we use this entry
*/
dmc_offset = fw_info[i].offset;
if (fw_info_matches_stepping(&fw_info[i], si)) {
dmc->dmc_info[id].present = true;
dmc->dmc_info[id].dmc_offset = fw_info[i].offset;
}
}

return dmc_offset;
}
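The stepping match rule used by dmc_set_fw_offset() can be summarised with a small standalone program. This is a simplified reading of the rule (exact match, stepping wildcard, fully generic entry), not a verbatim copy of the driver code; the types are invented for the example.

#include <stdbool.h>
#include <stdio.h>

struct step { char stepping, substepping; };

static bool step_matches(struct step fw, struct step si)
{
	if (fw.stepping == si.stepping && fw.substepping == si.substepping)
		return true;	/* exact match */
	if (fw.substepping == '*' && fw.stepping == si.stepping)
		return true;	/* any substepping of this stepping */
	if (fw.stepping == '*' && fw.substepping == '*')
		return true;	/* fully generic entry, last resort */
	return false;
}

int main(void)
{
	struct step si = { 'B', '1' };
	struct step exact = { 'B', '1' }, wild = { 'B', '*' };
	struct step generic = { '*', '*' }, other = { 'C', '0' };

	printf("%d %d %d %d\n", step_matches(exact, si), step_matches(wild, si),
	       step_matches(generic, si), step_matches(other, si));
	return 0;
}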

static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
const struct intel_dmc_header_base *dmc_header,
size_t rem_size)
size_t rem_size, u8 dmc_id)
{
struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id];
unsigned int header_len_bytes, dmc_header_size, payload_size, i;
const u32 *mmioaddr, *mmiodata;
u32 mmio_count, mmio_count_max;
u32 mmio_count, mmio_count_max, start_mmioaddr;
u8 *payload;

BUILD_BUG_ON(ARRAY_SIZE(dmc->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
ARRAY_SIZE(dmc->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);
BUILD_BUG_ON(ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);

/*
* Check if we can access common fields, we will checkc again below
@ -430,6 +450,7 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
mmio_count_max = DMC_V3_MAX_MMIO_COUNT;
/* header_len is in dwords */
header_len_bytes = dmc_header->header_len * 4;
start_mmioaddr = v3->start_mmioaddr;
dmc_header_size = sizeof(*v3);
} else if (dmc_header->header_ver == 1) {
const struct intel_dmc_header_v1 *v1 =
@ -443,6 +464,7 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
mmio_count = v1->mmio_count;
mmio_count_max = DMC_V1_MAX_MMIO_COUNT;
header_len_bytes = dmc_header->header_len;
start_mmioaddr = DMC_V1_MMIO_START_RANGE;
dmc_header_size = sizeof(*v1);
} else {
drm_err(&i915->drm, "Unknown DMC fw header version: %u\n",
@ -463,16 +485,11 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
}

for (i = 0; i < mmio_count; i++) {
if (mmioaddr[i] < DMC_MMIO_START_RANGE ||
mmioaddr[i] > DMC_MMIO_END_RANGE) {
drm_err(&i915->drm, "DMC firmware has wrong mmio address 0x%x\n",
mmioaddr[i]);
return 0;
dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
dmc_info->mmiodata[i] = mmiodata[i];
}
dmc->mmioaddr[i] = _MMIO(mmioaddr[i]);
dmc->mmiodata[i] = mmiodata[i];
}
dmc->mmio_count = mmio_count;
dmc_info->mmio_count = mmio_count;
dmc_info->start_mmioaddr = start_mmioaddr;

rem_size -= header_len_bytes;

@ -485,14 +502,14 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
drm_err(&i915->drm, "DMC FW too big (%u bytes)\n", payload_size);
return 0;
}
dmc->dmc_fw_size = dmc_header->fw_size;
dmc_info->dmc_fw_size = dmc_header->fw_size;

dmc->dmc_payload = kmalloc(payload_size, GFP_KERNEL);
if (!dmc->dmc_payload)
dmc_info->payload = kmalloc(payload_size, GFP_KERNEL);
if (!dmc_info->payload)
return 0;

payload = (u8 *)(dmc_header) + header_len_bytes;
memcpy(dmc->dmc_payload, payload, payload_size);
memcpy(dmc_info->payload, payload, payload_size);

return header_len_bytes + payload_size;
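A hedged sketch of the version-dependent header parsing above: the header length is counted in dwords for v3 headers and in bytes for v1, and every size is bounds-checked against the remaining buffer before it is used. The structures below are invented for the example and assume a little-endian host; they are not the real DMC headers.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct blob_header {
	uint8_t version;
	uint8_t header_len;	/* v3: dwords, v1: bytes */
	uint16_t fw_size;	/* payload size in dwords */
};

static long parse_blob(const uint8_t *data, size_t len)
{
	struct blob_header hdr;
	size_t header_bytes, payload_bytes;

	if (len < sizeof(hdr))
		return -1;
	memcpy(&hdr, data, sizeof(hdr));

	header_bytes = (hdr.version >= 3) ? hdr.header_len * 4u : hdr.header_len;
	payload_bytes = hdr.fw_size * 4u;

	if (header_bytes > len || payload_bytes > len - header_bytes)
		return -1;	/* truncated firmware image */

	return (long)(header_bytes + payload_bytes);	/* bytes consumed */
}

int main(void)
{
	/* version 3, 2-dword header, 4-dword payload (little-endian fw_size) */
	uint8_t image[64] = { 3, 2, 4, 0 };

	printf("consumed %ld bytes\n", parse_blob(image, sizeof(image)));
	return 0;
}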

@ -509,7 +526,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc,
{
struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
u32 package_size = sizeof(struct intel_package_header);
u32 num_entries, max_entries, dmc_offset;
u32 num_entries, max_entries;
const struct intel_fw_info *fw_info;

if (rem_size < package_size)
@ -545,16 +562,11 @@ parse_dmc_fw_package(struct intel_dmc *dmc,

fw_info = (const struct intel_fw_info *)
((u8 *)package_header + sizeof(*package_header));
dmc_offset = find_dmc_fw_offset(fw_info, num_entries, si,
dmc_set_fw_offset(dmc, fw_info, num_entries, si,
package_header->header_ver);
if (dmc_offset == DMC_DEFAULT_FW_OFFSET) {
drm_err(&i915->drm, "DMC firmware not supported for %c stepping\n",
si->stepping);
return 0;
}

/* dmc_offset is in dwords */
return package_size + dmc_offset * 4;
return package_size;

error_truncated:
drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
@ -606,7 +618,8 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
struct intel_dmc *dmc = &dev_priv->dmc;
const struct stepping_info *si = intel_get_stepping_info(dev_priv);
u32 readcount = 0;
u32 r;
u32 r, offset;
int id;

if (!fw)
return;
@ -627,9 +640,19 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,

readcount += r;

/* Extract dmc_header information */
dmc_header = (struct intel_dmc_header_base *)&fw->data[readcount];
parse_dmc_fw_header(dmc, dmc_header, fw->size - readcount);
for (id = 0; id < DMC_FW_MAX; id++) {
if (!dev_priv->dmc.dmc_info[id].present)
continue;

offset = readcount + dmc->dmc_info[id].dmc_offset * 4;
if (fw->size - offset < 0) {
drm_err(&dev_priv->drm, "Reading beyond the fw_size\n");
continue;
}

dmc_header = (struct intel_dmc_header_base *)&fw->data[offset];
parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, id);
}
}
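The per-id walk in parse_dmc_fw() boils down to the following sketch (hypothetical names): each table entry carries an offset in dwords relative to the end of the package header, and entries that are absent or point past the end of the image are skipped.

#include <stddef.h>
#include <stdio.h>

struct entry { int present; size_t offset_dw; };

static void walk(const struct entry *e, int n, size_t readcount, size_t image_size)
{
	for (int id = 0; id < n; id++) {
		size_t offset;

		if (!e[id].present)
			continue;

		offset = readcount + e[id].offset_dw * 4;
		if (offset >= image_size) {
			printf("id %d: beyond end of image, skipped\n", id);
			continue;
		}
		printf("id %d: header at byte %zu\n", id, offset);
	}
}

int main(void)
{
	const struct entry table[] = { { 1, 0 }, { 0, 0 }, { 1, 1024 } };

	walk(table, 3, 32, 256);	/* 256-byte image: last entry is rejected */
	return 0;
}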

static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv)
@ -705,7 +728,11 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
*/
intel_dmc_runtime_pm_get(dev_priv);

if (IS_ALDERLAKE_S(dev_priv)) {
if (IS_ALDERLAKE_P(dev_priv)) {
dmc->fw_path = ADLP_DMC_PATH;
dmc->required_version = ADLP_DMC_VERSION_REQUIRED;
dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE;
} else if (IS_ALDERLAKE_S(dev_priv)) {
dmc->fw_path = ADLS_DMC_PATH;
dmc->required_version = ADLS_DMC_VERSION_REQUIRED;
dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE;
@ -827,5 +854,5 @@ void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
intel_dmc_ucode_suspend(dev_priv);
drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref);

kfree(dev_priv->dmc.dmc_payload);
kfree(dev_priv->dmc.dmc_info[DMC_FW_MAIN].payload);
}
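The ADL-P hunk extends the usual platform-to-firmware-path ladder. A generic, table-driven version of that pattern looks roughly like this; the platform keys, file names and size below are placeholders, not the driver's real values.

#include <stdio.h>
#include <string.h>

struct fw_choice { const char *platform; const char *path; unsigned int max_size; };

static const struct fw_choice choices[] = {
	{ "adl-p", "example/adlp_dmc.bin", 0x6000 },	/* placeholder entries */
	{ "adl-s", "example/adls_dmc.bin", 0x6000 },
	{ NULL, NULL, 0 },
};

static const struct fw_choice *pick(const char *platform)
{
	for (const struct fw_choice *c = choices; c->platform; c++)
		if (!strcmp(c->platform, platform))
			return c;
	return NULL;	/* unknown platform: no DMC firmware requested */
}

int main(void)
{
	const struct fw_choice *c = pick("adl-p");

	if (c)
		printf("%s (max %u bytes)\n", c->path, c->max_size);
	return 0;
}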

@ -16,17 +16,30 @@ struct drm_i915_private;
#define DMC_VERSION_MAJOR(version) ((version) >> 16)
#define DMC_VERSION_MINOR(version) ((version) & 0xffff)

enum {
DMC_FW_MAIN = 0,
DMC_FW_PIPEA,
DMC_FW_PIPEB,
DMC_FW_MAX
};

struct intel_dmc {
struct work_struct work;
const char *fw_path;
u32 required_version;
u32 max_fw_size; /* bytes */
u32 *dmc_payload;
u32 dmc_fw_size; /* dwords */
u32 version;
struct dmc_fw_info {
u32 mmio_count;
i915_reg_t mmioaddr[20];
u32 mmiodata[20];
u32 dmc_offset;
u32 start_mmioaddr;
u32 dmc_fw_size; /* dwords */
u32 *payload;
bool present;
} dmc_info[DMC_FW_MAX];

u32 dc_state;
u32 target_dc_state;
u32 allowed_dc_mask;
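The new dmc_info[] array keys per-firmware state by the enum above. A minimal illustration of that layout, with hypothetical fields:

#include <stdio.h>

enum { FW_MAIN, FW_PIPEA, FW_PIPEB, FW_MAX };

struct fw_info { int present; unsigned int size_dw; };

int main(void)
{
	struct fw_info info[FW_MAX] = { [FW_MAIN] = { 1, 128 } };

	for (int id = 0; id < FW_MAX; id++)
		printf("id %d: %s (%u dwords)\n", id,
		       info[id].present ? "present" : "absent", info[id].size_dw);
	return 0;
}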
@ -3031,9 +3031,6 @@ void intel_read_dp_sdp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
unsigned int type)
{
if (encoder->type != INTEL_OUTPUT_DDI)
return;

switch (type) {
case DP_SDP_VSC:
intel_read_dp_vsc_sdp(encoder, crtc_state,
@ -4741,7 +4738,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
int refresh_rate)
{
struct intel_dp *intel_dp = dev_priv->drrs.dp;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

if (refresh_rate <= 0) {
@ -4755,7 +4752,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
return;
}

if (!intel_crtc) {
if (!crtc) {
drm_dbg_kms(&dev_priv->drm,
"DRRS: intel_crtc not initialized\n");
return;
@ -5238,6 +5235,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}

intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
intel_connector->panel.backlight.power = intel_pps_backlight_power;
intel_panel_setup_backlight(connector, pipe);

@ -308,9 +308,9 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
* connector
*/
if (new_crtc) {
struct intel_crtc *intel_crtc = to_intel_crtc(new_crtc);
struct intel_crtc *crtc = to_intel_crtc(new_crtc);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, intel_crtc);
intel_atomic_get_new_crtc_state(state, crtc);

if (!crtc_state ||
!drm_atomic_crtc_needs_modeset(&crtc_state->uapi) ||
@ -835,13 +835,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);

if (DISPLAY_VER(dev_priv) <= 12) {
ret = intel_dp_hdcp_init(dig_port, intel_connector);
if (ret)
drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
connector->name, connector->base.id);
}

/*
* Reuse the prop from the SST connector because we're
* not allowed to create new props after device registration.
@ -104,7 +104,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
|
||||
int i;
|
||||
u32 fbc_ctl;
|
||||
|
||||
/* Note: fbc.threshold == 1 for i8xx */
|
||||
/* Note: fbc.limit == 1 for i8xx */
|
||||
cfb_pitch = params->cfb_size / FBC_LL_SIZE;
|
||||
if (params->fb.stride < cfb_pitch)
|
||||
cfb_pitch = params->fb.stride;
|
||||
@ -148,16 +148,35 @@ static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
|
||||
return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN;
|
||||
}
|
||||
|
||||
static u32 g4x_dpfc_ctl_limit(struct drm_i915_private *i915)
|
||||
{
|
||||
const struct intel_fbc_reg_params *params = &i915->fbc.params;
|
||||
int limit = i915->fbc.limit;
|
||||
|
||||
if (params->fb.format->cpp[0] == 2)
|
||||
limit <<= 1;
|
||||
|
||||
switch (limit) {
|
||||
default:
|
||||
MISSING_CASE(limit);
|
||||
fallthrough;
|
||||
case 1:
|
||||
return DPFC_CTL_LIMIT_1X;
|
||||
case 2:
|
||||
return DPFC_CTL_LIMIT_2X;
|
||||
case 4:
|
||||
return DPFC_CTL_LIMIT_4X;
|
||||
}
|
||||
}
|
||||
|
||||
static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
|
||||
u32 dpfc_ctl;
|
||||
|
||||
dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN;
|
||||
if (params->fb.format->cpp[0] == 2)
|
||||
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
|
||||
else
|
||||
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
|
||||
|
||||
dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv);
|
||||
|
||||
if (params->fence_id >= 0) {
|
||||
dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
|
||||
@ -235,24 +254,10 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
|
||||
u32 dpfc_ctl;
|
||||
int threshold = dev_priv->fbc.threshold;
|
||||
|
||||
dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
|
||||
if (params->fb.format->cpp[0] == 2)
|
||||
threshold++;
|
||||
|
||||
switch (threshold) {
|
||||
case 4:
|
||||
case 3:
|
||||
dpfc_ctl |= DPFC_CTL_LIMIT_4X;
|
||||
break;
|
||||
case 2:
|
||||
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
|
||||
break;
|
||||
case 1:
|
||||
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
|
||||
break;
|
||||
}
|
||||
dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv);
|
||||
|
||||
if (params->fence_id >= 0) {
|
||||
dpfc_ctl |= DPFC_CTL_FENCE_EN;
|
||||
@ -300,7 +305,6 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
|
||||
u32 dpfc_ctl;
|
||||
int threshold = dev_priv->fbc.threshold;
|
||||
|
||||
/* Display WA #0529: skl, kbl, bxt. */
|
||||
if (DISPLAY_VER(dev_priv) == 9) {
|
||||
@ -318,21 +322,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
|
||||
if (IS_IVYBRIDGE(dev_priv))
|
||||
dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);
|
||||
|
||||
if (params->fb.format->cpp[0] == 2)
|
||||
threshold++;
|
||||
|
||||
switch (threshold) {
|
||||
case 4:
|
||||
case 3:
|
||||
dpfc_ctl |= DPFC_CTL_LIMIT_4X;
|
||||
break;
|
||||
case 2:
|
||||
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
|
||||
break;
|
||||
case 1:
|
||||
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
|
||||
break;
|
||||
}
|
||||
dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv);
|
||||
|
||||
if (params->fence_id >= 0) {
|
||||
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
|
||||
@ -433,13 +423,8 @@ static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915)
|
||||
return BIT_ULL(32);
|
||||
}
|
||||
|
||||
static int find_compression_threshold(struct drm_i915_private *dev_priv,
|
||||
struct drm_mm_node *node,
|
||||
unsigned int size,
|
||||
unsigned int fb_cpp)
|
||||
static u64 intel_fbc_stolen_end(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
int compression_threshold = 1;
|
||||
int ret;
|
||||
u64 end;
|
||||
|
||||
/* The FBC hardware for BDW/SKL doesn't have access to the stolen
|
||||
@ -452,51 +437,69 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
|
||||
else
|
||||
end = U64_MAX;
|
||||
|
||||
end = min(end, intel_fbc_cfb_base_max(dev_priv));
|
||||
return min(end, intel_fbc_cfb_base_max(dev_priv));
|
||||
}
|
||||
|
||||
/* HACK: This code depends on what we will do in *_enable_fbc. If that
* code changes, this code needs to change as well.
*
* The enable_fbc code will attempt to use one of our 2 compression
* thresholds, therefore, in that case, we only have 1 resort.
static int intel_fbc_max_limit(struct drm_i915_private *dev_priv, int fb_cpp)
{
/*
* FIXME: FBC1 can have arbitrary cfb stride,
* so we could support different compression ratios.
*/
if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
return 1;

/* WaFbcOnly1to1Ratio:ctg */
if (IS_G4X(dev_priv))
return 1;

/* FBC2 can only do 1:1, 1:2, 1:4 */
return fb_cpp == 2 ? 2 : 4;
}

static int find_compression_limit(struct drm_i915_private *dev_priv,
unsigned int size,
unsigned int fb_cpp)
{
struct intel_fbc *fbc = &dev_priv->fbc;
u64 end = intel_fbc_stolen_end(dev_priv);
int ret, limit = 1;

/* Try to over-allocate to reduce reallocations and fragmentation. */
ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
4096, 0, end);
ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb,
size <<= 1, 4096, 0, end);
if (ret == 0)
return compression_threshold;
return limit;

again:
/* HW's ability to limit the CFB is 1:4 */
if (compression_threshold > 4 ||
(fb_cpp == 2 && compression_threshold == 2))
return 0;

ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
4096, 0, end);
if (ret && DISPLAY_VER(dev_priv) <= 4) {
return 0;
} else if (ret) {
compression_threshold <<= 1;
goto again;
} else {
return compression_threshold;
for (; limit <= intel_fbc_max_limit(dev_priv, fb_cpp); limit <<= 1) {
ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb,
size >>= 1, 4096, 0, end);
if (ret == 0)
return limit;
}

return 0;
}
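find_compression_limit() first tries an over-sized 1:1 allocation and then trades buffer size for a higher compression limit until something fits. A self-contained sketch of that strategy, with a fake allocator standing in for the stolen-memory one:

#include <stdio.h>

static int alloc_fits(unsigned int size, unsigned int budget)
{
	return size <= budget;	/* stand-in for the stolen-memory allocator */
}

static int find_limit(unsigned int size, unsigned int budget, int max_limit)
{
	int limit = 1;

	if (alloc_fits(size <<= 1, budget))	/* over-allocate to reduce fragmentation */
		return limit;

	for (; limit <= max_limit; limit <<= 1)
		if (alloc_fits(size >>= 1, budget))
			return limit;

	return 0;	/* nothing fits, FBC stays disabled */
}

int main(void)
{
	/* cpp == 2 formats cap the limit at 2, everything else at 4 (as above) */
	printf("limit=%d\n", find_limit(4096, 1500, 4));
	return 0;
}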
|
||||
|
||||
static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
|
||||
unsigned int size, unsigned int fb_cpp)
|
||||
{
|
||||
struct intel_fbc *fbc = &dev_priv->fbc;
|
||||
struct drm_mm_node *compressed_llb;
|
||||
int ret;
|
||||
|
||||
drm_WARN_ON(&dev_priv->drm,
|
||||
drm_mm_node_allocated(&fbc->compressed_fb));
|
||||
drm_WARN_ON(&dev_priv->drm,
|
||||
drm_mm_node_allocated(&fbc->compressed_llb));
|
||||
|
||||
ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
|
||||
size, fb_cpp);
|
||||
if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
|
||||
ret = i915_gem_stolen_insert_node(dev_priv, &fbc->compressed_llb,
|
||||
4096, 4096);
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
|
||||
ret = find_compression_limit(dev_priv, size, fb_cpp);
|
||||
if (!ret)
|
||||
goto err_llb;
|
||||
else if (ret > 1) {
|
||||
@ -504,51 +507,46 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
|
||||
"Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
|
||||
}
|
||||
|
||||
fbc->threshold = ret;
|
||||
fbc->limit = ret;
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 5)
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
|
||||
fbc->compressed_fb.size, fbc->limit);
|
||||
|
||||
return 0;
|
||||
|
||||
err_llb:
|
||||
if (drm_mm_node_allocated(&fbc->compressed_llb))
|
||||
i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_llb);
|
||||
err:
|
||||
if (drm_mm_initialized(&dev_priv->mm.stolen))
|
||||
drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
static void intel_fbc_program_cfb(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_fbc *fbc = &dev_priv->fbc;
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 5) {
|
||||
intel_de_write(dev_priv, ILK_DPFC_CB_BASE,
|
||||
fbc->compressed_fb.start);
|
||||
else if (IS_GM45(dev_priv)) {
|
||||
} else if (IS_GM45(dev_priv)) {
|
||||
intel_de_write(dev_priv, DPFC_CB_BASE,
|
||||
fbc->compressed_fb.start);
|
||||
} else {
|
||||
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
|
||||
if (!compressed_llb)
|
||||
goto err_fb;
|
||||
|
||||
ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
|
||||
4096, 4096);
|
||||
if (ret)
|
||||
goto err_fb;
|
||||
|
||||
fbc->compressed_llb = compressed_llb;
|
||||
|
||||
GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
|
||||
fbc->compressed_fb.start,
|
||||
U32_MAX));
|
||||
GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
|
||||
fbc->compressed_llb->start,
|
||||
fbc->compressed_llb.start,
|
||||
U32_MAX));
|
||||
|
||||
intel_de_write(dev_priv, FBC_CFB_BASE,
|
||||
dev_priv->dsm.start + fbc->compressed_fb.start);
|
||||
intel_de_write(dev_priv, FBC_LL_BASE,
|
||||
dev_priv->dsm.start + compressed_llb->start);
|
||||
dev_priv->dsm.start + fbc->compressed_llb.start);
|
||||
}
|
||||
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
|
||||
fbc->compressed_fb.size, fbc->threshold);
|
||||
|
||||
return 0;
|
||||
|
||||
err_fb:
|
||||
kfree(compressed_llb);
|
||||
i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
|
||||
err_llb:
|
||||
if (drm_mm_initialized(&dev_priv->mm.stolen))
|
||||
drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
|
||||
@ -558,14 +556,9 @@ static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
|
||||
if (WARN_ON(intel_fbc_hw_is_active(dev_priv)))
|
||||
return;
|
||||
|
||||
if (!drm_mm_node_allocated(&fbc->compressed_fb))
|
||||
return;
|
||||
|
||||
if (fbc->compressed_llb) {
|
||||
i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
|
||||
kfree(fbc->compressed_llb);
|
||||
}
|
||||
|
||||
if (drm_mm_node_allocated(&fbc->compressed_llb))
|
||||
i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_llb);
|
||||
if (drm_mm_node_allocated(&fbc->compressed_fb))
|
||||
i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
|
||||
}
|
||||
|
||||
@ -753,7 +746,7 @@ static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
|
||||
struct intel_fbc *fbc = &dev_priv->fbc;
|
||||
|
||||
return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
|
||||
fbc->compressed_fb.size * fbc->threshold;
|
||||
fbc->compressed_fb.size * fbc->limit;
|
||||
}
|
||||
|
||||
static u16 intel_fbc_gen9_wa_cfb_stride(struct drm_i915_private *dev_priv)
|
||||
@ -763,7 +756,7 @@ static u16 intel_fbc_gen9_wa_cfb_stride(struct drm_i915_private *dev_priv)
|
||||
|
||||
if ((DISPLAY_VER(dev_priv) == 9) &&
|
||||
cache->fb.modifier != I915_FORMAT_MOD_X_TILED)
|
||||
return DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
|
||||
return DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->limit) * 8;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
@ -1302,6 +1295,8 @@ void intel_fbc_enable(struct intel_atomic_state *state,
|
||||
fbc->no_fbc_reason = "FBC enabled but not active yet\n";
|
||||
|
||||
fbc->crtc = crtc;
|
||||
|
||||
intel_fbc_program_cfb(dev_priv);
|
||||
out:
|
||||
mutex_unlock(&fbc->lock);
|
||||
}
|
||||
|
@ -339,28 +339,39 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(dev);
|
||||
struct intel_framebuffer *fb = NULL;
|
||||
struct drm_crtc *crtc;
|
||||
struct intel_crtc *intel_crtc;
|
||||
struct intel_crtc *crtc;
|
||||
unsigned int max_size = 0;
|
||||
|
||||
/* Find the largest fb */
|
||||
for_each_crtc(dev, crtc) {
|
||||
for_each_intel_crtc(dev, crtc) {
|
||||
struct intel_crtc_state *crtc_state =
|
||||
to_intel_crtc_state(crtc->base.state);
|
||||
struct intel_plane *plane =
|
||||
to_intel_plane(crtc->base.primary);
|
||||
struct intel_plane_state *plane_state =
|
||||
to_intel_plane_state(plane->base.state);
|
||||
struct drm_i915_gem_object *obj =
|
||||
intel_fb_obj(crtc->primary->state->fb);
|
||||
intel_crtc = to_intel_crtc(crtc);
|
||||
intel_fb_obj(plane_state->uapi.fb);
|
||||
|
||||
if (!crtc->state->active || !obj) {
|
||||
if (!crtc_state->uapi.active) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"pipe %c not active or no fb, skipping\n",
|
||||
pipe_name(intel_crtc->pipe));
|
||||
"[CRTC:%d:%s] not active, skipping\n",
|
||||
crtc->base.base.id, crtc->base.name);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!obj) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"[PLANE:%d:%s] no fb, skipping\n",
|
||||
plane->base.base.id, plane->base.name);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (obj->base.size > max_size) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"found possible fb from plane %c\n",
|
||||
pipe_name(intel_crtc->pipe));
|
||||
fb = to_intel_framebuffer(crtc->primary->state->fb);
|
||||
"found possible fb from [PLANE:%d:%s]\n",
|
||||
plane->base.base.id, plane->base.name);
|
||||
fb = to_intel_framebuffer(plane_state->uapi.fb);
|
||||
max_size = obj->base.size;
|
||||
}
|
||||
}
|
||||
@ -372,60 +383,62 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
|
||||
}
|
||||
|
||||
/* Now make sure all the pipes will fit into it */
|
||||
for_each_crtc(dev, crtc) {
|
||||
for_each_intel_crtc(dev, crtc) {
|
||||
struct intel_crtc_state *crtc_state =
|
||||
to_intel_crtc_state(crtc->base.state);
|
||||
struct intel_plane *plane =
|
||||
to_intel_plane(crtc->base.primary);
|
||||
unsigned int cur_size;
|
||||
|
||||
intel_crtc = to_intel_crtc(crtc);
|
||||
|
||||
if (!crtc->state->active) {
|
||||
if (!crtc_state->uapi.active) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"pipe %c not active, skipping\n",
|
||||
pipe_name(intel_crtc->pipe));
|
||||
"[CRTC:%d:%s] not active, skipping\n",
|
||||
crtc->base.base.id, crtc->base.name);
|
||||
continue;
|
||||
}
|
||||
|
||||
drm_dbg_kms(&i915->drm, "checking plane %c for BIOS fb\n",
|
||||
pipe_name(intel_crtc->pipe));
|
||||
drm_dbg_kms(&i915->drm, "checking [PLANE:%d:%s] for BIOS fb\n",
|
||||
plane->base.base.id, plane->base.name);
|
||||
|
||||
/*
|
||||
* See if the plane fb we found above will fit on this
|
||||
* pipe. Note we need to use the selected fb's pitch and bpp
|
||||
* rather than the current pipe's, since they differ.
|
||||
*/
|
||||
cur_size = crtc->state->adjusted_mode.crtc_hdisplay;
|
||||
cur_size = crtc_state->uapi.adjusted_mode.crtc_hdisplay;
|
||||
cur_size = cur_size * fb->base.format->cpp[0];
|
||||
if (fb->base.pitches[0] < cur_size) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"fb not wide enough for plane %c (%d vs %d)\n",
|
||||
pipe_name(intel_crtc->pipe),
|
||||
"fb not wide enough for [PLANE:%d:%s] (%d vs %d)\n",
|
||||
plane->base.base.id, plane->base.name,
|
||||
cur_size, fb->base.pitches[0]);
|
||||
fb = NULL;
|
||||
break;
|
||||
}
|
||||
|
||||
cur_size = crtc->state->adjusted_mode.crtc_vdisplay;
|
||||
cur_size = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
|
||||
cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
|
||||
cur_size *= fb->base.pitches[0];
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"pipe %c area: %dx%d, bpp: %d, size: %d\n",
|
||||
pipe_name(intel_crtc->pipe),
|
||||
crtc->state->adjusted_mode.crtc_hdisplay,
|
||||
crtc->state->adjusted_mode.crtc_vdisplay,
|
||||
"[CRTC:%d:%s] area: %dx%d, bpp: %d, size: %d\n",
|
||||
crtc->base.base.id, crtc->base.name,
|
||||
crtc_state->uapi.adjusted_mode.crtc_hdisplay,
|
||||
crtc_state->uapi.adjusted_mode.crtc_vdisplay,
|
||||
fb->base.format->cpp[0] * 8,
|
||||
cur_size);
|
||||
|
||||
if (cur_size > max_size) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"fb not big enough for plane %c (%d vs %d)\n",
|
||||
pipe_name(intel_crtc->pipe),
|
||||
"fb not big enough for [PLANE:%d:%s] (%d vs %d)\n",
|
||||
plane->base.base.id, plane->base.name,
|
||||
cur_size, max_size);
|
||||
fb = NULL;
|
||||
break;
|
||||
}
|
||||
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"fb big enough for plane %c (%d >= %d)\n",
|
||||
pipe_name(intel_crtc->pipe),
|
||||
"fb big enough [PLANE:%d:%s] (%d >= %d)\n",
|
||||
plane->base.base.id, plane->base.name,
|
||||
max_size, cur_size);
|
||||
}
|
||||
|
||||
@ -441,15 +454,20 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
|
||||
drm_framebuffer_get(&ifbdev->fb->base);
|
||||
|
||||
/* Final pass to check if any active pipes don't have fbs */
|
||||
for_each_crtc(dev, crtc) {
|
||||
intel_crtc = to_intel_crtc(crtc);
|
||||
for_each_intel_crtc(dev, crtc) {
|
||||
struct intel_crtc_state *crtc_state =
|
||||
to_intel_crtc_state(crtc->base.state);
|
||||
struct intel_plane *plane =
|
||||
to_intel_plane(crtc->base.primary);
|
||||
struct intel_plane_state *plane_state =
|
||||
to_intel_plane_state(plane->base.state);
|
||||
|
||||
if (!crtc->state->active)
|
||||
if (!crtc_state->uapi.active)
|
||||
continue;
|
||||
|
||||
drm_WARN(dev, !crtc->primary->state->fb,
|
||||
"re-used BIOS config but lost an fb on crtc %d\n",
|
||||
crtc->base.id);
|
||||
drm_WARN(dev, !plane_state->uapi.fb,
|
||||
"re-used BIOS config but lost an fb on [PLANE:%d:%s]\n",
|
||||
plane->base.base.id, plane->base.name);
|
||||
}
|
||||
|
||||
|
||||
|
@ -4,7 +4,6 @@
|
||||
*/
|
||||
#include "intel_atomic.h"
|
||||
#include "intel_ddi.h"
|
||||
#include "intel_ddi_buf_trans.h"
|
||||
#include "intel_de.h"
|
||||
#include "intel_display_types.h"
|
||||
#include "intel_fdi.h"
|
||||
@ -96,10 +95,10 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
|
||||
}
|
||||
}
|
||||
|
||||
int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
|
||||
int ilk_fdi_compute_config(struct intel_crtc *crtc,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct drm_device *dev = intel_crtc->base.dev;
|
||||
struct drm_device *dev = crtc->base.dev;
|
||||
struct drm_i915_private *i915 = to_i915(dev);
|
||||
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
|
||||
int lane, link_bw, fdi_dotclock, ret;
|
||||
@ -125,7 +124,7 @@ retry:
|
||||
intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
|
||||
link_bw, &pipe_config->fdi_m_n, false, false);
|
||||
|
||||
ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
|
||||
ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config);
|
||||
if (ret == -EDEADLK)
|
||||
return ret;
|
||||
|
||||
@ -569,9 +568,9 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
|
||||
u32 temp, i, rx_ctl_val;
|
||||
int n_entries;
|
||||
|
||||
intel_ddi_get_buf_trans_fdi(dev_priv, &n_entries);
|
||||
encoder->get_buf_trans(encoder, crtc_state, &n_entries);
|
||||
|
||||
intel_prepare_dp_ddi_buffers(encoder, crtc_state);
|
||||
hsw_prepare_dp_ddi_buffers(encoder, crtc_state);
|
||||
|
||||
/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
|
||||
* mode set "sequence for CRT port" document:
|
||||
@ -691,9 +690,9 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
|
||||
|
||||
void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
|
||||
enum pipe pipe = intel_crtc->pipe;
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
enum pipe pipe = crtc->pipe;
|
||||
i915_reg_t reg;
|
||||
u32 temp;
|
||||
|
||||
@ -726,11 +725,11 @@ void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
|
||||
}
|
||||
}
|
||||
|
||||
void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
|
||||
void ilk_fdi_pll_disable(struct intel_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = intel_crtc->base.dev;
|
||||
struct drm_device *dev = crtc->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
enum pipe pipe = intel_crtc->pipe;
|
||||
enum pipe pipe = crtc->pipe;
|
||||
i915_reg_t reg;
|
||||
u32 temp;
|
||||
|
||||
|
@ -270,8 +270,8 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
|
||||
{
|
||||
const u32 *data = frame;
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
|
||||
u32 val = intel_de_read(dev_priv, reg);
|
||||
int i;
|
||||
|
||||
@ -286,13 +286,13 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
|
||||
intel_de_write(dev_priv, reg, val);
|
||||
|
||||
for (i = 0; i < len; i += 4) {
|
||||
intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe),
|
||||
intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe),
|
||||
*data);
|
||||
data++;
|
||||
}
|
||||
/* Write every possible data byte to force correct ECC calculation. */
|
||||
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
|
||||
intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
|
||||
intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), 0);
|
||||
|
||||
val |= g4x_infoframe_enable(type);
|
||||
val &= ~VIDEO_DIP_FREQ_MASK;
|
||||
@ -349,8 +349,8 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
|
||||
{
|
||||
const u32 *data = frame;
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
|
||||
u32 val = intel_de_read(dev_priv, reg);
|
||||
int i;
|
||||
|
||||
@ -368,13 +368,13 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
|
||||
intel_de_write(dev_priv, reg, val);
|
||||
|
||||
for (i = 0; i < len; i += 4) {
|
||||
intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe),
|
||||
intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe),
|
||||
*data);
|
||||
data++;
|
||||
}
|
||||
/* Write every possible data byte to force correct ECC calculation. */
|
||||
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
|
||||
intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
|
||||
intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), 0);
|
||||
|
||||
val |= g4x_infoframe_enable(type);
|
||||
val &= ~VIDEO_DIP_FREQ_MASK;
|
||||
@ -427,8 +427,8 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
|
||||
{
|
||||
const u32 *data = frame;
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(crtc->pipe);
|
||||
u32 val = intel_de_read(dev_priv, reg);
|
||||
int i;
|
||||
|
||||
@ -444,13 +444,13 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
|
||||
|
||||
for (i = 0; i < len; i += 4) {
|
||||
intel_de_write(dev_priv,
|
||||
VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
|
||||
VLV_TVIDEO_DIP_DATA(crtc->pipe), *data);
|
||||
data++;
|
||||
}
|
||||
/* Write every possible data byte to force correct ECC calculation. */
|
||||
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
|
||||
intel_de_write(dev_priv,
|
||||
VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
|
||||
VLV_TVIDEO_DIP_DATA(crtc->pipe), 0);
|
||||
|
||||
val |= g4x_infoframe_enable(type);
|
||||
val &= ~VIDEO_DIP_FREQ_MASK;
|
||||
@ -1040,10 +1040,10 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
|
||||
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
|
||||
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
|
||||
i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
|
||||
u32 val = intel_de_read(dev_priv, reg);
|
||||
u32 port = VIDEO_DIP_PORT(encoder->port);
|
||||
|
||||
@ -1099,9 +1099,9 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
|
||||
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
|
||||
i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
|
||||
u32 val = intel_de_read(dev_priv, reg);
|
||||
|
||||
assert_hdmi_port_disabled(intel_hdmi);
|
||||
@ -1148,9 +1148,9 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
|
||||
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
|
||||
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(crtc->pipe);
|
||||
u32 val = intel_de_read(dev_priv, reg);
|
||||
u32 port = VIDEO_DIP_PORT(encoder->port);
|
||||
|
||||
@ -1465,14 +1465,12 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector,
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
|
||||
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
|
||||
struct drm_crtc *crtc = connector->base.state->crtc;
|
||||
struct intel_crtc *intel_crtc = container_of(crtc,
|
||||
struct intel_crtc, base);
|
||||
struct intel_crtc *crtc = to_intel_crtc(connector->base.state->crtc);
|
||||
u32 scanline;
|
||||
int ret;
|
||||
|
||||
for (;;) {
|
||||
scanline = intel_de_read(dev_priv, PIPEDSL(intel_crtc->pipe));
|
||||
scanline = intel_de_read(dev_priv, PIPEDSL(crtc->pipe));
|
||||
if (scanline > 100 && scanline < 200)
|
||||
break;
|
||||
usleep_range(25, 50);
|
||||
|
@ -411,12 +411,12 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
|
||||
struct intel_connector *intel_connector =
|
||||
lvds_encoder->attached_connector;
|
||||
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
|
||||
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
|
||||
unsigned int lvds_bpp;
|
||||
int ret;
|
||||
|
||||
/* Should never happen!! */
|
||||
if (DISPLAY_VER(dev_priv) < 4 && intel_crtc->pipe == 0) {
|
||||
if (DISPLAY_VER(dev_priv) < 4 && crtc->pipe == 0) {
|
||||
drm_err(&dev_priv->drm, "Can't support LVDS on pipe A\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -265,32 +265,44 @@ static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
return val;
}

static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u16 val;
ssize_t r;
u16 w;
u8 y;

/*
* Returning the default X granularity if granularity not required or
* if DPCD read fails
*/
if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
return 4;
/* If sink don't have specific granularity requirements set legacy ones */
if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
/* As PSR2 HW sends full lines, we do not care about x granularity */
w = 4;
y = 4;
goto exit;
}

r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
if (r != 2)
drm_dbg_kms(&i915->drm,
"Unable to read DP_PSR2_SU_X_GRANULARITY\n");

/*
* Spec says that if the value read is 0 the default granularity should
* be used instead.
*/
if (r != 2 || val == 0)
val = 4;
if (r != 2 || w == 0)
w = 4;

return val;
r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
if (r != 1) {
drm_dbg_kms(&i915->drm,
"Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
y = 4;
}
if (y == 0)
y = 1;

exit:
intel_dp->psr.su_w_granularity = w;
intel_dp->psr.su_y_granularity = y;
}

void intel_psr_init_dpcd(struct intel_dp *intel_dp)
@ -346,8 +358,7 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
if (intel_dp->psr.sink_psr2_support) {
intel_dp->psr.colorimetry_support =
intel_dp_get_colorimetry_status(intel_dp);
intel_dp->psr.su_x_granularity =
intel_dp_get_su_x_granulartiy(intel_dp);
intel_dp_get_su_granularity(intel_dp);
}
}
}
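The fallback rules above can be captured in a few lines: a failed read or a reported 0 for the width falls back to the legacy default of 4, and the y granularity falls back to 4 on a failed read or to 1 when the sink reports 0. This is a simplified model, not the DPCD helpers themselves:

#include <stdio.h>

struct su_granularity { unsigned int w, y; };

static struct su_granularity resolve(int w_read_ok, unsigned int w,
				     int y_read_ok, unsigned int y)
{
	struct su_granularity g;

	g.w = (!w_read_ok || w == 0) ? 4 : w;
	g.y = !y_read_ok ? 4 : (y == 0 ? 1 : y);
	return g;
}

int main(void)
{
	struct su_granularity g = resolve(1, 0, 1, 0);

	printf("w=%u y=%u\n", g.w, g.y);	/* -> w=4 y=1 */
	return 0;
}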
|
||||
@ -407,6 +418,9 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
|
||||
dpcd_val |= DP_PSR_CRC_VERIFICATION;
|
||||
}
|
||||
|
||||
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
|
||||
dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
|
||||
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
|
||||
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
|
||||
@ -531,7 +545,34 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
|
||||
val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1);
|
||||
val |= intel_psr2_get_tp_time(intel_dp);
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 12) {
|
||||
/* Wa_22012278275:adlp */
|
||||
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_D1)) {
|
||||
static const u8 map[] = {
|
||||
2, /* 5 lines */
|
||||
1, /* 6 lines */
|
||||
0, /* 7 lines */
|
||||
3, /* 8 lines */
|
||||
6, /* 9 lines */
|
||||
5, /* 10 lines */
|
||||
4, /* 11 lines */
|
||||
7, /* 12 lines */
|
||||
};
|
||||
/*
|
||||
* Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
|
||||
* comments bellow for more information
|
||||
*/
|
||||
u32 tmp, lines = 7;
|
||||
|
||||
val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
|
||||
|
||||
tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
|
||||
tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
|
||||
val |= tmp;
|
||||
|
||||
tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
|
||||
tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
|
||||
val |= tmp;
|
||||
} else if (DISPLAY_VER(dev_priv) >= 12) {
|
||||
/*
|
||||
* TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
|
||||
* values from BSpec. In order to setting an optimal power
|
||||
@ -547,6 +588,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
|
||||
val |= EDP_PSR2_FAST_WAKE(7);
|
||||
}
|
||||
|
||||
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
|
||||
val |= EDP_PSR2_SU_SDP_SCANLINE;
|
||||
|
||||
if (intel_dp->psr.psr2_sel_fetch_enabled) {
|
||||
/* WA 1408330847 */
|
||||
if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) ||
|
||||
@ -689,6 +733,10 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
|
||||
if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
|
||||
return;
|
||||
|
||||
/* Wa_16011303918:adlp */
|
||||
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0))
|
||||
return;
|
||||
|
||||
/*
|
||||
* DC3CO Exit time 200us B.Spec 49196
|
||||
* PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
|
||||
@ -742,6 +790,63 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
|
||||
return crtc_state->enable_psr2_sel_fetch = true;
|
||||
}
|
||||
|
||||
static bool psr2_granularity_check(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
u16 y_granularity = 0;

/* PSR2 HW only send full lines so we only need to validate the width */
if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
return false;

if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
return false;

/* HW tracking is only aligned to 4 lines */
if (!crtc_state->enable_psr2_sel_fetch)
return intel_dp->psr.su_y_granularity == 4;

/*
* For SW tracking we can adjust the y to match sink requirement if
* multiple of 4
*/
if (intel_dp->psr.su_y_granularity <= 2)
y_granularity = 4;
else if ((intel_dp->psr.su_y_granularity % 4) == 0)
y_granularity = intel_dp->psr.su_y_granularity;

if (y_granularity == 0 || crtc_vdisplay % y_granularity)
return false;

crtc_state->su_y_granularity = y_granularity;
return true;
}
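A hedged sketch of the negotiation psr2_granularity_check() performs for software tracking: a sink granularity of 1 or 2 is rounded up to the hardware's 4-line unit, a multiple of 4 is used as-is, and anything else (or a vertical resolution that is not a multiple of the result) rejects PSR2.

#include <stdio.h>

static int pick_y_granularity(unsigned int sink_y, unsigned int vdisplay)
{
	unsigned int y = 0;

	if (sink_y <= 2)
		y = 4;
	else if (sink_y % 4 == 0)
		y = sink_y;

	if (y == 0 || vdisplay % y)
		return 0;	/* PSR2 selective update cannot be used */
	return y;
}

int main(void)
{
	printf("%d %d %d\n", pick_y_granularity(2, 1080),
	       pick_y_granularity(8, 1080), pick_y_granularity(6, 1080));
	return 0;
}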
|
||||
|
||||
static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
|
||||
struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
u32 hblank_total, hblank_ns, req_ns;
|
||||
|
||||
hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
|
||||
hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
|
||||
|
||||
/* From spec: (72 / number of lanes) * 1000 / symbol clock frequency MHz */
|
||||
req_ns = (72 / crtc_state->lane_count) * 1000 / (crtc_state->port_clock / 1000);
|
||||
|
||||
if ((hblank_ns - req_ns) > 100)
|
||||
return true;
|
||||
|
||||
if (DISPLAY_VER(dev_priv) < 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
|
||||
return false;
|
||||
|
||||
crtc_state->req_psr2_sdp_prior_scanline = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
|
||||
struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
@ -824,19 +929,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* HW sends SU blocks of size four scan lines, which means the starting
|
||||
* X coordinate and Y granularity requirements will always be met. We
|
||||
* only need to validate the SU block width is a multiple of
|
||||
* x granularity.
|
||||
*/
|
||||
if (crtc_hdisplay % intel_dp->psr.su_x_granularity) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
|
||||
crtc_hdisplay, intel_dp->psr.su_x_granularity);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (HAS_PSR2_SEL_FETCH(dev_priv)) {
|
||||
if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
|
||||
!HAS_PSR_HW_TRACKING(dev_priv)) {
|
||||
@ -853,6 +945,11 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!psr2_granularity_check(intel_dp, crtc_state)) {
|
||||
drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!crtc_state->enable_psr2_sel_fetch &&
|
||||
(crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
@ -862,6 +959,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Wa_16011303918:adlp */
|
||||
if (crtc_state->vrr.enable &&
|
||||
IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0)) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"PSR2 not enabled, not compatible with HW stepping + VRR\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
|
||||
return true;
|
||||
}
|
||||
@ -1048,6 +1159,14 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp)
|
||||
intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
|
||||
intel_dp->psr.psr2_sel_fetch_enabled ?
|
||||
IGNORE_PSR2_HW_TRACKING : 0);
|
||||
|
||||
/* Wa_16011168373:adlp */
|
||||
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) &&
|
||||
intel_dp->psr.psr2_enabled)
|
||||
intel_de_rmw(dev_priv,
|
||||
TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
|
||||
TRANS_SET_CONTEXT_LATENCY_MASK,
|
||||
TRANS_SET_CONTEXT_LATENCY_VALUE(1));
|
||||
}
|
||||
|
||||
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
|
||||
@ -1101,6 +1220,8 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
|
||||
intel_dp->psr.dc3co_exit_delay = val;
|
||||
intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
|
||||
intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
|
||||
intel_dp->psr.req_psr2_sdp_prior_scanline =
|
||||
crtc_state->req_psr2_sdp_prior_scanline;
|
||||
|
||||
if (!psr_interrupt_error_check(intel_dp))
|
||||
return;
|
||||
@ -1225,6 +1346,13 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
|
||||
intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
|
||||
DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
|
||||
|
||||
/* Wa_16011168373:adlp */
|
||||
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) &&
|
||||
intel_dp->psr.psr2_enabled)
|
||||
intel_de_rmw(dev_priv,
|
||||
TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
|
||||
TRANS_SET_CONTEXT_LATENCY_MASK, 0);
|
||||
|
||||
/* Disable PSR on Sink */
|
||||
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
|
||||
|
||||
@ -1432,6 +1560,16 @@ static void clip_area_update(struct drm_rect *overlap_damage_area,
|
||||
overlap_damage_area->y2 = damage_area->y2;
|
||||
}
|
||||
|
||||
static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
struct drm_rect *pipe_clip)
{
const u16 y_alignment = crtc_state->su_y_granularity;

pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
if (pipe_clip->y2 % y_alignment)
pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
}
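The alignment helper above rounds the damaged range outward to the selected granularity; the same operation in isolation:

#include <stdio.h>

struct range { int y1, y2; };

static void align_range(struct range *r, int align)
{
	r->y1 -= r->y1 % align;				/* round start down */
	if (r->y2 % align)
		r->y2 = (r->y2 / align + 1) * align;	/* round end up */
}

int main(void)
{
	struct range r = { 13, 50 };

	align_range(&r, 4);
	printf("%d..%d\n", r.y1, r.y2);	/* -> 12..52 */
	return 0;
}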
|
||||
|
||||
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc)
|
||||
{
|
||||
@ -1540,10 +1678,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
|
||||
if (full_update)
|
||||
goto skip_sel_fetch_set_loop;
|
||||
|
||||
/* It must be aligned to 4 lines */
|
||||
pipe_clip.y1 -= pipe_clip.y1 % 4;
|
||||
if (pipe_clip.y2 % 4)
|
||||
pipe_clip.y2 = ((pipe_clip.y2 / 4) + 1) * 4;
|
||||
intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
|
||||
|
||||
/*
|
||||
* Now that we have the pipe damaged area check if it intersect with
|
||||
|
@ -53,6 +53,12 @@ static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
|
||||
drm_info(&i915->drm, "Applying Increase DDI Disabled quirk\n");
|
||||
}
|
||||
|
||||
static void quirk_no_pps_backlight_power_hook(struct drm_i915_private *i915)
|
||||
{
|
||||
i915->quirks |= QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK;
|
||||
drm_info(&i915->drm, "Applying no pps backlight power quirk\n");
|
||||
}
|
||||
|
||||
struct intel_quirk {
|
||||
int device;
|
||||
int subsystem_vendor;
|
||||
@ -72,6 +78,12 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int intel_dmi_no_pps_backlight(const struct dmi_system_id *id)
|
||||
{
|
||||
DRM_INFO("No pps backlight support on %s\n", id->ident);
|
||||
return 1;
|
||||
}
|
||||
|
||||
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
|
||||
{
|
||||
.dmi_id_list = &(const struct dmi_system_id[]) {
|
||||
@ -96,6 +108,28 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = {
|
||||
},
|
||||
.hook = quirk_invert_brightness,
|
||||
},
|
||||
{
|
||||
.dmi_id_list = &(const struct dmi_system_id[]) {
|
||||
{
|
||||
.callback = intel_dmi_no_pps_backlight,
|
||||
.ident = "Google Lillipup sku524294",
|
||||
.matches = {DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Google"),
|
||||
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Lindar"),
|
||||
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "sku524294"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.callback = intel_dmi_no_pps_backlight,
|
||||
.ident = "Google Lillipup sku524295",
|
||||
.matches = {DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Google"),
|
||||
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Lindar"),
|
||||
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "sku524295"),
|
||||
},
|
||||
},
|
||||
{ }
|
||||
},
|
||||
.hook = quirk_no_pps_backlight_power_hook,
|
||||
},
|
||||
};
|
||||
|
||||
static struct intel_quirk intel_quirks[] = {
|
||||
|
@ -1824,7 +1824,7 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
|
||||
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
|
||||
u32 temp;
|
||||
bool input1, input2;
|
||||
int i;
|
||||
@ -1835,7 +1835,7 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
|
||||
intel_sdvo_write_sdvox(intel_sdvo, temp);
|
||||
|
||||
for (i = 0; i < 2; i++)
|
||||
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
|
||||
intel_wait_for_vblank(dev_priv, crtc->pipe);
success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
/*

@ -556,7 +556,7 @@ intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
}
static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
int required_lanes)
int required_lanes, bool force_disconnect)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port_mode old_tc_mode = dig_port->tc_mode;

@ -572,6 +572,7 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
}
icl_tc_phy_disconnect(dig_port);
if (!force_disconnect)
icl_tc_phy_connect(dig_port, required_lanes);
drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",

@ -662,7 +663,7 @@ bool intel_tc_port_connected(struct intel_encoder *encoder)
}
static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
int required_lanes)
int required_lanes, bool force_disconnect)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
intel_wakeref_t wakeref;

@ -676,8 +677,9 @@ static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
tc_cold_wref = tc_cold_block(dig_port);
if (intel_tc_port_needs_reset(dig_port))
intel_tc_port_reset_mode(dig_port, required_lanes);
if (force_disconnect || intel_tc_port_needs_reset(dig_port))
intel_tc_port_reset_mode(dig_port, required_lanes,
force_disconnect);
tc_cold_unblock(dig_port, tc_cold_wref);
}

@ -688,7 +690,7 @@ static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
__intel_tc_port_lock(dig_port, 1);
__intel_tc_port_lock(dig_port, 1, false);
}
void intel_tc_port_unlock(struct intel_digital_port *dig_port)

@ -702,6 +704,24 @@ void intel_tc_port_unlock(struct intel_digital_port *dig_port)
wakeref);
}
/**
 * intel_tc_port_disconnect_phy: disconnect TypeC PHY from display port
 * @dig_port: digital port
 *
 * Disconnect the given digital port from its TypeC PHY (handing back the
 * control of the PHY to the TypeC subsystem). The only purpose of this
 * function is to force the disconnect even with a TypeC display output still
 * plugged to the TypeC connector, which is required by the TypeC firmwares
 * during system suspend and shutdown. Otherwise - during the unplug event
 * handling - the PHY ownership is released automatically by
 * intel_tc_port_reset_mode(), when calling this function is not required.
 */
void intel_tc_port_disconnect_phy(struct intel_digital_port *dig_port)
{
__intel_tc_port_lock(dig_port, 1, true);
intel_tc_port_unlock(dig_port);
}
bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
{
return mutex_is_locked(&dig_port->tc_lock) ||

@ -711,7 +731,7 @@ bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
int required_lanes)
{
__intel_tc_port_lock(dig_port, required_lanes);
__intel_tc_port_lock(dig_port, required_lanes, false);
dig_port->tc_link_refcount++;
intel_tc_port_unlock(dig_port);
}
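The new force_disconnect path is what intel_tc_port_disconnect_phy() documents above: hand the PHY back to the TypeC firmware at suspend/shutdown even though the sink may still be plugged. A minimal sketch of the intended call pattern, assuming a digital port reached through the usual enc_to_dig_port() helper; the suspend hook itself is hypothetical and not part of this diff:

/* Hypothetical encoder suspend hook, for illustration only. */
static void example_tc_encoder_suspend(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);

	/* Force the PHY disconnect even with the output still plugged. */
	intel_tc_port_disconnect_phy(dig_port);
}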
@ -13,6 +13,8 @@ struct intel_digital_port;
struct intel_encoder;
bool intel_tc_port_connected(struct intel_encoder *encoder);
void intel_tc_port_disconnect_phy(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port);
@ -1420,7 +1420,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct intel_tv_connector_state *tv_conn_state =
to_intel_tv_connector_state(conn_state);

@ -1466,7 +1466,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
break;
}
tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe);
tv_ctl |= TV_ENC_PIPE_SEL(crtc->pipe);
switch (tv_mode->oversample) {
case 8:

@ -1571,8 +1571,7 @@ static int
intel_tv_detect_type(struct intel_tv *intel_tv,
struct drm_connector *connector)
{
struct drm_crtc *crtc = connector->state->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc *crtc = to_intel_crtc(connector->state->crtc);
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
u32 tv_ctl, save_tv_ctl;

@ -1594,7 +1593,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
/* Poll for TV detection */
tv_ctl &= ~(TV_ENC_ENABLE | TV_ENC_PIPE_SEL_MASK | TV_TEST_MODE_MASK);
tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe);
tv_ctl |= TV_ENC_PIPE_SEL(crtc->pipe);
tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
tv_dac |= (TVDAC_STATE_CHG_EN |

@ -1619,7 +1618,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
intel_de_write(dev_priv, TV_DAC, tv_dac);
intel_de_posting_read(dev_priv, TV_DAC);
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
intel_wait_for_vblank(dev_priv, crtc->pipe);
type = -1;
tv_dac = intel_de_read(dev_priv, TV_DAC);

@ -1652,7 +1651,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
intel_de_posting_read(dev_priv, TV_CTL);
/* For unknown reasons the hw barfs if we don't do this vblank wait. */
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
intel_wait_for_vblank(dev_priv, crtc->pipe);
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
@ -29,6 +29,9 @@ void intel_vga_disable(struct drm_i915_private *dev_priv)
i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
u8 sr1;
if (intel_de_read(dev_priv, vga_reg) & VGA_DISP_DISABLE)
return;
/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
outb(SR01, VGA_SR_INDEX);
@ -96,9 +96,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
{
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
struct intel_crtc *intel_crtc =
to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;

@ -141,7 +140,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
drm_dbg_kms(&dev_priv->drm,
"scaler_user index %u.%u: "
"Staged freeing scaler id %d scaler_users = 0x%x\n",
intel_crtc->pipe, scaler_user, *scaler_id,
crtc->pipe, scaler_user, *scaler_id,
scaler_state->scaler_users);
*scaler_id = -1;
}

@ -167,7 +166,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
drm_dbg_kms(&dev_priv->drm,
"scaler_user index %u.%u: src %ux%u dst %ux%u "
"size is out of scaler range\n",
intel_crtc->pipe, scaler_user, src_w, src_h,
crtc->pipe, scaler_user, src_w, src_h,
dst_w, dst_h);
return -EINVAL;
}

@ -176,7 +175,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
scaler_state->scaler_users |= (1 << scaler_user);
drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
scaler_state->scaler_users);
return 0;

@ -515,17 +514,17 @@ skl_program_plane_scaler(struct intel_plane *plane,
(crtc_w << 16) | crtc_h);
}
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
static void skl_detach_scaler(struct intel_crtc *crtc, int id)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
intel_de_write_fw(dev_priv, SKL_PS_CTRL(crtc->pipe, id), 0);
intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(crtc->pipe, id), 0);
intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, id), 0);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

@ -535,15 +534,15 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
*/
void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
int i;
/* loop through and disable scalers that aren't in use */
for (i = 0; i < intel_crtc->num_scalers; i++) {
for (i = 0; i < crtc->num_scalers; i++) {
if (!scaler_state->scalers[i].in_use)
skl_detach_scaler(intel_crtc, i);
skl_detach_scaler(crtc, i);
}
}
@ -780,10 +780,9 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct drm_crtc *crtc = pipe_config->uapi.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum port port;
u32 val;
bool glk_cold_boot = false;

@ -1389,7 +1388,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
enum port port;

@ -1397,7 +1396,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
u32 val, tmp;
u16 mode_hdisplay;
drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(intel_crtc->pipe));
drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(crtc->pipe));
mode_hdisplay = adjusted_mode->crtc_hdisplay;

@ -1424,7 +1423,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
intel_de_write(dev_priv, MIPI_CTRL(port),
tmp | READ_REQUEST_PRIORITY_HIGH);
} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
enum pipe pipe = intel_crtc->pipe;
enum pipe pipe = crtc->pipe;
tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
tmp &= ~BXT_PIPE_SELECT_MASK;
@ -636,7 +636,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
intel_uncore_read16(uncore, C0DRB3_BW));
seq_printf(m, "C1DRB3 = 0x%04x\n",
intel_uncore_read16(uncore, C1DRB3_BW));
} else if (INTEL_GEN(dev_priv) >= 6) {
} else if (GRAPHICS_VER(dev_priv) >= 6) {
seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
intel_uncore_read(uncore, MAD_DIMM_C0));
seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
@ -270,8 +270,10 @@ struct drm_i915_display_funcs {
int (*bw_calc_min_cdclk)(struct intel_atomic_state *state);
int (*get_fifo_size)(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane);
int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state);
int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state);
int (*compute_pipe_wm)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int (*compute_intermediate_wm)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void (*initial_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void (*atomic_update_watermarks)(struct intel_atomic_state *state,

@ -346,13 +348,14 @@ struct intel_fbc {
/* This is always the inner lock when overlapping with struct_mutex and
* it's the outer lock when overlapping with stolen_lock. */
struct mutex lock;
unsigned threshold;
unsigned int possible_framebuffer_bits;
unsigned int busy_bits;
struct intel_crtc *crtc;
struct drm_mm_node compressed_fb;
struct drm_mm_node *compressed_llb;
struct drm_mm_node compressed_llb;
u8 limit;
bool false_color;

@ -467,6 +470,7 @@ struct i915_drrs {
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
#define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8)
struct intel_fbdev;
struct intel_fbc_work;

@ -1237,21 +1241,6 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
#define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id)
/*
* Deprecated: this will be replaced by individual IP checks:
* GRAPHICS_VER(), MEDIA_VER() and DISPLAY_VER()
*/
#define INTEL_GEN(dev_priv) GRAPHICS_VER(dev_priv)
/*
* Deprecated: use IS_GRAPHICS_VER(), IS_MEDIA_VER() and IS_DISPLAY_VER() as
* appropriate.
*/
#define IS_GEN_RANGE(dev_priv, s, e) IS_GRAPHICS_VER(dev_priv, (s), (e))
/*
* Deprecated: use GRAPHICS_VER(), MEDIA_VER() and DISPLAY_VER() as appropriate.
*/
#define IS_GEN(dev_priv, n) (GRAPHICS_VER(dev_priv) == (n))
#define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics_ver)
#define IS_GRAPHICS_VER(i915, from, until) \
(GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))
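The deprecated INTEL_GEN()/IS_GEN_RANGE()/IS_GEN() wrappers removed above keep the same call shape as the GRAPHICS_VER() family that replaces them, so conversions like the i915_debugfs.c and intel_uncore.c hunks in this series are mechanical. A stand-alone sketch of the range check, with the version field stubbed out (plain user-space C, illustration only):

#include <assert.h>

/* Stub standing in for INTEL_INFO(i915)->graphics_ver. */
struct fake_i915 { int graphics_ver; };

#define GRAPHICS_VER(i915)	((i915)->graphics_ver)
#define IS_GRAPHICS_VER(i915, from, until) \
	(GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))

int main(void)
{
	struct fake_i915 i915 = { .graphics_ver = 11 };

	assert(GRAPHICS_VER(&i915) > 5);	/* replaces an INTEL_GEN() > 5 check */
	assert(IS_GRAPHICS_VER(&i915, 9, 12));	/* replaces an IS_GEN_RANGE() check */
	assert(!IS_GRAPHICS_VER(&i915, 12, 13));
	return 0;
}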
@ -2880,14 +2880,14 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
return true;
}
int bdw_enable_vblank(struct drm_crtc *crtc)
int bdw_enable_vblank(struct drm_crtc *_crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct intel_crtc *crtc = to_intel_crtc(_crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
unsigned long irqflags;
if (gen11_dsi_configure_te(intel_crtc, true))
if (gen11_dsi_configure_te(crtc, true))
return 0;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

@ -2898,7 +2898,7 @@ int bdw_enable_vblank(struct drm_crtc *crtc)
* PSR is active as no frames are generated, so check only for PSR.
*/
if (HAS_PSR(dev_priv))
drm_crtc_vblank_restore(crtc);
drm_crtc_vblank_restore(&crtc->base);
return 0;
}

@ -2952,14 +2952,14 @@ void ilk_disable_vblank(struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
void bdw_disable_vblank(struct drm_crtc *crtc)
void bdw_disable_vblank(struct drm_crtc *_crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct intel_crtc *crtc = to_intel_crtc(_crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
unsigned long irqflags;
if (gen11_dsi_configure_te(intel_crtc, false))
if (gen11_dsi_configure_te(crtc, false))
return;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
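The _crtc parameter rename lets the intel_crtc local take the natural name: to_intel_crtc() downcasts from the embedded drm_crtc base via container_of(), and &crtc->base converts back for drm_crtc_vblank_restore(). A stand-alone sketch of that embedding, using stand-in struct names rather than the real driver types:

#include <assert.h>
#include <stddef.h>

struct base_crtc { int index; };	/* stand-in for struct drm_crtc */
struct wrapped_crtc {			/* stand-in for struct intel_crtc */
	struct base_crtc base;
	int pipe;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct wrapped_crtc *to_wrapped_crtc(struct base_crtc *b)
{
	return container_of(b, struct wrapped_crtc, base);
}

int main(void)
{
	struct wrapped_crtc crtc = { .base = { .index = 0 }, .pipe = 1 };
	struct base_crtc *b = &crtc.base;

	/* Round trip: base pointer -> wrapper -> embedded base again. */
	assert(to_wrapped_crtc(b) == &crtc);
	assert(&to_wrapped_crtc(b)->base == b);
	return 0;
}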
@ -845,7 +845,6 @@ static const struct intel_device_info icl_info = {
static const struct intel_device_info ehl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ELKHARTLAKE),
.require_force_probe = 1,
.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
.ppgtt_size = 36,
};

@ -853,7 +852,6 @@ static const struct intel_device_info ehl_info = {
static const struct intel_device_info jsl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_JASPERLAKE),
.require_force_probe = 1,
.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
.ppgtt_size = 36,
};

@ -939,15 +937,48 @@ static const struct intel_device_info adl_s_info = {
.dma_mask_size = 46,
};
#define XE_LPD_CURSOR_OFFSETS \
.cursor_offsets = { \
[PIPE_A] = CURSOR_A_OFFSET, \
[PIPE_B] = IVB_CURSOR_B_OFFSET, \
[PIPE_C] = IVB_CURSOR_C_OFFSET, \
[PIPE_D] = TGL_CURSOR_D_OFFSET, \
}
#define XE_LPD_FEATURES \
.display.ver = 13, \
.display.has_psr_hw_tracking = 0, \
.abox_mask = GENMASK(1, 0), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
.color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }, \
.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_D), \
.dbuf.size = 4096, \
.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4)
.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \
BIT(DBUF_S4), \
.display.has_ddi = 1, \
.display.has_dmc = 1, \
.display.has_dp_mst = 1, \
.display.has_dsb = 1, \
.display.has_dsc = 1, \
.display.has_fbc = 1, \
.display.has_fpga_dbg = 1, \
.display.has_hdcp = 1, \
.display.has_hotplug = 1, \
.display.has_ipc = 1, \
.display.has_psr = 1, \
.display.ver = 13, \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
[TRANSCODER_C] = PIPE_C_OFFSET, \
[TRANSCODER_D] = PIPE_D_OFFSET, \
}, \
.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
[TRANSCODER_D] = TRANSCODER_D_OFFSET, \
}, \
XE_LPD_CURSOR_OFFSETS
static const struct intel_device_info adl_p_info = {
GEN12_FEATURES,

@ -956,6 +987,7 @@ static const struct intel_device_info adl_p_info = {
.has_cdclk_crawl = 1,
.require_force_probe = 1,
.display.has_modular_fia = 1,
.display.has_psr_hw_tracking = 0,
.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
.ppgtt_size = 48,
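XE_LPD_FEATURES is consumed the same way the existing *_FEATURES blocks are: expanded inside a designated-initializer list and then overridden per platform, as adl_p_info does above. A reduced sketch of that pattern with stand-in names (not the real intel_device_info layout):

#include <stdio.h>

struct fake_device_info {
	int display_ver;
	int has_dmc;
	int require_force_probe;
};

/* Stand-in for XE_LPD_FEATURES: expands to a list of initializers. */
#define FAKE_XE_LPD_FEATURES \
	.display_ver = 13, \
	.has_dmc = 1

static const struct fake_device_info fake_adl_p_info = {
	FAKE_XE_LPD_FEATURES,
	/* platform specific overrides follow the shared block */
	.require_force_probe = 1,
};

int main(void)
{
	printf("display ver %d, dmc %d, force probe %d\n",
	       fake_adl_p_info.display_ver,
	       fake_adl_p_info.has_dmc,
	       fake_adl_p_info.require_force_probe);
	return 0;
}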
@ -4590,19 +4590,22 @@ enum {
#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_2 (0 << 28)
#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_3 (1 << 28)
#define EDP_Y_COORDINATE_ENABLE REG_BIT(25) /* display 10, 11 and 12 */
#define EDP_PSR2_SU_SDP_SCANLINE REG_BIT(25) /* display 13+ */
#define EDP_MAX_SU_DISABLE_TIME(t) ((t) << 20)
#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f << 20)
#define EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES 8
#define EDP_PSR2_IO_BUFFER_WAKE(lines) ((EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES - (lines)) << 13)
#define EDP_PSR2_IO_BUFFER_WAKE_MASK (3 << 13)
#define TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES 5
#define TGL_EDP_PSR2_IO_BUFFER_WAKE(lines) (((lines) - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES) << 13)
#define TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT 13
#define TGL_EDP_PSR2_IO_BUFFER_WAKE(lines) (((lines) - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES) << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT)
#define TGL_EDP_PSR2_IO_BUFFER_WAKE_MASK (7 << 13)
#define EDP_PSR2_FAST_WAKE_MAX_LINES 8
#define EDP_PSR2_FAST_WAKE(lines) ((EDP_PSR2_FAST_WAKE_MAX_LINES - (lines)) << 11)
#define EDP_PSR2_FAST_WAKE_MASK (3 << 11)
#define TGL_EDP_PSR2_FAST_WAKE_MIN_LINES 5
#define TGL_EDP_PSR2_FAST_WAKE(lines) (((lines) - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES) << 10)
#define TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT 10
#define TGL_EDP_PSR2_FAST_WAKE(lines) (((lines) - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES) << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT)
#define TGL_EDP_PSR2_FAST_WAKE_MASK (7 << 10)
#define EDP_PSR2_TP2_TIME_500us (0 << 8)
#define EDP_PSR2_TP2_TIME_100us (1 << 8)
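The reworked TGL wake macros only give the already-used shift a name; the encoding is still (lines - 5) placed at the field offset, now covered by a 3-bit mask. A stand-alone check of that arithmetic, re-declaring the macros from the hunk above in user-space C for illustration:

#include <assert.h>
#include <stdio.h>

#define TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES	5
#define TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT	13
#define TGL_EDP_PSR2_IO_BUFFER_WAKE(lines) \
	(((lines) - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES) << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT)
#define TGL_EDP_PSR2_IO_BUFFER_WAKE_MASK	(7 << 13)

int main(void)
{
	/* 7 wake lines -> field value 2 -> bits 15:13 = 0b010 -> 0x4000 */
	unsigned int val = TGL_EDP_PSR2_IO_BUFFER_WAKE(7);

	assert((val & ~TGL_EDP_PSR2_IO_BUFFER_WAKE_MASK) == 0);
	printf("IO_BUFFER_WAKE(7) = 0x%x\n", val);
	return 0;
}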
@ -7751,7 +7754,7 @@ enum {
#define GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED (3 << 0) /* icl + */
/* DMC */
#define DMC_PROGRAM(i) _MMIO(0x80000 + (i) * 4)
#define DMC_PROGRAM(addr, i) _MMIO((addr) + (i) * 4)
#define DMC_SSP_BASE_ADDR_GEN9 0x00002FC0
#define DMC_HTP_ADDR_SKL 0x00500034
#define DMC_SSP_BASE _MMIO(0x8F074)

@ -8107,6 +8110,7 @@ enum {
# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
#define CHICKEN_PAR1_1 _MMIO(0x42080)
#define IGNORE_KVMR_PIPE_A REG_BIT(23)
#define KBL_ARB_FILL_SPARE_22 REG_BIT(22)
#define DIS_RAM_BYPASS_PSR2_MAN_TRACK (1 << 16)
#define SKL_DE_COMPRESSED_HASH_MODE (1 << 15)

@ -10365,6 +10369,14 @@ enum skl_power_gate {
#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
/* See DP_MSA_MISC_* for the bit definitions */
#define _TRANS_A_SET_CONTEXT_LATENCY 0x6007C
#define _TRANS_B_SET_CONTEXT_LATENCY 0x6107C
#define _TRANS_C_SET_CONTEXT_LATENCY 0x6207C
#define _TRANS_D_SET_CONTEXT_LATENCY 0x6307C
#define TRANS_SET_CONTEXT_LATENCY(tran) _MMIO_TRANS2(tran, _TRANS_A_SET_CONTEXT_LATENCY)
#define TRANS_SET_CONTEXT_LATENCY_MASK REG_GENMASK(15, 0)
#define TRANS_SET_CONTEXT_LATENCY_VALUE(x) REG_FIELD_PREP(TRANS_SET_CONTEXT_LATENCY_MASK, (x))
/* LCPLL Control */
#define LCPLL_CTL _MMIO(0x130040)
#define LCPLL_PLL_DISABLE (1 << 31)
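A hedged sketch of how the new per-transcoder context latency register would be programmed; intel_de_write() and REG_FIELD_PREP() are existing helpers and the register macros come from the hunk above, but the wrapper function and the origin of the latency value are assumptions for illustration only:

/* Hypothetical helper, not part of this diff. */
static void example_set_context_latency(struct drm_i915_private *dev_priv,
					enum transcoder cpu_transcoder,
					u16 latency)
{
	intel_de_write(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder),
		       TRANS_SET_CONTEXT_LATENCY_VALUE(latency));
}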
@ -484,8 +484,7 @@ static int gen11_get_dram_info(struct drm_i915_private *i915)
static int gen12_get_dram_info(struct drm_i915_private *i915)
{
/* Always needed for GEN12+ */
i915->dram_info.wm_lv_0_adjust_needed = true;
i915->dram_info.wm_lv_0_adjust_needed = false;
return icl_pcode_read_mem_global_info(i915);
}
@ -1370,11 +1370,11 @@ static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state,
return true;
}
static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_atomic_state *state =
to_intel_atomic_state(crtc_state->uapi.state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
int num_active_planes = hweight8(crtc_state->active_planes &
~BIT(PLANE_CURSOR));

@ -1451,20 +1451,21 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
return 0;
}
static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
struct intel_atomic_state *intel_state =
to_intel_atomic_state(new_crtc_state->uapi.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(intel_state, crtc);
const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
enum plane_id plane_id;
if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
if (!new_crtc_state->hw.active ||
drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
*intermediate = *optimal;
intermediate->cxsr = false;

@ -1890,12 +1891,12 @@ static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}
static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
to_intel_atomic_state(crtc_state->uapi.state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;

@ -2095,19 +2096,20 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
#undef VLV_FIFO
static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
static int vlv_compute_intermediate_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
struct intel_atomic_state *intel_state =
to_intel_atomic_state(new_crtc_state->uapi.state);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(intel_state, crtc);
const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
int level;
if (!new_crtc_state->hw.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
if (!new_crtc_state->hw.active ||
drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
*intermediate = *optimal;
intermediate->cxsr = false;
@ -2906,24 +2908,25 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
if (wm[level] == 0) {
for (i = level + 1; i <= max_level; i++)
wm[i] = 0;
max_level = level - 1;
break;
}
}
/*
* WaWmMemoryReadLatency:skl+,glk
* WaWmMemoryReadLatency
*
* punit doesn't take into account the read latency so we need
* to add 2us to the various latency levels we retrieve from the
* punit when level 0 response data us 0us.
* to add proper adjustement to each valid level we retrieve
* from the punit when level 0 response data is 0us.
*/
if (wm[0] == 0) {
wm[0] += 2;
for (level = 1; level <= max_level; level++) {
if (wm[level] == 0)
break;
wm[level] += 2;
}
u8 adjust = DISPLAY_VER(dev_priv) >= 12 ? 3 : 2;
for (level = 0; level <= max_level; level++)
wm[level] += adjust;
}
/*

@ -2934,7 +2937,6 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
*/
if (dev_priv->dram_info.wm_lv_0_adjust_needed)
wm[0] += 1;
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);
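The reworked WaWmMemoryReadLatency fixup above folds the adjustment into one pass: when level 0 reads back as 0us, every level gets +2us, or +3us on display 12+, and display 12+ no longer relies on the DRAM-driven +1 on wm[0] (gen12_get_dram_info now leaves wm_lv_0_adjust_needed false). A stand-alone sketch of just that arithmetic, with made-up latency values:

#include <assert.h>

#define MAX_WM_LEVEL 7

/* Mirrors the adjusted fixup: bump every level when level 0 reports 0us. */
static void adjust_wm_latency(unsigned int *wm, int max_level, int display_ver)
{
	if (wm[0] == 0) {
		unsigned int adjust = display_ver >= 12 ? 3 : 2;
		int level;

		for (level = 0; level <= max_level; level++)
			wm[level] += adjust;
	}
}

int main(void)
{
	/* Made-up punit readback where level 0 came back as 0us. */
	unsigned int wm[MAX_WM_LEVEL + 1] = { 0, 4, 8, 16, 24, 32, 48, 64 };

	adjust_wm_latency(wm, MAX_WM_LEVEL, 12);
	assert(wm[0] == 3 && wm[1] == 7 && wm[7] == 67);
	return 0;
}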
@ -3144,10 +3146,12 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
}
/* Compute new watermarks for the pipe */
static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_pipe_wm *pipe_wm;
struct intel_plane *plane;
const struct intel_plane_state *plane_state;

@ -3220,16 +3224,16 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
* state and the new state. These can be programmed to the hardware
* immediately.
*/
static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(newstate->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
struct intel_atomic_state *intel_state =
to_intel_atomic_state(newstate->uapi.state);
const struct intel_crtc_state *oldstate =
intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate;
const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal;
int level, max_level = ilk_wm_max_level(dev_priv);
/*

@ -3237,9 +3241,10 @@ static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
* currently active watermarks to get values that are safe both before
* and after the vblank.
*/
*a = newstate->wm.ilk.optimal;
if (!newstate->hw.active || drm_atomic_crtc_needs_modeset(&newstate->uapi) ||
intel_state->skip_intermediate_wm)
*a = new_crtc_state->wm.ilk.optimal;
if (!new_crtc_state->hw.active ||
drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) ||
state->skip_intermediate_wm)
return 0;
a->pipe_enabled |= b->pipe_enabled;

@ -3270,8 +3275,8 @@ static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
* If our intermediate WM are identical to the final WM, then we can
* omit the post-vblank programming; only update if it's different.
*/
if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0)
newstate->wm.need_postvbl_update = true;
if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0)
new_crtc_state->wm.need_postvbl_update = true;
return 0;
}

@ -3283,12 +3288,12 @@ static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
int level,
struct intel_wm_level *ret_wm)
{
const struct intel_crtc *intel_crtc;
const struct intel_crtc *crtc;
ret_wm->enable = true;
for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
for_each_intel_crtc(&dev_priv->drm, crtc) {
const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
const struct intel_wm_level *wm = &active->wm[level];
if (!active->pipe_enabled)

@ -3388,7 +3393,7 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
enum intel_ddb_partitioning partitioning,
struct ilk_wm_values *results)
{
struct intel_crtc *intel_crtc;
struct intel_crtc *crtc;
int level, wm_lp;
results->enable_fbc_wm = merged->fbc_wm_enabled;

@ -3433,9 +3438,9 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
}
/* LP0 register values */
for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
enum pipe pipe = intel_crtc->pipe;
const struct intel_pipe_wm *pipe_wm = &intel_crtc->wm.active.ilk;
for_each_intel_crtc(&dev_priv->drm, crtc) {
enum pipe pipe = crtc->pipe;
const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
const struct intel_wm_level *r = &pipe_wm->wm[0];
if (drm_WARN_ON(&dev_priv->drm, !r->enable))
@ -1929,7 +1929,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
return -ENODEV;
}
if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
uncore->flags |= UNCORE_HAS_FORCEWAKE;
if (!intel_uncore_has_forcewake(uncore)) {