Merge tag 'drm-intel-next-2021-01-12' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

- PSR fixes and improvements for selective fetch (Jose)
- GVT build fix and cleanup (Jani)
- RKL display fixes (Lee, Matt)
- DSI fix (Hans)
- Panel Power and Backlight fixes (Anshuman, Jani)
- RPM fix (Chris)
- Fix HTI port checking (Jose)
- Clean-up in cursor code (Ville)
- Once again, trying to use fast+narrow link on eDP (Ville)
- DG1 display fix (Matt)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210112175151.GA90999@intel.com
commit dfa7c521bf
Dave Airlie, 2021-01-15 13:41:32 +10:00
24 changed files with 396 additions and 97 deletions

View File

@ -285,15 +285,7 @@ obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
# exclude some broken headers from the test coverage
no-header-test := \
display/intel_vbt_defs.h \
gvt/execlist.h \
gvt/fb_decoder.h \
gvt/gtt.h \
gvt/gvt.h \
gvt/interrupt.h \
gvt/mmio_context.h \
gvt/mpt.h \
gvt/scheduler.h
display/intel_vbt_defs.h
extra-$(CONFIG_DRM_I915_WERROR) += \
$(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \

View File

@ -1623,6 +1623,13 @@ static const u8 icp_ddc_pin_map[] = {
[TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP,
};
static const u8 rkl_pch_tgp_ddc_pin_map[] = {
[ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
[ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
[RKL_DDC_BUS_DDI_D] = GMBUS_PIN_9_TC1_ICP,
[RKL_DDC_BUS_DDI_E] = GMBUS_PIN_10_TC2_ICP,
};
static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
{
const u8 *ddc_pin_map;
@ -1630,6 +1637,9 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) {
return vbt_pin;
} else if (IS_ROCKETLAKE(dev_priv) && INTEL_PCH_TYPE(dev_priv) == PCH_TGP) {
ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
} else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
ddc_pin_map = icp_ddc_pin_map;
n_entries = ARRAY_SIZE(icp_ddc_pin_map);
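
For reference, the RKL + TGP addition follows the same table-lookup pattern map_ddc_pin() already uses for the other PCH generations: the VBT DDC value indexes a small array, and anything outside the array (or mapping to 0) is rejected. A minimal standalone sketch of that pattern, with illustrative pin values rather than the real GMBUS_PIN_* constants:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Illustrative mapping; the real values come from enum vbt_gmbus_ddi and i915_reg.h. */
static const unsigned char rkl_pch_tgp_ddc_pin_map[] = {
        [1] = 1,  /* ICL_DDC_BUS_DDI_A -> GMBUS_PIN_1_BXT */
        [2] = 2,  /* ICL_DDC_BUS_DDI_B -> GMBUS_PIN_2_BXT */
        [3] = 9,  /* RKL_DDC_BUS_DDI_D -> GMBUS_PIN_9_TC1_ICP */
        [4] = 10, /* RKL_DDC_BUS_DDI_E -> GMBUS_PIN_10_TC2_ICP */
};

static unsigned char map_ddc_pin(unsigned char vbt_pin)
{
        if (vbt_pin < ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map) &&
            rkl_pch_tgp_ddc_pin_map[vbt_pin])
                return rkl_pch_tgp_ddc_pin_map[vbt_pin];

        printf("Ignoring alternate pin: VBT claims DDC pin %u\n", vbt_pin);
        return 0;
}

int main(void)
{
        const unsigned char vbt_pins[] = { 1, 3, 7 };
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(vbt_pins); i++)
                printf("VBT pin %u -> GMBUS pin %u\n",
                       vbt_pins[i], map_ddc_pin(vbt_pins[i]));
        return 0;
}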

View File

@ -492,8 +492,8 @@ static void i9xx_update_cursor(struct intel_plane *plane,
unsigned long irqflags;
if (plane_state && plane_state->uapi.visible) {
unsigned width = drm_rect_width(&plane_state->uapi.dst);
unsigned height = drm_rect_height(&plane_state->uapi.dst);
int width = drm_rect_width(&plane_state->uapi.dst);
int height = drm_rect_height(&plane_state->uapi.dst);
cntl = plane_state->ctl |
i9xx_cursor_ctl_crtc(crtc_state);
@ -522,7 +522,7 @@ static void i9xx_update_cursor(struct intel_plane *plane,
* cursor that doesn't appear to move, or even change
* shape. Thus we always write CURBASE.
*
* The other registers are armed by by the CURBASE write
* The other registers are armed by the CURBASE write
* except when the plane is getting enabled at which time
* the CURCNTR write arms the update.
*/

View File

@ -611,6 +611,34 @@ static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2[]
{ 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
};
static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = {
/* NT mV Trans mV db */
{ 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
{ 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */
{ 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
{ 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
{ 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
{ 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
{ 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
{ 0xC, 0x60, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
{ 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
};
static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
/* NT mV Trans mV db */
{ 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
{ 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */
{ 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
{ 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
{ 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
{ 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
{ 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
{ 0xC, 0x58, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
{ 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
};
struct icl_mg_phy_ddi_buf_trans {
u32 cri_txdeemph_override_11_6;
u32 cri_txdeemph_override_5_0;
@ -766,6 +794,34 @@ static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_ho
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 1 */
};
static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr[] = {
/* NT mV Trans mV db */
{ 0xA, 0x2F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
{ 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
{ 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
{ 0x6, 0x7D, 0x2A, 0x00, 0x15 }, /* 350 900 8.2 */
{ 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
{ 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
{ 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
{ 0xC, 0x6E, 0x3E, 0x00, 0x01 }, /* 650 700 0.6 */
{ 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
};
static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
/* NT mV Trans mV db */
{ 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
{ 0xA, 0x50, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */
{ 0xC, 0x61, 0x33, 0x00, 0x0C }, /* 350 700 6.0 */
{ 0x6, 0x7F, 0x2E, 0x00, 0x11 }, /* 350 900 8.2 */
{ 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
{ 0xC, 0x5F, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */
{ 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
{ 0xC, 0x5F, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
{ 0x6, 0x7E, 0x36, 0x00, 0x09 }, /* 600 900 3.5 */
{ 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
};
static bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table)
{
return table == tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
@ -1093,6 +1149,12 @@ icl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
} else if (dev_priv->vbt.edp.low_vswing) {
*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
return icl_combo_phy_ddi_translations_edp_hbr2;
} else if (IS_DG1(dev_priv) && crtc_state->port_clock > 270000) {
*n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_hbr2_hbr3);
return dg1_combo_phy_ddi_translations_dp_hbr2_hbr3;
} else if (IS_DG1(dev_priv)) {
*n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_rbr_hbr);
return dg1_combo_phy_ddi_translations_dp_rbr_hbr;
}
return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
@ -1259,7 +1321,10 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (crtc_state->port_clock > 270000) {
if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
if (IS_ROCKETLAKE(dev_priv)) {
*n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr2_hbr3);
return rkl_combo_phy_ddi_translations_dp_hbr2_hbr3;
} else if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
*n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2);
return tgl_uy_combo_phy_ddi_translations_dp_hbr2;
} else {
@ -1267,8 +1332,13 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
return tgl_combo_phy_ddi_translations_dp_hbr2;
}
} else {
*n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
return tgl_combo_phy_ddi_translations_dp_hbr;
if (IS_ROCKETLAKE(dev_priv)) {
*n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr);
return rkl_combo_phy_ddi_translations_dp_hbr;
} else {
*n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
return tgl_combo_phy_ddi_translations_dp_hbr;
}
}
}
@ -5285,8 +5355,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port)
static bool hti_uses_phy(struct drm_i915_private *i915, enum phy phy)
{
return i915->hti_state & HDPORT_ENABLED &&
(i915->hti_state & HDPORT_PHY_USED_DP(phy) ||
i915->hti_state & HDPORT_PHY_USED_HDMI(phy));
i915->hti_state & HDPORT_DDI_USED(phy);
}
static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv,

View File

@ -3752,33 +3752,19 @@ static int intel_plane_max_height(struct intel_plane *plane,
return INT_MAX;
}
static int skl_check_main_surface(struct intel_plane_state *plane_state)
int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
int *x, int *y, u32 *offset)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
int x = plane_state->uapi.src.x1 >> 16;
int y = plane_state->uapi.src.y1 >> 16;
int w = drm_rect_width(&plane_state->uapi.src) >> 16;
int h = drm_rect_height(&plane_state->uapi.src) >> 16;
int min_width = intel_plane_min_width(plane, fb, 0, rotation);
int max_width = intel_plane_max_width(plane, fb, 0, rotation);
int max_height = intel_plane_max_height(plane, fb, 0, rotation);
int aux_plane = intel_main_to_aux_plane(fb, 0);
u32 aux_offset = plane_state->color_plane[aux_plane].offset;
u32 alignment, offset;
const int aux_plane = intel_main_to_aux_plane(fb, 0);
const u32 aux_offset = plane_state->color_plane[aux_plane].offset;
const u32 alignment = intel_surf_alignment(fb, 0);
const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
if (w > max_width || w < min_width || h > max_height) {
drm_dbg_kms(&dev_priv->drm,
"requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
w, h, min_width, max_width, max_height);
return -EINVAL;
}
intel_add_fb_offsets(&x, &y, plane_state, 0);
offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
alignment = intel_surf_alignment(fb, 0);
intel_add_fb_offsets(x, y, plane_state, 0);
*offset = intel_plane_compute_aligned_offset(x, y, plane_state, 0);
if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
return -EINVAL;
@ -3787,9 +3773,10 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
* main surface offset, and it must be non-negative. Make
* sure that is what we will get.
*/
if (aux_plane && offset > aux_offset)
offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
offset, aux_offset & ~(alignment - 1));
if (aux_plane && *offset > aux_offset)
*offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
*offset,
aux_offset & ~(alignment - 1));
/*
* When using an X-tiled surface, the plane blows up
@ -3800,18 +3787,51 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
int cpp = fb->format->cpp[0];
while ((x + w) * cpp > plane_state->color_plane[0].stride) {
if (offset == 0) {
while ((*x + w) * cpp > plane_state->color_plane[0].stride) {
if (*offset == 0) {
drm_dbg_kms(&dev_priv->drm,
"Unable to find suitable display surface offset due to X-tiling\n");
return -EINVAL;
}
offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
offset, offset - alignment);
*offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0,
*offset,
*offset - alignment);
}
}
return 0;
}
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
const unsigned int rotation = plane_state->hw.rotation;
int x = plane_state->uapi.src.x1 >> 16;
int y = plane_state->uapi.src.y1 >> 16;
const int w = drm_rect_width(&plane_state->uapi.src) >> 16;
const int h = drm_rect_height(&plane_state->uapi.src) >> 16;
const int min_width = intel_plane_min_width(plane, fb, 0, rotation);
const int max_width = intel_plane_max_width(plane, fb, 0, rotation);
const int max_height = intel_plane_max_height(plane, fb, 0, rotation);
const int aux_plane = intel_main_to_aux_plane(fb, 0);
const u32 alignment = intel_surf_alignment(fb, 0);
u32 offset;
int ret;
if (w > max_width || w < min_width || h > max_height) {
drm_dbg_kms(&dev_priv->drm,
"requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
w, h, min_width, max_width, max_height);
return -EINVAL;
}
ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
if (ret)
return ret;
/*
* CCS AUX surface doesn't have its own x/y offsets, we must make sure
* they match with the main surface x/y offsets.

View File

@ -630,6 +630,8 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
u32 skl_plane_stride(const struct intel_plane_state *plane_state,
int plane);
int skl_check_plane_surface(struct intel_plane_state *plane_state);
int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state,
int *x, int *y, u32 *offset);
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);

View File

@ -1354,6 +1354,7 @@ struct intel_dp {
bool has_hdmi_sink;
bool has_audio;
bool reset_link_params;
bool use_max_params;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];

View File

@ -480,6 +480,13 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
return -1;
}
if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
drm_dbg_kms(&i915->drm,
"Retrying Link training for eDP with max parameters\n");
intel_dp->use_max_params = true;
return 0;
}
index = intel_dp_rate_index(intel_dp->common_rates,
intel_dp->num_common_rates,
link_rate);
@ -895,9 +902,7 @@ pps_lock(struct intel_dp *intel_dp)
* See intel_power_sequencer_reset() why we need
* a power domain reference here.
*/
wakeref = intel_display_power_get(dev_priv,
intel_aux_power_domain(dp_to_dig_port(intel_dp)));
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
mutex_lock(&dev_priv->pps_mutex);
return wakeref;
@ -909,9 +914,7 @@ pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
mutex_unlock(&dev_priv->pps_mutex);
intel_display_power_put(dev_priv,
intel_aux_power_domain(dp_to_dig_port(intel_dp)),
wakeref);
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return 0;
}
@ -2290,6 +2293,44 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
return -EINVAL;
}
/* Optimize link config in order: max bpp, min lanes, min clock */
static int
intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
const struct link_config_limits *limits)
{
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int bpp, clock, lane_count;
int mode_rate, link_clock, link_avail;
for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
output_bpp);
for (lane_count = limits->min_lane_count;
lane_count <= limits->max_lane_count;
lane_count <<= 1) {
for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
link_clock = intel_dp->common_rates[clock];
link_avail = intel_dp_max_data_rate(link_clock,
lane_count);
if (mode_rate <= link_avail) {
pipe_config->lane_count = lane_count;
pipe_config->pipe_bpp = bpp;
pipe_config->port_clock = link_clock;
return 0;
}
}
}
}
return -EINVAL;
}
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
int i, num_bpc;
@ -2513,13 +2554,14 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);
if (intel_dp_is_edp(intel_dp)) {
if (intel_dp->use_max_params) {
/*
* Use the maximum clock and number of lanes the eDP panel
* advertizes being capable of. The panels are generally
* advertizes being capable of in case the initial fast
* optimal params failed us. The panels are generally
* designed to support only a single clock and lane
* configuration, and typically these values correspond to the
* native resolution of the panel.
* configuration, and typically on older panels these
* values correspond to the native resolution of the panel.
*/
limits.min_lane_count = limits.max_lane_count;
limits.min_clock = limits.max_clock;
@ -2538,11 +2580,22 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp_can_bigjoiner(intel_dp))
pipe_config->bigjoiner = true;
/*
* Optimize for slow and wide. This is the place to add alternative
* optimization policy.
*/
ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
if (intel_dp_is_edp(intel_dp))
/*
* Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
* section A.1: "It is recommended that the minimum number of
* lanes be used, using the minimum link rate allowed for that
* lane configuration."
*
* Note that we fall back to the max clock and lane count for eDP
* panels that fail with the fast optimal settings (see
* intel_dp->use_max_params), in which case the fast vs. wide
* choice doesn't matter.
*/
ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits);
else
/* Optimize for slow and wide. */
ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
/* enable compression if the mode doesn't fit available BW */
drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
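
The new intel_dp_compute_link_config_fast() above searches the limits in the order its comment states: highest bpp first, then the fewest lanes, then the lowest link rate that still carries the mode, so eDP panels come up narrow (few lanes) at a higher rate rather than wide at a low rate; if link training then fails, use_max_params makes the retry fall back to the panel's maximum parameters. A self-contained sketch of that search order, with made-up limits and simplified bandwidth math (kbit/s with a flat 8b/10b derating, not the exact scaling of the driver's helpers):

#include <stdio.h>

/* Made-up limits; the driver derives these from the sink's DPCD and the VBT. */
static const int link_rates[] = { 162000, 270000 };   /* kHz */

struct limits {
        int min_lane_count, max_lane_count;
        int min_clock, max_clock;                      /* indices into link_rates[] */
        int min_bpp, max_bpp;
};

/* Link bandwidth in kbit/s after 8b/10b coding (simplified units). */
static int max_data_rate(int link_rate_khz, int lane_count)
{
        return link_rate_khz * lane_count * 8;
}

/* Bandwidth the mode needs in kbit/s (simplified units). */
static int mode_rate(int pixel_clock_khz, int bpp)
{
        return pixel_clock_khz * bpp;
}

/* Optimize in order: max bpp, min lanes, min clock -- the "fast and narrow" policy. */
static int link_config_fast(int pixel_clock_khz, const struct limits *l,
                            int *bpp, int *lanes, int *rate)
{
        for (*bpp = l->max_bpp; *bpp >= l->min_bpp; *bpp -= 2 * 3)
                for (*lanes = l->min_lane_count; *lanes <= l->max_lane_count; *lanes <<= 1)
                        for (int clock = l->min_clock; clock <= l->max_clock; clock++)
                                if (mode_rate(pixel_clock_khz, *bpp) <=
                                    max_data_rate(link_rates[clock], *lanes)) {
                                        *rate = link_rates[clock];
                                        return 0;
                                }
        return -1;
}

int main(void)
{
        const struct limits l = { 1, 4, 0, 1, 18, 24 };
        int bpp, lanes, rate;

        if (!link_config_fast(148500, &l, &bpp, &lanes, &rate))
                printf("bpp=%d lanes=%d link_rate=%d kHz\n", bpp, lanes, rate);
        return 0;
}

For the 148.5 MHz mode in the example the fast policy settles on two lanes at 270000 kHz, whereas a slow-and-wide policy would typically keep the 162000 kHz rate and pay for it with all four lanes.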

View File

@ -1649,16 +1649,13 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
val = pch_get_backlight(connector);
else
val = lpt_get_backlight(connector);
val = intel_panel_compute_brightness(connector, val);
panel->backlight.level = clamp(val, panel->backlight.min,
panel->backlight.max);
if (cpu_mode) {
drm_dbg_kms(&dev_priv->drm,
"CPU backlight register was enabled, switching to PCH override\n");
/* Write converted CPU PWM value to PCH override register */
lpt_set_backlight(connector->base.state, panel->backlight.level);
lpt_set_backlight(connector->base.state, val);
intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
@ -1666,6 +1663,10 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
cpu_ctl2 & ~BLM_PWM_ENABLE);
}
val = intel_panel_compute_brightness(connector, val);
panel->backlight.level = clamp(val, panel->backlight.min,
panel->backlight.max);
return 0;
}

View File

@ -1186,7 +1186,8 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
const struct drm_rect *clip;
u32 val;
u32 val, offset;
int ret, x, y;
if (!crtc_state->enable_psr2_sel_fetch)
return;
@ -1203,9 +1204,14 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
val |= plane_state->uapi.dst.x1;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
/* TODO: consider tiling and auxiliary surfaces */
val = (clip->y1 + plane_state->color_plane[color_plane].y) << 16;
val |= plane_state->color_plane[color_plane].x;
/* TODO: consider auxiliary surfaces */
x = plane_state->uapi.src.x1 >> 16;
y = (plane_state->uapi.src.y1 >> 16) + clip->y1;
ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
if (ret)
drm_warn_once(&dev_priv->drm, "skl_calc_main_surface_offset() returned %i\n",
ret);
val = y << 16 | x;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
val);
@ -1242,9 +1248,11 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
if (clip->y1 == -1)
goto exit;
drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(DIV_ROUND_UP(clip->y2, 4) + 1);
val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
exit:
crtc_state->psr2_man_track_ctl = val;
}
@ -1269,8 +1277,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
struct intel_plane_state *new_plane_state, *old_plane_state;
struct drm_rect pipe_clip = { .y1 = -1 };
struct intel_plane *plane;
bool full_update = false;
int i, ret;
@ -1282,13 +1290,25 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (ret)
return ret;
/*
* Calculate minimal selective fetch area of each plane and calculate
* the pipe damaged area.
* In the next loop the plane selective fetch area will actually be set
* using whole pipe damaged area.
*/
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
struct drm_rect *sel_fetch_area, temp;
struct drm_rect src, damaged_area = { .y1 = -1 };
struct drm_mode_rect *damaged_clips;
u32 num_clips, j;
if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
continue;
if (!new_plane_state->uapi.visible &&
!old_plane_state->uapi.visible)
continue;
/*
* TODO: Not clear how to handle planes with negative position,
* also planes are not updated if they have a negative X
@ -1300,23 +1320,94 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
break;
}
if (!new_plane_state->uapi.visible)
continue;
num_clips = drm_plane_get_damage_clips_count(&new_plane_state->uapi);
/*
* For now doing a selective fetch in the whole plane area,
* optimizations will come in the future.
* If visibility or plane moved, mark the whole plane area as
* damaged as it needs to be complete redraw in the new and old
* position.
*/
sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
sel_fetch_area->y1 = new_plane_state->uapi.src.y1 >> 16;
sel_fetch_area->y2 = new_plane_state->uapi.src.y2 >> 16;
if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
!drm_rect_equals(&new_plane_state->uapi.dst,
&old_plane_state->uapi.dst)) {
if (old_plane_state->uapi.visible) {
damaged_area.y1 = old_plane_state->uapi.dst.y1;
damaged_area.y2 = old_plane_state->uapi.dst.y2;
clip_area_update(&pipe_clip, &damaged_area);
}
temp = *sel_fetch_area;
temp.y1 += new_plane_state->uapi.dst.y1;
temp.y2 += new_plane_state->uapi.dst.y2;
clip_area_update(&pipe_clip, &temp);
if (new_plane_state->uapi.visible) {
damaged_area.y1 = new_plane_state->uapi.dst.y1;
damaged_area.y2 = new_plane_state->uapi.dst.y2;
clip_area_update(&pipe_clip, &damaged_area);
}
continue;
} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha ||
(!num_clips &&
new_plane_state->uapi.fb != old_plane_state->uapi.fb)) {
/*
* If the plane don't have damaged areas but the
* framebuffer changed or alpha changed, mark the whole
* plane area as damaged.
*/
damaged_area.y1 = new_plane_state->uapi.dst.y1;
damaged_area.y2 = new_plane_state->uapi.dst.y2;
clip_area_update(&pipe_clip, &damaged_area);
continue;
}
drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
damaged_clips = drm_plane_get_damage_clips(&new_plane_state->uapi);
for (j = 0; j < num_clips; j++) {
struct drm_rect clip;
clip.x1 = damaged_clips[j].x1;
clip.y1 = damaged_clips[j].y1;
clip.x2 = damaged_clips[j].x2;
clip.y2 = damaged_clips[j].y2;
if (drm_rect_intersect(&clip, &src))
clip_area_update(&damaged_area, &clip);
}
if (damaged_area.y1 == -1)
continue;
damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
clip_area_update(&pipe_clip, &damaged_area);
}
if (full_update)
goto skip_sel_fetch_set_loop;
/* It must be aligned to 4 lines */
pipe_clip.y1 -= pipe_clip.y1 % 4;
if (pipe_clip.y2 % 4)
pipe_clip.y2 = ((pipe_clip.y2 / 4) + 1) * 4;
/*
* Now that we have the pipe damaged area check if it intersect with
* every plane, if it does set the plane selective fetch area.
*/
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
struct drm_rect *sel_fetch_area, inter;
if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
!new_plane_state->uapi.visible)
continue;
inter = pipe_clip;
if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
continue;
sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
}
skip_sel_fetch_set_loop:
psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
return 0;
}
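
The second loop above only distributes a per-pipe clip that the first loop has already built up, so the core of the new logic is: union every plane's damaged area into pipe_clip, round that clip out to whole 4-line blocks, and feed the resulting block numbers to PSR2_MAN_TRK_CTL (only the y range matters for the manual tracking register). A small standalone sketch of the accumulation and alignment step, using a trimmed-down rect and a stand-in for the driver's clip_area_update():

#include <stdio.h>

/* Trimmed-down stand-ins for struct drm_rect and clip_area_update(). */
struct rect { int y1, y2; };

static void clip_area_update(struct rect *overlap, const struct rect *damage)
{
        if (overlap->y1 == -1) {            /* no damage recorded yet */
                *overlap = *damage;
                return;
        }
        if (damage->y1 < overlap->y1)
                overlap->y1 = damage->y1;
        if (damage->y2 > overlap->y2)
                overlap->y2 = damage->y2;
}

int main(void)
{
        struct rect pipe_clip = { .y1 = -1, .y2 = -1 };
        const struct rect plane_damage[] = { { 130, 150 }, { 42, 61 } };
        unsigned int i;
        int start_block, end_block;

        /* First loop: accumulate per-plane damage into the pipe clip. */
        for (i = 0; i < sizeof(plane_damage) / sizeof(plane_damage[0]); i++)
                clip_area_update(&pipe_clip, &plane_damage[i]);

        /* The selective update region must cover whole 4-line blocks. */
        pipe_clip.y1 -= pipe_clip.y1 % 4;
        if (pipe_clip.y2 % 4)
                pipe_clip.y2 = (pipe_clip.y2 / 4 + 1) * 4;

        /* Start/end addresses as computed in psr2_man_trk_ctl_calc(): y / 4 + 1. */
        start_block = pipe_clip.y1 / 4 + 1;
        end_block = pipe_clip.y2 / 4 + 1;

        printf("pipe clip lines %d-%d -> SU region blocks %d-%d\n",
               pipe_clip.y1, pipe_clip.y2, start_block, end_block);
        return 0;
}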

View File

@ -319,6 +319,8 @@ enum vbt_gmbus_ddi {
ICL_DDC_BUS_DDI_A = 0x1,
ICL_DDC_BUS_DDI_B,
TGL_DDC_BUS_DDI_C,
RKL_DDC_BUS_DDI_D = 0x3,
RKL_DDC_BUS_DDI_E,
ICL_DDC_BUS_PORT_1 = 0x4,
ICL_DDC_BUS_PORT_2,
ICL_DDC_BUS_PORT_3,

View File

@ -812,10 +812,20 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
intel_dsi_prepare(encoder, pipe_config);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
/* Deassert reset */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
/*
* Give the panel time to power-on and then deassert its reset.
* Depending on the VBT MIPI sequences version the deassert-seq
* may contain the necessary delay, intel_dsi_msleep() will skip
* the delay in that case. If there is no deassert-seq, then an
* unconditional msleep is used to give the panel time to power-on.
*/
if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
} else {
msleep(intel_dsi->panel_on_delay);
}
if (IS_GEMINILAKE(dev_priv)) {
glk_cold_boot = glk_dsi_enable_io(encoder);

View File

@ -182,7 +182,4 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
const struct intel_engine_cs *engine);
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask);
#endif /*_GVT_EXECLIST_H_*/

View File

@ -38,6 +38,10 @@
#include <linux/types.h>
#include "display/intel_display.h"
struct intel_vgpu;
#define _PLANE_CTL_FORMAT_SHIFT 24
#define _PLANE_CTL_TILED_SHIFT 10
#define _PIPE_V_SRCSZ_SHIFT 0
@ -98,8 +102,6 @@ enum DDI_PORT {
DDI_PORT_E = 4
};
struct intel_gvt;
/* color space conversion and gamma correction are not included */
struct intel_vgpu_primary_plane_format {
u8 enabled; /* plane is enabled */

View File

@ -34,10 +34,19 @@
#ifndef _GVT_GTT_H_
#define _GVT_GTT_H_
#define I915_GTT_PAGE_SHIFT 12
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include "gt/intel_gtt.h"
struct intel_gvt;
struct intel_vgpu;
struct intel_vgpu_mm;
#define I915_GTT_PAGE_SHIFT 12
#define INTEL_GVT_INVALID_ADDR (~0UL)
struct intel_gvt_gtt_entry {

View File

@ -33,6 +33,10 @@
#ifndef _GVT_H_
#define _GVT_H_
#include <uapi/linux/pci_regs.h>
#include "i915_drv.h"
#include "debug.h"
#include "hypercall.h"
#include "mmio.h"

View File

@ -3686,8 +3686,7 @@ void intel_gvt_restore_fence(struct intel_gvt *gvt)
}
}
static inline int mmio_pm_restore_handler(struct intel_gvt *gvt,
u32 offset, void *data)
static int mmio_pm_restore_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
struct intel_vgpu *vgpu = data;
struct drm_i915_private *dev_priv = gvt->gt->i915;

View File

@ -32,7 +32,10 @@
#ifndef _GVT_INTERRUPT_H_
#define _GVT_INTERRUPT_H_
#include <linux/types.h>
#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include "i915_reg.h"
enum intel_gvt_event_type {
RCS_MI_USER_INTERRUPT = 0,

View File

@ -36,6 +36,17 @@
#ifndef __GVT_RENDER_H__
#define __GVT_RENDER_H__
#include <linux/types.h>
#include "gt/intel_engine_types.h"
#include "i915_reg.h"
struct i915_request;
struct intel_context;
struct intel_engine_cs;
struct intel_gvt;
struct intel_vgpu;
struct engine_mmio {
enum intel_engine_id id;
i915_reg_t reg;

View File

@ -33,6 +33,8 @@
#ifndef _GVT_MPT_H_
#define _GVT_MPT_H_
#include "gvt.h"
/**
* DOC: Hypervisor Service APIs for GVT-g Core Logic
*

View File

@ -36,6 +36,11 @@
#ifndef _GVT_SCHEDULER_H_
#define _GVT_SCHEDULER_H_
#include "gt/intel_engine_types.h"
#include "execlist.h"
#include "interrupt.h"
struct intel_gvt_workload_scheduler {
struct intel_vgpu *current_vgpu;
struct intel_vgpu *next_vgpu;

View File

@ -1046,6 +1046,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
void i915_driver_shutdown(struct drm_i915_private *i915)
{
disable_rpm_wakeref_asserts(&i915->runtime_pm);
i915_gem_suspend(i915);
drm_kms_helper_poll_disable(&i915->drm);
@ -1059,6 +1061,8 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_suspend_encoders(i915);
intel_shutdown_encoders(i915);
enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
static bool suspend_to_idle(struct drm_i915_private *dev_priv)

View File

@ -2928,8 +2928,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define HDPORT_STATE _MMIO(0x45050)
#define HDPORT_DPLL_USED_MASK REG_GENMASK(14, 12)
#define HDPORT_PHY_USED_DP(phy) REG_BIT(2 * (phy) + 2)
#define HDPORT_PHY_USED_HDMI(phy) REG_BIT(2 * (phy) + 1)
#define HDPORT_DDI_USED(phy) REG_BIT(2 * (phy) + 1)
#define HDPORT_ENABLED REG_BIT(0)
/* Make render/texture TLB fetches lower priorty than associated data
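
hti_uses_phy(), changed in an earlier hunk, now tests a single HDPORT_DDI_USED() bit per PHY instead of separate DP and HDMI bits; the new macro sits at the bit position the old HDPORT_PHY_USED_HDMI() occupied, i.e. bit 2 * phy + 1 of HDPORT_STATE. A quick standalone check of those bit positions, with REG_BIT reduced to a plain shift for illustration:

#include <stdio.h>

#define BIT(n)                 (1u << (n))
#define HDPORT_DDI_USED(phy)   BIT(2 * (phy) + 1)   /* REG_BIT(2 * (phy) + 1) */
#define HDPORT_ENABLED         BIT(0)

/* Mirrors the reworked hti_uses_phy() check. */
static int hti_uses_phy(unsigned int hti_state, int phy)
{
        return (hti_state & HDPORT_ENABLED) &&
               (hti_state & HDPORT_DDI_USED(phy));
}

int main(void)
{
        /* HDPORT_STATE value claiming PHY 1 for HTI: bit 0 (enabled) plus bit 3. */
        const unsigned int hti_state = HDPORT_ENABLED | HDPORT_DDI_USED(1);
        int phy;

        for (phy = 0; phy < 3; phy++)
                printf("PHY %d used by HTI: %s (mask 0x%x)\n",
                       phy, hti_uses_phy(hti_state, phy) ? "yes" : "no",
                       HDPORT_DDI_USED(phy));
        return 0;
}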

View File

@ -206,6 +206,19 @@ static inline bool drm_rect_equals(const struct drm_rect *r1,
r1->y1 == r2->y1 && r1->y2 == r2->y2;
}
/**
* drm_rect_fp_to_int - Convert a rect in 16.16 fixed point form to int form.
* @dst: rect to be stored the converted value
* @src: rect in 16.16 fixed point form
*/
static inline void drm_rect_fp_to_int(struct drm_rect *dst,
const struct drm_rect *src)
{
drm_rect_init(dst, src->x1 >> 16, src->y1 >> 16,
drm_rect_width(src) >> 16,
drm_rect_height(src) >> 16);
}
bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip);
bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
const struct drm_rect *clip);
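
drm_rect_fp_to_int() is what the new PSR2 code uses to bring a plane's 16.16 fixed-point source rectangle into whole pixels before intersecting it with the userspace damage clips. A tiny standalone illustration of the conversion, with a simplified rect in place of struct drm_rect:

#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

/* 16.16 fixed point -> integer pixels, mirroring drm_rect_fp_to_int():
 * the origin and the width/height are each truncated separately. */
static void rect_fp_to_int(struct rect *dst, const struct rect *src)
{
        dst->x1 = src->x1 >> 16;
        dst->y1 = src->y1 >> 16;
        dst->x2 = dst->x1 + ((src->x2 - src->x1) >> 16);
        dst->y2 = dst->y1 + ((src->y2 - src->y1) >> 16);
}

int main(void)
{
        /* A 1920x1080 source starting 32 lines down, in 16.16 fixed point. */
        const struct rect src = {
                .x1 = 0,          .y1 = 32 << 16,
                .x2 = 1920 << 16, .y2 = (32 + 1080) << 16,
        };
        struct rect dst;

        rect_fp_to_int(&dst, &src);
        printf("%dx%d at (%d,%d)\n",
               dst.x2 - dst.x1, dst.y2 - dst.y1, dst.x1, dst.y1);
        return 0;
}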