Merge tag 'drm-intel-next-2020-04-30' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
Driver Changes:

- Fix GitLab #1698: Performance regression with Linux 5.7-rc1 on Iris Plus 655 and 4K screen (Chris)
- Add Wa_14011059788 for Tigerlake (Matt A)
- Add per ctx batchbuffer wa for timestamp for Gen12 (Mika)
- Use indirect ctx bb to load cmd buffer control value from context image to avoid corruption (Mika)
- Enable DP Display Audio WA (Uma, Jani)
- Update forcewake firmware ranges for Icelake (Radhakrishna)
- Add missing deinitialization cases of load failure for display (Jose)
- Implement TC cold sequences for Icelake and Tigerlake (Jose)
- Unbreak enable_dpcd_backlight modparam (Lyude)
- Move the late flush_submission in retire to the end (Chris)
- Demote "Reducing compressed framebufer size" message to info (Peter)
- Push MST link retraining to the hotplug work (Ville)
- Hold obj->vma.lock over for_each_ggtt_vma() (Chris)
- Fix timeout handling during TypeC AUX power well enabling for ICL (Imre)
- Fix skl+ non-scaled pfit modes (Ville)
- Prefer soft-rc6 over RPS DOWN_TIMEOUT (Chris)
- Sanitize GT first before poisoning HWSP (Chris)
- Fix up clock RPS frequency readout (Chris)
- Avoid reusing the same logical CCID (Chris)
- Avoid dereferencing a dead context (Chris)
- Always enable busy-stats for execlists (Chris)
- Apply the aggressive downclocking to parking (Chris)
- Restore aggressive post-boost downclocking (Chris)
- Scrub execlists state on resume (Chris)
- Add debugfs attributes for LPSP (Ansuman)
- Improvements to kernel selftests (Chris, Mika)
- Add tiled blits selftest (Zbigniew)
- Fix error handling in __live_lrc_indirect_ctx_bb() (Dan)
- Add pre/post plane updates for SAGV (Stanislav)
- Add ICL PG3 PW ID for EHL (Anshuman)
- Fix Sphinx build duplicate label warning (Jani)
- Error log non-zero audio power refcount after unbind (Jani)
- Remove object_is_locked assertion from unpin_from_display_plane (Chris)
- Use single set of AUX powerwell ops for gen11+ (Matt R)
- Prefer drm_WARN_ON over WARN_ON (Pankaj)
- Poison residual state [HWSP] across resume (Chris, Tvrtko)
- Convert request-before-CS assertion to debug (Chris)
- Carefully order virtual_submission_tasklet (Chris)
- Check carefully for an idle engine in wait-for-idle (Chris)
- Only close vma we open (Chris)
- Trace RPS events (Chris)
- Use the RPM config register to determine clk frequencies (Chris)
- Drop rq->ring->vma peeking from error capture (Chris)
- Check preempt-timeout target before submit_ports (Chris)
- Check HWSP cacheline is valid before acquiring (Chris)
- Use proper fault mask in interrupt postinstall too (Matt R)
- Keep a no-frills swappable copy of the default context state (Chris)
- Add atomic helpers for bandwidth (Stanislav)
- Refactor setting dma info to a common helper from device info (Michael)
- Refactor DDI transcoder code for clarity (Ville)
- Extend PG3 power well ID to ICL (Anshuman)
- Refactor PFIT code for readability and future extensibility (Ville)
- Clarify code split between intel_ddi.c and intel_dp.c (Ville)
- Move out code to return the digital_port of the aux ch (Jose)
- Move rps.enabled/active and use of RPS interrupts to flags (Chris)
- Remove superfluous inlines and dead code (Jani)
- Re-disable -Wframe-address from top-level Makefile (Nick)
- Static checker and spelling fixes (Colin, Nathan)
- Split long lines (Ville)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200430124904.GA100924@jlahtine-desk.ger.corp.intel.com
commit a1fb548962
@@ -1407,13 +1407,16 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 
 	dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
 
-	mask = intel_private.driver->dma_mask_size;
-	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
-		dev_err(&intel_private.pcidev->dev,
-			"set gfx device dma mask %d-bit failed!\n", mask);
-	else
-		pci_set_consistent_dma_mask(intel_private.pcidev,
-					    DMA_BIT_MASK(mask));
+	if (bridge) {
+		mask = intel_private.driver->dma_mask_size;
+		if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
+			dev_err(&intel_private.pcidev->dev,
+				"set gfx device dma mask %d-bit failed!\n",
+				mask);
+		else
+			pci_set_consistent_dma_mask(intel_private.pcidev,
+						    DMA_BIT_MASK(mask));
+	}
 
 	if (intel_gtt_init() != 0) {
 		intel_gmch_remove();
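
For reference, DMA_BIT_MASK(n) in this hunk builds an all-ones mask covering the low n address bits. A minimal, self-contained userspace sketch of the same construction (the mask width is an illustrative value, not taken from the driver):

#include <stdio.h>
#include <stdint.h>

/* Same construction the kernel's DMA_BIT_MASK(n) uses: n == 64 is
 * special-cased to avoid the undefined behaviour of a 64-bit shift. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	unsigned int mask_size = 40;	/* e.g. 40-bit DMA addressing */
	uint64_t mask = DMA_BIT_MASK(mask_size);

	printf("%u-bit dma mask: 0x%016llx\n", mask_size,
	       (unsigned long long)mask);
	return 0;
}
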
@@ -22,6 +22,7 @@ subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
 subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized)
 subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
 subdir-ccflags-y += $(call cc-disable-warning, uninitialized)
+subdir-ccflags-y += $(call cc-disable-warning, frame-address)
 subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
 
 # Fine grained warnings disable
@@ -91,6 +92,7 @@ gt-y += \
 	gt/intel_ggtt.o \
 	gt/intel_ggtt_fencing.o \
 	gt/intel_gt.o \
+	gt/intel_gt_clock_utils.o \
 	gt/intel_gt_irq.o \
 	gt/intel_gt_pm.o \
 	gt/intel_gt_pm_irq.o \
@@ -109,6 +111,7 @@ gt-y += \
 	gt/intel_sseu.o \
 	gt/intel_timeline.o \
 	gt/intel_workarounds.o \
+	gt/shmem_utils.o \
 	gt/sysfs_engines.o
 
 # autogenerated null render state
 gt-y += \
@@ -257,7 +260,8 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
 	selftests/igt_live_test.o \
 	selftests/igt_mmap.o \
 	selftests/igt_reset.o \
-	selftests/igt_spinner.o
+	selftests/igt_spinner.o \
+	selftests/librapl.o
 
 # virtual gpu code
 i915-y += i915_vgpu.o
@@ -36,15 +36,15 @@
 #include "intel_panel.h"
 #include "intel_vdsc.h"
 
-static inline int header_credits_available(struct drm_i915_private *dev_priv,
-					   enum transcoder dsi_trans)
+static int header_credits_available(struct drm_i915_private *dev_priv,
+				    enum transcoder dsi_trans)
 {
 	return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
 		>> FREE_HEADER_CREDIT_SHIFT;
 }
 
-static inline int payload_credits_available(struct drm_i915_private *dev_priv,
-					    enum transcoder dsi_trans)
+static int payload_credits_available(struct drm_i915_private *dev_priv,
+				     enum transcoder dsi_trans)
 {
 	return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
 		>> FREE_PLOAD_CREDIT_SHIFT;
@@ -1195,7 +1195,7 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
 {
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 
-	WARN_ON(crtc_state->has_pch_encoder);
+	drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
 
 	/* step6d: enable dsi transcoder */
 	gen11_dsi_enable_transcoder(encoder);
@@ -1525,15 +1525,18 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
 	struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
 						   base);
 	struct intel_connector *intel_connector = intel_dsi->attached_connector;
-	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
 	const struct drm_display_mode *fixed_mode =
-			intel_connector->panel.fixed_mode;
+		intel_connector->panel.fixed_mode;
 	struct drm_display_mode *adjusted_mode =
-			&pipe_config->hw.adjusted_mode;
+		&pipe_config->hw.adjusted_mode;
+	int ret;
 
 	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 	intel_fixed_panel_mode(fixed_mode, adjusted_mode);
-	intel_pch_panel_fitting(crtc, pipe_config, conn_state->scaling_mode);
+
+	ret = intel_pch_panel_fitting(pipe_config, conn_state);
+	if (ret)
+		return ret;
 
 	adjusted_mode->flags = 0;
 
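
Many hunks in this merge convert bare WARN_ON() calls to drm_WARN_ON(), which tags the report with the specific drm device that tripped it (useful on multi-GPU systems). A toy, compile-and-run model of that pattern, where the struct layout and output format are stand-ins rather than the real DRM implementation:

#include <stdio.h>

struct drm_device { const char *unique; };

/* Like WARN_ON(), but prefixed with the device identity; GNU C
 * statement expression, as used throughout the kernel. */
#define drm_WARN_ON(drm, x) ({						\
	int __ret = !!(x);						\
	if (__ret)							\
		fprintf(stderr, "%s: WARN_ON(%s)\n", (drm)->unique, #x); \
	__ret;								\
})

int main(void)
{
	struct drm_device dev = { .unique = "0000:00:02.0" };
	int has_pch_encoder = 1;

	drm_WARN_ON(&dev, has_pch_encoder);	/* prints a device-tagged warning */
	return 0;
}
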
@@ -125,7 +125,7 @@ intel_plane_destroy_state(struct drm_plane *plane,
 			  struct drm_plane_state *state)
 {
 	struct intel_plane_state *plane_state = to_intel_plane_state(state);
-	WARN_ON(plane_state->vma);
+	drm_WARN_ON(plane->dev, plane_state->vma);
 
 	__drm_atomic_helper_plane_destroy_state(&plane_state->uapi);
 	if (plane_state->hw.fb)
@@ -396,7 +396,7 @@ skl_next_plane_to_commit(struct intel_atomic_state *state,
 	}
 
 	/* should never happen */
-	WARN_ON(1);
+	drm_WARN_ON(state->base.dev, 1);
 
 	return NULL;
 }
@@ -514,6 +514,143 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
 	mutex_unlock(&dev_priv->av_mutex);
 }
 
+/* Add a factor to take care of rounding and truncations */
+#define ROUNDING_FACTOR 10000
+
+static unsigned int get_hblank_early_enable_config(struct intel_encoder *encoder,
+						   const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	unsigned int link_clks_available, link_clks_required;
+	unsigned int tu_data, tu_line, link_clks_active;
+	unsigned int hblank_rise, hblank_early_prog;
+	unsigned int h_active, h_total, hblank_delta, pixel_clk, v_total;
+	unsigned int fec_coeff, refresh_rate, cdclk, vdsc_bpp;
+
+	h_active = crtc_state->hw.adjusted_mode.crtc_hdisplay;
+	h_total = crtc_state->hw.adjusted_mode.crtc_htotal;
+	v_total = crtc_state->hw.adjusted_mode.crtc_vtotal;
+	pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock;
+	refresh_rate = crtc_state->hw.adjusted_mode.vrefresh;
+	vdsc_bpp = crtc_state->dsc.compressed_bpp;
+	cdclk = i915->cdclk.hw.cdclk;
+	/* fec= 0.972261, using rounding multiplier of 1000000 */
+	fec_coeff = 972261;
+
+	drm_dbg_kms(&i915->drm, "h_active = %u link_clk = %u :"
+		    "lanes = %u vdsc_bpp = %u cdclk = %u\n",
+		    h_active, crtc_state->port_clock, crtc_state->lane_count,
+		    vdsc_bpp, cdclk);
+
+	if (WARN_ON(!crtc_state->port_clock || !crtc_state->lane_count ||
+		    !crtc_state->dsc.compressed_bpp || !i915->cdclk.hw.cdclk))
+		return 0;
+
+	link_clks_available = ((((h_total - h_active) *
+				 ((crtc_state->port_clock * ROUNDING_FACTOR) /
+				  pixel_clk)) / ROUNDING_FACTOR) - 28);
+
+	link_clks_required = DIV_ROUND_UP(192000, (refresh_rate *
+						   v_total)) * ((48 /
+								 crtc_state->lane_count) + 2);
+
+	if (link_clks_available > link_clks_required)
+		hblank_delta = 32;
+	else
+		hblank_delta = DIV_ROUND_UP(((((5 * ROUNDING_FACTOR) /
+					       crtc_state->port_clock) + ((5 *
+									   ROUNDING_FACTOR) /
+									  cdclk)) * pixel_clk),
+					    ROUNDING_FACTOR);
+
+	tu_data = (pixel_clk * vdsc_bpp * 8) / ((crtc_state->port_clock *
+						 crtc_state->lane_count * fec_coeff) / 1000000);
+	tu_line = (((h_active * crtc_state->port_clock * fec_coeff) /
+		    1000000) / (64 * pixel_clk));
+	link_clks_active = (tu_line - 1) * 64 + tu_data;
+
+	hblank_rise = ((link_clks_active + 6 * DIV_ROUND_UP(link_clks_active,
+							    250) + 4) * ((pixel_clk * ROUNDING_FACTOR) /
+									 crtc_state->port_clock)) / ROUNDING_FACTOR;
+
+	hblank_early_prog = h_active - hblank_rise + hblank_delta;
+
+	return hblank_early_prog;
+}
+
+static unsigned int get_sample_room_req_config(const struct intel_crtc_state *crtc_state)
+{
+	unsigned int h_active, h_total, pixel_clk;
+	unsigned int samples_room;
+
+	h_active = crtc_state->hw.adjusted_mode.hdisplay;
+	h_total = crtc_state->hw.adjusted_mode.htotal;
+	pixel_clk = crtc_state->hw.adjusted_mode.clock;
+
+	samples_room = ((((h_total - h_active) * ((crtc_state->port_clock *
+						   ROUNDING_FACTOR) / pixel_clk)) /
+			 ROUNDING_FACTOR) - 12) / ((48 /
+						    crtc_state->lane_count) + 2);
+
+	return samples_room;
+}
+
+static void enable_audio_dsc_wa(struct intel_encoder *encoder,
+				const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	enum pipe pipe = crtc->pipe;
+	unsigned int hblank_early_prog, samples_room;
+	unsigned int val;
+
+	if (INTEL_GEN(i915) < 11)
+		return;
+
+	val = intel_de_read(i915, AUD_CONFIG_BE);
+
+	if (INTEL_GEN(i915) == 11)
+		val |= HBLANK_EARLY_ENABLE_ICL(pipe);
+	else if (INTEL_GEN(i915) >= 12)
+		val |= HBLANK_EARLY_ENABLE_TGL(pipe);
+
+	if (crtc_state->dsc.compression_enable &&
+	    (crtc_state->hw.adjusted_mode.hdisplay >= 3840 &&
+	     crtc_state->hw.adjusted_mode.vdisplay >= 2160)) {
+		/* Get hblank early enable value required */
+		hblank_early_prog = get_hblank_early_enable_config(encoder,
+								   crtc_state);
+		if (hblank_early_prog < 32) {
+			val &= ~HBLANK_START_COUNT_MASK(pipe);
+			val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_32);
+		} else if (hblank_early_prog < 64) {
+			val &= ~HBLANK_START_COUNT_MASK(pipe);
+			val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_64);
+		} else if (hblank_early_prog < 96) {
+			val &= ~HBLANK_START_COUNT_MASK(pipe);
+			val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_96);
+		} else {
+			val &= ~HBLANK_START_COUNT_MASK(pipe);
+			val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_128);
+		}
+
+		/* Get samples room value required */
+		samples_room = get_sample_room_req_config(crtc_state);
+		if (samples_room < 3) {
+			val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe);
+			val |= NUMBER_SAMPLES_PER_LINE(pipe, samples_room);
+		} else {
+			/* Program 0 i.e "All Samples available in buffer" */
+			val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe);
+			val |= NUMBER_SAMPLES_PER_LINE(pipe, 0x0);
+		}
+	}
+
+	intel_de_write(i915, AUD_CONFIG_BE, val);
+}
+
+#undef ROUNDING_FACTOR
+
 static void hsw_audio_codec_enable(struct intel_encoder *encoder,
 				   const struct intel_crtc_state *crtc_state,
 				   const struct drm_connector_state *conn_state)
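
The workaround above does all of its ratio math in scaled integers rather than floating point. A self-contained sketch of the ROUNDING_FACTOR idea, with made-up 4k-ish timings (the numbers are illustrative only, not real panel data):

#include <stdio.h>

#define ROUNDING_FACTOR 10000ULL

int main(void)
{
	unsigned long long h_total = 4400, h_active = 3840;
	unsigned long long port_clock = 540000, pixel_clk = 594000; /* kHz */

	/* Scale before dividing so the clock ratio keeps roughly four
	 * decimal digits of precision in integer math, then scale back. */
	unsigned long long link_clks_available =
		(((h_total - h_active) *
		  ((port_clock * ROUNDING_FACTOR) / pixel_clk)) /
		 ROUNDING_FACTOR) - 28;

	printf("link clocks available in hblank: %llu\n", link_clks_available);
	return 0;
}
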
@@ -531,6 +668,10 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
 
 	mutex_lock(&dev_priv->av_mutex);
 
+	/* Enable Audio WA for 4k DSC usecases */
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
+		enable_audio_dsc_wa(encoder, crtc_state);
+
 	/* Enable audio presence detect, invalidate ELD */
 	tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
 	tmp |= AUDIO_OUTPUT_ENABLE(cpu_transcoder);
@@ -1138,6 +1279,10 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
 	drm_modeset_unlock_all(&dev_priv->drm);
 
 	device_link_remove(hda_kdev, i915_kdev);
+
+	if (dev_priv->audio_power_refcount)
+		drm_err(&dev_priv->drm, "audio power refcount %d after unbind\n",
+			dev_priv->audio_power_refcount);
 }
 
 static const struct component_ops i915_audio_component_bind_ops = {
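
The unbind hunk above logs a leaked wakeref at teardown instead of failing silently. A toy, runnable model of that check (the struct and function names here are stand-ins, not the i915 component API):

#include <stdio.h>

struct audio_component { int power_refcount; };

static void audio_unbind(struct audio_component *ac)
{
	/* A non-zero refcount at unbind means some path leaked a
	 * power reference; report it loudly. */
	if (ac->power_refcount)
		fprintf(stderr, "audio power refcount %d after unbind\n",
			ac->power_refcount);
}

int main(void)
{
	struct audio_component ac = { .power_refcount = 1 };

	audio_unbind(&ac);	/* reports the leaked reference */
	return 0;
}
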
@@ -375,7 +375,29 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
 	return data_rate;
 }
 
-static struct intel_bw_state *
+struct intel_bw_state *
+intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct intel_global_state *bw_state;
+
+	bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->bw_obj);
+
+	return to_intel_bw_state(bw_state);
+}
+
+struct intel_bw_state *
+intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct intel_global_state *bw_state;
+
+	bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->bw_obj);
+
+	return to_intel_bw_state(bw_state);
+}
+
+struct intel_bw_state *
 intel_atomic_get_bw_state(struct intel_atomic_state *state)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
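
The old/new accessors added above follow the atomic-commit convention: a commit duplicates the current global state, mutates the copy, and check code can compare both sides. A toy, runnable model of that duplication idea (identifiers are illustrative, not the i915 API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct bw_state { unsigned int qgv_point; };

static struct bw_state *duplicate_bw_state(const struct bw_state *old)
{
	struct bw_state *s = malloc(sizeof(*s));

	if (s)
		memcpy(s, old, sizeof(*s));
	return s;
}

int main(void)
{
	struct bw_state cur = { .qgv_point = 1 };	/* "old" side */
	struct bw_state *new_bw = duplicate_bw_state(&cur);

	if (!new_bw)
		return 1;
	new_bw->qgv_point = 2;	/* staged change, visible only on the new side */

	printf("old qgv %u -> new qgv %u\n", cur.qgv_point, new_bw->qgv_point);
	free(new_bw);
	return 0;
}
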
@@ -24,6 +24,15 @@ struct intel_bw_state {
 
 #define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
 
+struct intel_bw_state *
+intel_atomic_get_old_bw_state(struct intel_atomic_state *state);
+
+struct intel_bw_state *
+intel_atomic_get_new_bw_state(struct intel_atomic_state *state);
+
+struct intel_bw_state *
+intel_atomic_get_bw_state(struct intel_atomic_state *state);
+
 void intel_bw_init_hw(struct drm_i915_private *dev_priv);
 int intel_bw_init(struct drm_i915_private *dev_priv);
 int intel_bw_atomic_check(struct intel_atomic_state *state);
@@ -33,6 +33,7 @@
 
 #include "i915_drv.h"
 #include "intel_connector.h"
+#include "intel_display_debugfs.h"
 #include "intel_display_types.h"
 #include "intel_hdcp.h"
 
@@ -123,6 +124,8 @@ int intel_connector_register(struct drm_connector *connector)
 		goto err_backlight;
 	}
 
+	intel_connector_debugfs_add(connector);
+
 	return 0;
 
 err_backlight:
@@ -294,7 +294,7 @@ static void hsw_pre_enable_crt(struct intel_atomic_state *state,
 
 	hsw_fdi_link_train(encoder, crtc_state);
 
-	intel_ddi_enable_pipe_clock(crtc_state);
+	intel_ddi_enable_pipe_clock(encoder, crtc_state);
 }
 
 static void hsw_enable_crt(struct intel_atomic_state *state,
@@ -308,6 +308,8 @@ static void hsw_enable_crt(struct intel_atomic_state *state,
 
 	drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
 
+	intel_ddi_enable_transcoder_func(encoder, crtc_state);
+
 	intel_enable_pipe(crtc_state);
 
 	lpt_pch_enable(crtc_state);
@@ -1261,7 +1261,10 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
 	for (i = 0; i < ARRAY_SIZE(hsw_ddi_translations_fdi) * 2; i++) {
 		/* Configure DP_TP_CTL with auto-training */
 		intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
-			       DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_ENABLE);
+			       DP_TP_CTL_FDI_AUTOTRAIN |
+			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+			       DP_TP_CTL_LINK_TRAIN_PAT1 |
+			       DP_TP_CTL_ENABLE);
 
 		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
 		 * DDI E does not support port reversal, the functionality is
@@ -1337,7 +1340,10 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
 
 	/* Enable normal pixel sending for FDI */
 	intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
-		       DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_LINK_TRAIN_NORMAL | DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_ENABLE);
+		       DP_TP_CTL_FDI_AUTOTRAIN |
+		       DP_TP_CTL_LINK_TRAIN_NORMAL |
+		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+		       DP_TP_CTL_ENABLE);
 }
 
 static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
@@ -1351,27 +1357,6 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
 	intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
 }
 
-static struct intel_encoder *
-intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
-{
-	struct drm_device *dev = crtc->base.dev;
-	struct intel_encoder *encoder, *ret = NULL;
-	int num_encoders = 0;
-
-	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
-		ret = encoder;
-		num_encoders++;
-	}
-
-	if (num_encoders != 1)
-		drm_WARN(dev, 1, "%d encoders on crtc for pipe %c\n",
-			 num_encoders,
-			 pipe_name(crtc->pipe));
-
-	BUG_ON(ret == NULL);
-	return ret;
-}
-
 static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
 				 enum port port)
 {
@@ -1512,10 +1497,10 @@ static u32 bdw_trans_port_sync_master_select(enum transcoder master_transcoder)
  * intel_ddi_config_transcoder_func().
  */
 static u32
-intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
+intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
+				      const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
@@ -1617,7 +1602,8 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
 	return temp;
 }
 
-void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
+void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
+				      const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1640,7 +1626,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
 			       TRANS_DDI_FUNC_CTL2(cpu_transcoder), ctl2);
 	}
 
-	ctl = intel_ddi_transcoder_func_reg_val_get(crtc_state);
+	ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
 		ctl |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
 	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
@@ -1651,14 +1637,15 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
  * bit.
  */
 static void
-intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
+intel_ddi_config_transcoder_func(struct intel_encoder *encoder,
+				 const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 	u32 ctl;
 
-	ctl = intel_ddi_transcoder_func_reg_val_get(crtc_state);
+	ctl = intel_ddi_transcoder_func_reg_val_get(encoder, crtc_state);
 	ctl &= ~TRANS_DDI_FUNC_ENABLE;
 	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
 }
@@ -1927,7 +1914,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 	return true;
 }
 
-static inline enum intel_display_power_domain
+static enum intel_display_power_domain
 intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
 {
 	/* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with
@@ -1986,11 +1973,11 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
 					intel_dsc_power_domain(crtc_state));
 }
 
-void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
+void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
+				 const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
 	enum port port = encoder->port;
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 
@@ -2654,8 +2641,9 @@ static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder,
 		tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level);
 }
 
-static u32 translate_signal_level(int signal_levels)
+static u32 translate_signal_level(struct intel_dp *intel_dp, int signal_levels)
 {
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++) {
@@ -2663,8 +2651,9 @@ static u32 translate_signal_level(int signal_levels)
 			return i;
 	}
 
-	WARN(1, "Unsupported voltage swing/pre-emphasis level: 0x%x\n",
-	     signal_levels);
+	drm_WARN(&i915->drm, 1,
+		 "Unsupported voltage swing/pre-emphasis level: 0x%x\n",
+		 signal_levels);
 
 	return 0;
 }
@@ -2675,46 +2664,73 @@ static u32 intel_ddi_dp_level(struct intel_dp *intel_dp)
 	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
 					 DP_TRAIN_PRE_EMPHASIS_MASK);
 
-	return translate_signal_level(signal_levels);
+	return translate_signal_level(intel_dp, signal_levels);
 }
 
-u32 bxt_signal_levels(struct intel_dp *intel_dp)
+static void
+tgl_set_signal_levels(struct intel_dp *intel_dp)
 {
-	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
-	struct intel_encoder *encoder = &dport->base;
+	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
 	int level = intel_ddi_dp_level(intel_dp);
 
-	if (INTEL_GEN(dev_priv) >= 12)
-		tgl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
-					level, encoder->type);
-	else if (INTEL_GEN(dev_priv) >= 11)
-		icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
-					level, encoder->type);
-	else if (IS_CANNONLAKE(dev_priv))
-		cnl_ddi_vswing_sequence(encoder, level, encoder->type);
-	else
-		bxt_ddi_vswing_sequence(encoder, level, encoder->type);
-
-	return 0;
+	tgl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
+				level, encoder->type);
 }
 
-u32 ddi_signal_levels(struct intel_dp *intel_dp)
+static void
+icl_set_signal_levels(struct intel_dp *intel_dp)
 {
-	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
-	struct intel_encoder *encoder = &dport->base;
+	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
 	int level = intel_ddi_dp_level(intel_dp);
 
+	icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
+				level, encoder->type);
+}
+
+static void
+cnl_set_signal_levels(struct intel_dp *intel_dp)
+{
+	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+	int level = intel_ddi_dp_level(intel_dp);
+
+	cnl_ddi_vswing_sequence(encoder, level, encoder->type);
+}
+
+static void
+bxt_set_signal_levels(struct intel_dp *intel_dp)
+{
+	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+	int level = intel_ddi_dp_level(intel_dp);
+
+	bxt_ddi_vswing_sequence(encoder, level, encoder->type);
+}
+
+static void
+hsw_set_signal_levels(struct intel_dp *intel_dp)
+{
+	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	int level = intel_ddi_dp_level(intel_dp);
+	enum port port = encoder->port;
+	u32 signal_levels;
+
+	signal_levels = DDI_BUF_TRANS_SELECT(level);
+
+	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+		    signal_levels);
+
+	intel_dp->DP &= ~DDI_BUF_EMP_MASK;
+	intel_dp->DP |= signal_levels;
+
 	if (IS_GEN9_BC(dev_priv))
 		skl_ddi_set_iboost(encoder, level, encoder->type);
 
-	return DDI_BUF_TRANS_SELECT(level);
+	intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
+	intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
 }
 
-static inline
-u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
-			      enum phy phy)
+static u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
+				     enum phy phy)
 {
 	if (intel_phy_is_combo(dev_priv, phy)) {
 		return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
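
The refactor above replaces a per-call if/else platform ladder with per-platform hooks resolved once at init time. A toy, runnable model of that dispatch pattern (identifiers mirror the driver but the bodies are stand-ins):

#include <stdio.h>

struct intel_dp;
typedef void (*set_signal_levels_t)(struct intel_dp *dp);

struct intel_dp { set_signal_levels_t set_signal_levels; };

static void tgl_set_signal_levels(struct intel_dp *dp) { (void)dp; puts("tgl vswing sequence"); }
static void hsw_set_signal_levels(struct intel_dp *dp) { (void)dp; puts("hsw buf trans select"); }

/* One-time platform dispatch, the same idea the connector-init hunk
 * below applies with INTEL_GEN()/IS_*() checks. */
static void dp_init(struct intel_dp *dp, int gen)
{
	dp->set_signal_levels = gen >= 12 ? tgl_set_signal_levels
					  : hsw_set_signal_levels;
}

int main(void)
{
	struct intel_dp dp;

	dp_init(&dp, 12);
	dp.set_signal_levels(&dp);	/* no per-call platform ladder */
	return 0;
}
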
@@ -3158,13 +3174,13 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
 	 * 7.a Configure Transcoder Clock Select to direct the Port clock to the
 	 * Transcoder.
 	 */
-	intel_ddi_enable_pipe_clock(crtc_state);
+	intel_ddi_enable_pipe_clock(encoder, crtc_state);
 
 	/*
 	 * 7.b Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
 	 * Transport Select
 	 */
-	intel_ddi_config_transcoder_func(crtc_state);
+	intel_ddi_config_transcoder_func(encoder, crtc_state);
 
 	/*
 	 * 7.c Configure & enable DP_TP_CTL with link training pattern 1
@@ -3252,9 +3268,6 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
 	intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
 				 crtc_state->lane_count, is_mst);
 
-	intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
-	intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
-
 	intel_edp_panel_on(intel_dp);
 
 	intel_ddi_clk_select(encoder, crtc_state);
@@ -3299,7 +3312,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
 	intel_ddi_enable_fec(encoder, crtc_state);
 
 	if (!is_mst)
-		intel_ddi_enable_pipe_clock(crtc_state);
+		intel_ddi_enable_pipe_clock(encoder, crtc_state);
 
 	intel_dsc_enable(encoder, crtc_state);
 }
@@ -3360,7 +3373,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
 	if (IS_GEN9_BC(dev_priv))
 		skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
 
-	intel_ddi_enable_pipe_clock(crtc_state);
+	intel_ddi_enable_pipe_clock(encoder, crtc_state);
 
 	intel_dig_port->set_infoframes(encoder,
 				       crtc_state->has_infoframe,
@@ -3766,7 +3779,9 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
 			     const struct intel_crtc_state *crtc_state,
 			     const struct drm_connector_state *conn_state)
 {
-	WARN_ON(crtc_state->has_pch_encoder);
+	drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);
 
+	intel_ddi_enable_transcoder_func(encoder, crtc_state);
+
 	intel_enable_pipe(crtc_state);
 
@@ -3877,7 +3892,7 @@ intel_ddi_update_prepare(struct intel_atomic_state *state,
 		crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL;
 	int required_lanes = crtc_state ? crtc_state->lane_count : 1;
 
-	WARN_ON(crtc && crtc->active);
+	drm_WARN_ON(state->base.dev, crtc && crtc->active);
 
 	intel_tc_port_get_link(enc_to_dig_port(encoder),
 			       required_lanes);
@@ -3969,6 +3984,74 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
 	udelay(600);
 }
 
+static void intel_ddi_set_link_train(struct intel_dp *intel_dp,
+				     u8 dp_train_pat)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
+	enum port port = dp_to_dig_port(intel_dp)->base.port;
+	u32 temp;
+
+	temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
+
+	if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
+		temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
+	else
+		temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
+
+	temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+	switch (dp_train_pat & train_pat_mask) {
+	case DP_TRAINING_PATTERN_DISABLE:
+		temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+		break;
+	case DP_TRAINING_PATTERN_1:
+		temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+		break;
+	case DP_TRAINING_PATTERN_2:
+		temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
+		break;
+	case DP_TRAINING_PATTERN_3:
+		temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
+		break;
+	case DP_TRAINING_PATTERN_4:
+		temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
+		break;
+	}
+
+	intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp);
+
+	intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
+	intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
+}
+
+static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp)
+{
+	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	enum port port = encoder->port;
+	u32 val;
+
+	val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
+	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
+	intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
+
+	/*
+	 * Until TGL on PORT_A we can have only eDP in SST mode. There the only
+	 * reason we need to set idle transmission mode is to work around a HW
+	 * issue where we enable the pipe while not in idle link-training mode.
+	 * In this case there is requirement to wait for a minimum number of
+	 * idle patterns to be sent.
+	 */
+	if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
+		return;
+
+	if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
+				  DP_TP_STATUS_IDLE_DONE, 1))
+		drm_err(&dev_priv->drm,
+			"Timed out waiting for DP idle patterns\n");
+}
+
 static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
 				       enum transcoder cpu_transcoder)
 {
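
The link-train helper above uses the classic mask-then-or read-modify-write on a multi-bit register field. A tiny, runnable sketch of that pattern (the field layout and register value here are made up, not the real DP_TP_CTL encoding):

#include <stdio.h>
#include <stdint.h>

#define LINK_TRAIN_MASK   (7u << 8)
#define LINK_TRAIN_PAT1   (0u << 8)
#define LINK_TRAIN_NORMAL (3u << 8)

int main(void)
{
	uint32_t ctl = 0x80000000 | LINK_TRAIN_PAT1;	/* pretend readback */

	ctl &= ~LINK_TRAIN_MASK;	/* clear the old pattern field */
	ctl |= LINK_TRAIN_NORMAL;	/* select normal pixel sending */

	printf("ctl: 0x%08x\n", ctl);
	return 0;
}
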
@@ -4061,12 +4144,18 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	u32 temp, flags = 0;
 
 	/* XXX: DSI transcoder paranoia */
 	if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)))
 		return;
 
+	if (INTEL_GEN(dev_priv) >= 12) {
+		intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(cpu_transcoder);
+		intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(cpu_transcoder);
+	}
+
 	intel_dsc_get_config(encoder, pipe_config);
 
 	temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
@@ -4396,6 +4485,7 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
 static struct intel_connector *
 intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
 {
+	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
 	struct intel_connector *connector;
 	enum port port = intel_dig_port->base.port;
 
@@ -4406,6 +4496,24 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
 	intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
 	intel_dig_port->dp.prepare_link_retrain =
 		intel_ddi_prepare_link_retrain;
+	intel_dig_port->dp.set_link_train = intel_ddi_set_link_train;
+	intel_dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train;
+
+	if (INTEL_GEN(dev_priv) >= 12)
+		intel_dig_port->dp.set_signal_levels = tgl_set_signal_levels;
+	else if (INTEL_GEN(dev_priv) >= 11)
+		intel_dig_port->dp.set_signal_levels = icl_set_signal_levels;
+	else if (IS_CANNONLAKE(dev_priv))
+		intel_dig_port->dp.set_signal_levels = cnl_set_signal_levels;
+	else if (IS_GEN9_LP(dev_priv))
+		intel_dig_port->dp.set_signal_levels = bxt_set_signal_levels;
+	else
+		intel_dig_port->dp.set_signal_levels = hsw_set_signal_levels;
+
+	if (INTEL_GEN(dev_priv) < 12) {
+		intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
+		intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
+	}
+
 	if (!intel_dp_init_connector(intel_dig_port, connector)) {
 		kfree(connector);
@@ -25,9 +25,11 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
 			const struct intel_crtc_state *crtc_state);
 void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
 bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
-void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
+void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder,
+				      const struct intel_crtc_state *crtc_state);
 void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
+void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder,
+				 const struct intel_crtc_state *crtc_state);
 void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
 void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
 			  const struct drm_connector_state *conn_state);
@@ -238,9 +238,9 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv)
 		dev_priv->czclk_freq);
 }
 
-static inline u32 /* units of 100MHz */
-intel_fdi_link_freq(struct drm_i915_private *dev_priv,
-		    const struct intel_crtc_state *pipe_config)
+/* units of 100MHz */
+static u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
+			       const struct intel_crtc_state *pipe_config)
 {
 	if (HAS_DDI(dev_priv))
 		return pipe_config->port_clock; /* SPLL */
@@ -1973,16 +1973,16 @@ static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
 
 static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
 {
-	WARN_ON(!is_ccs_modifier(fb->modifier) ||
-		(main_plane && main_plane >= fb->format->num_planes / 2));
+	drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
+		    (main_plane && main_plane >= fb->format->num_planes / 2));
 
 	return fb->format->num_planes / 2 + main_plane;
 }
 
 static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
 {
-	WARN_ON(!is_ccs_modifier(fb->modifier) ||
-		ccs_plane < fb->format->num_planes / 2);
+	drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
+		    ccs_plane < fb->format->num_planes / 2);
 
 	return ccs_plane - fb->format->num_planes / 2;
 }
@@ -2992,7 +2992,7 @@ setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
 	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
 		return 0;
 
-	if (WARN_ON(plane >= ARRAY_SIZE(rot_info->plane)))
+	if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane)))
 		return 0;
 
 	rot_info->plane[plane] = *plane_info;
@@ -6089,30 +6089,26 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 	return 0;
 }
 
-/**
- * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
- *
- * @state: crtc's scaler state
- *
- * Return
- *     0 - scaler_usage updated successfully
- *     error - requested scaling cannot be supported or other error condition
- */
-int skl_update_scaler_crtc(struct intel_crtc_state *state)
+static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
 {
-	const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
-	bool need_scaler = false;
+	const struct drm_display_mode *adjusted_mode =
+		&crtc_state->hw.adjusted_mode;
+	int width, height;
 
-	if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
-	    state->pch_pfit.enabled)
-		need_scaler = true;
+	if (crtc_state->pch_pfit.enabled) {
+		width = drm_rect_width(&crtc_state->pch_pfit.dst);
+		height = drm_rect_height(&crtc_state->pch_pfit.dst);
+	} else {
+		width = adjusted_mode->crtc_hdisplay;
+		height = adjusted_mode->crtc_vdisplay;
+	}
 
-	return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
-				 &state->scaler_state.scaler_id,
-				 state->pipe_src_w, state->pipe_src_h,
-				 adjusted_mode->crtc_hdisplay,
-				 adjusted_mode->crtc_vdisplay, NULL, 0,
-				 need_scaler);
+	return skl_update_scaler(crtc_state, !crtc_state->hw.active,
+				 SKL_CRTC_INDEX,
+				 &crtc_state->scaler_state.scaler_id,
+				 crtc_state->pipe_src_w, crtc_state->pipe_src_h,
+				 width, height, NULL, 0,
+				 crtc_state->pch_pfit.enabled);
 }
 
 /**
@@ -6221,70 +6217,80 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	enum pipe pipe = crtc->pipe;
 	const struct intel_crtc_scaler_state *scaler_state =
 		&crtc_state->scaler_state;
+	struct drm_rect src = {
+		.x2 = crtc_state->pipe_src_w << 16,
+		.y2 = crtc_state->pipe_src_h << 16,
+	};
+	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
+	u16 uv_rgb_hphase, uv_rgb_vphase;
+	enum pipe pipe = crtc->pipe;
+	int width = drm_rect_width(dst);
+	int height = drm_rect_height(dst);
+	int x = dst->x1;
+	int y = dst->y1;
+	int hscale, vscale;
+	unsigned long irqflags;
+	int id;
 
-	if (crtc_state->pch_pfit.enabled) {
-		u16 uv_rgb_hphase, uv_rgb_vphase;
-		int pfit_w, pfit_h, hscale, vscale;
-		unsigned long irqflags;
-		int id;
+	if (!crtc_state->pch_pfit.enabled)
+		return;
 
-		if (drm_WARN_ON(&dev_priv->drm,
-				crtc_state->scaler_state.scaler_id < 0))
-			return;
+	if (drm_WARN_ON(&dev_priv->drm,
+			crtc_state->scaler_state.scaler_id < 0))
+		return;
 
-		pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
-		pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
+	hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
+	vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);
 
-		hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
-		vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
+	uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+	uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
 
-		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
-		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
+	id = scaler_state->scaler_id;
 
-		id = scaler_state->scaler_id;
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
-		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+	intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
+			  PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+	intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
+			  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+	intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
+			  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
+			  x << 16 | y);
+	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
+			  width << 16 | height);
 
-		intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
-				  PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
-		intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
-				  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
-		intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
-				  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
-		intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
-				  crtc_state->pch_pfit.pos);
-		intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
-				  crtc_state->pch_pfit.size);
-
-		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-	}
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
 	enum pipe pipe = crtc->pipe;
+	int width = drm_rect_width(dst);
+	int height = drm_rect_height(dst);
+	int x = dst->x1;
+	int y = dst->y1;
 
-	if (crtc_state->pch_pfit.enabled) {
-		/* Force use of hard-coded filter coefficients
-		 * as some pre-programmed values are broken,
-		 * e.g. x201.
-		 */
-		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
-			intel_de_write(dev_priv, PF_CTL(pipe),
-				       PF_ENABLE | PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
-		else
-			intel_de_write(dev_priv, PF_CTL(pipe),
-				       PF_ENABLE | PF_FILTER_MED_3x3);
-		intel_de_write(dev_priv, PF_WIN_POS(pipe),
-			       crtc_state->pch_pfit.pos);
-		intel_de_write(dev_priv, PF_WIN_SZ(pipe),
-			       crtc_state->pch_pfit.size);
-	}
+	if (!crtc_state->pch_pfit.enabled)
+		return;
+
+	/* Force use of hard-coded filter coefficients
+	 * as some pre-programmed values are broken,
+	 * e.g. x201.
+	 */
+	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
+		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
+			       PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
+	else
+		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
+			       PF_FILTER_MED_3x3);
+
+	intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
+	intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
 }
 
 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
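
The pfit rework above delegates the scale-factor math to drm_rect_calc_hscale(), which works on 16.16 fixed-point sources. A self-contained sketch of that fixed-point ratio (numbers are illustrative, and this is a simplification of the DRM helper, not its implementation):

#include <stdio.h>

int main(void)
{
	int src_w = 1920 << 16;		/* source width, 16.16 fixed point */
	int dst_w = 1600;		/* destination width, integer pixels */
	int hscale = src_w / dst_w;	/* scale factor comes out in 16.16 */

	printf("hscale = %d.%04d\n", hscale >> 16,
	       (int)(((long long)(hscale & 0xffff) * 10000) >> 16));
	return 0;
}
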
@@ -6626,7 +6632,7 @@ intel_connector_primary_encoder(struct intel_connector *connector)
 		return &dp_to_dig_port(connector->mst_port)->base;
 
 	encoder = intel_attached_encoder(connector);
-	WARN_ON(!encoder);
+	drm_WARN_ON(connector->base.dev, !encoder);
 
 	return encoder;
 }
@@ -7071,9 +7077,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
 	if (INTEL_GEN(dev_priv) >= 11)
 		icl_set_pipe_chicken(crtc);
 
-	if (!transcoder_is_dsi(cpu_transcoder))
-		intel_ddi_enable_transcoder_func(new_crtc_state);
-
 	if (dev_priv->display.initial_watermarks)
 		dev_priv->display.initial_watermarks(state, crtc);
 
@@ -7104,11 +7107,12 @@ void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
 
 	/* To avoid upsetting the power well on haswell only disable the pfit if
 	 * it's in use. The hw state code will make sure we get this right. */
-	if (old_crtc_state->pch_pfit.enabled) {
-		intel_de_write(dev_priv, PF_CTL(pipe), 0);
-		intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
-		intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
-	}
+	if (!old_crtc_state->pch_pfit.enabled)
+		return;
+
+	intel_de_write(dev_priv, PF_CTL(pipe), 0);
+	intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
+	intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
 }
 
 static void ilk_crtc_disable(struct intel_atomic_state *state,
@@ -7296,7 +7300,17 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
 		}
 	}
 
-	switch (dig_port->aux_ch) {
+	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
+}
+
+/*
+ * Converts aux_ch to power_domain without caring about TBT ports for that use
+ * intel_aux_power_domain()
+ */
+enum intel_display_power_domain
+intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
+{
+	switch (aux_ch) {
 	case AUX_CH_A:
 		return POWER_DOMAIN_AUX_A;
 	case AUX_CH_B:
@@ -7312,7 +7326,7 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
 	case AUX_CH_G:
 		return POWER_DOMAIN_AUX_G;
 	default:
-		MISSING_CASE(dig_port->aux_ch);
+		MISSING_CASE(aux_ch);
 		return POWER_DOMAIN_AUX_A;
 	}
 }
@@ -7926,39 +7940,36 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
 		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
 }
 
-static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
+static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
 {
-	u32 pixel_rate;
-
-	pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;
+	u32 pixel_rate = crtc_state->hw.adjusted_mode.crtc_clock;
+	unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
 
 	/*
 	 * We only use IF-ID interlacing. If we ever use
 	 * PF-ID we'll need to adjust the pixel_rate here.
 	 */
 
-	if (pipe_config->pch_pfit.enabled) {
-		u64 pipe_w, pipe_h, pfit_w, pfit_h;
-		u32 pfit_size = pipe_config->pch_pfit.size;
-
-		pipe_w = pipe_config->pipe_src_w;
-		pipe_h = pipe_config->pipe_src_h;
+	if (!crtc_state->pch_pfit.enabled)
+		return pixel_rate;
 
-		pfit_w = (pfit_size >> 16) & 0xFFFF;
-		pfit_h = pfit_size & 0xFFFF;
-		if (pipe_w < pfit_w)
-			pipe_w = pfit_w;
-		if (pipe_h < pfit_h)
-			pipe_h = pfit_h;
+	pipe_w = crtc_state->pipe_src_w;
+	pipe_h = crtc_state->pipe_src_h;
 
-		if (WARN_ON(!pfit_w || !pfit_h))
-			return pixel_rate;
+	pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
+	pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
 
-		pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
-				     pfit_w * pfit_h);
-	}
+	if (pipe_w < pfit_w)
+		pipe_w = pfit_w;
+	if (pipe_h < pfit_h)
+		pipe_h = pfit_h;
 
-	return pixel_rate;
+	if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
+			!pfit_w || !pfit_h))
+		return pixel_rate;
+
+	return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
+		       pfit_w * pfit_h);
 }
 
 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
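
A worked example of the pixel-rate adjustment above: when the panel fitter downscales, the effective pixel rate rises by the src/dst area ratio. The timings below are illustrative only:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pixel_rate = 148500;			/* kHz */
	uint64_t pipe_w = 1920, pipe_h = 1080;		/* pipe source size */
	uint64_t pfit_w = 1600, pfit_h = 900;		/* fitter output size */

	/* mirror the max(pipe, pfit) clamping in ilk_pipe_pixel_rate() */
	if (pipe_w < pfit_w)
		pipe_w = pfit_w;
	if (pipe_h < pfit_h)
		pipe_h = pfit_h;

	pixel_rate = pixel_rate * (pipe_w * pipe_h) / (pfit_w * pfit_h);
	printf("adjusted pixel rate: %llu kHz\n",
	       (unsigned long long)pixel_rate);	/* 213840 kHz */
	return 0;
}
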
@@ -8127,7 +8138,7 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
 	}
 }
 
-static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 {
 	if (i915_modparams.panel_use_ssc >= 0)
 		return i915_modparams.panel_use_ssc != 0;
@@ -9151,9 +9162,9 @@ static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
 		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
 }
 
-static void i9xx_get_pfit_config(struct intel_crtc *crtc,
-				 struct intel_crtc_state *pipe_config)
+static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	u32 tmp;
 
@@ -9173,9 +9184,9 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
 		return;
 	}
 
-	pipe_config->gmch_pfit.control = tmp;
-	pipe_config->gmch_pfit.pgm_ratios = intel_de_read(dev_priv,
-							  PFIT_PGM_RATIOS);
+	crtc_state->gmch_pfit.control = tmp;
+	crtc_state->gmch_pfit.pgm_ratios =
+		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
 }
 
 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
@@ -9425,7 +9436,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 	intel_get_pipe_timings(crtc, pipe_config);
 	intel_get_pipe_src_size(crtc, pipe_config);
 
-	i9xx_get_pfit_config(crtc, pipe_config);
+	i9xx_get_pfit_config(pipe_config);
 
 	if (INTEL_GEN(dev_priv) >= 4) {
 		/* No way to read it out on pipes B and C */
@@ -10395,37 +10406,47 @@ static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
 				   &pipe_config->fdi_m_n, NULL);
 }
 
-static void skl_get_pfit_config(struct intel_crtc *crtc,
-				struct intel_crtc_state *pipe_config)
+static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
+				  u32 pos, u32 size)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
-	u32 ps_ctrl = 0;
+	drm_rect_init(&crtc_state->pch_pfit.dst,
+		      pos >> 16, pos & 0xffff,
+		      size >> 16, size & 0xffff);
+}
+
+static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
 	int id = -1;
 	int i;
 
 	/* find scaler attached to this pipe */
 	for (i = 0; i < crtc->num_scalers; i++) {
-		ps_ctrl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
-		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
-			id = i;
-			pipe_config->pch_pfit.enabled = true;
-			pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
-								  SKL_PS_WIN_POS(crtc->pipe, i));
-			pipe_config->pch_pfit.size = intel_de_read(dev_priv,
-								   SKL_PS_WIN_SZ(crtc->pipe, i));
-			scaler_state->scalers[i].in_use = true;
-			break;
-		}
+		u32 ctl, pos, size;
+
+		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
+		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
+			continue;
+
+		id = i;
+		crtc_state->pch_pfit.enabled = true;
+
+		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
+		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
+
+		ilk_get_pfit_pos_size(crtc_state, pos, size);
+
+		scaler_state->scalers[i].in_use = true;
+		break;
 	}
 
 	scaler_state->scaler_id = id;
-	if (id >= 0) {
+	if (id >= 0)
 		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
-	} else {
+	else
 		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
-	}
 }
 
 static void
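
The ilk_get_pfit_pos_size() helper introduced above unpacks the hardware's packed pos/size registers (x/y and w/h in two 16-bit halves) into a rectangle. A standalone, runnable version of the same unpacking, using a local rect type instead of drm_rect:

#include <stdio.h>
#include <stdint.h>

struct rect { int x1, y1, x2, y2; };

static void get_pfit_pos_size(struct rect *dst, uint32_t pos, uint32_t size)
{
	dst->x1 = pos >> 16;			/* window x */
	dst->y1 = pos & 0xffff;			/* window y */
	dst->x2 = dst->x1 + (size >> 16);	/* x + width */
	dst->y2 = dst->y1 + (size & 0xffff);	/* y + height */
}

int main(void)
{
	struct rect dst;

	get_pfit_pos_size(&dst, (240u << 16) | 135, (1440u << 16) | 810);
	printf("pfit dst: %dx%d+%d+%d\n",
	       dst.x2 - dst.x1, dst.y2 - dst.y1, dst.x1, dst.y1);
	return 0;
}
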
@@ -10561,30 +10582,30 @@ error:
 	kfree(intel_fb);
 }
 
-static void ilk_get_pfit_config(struct intel_crtc *crtc,
-				struct intel_crtc_state *pipe_config)
+static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
 {
-	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	u32 tmp;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	u32 ctl, pos, size;
 
-	tmp = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
+	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
+	if ((ctl & PF_ENABLE) == 0)
+		return;
 
-	if (tmp & PF_ENABLE) {
-		pipe_config->pch_pfit.enabled = true;
-		pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
-							  PF_WIN_POS(crtc->pipe));
-		pipe_config->pch_pfit.size = intel_de_read(dev_priv,
-							   PF_WIN_SZ(crtc->pipe));
+	crtc_state->pch_pfit.enabled = true;
 
-		/* We currently do not free assignements of panel fitters on
-		 * ivb/hsw (since we don't use the higher upscaling modes which
-		 * differentiates them) so just WARN about this case for now. */
-		if (IS_GEN(dev_priv, 7)) {
-			drm_WARN_ON(dev, (tmp & PF_PIPE_SEL_MASK_IVB) !=
-				    PF_PIPE_SEL_IVB(crtc->pipe));
-		}
-	}
+	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
+	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
+
+	ilk_get_pfit_pos_size(crtc_state, pos, size);
+
+	/*
+	 * We currently do not free assignements of panel fitters on
+	 * ivb/hsw (since we don't use the higher upscaling modes which
+	 * differentiates them) so just WARN about this case for now.
+	 */
+	drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) &&
+		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
 }
 
 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
@@ -10695,7 +10716,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
 	intel_get_pipe_timings(crtc, pipe_config);
 	intel_get_pipe_src_size(crtc, pipe_config);
 
-	ilk_get_pfit_config(crtc, pipe_config);
+	ilk_get_pfit_config(pipe_config);
 
 	ret = true;
 
@@ -11169,9 +11190,9 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
 		power_domain_mask |= BIT_ULL(power_domain);
 
 		if (INTEL_GEN(dev_priv) >= 9)
-			skl_get_pfit_config(crtc, pipe_config);
+			skl_get_pfit_config(pipe_config);
 		else
-			ilk_get_pfit_config(crtc, pipe_config);
+			ilk_get_pfit_config(pipe_config);
 	}
 
 	if (hsw_crtc_supports_ips(crtc)) {
@ -12430,8 +12451,10 @@ static int icl_add_linked_planes(struct intel_atomic_state *state)
|
||||
if (IS_ERR(linked_plane_state))
|
||||
return PTR_ERR(linked_plane_state);
|
||||
|
||||
WARN_ON(linked_plane_state->planar_linked_plane != plane);
|
||||
WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
|
||||
drm_WARN_ON(state->base.dev,
|
||||
linked_plane_state->planar_linked_plane != plane);
|
||||
drm_WARN_ON(state->base.dev,
|
||||
linked_plane_state->planar_slave == plane_state->planar_slave);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -12819,7 +12842,7 @@ static void intel_dump_crtc_timings(struct drm_i915_private *i915,
|
||||
mode->type, mode->flags);
|
||||
}
|
||||
|
||||
static inline void
|
||||
static void
|
||||
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
|
||||
const char *id, unsigned int lane_count,
|
||||
const struct intel_link_m_n *m_n)
|
||||
@ -13030,9 +13053,8 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
|
||||
pipe_config->gmch_pfit.lvds_border_bits);
|
||||
else
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
|
||||
pipe_config->pch_pfit.pos,
|
||||
pipe_config->pch_pfit.size,
|
||||
"pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
|
||||
DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
|
||||
enableddisabled(pipe_config->pch_pfit.enabled),
|
||||
yesno(pipe_config->pch_pfit.force_thru));
|
||||
|
||||
@ -13154,7 +13176,8 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
|
||||
{
|
||||
crtc_state->uapi.enable = crtc_state->hw.enable;
|
||||
crtc_state->uapi.active = crtc_state->hw.active;
|
||||
WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
|
||||
drm_WARN_ON(crtc_state->uapi.crtc->dev,
|
||||
drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
|
||||
|
||||
crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
|
||||
|
||||
@ -13773,8 +13796,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
|
||||
|
||||
PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
|
||||
if (current_config->pch_pfit.enabled) {
|
||||
PIPE_CONF_CHECK_X(pch_pfit.pos);
|
||||
PIPE_CONF_CHECK_X(pch_pfit.size);
|
||||
PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
|
||||
PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
|
||||
PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
|
||||
PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
|
||||
}
|
||||
|
||||
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
|
||||
@ -15353,12 +15378,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
|
||||
|
||||
intel_set_cdclk_pre_plane_update(state);
|
||||
|
||||
/*
|
||||
* SKL workaround: bspec recommends we disable the SAGV when we
|
||||
* have more then one pipe enabled
|
||||
*/
|
||||
if (!intel_can_enable_sagv(state))
|
||||
intel_disable_sagv(dev_priv);
|
||||
intel_sagv_pre_plane_update(state);
|
||||
|
||||
intel_modeset_verify_disabled(dev_priv, state);
|
||||
}
|
||||
@ -15455,11 +15475,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
|
||||
intel_check_cpu_fifo_underruns(dev_priv);
|
||||
intel_check_pch_fifo_underruns(dev_priv);
|
||||
|
||||
if (state->modeset)
|
||||
if (state->modeset) {
|
||||
intel_verify_planes(state);
|
||||
|
||||
if (state->modeset && intel_can_enable_sagv(state))
|
||||
intel_enable_sagv(dev_priv);
|
||||
intel_sagv_post_plane_update(state);
|
||||
}
|
||||
|
||||
drm_atomic_helper_commit_hw_done(&state->base);
|
||||
|
||||
|
@ -583,13 +583,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
|
||||
enum intel_display_power_domain intel_port_to_power_domain(enum port port);
|
||||
enum intel_display_power_domain
|
||||
intel_aux_power_domain(struct intel_digital_port *dig_port);
|
||||
enum intel_display_power_domain
|
||||
intel_legacy_aux_to_power_domain(enum aux_ch aux_ch);
|
||||
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
|
||||
struct intel_crtc_state *pipe_config);
|
||||
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
|
||||
struct intel_crtc_state *crtc_state);
|
||||
|
||||
u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
|
||||
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
|
||||
void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
|
||||
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
|
||||
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
|
||||
|
@ -9,6 +9,7 @@
|
||||
#include "i915_debugfs.h"
|
||||
#include "intel_csr.h"
|
||||
#include "intel_display_debugfs.h"
|
||||
#include "intel_display_power.h"
|
||||
#include "intel_display_types.h"
|
||||
#include "intel_dp.h"
|
||||
#include "intel_fbc.h"
|
||||
@ -1143,6 +1144,51 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define LPSP_STATUS(COND) (COND ? seq_puts(m, "LPSP: enabled\n") : \
|
||||
seq_puts(m, "LPSP: disabled\n"))
|
||||
|
||||
static bool
|
||||
intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
|
||||
enum i915_power_well_id power_well_id)
|
||||
{
|
||||
intel_wakeref_t wakeref;
|
||||
bool is_enabled;
|
||||
|
||||
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
|
||||
is_enabled = intel_display_power_well_is_enabled(i915,
|
||||
power_well_id);
|
||||
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
|
||||
|
||||
return is_enabled;
|
||||
}
|
||||
|
||||
static int i915_lpsp_status(struct seq_file *m, void *unused)
|
||||
{
|
||||
struct drm_i915_private *i915 = node_to_i915(m->private);
|
||||
|
||||
switch (INTEL_GEN(i915)) {
|
||||
case 12:
|
||||
case 11:
|
||||
LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
|
||||
break;
|
||||
case 10:
|
||||
case 9:
|
||||
LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
|
||||
break;
|
||||
default:
|
||||
/*
|
||||
* Apart from HASWELL/BROADWELL other legacy platform doesn't
|
||||
* support lpsp.
|
||||
*/
|
||||
if (IS_HASWELL(i915) || IS_BROADWELL(i915))
|
||||
LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
|
||||
else
|
||||
seq_puts(m, "LPSP: not supported\n");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
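For reference, LPSP_STATUS() above expands to a plain conditional around seq_puts(); e.g. the gen11/gen12 leg is equivalent to this sketch:

/* Equivalent expansion of
 * LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3)):
 * if PW3 is powered up, some non-LPSP display resource is in use. */
if (!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3))
	seq_puts(m, "LPSP: enabled\n");
else
	seq_puts(m, "LPSP: disabled\n");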
 static int i915_dp_mst_info(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1910,6 +1956,7 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
 	{"i915_ddb_info", i915_ddb_info, 0},
 	{"i915_drrs_status", i915_drrs_status, 0},
+	{"i915_lpsp_status", i915_lpsp_status, 0},
 };

 static const struct {
@@ -1991,6 +2038,48 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
 }
 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);

+#define LPSP_CAPABLE(COND) (COND ? seq_puts(m, "LPSP: capable\n") : \
+			    seq_puts(m, "LPSP: incapable\n"))
+
+static int i915_lpsp_capability_show(struct seq_file *m, void *data)
+{
+	struct drm_connector *connector = m->private;
+	struct intel_encoder *encoder =
+		intel_attached_encoder(to_intel_connector(connector));
+	struct drm_i915_private *i915 = to_i915(connector->dev);
+
+	if (connector->status != connector_status_connected)
+		return -ENODEV;
+
+	switch (INTEL_GEN(i915)) {
+	case 12:
+		/*
+		 * Actually TGL can drive LPSP on port till DDI_C
+		 * but there is no physical connected DDI_C on TGL sku's,
+		 * even driver is not initilizing DDI_C port for gen12.
+		 */
+		LPSP_CAPABLE(encoder->port <= PORT_B);
+		break;
+	case 11:
+		LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+			     connector->connector_type == DRM_MODE_CONNECTOR_eDP);
+		break;
+	case 10:
+	case 9:
+		LPSP_CAPABLE(encoder->port == PORT_A &&
+			     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+			      connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+			      connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
+		break;
+	default:
+		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+			LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
+	}
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
+
 static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
 {
 	struct drm_connector *connector = m->private;
@@ -2134,5 +2223,16 @@ int intel_connector_debugfs_add(struct drm_connector *connector)
 		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
 				    connector, &i915_dsc_fec_support_fops);

+	/* Legacy panels doesn't lpsp on any platform */
+	if ((INTEL_GEN(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
+	     IS_BROADWELL(dev_priv)) &&
+	    (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+	     connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+	     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
+		debugfs_create_file("i915_lpsp_capability", 0444, root,
+				    connector, &i915_lpsp_capability_fops);
+
 	return 0;
 }

@@ -151,6 +151,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
 		return "GT_IRQ";
 	case POWER_DOMAIN_DPLL_DC_OFF:
 		return "DPLL_DC_OFF";
+	case POWER_DOMAIN_TC_COLD_OFF:
+		return "TC_COLD_OFF";
 	default:
 		MISSING_CASE(domain);
 		return "?";
@@ -282,8 +284,51 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
 		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
 }

+#define ICL_AUX_PW_TO_CH(pw_idx)	\
+	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
+
+#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
+	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
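Both macros rely on the AUX power-well control indices and the aux_ch values being contiguous ranges, so the mapping is plain offset arithmetic; for example:

/* ICL_AUX_PW_TO_CH(ICL_PW_CTL_IDX_AUX_B)
 *   == ICL_PW_CTL_IDX_AUX_B - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A == AUX_CH_B
 * ICL_TBT_AUX_PW_TO_CH(ICL_PW_CTL_IDX_AUX_TBT2)
 *   == ICL_PW_CTL_IDX_AUX_TBT2 - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C == AUX_CH_D
 */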
+static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
+				     struct i915_power_well *power_well)
+{
+	int pw_idx = power_well->desc->hsw.idx;
+
+	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
+						 ICL_AUX_PW_TO_CH(pw_idx);
+}
+
+static struct intel_digital_port *
+aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
+		       enum aux_ch aux_ch)
+{
+	struct intel_digital_port *dig_port = NULL;
+	struct intel_encoder *encoder;
+
+	for_each_intel_encoder(&dev_priv->drm, encoder) {
+		/* We'll check the MST primary port */
+		if (encoder->type == INTEL_OUTPUT_DP_MST)
+			continue;
+
+		dig_port = enc_to_dig_port(encoder);
+		if (!dig_port)
+			continue;
+
+		if (dig_port->aux_ch != aux_ch) {
+			dig_port = NULL;
+			continue;
+		}
+
+		break;
+	}
+
+	return dig_port;
+}
+
 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
-					   struct i915_power_well *power_well)
+					   struct i915_power_well *power_well,
+					   bool timeout_expected)
 {
 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 	int pw_idx = power_well->desc->hsw.idx;
@@ -294,8 +339,8 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
 		drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
 			    power_well->desc->name);

-		/* An AUX timeout is expected if the TBT DP tunnel is down. */
-		drm_WARN_ON(&dev_priv->drm, !power_well->desc->hsw.is_tc_tbt);
+		drm_WARN_ON(&dev_priv->drm, !timeout_expected);

 	}
 }

@@ -358,11 +403,11 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
 {
 	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
 	int pw_idx = power_well->desc->hsw.idx;
-	bool wait_fuses = power_well->desc->hsw.has_fuses;
-	enum skl_power_gate uninitialized_var(pg);
 	u32 val;

-	if (wait_fuses) {
+	if (power_well->desc->hsw.has_fuses) {
+		enum skl_power_gate pg;
+
 		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
 						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
 		/*
@@ -379,19 +424,27 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
 	val = intel_de_read(dev_priv, regs->driver);
 	intel_de_write(dev_priv, regs->driver,
 		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));
-	hsw_wait_for_power_well_enable(dev_priv, power_well);
+
+	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

 	/* Display WA #1178: cnl */
 	if (IS_CANNONLAKE(dev_priv) &&
 	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
 	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
+		u32 val;
+
 		val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
 		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
 		intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
 	}

-	if (wait_fuses)
+	if (power_well->desc->hsw.has_fuses) {
+		enum skl_power_gate pg;
+
+		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
 		gen9_wait_for_power_well_fuses(dev_priv, pg);
+	}

 	hsw_power_well_post_enable(dev_priv,
 				   power_well->desc->hsw.irq_pipe_mask,
@@ -437,7 +490,7 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 			       val | ICL_LANE_ENABLE_AUX);
 	}

-	hsw_wait_for_power_well_enable(dev_priv, power_well);
+	hsw_wait_for_power_well_enable(dev_priv, power_well, false);

 	/* Display WA #1178: icl */
 	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
@@ -470,21 +523,6 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
 	hsw_wait_for_power_well_disable(dev_priv, power_well);
 }

-#define ICL_AUX_PW_TO_CH(pw_idx)	\
-	((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
-
-#define ICL_TBT_AUX_PW_TO_CH(pw_idx)	\
-	((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
-
-static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
-				     struct i915_power_well *power_well)
-{
-	int pw_idx = power_well->desc->hsw.idx;
-
-	return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
-						 ICL_AUX_PW_TO_CH(pw_idx);
-}
-
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

 static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
@@ -501,51 +539,28 @@ static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
 }

 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
-					struct i915_power_well *power_well)
+					struct i915_power_well *power_well,
+					struct intel_digital_port *dig_port)
 {
-	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
-	struct intel_digital_port *dig_port = NULL;
-	struct intel_encoder *encoder;
-
 	/* Bypass the check if all references are released asynchronously */
 	if (power_well_async_ref_count(dev_priv, power_well) ==
 	    power_well->count)
 		return;

-	aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
-
-	for_each_intel_encoder(&dev_priv->drm, encoder) {
-		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-
-		if (!intel_phy_is_tc(dev_priv, phy))
-			continue;
-
-		/* We'll check the MST primary port */
-		if (encoder->type == INTEL_OUTPUT_DP_MST)
-			continue;
-
-		dig_port = enc_to_dig_port(encoder);
-		if (drm_WARN_ON(&dev_priv->drm, !dig_port))
-			continue;
-
-		if (dig_port->aux_ch != aux_ch) {
-			dig_port = NULL;
-			continue;
-		}
-
-		break;
-	}
-
+	if (drm_WARN_ON(&dev_priv->drm, !dig_port))
+		return;

 	if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port)
 		return;

 	drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
 }

 #else

 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
-					struct i915_power_well *power_well)
+					struct i915_power_well *power_well,
+					struct intel_digital_port *dig_port)
 {
 }

@@ -553,24 +568,65 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,

 #define TGL_AUX_PW_TO_TC_PORT(pw_idx)	((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)

+static void icl_tc_cold_exit(struct drm_i915_private *i915)
+{
+	int ret, tries = 0;
+
+	while (1) {
+		ret = sandybridge_pcode_write_timeout(i915,
+						      ICL_PCODE_EXIT_TCCOLD,
+						      0, 250, 1);
+		if (ret != -EAGAIN || ++tries == 3)
+			break;
+		msleep(1);
+	}
+
+	/* Spec states that TC cold exit can take up to 1ms to complete */
+	if (!ret)
+		msleep(1);
+
+	/* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */
+	drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
+		    "succeeded");
+}
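The loop above retries only on transient -EAGAIN from pcode; a behavior-equivalent rewrite of the control flow, for illustration only:

/* Sketch: up to 3 attempts, retrying only when pcode answers -EAGAIN,
 * with a 1ms pause between attempts; any other result ends the loop. */
for (tries = 0; tries < 3; tries++) {
	ret = sandybridge_pcode_write_timeout(i915, ICL_PCODE_EXIT_TCCOLD,
					      0, 250, 1);
	if (ret != -EAGAIN)
		break;
	msleep(1);
}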
 static void
 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 				 struct i915_power_well *power_well)
 {
 	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
+	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
+	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+	bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+	bool timeout_expected;
 	u32 val;

-	icl_tc_port_assert_ref_held(dev_priv, power_well);
+	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

 	val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
 	val &= ~DP_AUX_CH_CTL_TBT_IO;
-	if (power_well->desc->hsw.is_tc_tbt)
+	if (is_tbt)
 		val |= DP_AUX_CH_CTL_TBT_IO;
 	intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);

-	hsw_power_well_enable(dev_priv, power_well);
+	val = intel_de_read(dev_priv, regs->driver);
+	intel_de_write(dev_priv, regs->driver,
+		       val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));

-	if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
+	/*
+	 * An AUX timeout is expected if the TBT DP tunnel is down,
+	 * or need to enable AUX on a legacy TypeC port as part of the TC-cold
+	 * exit sequence.
+	 */
+	timeout_expected = is_tbt;
+	if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port) {
+		icl_tc_cold_exit(dev_priv);
+		timeout_expected = true;
+	}
+
+	hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
+
+	if (INTEL_GEN(dev_priv) >= 12 && !is_tbt) {
 		enum tc_port tc_port;

 		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
@@ -588,11 +644,48 @@ static void
 icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
 				  struct i915_power_well *power_well)
 {
-	icl_tc_port_assert_ref_held(dev_priv, power_well);
+	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
+	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
+
+	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

 	hsw_power_well_disable(dev_priv, power_well);
 }

+static void
+icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
+			  struct i915_power_well *power_well)
+{
+	int pw_idx = power_well->desc->hsw.idx;
+	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);  /* non-TBT only */
+	bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+
+	if (is_tbt || intel_phy_is_tc(dev_priv, phy))
+		return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
+	else if (IS_ICELAKE(dev_priv))
+		return icl_combo_phy_aux_power_well_enable(dev_priv,
+							   power_well);
+	else
+		return hsw_power_well_enable(dev_priv, power_well);
+}
+
+static void
+icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
+			   struct i915_power_well *power_well)
+{
+	int pw_idx = power_well->desc->hsw.idx;
+	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);  /* non-TBT only */
+	bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+
+	if (is_tbt || intel_phy_is_tc(dev_priv, phy))
+		return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
+	else if (IS_ICELAKE(dev_priv))
+		return icl_combo_phy_aux_power_well_disable(dev_priv,
+							    power_well);
+	else
+		return hsw_power_well_disable(dev_priv, power_well);
+}
+
 /*
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
@@ -943,7 +1036,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)

 	/* Power wells at this level and above must be disabled for DC5 entry */
 	if (INTEL_GEN(dev_priv) >= 12)
-		high_pg = TGL_DISP_PW_3;
+		high_pg = ICL_DISP_PW_3;
 	else
 		high_pg = SKL_DISP_PW_2;

@@ -2805,6 +2898,21 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 #define TGL_AUX_I_TBT6_IO_POWER_DOMAINS (	\
 	BIT_ULL(POWER_DOMAIN_AUX_I_TBT))

+#define TGL_TC_COLD_OFF_POWER_DOMAINS (		\
+	BIT_ULL(POWER_DOMAIN_AUX_D)	|	\
+	BIT_ULL(POWER_DOMAIN_AUX_E)	|	\
+	BIT_ULL(POWER_DOMAIN_AUX_F)	|	\
+	BIT_ULL(POWER_DOMAIN_AUX_G)	|	\
+	BIT_ULL(POWER_DOMAIN_AUX_H)	|	\
+	BIT_ULL(POWER_DOMAIN_AUX_I)	|	\
+	BIT_ULL(POWER_DOMAIN_AUX_D_TBT)	|	\
+	BIT_ULL(POWER_DOMAIN_AUX_E_TBT)	|	\
+	BIT_ULL(POWER_DOMAIN_AUX_F_TBT)	|	\
+	BIT_ULL(POWER_DOMAIN_AUX_G_TBT)	|	\
+	BIT_ULL(POWER_DOMAIN_AUX_H_TBT)	|	\
+	BIT_ULL(POWER_DOMAIN_AUX_I_TBT)	|	\
+	BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
+
 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
 	.sync_hw = i9xx_power_well_sync_hw_noop,
 	.enable = i9xx_always_on_power_well_noop,
@@ -3503,17 +3611,10 @@ static const struct i915_power_well_desc cnl_power_wells[] = {
 	},
 };

-static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
+static const struct i915_power_well_ops icl_aux_power_well_ops = {
 	.sync_hw = hsw_power_well_sync_hw,
-	.enable = icl_combo_phy_aux_power_well_enable,
-	.disable = icl_combo_phy_aux_power_well_disable,
-	.is_enabled = hsw_power_well_enabled,
-};
-
-static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
-	.sync_hw = hsw_power_well_sync_hw,
-	.enable = icl_tc_phy_aux_power_well_enable,
-	.disable = icl_tc_phy_aux_power_well_disable,
+	.enable = icl_aux_power_well_enable,
+	.disable = icl_aux_power_well_disable,
 	.is_enabled = hsw_power_well_enabled,
 };

@@ -3571,7 +3672,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
 		.name = "power well 3",
 		.domains = ICL_PW_3_POWER_DOMAINS,
 		.ops = &hsw_power_well_ops,
-		.id = DISP_PW_ID_NONE,
+		.id = ICL_DISP_PW_3,
 		{
 			.hsw.regs = &hsw_power_well_regs,
 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
@@ -3643,7 +3744,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
 	{
 		.name = "AUX A",
 		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
-		.ops = &icl_combo_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -3653,7 +3754,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
 	{
 		.name = "AUX B",
 		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
-		.ops = &icl_combo_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -3663,7 +3764,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
 	{
 		.name = "AUX C TC1",
 		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
-		.ops = &icl_tc_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -3674,7 +3775,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
 	{
 		.name = "AUX D TC2",
 		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
-		.ops = &icl_tc_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -3685,7 +3786,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
 	{
 		.name = "AUX E TC3",
 		.domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
-		.ops = &icl_tc_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -3696,7 +3797,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
 	{
 		.name = "AUX F TC4",
 		.domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
-		.ops = &icl_tc_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -3707,7 +3808,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
 	{
 		.name = "AUX C TBT1",
 		.domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
-		.ops = &icl_tc_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -3718,7 +3819,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
 	{
 		.name = "AUX D TBT2",
 		.domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
-		.ops = &icl_tc_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -3729,7 +3830,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
 	{
 		.name = "AUX E TBT3",
 		.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
-		.ops = &icl_tc_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -3740,7 +3841,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
 	{
 		.name = "AUX F TBT4",
 		.domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
-		.ops = &icl_tc_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -3762,149 +3863,89 @@ static const struct i915_power_well_desc icl_power_wells[] = {
 	},
 };

-static const struct i915_power_well_desc ehl_power_wells[] = {
-	{
-		.name = "always-on",
-		.always_on = true,
-		.domains = POWER_DOMAIN_MASK,
-		.ops = &i9xx_always_on_power_well_ops,
-		.id = DISP_PW_ID_NONE,
-	},
-	{
-		.name = "power well 1",
-		/* Handled by the DMC firmware */
-		.always_on = true,
-		.domains = 0,
-		.ops = &hsw_power_well_ops,
-		.id = SKL_DISP_PW_1,
-		{
-			.hsw.regs = &hsw_power_well_regs,
-			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
-			.hsw.has_fuses = true,
-		},
-	},
-	{
-		.name = "DC off",
-		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
-		.ops = &gen9_dc_off_power_well_ops,
-		.id = SKL_DISP_DC_OFF,
-	},
-	{
-		.name = "power well 2",
-		.domains = ICL_PW_2_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
-		.id = SKL_DISP_PW_2,
-		{
-			.hsw.regs = &hsw_power_well_regs,
-			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
-			.hsw.has_fuses = true,
-		},
-	},
-	{
-		.name = "power well 3",
-		.domains = ICL_PW_3_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
-		.id = DISP_PW_ID_NONE,
-		{
-			.hsw.regs = &hsw_power_well_regs,
-			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
-			.hsw.irq_pipe_mask = BIT(PIPE_B),
-			.hsw.has_vga = true,
-			.hsw.has_fuses = true,
-		},
-	},
-	{
-		.name = "DDI A IO",
-		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
-		.id = DISP_PW_ID_NONE,
-		{
-			.hsw.regs = &icl_ddi_power_well_regs,
-			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
-		},
-	},
-	{
-		.name = "DDI B IO",
-		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
-		.id = DISP_PW_ID_NONE,
-		{
-			.hsw.regs = &icl_ddi_power_well_regs,
-			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
-		},
-	},
-	{
-		.name = "DDI C IO",
-		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
-		.id = DISP_PW_ID_NONE,
-		{
-			.hsw.regs = &icl_ddi_power_well_regs,
-			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
-		},
-	},
-	{
-		.name = "DDI D IO",
-		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
-		.id = DISP_PW_ID_NONE,
-		{
-			.hsw.regs = &icl_ddi_power_well_regs,
-			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
-		},
-	},
-	{
-		.name = "AUX A",
-		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
-		.id = DISP_PW_ID_NONE,
-		{
-			.hsw.regs = &icl_aux_power_well_regs,
-			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
-		},
-	},
-	{
-		.name = "AUX B",
-		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
-		.id = DISP_PW_ID_NONE,
-		{
-			.hsw.regs = &icl_aux_power_well_regs,
-			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
-		},
-	},
-	{
-		.name = "AUX C",
-		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
-		.id = DISP_PW_ID_NONE,
-		{
-			.hsw.regs = &icl_aux_power_well_regs,
-			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
-		},
-	},
-	{
-		.name = "AUX D",
-		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
-		.id = DISP_PW_ID_NONE,
-		{
-			.hsw.regs = &icl_aux_power_well_regs,
-			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
-		},
-	},
-	{
-		.name = "power well 4",
-		.domains = ICL_PW_4_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
-		.id = DISP_PW_ID_NONE,
-		{
-			.hsw.regs = &hsw_power_well_regs,
-			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
-			.hsw.has_fuses = true,
-			.hsw.irq_pipe_mask = BIT(PIPE_C),
-		},
-	},
+static void
+tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
+{
+	u8 tries = 0;
+	int ret;
+
+	while (1) {
+		u32 low_val = 0, high_val;
+
+		if (block)
+			high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ;
+		else
+			high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ;
+
+		/*
+		 * Spec states that we should timeout the request after 200us
+		 * but the function below will timeout after 500us
+		 */
+		ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
+					     &high_val);
+		if (ret == 0) {
+			if (block &&
+			    (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
+				ret = -EIO;
+			else
+				break;
+		}
+
+		if (++tries == 3)
+			break;
+
+		if (ret == -EAGAIN)
+			msleep(1);
+	}
+
+	if (ret)
+		drm_err(&i915->drm, "TC cold %sblock failed\n",
+			block ? "" : "un");
+	else
+		drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
+			    block ? "" : "un");
+}
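The enable/disable hooks that follow reduce to symmetric block/unblock requests through this one helper; in use:

/* Illustration, grounded in the ops below: the "TC cold off" power well
 * maps enable/disable onto blocking/unblocking TC cold via pcode. */
tgl_tc_cold_request(i915, true);	/* enable the well: block TC cold */
tgl_tc_cold_request(i915, false);	/* disable the well: allow TC cold */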
+static void
+tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
+				  struct i915_power_well *power_well)
+{
+	tgl_tc_cold_request(i915, true);
+}
+
+static void
+tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
+				   struct i915_power_well *power_well)
+{
+	tgl_tc_cold_request(i915, false);
+}
+
+static void
+tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
+				   struct i915_power_well *power_well)
+{
+	if (power_well->count > 0)
+		tgl_tc_cold_off_power_well_enable(i915, power_well);
+	else
+		tgl_tc_cold_off_power_well_disable(i915, power_well);
+}
+
+static bool
+tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
+				      struct i915_power_well *power_well)
+{
+	/*
+	 * Not the correctly implementation but there is no way to just read it
+	 * from PCODE, so returning count to avoid state mismatch errors
+	 */
+	return power_well->count;
+}
+
+static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
+	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
+	.enable = tgl_tc_cold_off_power_well_enable,
+	.disable = tgl_tc_cold_off_power_well_disable,
+	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
+};
+
 static const struct i915_power_well_desc tgl_power_wells[] = {
@@ -3949,7 +3990,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 		.name = "power well 3",
 		.domains = TGL_PW_3_POWER_DOMAINS,
 		.ops = &hsw_power_well_ops,
-		.id = TGL_DISP_PW_3,
+		.id = ICL_DISP_PW_3,
 		{
 			.hsw.regs = &hsw_power_well_regs,
 			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
@@ -4051,7 +4092,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX A",
 		.domains = TGL_AUX_A_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4061,7 +4102,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX B",
 		.domains = TGL_AUX_B_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4071,7 +4112,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX C",
 		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4081,7 +4122,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX D TC1",
 		.domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
-		.ops = &icl_tc_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4092,7 +4133,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX E TC2",
 		.domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
-		.ops = &icl_tc_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4103,7 +4144,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX F TC3",
 		.domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
-		.ops = &icl_tc_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4114,7 +4155,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX G TC4",
 		.domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
-		.ops = &icl_tc_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4125,7 +4166,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX H TC5",
 		.domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
-		.ops = &icl_tc_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4136,7 +4177,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX I TC6",
 		.domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
-		.ops = &icl_tc_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4147,7 +4188,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX D TBT1",
 		.domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
-		.ops = &icl_tc_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4158,7 +4199,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX E TBT2",
 		.domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
-		.ops = &icl_tc_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4169,7 +4210,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX F TBT3",
 		.domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
-		.ops = &icl_tc_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4180,7 +4221,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX G TBT4",
 		.domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
-		.ops = &icl_tc_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4191,7 +4232,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX H TBT5",
 		.domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
-		.ops = &icl_tc_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4202,7 +4243,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX I TBT6",
 		.domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
-		.ops = &icl_tc_phy_aux_power_well_ops,
+		.ops = &icl_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4234,6 +4275,12 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 			.hsw.irq_pipe_mask = BIT(PIPE_D),
 		},
 	},
+	{
+		.name = "TC cold off",
+		.domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
+		.ops = &tgl_tc_cold_off_ops,
+		.id = DISP_PW_ID_NONE,
+	},
 };

 static int
@@ -4383,8 +4430,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
 	 */
 	if (IS_GEN(dev_priv, 12)) {
 		err = set_power_wells(power_domains, tgl_power_wells);
-	} else if (IS_ELKHARTLAKE(dev_priv)) {
-		err = set_power_wells(power_domains, ehl_power_wells);
 	} else if (IS_GEN(dev_priv, 11)) {
 		err = set_power_wells(power_domains, icl_power_wells);
 	} else if (IS_CANNONLAKE(dev_priv)) {
@@ -4446,9 +4491,8 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
 	mutex_unlock(&power_domains->lock);
 }

-static inline
-bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
-			  i915_reg_t reg, bool enable)
+static bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
+				 i915_reg_t reg, bool enable)
 {
 	u32 val, status;

@@ -76,6 +76,7 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_MODESET,
 	POWER_DOMAIN_GT_IRQ,
 	POWER_DOMAIN_DPLL_DC_OFF,
+	POWER_DOMAIN_TC_COLD_OFF,
 	POWER_DOMAIN_INIT,

 	POWER_DOMAIN_NUM,
@@ -100,7 +101,7 @@ enum i915_power_well_id {
 	SKL_DISP_PW_MISC_IO,
 	SKL_DISP_PW_1,
 	SKL_DISP_PW_2,
-	TGL_DISP_PW_3,
+	ICL_DISP_PW_3,
 	SKL_DISP_DC_OFF,
 };

@@ -266,6 +267,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain);

 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
 				    enum intel_display_power_domain domain);
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+					 enum i915_power_well_id power_well_id);
 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
 				      enum intel_display_power_domain domain);
 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,

@@ -974,8 +974,7 @@ struct intel_crtc_state {

 	/* Panel fitter placement and size for Ironlake+ */
 	struct {
-		u32 pos;
-		u32 size;
+		struct drm_rect dst;
 		bool enabled;
 		bool force_thru;
 	} pch_pfit;
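With pos/size folded into a drm_rect, the dump path shown earlier can rely on the stock rect printing helpers from <drm/drm_rect.h>; a minimal usage sketch:

/* DRM_RECT_FMT is "%dx%d%+d%+d" and DRM_RECT_ARG() expands to
 * width, height, x1, y1 -- e.g. "1920x1080+0+0". */
drm_dbg_kms(&dev_priv->drm, "pch pfit: " DRM_RECT_FMT "\n",
	    DRM_RECT_ARG(&crtc_state->pch_pfit.dst));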
|
||||
@ -1368,6 +1367,9 @@ struct intel_dp {
|
||||
|
||||
/* This is called before a link training is starterd */
|
||||
void (*prepare_link_retrain)(struct intel_dp *intel_dp);
|
||||
void (*set_link_train)(struct intel_dp *intel_dp, u8 dp_train_pat);
|
||||
void (*set_idle_link_train)(struct intel_dp *intel_dp);
|
||||
void (*set_signal_levels)(struct intel_dp *intel_dp);
|
||||
|
||||
/* Displayport compliance testing */
|
||||
struct intel_dp_compliance compliance;
|
||||
|
@ -48,7 +48,6 @@
|
||||
#include "intel_audio.h"
|
||||
#include "intel_connector.h"
|
||||
#include "intel_ddi.h"
|
||||
#include "intel_display_debugfs.h"
|
||||
#include "intel_display_types.h"
|
||||
#include "intel_dp.h"
|
||||
#include "intel_dp_link_training.h"
|
||||
@ -2340,15 +2339,13 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
|
||||
|
||||
static int
|
||||
intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
|
||||
struct drm_connector *connector,
|
||||
struct intel_crtc_state *crtc_state)
|
||||
struct intel_crtc_state *crtc_state,
|
||||
const struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
|
||||
struct drm_connector *connector = conn_state->connector;
|
||||
const struct drm_display_info *info = &connector->display_info;
|
||||
const struct drm_display_mode *adjusted_mode =
|
||||
&crtc_state->hw.adjusted_mode;
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
int ret;
|
||||
|
||||
if (!drm_mode_is_420_only(info, adjusted_mode) ||
|
||||
!intel_dp_get_colorimetry_status(intel_dp) ||
|
||||
@ -2357,17 +2354,7 @@ intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
|
||||
|
||||
crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
|
||||
|
||||
/* YCBCR 420 output conversion needs a scaler */
|
||||
ret = skl_update_scaler_crtc(crtc_state);
|
||||
if (ret) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Scaler allocation for output failed\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN);
|
||||
|
||||
return 0;
|
||||
return intel_pch_panel_fitting(crtc_state, conn_state);
|
||||
}
|
||||
|
||||
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
|
||||
@ -2546,7 +2533,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
|
||||
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
|
||||
struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
|
||||
enum port port = encoder->port;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
|
||||
struct intel_connector *intel_connector = intel_dp->attached_connector;
|
||||
struct intel_digital_connector_state *intel_conn_state =
|
||||
to_intel_digital_connector_state(conn_state);
|
||||
@ -2562,9 +2548,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
|
||||
if (lspcon->active)
|
||||
lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
|
||||
else
|
||||
ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base,
|
||||
pipe_config);
|
||||
|
||||
ret = intel_dp_ycbcr420_config(intel_dp, pipe_config,
|
||||
conn_state);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -2580,18 +2565,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
|
||||
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
|
||||
adjusted_mode);
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 9) {
|
||||
ret = skl_update_scaler_crtc(pipe_config);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (HAS_GMCH(dev_priv))
|
||||
intel_gmch_panel_fitting(intel_crtc, pipe_config,
|
||||
conn_state->scaling_mode);
|
||||
ret = intel_gmch_panel_fitting(pipe_config, conn_state);
|
||||
else
|
||||
intel_pch_panel_fitting(intel_crtc, pipe_config,
|
||||
conn_state->scaling_mode);
|
||||
ret = intel_pch_panel_fitting(pipe_config, conn_state);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
|
||||
@ -2671,9 +2650,6 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
|
||||
intel_crtc_has_type(pipe_config,
|
||||
INTEL_OUTPUT_DP_MST));
|
||||
|
||||
intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
|
||||
intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
|
||||
|
||||
/*
|
||||
* There are four kinds of DP registers:
|
||||
*
|
||||
@ -3642,90 +3618,63 @@ static void chv_post_disable_dp(struct intel_atomic_state *state,
|
||||
}
|
||||
|
||||
static void
|
||||
_intel_dp_set_link_train(struct intel_dp *intel_dp,
|
||||
u32 *DP,
|
||||
u8 dp_train_pat)
|
||||
cpt_set_link_train(struct intel_dp *intel_dp,
|
||||
u8 dp_train_pat)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
|
||||
enum port port = intel_dig_port->base.port;
|
||||
u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
|
||||
u32 *DP = &intel_dp->DP;
|
||||
|
||||
if (dp_train_pat & train_pat_mask)
|
||||
*DP &= ~DP_LINK_TRAIN_MASK_CPT;
|
||||
|
||||
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
|
||||
case DP_TRAINING_PATTERN_DISABLE:
|
||||
*DP |= DP_LINK_TRAIN_OFF_CPT;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_1:
|
||||
*DP |= DP_LINK_TRAIN_PAT_1_CPT;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_2:
|
||||
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_3:
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Using DP training pattern TPS%d\n",
|
||||
dp_train_pat & train_pat_mask);
|
||||
|
||||
if (HAS_DDI(dev_priv)) {
|
||||
u32 temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
|
||||
|
||||
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
|
||||
temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
|
||||
else
|
||||
temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
|
||||
|
||||
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
|
||||
switch (dp_train_pat & train_pat_mask) {
|
||||
case DP_TRAINING_PATTERN_DISABLE:
|
||||
temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
|
||||
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_1:
|
||||
temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_2:
|
||||
temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_3:
|
||||
temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_4:
|
||||
temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
|
||||
break;
|
||||
}
|
||||
intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp);
|
||||
|
||||
} else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
|
||||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
|
||||
*DP &= ~DP_LINK_TRAIN_MASK_CPT;
|
||||
|
||||
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
|
||||
case DP_TRAINING_PATTERN_DISABLE:
|
||||
*DP |= DP_LINK_TRAIN_OFF_CPT;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_1:
|
||||
*DP |= DP_LINK_TRAIN_PAT_1_CPT;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_2:
|
||||
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_3:
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"TPS3 not supported, using TPS2 instead\n");
|
||||
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
|
||||
break;
|
||||
}
|
||||
|
||||
} else {
|
||||
*DP &= ~DP_LINK_TRAIN_MASK;
|
||||
|
||||
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
|
||||
case DP_TRAINING_PATTERN_DISABLE:
|
||||
*DP |= DP_LINK_TRAIN_OFF;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_1:
|
||||
*DP |= DP_LINK_TRAIN_PAT_1;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_2:
|
||||
*DP |= DP_LINK_TRAIN_PAT_2;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_3:
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"TPS3 not supported, using TPS2 instead\n");
|
||||
*DP |= DP_LINK_TRAIN_PAT_2;
|
||||
break;
|
||||
}
|
||||
"TPS3 not supported, using TPS2 instead\n");
|
||||
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
|
||||
break;
|
||||
}
|
||||
|
||||
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
|
||||
intel_de_posting_read(dev_priv, intel_dp->output_reg);
|
||||
}
|
||||
|
||||
static void
|
||||
g4x_set_link_train(struct intel_dp *intel_dp,
|
||||
u8 dp_train_pat)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
|
||||
u32 *DP = &intel_dp->DP;
|
||||
|
||||
*DP &= ~DP_LINK_TRAIN_MASK;
|
||||
|
||||
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
|
||||
case DP_TRAINING_PATTERN_DISABLE:
|
||||
*DP |= DP_LINK_TRAIN_OFF;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_1:
|
||||
*DP |= DP_LINK_TRAIN_PAT_1;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_2:
|
||||
*DP |= DP_LINK_TRAIN_PAT_2;
|
||||
break;
|
||||
case DP_TRAINING_PATTERN_3:
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"TPS3 not supported, using TPS2 instead\n");
|
||||
*DP |= DP_LINK_TRAIN_PAT_2;
|
||||
break;
|
||||
}
|
||||
|
||||
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
|
||||
intel_de_posting_read(dev_priv, intel_dp->output_reg);
|
||||
}
|
||||
|
||||
static void intel_dp_enable_port(struct intel_dp *intel_dp,
|
||||
@ -4064,7 +4013,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
|
||||
}
|
||||
}
|
||||
|
||||
static u32 vlv_signal_levels(struct intel_dp *intel_dp)
|
||||
static void vlv_set_signal_levels(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
|
||||
unsigned long demph_reg_value, preemph_reg_value,
|
||||
@ -4092,7 +4041,7 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp)
|
||||
uniqtranscale_reg_value = 0x5598DA3A;
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
return;
|
||||
}
|
||||
break;
|
||||
case DP_TRAIN_PRE_EMPH_LEVEL_1:
|
||||
@ -4111,7 +4060,7 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp)
|
||||
uniqtranscale_reg_value = 0x55ADDA3A;
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
return;
|
||||
}
|
||||
break;
|
||||
case DP_TRAIN_PRE_EMPH_LEVEL_2:
|
||||
@ -4126,7 +4075,7 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp)
|
||||
uniqtranscale_reg_value = 0x55ADDA3A;
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
return;
|
||||
}
|
||||
break;
|
||||
case DP_TRAIN_PRE_EMPH_LEVEL_3:
|
||||
@ -4137,20 +4086,18 @@ static u32 vlv_signal_levels(struct intel_dp *intel_dp)
|
||||
uniqtranscale_reg_value = 0x55ADDA3A;
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
return;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
return;
|
||||
}
|
||||
|
||||
vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
|
||||
uniqtranscale_reg_value, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u32 chv_signal_levels(struct intel_dp *intel_dp)
|
||||
static void chv_set_signal_levels(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
|
||||
u32 deemph_reg_value, margin_reg_value;
|
||||
@ -4178,7 +4125,7 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp)
|
||||
uniq_trans_scale = true;
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
return;
|
||||
}
|
||||
break;
|
||||
case DP_TRAIN_PRE_EMPH_LEVEL_1:
|
||||
@@ -4196,7 +4143,7 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp)
margin_reg_value = 154;
break;
default:
return 0;
return;
}
break;
case DP_TRAIN_PRE_EMPH_LEVEL_2:
@@ -4210,7 +4157,7 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp)
margin_reg_value = 154;
break;
default:
return 0;
return;
}
break;
case DP_TRAIN_PRE_EMPH_LEVEL_3:
@@ -4220,21 +4167,18 @@ static u32 chv_signal_levels(struct intel_dp *intel_dp)
margin_reg_value = 154;
break;
default:
return 0;
return;
}
break;
default:
return 0;
return;
}

chv_set_phy_signal_level(encoder, deemph_reg_value,
margin_reg_value, uniq_trans_scale);

return 0;
}

static u32
g4x_signal_levels(u8 train_set)
static u32 g4x_signal_levels(u8 train_set)
{
u32 signal_levels = 0;

@@ -4271,12 +4215,31 @@ g4x_signal_levels(u8 train_set)
return signal_levels;
}

/* SNB CPU eDP voltage swing and pre-emphasis control */
static u32
snb_cpu_edp_signal_levels(u8 train_set)
static void
g4x_set_signal_levels(struct intel_dp *intel_dp)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;

signal_levels = g4x_signal_levels(train_set);

drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
signal_levels);

intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
intel_dp->DP |= signal_levels;

intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

/* SNB CPU eDP voltage swing and pre-emphasis control */
static u32 snb_cpu_edp_signal_levels(u8 train_set)
{
u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);

switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -4299,12 +4262,31 @@ snb_cpu_edp_signal_levels(u8 train_set)
}
}

/* IVB CPU eDP voltage swing and pre-emphasis control */
static u32
ivb_cpu_edp_signal_levels(u8 train_set)
static void
snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;

signal_levels = snb_cpu_edp_signal_levels(train_set);

drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
signal_levels);

intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
intel_dp->DP |= signal_levels;

intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

/* IVB CPU eDP voltage swing and pre-emphasis control */
static u32 ivb_cpu_edp_signal_levels(u8 train_set)
{
u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);

switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
return EDP_LINK_TRAIN_400MV_0DB_IVB;
@@ -4330,38 +4312,29 @@ ivb_cpu_edp_signal_levels(u8 train_set)
}
}

void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
static void
ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
u32 signal_levels, mask = 0;
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;

if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
signal_levels = bxt_signal_levels(intel_dp);
} else if (HAS_DDI(dev_priv)) {
signal_levels = ddi_signal_levels(intel_dp);
mask = DDI_BUF_EMP_MASK;
} else if (IS_CHERRYVIEW(dev_priv)) {
signal_levels = chv_signal_levels(intel_dp);
} else if (IS_VALLEYVIEW(dev_priv)) {
signal_levels = vlv_signal_levels(intel_dp);
} else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
signal_levels = ivb_cpu_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
} else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
signal_levels = snb_cpu_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
} else {
signal_levels = g4x_signal_levels(train_set);
mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
}
signal_levels = ivb_cpu_edp_signal_levels(train_set);

if (mask)
drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
signal_levels);
drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
signal_levels);

intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
intel_dp->DP |= signal_levels;

intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

void intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 train_set = intel_dp->train_set[0];

drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
@@ -4372,55 +4345,28 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
" (max)" : "");

intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;

intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
intel_dp->set_signal_levels(intel_dp);
}

void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
u8 dp_train_pat)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);

_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
if (dp_train_pat & train_pat_mask)
drm_dbg_kms(&dev_priv->drm,
"Using DP training pattern TPS%d\n",
dp_train_pat & train_pat_mask);

intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
intel_dp->set_link_train(intel_dp, dp_train_pat);
}

void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
u32 val;

if (!HAS_DDI(dev_priv))
return;

val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
val |= DP_TP_CTL_LINK_TRAIN_IDLE;
intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);

/*
* Until TGL on PORT_A we can have only eDP in SST mode. There the only
* reason we need to set idle transmission mode is to work around a HW
* issue where we enable the pipe while not in idle link-training mode.
* In this case there is requirement to wait for a minimum number of
* idle patterns to be sent.
*/
if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
return;

if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_IDLE_DONE, 1))
drm_err(&dev_priv->drm,
"Timed out waiting for DP idle patterns\n");
if (intel_dp->set_idle_link_train)
intel_dp->set_idle_link_train(intel_dp);
}

static void
@@ -5567,7 +5513,7 @@ void intel_dp_process_phy_request(struct intel_dp *intel_dp)

static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
u8 test_result = DP_TEST_NAK;
u8 test_result;

test_result = intel_dp_prepare_phytest(intel_dp);
if (test_result != DP_TEST_ACK)
@@ -5629,61 +5575,51 @@ static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
bool bret;
bool need_retrain = false;

if (intel_dp->is_mst) {
u8 esi[DP_DPRX_ESI_LEN] = { 0 };
int ret = 0;
if (!intel_dp->is_mst)
return -EINVAL;

WARN_ON_ONCE(intel_dp->active_mst_links < 0);

for (;;) {
u8 esi[DP_DPRX_ESI_LEN] = {};
bool bret, handled;
int retry;
bool handled;

WARN_ON_ONCE(intel_dp->active_mst_links < 0);
bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
if (bret == true) {

/* check link status - esi[10] = 0x200c */
if (intel_dp->active_mst_links > 0 &&
!drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
drm_dbg_kms(&i915->drm,
"channel EQ not ok, retraining\n");
intel_dp_start_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}

drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

if (handled) {
for (retry = 0; retry < 3; retry++) {
int wret;
wret = drm_dp_dpcd_write(&intel_dp->aux,
DP_SINK_COUNT_ESI+1,
&esi[1], 3);
if (wret == 3) {
break;
}
}

bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
if (bret == true) {
drm_dbg_kms(&i915->drm,
"got esi2 %3ph\n", esi);
goto go_again;
}
} else
ret = 0;

return ret;
} else {
if (!bret) {
drm_dbg_kms(&i915->drm,
"failed to get ESI - device may have failed\n");
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
return -EINVAL;
}

/* check link status - esi[10] = 0x200c */
if (intel_dp->active_mst_links > 0 && !need_retrain &&
!drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
drm_dbg_kms(&i915->drm,
"channel EQ not ok, retraining\n");
need_retrain = true;
}

drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);

drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
if (!handled)
break;

for (retry = 0; retry < 3; retry++) {
int wret;

wret = drm_dp_dpcd_write(&intel_dp->aux,
DP_SINK_COUNT_ESI+1,
&esi[1], 3);
if (wret == 3)
break;
}
}
return -EINVAL;

return need_retrain;
}

static bool
@@ -5720,20 +5656,102 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}

static bool intel_dp_has_connector(struct intel_dp *intel_dp,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_encoder *encoder;
enum pipe pipe;

if (!conn_state->best_encoder)
return false;

/* SST */
encoder = &dp_to_dig_port(intel_dp)->base;
if (conn_state->best_encoder == &encoder->base)
return true;

/* MST */
for_each_pipe(i915, pipe) {
encoder = &intel_dp->mst_encoders[pipe]->base;
if (conn_state->best_encoder == &encoder->base)
return true;
}

return false;
}

static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
struct drm_modeset_acquire_ctx *ctx,
u32 *crtc_mask)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
int ret = 0;

*crtc_mask = 0;

if (!intel_dp_needs_link_retrain(intel_dp))
return 0;

drm_connector_list_iter_begin(&i915->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
struct drm_connector_state *conn_state =
connector->base.state;
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;

if (!intel_dp_has_connector(intel_dp, conn_state))
continue;

crtc = to_intel_crtc(conn_state->crtc);
if (!crtc)
continue;

ret = drm_modeset_lock(&crtc->base.mutex, ctx);
if (ret)
break;

crtc_state = to_intel_crtc_state(crtc->base.state);

drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

if (!crtc_state->hw.active)
continue;

if (conn_state->commit &&
!try_wait_for_completion(&conn_state->commit->hw_done))
continue;

*crtc_mask |= drm_crtc_mask(&crtc->base);
}
drm_connector_list_iter_end(&conn_iter);

if (!intel_dp_needs_link_retrain(intel_dp))
*crtc_mask = 0;

return ret;
}

static bool intel_dp_is_connected(struct intel_dp *intel_dp)
{
struct intel_connector *connector = intel_dp->attached_connector;

return connector->base.status == connector_status_connected ||
intel_dp->is_mst;
}

int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_connector *connector = intel_dp->attached_connector;
struct drm_connector_state *conn_state;
struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
u32 crtc_mask;
int ret;

/* FIXME handle the MST connectors as well */

if (!connector || connector->base.status != connector_status_connected)
if (!intel_dp_is_connected(intel_dp))
return 0;

ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
@@ -5741,46 +5759,42 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
if (ret)
return ret;

conn_state = connector->base.state;

crtc = to_intel_crtc(conn_state->crtc);
if (!crtc)
return 0;

ret = drm_modeset_lock(&crtc->base.mutex, ctx);
ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
if (ret)
return ret;

crtc_state = to_intel_crtc_state(crtc->base.state);

drm_WARN_ON(&dev_priv->drm, !intel_crtc_has_dp_encoder(crtc_state));

if (!crtc_state->hw.active)
if (crtc_mask == 0)
return 0;

if (conn_state->commit &&
!try_wait_for_completion(&conn_state->commit->hw_done))
return 0;
drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
encoder->base.base.id, encoder->base.name);

if (!intel_dp_needs_link_retrain(intel_dp))
return 0;
for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);

/* Suppress underruns caused by re-training */
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
if (crtc_state->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv,
intel_crtc_pch_transcoder(crtc), false);
/* Suppress underruns caused by re-training */
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
if (crtc_state->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv,
intel_crtc_pch_transcoder(crtc), false);
}

intel_dp_start_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);

/* Keep underrun reporting disabled until things are stable */
intel_wait_for_vblank(dev_priv, crtc->pipe);
for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);

intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
if (crtc_state->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv,
intel_crtc_pch_transcoder(crtc), true);
/* Keep underrun reporting disabled until things are stable */
intel_wait_for_vblank(dev_priv, crtc->pipe);

intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
if (crtc_state->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv,
intel_crtc_pch_transcoder(crtc), true);
}

return 0;
}
@@ -6451,8 +6465,6 @@ intel_dp_connector_register(struct drm_connector *connector)
if (ret)
return ret;

intel_connector_debugfs_add(connector);

drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
intel_dp->aux.name, connector->kdev->kobj.name);

@@ -6833,9 +6845,9 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
0, 0 },
};

static inline
int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
u8 *rx_status)
static int
intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
u8 *rx_status)
{
struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
ssize_t ret;
@@ -7424,7 +7436,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
}

if (intel_dp->is_mst) {
if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
switch (intel_dp_check_mst_status(intel_dp)) {
case -EINVAL:
/*
* If we were in MST mode, and device is not
* there, get out of MST mode
@@ -7438,6 +7451,10 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
intel_dp->is_mst);

return IRQ_NONE;
case 1:
return IRQ_NONE;
default:
break;
}
}

@@ -8468,8 +8485,27 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->post_disable = g4x_post_disable_dp;
}

if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A))
intel_dig_port->dp.set_link_train = cpt_set_link_train;
else
intel_dig_port->dp.set_link_train = g4x_set_link_train;

if (IS_CHERRYVIEW(dev_priv))
intel_dig_port->dp.set_signal_levels = chv_set_signal_levels;
else if (IS_VALLEYVIEW(dev_priv))
intel_dig_port->dp.set_signal_levels = vlv_set_signal_levels;
else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
intel_dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
else if (IS_GEN(dev_priv, 6) && port == PORT_A)
intel_dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
else
intel_dig_port->dp.set_signal_levels = g4x_set_signal_levels;

intel_dig_port->dp.output_reg = output_reg;
intel_dig_port->max_lanes = 4;
intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);

intel_encoder->type = INTEL_OUTPUT_DP;
intel_encoder->power_domain = intel_port_to_power_domain(port);
@@ -358,6 +358,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
*/
if (i915->vbt.backlight.type !=
INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
i915_modparams.enable_dpcd_backlight != 1 &&
!drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
drm_info(&i915->drm,
@@ -489,7 +489,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
* here for the following ones.
*/
if (INTEL_GEN(dev_priv) < 12 || !first_mst_stream)
intel_ddi_enable_pipe_clock(pipe_config);
intel_ddi_enable_pipe_clock(encoder, pipe_config);

intel_ddi_set_dp_msa(pipe_config, conn_state);

@@ -508,6 +508,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,

drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);

intel_ddi_enable_transcoder_func(encoder, pipe_config);

intel_enable_pipe(pipe_config);

intel_crtc_vblank_on(pipe_config);
@@ -80,7 +80,7 @@ intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
struct intel_atomic_state *state = to_intel_atomic_state(s);

WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));

if (!state->dpll_set) {
state->dpll_set = true;
@@ -979,7 +979,7 @@ hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);

if (WARN_ON(crtc_state->port_clock / 2 != 135000))
if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
return NULL;

crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
@@ -1616,7 +1616,7 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
ref_clock / 0x8000;

if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
return 0;

return dco_freq / (p0 * p1 * p2 * 5);
@@ -2074,7 +2074,7 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,

clk_div->p1 = best_clock.p1;
clk_div->p2 = best_clock.p2;
WARN_ON(best_clock.m1 != 2);
drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
clk_div->n = best_clock.n;
clk_div->m2_int = best_clock.m2 >> 22;
clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
@@ -34,7 +34,7 @@
#define DSB_BYTE_EN_SHIFT 20
#define DSB_REG_VALUE_MASK 0xfffff

static inline bool is_dsb_busy(struct intel_dsb *dsb)
static bool is_dsb_busy(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -43,7 +43,7 @@ static inline bool is_dsb_busy(struct intel_dsb *dsb)
return DSB_STATUS & intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
}

static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
static bool intel_dsb_enable_engine(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -63,7 +63,7 @@ static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
return true;
}

static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb)
static bool intel_dsb_disable_engine(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -121,7 +121,7 @@ struct i2c_adapter_lookup {
#define ICL_GPIO_DDPA_CTRLCLK_2 8
#define ICL_GPIO_DDPA_CTRLDATA_2 9

static inline enum port intel_dsi_seq_port_to_port(u8 port)
static enum port intel_dsi_seq_port_to_port(u8 port)
{
return port ? PORT_C : PORT_A;
}
@@ -485,9 +485,8 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
if (!ret)
goto err_llb;
else if (ret > 1) {
drm_info(&dev_priv->drm,
"Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

drm_info_once(&dev_priv->drm,
"Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
}

fbc->threshold = ret;
@@ -302,12 +302,14 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
BITS_PER_TYPE(atomic_t));

if (old) {
WARN_ON(!(atomic_read(&old->bits) & frontbuffer_bits));
drm_WARN_ON(old->obj->base.dev,
!(atomic_read(&old->bits) & frontbuffer_bits));
atomic_andnot(frontbuffer_bits, &old->bits);
}

if (new) {
WARN_ON(atomic_read(&new->bits) & frontbuffer_bits);
drm_WARN_ON(new->obj->base.dev,
atomic_read(&new->bits) & frontbuffer_bits);
atomic_or(frontbuffer_bits, &new->bits);
}
}
@@ -64,7 +64,7 @@ static void assert_global_state_read_locked(struct intel_atomic_state *state)
return;
}

WARN(1, "Global state not read locked\n");
drm_WARN(&dev_priv->drm, 1, "Global state not read locked\n");
}

struct intel_global_state *
@@ -148,7 +148,7 @@ void intel_atomic_swap_global_state(struct intel_atomic_state *state)

for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
new_obj_state, i) {
WARN_ON(obj->state != old_obj_state);
drm_WARN_ON(&dev_priv->drm, obj->state != old_obj_state);

/*
* If the new state wasn't modified (and properly
@@ -379,8 +379,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
return ret;
}

static inline
unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv)
static unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv)
{
return INTEL_GEN(dev_priv) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
GMBUS_BYTE_COUNT_MAX;
@@ -109,18 +109,16 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
return capable;
}

static inline
bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, enum port port)
static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, enum port port)
{
return intel_de_read(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
HDCP_STATUS_ENC;
}

static inline
bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, enum port port)
static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, enum port port)
{
return intel_de_read(dev_priv,
HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
@@ -853,8 +851,7 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
return ret;
}

static inline
struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
return container_of(hdcp, struct intel_connector, hdcp);
}
@@ -1856,8 +1853,7 @@ static const struct component_ops i915_hdcp_component_ops = {
.unbind = i915_hdcp_component_unbind,
};

static inline
enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
{
switch (port) {
case PORT_A:
@@ -1869,8 +1865,7 @@ enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
}
}

static inline
enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
{
switch (cpu_transcoder) {
case TRANSCODER_A ... TRANSCODER_D:
@@ -1880,8 +1875,8 @@ enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
}
}

static inline int initialize_hdcp_port_data(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
static int initialize_hdcp_port_data(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
@@ -44,7 +44,6 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
@@ -1615,10 +1614,10 @@ static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
return -EINVAL;
}

static inline
int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
u8 msg_id, bool *msg_ready,
ssize_t *msg_sz)
static int
hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
u8 msg_id, bool *msg_ready,
ssize_t *msg_sz)
{
struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
@@ -1751,12 +1750,6 @@ int intel_hdmi_hdcp2_capable(struct intel_digital_port *intel_dig_port,
return ret;
}

static inline
enum hdcp_wired_protocol intel_hdmi_hdcp2_protocol(void)
{
return HDCP_PROTOCOL_HDMI;
}

static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
.write_an_aksv = intel_hdmi_hdcp_write_an_aksv,
.read_bksv = intel_hdmi_hdcp_read_bksv,
@@ -2328,32 +2321,27 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
return true;
}

static bool
intel_hdmi_ycbcr420_config(struct drm_connector *connector,
struct intel_crtc_state *config)
static int
intel_hdmi_ycbcr420_config(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_connector *connector = conn_state->connector;
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(config->uapi.crtc);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;

if (!drm_mode_is_420_only(&connector->display_info, adjusted_mode))
return 0;

if (!connector->ycbcr_420_allowed) {
drm_err(&i915->drm,
"Platform doesn't support YCBCR420 output\n");
return false;
return -EINVAL;
}

config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;

/* YCBCR 420 output conversion needs a scaler */
if (skl_update_scaler_crtc(config)) {
drm_dbg_kms(&i915->drm,
"Scaler allocation for output failed\n");
return false;
}

intel_pch_panel_fitting(intel_crtc, config,
DRM_MODE_SCALE_FULLSCREEN);

return true;
return intel_pch_panel_fitting(crtc_state, conn_state);
}

static int intel_hdmi_port_clock(int clock, int bpc)
@@ -2481,13 +2469,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
pipe_config->pixel_multiplier = 2;

if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) {
if (!intel_hdmi_ycbcr420_config(connector, pipe_config)) {
drm_err(&dev_priv->drm,
"Can't support YCBCR420 output\n");
return -EINVAL;
}
}
ret = intel_hdmi_ycbcr420_config(pipe_config, conn_state);
if (ret)
return ret;

pipe_config->limited_color_range =
intel_hdmi_limited_color_range(pipe_config, conn_state);
@@ -2878,8 +2862,6 @@ intel_hdmi_connector_register(struct drm_connector *connector)
if (ret)
return ret;

intel_connector_debugfs_add(connector);

intel_hdmi_create_i2c_symlink(connector);

return ret;
@@ -403,6 +403,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
unsigned int lvds_bpp;
int ret;

/* Should never happen!! */
if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
@@ -436,16 +437,15 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;

if (HAS_PCH_SPLIT(dev_priv)) {
if (HAS_PCH_SPLIT(dev_priv))
pipe_config->has_pch_encoder = true;

intel_pch_panel_fitting(intel_crtc, pipe_config,
conn_state->scaling_mode);
} else {
intel_gmch_panel_fitting(intel_crtc, pipe_config,
conn_state->scaling_mode);

}
if (HAS_GMCH(dev_priv))
ret = intel_gmch_panel_fitting(pipe_config, conn_state);
else
ret = intel_pch_panel_fitting(pipe_config, conn_state);
if (ret)
return ret;

/*
* XXX: It would be nice to support lower refresh rates on the
@@ -281,7 +281,7 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
enum pipe pipe = overlay->crtc->pipe;
struct intel_frontbuffer *from = NULL, *to = NULL;

WARN_ON(overlay->old_vma);
drm_WARN_ON(&overlay->i915->drm, overlay->old_vma);

if (overlay->vma)
from = intel_frontbuffer_get(overlay->vma->obj);
@@ -350,7 +350,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
struct i915_vma *vma;

vma = fetch_and_zero(&overlay->old_vma);
if (WARN_ON(!vma))
if (drm_WARN_ON(&overlay->i915->drm, !vma))
return;

intel_frontbuffer_flip_complete(overlay->i915,
@@ -396,7 +396,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
struct i915_request *rq;
u32 *cs, flip_addr = overlay->flip_addr;

WARN_ON(!overlay->active);
drm_WARN_ON(&overlay->i915->drm, !overlay->active);

/* According to intel docs the overlay hw may hang (when switching
* off) without loading the filter coeffs. It is however unclear whether
@@ -176,24 +176,23 @@ intel_panel_vbt_fixed_mode(struct intel_connector *connector)
}

/* adjusted_mode has been preset to be the panel's fixed mode */
void
intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config,
int fitting_mode)
int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int x = 0, y = 0, width = 0, height = 0;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int x, y, width, height;

/* Native modes don't need fitting */
if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h &&
pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
goto done;
if (adjusted_mode->crtc_hdisplay == crtc_state->pipe_src_w &&
adjusted_mode->crtc_vdisplay == crtc_state->pipe_src_h &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
return 0;

switch (fitting_mode) {
switch (conn_state->scaling_mode) {
case DRM_MODE_SCALE_CENTER:
width = pipe_config->pipe_src_w;
height = pipe_config->pipe_src_h;
width = crtc_state->pipe_src_w;
height = crtc_state->pipe_src_h;
x = (adjusted_mode->crtc_hdisplay - width + 1)/2;
y = (adjusted_mode->crtc_vdisplay - height + 1)/2;
break;
@@ -202,18 +201,18 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
/* Scale but preserve the aspect ratio */
{
u32 scaled_width = adjusted_mode->crtc_hdisplay
* pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w
* crtc_state->pipe_src_h;
u32 scaled_height = crtc_state->pipe_src_w
* adjusted_mode->crtc_vdisplay;
if (scaled_width > scaled_height) { /* pillar */
width = scaled_height / pipe_config->pipe_src_h;
width = scaled_height / crtc_state->pipe_src_h;
if (width & 1)
width++;
x = (adjusted_mode->crtc_hdisplay - width + 1) / 2;
y = 0;
height = adjusted_mode->crtc_vdisplay;
} else if (scaled_width < scaled_height) { /* letter */
height = scaled_width / pipe_config->pipe_src_w;
height = scaled_width / crtc_state->pipe_src_w;
if (height & 1)
height++;
y = (adjusted_mode->crtc_vdisplay - height + 1) / 2;
@@ -227,6 +226,10 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
}
break;

case DRM_MODE_SCALE_NONE:
WARN_ON(adjusted_mode->crtc_hdisplay != crtc_state->pipe_src_w);
WARN_ON(adjusted_mode->crtc_vdisplay != crtc_state->pipe_src_h);
/* fall through */
case DRM_MODE_SCALE_FULLSCREEN:
x = y = 0;
width = adjusted_mode->crtc_hdisplay;
@@ -234,14 +237,15 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
break;

default:
WARN(1, "bad panel fit mode: %d\n", fitting_mode);
return;
MISSING_CASE(conn_state->scaling_mode);
return -EINVAL;
}

done:
pipe_config->pch_pfit.pos = (x << 16) | y;
pipe_config->pch_pfit.size = (width << 16) | height;
pipe_config->pch_pfit.enabled = pipe_config->pch_pfit.size != 0;
drm_rect_init(&crtc_state->pch_pfit.dst,
x, y, width, height);
crtc_state->pch_pfit.enabled = true;

return 0;
}

static void
@@ -287,7 +291,7 @@ centre_vertically(struct drm_display_mode *adjusted_mode,
adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + sync_width;
}

static inline u32 panel_fitter_scaling(u32 source, u32 target)
static u32 panel_fitter_scaling(u32 source, u32 target)
{
/*
* Floating point operation is not supported. So the FACTOR
@@ -300,13 +304,14 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
return (FACTOR * ratio + FACTOR/2) / FACTOR;
}

static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
static void i965_scale_aspect(struct intel_crtc_state *crtc_state,
u32 *pfit_control)
{
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
u32 scaled_width = adjusted_mode->crtc_hdisplay *
pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w *
crtc_state->pipe_src_h;
u32 scaled_height = crtc_state->pipe_src_w *
adjusted_mode->crtc_vdisplay;

/* 965+ is easy, it does everything in hw */
@@ -316,18 +321,18 @@ static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
else if (scaled_width < scaled_height)
*pfit_control |= PFIT_ENABLE |
PFIT_SCALING_LETTER;
else if (adjusted_mode->crtc_hdisplay != pipe_config->pipe_src_w)
else if (adjusted_mode->crtc_hdisplay != crtc_state->pipe_src_w)
*pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
}

static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state,
u32 *pfit_control, u32 *pfit_pgm_ratios,
u32 *border)
{
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
u32 scaled_width = adjusted_mode->crtc_hdisplay *
pipe_config->pipe_src_h;
u32 scaled_height = pipe_config->pipe_src_w *
crtc_state->pipe_src_h;
u32 scaled_height = crtc_state->pipe_src_w *
adjusted_mode->crtc_vdisplay;
u32 bits;

@@ -339,11 +344,11 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
if (scaled_width > scaled_height) { /* pillar */
centre_horizontally(adjusted_mode,
scaled_height /
pipe_config->pipe_src_h);
crtc_state->pipe_src_h);

*border = LVDS_BORDER_ENABLE;
if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay) {
bits = panel_fitter_scaling(pipe_config->pipe_src_h,
if (crtc_state->pipe_src_h != adjusted_mode->crtc_vdisplay) {
bits = panel_fitter_scaling(crtc_state->pipe_src_h,
adjusted_mode->crtc_vdisplay);

*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
@@ -355,11 +360,11 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
} else if (scaled_width < scaled_height) { /* letter */
centre_vertically(adjusted_mode,
scaled_width /
pipe_config->pipe_src_w);
crtc_state->pipe_src_w);

*border = LVDS_BORDER_ENABLE;
if (pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
bits = panel_fitter_scaling(pipe_config->pipe_src_w,
if (crtc_state->pipe_src_w != adjusted_mode->crtc_hdisplay) {
bits = panel_fitter_scaling(crtc_state->pipe_src_w,
adjusted_mode->crtc_hdisplay);

*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
@@ -377,35 +382,35 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
}
}

void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config,
int fitting_mode)
int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

/* Native modes don't need fitting */
if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h)
if (adjusted_mode->crtc_hdisplay == crtc_state->pipe_src_w &&
adjusted_mode->crtc_vdisplay == crtc_state->pipe_src_h)
goto out;

switch (fitting_mode) {
switch (conn_state->scaling_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
* heights and modify the values programmed into the CRTC.
*/
centre_horizontally(adjusted_mode, pipe_config->pipe_src_w);
centre_vertically(adjusted_mode, pipe_config->pipe_src_h);
centre_horizontally(adjusted_mode, crtc_state->pipe_src_w);
centre_vertically(adjusted_mode, crtc_state->pipe_src_h);
border = LVDS_BORDER_ENABLE;
break;
case DRM_MODE_SCALE_ASPECT:
/* Scale but preserve the aspect ratio */
if (INTEL_GEN(dev_priv) >= 4)
i965_scale_aspect(pipe_config, &pfit_control);
i965_scale_aspect(crtc_state, &pfit_control);
else
i9xx_scale_aspect(pipe_config, &pfit_control,
i9xx_scale_aspect(crtc_state, &pfit_control,
&pfit_pgm_ratios, &border);
break;
case DRM_MODE_SCALE_FULLSCREEN:
@@ -413,8 +418,8 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
* Full scaling, even if it changes the aspect ratio.
* Fortunately this is all done for us in hw.
*/
if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay ||
pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
if (crtc_state->pipe_src_h != adjusted_mode->crtc_vdisplay ||
crtc_state->pipe_src_w != adjusted_mode->crtc_hdisplay) {
pfit_control |= PFIT_ENABLE;
if (INTEL_GEN(dev_priv) >= 4)
pfit_control |= PFIT_SCALING_AUTO;
@@ -426,15 +431,14 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
}
break;
default:
drm_WARN(&dev_priv->drm, 1, "bad panel fit mode: %d\n",
fitting_mode);
return;
MISSING_CASE(conn_state->scaling_mode);
return -EINVAL;
}

/* 965+ wants fuzzy fitting */
/* FIXME: handle multiple panels by failing gracefully */
if (INTEL_GEN(dev_priv) >= 4)
pfit_control |= PFIT_PIPE(intel_crtc->pipe) | PFIT_FILTER_FUZZY;
pfit_control |= PFIT_PIPE(crtc->pipe) | PFIT_FILTER_FUZZY;

out:
if ((pfit_control & PFIT_ENABLE) == 0) {
@@ -443,12 +447,14 @@ out:
}

/* Make sure pre-965 set dither correctly for 18bpp panels. */
if (INTEL_GEN(dev_priv) < 4 && pipe_config->pipe_bpp == 18)
if (INTEL_GEN(dev_priv) < 4 && crtc_state->pipe_bpp == 18)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;

pipe_config->gmch_pfit.control = pfit_control;
pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
pipe_config->gmch_pfit.lvds_border_bits = border;
crtc_state->gmch_pfit.control = pfit_control;
crtc_state->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
crtc_state->gmch_pfit.lvds_border_bits = border;

return 0;
}

/**
@@ -484,8 +490,8 @@ static u32 scale(u32 source_val,
}

/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
static inline u32 scale_user_to_hw(struct intel_connector *connector,
u32 user_level, u32 user_max)
static u32 scale_user_to_hw(struct intel_connector *connector,
u32 user_level, u32 user_max)
{
struct intel_panel *panel = &connector->panel;

@@ -495,8 +501,8 @@ static inline u32 scale_user_to_hw(struct intel_connector *connector,

/* Scale user_level in range [0..user_max] to [0..hw_max], clamping the result
* to [hw_min..hw_max]. */
static inline u32 clamp_user_to_hw(struct intel_connector *connector,
u32 user_level, u32 user_max)
static u32 clamp_user_to_hw(struct intel_connector *connector,
u32 user_level, u32 user_max)
{
struct intel_panel *panel = &connector->panel;
u32 hw_level;
@@ -508,8 +514,8 @@ static inline u32 clamp_user_to_hw(struct intel_connector *connector,
}

/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */
static inline u32 scale_hw_to_user(struct intel_connector *connector,
u32 hw_level, u32 user_max)
static u32 scale_hw_to_user(struct intel_connector *connector,
u32 hw_level, u32 user_max)
{
struct intel_panel *panel = &connector->panel;
@@ -25,12 +25,10 @@ int intel_panel_init(struct intel_panel *panel,
void intel_panel_fini(struct intel_panel *panel);
void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
void intel_pch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
int fitting_mode);
void intel_gmch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
int fitting_mode);
int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state,
u32 level, u32 max);
int intel_panel_setup_backlight(struct drm_connector *connector,
@@ -34,6 +34,7 @@ tc_port_load_fia_params(struct drm_i915_private *i915,
if (INTEL_INFO(i915)->display.has_modular_fia) {
modular_fia = intel_uncore_read(&i915->uncore,
PORT_TX_DFLEXDPSP(FIA1));
drm_WARN_ON(&i915->drm, modular_fia == 0xffffffff);
modular_fia &= MODULAR_FIA_MASK;
} else {
modular_fia = 0;
@@ -52,6 +53,62 @@ tc_port_load_fia_params(struct drm_i915_private *i915,
}
}

static enum intel_display_power_domain
tc_cold_get_power_domain(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

if (INTEL_GEN(i915) == 11)
return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
else
return POWER_DOMAIN_TC_COLD_OFF;
}

static intel_wakeref_t
tc_cold_block(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum intel_display_power_domain domain;

if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port)
return 0;

domain = tc_cold_get_power_domain(dig_port);
return intel_display_power_get(i915, domain);
}

static void
tc_cold_unblock(struct intel_digital_port *dig_port, intel_wakeref_t wakeref)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum intel_display_power_domain domain;

/*
* wakeref == -1, means some error happened saving save_depot_stack but
* power should still be put down and 0 is a invalid save_depot_stack
* id so can be used to skip it for non TC legacy ports.
*/
if (wakeref == 0)
return;

domain = tc_cold_get_power_domain(dig_port);
intel_display_power_put_async(i915, domain, wakeref);
}

static void
assert_tc_cold_blocked(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
bool enabled;

if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port)
return;

enabled = intel_display_power_is_enabled(i915,
tc_cold_get_power_domain(dig_port));
drm_WARN_ON(&i915->drm, !enabled);
}

u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@@ -62,6 +119,7 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
assert_tc_cold_blocked(dig_port);

lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
@@ -77,6 +135,7 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));

drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
assert_tc_cold_blocked(dig_port);

return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
@@ -91,6 +150,8 @@ int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
if (dig_port->tc_mode != TC_PORT_DP_ALT)
return 4;

assert_tc_cold_blocked(dig_port);

lane_mask = 0;
with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
lane_mask = intel_tc_port_get_lane_mask(dig_port);
@@ -123,6 +184,8 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
drm_WARN_ON(&i915->drm,
lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);

assert_tc_cold_blocked(dig_port);

val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);
@@ -420,9 +483,14 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
enum tc_port_mode old_tc_mode = dig_port->tc_mode;

intel_display_power_flush_work(i915);
drm_WARN_ON(&i915->drm,
intel_display_power_is_enabled(i915,
intel_aux_power_domain(dig_port)));
if (INTEL_GEN(i915) != 11 || !dig_port->tc_legacy_port) {
enum intel_display_power_domain aux_domain;
bool aux_powered;

aux_domain = intel_aux_power_domain(dig_port);
aux_powered = intel_display_power_is_enabled(i915, aux_domain);
drm_WARN_ON(&i915->drm, aux_powered);
}

icl_tc_phy_disconnect(dig_port);
icl_tc_phy_connect(dig_port, required_lanes);
@@ -445,9 +513,11 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_encoder *encoder = &dig_port->base;
intel_wakeref_t tc_cold_wref;
int active_links = 0;

mutex_lock(&dig_port->tc_lock);
tc_cold_wref = tc_cold_block(dig_port);

dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
if (dig_port->dp.is_mst)
@@ -473,6 +543,7 @@ out:
dig_port->tc_port_name,
tc_port_mode_name(dig_port->tc_mode));

tc_cold_unblock(dig_port, tc_cold_wref);
mutex_unlock(&dig_port->tc_lock);
}

@@ -494,10 +565,15 @@ static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
bool intel_tc_port_connected(struct intel_digital_port *dig_port)
{
bool is_connected;
intel_wakeref_t tc_cold_wref;

intel_tc_port_lock(dig_port);
tc_cold_wref = tc_cold_block(dig_port);

is_connected = tc_port_live_status_mask(dig_port) &
BIT(dig_port->tc_mode);

tc_cold_unblock(dig_port, tc_cold_wref);
intel_tc_port_unlock(dig_port);

return is_connected;
@@ -513,9 +589,16 @@ static void __intel_tc_port_lock(struct intel_digital_port *dig_port,

mutex_lock(&dig_port->tc_lock);

if (!dig_port->tc_link_refcount &&
intel_tc_port_needs_reset(dig_port))
intel_tc_port_reset_mode(dig_port, required_lanes);
if (!dig_port->tc_link_refcount) {
intel_wakeref_t tc_cold_wref;

tc_cold_wref = tc_cold_block(dig_port);

if (intel_tc_port_needs_reset(dig_port))
intel_tc_port_reset_mode(dig_port, required_lanes);

tc_cold_unblock(dig_port, tc_cold_wref);
}

drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
dig_port->tc_lock_wakeref = wakeref;
@@ -267,7 +267,6 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int ret;
@@ -279,11 +278,11 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
intel_fixed_panel_mode(fixed_mode, adjusted_mode);

if (HAS_GMCH(dev_priv))
intel_gmch_panel_fitting(crtc, pipe_config,
conn_state->scaling_mode);
ret = intel_gmch_panel_fitting(pipe_config, conn_state);
else
intel_pch_panel_fitting(crtc, pipe_config,
conn_state->scaling_mode);
ret = intel_pch_panel_fitting(pipe_config, conn_state);
if (ret)
return ret;
}

if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -864,7 +863,7 @@ static void bxt_dsi_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
WARN_ON(crtc_state->has_pch_encoder);
drm_WARN_ON(state->base.dev, crtc_state->has_pch_encoder);

intel_crtc_vblank_on(crtc_state);
}
@@ -130,9 +130,7 @@ static void lut_close(struct i915_gem_context *ctx)
if (&lut->obj_link != &obj->lut_list) {
i915_lut_handle_free(lut);
radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
if (atomic_dec_and_test(&vma->open_count) &&
!i915_vma_is_ggtt(vma))
i915_vma_close(vma);
i915_vma_close(vma);
i915_gem_object_put(obj);
}

@@ -368,7 +368,6 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_vma *vma;

GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
if (list_empty(&obj->vma.list))
return;

@@ -400,12 +399,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;

assert_object_held(obj);

/* Bump the LRU to try and avoid premature eviction whilst flipping */
i915_gem_object_bump_inactive_ggtt(obj);
i915_gem_object_bump_inactive_ggtt(vma->obj);

i915_vma_unpin(vma);
}
@@ -830,7 +830,7 @@ static int __eb_add_lut(struct i915_execbuffer *eb,
return 0;

err:
atomic_dec(&vma->open_count);
i915_vma_close(vma);
i915_vma_put(vma);
i915_lut_handle_free(lut);
return err;
@@ -135,9 +135,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
if (vma) {
GEM_BUG_ON(vma->obj != obj);
GEM_BUG_ON(!atomic_read(&vma->open_count));
if (atomic_dec_and_test(&vma->open_count) &&
!i915_vma_is_ggtt(vma))
i915_vma_close(vma);
i915_vma_close(vma);
}
mutex_unlock(&ctx->mutex);

@ -182,21 +182,35 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
|
||||
int tiling_mode, unsigned int stride)
|
||||
{
|
||||
struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
|
||||
struct i915_vma *vma;
|
||||
struct i915_vma *vma, *vn;
|
||||
LIST_HEAD(unbind);
|
||||
int ret = 0;
|
||||
|
||||
if (tiling_mode == I915_TILING_NONE)
|
||||
return 0;
|
||||
|
||||
mutex_lock(&ggtt->vm.mutex);
|
||||
|
||||
spin_lock(&obj->vma.lock);
|
||||
for_each_ggtt_vma(vma, obj) {
|
||||
GEM_BUG_ON(vma->vm != &ggtt->vm);
|
||||
|
||||
if (i915_vma_fence_prepare(vma, tiling_mode, stride))
|
||||
continue;
|
||||
|
||||
ret = __i915_vma_unbind(vma);
|
||||
if (ret)
|
||||
break;
|
||||
list_move(&vma->vm_link, &unbind);
|
||||
}
|
||||
spin_unlock(&obj->vma.lock);
|
||||
|
||||
list_for_each_entry_safe(vma, vn, &unbind, vm_link) {
|
||||
ret = __i915_vma_unbind(vma);
|
||||
if (ret) {
|
||||
/* Restore the remaining vma on an error */
|
||||
list_splice(&unbind, &ggtt->vm.bound_list);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&ggtt->vm.mutex);
|
||||
|
||||
return ret;
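The hunk above is the shape of the "Hold obj->vma.lock over for_each_ggtt_vma()" fix: nothing that can sleep runs under the spinlock; candidate vma are first staged onto a private list, and the unbinding (which may block) happens afterwards under the vm mutex alone. A minimal sketch of the pattern, reusing the names from the code above:

	/* Sketch: gather under the spinlock, do the sleeping work outside it. */
	LIST_HEAD(stash);

	spin_lock(&obj->vma.lock);
	for_each_ggtt_vma(vma, obj)
		list_move(&vma->vm_link, &stash);	/* cheap, non-blocking */
	spin_unlock(&obj->vma.lock);

	list_for_each_entry_safe(vma, vn, &stash, vm_link)
		__i915_vma_unbind(vma);			/* may block; spinlock dropped */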
@@ -268,6 +282,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
	}
	mutex_unlock(&obj->mm.lock);

	spin_lock(&obj->vma.lock);
	for_each_ggtt_vma(vma, obj) {
		vma->fence_size =
			i915_gem_fence_size(i915, vma->size, tiling, stride);
@@ -278,6 +293,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
		if (vma->fence)
			vma->fence->dirty = true;
	}
	spin_unlock(&obj->vma.lock);

	obj->tiling_and_stride = tiling | stride;
	i915_gem_object_unlock(obj);

@@ -421,7 +421,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_close;
		goto out_put;

	err = igt_check_page_sizes(vma);

@@ -432,8 +432,6 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
	}

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_gem_object_put(obj);

	if (err)
@@ -443,8 +441,6 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)

	goto out_device;

out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);
out_device:
@@ -492,7 +488,7 @@ static int igt_mock_memory_region_huge_pages(void *arg)

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_close;
		goto out_put;

	err = igt_check_page_sizes(vma);
	if (err)
@@ -515,8 +511,6 @@ static int igt_mock_memory_region_huge_pages(void *arg)
	}

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	__i915_gem_object_put_pages(obj);
	i915_gem_object_put(obj);
}
@@ -526,8 +520,6 @@ static int igt_mock_memory_region_huge_pages(void *arg)

out_unpin:
	i915_vma_unpin(vma);
out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);
out_region:
@@ -587,10 +579,8 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err) {
		i915_vma_close(vma);
	if (err)
		goto out_unpin;
	}

	err = igt_check_page_sizes(vma);
@@ -603,10 +593,8 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)

	i915_vma_unpin(vma);

	if (err) {
		i915_vma_close(vma);
	if (err)
		goto out_unpin;
	}

	/*
	 * Try all the other valid offsets until the next
@@ -615,16 +603,12 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
	 */
	for (offset = 4096; offset < page_size; offset += 4096) {
		err = i915_vma_unbind(vma);
		if (err) {
			i915_vma_close(vma);
		if (err)
			goto out_unpin;
		}

		err = i915_vma_pin(vma, 0, 0, flags | offset);
		if (err) {
			i915_vma_close(vma);
		if (err)
			goto out_unpin;
		}

		err = igt_check_page_sizes(vma);

@@ -636,10 +620,8 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)

		i915_vma_unpin(vma);

		if (err) {
			i915_vma_close(vma);
		if (err)
			goto out_unpin;
		}

		if (igt_timeout(end_time,
				"%s timed out at offset %x with page-size %x\n",
@@ -647,8 +629,6 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
			break;
	}

	i915_vma_close(vma);

	i915_gem_object_unpin_pages(obj);
	__i915_gem_object_put_pages(obj);
	i915_gem_object_put(obj);
@@ -670,12 +650,6 @@ static void close_object_list(struct list_head *objects,
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (!IS_ERR(vma))
			i915_vma_close(vma);

		list_del(&obj->st_link);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
@@ -912,7 +886,7 @@ static int igt_mock_ppgtt_64K(void *arg)

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_vma_close;
		goto out_object_unpin;

	err = igt_check_page_sizes(vma);
	if (err)
@@ -945,8 +919,6 @@ static int igt_mock_ppgtt_64K(void *arg)
	}

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_gem_object_unpin_pages(obj);
	__i915_gem_object_put_pages(obj);
	i915_gem_object_put(obj);
@@ -957,8 +929,6 @@ static int igt_mock_ppgtt_64K(void *arg)

out_vma_unpin:
	i915_vma_unpin(vma);
out_vma_close:
	i915_vma_close(vma);
out_object_unpin:
	i915_gem_object_unpin_pages(obj);
out_object_put:
@@ -1070,7 +1040,7 @@ static int __igt_write_huge(struct intel_context *ce,

	err = i915_vma_unbind(vma);
	if (err)
		goto out_vma_close;
		return err;

	err = i915_vma_pin(vma, size, 0, flags | offset);
	if (err) {
@@ -1081,7 +1051,7 @@ static int __igt_write_huge(struct intel_context *ce,
		if (err == -ENOSPC && i915_is_ggtt(ce->vm))
			err = 0;

		goto out_vma_close;
		return err;
	}

	err = igt_check_page_sizes(vma);
@@ -1102,8 +1072,6 @@ static int __igt_write_huge(struct intel_context *ce,

out_vma_unpin:
	i915_vma_unpin(vma);
out_vma_close:
	__i915_vma_put(vma);
	return err;
}

@@ -1477,8 +1445,10 @@ static int igt_ppgtt_pin_update(void *arg)
	unsigned int page_size = BIT(first);

	obj = i915_gem_object_create_internal(dev_priv, page_size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_vm;
	}

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
@@ -1488,7 +1458,7 @@ static int igt_ppgtt_pin_update(void *arg)

	err = i915_vma_pin(vma, SZ_2M, 0, flags);
	if (err)
		goto out_close;
		goto out_put;

	if (vma->page_sizes.sg < page_size) {
		pr_info("Unable to allocate page-size %x, finishing test early\n",
@@ -1525,14 +1495,14 @@ static int igt_ppgtt_pin_update(void *arg)
		goto out_unpin;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_gem_object_put(obj);
	}

	obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_vm;
	}

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
@@ -1542,7 +1512,7 @@ static int igt_ppgtt_pin_update(void *arg)

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_close;
		goto out_put;

	/*
	 * Make sure we don't end up with something like where the pde is still
@@ -1572,8 +1542,6 @@ static int igt_ppgtt_pin_update(void *arg)

out_unpin:
	i915_vma_unpin(vma);
out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);
out_vm:
@@ -1625,13 +1593,11 @@ static int igt_tmpfs_fallback(void *arg)

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_close;
		goto out_put;

	err = igt_check_page_sizes(vma);

	i915_vma_unpin(vma);
out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);
out_restore:
@@ -1678,7 +1644,7 @@ static int igt_shrink_thp(void *arg)

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_close;
		goto out_put;

	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
		pr_info("failed to allocate THP, finishing test early\n");
@@ -1702,7 +1668,7 @@ static int igt_shrink_thp(void *arg)
	i915_gem_context_unlock_engines(ctx);
	i915_vma_unpin(vma);
	if (err)
		goto out_close;
		goto out_put;

	/*
	 * Now that the pages are *unpinned* shrink-all should invoke
@@ -1712,18 +1678,18 @@ static int igt_shrink_thp(void *arg)
	if (i915_gem_object_has_pages(obj)) {
		pr_err("shrink-all didn't truncate the pages\n");
		err = -EINVAL;
		goto out_close;
		goto out_put;
	}

	if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
		pr_err("residual page-size bits left\n");
		err = -EINVAL;
		goto out_close;
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_close;
		goto out_put;

	while (n--) {
		err = cpu_check(obj, n, 0xdeadbeaf);
@@ -1733,8 +1699,6 @@ static int igt_shrink_thp(void *arg)

out_unpin:
	i915_vma_unpin(vma);
out_close:
	i915_vma_close(vma);
out_put:
	i915_gem_object_put(obj);
out_vm:
@@ -1773,21 +1737,20 @@ int i915_gem_huge_page_mock_selftests(void)
	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		pr_err("failed to create 48b PPGTT\n");
		err = -EINVAL;
		goto out_close;
		goto out_put;
	}

	/* If we were ever hit this then it's time to mock the 64K scratch */
	if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
		pr_err("PPGTT missing 64K scratch page\n");
		err = -EINVAL;
		goto out_close;
		goto out_put;
	}

	err = i915_subtests(tests, ppgtt);

out_close:
out_put:
	i915_vm_put(&ppgtt->vm);

out_unlock:
	drm_dev_put(&dev_priv->drm);
	return err;
@@ -7,9 +7,12 @@

#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gpu_commands.h"
#include "gem/i915_gem_lmem.h"

#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
#include "selftests/i915_random.h"
#include "huge_gem_object.h"
#include "mock_context.h"

@@ -127,10 +130,602 @@ static int igt_client_fill(void *arg)
	} while (1);
}

#define WIDTH 512
#define HEIGHT 32

struct blit_buffer {
	struct i915_vma *vma;
	u32 start_val;
	u32 tiling;
};

struct tiled_blits {
	struct intel_context *ce;
	struct blit_buffer buffers[3];
	struct blit_buffer scratch;
	struct i915_vma *batch;
	u64 hole;
	u32 width;
	u32 height;
};

static int prepare_blit(const struct tiled_blits *t,
			struct blit_buffer *dst,
			struct blit_buffer *src,
			struct drm_i915_gem_object *batch)
{
	const int gen = INTEL_GEN(to_i915(batch->base.dev));
	bool use_64b_reloc = gen >= 8;
	u32 src_pitch, dst_pitch;
	u32 cmd, *cs;

	cs = i915_gem_object_pin_map(batch, I915_MAP_WC);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(BCS_SWCTRL);
	cmd = (BCS_SRC_Y | BCS_DST_Y) << 16;
	if (src->tiling == I915_TILING_Y)
		cmd |= BCS_SRC_Y;
	if (dst->tiling == I915_TILING_Y)
		cmd |= BCS_DST_Y;
	*cs++ = cmd;

	cmd = MI_FLUSH_DW;
	if (gen >= 8)
		cmd++;
	*cs++ = cmd;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;

	cmd = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (8 - 2);
	if (gen >= 8)
		cmd += 2;

	src_pitch = t->width * 4;
	if (src->tiling) {
		cmd |= XY_SRC_COPY_BLT_SRC_TILED;
		src_pitch /= 4;
	}

	dst_pitch = t->width * 4;
	if (dst->tiling) {
		cmd |= XY_SRC_COPY_BLT_DST_TILED;
		dst_pitch /= 4;
	}

	*cs++ = cmd;
	*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | dst_pitch;
	*cs++ = 0;
	*cs++ = t->height << 16 | t->width;
	*cs++ = lower_32_bits(dst->vma->node.start);
	if (use_64b_reloc)
		*cs++ = upper_32_bits(dst->vma->node.start);
	*cs++ = 0;
	*cs++ = src_pitch;
	*cs++ = lower_32_bits(src->vma->node.start);
	if (use_64b_reloc)
		*cs++ = upper_32_bits(src->vma->node.start);

	*cs++ = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(batch);
	i915_gem_object_unpin_map(batch);

	return 0;
}

static void tiled_blits_destroy_buffers(struct tiled_blits *t)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(t->buffers); i++)
		i915_vma_put(t->buffers[i].vma);

	i915_vma_put(t->scratch.vma);
	i915_vma_put(t->batch);
}

static struct i915_vma *
__create_vma(struct tiled_blits *t, size_t size, bool lmem)
{
	struct drm_i915_private *i915 = t->ce->vm->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	if (lmem)
		obj = i915_gem_object_create_lmem(i915, size, 0);
	else
		obj = i915_gem_object_create_shmem(i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, t->ce->vm, NULL);
	if (IS_ERR(vma))
		i915_gem_object_put(obj);

	return vma;
}

static struct i915_vma *create_vma(struct tiled_blits *t, bool lmem)
{
	return __create_vma(t, PAGE_ALIGN(t->width * t->height * 4), lmem);
}

static int tiled_blits_create_buffers(struct tiled_blits *t,
				      int width, int height,
				      struct rnd_state *prng)
{
	struct drm_i915_private *i915 = t->ce->engine->i915;
	int i;

	t->width = width;
	t->height = height;

	t->batch = __create_vma(t, PAGE_SIZE, false);
	if (IS_ERR(t->batch))
		return PTR_ERR(t->batch);

	t->scratch.vma = create_vma(t, false);
	if (IS_ERR(t->scratch.vma)) {
		i915_vma_put(t->batch);
		return PTR_ERR(t->scratch.vma);
	}

	for (i = 0; i < ARRAY_SIZE(t->buffers); i++) {
		struct i915_vma *vma;

		vma = create_vma(t, HAS_LMEM(i915) && i % 2);
		if (IS_ERR(vma)) {
			tiled_blits_destroy_buffers(t);
			return PTR_ERR(vma);
		}

		t->buffers[i].vma = vma;
		t->buffers[i].tiling =
			i915_prandom_u32_max_state(I915_TILING_Y + 1, prng);
	}

	return 0;
}

static void fill_scratch(struct tiled_blits *t, u32 *vaddr, u32 val)
{
	int i;

	t->scratch.start_val = val;
	for (i = 0; i < t->width * t->height; i++)
		vaddr[i] = val++;

	i915_gem_object_flush_map(t->scratch.vma->obj);
}

static void hexdump(const void *buf, size_t len)
{
	const size_t rowsize = 8 * sizeof(u32);
	const void *prev = NULL;
	bool skip = false;
	size_t pos;

	for (pos = 0; pos < len; pos += rowsize) {
		char line[128];

		if (prev && !memcmp(prev, buf + pos, rowsize)) {
			if (!skip) {
				pr_info("*\n");
				skip = true;
			}
			continue;
		}

		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
						rowsize, sizeof(u32),
						line, sizeof(line),
						false) >= sizeof(line));
		pr_info("[%04zx] %s\n", pos, line);

		prev = buf + pos;
		skip = false;
	}
}

static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

static u64 tiled_offset(const struct intel_gt *gt,
			u64 v,
			unsigned int stride,
			unsigned int tiling)
{
	unsigned int swizzle;
	u64 x, y;

	if (tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, stride, &x);

	if (tiling == I915_TILING_X) {
		v = div64_u64_rem(y, 8, &y) * stride * 8;
		v += y * 512;
		v += div64_u64_rem(x, 512, &x) << 12;
		v += x;

		swizzle = gt->ggtt->bit_6_swizzle_x;
	} else {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v = div64_u64_rem(y, 32, &y) * stride * 32;
		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;

		swizzle = gt->ggtt->bit_6_swizzle_y;
	}

	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}
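/*
 * A worked example of the X-tile mapping above (illustrative only; the
 * values assume stride = 2048, i.e. WIDTH 512 at 4 bytes per pixel).
 * X-tiles are 4K pages of 8 rows x 512 bytes. For the byte at
 * (x = 600, y = 9), i.e. linear v = 9 * 2048 + 600:
 *
 *	tile row:  y / 8 = 1   ->  v  = 1 * 2048 * 8 = 16384
 *	row in tile: y % 8 = 1 ->  v += 1 * 512      = 16896
 *	tile column: x / 512 = 1 -> v += 1 << 12     = 20992
 *	byte in row: x % 512 = 88 -> v += 88         = 21080
 *
 * before the optional bit-6 swizzle is XORed in.
 */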

static const char *repr_tiling(int tiling)
{
	switch (tiling) {
	case I915_TILING_NONE: return "linear";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	default: return "unknown";
	}
}

static int verify_buffer(const struct tiled_blits *t,
			 struct blit_buffer *buf,
			 struct rnd_state *prng)
{
	const u32 *vaddr;
	int ret = 0;
	int x, y, p;

	x = i915_prandom_u32_max_state(t->width, prng);
	y = i915_prandom_u32_max_state(t->height, prng);
	p = y * t->width + x;

	vaddr = i915_gem_object_pin_map(buf->vma->obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (vaddr[0] != buf->start_val) {
		ret = -EINVAL;
	} else {
		u64 v = tiled_offset(buf->vma->vm->gt,
				     p * 4, t->width * 4,
				     buf->tiling);

		if (vaddr[v / sizeof(*vaddr)] != buf->start_val + p)
			ret = -EINVAL;
	}
	if (ret) {
		pr_err("Invalid %s tiling detected at (%d, %d), start_val %x\n",
		       repr_tiling(buf->tiling),
		       x, y, buf->start_val);
		hexdump(vaddr, 4096);
	}

	i915_gem_object_unpin_map(buf->vma->obj);
	return ret;
}

static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}

static int pin_buffer(struct i915_vma *vma, u64 addr)
{
	int err;

	if (drm_mm_node_allocated(&vma->node) && vma->node.start != addr) {
		err = i915_vma_unbind(vma);
		if (err)
			return err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED | addr);
	if (err)
		return err;

	return 0;
}

static int
tiled_blit(struct tiled_blits *t,
	   struct blit_buffer *dst, u64 dst_addr,
	   struct blit_buffer *src, u64 src_addr)
{
	struct i915_request *rq;
	int err;

	err = pin_buffer(src->vma, src_addr);
	if (err) {
		pr_err("Cannot pin src @ %llx\n", src_addr);
		return err;
	}

	err = pin_buffer(dst->vma, dst_addr);
	if (err) {
		pr_err("Cannot pin dst @ %llx\n", dst_addr);
		goto err_src;
	}

	err = i915_vma_pin(t->batch, 0, 0, PIN_USER | PIN_HIGH);
	if (err) {
		pr_err("cannot pin batch\n");
		goto err_dst;
	}

	err = prepare_blit(t, dst, src, t->batch->obj);
	if (err)
		goto err_bb;

	rq = intel_context_create_request(t->ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_bb;
	}

	err = move_to_active(t->batch, rq, 0);
	if (!err)
		err = move_to_active(src->vma, rq, 0);
	if (!err)
		err = move_to_active(dst->vma, rq, 0);
	if (!err)
		err = rq->engine->emit_bb_start(rq,
						t->batch->node.start,
						t->batch->node.size,
						0);
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 2) < 0)
		err = -ETIME;
	i915_request_put(rq);

	dst->start_val = src->start_val;
err_bb:
	i915_vma_unpin(t->batch);
err_dst:
	i915_vma_unpin(dst->vma);
err_src:
	i915_vma_unpin(src->vma);
	return err;
}

static struct tiled_blits *
tiled_blits_create(struct intel_engine_cs *engine, struct rnd_state *prng)
{
	struct drm_mm_node hole;
	struct tiled_blits *t;
	u64 hole_size;
	int err;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);

	t->ce = intel_context_create(engine);
	if (IS_ERR(t->ce)) {
		err = PTR_ERR(t->ce);
		goto err_free;
	}

	hole_size = 2 * PAGE_ALIGN(WIDTH * HEIGHT * 4);
	hole_size *= 2; /* room to maneuver */
	hole_size += 2 * I915_GTT_MIN_ALIGNMENT;

	mutex_lock(&t->ce->vm->mutex);
	memset(&hole, 0, sizeof(hole));
	err = drm_mm_insert_node_in_range(&t->ce->vm->mm, &hole,
					  hole_size, 0, I915_COLOR_UNEVICTABLE,
					  0, U64_MAX,
					  DRM_MM_INSERT_BEST);
	if (!err)
		drm_mm_remove_node(&hole);
	mutex_unlock(&t->ce->vm->mutex);
	if (err) {
		err = -ENODEV;
		goto err_put;
	}

	t->hole = hole.start + I915_GTT_MIN_ALIGNMENT;
	pr_info("Using hole at %llx\n", t->hole);

	err = tiled_blits_create_buffers(t, WIDTH, HEIGHT, prng);
	if (err)
		goto err_put;

	return t;

err_put:
	intel_context_put(t->ce);
err_free:
	kfree(t);
	return ERR_PTR(err);
}

static void tiled_blits_destroy(struct tiled_blits *t)
{
	tiled_blits_destroy_buffers(t);

	intel_context_put(t->ce);
	kfree(t);
}

static int tiled_blits_prepare(struct tiled_blits *t,
			       struct rnd_state *prng)
{
	u64 offset = PAGE_ALIGN(t->width * t->height * 4);
	u32 *map;
	int err;
	int i;

	map = i915_gem_object_pin_map(t->scratch.vma->obj, I915_MAP_WC);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Use scratch to fill objects */
	for (i = 0; i < ARRAY_SIZE(t->buffers); i++) {
		fill_scratch(t, map, prandom_u32_state(prng));
		GEM_BUG_ON(verify_buffer(t, &t->scratch, prng));

		err = tiled_blit(t,
				 &t->buffers[i], t->hole + offset,
				 &t->scratch, t->hole);
		if (err == 0)
			err = verify_buffer(t, &t->buffers[i], prng);
		if (err) {
			pr_err("Failed to create buffer %d\n", i);
			break;
		}
	}

	i915_gem_object_unpin_map(t->scratch.vma->obj);
	return err;
}

static int tiled_blits_bounce(struct tiled_blits *t, struct rnd_state *prng)
{
	u64 offset =
		round_up(t->width * t->height * 4, 2 * I915_GTT_MIN_ALIGNMENT);
	int err;

	/* We want to check position invariant tiling across GTT eviction */

	err = tiled_blit(t,
			 &t->buffers[1], t->hole + offset / 2,
			 &t->buffers[0], t->hole + 2 * offset);
	if (err)
		return err;

	/* Reposition so that we overlap the old addresses, and slightly off */
	err = tiled_blit(t,
			 &t->buffers[2], t->hole + I915_GTT_MIN_ALIGNMENT,
			 &t->buffers[1], t->hole + 3 * offset / 2);
	if (err)
		return err;

	err = verify_buffer(t, &t->buffers[2], prng);
	if (err)
		return err;

	return 0;
}

static int __igt_client_tiled_blits(struct intel_engine_cs *engine,
				    struct rnd_state *prng)
{
	struct tiled_blits *t;
	int err;

	t = tiled_blits_create(engine, prng);
	if (IS_ERR(t))
		return PTR_ERR(t);

	err = tiled_blits_prepare(t, prng);
	if (err)
		goto out;

	err = tiled_blits_bounce(t, prng);
	if (err)
		goto out;

out:
	tiled_blits_destroy(t);
	return err;
}

static bool has_bit17_swizzle(int sw)
{
	return (sw == I915_BIT_6_SWIZZLE_9_10_17 ||
		sw == I915_BIT_6_SWIZZLE_9_17);
}

static bool bad_swizzling(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;

	if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		return true;

	if (has_bit17_swizzle(ggtt->bit_6_swizzle_x) ||
	    has_bit17_swizzle(ggtt->bit_6_swizzle_y))
		return true;

	return false;
}

static int igt_client_tiled_blits(void *arg)
{
	struct drm_i915_private *i915 = arg;
	I915_RND_STATE(prng);
	int inst = 0;

	/* Test requires explicit BLT tiling controls */
	if (INTEL_GEN(i915) < 4)
		return 0;

	if (bad_swizzling(i915)) /* Requires sane (sub-page) swizzling */
		return 0;

	do {
		struct intel_engine_cs *engine;
		int err;

		engine = intel_engine_lookup_user(i915,
						  I915_ENGINE_CLASS_COPY,
						  inst++);
		if (!engine)
			return 0;

		err = __igt_client_tiled_blits(engine, &prng);
		if (err == -ENODEV)
			err = 0;
		if (err)
			return err;
	} while (1);
}

int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_client_fill),
		SUBTEST(igt_client_tiled_blits),
	};

	if (intel_gt_is_wedged(&i915->gt))
@@ -1687,7 +1687,6 @@ static int read_from_scratch(struct i915_gem_context *ctx,
		goto skip_request;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_request_add(rq);

@@ -10,6 +10,7 @@
#include "debugfs_gt_pm.h"
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_llc.h"
#include "intel_rc6.h"
#include "intel_rps.h"
@@ -268,7 +269,7 @@ static int frequency_show(struct seq_file *m, void *unused)
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));
				 GEN6_RP_MEDIA_SW_MODE));

		vlv_punit_get(i915);
		freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
@@ -300,8 +301,9 @@ static int frequency_show(struct seq_file *m, void *unused)
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 rpcurupei, rpcurup, rpprevup;
		u32 rpcurdownei, rpcurdown, rpprevdown;
		u32 rpupei, rpupt, rpdownei, rpdownt;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

@@ -334,12 +336,19 @@ static int frequency_show(struct seq_file *m, void *unused)
		rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);

		rpstat = intel_uncore_read(uncore, GEN6_RPSTAT1);
		rpupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;

		rpupei = intel_uncore_read(uncore, GEN6_RP_UP_EI);
		rpupt = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);

		rpdownei = intel_uncore_read(uncore, GEN6_RP_DOWN_EI);
		rpdownt = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);

		cagf = intel_rps_read_actual_frequency(rps);

		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
@@ -372,7 +381,7 @@ static int frequency_show(struct seq_file *m, void *unused)
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));
				 GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
@@ -394,23 +403,35 @@ static int frequency_show(struct seq_file *m, void *unused)
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(i915, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(i915, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(i915, rpprevup));
		seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
			   rpcurupei,
			   intel_gt_pm_interval_to_ns(gt, rpcurupei));
		seq_printf(m, "RP CUR UP: %d (%dns)\n",
			   rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dns)\n",
			   rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);
		seq_printf(m, "RP UP EI: %d (%dns)\n",
			   rpupei, intel_gt_pm_interval_to_ns(gt, rpupei));
		seq_printf(m, "RP UP THRESHOLD: %d (%dns)\n",
			   rpupt, intel_gt_pm_interval_to_ns(gt, rpupt));

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(i915, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(i915, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(i915, rpprevdown));
		seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
			   rpcurdownei,
			   intel_gt_pm_interval_to_ns(gt, rpcurdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
			   rpcurdown,
			   intel_gt_pm_interval_to_ns(gt, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
			   rpprevdown,
			   intel_gt_pm_interval_to_ns(gt, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);
		seq_printf(m, "RP DOWN EI: %d (%dns)\n",
			   rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei));
		seq_printf(m, "RP DOWN THRESHOLD: %d (%dns)\n",
			   rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt));

		max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
@@ -535,7 +556,8 @@ static int rps_boost_show(struct seq_file *m, void *data)
	struct drm_i915_private *i915 = gt->i915;
	struct intel_rps *rps = &gt->rps;

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
	seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
	seq_printf(m, "GPU busy? %s\n", yesno(gt->awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
@@ -555,7 +577,7 @@ static int rps_boost_show(struct seq_file *m, void *data)

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(i915) >= 6 && rps->enabled && gt->awake) {
	if (INTEL_GEN(i915) >= 6 && intel_rps_is_active(rps)) {
		struct intel_uncore *uncore = gt->uncore;
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

@@ -25,8 +25,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
		return PTR_ERR(cs);

	offset = i915_ggtt_offset(ce->state) +
		 LRC_STATE_PN * PAGE_SIZE +
		 CTX_R_PWR_CLK_STATE * 4;
		 LRC_STATE_OFFSET + CTX_R_PWR_CLK_STATE * 4;

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = lower_32_bits(offset);
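/*
 * Layout assumed by the offset calculation above (a sketch, not part of
 * the patch): the context image begins with the per-process HWSP page(s),
 * so the register state lives at LRC_STATE_OFFSET (defined below as
 * LRC_STATE_PN * PAGE_SIZE) from the start of the image, and
 * CTX_R_PWR_CLK_STATE is a dword index, hence the "* 4" to turn it into
 * a byte offset within that page.
 */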

@@ -69,7 +69,13 @@ struct intel_context {
#define CONTEXT_NOPREEMPT 7

	u32 *lrc_reg_state;
	u64 lrc_desc;
	union {
		struct {
			u32 lrca;
			u32 ccid;
		};
		u64 desc;
	} lrc;
	u32 tag; /* cookie passed to HW to track this context on submission */

	/* Time on GPU as tracked by the hw. */
@@ -96,6 +102,8 @@ struct intel_context {

	/** sseu: Control eu/slice partitioning */
	struct intel_sseu sseu;

	u8 wa_bb_page; /* if set, page num reserved for context workarounds */
};

#endif /* __INTEL_CONTEXT_TYPES__ */
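The union above lets the submission code address the two halves of the 64-bit context descriptor by name instead of with upper/lower_32_bits(). On a little-endian build the first struct member aliases the low dword. A stand-alone sketch of the aliasing (userspace mirror, illustrative only):

	#include <assert.h>
	#include <stdint.h>

	/* Mirror of the kernel union, for illustration. */
	union lrc {
		struct {
			uint32_t lrca;	/* low dword on little-endian */
			uint32_t ccid;	/* high dword on little-endian */
		};
		uint64_t desc;
	};

	int main(void)
	{
		union lrc lrc = { .desc = 0x12340000aabbccddULL };

		assert(lrc.lrca == 0xaabbccdd);	/* context image address bits */
		assert(lrc.ccid == 0x12340000);	/* submission identifier bits */
		return 0;
	}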

@@ -310,9 +310,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...);

int intel_enable_engine_stats(struct intel_engine_cs *engine);
void intel_disable_engine_stats(struct intel_engine_cs *engine);

ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);

struct i915_request *

@@ -834,7 +834,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
	intel_engine_cleanup_cmd_parser(engine);

	if (engine->default_state)
		i915_gem_object_put(engine->default_state);
		fput(engine->default_state);

	if (engine->kernel_context) {
		intel_context_unpin(engine->kernel_context);
@@ -1425,7 +1425,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
			len = scnprintf(hdr, sizeof(hdr),
					"\t\tActive[%d]: ccid:%08x, ",
					(int)(port - execlists->active),
					upper_32_bits(rq->context->lrc_desc));
					rq->context->lrc.ccid);
			len += print_ring(hdr + len, sizeof(hdr) - len, rq);
			scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
			print_request(m, rq, hdr);
@@ -1437,7 +1437,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
			len = scnprintf(hdr, sizeof(hdr),
					"\t\tPending[%d]: ccid:%08x, ",
					(int)(port - execlists->pending),
					upper_32_bits(rq->context->lrc_desc));
					rq->context->lrc.ccid);
			len += print_ring(hdr + len, sizeof(hdr) - len, rq);
			scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
			print_request(m, rq, hdr);
@@ -1589,58 +1589,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
	intel_engine_print_breadcrumbs(engine, m);
}

/**
 * intel_enable_engine_stats() - Enable engine busy tracking on engine
 * @engine: engine to enable stats collection
 *
 * Start collecting the engine busyness data for @engine.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_enable_engine_stats(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;
	unsigned long flags;
	int err = 0;

	if (!intel_engine_supports_stats(engine))
		return -ENODEV;

	execlists_active_lock_bh(execlists);
	write_seqlock_irqsave(&engine->stats.lock, flags);

	if (unlikely(engine->stats.enabled == ~0)) {
		err = -EBUSY;
		goto unlock;
	}

	if (engine->stats.enabled++ == 0) {
		struct i915_request * const *port;
		struct i915_request *rq;

		engine->stats.enabled_at = ktime_get();

		/* XXX submission method oblivious? */
		for (port = execlists->active; (rq = *port); port++)
			engine->stats.active++;

		for (port = execlists->pending; (rq = *port); port++) {
			/* Exclude any contexts already counted in active */
			if (!intel_context_inflight_count(rq->context))
				engine->stats.active++;
		}

		if (engine->stats.active)
			engine->stats.start = engine->stats.enabled_at;
	}

unlock:
	write_sequnlock_irqrestore(&engine->stats.lock, flags);
	execlists_active_unlock_bh(execlists);

	return err;
}

static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	ktime_t total = engine->stats.total;
@@ -1649,7 +1597,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
	 * If the engine is executing something at the moment
	 * add it to the total.
	 */
	if (engine->stats.active)
	if (atomic_read(&engine->stats.active))
		total = ktime_add(total,
				  ktime_sub(ktime_get(), engine->stats.start));

@@ -1675,28 +1623,6 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
	return total;
}

/**
 * intel_disable_engine_stats() - Disable engine busy tracking on engine
 * @engine: engine to disable stats collection
 *
 * Stops collecting the engine busyness data for @engine.
 */
void intel_disable_engine_stats(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (!intel_engine_supports_stats(engine))
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);
	WARN_ON_ONCE(engine->stats.enabled == 0);
	if (--engine->stats.enabled == 0) {
		engine->stats.total = __intel_engine_get_busy_time(engine);
		engine->stats.active = 0;
	}
	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
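With busy-stats now always enabled for execlists, readers still need a consistent snapshot of total/start/active; a sketch of the seqlock retry loop such a reader uses (names reused from the code above, illustrative):

	ktime_t read_busy_time(struct intel_engine_cs *engine)
	{
		unsigned int seq;
		ktime_t total;

		/* Retry if a writer updated the stats mid-read. */
		do {
			seq = read_seqbegin(&engine->stats.lock);
			total = __intel_engine_get_busy_time(engine);
		} while (read_seqretry(&engine->stats.lock, seq));

		return total;
	}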

static bool match_ring(struct i915_request *rq)
{
	u32 ring = ENGINE_READ(rq->engine, RING_START);

@@ -15,6 +15,7 @@
#include "intel_gt_pm.h"
#include "intel_rc6.h"
#include "intel_ring.h"
#include "shmem_utils.h"

static int __engine_unpark(struct intel_wakeref *wf)
{
@@ -30,10 +31,8 @@ static int __engine_unpark(struct intel_wakeref *wf)
	/* Pin the default state for fast resets from atomic context. */
	map = NULL;
	if (engine->default_state)
		map = i915_gem_object_pin_map(engine->default_state,
					      I915_MAP_WB);
	if (!IS_ERR_OR_NULL(map))
		engine->pinned_default_state = map;
		map = shmem_pin_map(engine->default_state);
	engine->pinned_default_state = map;

	/* Discard stale context state from across idling */
	ce = engine->kernel_context;
@@ -264,7 +263,8 @@ static int __engine_park(struct intel_wakeref *wf)
		engine->park(engine);

	if (engine->pinned_default_state) {
		i915_gem_object_unpin_map(engine->default_state);
		shmem_unpin_map(engine->default_state,
				engine->pinned_default_state);
		engine->pinned_default_state = NULL;
	}

@@ -156,6 +156,11 @@ struct intel_engine_execlists {
	 */
	struct i915_priolist default_priolist;

	/**
	 * @ccid: identifier for contexts submitted to this engine
	 */
	u32 ccid;

	/**
	 * @yield: CCID at the time of the last semaphore-wait interrupt.
	 *
@@ -304,8 +309,7 @@ struct intel_engine_cs {
	u32 context_size;
	u32 mmio_base;

	unsigned int context_tag;
#define NUM_CONTEXT_TAG roundup_pow_of_two(2 * EXECLIST_MAX_PORTS)
	unsigned long context_tag;

	struct rb_node uabi_node;
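Widening context_tag to unsigned long turns it into a simple bitmap of in-flight CCID tags, one bit per tag. A sketch of how such a bitmap allocator can look (illustrative only; the exact allocation in the submission code may differ):

	/* Illustrative: allocate/release a CCID tag from a bitmap. */
	static int get_tag(unsigned long *tags)
	{
		int tag = find_first_zero_bit(tags, BITS_PER_LONG);

		if (tag == BITS_PER_LONG)
			return -EBUSY;	/* all tags currently in flight */

		__set_bit(tag, tags);
		return tag;
	}

	static void put_tag(unsigned long *tags, int tag)
	{
		__clear_bit(tag, tags);	/* tag may be reused on next submit */
	}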

@@ -335,7 +339,7 @@ struct intel_engine_cs {

	unsigned long wakeref_serial;
	struct intel_wakeref wakeref;
	struct drm_i915_gem_object *default_state;
	struct file *default_state;
	void *pinned_default_state;

	struct {
@@ -419,6 +423,7 @@ struct intel_engine_cs {
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	void (*sanitize)(struct intel_engine_cs *engine);
	int (*resume)(struct intel_engine_cs *engine);

	struct {
@@ -526,28 +531,16 @@ struct intel_engine_cs {
	u32 (*get_cmd_length_mask)(u32 cmd_header);

	struct {
		/**
		 * @active: Number of contexts currently scheduled in.
		 */
		atomic_t active;

		/**
		 * @lock: Lock protecting the below fields.
		 */
		seqlock_t lock;
		/**
		 * @enabled: Reference count indicating number of listeners.
		 */
		unsigned int enabled;
		/**
		 * @active: Number of contexts currently scheduled in.
		 */
		unsigned int active;
		/**
		 * @enabled_at: Timestamp when busy stats were enabled.
		 */
		ktime_t enabled_at;
		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, active is active > 0.
		 */
		ktime_t start;

		/**
		 * @total: Total time this engine was busy.
		 *
@@ -555,6 +548,18 @@ struct intel_engine_cs {
		 * where engine is currently busy (active > 0).
		 */
		ktime_t total;

		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, active is active > 0.
		 */
		ktime_t start;

		/**
		 * @rps: Utilisation at last RPS sampling.
		 */
		ktime_t rps;
	} stats;

	struct {

@@ -840,7 +840,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
	struct pci_dev *pdev = i915->drm.pdev;
	unsigned int size;
	u16 snb_gmch_ctl;
	int err;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	if (!IS_DGFX(i915)) {
@@ -848,13 +847,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
		ggtt->mappable_end = resource_size(&ggtt->gmadr);
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
	if (err)
		drm_err(&i915->drm,
			"Can't set DMA mask/consistent mask (%d)\n", err);

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	if (IS_CHERRYVIEW(i915))
		size = chv_get_total_gtt_size(snb_gmch_ctl);
@@ -990,7 +982,6 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
	struct pci_dev *pdev = i915->drm.pdev;
	unsigned int size;
	u16 snb_gmch_ctl;
	int err;

	ggtt->gmadr = pci_resource(pdev, 2);
	ggtt->mappable_end = resource_size(&ggtt->gmadr);
@@ -1005,12 +996,6 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
		return -ENXIO;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err)
		drm_err(&i915->drm,
			"Can't set DMA mask/consistent mask (%d)\n", err);
	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	size = gen6_get_total_gtt_size(snb_gmch_ctl);

@@ -138,7 +138,7 @@
 */
#define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*(x)-1)
/* Gen11+. addr = base + (ctx_restore ? offset & GENMASK(12,2) : offset) */
#define   MI_LRI_CS_MMIO	(1<<19)
#define   MI_LRI_LRM_CS_MMIO	REG_BIT(19)
#define   MI_LRI_FORCE_POSTED	(1<<12)
#define MI_LOAD_REGISTER_IMM_MAX_REGS (126)
#define MI_STORE_REGISTER_MEM	MI_INSTR(0x24, 1)
@@ -156,6 +156,7 @@
#define MI_LOAD_REGISTER_MEM	MI_INSTR(0x29, 1)
#define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2)
#define MI_LOAD_REGISTER_REG	MI_INSTR(0x2A, 1)
#define   MI_LRR_SOURCE_CS_MMIO	REG_BIT(18)
#define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
#define   MI_BATCH_NON_SECURE	(1)
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */

@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_mocs.h"
@@ -15,6 +16,7 @@
#include "intel_rps.h"
#include "intel_uncore.h"
#include "intel_pm.h"
#include "shmem_utils.h"

void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
@@ -370,18 +372,6 @@ static struct i915_address_space *kernel_vm(struct intel_gt *gt)
	return i915_vm_get(&gt->ggtt->vm);
}

static int __intel_context_flush_retire(struct intel_context *ce)
{
	struct intel_timeline *tl;

	tl = intel_context_timeline_lock(ce);
	if (IS_ERR(tl))
		return PTR_ERR(tl);

	intel_context_timeline_unlock(tl);
	return 0;
}

static int __engines_record_defaults(struct intel_gt *gt)
{
	struct i915_request *requests[I915_NUM_ENGINES] = {};
@@ -447,8 +437,7 @@ err_rq:

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct i915_request *rq;
		struct i915_vma *state;
		void *vaddr;
		struct file *state;

		rq = requests[id];
		if (!rq)
@@ -460,48 +449,16 @@ err_rq:
		}

		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
		state = rq->context->state;
		if (!state)
		if (!rq->context->state)
			continue;

		/* Serialise with retirement on another CPU */
		GEM_BUG_ON(!i915_request_completed(rq));
		err = __intel_context_flush_retire(rq->context);
		if (err)
			goto out;

		/* We want to be able to unbind the state from the GGTT */
		GEM_BUG_ON(intel_context_is_pinned(rq->context));

		/*
		 * As we will hold a reference to the logical state, it will
		 * not be torn down with the context, and importantly the
		 * object will hold onto its vma (making it possible for a
		 * stray GTT write to corrupt our defaults). Unmap the vma
		 * from the GTT to prevent such accidents and reclaim the
		 * space.
		 */
		err = i915_vma_unbind(state);
		if (err)
			goto out;

		i915_gem_object_lock(state->obj);
		err = i915_gem_object_set_to_cpu_domain(state->obj, false);
		i915_gem_object_unlock(state->obj);
		if (err)
			goto out;

		i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);

		/* Check we can acquire the image of the context state */
		vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
		/* Keep a copy of the state's backing pages; free the obj */
		state = shmem_create_from_object(rq->context->state->obj);
		if (IS_ERR(state)) {
			err = PTR_ERR(state);
			goto out;
		}

		rq->engine->default_state = i915_gem_object_get(state->obj);
		i915_gem_object_unpin_map(state->obj);
		rq->engine->default_state = state;
	}

out:
@@ -576,6 +533,8 @@ int intel_gt_init(struct intel_gt *gt)
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	intel_gt_init_clock_frequency(gt);

	err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
	if (err)
		goto out_fw;
drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c (new file, 102 lines)
@@ -0,0 +1,102 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"

#define MHZ_12   12000000 /* 12MHz (24MHz/2), 83.333ns */
#define MHZ_12_5 12500000 /* 12.5MHz (25MHz/2), 80ns */
#define MHZ_19_2 19200000 /* 19.2MHz, 52.083ns */

static u32 read_clock_frequency(const struct intel_gt *gt)
{
	if (INTEL_GEN(gt->i915) >= 11) {
		u32 config;

		config = intel_uncore_read(gt->uncore, RPM_CONFIG0);
		config &= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK;
		config >>= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

		switch (config) {
		case 0: return MHZ_12;
		case 1:
		case 2: return MHZ_19_2;
		default:
		case 3: return MHZ_12_5;
		}
	} else if (INTEL_GEN(gt->i915) >= 9) {
		if (IS_GEN9_LP(gt->i915))
			return MHZ_19_2;
		else
			return MHZ_12;
	} else {
		return MHZ_12_5;
	}
}

void intel_gt_init_clock_frequency(struct intel_gt *gt)
{
	/*
	 * Note that on gen11+, the clock frequency may be reconfigured.
	 * We do not, and we assume nobody else does.
	 */
	gt->clock_frequency = read_clock_frequency(gt);
	GT_TRACE(gt,
		 "Using clock frequency: %dkHz\n",
		 gt->clock_frequency / 1000);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void intel_gt_check_clock_frequency(const struct intel_gt *gt)
{
	if (gt->clock_frequency != read_clock_frequency(gt)) {
		dev_err(gt->i915->drm.dev,
			"GT clock frequency changed, was %uHz, now %uHz!\n",
			gt->clock_frequency,
			read_clock_frequency(gt));
	}
}
#endif

static u64 div_u64_roundup(u64 nom, u32 den)
{
	return div_u64(nom + den - 1, den);
}

u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count)
{
	return div_u64_roundup(mul_u32_u32(count, 1000 * 1000 * 1000),
			       gt->clock_frequency);
}

u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count)
{
	return intel_gt_clock_interval_to_ns(gt, 16 * count);
}

u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns)
{
	return div_u64_roundup(mul_u32_u32(gt->clock_frequency, ns),
			       1000 * 1000 * 1000);
}

u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns)
{
	u32 val;

	/*
	 * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
	 * 8300) freezing up around GPU hangs. Looks as if even
	 * scheduling/timer interrupts start misbehaving if the RPS
	 * EI/thresholds are "bad", leading to a very sluggish or even
	 * frozen machine.
	 */
	val = DIV_ROUND_UP(intel_gt_ns_to_clock_interval(gt, ns), 16);
	if (IS_GEN(gt->i915, 6))
		val = roundup(val, 25);

	return val;
}

drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h (new file, 27 lines)
@@ -0,0 +1,27 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 */

#ifndef __INTEL_GT_CLOCK_UTILS_H__
#define __INTEL_GT_CLOCK_UTILS_H__

#include <linux/types.h>

struct intel_gt;

void intel_gt_init_clock_frequency(struct intel_gt *gt);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void intel_gt_check_clock_frequency(const struct intel_gt *gt);
#else
static inline void intel_gt_check_clock_frequency(const struct intel_gt *gt) {}
#endif

u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count);
u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count);

u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns);
u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns);

#endif /* __INTEL_GT_CLOCK_UTILS_H__ */
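A worked example of the conversion these helpers implement: PM interval units tick at 1/16th of the crystal clock, so (values assume a 19.2 MHz crystal, as on gen9 LP):

	/* One PM unit = 16 crystal ticks: */
	u32 ns = intel_gt_pm_interval_to_ns(gt, 1);
	/* = div_u64_roundup(16 * 1000000000, 19200000) = 834ns */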

@@ -12,6 +12,7 @@
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_llc.h"
@@ -138,6 +139,8 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	intel_gt_check_clock_frequency(gt);

	/*
	 * As we have just resumed the machine and woken the device up from
	 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
@@ -147,6 +150,10 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
	if (intel_gt_is_wedged(gt))
		intel_gt_unset_wedged(gt);

	for_each_engine(engine, gt, id)
		if (engine->sanitize)
			engine->sanitize(engine);

	intel_uc_sanitize(&gt->uc);

	for_each_engine(engine, gt, id)
@@ -191,11 +198,12 @@ int intel_gt_resume(struct intel_gt *gt)
	 * Only the kernel contexts should remain pinned over suspend,
	 * allowing us to fixup the user contexts on their first pin.
	 */
	gt_sanitize(gt, true);

	intel_gt_pm_get(gt);

	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	intel_rc6_sanitize(&gt->rc6);
	gt_sanitize(gt, true);
	if (intel_gt_is_wedged(gt)) {
		err = -EIO;
		goto out_fw;

@@ -26,6 +26,11 @@ static bool retire_requests(struct intel_timeline *tl)
	return !i915_active_fence_isset(&tl->last_request);
}

static bool engine_active(const struct intel_engine_cs *engine)
{
	return !list_empty(&engine->kernel_context->timeline->requests);
}

static bool flush_submission(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
@@ -37,8 +42,13 @@ static bool flush_submission(struct intel_gt *gt)

	for_each_engine(engine, gt, id) {
		intel_engine_flush_submission(engine);
		active |= flush_work(&engine->retire_work);
		active |= flush_delayed_work(&engine->wakeref.work);

		/* Flush the background retirement and idle barriers */
		flush_work(&engine->retire_work);
		flush_delayed_work(&engine->wakeref.work);

		/* Is the idle barrier still outstanding? */
		active |= engine_active(engine);
	}

	return active;
@@ -162,7 +172,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
			}
		}

		if (!retire_requests(tl) || flush_submission(gt))
		if (!retire_requests(tl))
			active_count++;
		mutex_unlock(&tl->mutex);

@@ -173,7 +183,6 @@ out_active:	spin_lock(&timelines->lock);
		if (atomic_dec_and_test(&tl->active_count))
			list_del(&tl->link);

		/* Defer the final release to after the spinlock */
		if (refcount_dec_and_test(&tl->kref.refcount)) {
			GEM_BUG_ON(atomic_read(&tl->active_count));
@@ -185,6 +194,9 @@ out_active:	spin_lock(&timelines->lock);
	list_for_each_entry_safe(tl, tn, &free, link)
		__intel_timeline_free(&tl->kref);

	if (flush_submission(gt)) /* Wait, there's more! */
		active_count++;

	return active_count ? timeout : 0;
}
|
||||
|
||||
|
@@ -61,6 +61,7 @@ struct intel_gt {
struct list_head closed_vma;
spinlock_t closed_lock; /* guards the list of closed_vma */

ktime_t last_init_time;
struct intel_reset reset;

/**
@@ -72,14 +73,12 @@ struct intel_gt {
*/
intel_wakeref_t awake;

u32 clock_frequency;

struct intel_llc llc;
struct intel_rc6 rc6;
struct intel_rps rps;

ktime_t last_init_time;

struct i915_vma *scratch;

spinlock_t irq_lock;
u32 gt_imr;
u32 pm_ier;
@@ -97,6 +96,8 @@ struct intel_gt {
* Reserved for exclusive use by the kernel.
*/
struct i915_address_space *vm;

struct i915_vma *scratch;
};

enum intel_gt_scratch_field {

[File diff suppressed because it is too large]
@@ -90,6 +90,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine);
#define LRC_PPHWSP_SZ (1)
/* After the PPHWSP we have the logical state for the context */
#define LRC_STATE_PN (LRC_PPHWSP_PN + LRC_PPHWSP_SZ)
#define LRC_STATE_OFFSET (LRC_STATE_PN * PAGE_SIZE)

/* Space within PPHWSP reserved to be used as scratch */
#define LRC_PPHWSP_SCRATCH 0x34
@@ -9,14 +9,13 @@

#include <linux/types.h>

/* GEN8 to GEN11 Reg State Context */
/* GEN8 to GEN12 Reg State Context */
#define CTX_CONTEXT_CONTROL (0x02 + 1)
#define CTX_RING_HEAD (0x04 + 1)
#define CTX_RING_TAIL (0x06 + 1)
#define CTX_RING_START (0x08 + 1)
#define CTX_RING_CTL (0x0a + 1)
#define CTX_BB_STATE (0x10 + 1)
#define CTX_BB_PER_CTX_PTR (0x18 + 1)
#define CTX_TIMESTAMP (0x22 + 1)
#define CTX_PDP3_UDW (0x24 + 1)
#define CTX_PDP3_LDW (0x26 + 1)
@@ -30,9 +29,6 @@

#define GEN9_CTX_RING_MI_MODE 0x54

/* GEN12+ Reg State Context */
#define GEN12_CTX_BB_PER_CTX_PTR (0x12 + 1)
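
The "+ 1" in the defines above reflects how the register state is laid out in the context image: it is a list of MI_LOAD_REGISTER_IMM-style (mmio offset, value) pairs, so a register's value lives one dword after its offset slot. A minimal sketch, assuming a pointer to the register-state page (the helper name is illustrative, not from the patch):

/* Hypothetical helper: dword 0x06 holds the RING_TAIL mmio offset,
 * so CTX_RING_TAIL == (0x06 + 1) indexes the value dword we write. */
static void example_set_ring_tail(u32 *reg_state, u32 tail)
{
	reg_state[CTX_RING_TAIL] = tail;
}
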

#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
u32 *reg_state__ = (reg_state); \
const u64 addr__ = i915_page_dir_dma_addr((ppgtt), (n)); \

@@ -194,7 +194,7 @@ int intel_renderstate_init(struct intel_renderstate *so,

err = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
goto err_vma;
goto err_obj;

err = render_state_setup(so, engine->i915);
if (err)
@@ -204,8 +204,6 @@ int intel_renderstate_init(struct intel_renderstate *so,

err_unpin:
i915_vma_unpin(so->vma);
err_vma:
i915_vma_close(so->vma);
err_obj:
i915_gem_object_put(obj);
so->vma = NULL;

@@ -42,6 +42,7 @@
#include "intel_reset.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
#include "shmem_utils.h"

/* Rough estimate of the typical request size, performing a flush,
* set-context and then emitting the batch.
@@ -1241,7 +1242,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);

if (engine->default_state) {
void *defaults, *vaddr;
void *vaddr;

vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
@@ -1249,15 +1250,8 @@ alloc_context_vma(struct intel_engine_cs *engine)
goto err_obj;
}

defaults = i915_gem_object_pin_map(engine->default_state,
I915_MAP_WB);
if (IS_ERR(defaults)) {
err = PTR_ERR(defaults);
goto err_map;
}

memcpy(vaddr, defaults, engine->context_size);
i915_gem_object_unpin_map(engine->default_state);
shmem_read(engine->default_state, 0,
vaddr, engine->context_size);

i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
@@ -1271,8 +1265,6 @@ alloc_context_vma(struct intel_engine_cs *engine)

return vma;

err_map:
i915_gem_object_unpin_map(obj);
err_obj:
i915_gem_object_put(obj);
return ERR_PTR(err);

@@ -8,12 +8,15 @@

#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_rps.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"

#define BUSY_MAX_EI 20u /* ms */

/*
* Lock protecting IPS related data structures
*/
@@ -44,6 +47,100 @@ static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
intel_uncore_write_fw(uncore, reg, val);
}

static void rps_timer(struct timer_list *t)
{
struct intel_rps *rps = from_timer(rps, t, timer);
struct intel_engine_cs *engine;
enum intel_engine_id id;
s64 max_busy[3] = {};
ktime_t dt, last;

for_each_engine(engine, rps_to_gt(rps), id) {
s64 busy;
int i;

dt = intel_engine_get_busy_time(engine);
last = engine->stats.rps;
engine->stats.rps = dt;

busy = ktime_to_ns(ktime_sub(dt, last));
for (i = 0; i < ARRAY_SIZE(max_busy); i++) {
if (busy > max_busy[i])
swap(busy, max_busy[i]);
}
}

dt = ktime_get();
last = rps->pm_timestamp;
rps->pm_timestamp = dt;

if (intel_rps_is_active(rps)) {
s64 busy;
int i;

dt = ktime_sub(dt, last);

/*
* Our goal is to evaluate each engine independently, so we run
* at the lowest clocks required to sustain the heaviest
* workload. However, a task may be split into sequential
* dependent operations across a set of engines, such that
* the independent contributions do not account for high load,
* but overall the task is GPU bound. For example, consider
* video decode on vcs followed by colour post-processing
* on vecs, followed by general post-processing on rcs.
* Since multi-engines being active does imply a single
* continuous workload across all engines, we hedge our
* bets by only contributing a factor of the distributed
* load into our busyness calculation.
*/
busy = max_busy[0];
for (i = 1; i < ARRAY_SIZE(max_busy); i++) {
if (!max_busy[i])
break;

busy += div_u64(max_busy[i], 1 << i);
}
GT_TRACE(rps_to_gt(rps),
"busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n",
busy, (int)div64_u64(100 * busy, dt),
max_busy[0], max_busy[1], max_busy[2],
rps->pm_interval);

if (100 * busy > rps->power.up_threshold * dt &&
rps->cur_freq < rps->max_freq_softlimit) {
rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD;
rps->pm_interval = 1;
schedule_work(&rps->work);
} else if (100 * busy < rps->power.down_threshold * dt &&
rps->cur_freq > rps->min_freq_softlimit) {
rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD;
rps->pm_interval = 1;
schedule_work(&rps->work);
} else {
rps->last_adj = 0;
}

mod_timer(&rps->timer,
jiffies + msecs_to_jiffies(rps->pm_interval));
rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI);
}
}
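
The new rps_timer() above replaces hardware evaluation-interval interrupts with a software timer: per-engine busy times are kept sorted in max_busy[], and only a decaying fraction of the secondary engines' load is counted, per the comment in the hunk. A self-contained sketch of that weighting, assuming nanosecond busy-times (names here are illustrative, not from the patch):

#include <stdint.h>

/* Mirror of the combining loop in rps_timer(): the busiest engine
 * counts in full, the next at 1/2, the third at 1/4, i.e.
 *   busy = max_busy[0] + max_busy[1]/2 + max_busy[2]/4
 */
static int64_t weighted_busy(const int64_t max_busy[3])
{
	int64_t busy = max_busy[0];
	int i;

	for (i = 1; i < 3; i++) {
		if (!max_busy[i])
			break;
		busy += max_busy[i] >> i; /* stands in for div_u64(x, 1 << i) */
	}
	return busy;
}

For busy-times of 8 ms, 4 ms and 2 ms over a 10 ms interval this gives 8 + 2 + 0.5 = 10.5 ms of weighted busyness, so the workload is treated as GPU bound even though no single engine crosses the up-threshold on its own.
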

static void rps_start_timer(struct intel_rps *rps)
{
rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
rps->pm_interval = 1;
mod_timer(&rps->timer, jiffies + 1);
}

static void rps_stop_timer(struct intel_rps *rps)
{
del_timer_sync(&rps->timer);
rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
cancel_work_sync(&rps->work);
}

static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
{
u32 mask = 0;
@@ -57,7 +154,7 @@ static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
if (val < rps->max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

mask &= READ_ONCE(rps->pm_events);
mask &= rps->pm_events;

return rps_pm_sanitize_mask(rps, ~mask);
}
@@ -70,19 +167,12 @@ static void rps_reset_ei(struct intel_rps *rps)
static void rps_enable_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
u32 events;

GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n",
rps->pm_events, rps_pm_mask(rps, rps->last_freq));

rps_reset_ei(rps);

if (IS_VALLEYVIEW(gt->i915))
/* WaGsvRC0ResidencyMethod:vlv */
events = GEN6_PM_RP_UP_EI_EXPIRED;
else
events = (GEN6_PM_RP_UP_THRESHOLD |
GEN6_PM_RP_DOWN_THRESHOLD |
GEN6_PM_RP_DOWN_TIMEOUT);
WRITE_ONCE(rps->pm_events, events);

spin_lock_irq(&gt->irq_lock);
gen6_gt_pm_enable_irq(gt, rps->pm_events);
spin_unlock_irq(&gt->irq_lock);
@@ -120,8 +210,6 @@ static void rps_disable_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);

WRITE_ONCE(rps->pm_events, 0);

intel_uncore_write(gt->uncore,
GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));

@@ -140,6 +228,7 @@ static void rps_disable_interrupts(struct intel_rps *rps)
cancel_work_sync(&rps->work);

rps_reset_interrupts(rps);
GT_TRACE(gt, "interrupts:off\n");
}

static const struct cparams {
@@ -532,8 +621,8 @@ static u32 rps_limits(struct intel_rps *rps, u8 val)

static void rps_set_power(struct intel_rps *rps, int new_power)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_gt *gt = rps_to_gt(rps);
struct intel_uncore *uncore = gt->uncore;
u32 threshold_up = 0, threshold_down = 0; /* in % */
u32 ei_up = 0, ei_down = 0;

@@ -542,55 +631,49 @@ static void rps_set_power(struct intel_rps *rps, int new_power)
if (new_power == rps->power.mode)
return;

threshold_up = 95;
threshold_down = 85;

/* Note the units here are not exactly 1us, but 1280ns. */
switch (new_power) {
case LOW_POWER:
/* Upclock if more than 95% busy over 16ms */
ei_up = 16000;
threshold_up = 95;

/* Downclock if less than 85% busy over 32ms */
ei_down = 32000;
threshold_down = 85;
break;

case BETWEEN:
/* Upclock if more than 90% busy over 13ms */
ei_up = 13000;
threshold_up = 90;

/* Downclock if less than 75% busy over 32ms */
ei_down = 32000;
threshold_down = 75;
break;

case HIGH_POWER:
/* Upclock if more than 85% busy over 10ms */
ei_up = 10000;
threshold_up = 85;

/* Downclock if less than 60% busy over 32ms */
ei_down = 32000;
threshold_down = 60;
break;
}

/* When byt can survive without system hang with dynamic
* sw freq adjustments, this restriction can be lifted.
*/
if (IS_VALLEYVIEW(i915))
if (IS_VALLEYVIEW(gt->i915))
goto skip_hw_write;

set(uncore, GEN6_RP_UP_EI, GT_INTERVAL_FROM_US(i915, ei_up));
set(uncore, GEN6_RP_UP_THRESHOLD,
GT_INTERVAL_FROM_US(i915, ei_up * threshold_up / 100));
GT_TRACE(gt,
"changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
new_power, threshold_up, ei_up, threshold_down, ei_down);

set(uncore, GEN6_RP_DOWN_EI, GT_INTERVAL_FROM_US(i915, ei_down));
set(uncore, GEN6_RP_UP_EI,
intel_gt_ns_to_pm_interval(gt, ei_up * 1000));
set(uncore, GEN6_RP_UP_THRESHOLD,
intel_gt_ns_to_pm_interval(gt, ei_up * threshold_up * 10));

set(uncore, GEN6_RP_DOWN_EI,
intel_gt_ns_to_pm_interval(gt, ei_down * 1000));
set(uncore, GEN6_RP_DOWN_THRESHOLD,
GT_INTERVAL_FROM_US(i915, ei_down * threshold_down / 100));
intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10));
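
A quick unit check on the folded multipliers above: ei_up is specified in microseconds, so ei_up * 1000 is the evaluation interval in nanoseconds, and threshold_up percent of it is ei_up * 1000 * threshold_up / 100 = ei_up * threshold_up * 10 nanoseconds, which is exactly the value passed to intel_gt_ns_to_pm_interval(). For HIGH_POWER, 10000 us at 85% gives 10000 * 85 * 10 = 8,500,000 ns, i.e. 8.5 ms.
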

set(uncore, GEN6_RP_CONTROL,
(INTEL_GEN(i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
(INTEL_GEN(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
@@ -645,9 +728,11 @@ static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)

void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", yesno(interactive));

mutex_lock(&rps->power.mutex);
if (interactive) {
if (!rps->power.interactive++ && READ_ONCE(rps->active))
if (!rps->power.interactive++ && intel_rps_is_active(rps))
rps_set_power(rps, HIGH_POWER);
} else {
GEM_BUG_ON(!rps->power.interactive);
@@ -672,6 +757,9 @@ static int gen6_rps_set(struct intel_rps *rps, u8 val)
GEN6_AGGRESSIVE_TURBO);
set(uncore, GEN6_RPNSWREQ, swreq);

GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n",
val, intel_gpu_freq(rps, val), swreq);

return 0;
}

@@ -684,6 +772,9 @@ static int vlv_rps_set(struct intel_rps *rps, u8 val)
err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
vlv_punit_put(i915);

GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
val, intel_gpu_freq(rps, val));

return err;
}

@@ -714,28 +805,30 @@ static int rps_set(struct intel_rps *rps, u8 val, bool update)

void intel_rps_unpark(struct intel_rps *rps)
{
if (!rps->enabled)
if (!intel_rps_is_enabled(rps))
return;

GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq);

/*
* Use the user's desired frequency as a guide, but for better
* performance, jump directly to RPe as our starting frequency.
*/
mutex_lock(&rps->lock);

WRITE_ONCE(rps->active, true);

intel_rps_set_active(rps);
intel_rps_set(rps,
clamp(rps->cur_freq,
rps->min_freq_softlimit,
rps->max_freq_softlimit));

rps->last_adj = 0;

mutex_unlock(&rps->lock);

if (INTEL_GEN(rps_to_i915(rps)) >= 6)
rps->pm_iir = 0;
if (intel_rps_has_interrupts(rps))
rps_enable_interrupts(rps);
if (intel_rps_uses_timer(rps))
rps_start_timer(rps);

if (IS_GEN(rps_to_i915(rps), 5))
gen5_rps_update(rps);
@@ -743,15 +836,16 @@ void intel_rps_unpark(struct intel_rps *rps)

void intel_rps_park(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
int adj;

if (!rps->enabled)
if (!intel_rps_clear_active(rps))
return;

if (INTEL_GEN(i915) >= 6)
if (intel_rps_uses_timer(rps))
rps_stop_timer(rps);
if (intel_rps_has_interrupts(rps))
rps_disable_interrupts(rps);

WRITE_ONCE(rps->active, false);
if (rps->last_freq <= rps->idle_freq)
return;

@@ -782,8 +876,15 @@ void intel_rps_park(struct intel_rps *rps)
* (Note we accommodate Cherryview's limitation of only using an
* even bin by applying it to all.)
*/
rps->cur_freq =
max_t(int, round_down(rps->cur_freq - 1, 2), rps->min_freq);
adj = rps->last_adj;
if (adj < 0)
adj *= 2;
else /* CHV needs even encode values */
adj = -2;
rps->last_adj = adj;
rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);

GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
}

void intel_rps_boost(struct i915_request *rq)
@@ -791,7 +892,7 @@ void intel_rps_boost(struct i915_request *rq)
struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
unsigned long flags;

if (i915_request_signaled(rq) || !READ_ONCE(rps->active))
if (i915_request_signaled(rq) || !intel_rps_is_active(rps))
return;

/* Serializes with i915_request_retire() */
@@ -800,6 +901,9 @@ void intel_rps_boost(struct i915_request *rq)
!dma_fence_is_signaled_locked(&rq->fence)) {
set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);

GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
rq->fence.context, rq->fence.seqno);

if (!atomic_fetch_inc(&rps->num_waiters) &&
READ_ONCE(rps->cur_freq) < rps->boost_freq)
schedule_work(&rps->work);
@@ -817,7 +921,7 @@ int intel_rps_set(struct intel_rps *rps, u8 val)
GEM_BUG_ON(val > rps->max_freq);
GEM_BUG_ON(val < rps->min_freq);

if (rps->active) {
if (intel_rps_is_active(rps)) {
err = rps_set(rps, val, true);
if (err)
return err;
@@ -826,7 +930,7 @@ int intel_rps_set(struct intel_rps *rps, u8 val)
* Make sure we continue to get interrupts
* until we hit the minimum or maximum frequencies.
*/
if (INTEL_GEN(rps_to_i915(rps)) >= 6) {
if (intel_rps_has_interrupts(rps)) {
struct intel_uncore *uncore = rps_to_uncore(rps);

set(uncore,
@@ -895,6 +999,7 @@ static void gen6_rps_init(struct intel_rps *rps)
static bool rps_reset(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);

/* force a reset */
rps->power.mode = -1;
rps->last_freq = -1;
@@ -911,20 +1016,18 @@ static bool rps_reset(struct intel_rps *rps)
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static bool gen9_rps_enable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
struct intel_gt *gt = rps_to_gt(rps);
struct intel_uncore *uncore = gt->uncore;

/* Program defaults and thresholds for RPS */
if (IS_GEN(i915, 9))
if (IS_GEN(gt->i915, 9))
intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
GEN9_FREQUENCY(rps->rp1_freq));

/* 1 second timeout */
intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
GT_INTERVAL_FROM_US(i915, 1000000));

intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);

rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;

return rps_reset(rps);
}

@@ -935,12 +1038,10 @@ static bool gen8_rps_enable(struct intel_rps *rps)
intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
HSW_FREQUENCY(rps->rp1_freq));

/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
100000000 / 128); /* 1 second timeout */

intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;

return rps_reset(rps);
}

@@ -952,6 +1053,10 @@ static bool gen6_rps_enable(struct intel_rps *rps)
intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
GEN6_PM_RP_DOWN_THRESHOLD |
GEN6_PM_RP_DOWN_TIMEOUT);

return rps_reset(rps);
}

@@ -1037,6 +1142,10 @@ static bool chv_rps_enable(struct intel_rps *rps)
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);

rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
GEN6_PM_RP_DOWN_THRESHOLD |
GEN6_PM_RP_DOWN_TIMEOUT);

/* Setting Fixed Bias */
vlv_punit_get(i915);

@@ -1135,6 +1244,9 @@ static bool vlv_rps_enable(struct intel_rps *rps)
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_CONT);

/* WaGsvRC0ResidencyMethod:vlv */
rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;

vlv_punit_get(i915);

/* Setting Fixed Bias */
@@ -1193,33 +1305,71 @@ static unsigned long __ips_gfx_val(struct intel_ips *ips)
return ips->gfx_power + state2;
}

static bool has_busy_stats(struct intel_rps *rps)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;

for_each_engine(engine, rps_to_gt(rps), id) {
if (!intel_engine_supports_stats(engine))
return false;
}

return true;
}

void intel_rps_enable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
bool enabled = false;

intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
if (IS_CHERRYVIEW(i915))
rps->enabled = chv_rps_enable(rps);
else if (IS_VALLEYVIEW(i915))
rps->enabled = vlv_rps_enable(rps);
else if (INTEL_GEN(i915) >= 9)
rps->enabled = gen9_rps_enable(rps);
else if (INTEL_GEN(i915) >= 8)
rps->enabled = gen8_rps_enable(rps);
else if (INTEL_GEN(i915) >= 6)
rps->enabled = gen6_rps_enable(rps);
else if (IS_IRONLAKE_M(i915))
rps->enabled = gen5_rps_enable(rps);
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
if (!rps->enabled)
if (!HAS_RPS(i915))
return;

drm_WARN_ON(&i915->drm, rps->max_freq < rps->min_freq);
drm_WARN_ON(&i915->drm, rps->idle_freq > rps->max_freq);
intel_gt_check_clock_frequency(rps_to_gt(rps));

drm_WARN_ON(&i915->drm, rps->efficient_freq < rps->min_freq);
drm_WARN_ON(&i915->drm, rps->efficient_freq > rps->max_freq);
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
if (rps->max_freq <= rps->min_freq)
/* leave disabled, no room for dynamic reclocking */;
else if (IS_CHERRYVIEW(i915))
enabled = chv_rps_enable(rps);
else if (IS_VALLEYVIEW(i915))
enabled = vlv_rps_enable(rps);
else if (INTEL_GEN(i915) >= 9)
enabled = gen9_rps_enable(rps);
else if (INTEL_GEN(i915) >= 8)
enabled = gen8_rps_enable(rps);
else if (INTEL_GEN(i915) >= 6)
enabled = gen6_rps_enable(rps);
else if (IS_IRONLAKE_M(i915))
enabled = gen5_rps_enable(rps);
else
MISSING_CASE(INTEL_GEN(i915));
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
if (!enabled)
return;

GT_TRACE(rps_to_gt(rps),
"min:%x, max:%x, freq:[%d, %d]\n",
rps->min_freq, rps->max_freq,
intel_gpu_freq(rps, rps->min_freq),
intel_gpu_freq(rps, rps->max_freq));

GEM_BUG_ON(rps->max_freq < rps->min_freq);
GEM_BUG_ON(rps->idle_freq > rps->max_freq);

GEM_BUG_ON(rps->efficient_freq < rps->min_freq);
GEM_BUG_ON(rps->efficient_freq > rps->max_freq);

if (has_busy_stats(rps))
intel_rps_set_timer(rps);
else if (INTEL_GEN(i915) >= 6)
intel_rps_set_interrupts(rps);
else
/* Ironlake currently uses intel_ips.ko */ {}

intel_rps_set_enabled(rps);
}

static void gen6_rps_disable(struct intel_rps *rps)
@@ -1231,7 +1381,9 @@ void intel_rps_disable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);

rps->enabled = false;
intel_rps_clear_enabled(rps);
intel_rps_clear_interrupts(rps);
intel_rps_clear_timer(rps);

if (INTEL_GEN(i915) >= 6)
gen6_rps_disable(rps);
@@ -1469,7 +1621,7 @@ static void rps_work(struct work_struct *work)
u32 pm_iir = 0;

spin_lock_irq(&gt->irq_lock);
pm_iir = fetch_and_zero(&rps->pm_iir) & READ_ONCE(rps->pm_events);
pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
client_boost = atomic_read(&rps->num_waiters);
spin_unlock_irq(&gt->irq_lock);

@@ -1478,6 +1630,10 @@ static void rps_work(struct work_struct *work)
goto out;

mutex_lock(&rps->lock);
if (!intel_rps_is_active(rps)) {
mutex_unlock(&rps->lock);
return;
}

pm_iir |= vlv_wa_c0_ei(rps, pm_iir);

@@ -1487,6 +1643,12 @@ static void rps_work(struct work_struct *work)
max = rps->max_freq_softlimit;
if (client_boost)
max = rps->max_freq;

GT_TRACE(gt,
"pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
pm_iir, yesno(client_boost),
adj, new_freq, min, max);

if (client_boost && new_freq < rps->boost_freq) {
new_freq = rps->boost_freq;
adj = 0;
@@ -1518,30 +1680,18 @@ static void rps_work(struct work_struct *work)
adj = 0;
}

rps->last_adj = adj;

/*
* Limit deboosting and boosting to keep ourselves at the extremes
* when in the respective power modes (i.e. slowly decrease frequencies
* while in the HIGH_POWER zone and slowly increase frequencies while
* in the LOW_POWER zone). On idle, we will hit the timeout and drop
* to the next level quickly, and conversely if busy we expect to
* hit a waitboost and rapidly switch into max power.
*/
if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
(adj > 0 && rps->power.mode == LOW_POWER))
rps->last_adj = 0;

/* sysfs frequency interfaces may have snuck in while servicing the
* interrupt
* sysfs frequency limits may have snuck in while
* servicing the interrupt
*/
new_freq += adj;
new_freq = clamp_t(int, new_freq, min, max);

if (intel_rps_set(rps, new_freq)) {
drm_dbg(&i915->drm, "Failed to set new GPU frequency\n");
rps->last_adj = 0;
adj = 0;
}
rps->last_adj = adj;

mutex_unlock(&rps->lock);

@@ -1561,6 +1711,8 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
if (unlikely(!events))
return;

GT_TRACE(gt, "irq events:%x\n", events);

gen6_gt_pm_mask_irq(gt, events);

rps->pm_iir |= events;
@@ -1572,10 +1724,12 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
struct intel_gt *gt = rps_to_gt(rps);
u32 events;

events = pm_iir & READ_ONCE(rps->pm_events);
events = pm_iir & rps->pm_events;
if (events) {
spin_lock(&gt->irq_lock);

GT_TRACE(gt, "irq events:%x\n", events);

gen6_gt_pm_mask_irq(gt, events);
rps->pm_iir |= events;

@@ -1633,6 +1787,7 @@ void intel_rps_init_early(struct intel_rps *rps)
mutex_init(&rps->power.mutex);

INIT_WORK(&rps->work, rps_work);
timer_setup(&rps->timer, rps_timer, 0);

atomic_set(&rps->num_waiters, 0);
}
@@ -1689,6 +1844,9 @@ void intel_rps_init(struct intel_rps *rps)

if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) < 11)
rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

if (INTEL_GEN(i915) >= 6)
rps_disable_interrupts(rps);
}

u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
@@ -1718,7 +1876,7 @@ static u32 read_cagf(struct intel_rps *rps)
freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
vlv_punit_put(i915);
} else {
freq = intel_uncore_read(rps_to_gt(rps)->uncore, GEN6_RPSTAT1);
freq = intel_uncore_read(rps_to_uncore(rps), GEN6_RPSTAT1);
}

return intel_rps_get_cagf(rps, freq);
@@ -1726,7 +1884,7 @@ static u32 read_cagf(struct intel_rps *rps)

u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
{
struct intel_runtime_pm *rpm = rps_to_gt(rps)->uncore->rpm;
struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
intel_wakeref_t wakeref;
u32 freq = 0;

@@ -36,4 +36,64 @@ void gen5_rps_irq_handler(struct intel_rps *rps);
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);

static inline bool intel_rps_is_enabled(const struct intel_rps *rps)
{
return test_bit(INTEL_RPS_ENABLED, &rps->flags);
}

static inline void intel_rps_set_enabled(struct intel_rps *rps)
{
set_bit(INTEL_RPS_ENABLED, &rps->flags);
}

static inline void intel_rps_clear_enabled(struct intel_rps *rps)
{
clear_bit(INTEL_RPS_ENABLED, &rps->flags);
}

static inline bool intel_rps_is_active(const struct intel_rps *rps)
{
return test_bit(INTEL_RPS_ACTIVE, &rps->flags);
}

static inline void intel_rps_set_active(struct intel_rps *rps)
{
set_bit(INTEL_RPS_ACTIVE, &rps->flags);
}

static inline bool intel_rps_clear_active(struct intel_rps *rps)
{
return test_and_clear_bit(INTEL_RPS_ACTIVE, &rps->flags);
}

static inline bool intel_rps_has_interrupts(const struct intel_rps *rps)
{
return test_bit(INTEL_RPS_INTERRUPTS, &rps->flags);
}

static inline void intel_rps_set_interrupts(struct intel_rps *rps)
{
set_bit(INTEL_RPS_INTERRUPTS, &rps->flags);
}

static inline void intel_rps_clear_interrupts(struct intel_rps *rps)
{
clear_bit(INTEL_RPS_INTERRUPTS, &rps->flags);
}

static inline bool intel_rps_uses_timer(const struct intel_rps *rps)
{
return test_bit(INTEL_RPS_TIMER, &rps->flags);
}

static inline void intel_rps_set_timer(struct intel_rps *rps)
{
set_bit(INTEL_RPS_TIMER, &rps->flags);
}

static inline void intel_rps_clear_timer(struct intel_rps *rps)
{
clear_bit(INTEL_RPS_TIMER, &rps->flags);
}

#endif /* INTEL_RPS_H */
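
These helpers replace the old rps->enabled/rps->active booleans with atomic bitops on a single flags word. A hypothetical caller (names below are illustrative, not from the patch) showing the intended pairing:

/* Hypothetical usage sketch of the flag helpers above. */
static void example_choose_pacing(struct intel_rps *rps, bool busy_stats)
{
	/* Pick exactly one re-evaluation mechanism at enable time. */
	if (busy_stats)
		intel_rps_set_timer(rps);
	else
		intel_rps_set_interrupts(rps);

	intel_rps_set_enabled(rps);
}

static bool example_park(struct intel_rps *rps)
{
	/* test_and_clear_bit() makes parking idempotent: only the
	 * caller that observes ACTIVE set performs the teardown. */
	if (!intel_rps_clear_active(rps))
		return false;

	/* ...stop the timer / disable interrupts here... */
	return true;
}
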

@@ -31,6 +31,13 @@ struct intel_rps_ei {
u32 media_c0;
};

enum {
INTEL_RPS_ENABLED = 0,
INTEL_RPS_ACTIVE,
INTEL_RPS_INTERRUPTS,
INTEL_RPS_TIMER,
};

struct intel_rps {
struct mutex lock; /* protects enabling and the worker */

@@ -38,9 +45,12 @@ struct intel_rps {
* work, interrupts_enabled and pm_iir are protected by
* dev_priv->irq_lock
*/
struct timer_list timer;
struct work_struct work;
bool enabled;
bool active;
unsigned long flags;

ktime_t pm_timestamp;
u32 pm_interval;
u32 pm_iir;

/* PM interrupt bits that should never be masked */
@@ -337,6 +337,13 @@ int intel_timeline_pin(struct intel_timeline *tl)
return 0;
}

void intel_timeline_reset_seqno(const struct intel_timeline *tl)
{
/* Must be pinned to be writable, and no requests in flight. */
GEM_BUG_ON(!atomic_read(&tl->pin_count));
WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
}

void intel_timeline_enter(struct intel_timeline *tl)
{
struct intel_gt_timelines *timelines = &tl->gt->timelines;
@@ -365,8 +372,16 @@ void intel_timeline_enter(struct intel_timeline *tl)
return;

spin_lock(&timelines->lock);
if (!atomic_fetch_inc(&tl->active_count))
if (!atomic_fetch_inc(&tl->active_count)) {
/*
* The HWSP is volatile, and may have been lost while inactive,
* e.g. across suspend/resume. Be paranoid, and ensure that
* the HWSP value matches our seqno so we don't proclaim
* the next request as already complete.
*/
intel_timeline_reset_seqno(tl);
list_add_tail(&tl->link, &timelines->active_list);
}
spin_unlock(&timelines->lock);
}

@@ -529,6 +544,8 @@ int intel_timeline_read_hwsp(struct i915_request *from,

rcu_read_lock();
cl = rcu_dereference(from->hwsp_cacheline);
if (i915_request_completed(from)) /* confirm cacheline is valid */
goto unlock;
if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
goto unlock; /* seqno wrapped and completed! */
if (unlikely(i915_request_completed(from)))

@@ -84,6 +84,8 @@ int intel_timeline_get_seqno(struct intel_timeline *tl,
void intel_timeline_exit(struct intel_timeline *tl);
void intel_timeline_unpin(struct intel_timeline *tl);

void intel_timeline_reset_seqno(const struct intel_timeline *tl);

int intel_timeline_read_hwsp(struct i915_request *from,
struct i915_request *until,
u32 *hwsp_offset);

@@ -155,7 +155,7 @@ static int live_context_size(void *arg)

for_each_engine(engine, gt, id) {
struct {
struct drm_i915_gem_object *state;
struct file *state;
void *pinned;
} saved;

@@ -53,7 +53,13 @@ int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_rc6_manual),
SUBTEST(live_rps_clock_interval),
SUBTEST(live_rps_control),
SUBTEST(live_rps_frequency_cs),
SUBTEST(live_rps_frequency_srm),
SUBTEST(live_rps_power),
SUBTEST(live_rps_interrupt),
SUBTEST(live_rps_dynamic),
SUBTEST(live_gt_resume),
};

@@ -21,7 +21,8 @@
#include "gem/selftests/mock_context.h"

#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
#define NUM_GPR_DW (16 * 2) /* each GPR is 2 dwords */
#define NUM_GPR 16
#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */

static struct i915_vma *create_scratch(struct intel_gt *gt)
{
@@ -2791,6 +2792,331 @@ static int live_preempt_gang(void *arg)
return 0;
}

static struct i915_vma *
create_gpr_user(struct intel_engine_cs *engine,
struct i915_vma *result,
unsigned int offset)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *cs;
int err;
int i;

obj = i915_gem_object_create_internal(engine->i915, 4096);
if (IS_ERR(obj))
return ERR_CAST(obj);

vma = i915_vma_instance(obj, result->vm, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
return vma;
}

err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err) {
i915_vma_put(vma);
return ERR_PTR(err);
}

cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(cs)) {
i915_vma_put(vma);
return ERR_CAST(cs);
}

/* All GPR are clear for new contexts. We use GPR(0) as a constant */
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = CS_GPR(engine, 0);
*cs++ = 1;

for (i = 1; i < NUM_GPR; i++) {
u64 addr;

/*
* Perform: GPR[i]++
*
* As we read and write into the context saved GPR[i], if
* we restart this batch buffer from an earlier point, we
* will repeat the increment and store a value > 1.
*/
*cs++ = MI_MATH(4);
*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i));
*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0));
*cs++ = MI_MATH_ADD;
*cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);

addr = result->node.start + offset + i * sizeof(*cs);
*cs++ = MI_STORE_REGISTER_MEM_GEN8;
*cs++ = CS_GPR(engine, 2 * i);
*cs++ = lower_32_bits(addr);
*cs++ = upper_32_bits(addr);

*cs++ = MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_GTE_SDD;
*cs++ = i;
*cs++ = lower_32_bits(result->node.start);
*cs++ = upper_32_bits(result->node.start);
}

*cs++ = MI_BATCH_BUFFER_END;
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);

return vma;
}

static struct i915_vma *create_global(struct intel_gt *gt, size_t sz)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int err;

obj = i915_gem_object_create_internal(gt->i915, sz);
if (IS_ERR(obj))
return ERR_CAST(obj);

vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
return vma;
}

err = i915_ggtt_pin(vma, 0, 0);
if (err) {
i915_vma_put(vma);
return ERR_PTR(err);
}

return vma;
}

static struct i915_request *
create_gpr_client(struct intel_engine_cs *engine,
struct i915_vma *global,
unsigned int offset)
{
struct i915_vma *batch, *vma;
struct intel_context *ce;
struct i915_request *rq;
int err;

ce = intel_context_create(engine);
if (IS_ERR(ce))
return ERR_CAST(ce);

vma = i915_vma_instance(global->obj, ce->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_ce;
}

err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
goto out_ce;

batch = create_gpr_user(engine, vma, offset);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto out_vma;
}

rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_batch;
}

i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, false);
if (!err)
err = i915_vma_move_to_active(vma, rq, 0);
i915_vma_unlock(vma);

i915_vma_lock(batch);
if (!err)
err = i915_request_await_object(rq, batch->obj, false);
if (!err)
err = i915_vma_move_to_active(batch, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
batch->node.start,
PAGE_SIZE, 0);
i915_vma_unlock(batch);
i915_vma_unpin(batch);

if (!err)
i915_request_get(rq);
i915_request_add(rq);

out_batch:
i915_vma_put(batch);
out_vma:
i915_vma_unpin(vma);
out_ce:
intel_context_put(ce);
return err ? ERR_PTR(err) : rq;
}

static int preempt_user(struct intel_engine_cs *engine,
struct i915_vma *global,
int id)
{
struct i915_sched_attr attr = {
.priority = I915_PRIORITY_MAX
};
struct i915_request *rq;
int err = 0;
u32 *cs;

rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq))
return PTR_ERR(rq);

cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs)) {
i915_request_add(rq);
return PTR_ERR(cs);
}

*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = i915_ggtt_offset(global);
*cs++ = 0;
*cs++ = id;

intel_ring_advance(rq, cs);

i915_request_get(rq);
i915_request_add(rq);

engine->schedule(rq, &attr);

if (i915_request_wait(rq, 0, HZ / 2) < 0)
err = -ETIME;
i915_request_put(rq);

return err;
}

static int live_preempt_user(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct i915_vma *global;
enum intel_engine_id id;
u32 *result;
int err = 0;

if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;

/*
* In our other tests, we look at preemption in carefully
* controlled conditions in the ringbuffer. Since most of the
* time is spent in user batches, most of our preemptions naturally
* occur there. We want to verify that when we preempt inside a batch
* we continue on from the current instruction and do not roll back
* to the start, or another earlier arbitration point.
*
* To verify this, we create a batch which is a mixture of
* MI_MATH (gpr++) MI_SRM (gpr) and preemption points. Then with
* a few preempting contexts thrown into the mix, we look for any
* repeated instructions (which show up as incorrect values).
*/

global = create_global(gt, 4096);
if (IS_ERR(global))
return PTR_ERR(global);

result = i915_gem_object_pin_map(global->obj, I915_MAP_WC);
if (IS_ERR(result)) {
i915_vma_unpin_and_release(&global, 0);
return PTR_ERR(result);
}

for_each_engine(engine, gt, id) {
struct i915_request *client[3] = {};
struct igt_live_test t;
int i;

if (!intel_engine_has_preemption(engine))
continue;

if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS)
continue; /* we need per-context GPR */

if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
break;
}

memset(result, 0, 4096);

for (i = 0; i < ARRAY_SIZE(client); i++) {
struct i915_request *rq;

rq = create_gpr_client(engine, global,
NUM_GPR * i * sizeof(u32));
if (IS_ERR(rq))
goto end_test;

client[i] = rq;
}

/* Continuously preempt the set of 3 running contexts */
for (i = 1; i <= NUM_GPR; i++) {
err = preempt_user(engine, global, i);
if (err)
goto end_test;
}

if (READ_ONCE(result[0]) != NUM_GPR) {
pr_err("%s: Failed to release semaphore\n",
engine->name);
err = -EIO;
goto end_test;
}

for (i = 0; i < ARRAY_SIZE(client); i++) {
int gpr;

if (i915_request_wait(client[i], 0, HZ / 2) < 0) {
err = -ETIME;
goto end_test;
}

for (gpr = 1; gpr < NUM_GPR; gpr++) {
if (result[NUM_GPR * i + gpr] != 1) {
pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n",
engine->name,
i, gpr, result[NUM_GPR * i + gpr]);
err = -EINVAL;
goto end_test;
}
}
}

end_test:
for (i = 0; i < ARRAY_SIZE(client); i++) {
if (!client[i])
break;

i915_request_put(client[i]);
}

/* Flush the semaphores on error */
smp_store_mb(result[0], -1);
if (igt_live_test_end(&t))
err = -EIO;
if (err)
break;
}

i915_vma_unpin_and_release(&global, I915_VMA_RELEASE_MAP);
return err;
}

static int live_preempt_timeout(void *arg)
{
struct intel_gt *gt = arg;
@@ -3998,6 +4324,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_chain_preempt),
SUBTEST(live_preempt_gang),
SUBTEST(live_preempt_timeout),
SUBTEST(live_preempt_user),
SUBTEST(live_preempt_smoke),
SUBTEST(live_virtual_engine),
SUBTEST(live_virtual_mask),
@@ -4125,13 +4452,12 @@ static int live_lrc_layout(void *arg)
if (!engine->default_state)
continue;

hw = i915_gem_object_pin_map(engine->default_state,
I915_MAP_WB);
hw = shmem_pin_map(engine->default_state);
if (IS_ERR(hw)) {
err = PTR_ERR(hw);
break;
}
hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
hw += LRC_STATE_OFFSET / sizeof(*hw);

execlists_init_reg_state(memset(lrc, POISON_INUSE, PAGE_SIZE),
engine->kernel_context,
@@ -4198,7 +4524,7 @@ static int live_lrc_layout(void *arg)
hexdump(lrc, PAGE_SIZE);
}

i915_gem_object_unpin_map(engine->default_state);
shmem_unpin_map(engine->default_state, hw);
if (err)
break;
}
@@ -4266,11 +4592,36 @@ static int live_lrc_fixed(void *arg)
CTX_BB_STATE - 1,
"BB_STATE"
},
{
i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(engine->mmio_base)),
lrc_ring_wa_bb_per_ctx(engine),
"RING_BB_PER_CTX_PTR"
},
{
i915_mmio_reg_offset(RING_INDIRECT_CTX(engine->mmio_base)),
lrc_ring_indirect_ptr(engine),
"RING_INDIRECT_CTX_PTR"
},
{
i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(engine->mmio_base)),
lrc_ring_indirect_offset(engine),
"RING_INDIRECT_CTX_OFFSET"
},
{
i915_mmio_reg_offset(RING_CTX_TIMESTAMP(engine->mmio_base)),
CTX_TIMESTAMP - 1,
"RING_CTX_TIMESTAMP"
},
{
i915_mmio_reg_offset(GEN8_RING_CS_GPR(engine->mmio_base, 0)),
lrc_ring_gpr0(engine),
"RING_CS_GPR0"
},
{
i915_mmio_reg_offset(RING_CMD_BUF_CCTL(engine->mmio_base)),
lrc_ring_cmd_buf_cctl(engine),
"RING_CMD_BUF_CCTL"
},
{ },
}, *t;
u32 *hw;
@@ -4278,13 +4629,12 @@ static int live_lrc_fixed(void *arg)
if (!engine->default_state)
continue;

hw = i915_gem_object_pin_map(engine->default_state,
I915_MAP_WB);
hw = shmem_pin_map(engine->default_state);
if (IS_ERR(hw)) {
err = PTR_ERR(hw);
break;
}
hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
hw += LRC_STATE_OFFSET / sizeof(*hw);

for (t = tbl; t->name; t++) {
int dw = find_offset(hw, t->reg);
@@ -4300,7 +4650,7 @@ static int live_lrc_fixed(void *arg)
}
}

i915_gem_object_unpin_map(engine->default_state);
shmem_unpin_map(engine->default_state, hw);
}

return err;
@@ -4870,7 +5220,7 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
x = 0;
dw = 0;
hw = ce->engine->pinned_default_state;
hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
u32 len = hw[dw] & 0x7f;

@@ -5023,7 +5373,7 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)

dw = 0;
hw = ce->engine->pinned_default_state;
hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
u32 len = hw[dw] & 0x7f;

@@ -5147,12 +5497,12 @@ static int compare_isolation(struct intel_engine_cs *engine,
err = PTR_ERR(lrc);
goto err_B1;
}
lrc += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
lrc += LRC_STATE_OFFSET / sizeof(*hw);

x = 0;
dw = 0;
hw = engine->pinned_default_state;
hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
u32 len = hw[dw] & 0x7f;

@@ -5363,6 +5713,161 @@ static int live_lrc_isolation(void *arg)
return err;
}

static int indirect_ctx_submit_req(struct intel_context *ce)
{
struct i915_request *rq;
int err = 0;

rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return PTR_ERR(rq);

i915_request_get(rq);
i915_request_add(rq);

if (i915_request_wait(rq, 0, HZ / 5) < 0)
err = -ETIME;

i915_request_put(rq);

return err;
}

#define CTX_BB_CANARY_OFFSET (3 * 1024)
#define CTX_BB_CANARY_INDEX (CTX_BB_CANARY_OFFSET / sizeof(u32))

static u32 *
emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
{
*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT |
MI_LRI_LRM_CS_MMIO;
*cs++ = i915_mmio_reg_offset(RING_START(0));
*cs++ = i915_ggtt_offset(ce->state) +
context_wa_bb_offset(ce) +
CTX_BB_CANARY_OFFSET;
*cs++ = 0;

return cs;
}

static void
indirect_ctx_bb_setup(struct intel_context *ce)
{
u32 *cs = context_indirect_bb(ce);

cs[CTX_BB_CANARY_INDEX] = 0xdeadf00d;

setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
}

static bool check_ring_start(struct intel_context *ce)
{
const u32 * const ctx_bb = (void *)(ce->lrc_reg_state) -
LRC_STATE_OFFSET + context_wa_bb_offset(ce);

if (ctx_bb[CTX_BB_CANARY_INDEX] == ce->lrc_reg_state[CTX_RING_START])
return true;

pr_err("ring start mismatch: canary 0x%08x vs state 0x%08x\n",
ctx_bb[CTX_BB_CANARY_INDEX],
ce->lrc_reg_state[CTX_RING_START]);

return false;
}
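
The pointer arithmetic in check_ring_start() is compact; spelled out, the layout it relies on looks like this (an illustrative sketch based on the definitions above, not text from the patch):

/*
 * Context image layout assumed by check_ring_start():
 *
 *   ce->state (start of context image)
 *   +-- 0 ....................... per-process HWSP
 *   +-- LRC_STATE_OFFSET ........ register state; ce->lrc_reg_state
 *   +-- context_wa_bb_offset(ce). indirect-ctx wa batch page
 *
 * So (void *)ce->lrc_reg_state - LRC_STATE_OFFSET recovers the image
 * base, and adding context_wa_bb_offset(ce) lands on the wa_bb page
 * where the canary batch stored RING_START.
 */
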

static int indirect_ctx_bb_check(struct intel_context *ce)
{
int err;

err = indirect_ctx_submit_req(ce);
if (err)
return err;

if (!check_ring_start(ce))
return -EINVAL;

return 0;
}

static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
{
struct intel_context *a, *b;
int err;

a = intel_context_create(engine);
if (IS_ERR(a))
return PTR_ERR(a);
err = intel_context_pin(a);
if (err)
goto put_a;

b = intel_context_create(engine);
if (IS_ERR(b)) {
err = PTR_ERR(b);
goto unpin_a;
}
err = intel_context_pin(b);
if (err)
goto put_b;

/* We use the already reserved extra page in context state */
if (!a->wa_bb_page) {
GEM_BUG_ON(b->wa_bb_page);
GEM_BUG_ON(INTEL_GEN(engine->i915) == 12);
goto unpin_b;
}

/*
* In order to test that our per context bb is truly per context,
* and executes at the intended spot on context restoring process,
* make the batch store the ring start value to memory.
* As ring start is restored apriori of starting the indirect ctx bb and
* as it will be different for each context, it fits to this purpose.
*/
indirect_ctx_bb_setup(a);
indirect_ctx_bb_setup(b);

err = indirect_ctx_bb_check(a);
if (err)
goto unpin_b;

err = indirect_ctx_bb_check(b);

unpin_b:
intel_context_unpin(b);
put_b:
intel_context_put(b);
unpin_a:
intel_context_unpin(a);
put_a:
intel_context_put(a);

return err;
}

static int live_lrc_indirect_ctx_bb(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;

for_each_engine(engine, gt, id) {
intel_engine_pm_get(engine);
err = __live_lrc_indirect_ctx_bb(engine);
intel_engine_pm_put(engine);

if (igt_flush_test(gt->i915))
err = -EIO;

if (err)
break;
}

return err;
}

static void garbage_reset(struct intel_engine_cs *engine,
struct i915_request *rq)
{
@@ -5394,7 +5899,7 @@ static struct i915_request *garbage(struct intel_context *ce,
prandom_bytes_state(prng,
ce->lrc_reg_state,
ce->engine->context_size -
LRC_STATE_PN * PAGE_SIZE);
LRC_STATE_OFFSET);

rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
@@ -5598,6 +6103,7 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_lrc_timestamp),
SUBTEST(live_lrc_garbage),
SUBTEST(live_pphwsp_runtime),
SUBTEST(live_lrc_indirect_ctx_bb),
};

if (!HAS_LOGICAL_RING_CONTEXTS(i915))

@@ -11,22 +11,7 @@
#include "selftest_rc6.h"

#include "selftests/i915_random.h"

static u64 energy_uJ(struct intel_rc6 *rc6)
{
unsigned long long power;
u32 units;

if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
return 0;

units = (power & 0x1f00) >> 8;

if (rdmsrl_safe(MSR_PP1_ENERGY_STATUS, &power))
return 0;

return (1000000 * power) >> units; /* convert to uJ */
}
#include "selftests/librapl.h"

static u64 rc6_residency(struct intel_rc6 *rc6)
{
@@ -74,9 +59,9 @@ int live_rc6_manual(void *arg)
res[0] = rc6_residency(rc6);

dt = ktime_get();
rc0_power = energy_uJ(rc6);
rc0_power = librapl_energy_uJ();
msleep(250);
rc0_power = energy_uJ(rc6) - rc0_power;
rc0_power = librapl_energy_uJ() - rc0_power;
dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
if ((res[1] - res[0]) >> 10) {
@@ -99,9 +84,9 @@ int live_rc6_manual(void *arg)
res[0] = rc6_residency(rc6);
intel_uncore_forcewake_flush(rc6_to_uncore(rc6), FORCEWAKE_ALL);
dt = ktime_get();
rc6_power = energy_uJ(rc6);
rc6_power = librapl_energy_uJ();
msleep(100);
rc6_power = energy_uJ(rc6) - rc6_power;
rc6_power = librapl_energy_uJ() - rc6_power;
dt = ktime_sub(ktime_get(), dt);
res[1] = rc6_residency(rc6);
if (res[1] == res[0]) {

[File diff suppressed because it is too large]
@@ -6,6 +6,12 @@
#ifndef SELFTEST_RPS_H
#define SELFTEST_RPS_H

int live_rps_control(void *arg);
int live_rps_clock_interval(void *arg);
int live_rps_frequency_cs(void *arg);
int live_rps_frequency_srm(void *arg);
int live_rps_power(void *arg);
int live_rps_interrupt(void *arg);
int live_rps_dynamic(void *arg);

#endif /* SELFTEST_RPS_H */

drivers/gpu/drm/i915/gt/shmem_utils.c (new file, 173 lines)
@@ -0,0 +1,173 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>

#include "gem/i915_gem_object.h"
#include "shmem_utils.h"

struct file *shmem_create_from_data(const char *name, void *data, size_t len)
{
struct file *file;
int err;

file = shmem_file_setup(name, PAGE_ALIGN(len), VM_NORESERVE);
if (IS_ERR(file))
return file;

err = shmem_write(file, 0, data, len);
if (err) {
fput(file);
return ERR_PTR(err);
}

return file;
}

struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
{
struct file *file;
void *ptr;

if (obj->ops == &i915_gem_shmem_ops) {
file = obj->base.filp;
atomic_long_inc(&file->f_count);
return file;
}

ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(ptr))
return ERR_CAST(ptr);

file = shmem_create_from_data("", ptr, obj->base.size);
i915_gem_object_unpin_map(obj);

return file;
}

static size_t shmem_npte(struct file *file)
{
return file->f_mapping->host->i_size >> PAGE_SHIFT;
}

static void __shmem_unpin_map(struct file *file, void *ptr, size_t n_pte)
{
unsigned long pfn;

vunmap(ptr);

for (pfn = 0; pfn < n_pte; pfn++) {
struct page *page;

page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
GFP_KERNEL);
if (!WARN_ON(IS_ERR(page))) {
put_page(page);
put_page(page);
}
}
}

void *shmem_pin_map(struct file *file)
{
const size_t n_pte = shmem_npte(file);
pte_t *stack[32], **ptes, **mem;
struct vm_struct *area;
unsigned long pfn;

mem = stack;
if (n_pte > ARRAY_SIZE(stack)) {
mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
if (!mem)
return NULL;
}

area = alloc_vm_area(n_pte << PAGE_SHIFT, mem);
if (!area) {
if (mem != stack)
kvfree(mem);
return NULL;
}

ptes = mem;
for (pfn = 0; pfn < n_pte; pfn++) {
struct page *page;

page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
GFP_KERNEL);
if (IS_ERR(page))
goto err_page;

**ptes++ = mk_pte(page, PAGE_KERNEL);
}

if (mem != stack)
kvfree(mem);

mapping_set_unevictable(file->f_mapping);
return area->addr;

err_page:
if (mem != stack)
kvfree(mem);

__shmem_unpin_map(file, area->addr, pfn);
return NULL;
}

void shmem_unpin_map(struct file *file, void *ptr)
{
mapping_clear_unevictable(file->f_mapping);
__shmem_unpin_map(file, ptr, shmem_npte(file));
}

static int __shmem_rw(struct file *file, loff_t off,
void *ptr, size_t len,
bool write)
{
unsigned long pfn;

for (pfn = off >> PAGE_SHIFT; len; pfn++) {
unsigned int this =
min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
struct page *page;
void *vaddr;

page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
GFP_KERNEL);
if (IS_ERR(page))
return PTR_ERR(page);

vaddr = kmap(page);
if (write)
memcpy(vaddr + offset_in_page(off), ptr, this);
else
memcpy(ptr, vaddr + offset_in_page(off), this);
kunmap(page);
put_page(page);

len -= this;
ptr += this;
off = 0;
}

return 0;
}

int shmem_read(struct file *file, loff_t off, void *dst, size_t len)
{
return __shmem_rw(file, off, dst, len, false);
}

int shmem_write(struct file *file, loff_t off, void *src, size_t len)
{
return __shmem_rw(file, off, src, len, true);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "st_shmem_utils.c"
#endif

drivers/gpu/drm/i915/gt/shmem_utils.h (new file, 23 lines)
@@ -0,0 +1,23 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 */

#ifndef SHMEM_UTILS_H
#define SHMEM_UTILS_H

#include <linux/types.h>

struct drm_i915_gem_object;
struct file;

struct file *shmem_create_from_data(const char *name, void *data, size_t len);
struct file *shmem_create_from_object(struct drm_i915_gem_object *obj);

void *shmem_pin_map(struct file *file);
void shmem_unpin_map(struct file *file, void *ptr);

int shmem_read(struct file *file, loff_t off, void *dst, size_t len);
int shmem_write(struct file *file, loff_t off, void *src, size_t len);

#endif /* SHMEM_UTILS_H */
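Taken together, the new API provides a swappable backing store with two access modes: one-off copies via shmem_read()/shmem_write(), and a long-lived pinned mapping via shmem_pin_map(). A kernel-context sketch of the intended round trip, with error handling trimmed and hypothetical names; this is illustrative, not code from the series:

/* illustrative kernel-context sketch; 'blob' is a made-up payload */
static int shmem_utils_example(void)
{
	u32 blob[4] = { 1, 2, 3, 4 };
	struct file *file;
	u32 *map;

	/* copy the blob into a fresh (swappable) shmemfs file */
	file = shmem_create_from_data("example", blob, sizeof(blob));
	if (IS_ERR(file))
		return PTR_ERR(file);

	/* long-term access: pin the pages and vmap them */
	map = shmem_pin_map(file);
	if (!map) {
		fput(file);
		return -ENOMEM;
	}
	map[0] = 42;			/* mutate in place */
	shmem_unpin_map(file, map);	/* pages become swappable again */

	/* one-off access: copy back out without pinning */
	shmem_read(file, 0, blob, sizeof(blob));

	fput(file);
	return 0;
}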
drivers/gpu/drm/i915/gt/st_shmem_utils.c (new file, 63 lines)
@@ -0,0 +1,63 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

/* Just a quick and casual check of the shmem_utils API */

static int igt_shmem_basic(void *ignored)
{
	u32 datum = 0xdeadbeef, result;
	struct file *file;
	u32 *map;
	int err;

	file = shmem_create_from_data("mock", &datum, sizeof(datum));
	if (IS_ERR(file))
		return PTR_ERR(file);

	result = 0;
	err = shmem_read(file, 0, &result, sizeof(result));
	if (err)
		goto out_file;

	if (result != datum) {
		pr_err("Incorrect read back from shmemfs: %x != %x\n",
		       result, datum);
		err = -EINVAL;
		goto out_file;
	}

	result = 0xc0ffee;
	err = shmem_write(file, 0, &result, sizeof(result));
	if (err)
		goto out_file;

	map = shmem_pin_map(file);
	if (!map) {
		err = -ENOMEM;
		goto out_file;
	}

	if (*map != result) {
		pr_err("Incorrect read back via mmap of last write: %x != %x\n",
		       *map, result);
		err = -EINVAL;
		goto out_map;
	}

out_map:
	shmem_unpin_map(file, map);
out_file:
	fput(file);
	return err;
}

int shmem_utils_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_shmem_basic),
	};

	return i915_subtests(tests, NULL);
}
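As a usage note (not part of the diff): with CONFIG_DRM_I915_SELFTEST enabled, mock selftests such as igt_shmem_basic are typically run by loading the module with the i915.mock_selftests=-1 parameter and checking dmesg for the pass/fail summary.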
@@ -217,7 +217,7 @@ static void guc_wq_item_append(struct intel_guc *guc,
 static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 {
 	struct intel_engine_cs *engine = rq->engine;
-	u32 ctx_desc = lower_32_bits(rq->context->lrc_desc);
+	u32 ctx_desc = rq->context->lrc.ccid;
 	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);

 	guc_wq_item_append(guc, engine->guc_id, ctx_desc,
@@ -2341,12 +2341,27 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_engine_cs *engine;
	int i;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);

	/* if the ggtt entry of the last submitted context is written,
	 * that context has probably been unpinned.
	 * Set the last shadowed ctx to invalid.
	 */
	for_each_engine(engine, vgpu->gvt->gt, i) {
		if (!s->last_ctx[i].valid)
			continue;

		if (s->last_ctx[i].lrca == (off >> info->gtt_entry_size_shift))
			s->last_ctx[i].valid = false;
	}
	return ret;
}
@@ -31,7 +31,6 @@
 */

 #include <linux/types.h>
-#include <xen/xen.h>
 #include <linux/kthread.h>

 #include "i915_drv.h"
@@ -163,6 +163,11 @@ struct intel_vgpu_submission {
 	const struct intel_vgpu_submission_ops *ops;
 	int virtual_submission_interface;
 	bool active;
+	struct {
+		u32 lrca;
+		bool valid;
+		u64 ring_context_gpa;
+	} last_ctx[I915_NUM_ENGINES];
 };

 struct intel_vgpu {
@@ -79,6 +79,4 @@ struct intel_gvt_mpt {
 	bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
 };

-extern struct intel_gvt_mpt xengt_mpt;
-
 #endif /* _GVT_HYPERCALL_H_ */
@@ -128,16 +128,24 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_gem_object *ctx_obj =
-		workload->req->context->state->obj;
+	struct intel_context *ctx = workload->req->context;
 	struct execlist_ring_context *shadow_ring_context;
-	struct page *page;
 	void *dst;
+	void *context_base;
 	unsigned long context_gpa, context_page_num;
+	unsigned long gpa_base; /* first gpa of consecutive GPAs */
+	unsigned long gpa_size; /* size of consecutive GPAs */
+	struct intel_vgpu_submission *s = &vgpu->submission;
 	int i;
+	bool skip = false;
+	int ring_id = workload->engine->id;

-	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-	shadow_ring_context = kmap(page);
+	GEM_BUG_ON(!intel_context_is_pinned(ctx));
+
+	context_base = (void *) ctx->lrc_reg_state -
+			(LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
+
+	shadow_ring_context = (void *) ctx->lrc_reg_state;

 	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
 #define COPY_REG(name) \
@@ -169,23 +177,43 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 		I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

 	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
-	kunmap(page);

-	if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
+	gvt_dbg_sched("ring %s workload lrca %x, ctx_id %x, ctx gpa %llx",
+		      workload->engine->name, workload->ctx_desc.lrca,
+		      workload->ctx_desc.context_id,
+		      workload->ring_context_gpa);
+
+	/* only need to ensure this context is not pinned/unpinned during the
+	 * period from last submission to this submission.
+	 * Upon reaching this function, the currently submitted context is not
+	 * supposed to get unpinned. If a misbehaving guest driver ever does
+	 * this, it would corrupt itself.
+	 */
+	if (s->last_ctx[ring_id].valid &&
+	    (s->last_ctx[ring_id].lrca ==
+	     workload->ctx_desc.lrca) &&
+	    (s->last_ctx[ring_id].ring_context_gpa ==
+	     workload->ring_context_gpa))
+		skip = true;
+
+	s->last_ctx[ring_id].lrca = workload->ctx_desc.lrca;
+	s->last_ctx[ring_id].ring_context_gpa = workload->ring_context_gpa;
+
+	if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val) || skip)
 		return 0;

-	gvt_dbg_sched("ring %s workload lrca %x",
-		      workload->engine->name,
-		      workload->ctx_desc.lrca);
-
+	s->last_ctx[ring_id].valid = false;
 	context_page_num = workload->engine->context_size;
 	context_page_num = context_page_num >> PAGE_SHIFT;

 	if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
 		context_page_num = 19;

-	i = 2;
-	while (i < context_page_num) {
+	/* find consecutive GPAs from gma until the first inconsecutive GPA.
+	 * read from the continuous GPAs into dst virtual address
+	 */
+	gpa_size = 0;
+	for (i = 2; i < context_page_num; i++) {
 		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
 				(u32)((workload->ctx_desc.lrca + i) <<
 				I915_GTT_PAGE_SHIFT));
@@ -194,13 +222,26 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 			return -EFAULT;
 		}

-		page = i915_gem_object_get_page(ctx_obj, i);
-		dst = kmap(page);
-		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
-					      I915_GTT_PAGE_SIZE);
-		kunmap(page);
-		i++;
+		if (gpa_size == 0) {
+			gpa_base = context_gpa;
+			dst = context_base + (i << I915_GTT_PAGE_SHIFT);
+		} else if (context_gpa != gpa_base + gpa_size)
+			goto read;
+
+		gpa_size += I915_GTT_PAGE_SIZE;
+
+		if (i == context_page_num - 1)
+			goto read;
+
+		continue;
+
+read:
+		intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size);
+		gpa_base = context_gpa;
+		gpa_size = I915_GTT_PAGE_SIZE;
+		dst = context_base + (i << I915_GTT_PAGE_SHIFT);
 	}
+	s->last_ctx[ring_id].valid = true;
 	return 0;
 }
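The rewritten loop above is a run-coalescing pattern: physically consecutive guest pages are accumulated into one span, and a single bulk read is issued when the run breaks or the range ends, instead of one hypervisor call per page. A self-contained sketch of the same idea, with hypothetical names rather than GVT code:

#include <stdio.h>

#define PAGE_SIZE 4096ul

/* stand-in for the bulk copy: one call per consecutive span */
static void copy_span(unsigned long base, unsigned long size)
{
	printf("copy %lu bytes from 0x%lx\n", size, base);
}

/* coalesce page addresses into maximal consecutive spans */
static void copy_pages(const unsigned long *page_addr, unsigned int n)
{
	unsigned long base = 0, size = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (size && page_addr[i] == base + size) {
			size += PAGE_SIZE;	/* extend the current run */
			continue;
		}
		if (size)
			copy_span(base, size);	/* run broken: flush it */
		base = page_addr[i];
		size = PAGE_SIZE;
	}
	if (size)
		copy_span(base, size);		/* flush the final run */
}

int main(void)
{
	/* pages 0-2 are consecutive, page 3 is not: expect two copies */
	unsigned long addrs[] = { 0x10000, 0x11000, 0x12000, 0x40000 };

	copy_pages(addrs, 4);
	return 0;
}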
@@ -290,7 +331,7 @@ static void
 shadow_context_descriptor_update(struct intel_context *ce,
 				 struct intel_vgpu_workload *workload)
 {
-	u64 desc = ce->lrc_desc;
+	u64 desc = ce->lrc.desc;

 	/*
 	 * Update bits 0-11 of the context descriptor which includes flags
@@ -300,7 +341,7 @@ shadow_context_descriptor_update(struct intel_context *ce,
 	desc |= (u64)workload->ctx_desc.addressing_mode <<
 		GEN8_CTX_ADDRESSING_MODE_SHIFT;

-	ce->lrc_desc = desc;
+	ce->lrc.desc = desc;
 }

 static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
@@ -595,10 +636,9 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 		if (bb->va && !IS_ERR(bb->va))
 			i915_gem_object_unpin_map(bb->obj);

-		if (bb->vma && !IS_ERR(bb->vma)) {
+		if (bb->vma && !IS_ERR(bb->vma))
 			i915_vma_unpin(bb->vma);
-			i915_vma_close(bb->vma);
-		}

 		i915_gem_object_put(bb->obj);
 	}
 	list_del(&bb->list);
@@ -784,11 +824,13 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 {
 	struct i915_request *rq = workload->req;
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
-	struct page *page;
+	struct intel_context *ctx = workload->req->context;
+	void *context_base;
 	void *src;
 	unsigned long context_gpa, context_page_num;
+	unsigned long gpa_base; /* first gpa of consecutive GPAs */
+	unsigned long gpa_size; /* size of consecutive GPAs */
 	int i;
 	u32 ring_base;
 	u32 head, tail;
@@ -797,6 +839,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
 		      workload->ctx_desc.lrca);

+	GEM_BUG_ON(!intel_context_is_pinned(ctx));
+
 	head = workload->rb_head;
 	tail = workload->rb_tail;
 	wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
@@ -820,9 +864,14 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
 		context_page_num = 19;

-	i = 2;
+	context_base = (void *) ctx->lrc_reg_state -
+			(LRC_STATE_PN << I915_GTT_PAGE_SHIFT);

-	while (i < context_page_num) {
+	/* find consecutive GPAs from gma until the first inconsecutive GPA.
+	 * write to the consecutive GPAs from src virtual address
+	 */
+	gpa_size = 0;
+	for (i = 2; i < context_page_num; i++) {
 		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
 				(u32)((workload->ctx_desc.lrca + i) <<
 				I915_GTT_PAGE_SHIFT));
@@ -831,19 +880,30 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 			return;
 		}

-		page = i915_gem_object_get_page(ctx_obj, i);
-		src = kmap(page);
-		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
-					       I915_GTT_PAGE_SIZE);
-		kunmap(page);
-		i++;
+		if (gpa_size == 0) {
+			gpa_base = context_gpa;
+			src = context_base + (i << I915_GTT_PAGE_SHIFT);
+		} else if (context_gpa != gpa_base + gpa_size)
+			goto write;
+
+		gpa_size += I915_GTT_PAGE_SIZE;
+
+		if (i == context_page_num - 1)
+			goto write;
+
+		continue;
+
+write:
+		intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size);
+		gpa_base = context_gpa;
+		gpa_size = I915_GTT_PAGE_SIZE;
+		src = context_base + (i << I915_GTT_PAGE_SHIFT);
 	}

 	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
 		RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

-	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-	shadow_ring_context = kmap(page);
+	shadow_ring_context = (void *) ctx->lrc_reg_state;

 #define COPY_REG(name) \
 	intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
@@ -860,8 +920,6 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 		(void *)shadow_ring_context +
 		sizeof(*shadow_ring_context),
 		I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
-
-	kunmap(page);
 }

 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
@@ -1260,6 +1318,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 	atomic_set(&s->running_workload_num, 0);
 	bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);

+	memset(s->last_ctx, 0, sizeof(s->last_ctx));
+
 	i915_vm_put(&ppgtt->vm);
 	return 0;
@@ -32,6 +32,7 @@
 #include <drm/drm_debugfs.h>

 #include "gem/i915_gem_context.h"
+#include "gt/intel_gt_clock_utils.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_gt_requests.h"
 #include "gt/intel_reset.h"
@@ -926,21 +927,30 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 	seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
 	seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
 	seq_printf(m, "CAGF: %dMHz\n", cagf);
-	seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
-		   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
-	seq_printf(m, "RP CUR UP: %d (%dus)\n",
-		   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
-	seq_printf(m, "RP PREV UP: %d (%dus)\n",
-		   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
+	seq_printf(m, "RP CUR UP EI: %d (%dns)\n",
+		   rpupei,
+		   intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei));
+	seq_printf(m, "RP CUR UP: %d (%dns)\n",
+		   rpcurup,
+		   intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup));
+	seq_printf(m, "RP PREV UP: %d (%dns)\n",
+		   rpprevup,
+		   intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup));
 	seq_printf(m, "Up threshold: %d%%\n",
 		   rps->power.up_threshold);

-	seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
-		   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
-	seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
-		   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
-	seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
-		   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
+	seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n",
+		   rpdownei,
+		   intel_gt_pm_interval_to_ns(&dev_priv->gt,
+					      rpdownei));
+	seq_printf(m, "RP CUR DOWN: %d (%dns)\n",
+		   rpcurdown,
+		   intel_gt_pm_interval_to_ns(&dev_priv->gt,
+					      rpcurdown));
+	seq_printf(m, "RP PREV DOWN: %d (%dns)\n",
+		   rpprevdown,
+		   intel_gt_pm_interval_to_ns(&dev_priv->gt,
+					      rpprevdown));
 	seq_printf(m, "Down threshold: %d%%\n",
 		   rps->power.down_threshold);
@@ -1189,7 +1199,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_rps *rps = &dev_priv->gt.rps;

-	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
+	seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
+	seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
 	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
 	seq_printf(m, "Boosts outstanding? %d\n",
 		   atomic_read(&rps->num_waiters));
@@ -1209,7 +1220,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)

 	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

-	if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
+	if (INTEL_GEN(dev_priv) >= 6 && intel_rps_is_active(rps)) {
 		u32 rpup, rpupei;
 		u32 rpdown, rpdownei;
@@ -228,14 +228,14 @@ static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915)
 		ret = drm_vblank_init(&i915->drm,
 				      INTEL_NUM_PIPES(i915));
 		if (ret)
-			goto out;
+			return ret;
 	}

 	intel_bios_init(i915);

 	ret = intel_vga_register(i915);
 	if (ret)
-		goto out;
+		goto cleanup_bios;

 	intel_power_domains_init_hw(i915, false);

@@ -243,13 +243,16 @@ static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915)

 	ret = intel_modeset_init_noirq(i915);
 	if (ret)
-		goto cleanup_vga_client;
+		goto cleanup_vga_client_pw_domain_csr;

 	return 0;

-cleanup_vga_client:
+cleanup_vga_client_pw_domain_csr:
+	intel_csr_ucode_fini(i915);
+	intel_power_domains_driver_remove(i915);
 	intel_vga_unregister(i915);
-out:
+cleanup_bios:
 	intel_bios_driver_remove(i915);
 	return ret;
 }
@@ -308,13 +311,13 @@ static void i915_driver_modeset_remove(struct drm_i915_private *i915)
/* part #2: call after irq uninstall */
static void i915_driver_modeset_remove_noirq(struct drm_i915_private *i915)
{
	intel_modeset_driver_remove_noirq(i915);
	intel_csr_ucode_fini(i915);

	intel_bios_driver_remove(i915);
	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	intel_csr_ucode_fini(i915);
	intel_bios_driver_remove(i915);
}

static void intel_init_dpio(struct drm_i915_private *dev_priv)
@@ -566,6 +569,62 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 	intel_gvt_sanitize_options(dev_priv);
 }

+/**
+ * i915_set_dma_info - set all relevant PCI dma info as configured for the
+ * platform
+ * @i915: valid i915 instance
+ *
+ * Set the dma max segment size, device and coherent masks. The dma mask set
+ * needs to occur before i915_ggtt_probe_hw.
+ *
+ * A couple of platforms have special needs. Address them as well.
+ */
+static int i915_set_dma_info(struct drm_i915_private *i915)
+{
+	struct pci_dev *pdev = i915->drm.pdev;
+	unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
+	int ret;
+
+	GEM_BUG_ON(!mask_size);
+
+	/*
+	 * We don't have a max segment size, so set it to the max so sg's
+	 * debugging layer doesn't complain
+	 */
+	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
+	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
+	if (ret)
+		goto mask_err;
+
+	/* overlay on gen2 is broken and can't address above 1G */
+	if (IS_GEN(i915, 2))
+		mask_size = 30;
+
+	/*
+	 * 965GM sometimes incorrectly writes to hardware status page (HWS)
+	 * using 32bit addressing, overwriting memory if HWS is located
+	 * above 4GB.
+	 *
+	 * The documentation also mentions an issue with undefined
+	 * behaviour if any general state is accessed within a page above 4GB,
+	 * which also needs to be handled carefully.
+	 */
+	if (IS_I965G(i915) || IS_I965GM(i915))
+		mask_size = 32;
+
+	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(mask_size));
+	if (ret)
+		goto mask_err;
+
+	return 0;
+
+mask_err:
+	drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret);
+	return ret;
+}
+
 /**
  * i915_driver_hw_probe - setup state requiring device access
  * @dev_priv: device private
@@ -611,6 +670,10 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 	/* needs to be done before ggtt probe */
 	intel_dram_edram_detect(dev_priv);

+	ret = i915_set_dma_info(dev_priv);
+	if (ret)
+		return ret;
+
 	i915_perf_init(dev_priv);

 	ret = i915_ggtt_probe_hw(dev_priv);
@@ -639,40 +702,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)

 	pci_set_master(pdev);

-	/*
-	 * We don't have a max segment size, so set it to the max so sg's
-	 * debugging layer doesn't complain
-	 */
-	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
-
-	/* overlay on gen2 is broken and can't address above 1G */
-	if (IS_GEN(dev_priv, 2)) {
-		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
-		if (ret) {
-			drm_err(&dev_priv->drm, "failed to set DMA mask\n");
-
-			goto err_mem_regions;
-		}
-	}
-
-	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
-	 * using 32bit addressing, overwriting memory if HWS is located
-	 * above 4GB.
-	 *
-	 * The documentation also mentions an issue with undefined
-	 * behaviour if any general state is accessed within a page above 4GB,
-	 * which also needs to be handled carefully.
-	 */
-	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
-		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-
-		if (ret) {
-			drm_err(&dev_priv->drm, "failed to set DMA mask\n");
-
-			goto err_mem_regions;
-		}
-	}
-
 	cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

 	intel_gt_init_workarounds(dev_priv);
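The new helper separates the streaming DMA mask, set to the platform's full address width, from the coherent mask, which the gen2 and 965 quirks may narrow. A generic sketch of the same probe-time pattern for a hypothetical PCI driver; MY_DMA_BITS and the quirk flag are placeholders, not i915 definitions:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

#define MY_DMA_BITS 39	/* hypothetical platform address width */

static int my_set_dma_info(struct pci_dev *pdev, bool legacy_quirk)
{
	unsigned int bits = MY_DMA_BITS;
	int ret;

	/* streaming mask: the device's full addressing capability */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(bits));
	if (ret)
		return ret;

	/* some parts can only place coherent buffers below 4GB */
	if (legacy_quirk)
		bits = 32;

	/* the coherent mask may be narrower than the streaming mask */
	return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(bits));
}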
@@ -984,7 +1013,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 out_cleanup_irq:
 	intel_irq_uninstall(i915);
 out_cleanup_modeset:
-	/* FIXME */
+	i915_driver_modeset_remove_noirq(i915);
 out_cleanup_hw:
 	i915_driver_hw_remove(i915);
 	intel_memory_regions_driver_release(i915);
@@ -1020,12 +1049,12 @@ void i915_driver_remove(struct drm_i915_private *i915)

 	intel_irq_uninstall(i915);

-	i915_driver_modeset_remove_noirq(i915);
+	intel_modeset_driver_remove_noirq(i915);

 	i915_reset_error_state(i915);
 	i915_gem_driver_remove(i915);

-	intel_power_domains_driver_remove(i915);
+	i915_driver_modeset_remove_noirq(i915);

 	i915_driver_hw_remove(i915);
@@ -108,8 +108,8 @@

 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20200417"
-#define DRIVER_TIMESTAMP	1587105300
+#define DRIVER_DATE		"20200430"
+#define DRIVER_TIMESTAMP	1588234401

 struct drm_i915_gem_object;
@@ -467,14 +467,14 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
 	if (!erq->seqno)
 		return;

-	err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, start %08x, head %08x, tail %08x\n",
+	err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, head %08x, tail %08x\n",
 		   prefix, erq->pid, erq->context, erq->seqno,
 		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
 			    &erq->flags) ? "!" : "",
 		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
 			    &erq->flags) ? "+" : "",
 		   erq->sched_attr.priority,
-		   erq->start, erq->head, erq->tail);
+		   erq->head, erq->tail);
 }

 static void error_print_context(struct drm_i915_error_state_buf *m,
@@ -1207,21 +1207,22 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
 static void record_request(const struct i915_request *request,
 			   struct i915_request_coredump *erq)
 {
-	const struct i915_gem_context *ctx;
-
 	erq->flags = request->fence.flags;
 	erq->context = request->fence.context;
 	erq->seqno = request->fence.seqno;
 	erq->sched_attr = request->sched.attr;
-	erq->start = i915_ggtt_offset(request->ring->vma);
 	erq->head = request->head;
 	erq->tail = request->tail;

 	erq->pid = 0;
 	rcu_read_lock();
-	ctx = rcu_dereference(request->context->gem_context);
-	if (ctx)
-		erq->pid = pid_nr(ctx->pid);
+	if (!intel_context_is_closed(request->context)) {
+		const struct i915_gem_context *ctx;
+
+		ctx = rcu_dereference(request->context->gem_context);
+		if (ctx)
+			erq->pid = pid_nr(ctx->pid);
+	}
 	rcu_read_unlock();
 }
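The pattern used here, take rcu_read_lock(), rcu_dereference() the protected pointer, copy out the one field needed and unlock, is the standard way to sample a structure that may be freed concurrently. A generic kernel-context sketch with hypothetical types, not the i915 structures:

#include <linux/rcupdate.h>

/* hypothetical: an owner whose ->info may be replaced/freed under RCU */
struct info {
	int id;
};

struct owner {
	struct info __rcu *info;
};

static int sample_owner_id(struct owner *o)
{
	int id = 0;	/* safe default if the info is already gone */

	rcu_read_lock();	/* holds off the RCU grace period */
	{
		struct info *info = rcu_dereference(o->info);

		if (info)
			id = info->id;	/* copy out; no use after unlock */
	}
	rcu_read_unlock();

	return id;
}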
@@ -1319,26 +1320,6 @@ capture_user(struct intel_engine_capture_vma *capture,
 	return capture;
 }

-static struct i915_vma_coredump *
-capture_object(const struct intel_gt *gt,
-	       struct drm_i915_gem_object *obj,
-	       const char *name,
-	       struct i915_vma_compress *compress)
-{
-	if (obj && i915_gem_object_has_pages(obj)) {
-		struct i915_vma fake = {
-			.node = { .start = U64_MAX, .size = obj->base.size },
-			.size = obj->base.size,
-			.pages = obj->mm.pages,
-			.obj = obj,
-		};
-
-		return i915_vma_coredump_create(gt, &fake, name, compress);
-	} else {
-		return NULL;
-	}
-}
-
 static void add_vma(struct intel_engine_coredump *ee,
 		    struct i915_vma_coredump *vma)
 {
@@ -1427,12 +1408,6 @@ intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
 			      engine->wa_ctx.vma,
 			      "WA context",
 			      compress));
-
-	add_vma(ee,
-		capture_object(engine->gt,
-			       engine->default_state,
-			       "NULL context",
-			       compress));
 }

 static struct intel_engine_coredump *
@@ -50,7 +50,6 @@ struct i915_request_coredump {
 	pid_t pid;
 	u32 context;
 	u32 seqno;
-	u32 start;
 	u32 head;
 	u32 tail;
 	struct i915_sched_attr sched_attr;
@@ -3358,7 +3358,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 {
 	struct intel_uncore *uncore = &dev_priv->uncore;

-	u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
+	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
+		GEN8_PIPE_CDCLK_CRC_DONE;
 	u32 de_pipe_enables;
 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
 	u32 de_port_enables;
@@ -3369,13 +3370,10 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 	de_misc_masked |= GEN8_DE_MISC_GSE;

 	if (INTEL_GEN(dev_priv) >= 9) {
-		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
 				  GEN9_AUX_CHANNEL_D;
 		if (IS_GEN9_LP(dev_priv))
 			de_port_masked |= BXT_DE_PORT_GMBUS;
-	} else {
-		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
 	}

 	if (INTEL_GEN(dev_priv) >= 11)
@@ -171,6 +171,7 @@
 	.engine_mask = BIT(RCS0), \
 	.has_snoop = true, \
 	.has_coherent_ggtt = false, \
+	.dma_mask_size = 32, \
 	I9XX_PIPE_OFFSETS, \
 	I9XX_CURSOR_OFFSETS, \
 	I9XX_COLORS, \
@@ -190,6 +191,7 @@
 	.engine_mask = BIT(RCS0), \
 	.has_snoop = true, \
 	.has_coherent_ggtt = false, \
+	.dma_mask_size = 32, \
 	I845_PIPE_OFFSETS, \
 	I845_CURSOR_OFFSETS, \
 	I9XX_COLORS, \
@@ -226,6 +228,7 @@ static const struct intel_device_info i865g_info = {
 	.engine_mask = BIT(RCS0), \
 	.has_snoop = true, \
 	.has_coherent_ggtt = true, \
+	.dma_mask_size = 32, \
 	I9XX_PIPE_OFFSETS, \
 	I9XX_CURSOR_OFFSETS, \
 	I9XX_COLORS, \
@@ -286,6 +289,7 @@ static const struct intel_device_info g33_info = {
 	PLATFORM(INTEL_G33),
 	.display.has_hotplug = 1,
 	.display.has_overlay = 1,
+	.dma_mask_size = 36,
 };

 static const struct intel_device_info pnv_g_info = {
@@ -293,6 +297,7 @@ static const struct intel_device_info pnv_g_info = {
 	PLATFORM(INTEL_PINEVIEW),
 	.display.has_hotplug = 1,
 	.display.has_overlay = 1,
+	.dma_mask_size = 36,
 };

 static const struct intel_device_info pnv_m_info = {
@@ -301,6 +306,7 @@ static const struct intel_device_info pnv_m_info = {
 	.is_mobile = 1,
 	.display.has_hotplug = 1,
 	.display.has_overlay = 1,
+	.dma_mask_size = 36,
 };

 #define GEN4_FEATURES \
@@ -313,6 +319,7 @@ static const struct intel_device_info pnv_m_info = {
 	.engine_mask = BIT(RCS0), \
 	.has_snoop = true, \
 	.has_coherent_ggtt = true, \
+	.dma_mask_size = 36, \
 	I9XX_PIPE_OFFSETS, \
 	I9XX_CURSOR_OFFSETS, \
 	I965_COLORS, \
@@ -365,6 +372,7 @@ static const struct intel_device_info gm45_info = {
 	.has_coherent_ggtt = true, \
 	/* ilk does support rc6, but we do not implement [power] contexts */ \
 	.has_rc6 = 0, \
+	.dma_mask_size = 36, \
 	I9XX_PIPE_OFFSETS, \
 	I9XX_CURSOR_OFFSETS, \
 	ILK_COLORS, \
@@ -395,6 +403,7 @@ static const struct intel_device_info ilk_m_info = {
 	.has_rc6 = 1, \
 	.has_rc6p = 1, \
 	.has_rps = true, \
+	.dma_mask_size = 40, \
 	.ppgtt_type = INTEL_PPGTT_ALIASING, \
 	.ppgtt_size = 31, \
 	I9XX_PIPE_OFFSETS, \
@@ -445,6 +454,7 @@ static const struct intel_device_info snb_m_gt2_info = {
 	.has_rc6 = 1, \
 	.has_rc6p = 1, \
 	.has_rps = true, \
+	.dma_mask_size = 40, \
 	.ppgtt_type = INTEL_PPGTT_ALIASING, \
 	.ppgtt_size = 31, \
 	IVB_PIPE_OFFSETS, \
@@ -504,6 +514,7 @@ static const struct intel_device_info vlv_info = {
 	.has_rps = true,
 	.display.has_gmch = 1,
 	.display.has_hotplug = 1,
+	.dma_mask_size = 40,
 	.ppgtt_type = INTEL_PPGTT_ALIASING,
 	.ppgtt_size = 31,
 	.has_snoop = true,
@@ -554,6 +565,7 @@ static const struct intel_device_info hsw_gt3_info = {
 	G75_FEATURES, \
 	GEN(8), \
 	.has_logical_ring_contexts = 1, \
+	.dma_mask_size = 39, \
 	.ppgtt_type = INTEL_PPGTT_FULL, \
 	.ppgtt_size = 48, \
 	.has_64bit_reloc = 1, \
@@ -602,6 +614,7 @@ static const struct intel_device_info chv_info = {
 	.has_rps = true,
 	.has_logical_ring_contexts = 1,
 	.display.has_gmch = 1,
+	.dma_mask_size = 39,
 	.ppgtt_type = INTEL_PPGTT_ALIASING,
 	.ppgtt_size = 32,
 	.has_reset_engine = 1,
@@ -685,6 +698,7 @@ static const struct intel_device_info skl_gt4_info = {
 	.has_logical_ring_contexts = 1, \
 	.has_logical_ring_preemption = 1, \
 	.has_gt_uc = 1, \
+	.dma_mask_size = 39, \
 	.ppgtt_type = INTEL_PPGTT_FULL, \
 	.ppgtt_size = 48, \
 	.has_reset_engine = 1, \
@@ -1263,8 +1263,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 		 * dropped by GuC. They won't be part of the context
 		 * ID in the OA reports, so squash those lower bits.
 		 */
-		stream->specific_ctx_id =
-			lower_32_bits(ce->lrc_desc) >> 12;
+		stream->specific_ctx_id = ce->lrc.lrca >> 12;

 		/*
 		 * GuC uses the top bit to signal proxy submission, so
@@ -1281,11 +1280,10 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 			((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
 		/*
 		 * Pick an unused context id
-		 * 0 - (NUM_CONTEXT_TAG - 1) are used by other contexts
+		 * 0 - BITS_PER_LONG are used by other contexts
 		 * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
 		 */
 		stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
-		BUILD_BUG_ON((GEN12_MAX_CONTEXT_HW_ID - 1) < NUM_CONTEXT_TAG);
 		break;
 	}

@@ -2098,7 +2096,7 @@ gen8_store_flex(struct i915_request *rq,
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);

-	offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
+	offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
 	do {
 		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
 		*cs++ = offset + flex->offset * sizeof(u32);
@@ -439,29 +439,9 @@ static u64 count_interrupts(struct drm_i915_private *i915)
 	return sum;
 }

-static void engine_event_destroy(struct perf_event *event)
-{
-	struct drm_i915_private *i915 =
-		container_of(event->pmu, typeof(*i915), pmu.base);
-	struct intel_engine_cs *engine;
-
-	engine = intel_engine_lookup_user(i915,
-					  engine_event_class(event),
-					  engine_event_instance(event));
-	if (drm_WARN_ON_ONCE(&i915->drm, !engine))
-		return;
-
-	if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
-	    intel_engine_supports_stats(engine))
-		intel_disable_engine_stats(engine);
-}
-
 static void i915_pmu_event_destroy(struct perf_event *event)
 {
 	WARN_ON(event->parent);
-
-	if (is_engine_event(event))
-		engine_event_destroy(event);
 }

 static int
@@ -514,23 +494,13 @@ static int engine_event_init(struct perf_event *event)
 	struct drm_i915_private *i915 =
 		container_of(event->pmu, typeof(*i915), pmu.base);
 	struct intel_engine_cs *engine;
-	u8 sample;
-	int ret;

 	engine = intel_engine_lookup_user(i915, engine_event_class(event),
 					  engine_event_instance(event));
 	if (!engine)
 		return -ENODEV;

-	sample = engine_event_sample(event);
-	ret = engine_event_status(engine, sample);
-	if (ret)
-		return ret;
-
-	if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
-		ret = intel_enable_engine_stats(engine);
-
-	return ret;
+	return engine_event_status(engine, engine_event_sample(event));
 }

 static int i915_pmu_event_init(struct perf_event *event)
@@ -34,8 +34,8 @@
 * Follow the style described here for new macros, and while changing existing
 * macros. Do **not** mass change existing definitions just to update the style.
 *
- * Layout
- * ~~~~~~
+ * File Layout
+ * ~~~~~~~~~~~
 *
 * Keep helper macros near the top. For example, _PIPE() and friends.
 *
@@ -561,6 +561,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 * Registers used only by the command parser
 */
 #define BCS_SWCTRL _MMIO(0x22200)
+#define   BCS_SRC_Y REG_BIT(0)
+#define   BCS_DST_Y REG_BIT(1)

 /* There are 16 GPR registers */
 #define BCS_GPR(n)	_MMIO(0x22600 + (n) * 8)
@@ -2657,6 +2659,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define RING_DMA_FADD_UDW(base)	_MMIO((base) + 0x60) /* gen8+ */
 #define RING_INSTPM(base)	_MMIO((base) + 0xc0)
 #define RING_MI_MODE(base)	_MMIO((base) + 0x9c)
+#define RING_CMD_BUF_CCTL(base)	_MMIO((base) + 0x84)
 #define INSTPS			_MMIO(0x2070) /* 965+ only */
 #define GEN4_INSTDONE1		_MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */
 #define ACTHD_I965		_MMIO(0x2074)
@@ -4013,31 +4016,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define GEN6_RP_STATE_LIMITS	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5994)
 #define GEN6_RP_STATE_CAP	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
 #define BXT_RP_STATE_CAP	_MMIO(0x138170)

-/*
- * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
- * 8300) freezing up around GPU hangs. Looks as if even
- * scheduling/timer interrupts start misbehaving if the RPS
- * EI/thresholds are "bad", leading to a very sluggish or even
- * frozen machine.
- */
-#define INTERVAL_1_28_US(us)	roundup(((us) * 100) >> 7, 25)
-#define INTERVAL_1_33_US(us)	(((us) * 3) >> 2)
-#define INTERVAL_0_833_US(us)	(((us) * 6) / 5)
-#define GT_INTERVAL_FROM_US(dev_priv, us) (INTEL_GEN(dev_priv) >= 9 ? \
-				(IS_GEN9_LP(dev_priv) ? \
-				INTERVAL_0_833_US(us) : \
-				INTERVAL_1_33_US(us)) : \
-				INTERVAL_1_28_US(us))
-
-#define INTERVAL_1_28_TO_US(interval)  (((interval) << 7) / 100)
-#define INTERVAL_1_33_TO_US(interval)  (((interval) << 2) / 3)
-#define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6)
-#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (INTEL_GEN(dev_priv) >= 9 ? \
-				(IS_GEN9_LP(dev_priv) ? \
-				INTERVAL_0_833_TO_US(interval) : \
-				INTERVAL_1_33_TO_US(interval)) : \
-				INTERVAL_1_28_TO_US(interval))
 #define GEN9_RP_STATE_LIMITS	_MMIO(0x138148)

 /*
 * Logical Context regs
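For reference, the deleted helpers converted microseconds into the RPS evaluation-interval units of roughly 1.28 us (SNB through gen8), 1.33 us (gen9+) or 0.833 us (gen9 LP). A tiny self-contained program that re-derives the arithmetic of the removed macros; this is not the replacement intel_gt_pm_interval_to_ns() code:

#include <stdio.h>

/* round x up to the next multiple of m, like the kernel's roundup() */
#define ROUNDUP(x, m) ((((x) + (m) - 1) / (m)) * (m))

/* 1.28us units, kept a multiple of 25 to dodge the SNB RPS erratum */
#define INTERVAL_1_28_US(us)	ROUNDUP(((us) * 100) >> 7, 25)
#define INTERVAL_1_33_US(us)	(((us) * 3) >> 2)
#define INTERVAL_0_833_US(us)	(((us) * 6) / 5)

int main(void)
{
	/* 1000us is 781 raw 1.28us units, rounded up to 800 */
	printf("1.28us units for 1000us:  %lu\n",
	       (unsigned long)INTERVAL_1_28_US(1000ul));
	/* 1000us * 3 / 4 = 750 units of 1.33us */
	printf("1.33us units for 1000us:  %lu\n",
	       (unsigned long)INTERVAL_1_33_US(1000ul));
	/* 1000us * 6 / 5 = 1200 units of 0.833us */
	printf("0.833us units for 1000us: %lu\n",
	       (unsigned long)INTERVAL_0_833_US(1000ul));
	return 0;
}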
@@ -9108,8 +9087,13 @@ enum {
 #define     ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point)	(((point) << 16) | (0x1 << 8))
 #define   GEN6_PCODE_READ_D_COMP		0x10
 #define   GEN6_PCODE_WRITE_D_COMP		0x11
+#define   ICL_PCODE_EXIT_TCCOLD			0x12
 #define   HSW_PCODE_DE_WRITE_FREQ_REQ		0x17
 #define   DISPLAY_IPS_CONTROL			0x19
+#define   TGL_PCODE_TCCOLD			0x26
+#define     TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED	REG_BIT(0)
+#define     TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ	0
+#define     TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ	REG_BIT(0)
 /* See also IPS_CTL */
 #define   IPS_PCODE_CONTROL			(1 << 30)
 #define   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL	0x1A
@@ -9396,6 +9380,22 @@ enum {
 #define AUD_PIN_BUF_CTL		_MMIO(0x48414)
 #define   AUD_PIN_BUF_ENABLE	REG_BIT(31)

+/* Display Audio Config Reg */
+#define AUD_CONFIG_BE			_MMIO(0x65ef0)
+#define HBLANK_EARLY_ENABLE_ICL(pipe)		(0x1 << (20 - (pipe)))
+#define HBLANK_EARLY_ENABLE_TGL(pipe)		(0x1 << (24 + (pipe)))
+#define HBLANK_START_COUNT_MASK(pipe)		(0x7 << (3 + ((pipe) * 6)))
+#define HBLANK_START_COUNT(pipe, val)		(((val) & 0x7) << (3 + ((pipe)) * 6))
+#define NUMBER_SAMPLES_PER_LINE_MASK(pipe)	(0x3 << ((pipe) * 6))
+#define NUMBER_SAMPLES_PER_LINE(pipe, val)	(((val) & 0x3) << ((pipe) * 6))
+
+#define HBLANK_START_COUNT_8	0
+#define HBLANK_START_COUNT_16	1
+#define HBLANK_START_COUNT_32	2
+#define HBLANK_START_COUNT_64	3
+#define HBLANK_START_COUNT_96	4
+#define HBLANK_START_COUNT_128	5

 /*
 * HSW - ICL power wells
 *
@@ -158,16 +158,18 @@ vma_create(struct drm_i915_gem_object *obj,

 	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

+	spin_lock(&obj->vma.lock);
+
 	if (i915_is_ggtt(vm)) {
 		if (unlikely(overflows_type(vma->size, u32)))
-			goto err_vma;
+			goto err_unlock;

 		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
 						      i915_gem_object_get_tiling(obj),
 						      i915_gem_object_get_stride(obj));
 		if (unlikely(vma->fence_size < vma->size || /* overflow */
 			     vma->fence_size > vm->total))
-			goto err_vma;
+			goto err_unlock;

 		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

@@ -179,8 +181,6 @@ vma_create(struct drm_i915_gem_object *obj,
 		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
 	}

-	spin_lock(&obj->vma.lock);
-
 	rb = NULL;
 	p = &obj->vma.tree.rb_node;
 	while (*p) {
@@ -225,6 +225,8 @@ vma_create(struct drm_i915_gem_object *obj,

 	return vma;

+err_unlock:
+	spin_unlock(&obj->vma.lock);
 err_vma:
 	i915_vma_free(vma);
 	return ERR_PTR(-E2BIG);
@@ -520,7 +522,6 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
 	GEM_BUG_ON(!obj);

 	i915_vma_unpin(vma);
-	i915_vma_close(vma);

 	if (flags & I915_VMA_RELEASE_MAP)
 		i915_gem_object_unpin_map(obj);
@@ -1021,13 +1022,8 @@ int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
 	} while (1);
 }

-void i915_vma_close(struct i915_vma *vma)
+static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
 {
-	struct intel_gt *gt = vma->vm->gt;
-	unsigned long flags;
-
-	GEM_BUG_ON(i915_vma_is_closed(vma));
-
 	/*
 	 * We defer actually closing, unbinding and destroying the VMA until
 	 * the next idle point, or if the object is freed in the meantime. By
@@ -1040,9 +1036,25 @@ void i915_vma_close(struct i915_vma *vma)
 	 * causing us to rebind the VMA once more. This ends up being a lot
 	 * of wasted work for the steady state.
 	 */
-	spin_lock_irqsave(&gt->closed_lock, flags);
+	GEM_BUG_ON(i915_vma_is_closed(vma));
 	list_add(&vma->closed_link, &gt->closed_vma);
-	spin_unlock_irqrestore(&gt->closed_lock, flags);
 }

+void i915_vma_close(struct i915_vma *vma)
+{
+	struct intel_gt *gt = vma->vm->gt;
+	unsigned long flags;
+
+	if (i915_vma_is_ggtt(vma))
+		return;
+
+	GEM_BUG_ON(!atomic_read(&vma->open_count));
+	if (atomic_dec_and_lock_irqsave(&vma->open_count,
+					&gt->closed_lock,
+					flags)) {
+		__vma_close(vma, gt);
+		spin_unlock_irqrestore(&gt->closed_lock, flags);
+	}
+}

 static void __i915_vma_remove_closed(struct i915_vma *vma)
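atomic_dec_and_lock_irqsave() only takes the spinlock when the counter actually reaches zero, so the common case of a still-open VMA stays lock-free. A generic kernel-context sketch of the idiom, using a hypothetical object rather than the i915 VMA:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* hypothetical refcounted object parked on a lock-protected list */
struct thing {
	atomic_t open_count;
	struct list_head link;
};

static DEFINE_SPINLOCK(closed_lock);
static LIST_HEAD(closed_list);

static void thing_close(struct thing *t)
{
	unsigned long flags;

	/*
	 * Fast path: just decrement. Only the final closer wins the
	 * lock, so the list manipulation happens exactly once.
	 */
	if (atomic_dec_and_lock_irqsave(&t->open_count, &closed_lock, flags)) {
		list_add(&t->link, &closed_list);	/* deferred teardown */
		spin_unlock_irqrestore(&closed_lock, flags);
	}
}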
@@ -98,6 +98,7 @@ void intel_device_info_print_static(const struct intel_device_info *info,
 	drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
 	drm_printf(p, "ppgtt-size: %d\n", info->ppgtt_size);
 	drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);
+	drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size);

 #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
 	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);

@@ -158,6 +158,8 @@ struct intel_device_info {

 	enum intel_platform platform;

+	unsigned int dma_mask_size; /* available DMA address bits */
+
 	enum intel_ppgtt_type ppgtt_type;
 	unsigned int ppgtt_size; /* log2, e.g. 31/32/48 bits */
@@ -3757,42 +3757,38 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
 	return 0;
 }

-bool intel_can_enable_sagv(struct intel_atomic_state *state)
+void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
 {
-	struct drm_device *dev = state->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+	if (!intel_can_enable_sagv(state))
+		intel_disable_sagv(dev_priv);
+}
+
+void intel_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+	if (intel_can_enable_sagv(state))
+		intel_enable_sagv(dev_priv);
+}
+
+static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+	struct drm_device *dev = crtc_state->uapi.crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
 	struct intel_plane *plane;
-	struct intel_crtc_state *crtc_state;
-	enum pipe pipe;
 	int level, latency;

 	if (!intel_has_sagv(dev_priv))
 		return false;

-	/*
-	 * If there are no active CRTCs, no additional checks need be performed
-	 */
-	if (hweight8(state->active_pipes) == 0)
+	if (!crtc_state->hw.active)
 		return true;

-	/*
-	 * SKL+ workaround: bspec recommends we disable SAGV when we have
-	 * more then one pipe enabled
-	 */
-	if (hweight8(state->active_pipes) > 1)
-		return false;
-
-	/* Since we're now guaranteed to only have one active CRTC... */
-	pipe = ffs(state->active_pipes) - 1;
-	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-	crtc_state = to_intel_crtc_state(crtc->base.state);
-
 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
 		return false;

 	for_each_intel_plane_on_crtc(dev, crtc, plane) {
-		struct skl_plane_wm *wm =
+		const struct skl_plane_wm *wm =
 			&crtc_state->wm.skl.optimal.planes[plane->id];

 		/* Skip this plane if it's not enabled */
@@ -3823,6 +3819,37 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
 	return true;
 }

+bool intel_can_enable_sagv(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct intel_crtc *crtc;
+	const struct intel_crtc_state *crtc_state;
+	enum pipe pipe;
+
+	if (!intel_has_sagv(dev_priv))
+		return false;
+
+	/*
+	 * If there are no active CRTCs, no additional checks need be performed
+	 */
+	if (hweight8(state->active_pipes) == 0)
+		return true;
+
+	/*
+	 * SKL+ workaround: bspec recommends we disable SAGV when we have
+	 * more then one pipe enabled
+	 */
+	if (hweight8(state->active_pipes) > 1)
+		return false;
+
+	/* Since we're now guaranteed to only have one active CRTC... */
+	pipe = ffs(state->active_pipes) - 1;
+	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+	crtc_state = to_intel_crtc_state(crtc->base.state);
+
+	return intel_crtc_can_enable_sagv(crtc_state);
+}
+
 /*
 * Calculate initial DBuf slice offset, based on slice size
 * and mask(i.e if slice size is 1024 and second slice is enabled
@@ -5428,8 +5455,8 @@ static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
 	return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
 }

-static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
-					   const struct skl_ddb_entry *b)
+static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
+				    const struct skl_ddb_entry *b)
 {
 	return a->start < b->end && b->start < a->end;
 }
@@ -5880,8 +5907,7 @@ static void ilk_optimize_watermarks(struct intel_atomic_state *state,
 	mutex_unlock(&dev_priv->wm.wm_mutex);
 }

-static inline void skl_wm_level_from_reg_val(u32 val,
-					     struct skl_wm_level *level)
+static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
 {
 	level->plane_en = val & PLANE_WM_EN;
 	level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
@@ -6854,6 +6880,10 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
 	if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
 		I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
 			   TGL_VRH_GATING_DIS);
+
+	/* Wa_14011059788:tgl */
+	intel_uncore_rmw(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN,
+			 0, DFR_DISABLE);
 }

 static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -44,6 +44,8 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
 bool intel_can_enable_sagv(struct intel_atomic_state *state);
 int intel_enable_sagv(struct drm_i915_private *dev_priv);
 int intel_disable_sagv(struct drm_i915_private *dev_priv);
+void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
+void intel_sagv_post_plane_update(struct intel_atomic_state *state);
 bool skl_wm_level_equals(const struct skl_wm_level *l1,
 			 const struct skl_wm_level *l2);
 bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
@@ -336,7 +336,7 @@ void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
 	intel_sbi_rw(i915, reg, destination, &value, false);
 }

-static inline int gen6_check_mailbox_status(u32 mbox)
+static int gen6_check_mailbox_status(u32 mbox)
 {
 	switch (mbox & GEN6_PCODE_ERROR_MASK) {
 	case GEN6_PCODE_SUCCESS:
@@ -356,7 +356,7 @@ static inline int gen6_check_mailbox_status(u32 mbox)
 	}
 }

-static inline int gen7_check_mailbox_status(u32 mbox)
+static int gen7_check_mailbox_status(u32 mbox)
 {
 	switch (mbox & GEN6_PCODE_ERROR_MASK) {
 	case GEN6_PCODE_SUCCESS:
@@ -1092,8 +1092,7 @@ static const struct intel_forcewake_range __gen9_fw_ranges[] = {

 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
 static const struct intel_forcewake_range __gen11_fw_ranges[] = {
-	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
-	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
+	GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
 	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
 	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
 	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
@@ -1103,27 +1102,31 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = {
 	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
 	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
 	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
-	GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x8800, 0x8bff, 0),
 	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
-	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
-	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
-	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x9560, 0x95ff, 0),
+	GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_BLITTER),
 	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
 	GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER),
 	GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
 	GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER),
 	GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
-	GEN_FW_RANGE(0x1a000, 0x243ff, FORCEWAKE_BLITTER),
-	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
-	GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x24000, 0x2407f, 0),
+	GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
+	GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_BLITTER),
 	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
 	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
-	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
-	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
-	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
+	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
+	GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
 	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
-	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
-	GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
+	GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
 };

 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
[remaining file diffs omitted: too many files changed to display]