Merge tag 'drm-intel-next-2020-04-17' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
UAPI Changes:

- drm/i915/perf: introduce global sseu pinning. Allow userspace to request at perf/OA open a full SSEU configuration on the system to be able to benchmark 3D workloads, at the cost of not being able to run media workloads. (Lionel) Userspace changes: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4021
- drm/i915/perf: add new open param to configure polling of OA buffer. Let the application choose how often the OA buffer should be checked on the CPU side for data availability, trading CPU overhead against the realtime nature of the data. Userspace changes: https://patchwork.freedesktop.org/series/74655/ (i915 perf recorder is a tool to capture i915 perf data for viewing in GPUVis.)
- drm/i915/perf: remove generated code. Removal of the machine-generated perf/OA test configurations from i915. Used by Mesa v17.1-18.0, and shortly replaced by userspace-supplied OA configurations. Removal of the configs causes affected Mesa versions to fall back to earlier kernel behaviour (potentially missing metrics). (Lionel)

Cross-subsystem Changes:

- Backmerge of drm-next
- Includes tag 'topic/phy-compliance-2020-04-08' from git://anongit.freedesktop.org/drm/drm-misc

Driver Changes:

- Fix for GitLab issue #27: Support 5k tiled dual DP display on SKL (Ville)
- Fix https://github.com/thesofproject/linux/issues/1719: Broken audio after S3 resume on JSL platforms (Kai)
- Add new Tigerlake PCI IDs (Swathi D.)
- Add missing Tigerlake W/As (Matt R.)
- Extended Wa_2006604312 to EHL (Matt A)
- Add DPCD link_rate quirk for Apple 15" MBP 2017 (v3) (Mario)
- Make Wa_14010229206 apply to all Tigerlake steppings (Swathi D.)
- Extend hotplug detect retry on TypeC connectors to 5 seconds (Imre)
- Yield the timeslice if caught waiting on a user semaphore (Chris)
- Limit the residual W/A batch to Haswell due to instability on IVB/BYT (Chris)
- TBT AUX should use TC power well ops on Tigerlake (Matt R)
- Update PMINTRMSK holding fw to make it effective for RPS (Francisco, Chris)
- Add YUV444 packed format support for skl+ (Stanislav)
- Invalidate OA TLB when closing perf stream to avoid corruption (Umesh)
- HDCP: fix Ri prime check done during link check (Oliver)
- Rearm heartbeat on sysfs interval change (Chris)
- Fix crtc nv12 etc. plane bitmasks for DPMS off (Ville)
- Treat idling as a RPS downclock event (Chris)
- Leave rps->cur_freq on unpark (Chris)
- Ignore short pulse when EDP panel powered off (Anshuman)
- Keep the engine awake until the next jiffie, to avoid ping-pong on moderate load (Chris)
- Select the deepest available parking mode for rc6 on IVB (Chris)
- Optimizations to direct submission execlist path (Chris)
- Avoid NULL pointer dereference at intel_read_infoframe() (Chris)
- Fix mode private_flags comparison at atomic_check (Uma, Ville)
- Use forced codec wake on all gen9+ platforms (Kai)
- Schedule oa_config after modifying the contexts (Chris, Lionel)
- Explicitly reset both reg and context runtime on GPU reset (Chris)
- Don't enable DDI IO power on a TypeC port in TBT mode (Imre)
- Fixes to TGL, ICL and EHL vswing tables (Jose)
- Fill all the unused space in the GGTT (Chris, Imre)
- Ignore readonly failures when updating relocs (Chris)
- Attempt to find free space earlier for non-pinned VMAs (Chris)
- Only wait for GPU activity before unbinding a GGTT fence (Chris)
- Avoid data loss on small userspace perf OA polling (Ashutosh)
- Watch out for unevictable nodes during eviction (Matt A)
- Reinforce the barrier after GTT updates for Ironlake (Chris)
- Convert various parts of driver to use drm_device based logging (Wambui, Jani)
- Avoid dereferencing already closed context for engine (Chris)
- Enable non-contiguous pipe fusing (Anshuman)
- Add HW readout of Gamma LUT on ICL (Swati S.)
- Use explicit flag to mark unreachable intel_context (Chris)
- Cancel a hung context if already closed (Chris)
- Add DP VSC/HDR SDP data structures and write routines (Gwan-gyeong)
- Report context-is-closed prior to pinning at execbuf (Chris)
- Mark timeline->cacheline as destroyed after rcu grace period (Chris)
- Avoid live-lock with i915_vma_parked() (Chris)
- Avoid gem_context->mutex for simple vma lookup (Chris)
- Rely on direct submission to the queue (Chris)
- Configure DSI transcoder to operate in TE GATE command mode (Vandita)
- Add DSI vblank calculation for command mode (Vandita)
- Disable periodic command mode if programmed by GOP (Vandita)
- Use private flags to indicate TE in cmd mode (Vandita)
- Make fences a nice-to-have for FBC on GEN9+ (Jose)
- Fix work queuing issue with mixed virtual engine/physical engine submissions (Chris)
- Drop final few uses of drm_i915_private.engine (Chris)
- Return early after MISSING_CASE for write_dp_sdp (Chris)
- Include port sync state in the state dump (Ville)
- ELSP workaround switching back to a completed context (Chris)
- Include priority info in trace_ports (Chris)
- Allow for different modes of interruptible i915_active_wait (Chris)
- Split eb_vma into its own allocation (Chris)
- Don't read perf head/tail pointers outside critical section (Lionel)
- Pause CS flow before execlists reset (Chris)
- Make fence revocation unequivocal (Chris)
- Drop cached obj->bind_count (Chris)
- Peek at the next submission for error interrupts (Chris)
- Utilize rcu iteration of context engines (Chris)
- Keep a per-engine request pool for power management ops (Chris)
- Refactor port sync code into normal modeset flow (Ville)
- Check current i915_vma.pin_count status first on unbind (Chris)
- Free request pool from virtual engines (Chris)
- Flush all the reloc_gpu batch (Chris)
- Make exclusive awaits on i915_active optional and allow async waits (Chris)
- Wait until the context is finally retired before releasing engines (Chris)
- Prefer '%ps' for printing function symbol names (Chris)
- Allow setting generic data pointer on intel GT debugfs (Andi)
- Constify DP link computation code more (Ville)
- Simplify MST master transcoder computation (Ville)
- Move TRANS_DDI_FUNC_CTL2 programming where it belongs (Ville)
- Move icl_get_trans_port_sync_config() into the DDI code (Ville)
- Add definitions for VRR registers and bits (Aditya)
- Refactor hardware fence code (Chris)
- Start passing latency as parameter to WM calculation (Stanislav)
- Kernel selftest and debug tracing improvements (Matt A, Chris, Mika)
- Fixes to CI found corner cases and lockdep splats (Chris)
- Overall fixes and refactoring to GEM code (Chris)
- Overall fixes and refactoring to display code (Ville)
- GuC/HuC code improvements (Daniele, Michal Wa)
- Static code checker fixes (Nathan, Ville, Colin, Chris)
- Fix spelling mistake (Chen)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200417111548.GA15033@jlahtine-desk.ger.corp.intel.com
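For illustration, a minimal userspace sketch of opening an i915 perf/OA stream with the new OA-buffer polling parameter described above. The property name DRM_I915_PERF_PROP_POLL_OA_PERIOD and its nanosecond unit are assumptions taken from the referenced series; the remaining properties are the pre-existing perf open uAPI.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Hedged sketch: open an OA stream whose buffer is checked at most every 50ms. */
static int example_open_oa_stream(int drm_fd, uint64_t metrics_set)
{
        uint64_t properties[] = {
                DRM_I915_PERF_PROP_SAMPLE_OA, 1,
                DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set,
                DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
                DRM_I915_PERF_PROP_OA_EXPONENT, 12,
                /* assumed property: poll period in nanoseconds */
                DRM_I915_PERF_PROP_POLL_OA_PERIOD, 50ull * 1000 * 1000,
        };
        struct drm_i915_perf_open_param param = {
                .flags = I915_PERF_FLAG_FD_CLOEXEC | I915_PERF_FLAG_FD_NONBLOCK,
                .num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
                .properties_ptr = (uintptr_t)properties,
        };

        return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
}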
This commit is contained in:
commit 774f1eeb18
@@ -391,19 +391,19 @@ Global GTT views

GTT Fences and Swizzling
------------------------

.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence_reg.c
.. kernel-doc:: drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
   :internal:

Global GTT Fence Handling
~~~~~~~~~~~~~~~~~~~~~~~~~

.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence_reg.c
.. kernel-doc:: drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
   :doc: fence register handling

Hardware Tiling and Swizzling Details
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence_reg.c
.. kernel-doc:: drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
   :doc: tiling swizzling details

Object Tiling IOCTLs
@@ -846,6 +846,7 @@ void intel_gtt_insert_page(dma_addr_t addr,
                           unsigned int flags)
{
        intel_private.driver->write_entry(addr, pg, flags);
        readl(intel_private.gtt + pg);
        if (intel_private.driver->chipset_flush)
                intel_private.driver->chipset_flush();
}

@@ -871,7 +872,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
                        j++;
                }
        }
        wmb();
        readl(intel_private.gtt + j - 1);
        if (intel_private.driver->chipset_flush)
                intel_private.driver->chipset_flush();
}

@@ -1105,6 +1106,7 @@ static void i9xx_cleanup(void)

static void i9xx_chipset_flush(void)
{
        wmb();
        if (intel_private.i9xx_flush_page)
                writel(1, intel_private.i9xx_flush_page);
}
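The readl() calls added above act as posting reads: after the GTT entries are written through the write-combining mapping, wmb() orders the CPU writes and the read back from the same BAR forces them out to the device before the chipset flush. A minimal standalone sketch of that pattern (not the driver's exact code):

/* Sketch only: flush pending write-combined PTE writes before a chipset flush. */
static void example_flush_gtt_writes(u32 __iomem *gtt, unsigned int last_entry)
{
        wmb();                   /* order the preceding PTE writes */
        readl(gtt + last_entry); /* posting read: force the writes to the device */
}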
@@ -2530,7 +2530,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
        /* get phy test pattern and pattern parameters from DP receiver */
        core_link_read_dpcd(
                        link,
                        DP_TEST_PHY_PATTERN,
                        DP_PHY_TEST_PATTERN,
                        &dpcd_test_pattern.raw,
                        sizeof(dpcd_test_pattern));
        core_link_read_dpcd(
@@ -1238,6 +1238,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
        { OUI(0x00, 0x00, 0x00), DEVICE_ID('C', 'H', '7', '5', '1', '1'), false, BIT(DP_DPCD_QUIRK_NO_SINK_COUNT) },
        /* Synaptics DP1.4 MST hubs can support DSC without virtual DPCD */
        { OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) },
        /* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */
        { OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) },
};

#undef OUI

@@ -1533,3 +1535,97 @@ int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]
        return num_bpc;
}
EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs);

/**
 * drm_dp_get_phy_test_pattern() - get the requested pattern from the sink.
 * @aux: DisplayPort AUX channel
 * @data: DP phy compliance test parameters.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
                                struct drm_dp_phy_test_params *data)
{
        int err;
        u8 rate, lanes;

        err = drm_dp_dpcd_readb(aux, DP_TEST_LINK_RATE, &rate);
        if (err < 0)
                return err;
        data->link_rate = drm_dp_bw_code_to_link_rate(rate);

        err = drm_dp_dpcd_readb(aux, DP_TEST_LANE_COUNT, &lanes);
        if (err < 0)
                return err;
        data->num_lanes = lanes & DP_MAX_LANE_COUNT_MASK;

        if (lanes & DP_ENHANCED_FRAME_CAP)
                data->enhanced_frame_cap = true;

        err = drm_dp_dpcd_readb(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern);
        if (err < 0)
                return err;

        switch (data->phy_pattern) {
        case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
                err = drm_dp_dpcd_read(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
                                       &data->custom80, sizeof(data->custom80));
                if (err < 0)
                        return err;

                break;
        case DP_PHY_TEST_PATTERN_CP2520:
                err = drm_dp_dpcd_read(aux, DP_TEST_HBR2_SCRAMBLER_RESET,
                                       &data->hbr2_reset,
                                       sizeof(data->hbr2_reset));
                if (err < 0)
                        return err;
        }

        return 0;
}
EXPORT_SYMBOL(drm_dp_get_phy_test_pattern);

/**
 * drm_dp_set_phy_test_pattern() - set the pattern to the sink.
 * @aux: DisplayPort AUX channel
 * @data: DP phy compliance test parameters.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
                                struct drm_dp_phy_test_params *data, u8 dp_rev)
{
        int err, i;
        u8 link_config[2];
        u8 test_pattern;

        link_config[0] = drm_dp_link_rate_to_bw_code(data->link_rate);
        link_config[1] = data->num_lanes;
        if (data->enhanced_frame_cap)
                link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
        err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, link_config, 2);
        if (err < 0)
                return err;

        test_pattern = data->phy_pattern;
        if (dp_rev < 0x12) {
                test_pattern = (test_pattern << 2) &
                               DP_LINK_QUAL_PATTERN_11_MASK;
                err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET,
                                         test_pattern);
                if (err < 0)
                        return err;
        } else {
                for (i = 0; i < data->num_lanes; i++) {
                        err = drm_dp_dpcd_writeb(aux,
                                                 DP_LINK_QUAL_LANE0_SET + i,
                                                 test_pattern);
                        if (err < 0)
                                return err;
                }
        }

        return 0;
}
EXPORT_SYMBOL(drm_dp_set_phy_test_pattern);
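A hedged usage sketch for the two helpers added above: a source driver's PHY-compliance path could fetch the sink-requested pattern parameters and then program them back, roughly like this (the example_ name is made up and error handling is trimmed):

static int example_dp_phy_compliance(struct drm_dp_aux *aux, u8 dpcd_rev)
{
        struct drm_dp_phy_test_params params = {};
        int err;

        /* read DP_TEST_LINK_RATE/DP_TEST_LANE_COUNT and the requested pattern */
        err = drm_dp_get_phy_test_pattern(aux, &params);
        if (err < 0)
                return err;

        /* ... retrain the link at params.link_rate / params.num_lanes ... */

        /* write DP_LINK_BW_SET and program the pattern on every active lane */
        return drm_dp_set_phy_test_pattern(aux, &params, dpcd_rev);
}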
@@ -89,6 +89,7 @@ gt-y += \
        gt/intel_engine_pool.o \
        gt/intel_engine_user.o \
        gt/intel_ggtt.o \
        gt/intel_ggtt_fencing.o \
        gt/intel_gt.o \
        gt/intel_gt_irq.o \
        gt/intel_gt_pm.o \

@@ -150,7 +151,6 @@ i915-y += \
        i915_buddy.o \
        i915_cmd_parser.o \
        i915_gem_evict.o \
        i915_gem_fence_reg.o \
        i915_gem_gtt.o \
        i915_gem.o \
        i915_globals.o \

@@ -164,14 +164,18 @@ i915-y += \

# general-purpose microcontroller (GuC) support
i915-y += gt/uc/intel_uc.o \
          gt/uc/intel_uc_debugfs.o \
          gt/uc/intel_uc_fw.o \
          gt/uc/intel_guc.o \
          gt/uc/intel_guc_ads.o \
          gt/uc/intel_guc_ct.o \
          gt/uc/intel_guc_debugfs.o \
          gt/uc/intel_guc_fw.o \
          gt/uc/intel_guc_log.o \
          gt/uc/intel_guc_log_debugfs.o \
          gt/uc/intel_guc_submission.o \
          gt/uc/intel_huc.o \
          gt/uc/intel_huc_debugfs.o \
          gt/uc/intel_huc_fw.o

# modesetting core code

@@ -240,23 +244,6 @@ i915-y += \
        display/vlv_dsi.o \
        display/vlv_dsi_pll.o

# perf code
i915-y += \
        oa/i915_oa_hsw.o \
        oa/i915_oa_bdw.o \
        oa/i915_oa_chv.o \
        oa/i915_oa_sklgt2.o \
        oa/i915_oa_sklgt3.o \
        oa/i915_oa_sklgt4.o \
        oa/i915_oa_bxt.o \
        oa/i915_oa_kblgt2.o \
        oa/i915_oa_kblgt3.o \
        oa/i915_oa_glk.o \
        oa/i915_oa_cflgt2.o \
        oa/i915_oa_cflgt3.o \
        oa/i915_oa_cnl.o \
        oa/i915_oa_icl.o \
        oa/i915_oa_tgl.o
i915-y += i915_perf.o

# Post-mortem debug and GPU hang state capture
@@ -186,16 +186,19 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
static int dsi_send_pkt_payld(struct intel_dsi_host *host,
                              struct mipi_dsi_packet pkt)
{
        struct intel_dsi *intel_dsi = host->intel_dsi;
        struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);

        /* payload queue can accept *256 bytes*, check limit */
        if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) {
                DRM_ERROR("payload size exceeds max queue limit\n");
                drm_err(&i915->drm, "payload size exceeds max queue limit\n");
                return -1;
        }

        /* load data into command payload queue */
        if (!add_payld_to_queue(host, pkt.payload,
                                pkt.payload_length)) {
                DRM_ERROR("adding payload to queue failed\n");
                drm_err(&i915->drm, "adding payload to queue failed\n");
                return -1;
        }

@@ -744,6 +747,18 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
                        tmp |= VIDEO_MODE_SYNC_PULSE;
                        break;
                }
        } else {
                /*
                 * FIXME: Retrieve this info from VBT.
                 * As per the spec when dsi transcoder is operating
                 * in TE GATE mode, TE comes from GPIO
                 * which is UTIL PIN for DSI 0.
                 * Also this GPIO would not be used for other
                 * purposes is an assumption.
                 */
                tmp &= ~OP_MODE_MASK;
                tmp |= CMD_MODE_TE_GATE;
                tmp |= TE_SOURCE_GPIO;
        }

        intel_de_write(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans), tmp);

@@ -837,14 +852,33 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
        }

        hactive = adjusted_mode->crtc_hdisplay;

        if (is_vid_mode(intel_dsi))
                htotal = DIV_ROUND_UP(adjusted_mode->crtc_htotal * mul, div);
        else
                htotal = DIV_ROUND_UP((hactive + 160) * mul, div);

        hsync_start = DIV_ROUND_UP(adjusted_mode->crtc_hsync_start * mul, div);
        hsync_end = DIV_ROUND_UP(adjusted_mode->crtc_hsync_end * mul, div);
        hsync_size = hsync_end - hsync_start;
        hback_porch = (adjusted_mode->crtc_htotal -
                       adjusted_mode->crtc_hsync_end);
        vactive = adjusted_mode->crtc_vdisplay;

        if (is_vid_mode(intel_dsi)) {
                vtotal = adjusted_mode->crtc_vtotal;
        } else {
                int bpp, line_time_us, byte_clk_period_ns;

                if (crtc_state->dsc.compression_enable)
                        bpp = crtc_state->dsc.compressed_bpp;
                else
                        bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);

                byte_clk_period_ns = 1000000 / afe_clk(encoder, crtc_state);
                line_time_us = (htotal * (bpp / 8) * byte_clk_period_ns) / (1000 * intel_dsi->lane_count);
                vtotal = vactive + DIV_ROUND_UP(400, line_time_us);
        }
        vsync_start = adjusted_mode->crtc_vsync_start;
        vsync_end = adjusted_mode->crtc_vsync_end;
        vsync_shift = hsync_start - htotal / 2;

@@ -873,7 +907,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
        }

        /* TRANS_HSYNC register to be programmed only for video mode */
        if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) {
        if (is_vid_mode(intel_dsi)) {
                if (intel_dsi->video_mode_format ==
                    VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) {
                        /* BSPEC: hsync size should be atleast 16 pixels */

@@ -916,22 +950,27 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
        if (vsync_start < vactive)
                drm_err(&dev_priv->drm, "vsync_start less than vactive\n");

        /* program TRANS_VSYNC register */
        /* program TRANS_VSYNC register for video mode only */
        if (is_vid_mode(intel_dsi)) {
                for_each_dsi_port(port, intel_dsi->ports) {
                        dsi_trans = dsi_port_to_transcoder(port);
                        intel_de_write(dev_priv, VSYNC(dsi_trans),
                                       (vsync_start - 1) | ((vsync_end - 1) << 16));
                }
        }

        /*
         * FIXME: It has to be programmed only for interlaced
         * FIXME: It has to be programmed only for video modes and interlaced
         * modes. Put the check condition here once interlaced
         * info available as described above.
         * program TRANS_VSYNCSHIFT register
         */
        if (is_vid_mode(intel_dsi)) {
                for_each_dsi_port(port, intel_dsi->ports) {
                        dsi_trans = dsi_port_to_transcoder(port);
                        intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans), vsync_shift);
                        intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans),
                                       vsync_shift);
                }
        }

        /* program TRANS_VBLANK register, should be same as vtotal programmed */

@@ -1016,6 +1055,32 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
        }
}

static void gen11_dsi_config_util_pin(struct intel_encoder *encoder,
                                      bool enable)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
        u32 tmp;

        /*
         * used as TE i/p for DSI0,
         * for dual link/DSI1 TE is from slave DSI1
         * through GPIO.
         */
        if (is_vid_mode(intel_dsi) || (intel_dsi->ports & BIT(PORT_B)))
                return;

        tmp = intel_de_read(dev_priv, UTIL_PIN_CTL);

        if (enable) {
                tmp |= UTIL_PIN_DIRECTION_INPUT;
                tmp |= UTIL_PIN_ENABLE;
        } else {
                tmp &= ~UTIL_PIN_ENABLE;
        }
        intel_de_write(dev_priv, UTIL_PIN_CTL, tmp);
}

static void
gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
                              const struct intel_crtc_state *crtc_state)

@@ -1037,6 +1102,9 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
        /* setup D-PHY timings */
        gen11_dsi_setup_dphy_timings(encoder, crtc_state);

        /* Since transcoder is configured to take events from GPIO */
        gen11_dsi_config_util_pin(encoder, true);

        /* step 4h: setup DSI protocol timeouts */
        gen11_dsi_setup_timeouts(encoder, crtc_state);

@@ -1088,7 +1156,8 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
        wait_for_cmds_dispatched_to_panel(encoder);
}

static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
static void gen11_dsi_pre_pll_enable(struct intel_atomic_state *state,
                                     struct intel_encoder *encoder,
                                     const struct intel_crtc_state *crtc_state,
                                     const struct drm_connector_state *conn_state)
{

@@ -1099,7 +1168,8 @@ static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
        gen11_dsi_program_esc_clk_div(encoder, crtc_state);
}

static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
static void gen11_dsi_pre_enable(struct intel_atomic_state *state,
                                 struct intel_encoder *encoder,
                                 const struct intel_crtc_state *pipe_config,
                                 const struct drm_connector_state *conn_state)
{

@@ -1118,7 +1188,8 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
        gen11_dsi_set_transcoder_timings(encoder, pipe_config);
}

static void gen11_dsi_enable(struct intel_encoder *encoder,
static void gen11_dsi_enable(struct intel_atomic_state *state,
                             struct intel_encoder *encoder,
                             const struct intel_crtc_state *crtc_state,
                             const struct drm_connector_state *conn_state)
{

@@ -1180,6 +1251,15 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
        enum transcoder dsi_trans;
        u32 tmp;

        /* disable periodic update mode */
        if (is_cmd_mode(intel_dsi)) {
                for_each_dsi_port(port, intel_dsi->ports) {
                        tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port));
                        tmp &= ~DSI_PERIODIC_FRAME_UPDATE_ENABLE;
                        intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp);
                }
        }

        /* put dsi link in ULPS */
        for_each_dsi_port(port, intel_dsi->ports) {
                dsi_trans = dsi_port_to_transcoder(port);

@@ -1264,7 +1344,8 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
        }
}

static void gen11_dsi_disable(struct intel_encoder *encoder,
static void gen11_dsi_disable(struct intel_atomic_state *state,
                              struct intel_encoder *encoder,
                              const struct intel_crtc_state *old_crtc_state,
                              const struct drm_connector_state *old_conn_state)
{

@@ -1286,11 +1367,14 @@ static void gen11_dsi_disable(struct intel_encoder *encoder,
        /* step3: disable port */
        gen11_dsi_disable_port(encoder);

        gen11_dsi_config_util_pin(encoder, false);

        /* step4: disable IO power */
        gen11_dsi_disable_io_power(encoder);
}

static void gen11_dsi_post_disable(struct intel_encoder *encoder,
static void gen11_dsi_post_disable(struct intel_atomic_state *state,
                                   struct intel_encoder *encoder,
                                   const struct intel_crtc_state *old_crtc_state,
                                   const struct drm_connector_state *old_conn_state)
{

@@ -1347,6 +1431,22 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
        adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal;
}

static bool gen11_dsi_is_periodic_cmd_mode(struct intel_dsi *intel_dsi)
{
        struct drm_device *dev = intel_dsi->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder dsi_trans;
        u32 val;

        if (intel_dsi->ports == BIT(PORT_B))
                dsi_trans = TRANSCODER_DSI_1;
        else
                dsi_trans = TRANSCODER_DSI_0;

        val = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans));
        return (val & DSI_PERIODIC_FRAME_UPDATE_ENABLE);
}

static void gen11_dsi_get_config(struct intel_encoder *encoder,
                                 struct intel_crtc_state *pipe_config)
{

@@ -1367,6 +1467,10 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
        gen11_dsi_get_timings(encoder, pipe_config);
        pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
        pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc);

        if (gen11_dsi_is_periodic_cmd_mode(intel_dsi))
                pipe_config->hw.adjusted_mode.private_flags |=
                                        I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;
}

static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,

@@ -1417,6 +1521,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
                                    struct intel_crtc_state *pipe_config,
                                    struct drm_connector_state *conn_state)
{
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
                                                   base);
        struct intel_connector *intel_connector = intel_dsi->attached_connector;

@@ -1446,10 +1551,32 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
        pipe_config->clock_set = true;

        if (gen11_dsi_dsc_compute_config(encoder, pipe_config))
                DRM_DEBUG_KMS("Attempting to use DSC failed\n");
                drm_dbg_kms(&i915->drm, "Attempting to use DSC failed\n");

        pipe_config->port_clock = afe_clk(encoder, pipe_config) / 5;

        /* We would not operate in periodic command mode */
        pipe_config->hw.adjusted_mode.private_flags &=
                                        ~I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE;

        /*
         * In case of TE GATE cmd mode, we
         * receive TE from the slave if
         * dual link is enabled
         */
        if (is_cmd_mode(intel_dsi)) {
                if (intel_dsi->ports == (BIT(PORT_B) | BIT(PORT_A)))
                        pipe_config->hw.adjusted_mode.private_flags |=
                                                I915_MODE_FLAG_DSI_USE_TE1 |
                                                I915_MODE_FLAG_DSI_USE_TE0;
                else if (intel_dsi->ports == BIT(PORT_B))
                        pipe_config->hw.adjusted_mode.private_flags |=
                                                I915_MODE_FLAG_DSI_USE_TE1;
                else
                        pipe_config->hw.adjusted_mode.private_flags |=
                                                I915_MODE_FLAG_DSI_USE_TE0;
        }

        return 0;
}
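A worked example (assumed numbers, not taken from the patch) may help with the command-mode vblank calculation in gen11_dsi_set_transcoder_timings() above, where the driver synthesizes a vtotal worth roughly 400us of extra line time:

/*
 * Illustration only. Assume hactive = 1080 (so, taking mul == div,
 * htotal = 1080 + 160 = 1240), bpp = 24, lane_count = 4 and
 * afe_clk() = 500000 kHz:
 *
 *   byte_clk_period_ns = 1000000 / 500000 = 2
 *   line_time_us       = (1240 * (24 / 8) * 2) / (1000 * 4)
 *                      = 7440 / 4000 = 1      (integer division)
 *   vtotal             = vactive + DIV_ROUND_UP(400, 1) = vactive + 400
 */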
@@ -264,6 +264,20 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
        plane_state->hw.color_range = from_plane_state->uapi.color_range;
}

void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
                               struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

        crtc_state->active_planes &= ~BIT(plane->id);
        crtc_state->nv12_planes &= ~BIT(plane->id);
        crtc_state->c8_planes &= ~BIT(plane->id);
        crtc_state->data_rate[plane->id] = 0;
        crtc_state->min_cdclk[plane->id] = 0;

        plane_state->uapi.visible = false;
}

int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
                                        struct intel_crtc_state *new_crtc_state,
                                        const struct intel_plane_state *old_plane_state,

@@ -273,12 +287,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
        const struct drm_framebuffer *fb = new_plane_state->hw.fb;
        int ret;

        new_crtc_state->active_planes &= ~BIT(plane->id);
        new_crtc_state->nv12_planes &= ~BIT(plane->id);
        new_crtc_state->c8_planes &= ~BIT(plane->id);
        new_crtc_state->data_rate[plane->id] = 0;
        new_crtc_state->min_cdclk[plane->id] = 0;
        new_plane_state->uapi.visible = false;
        intel_plane_set_invisible(new_crtc_state, new_plane_state);

        if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
                return 0;

@@ -52,5 +52,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
                               struct intel_plane *plane,
                               bool *need_cdclk_calc);
void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
                               struct intel_plane_state *plane_state);

#endif /* __INTEL_ATOMIC_PLANE_H__ */
@@ -252,12 +252,14 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state,
                i = ARRAY_SIZE(hdmi_audio_clock);

        if (i == ARRAY_SIZE(hdmi_audio_clock)) {
                DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
                drm_dbg_kms(&dev_priv->drm,
                            "HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
                            adjusted_mode->crtc_clock);
                i = 1;
        }

        DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
        drm_dbg_kms(&dev_priv->drm,
                    "Configuring HDMI audio for pixel clock %d (0x%08x)\n",
                    hdmi_audio_clock[i].clock,
                    hdmi_audio_clock[i].config);

@@ -891,7 +893,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
        ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);

        if (dev_priv->audio_power_refcount++ == 0) {
                if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
                if (INTEL_GEN(dev_priv) >= 9) {
                        intel_de_write(dev_priv, AUD_FREQ_CNTRL,
                                       dev_priv->audio_freq_cntrl);
                        drm_dbg_kms(&dev_priv->drm,

@@ -931,7 +933,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
        unsigned long cookie;
        u32 tmp;

        if (!IS_GEN(dev_priv, 9))
        if (INTEL_GEN(dev_priv) < 9)
                return;

        cookie = i915_audio_component_get_power(kdev);

@@ -1173,7 +1175,7 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
                return;
        }

        if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
        if (INTEL_GEN(dev_priv) >= 9) {
                dev_priv->audio_freq_cntrl = intel_de_read(dev_priv,
                                                           AUD_FREQ_CNTRL);
                drm_dbg_kms(&dev_priv->drm,

@@ -338,13 +338,14 @@ void intel_bw_crtc_update(struct intel_bw_state *bw_state,
                          const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *i915 = to_i915(crtc->base.dev);

        bw_state->data_rate[crtc->pipe] =
                intel_bw_crtc_data_rate(crtc_state);
        bw_state->num_active_planes[crtc->pipe] =
                intel_bw_crtc_num_active_planes(crtc_state);

        DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
        drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n",
                    pipe_name(crtc->pipe),
                    bw_state->data_rate[crtc->pipe],
                    bw_state->num_active_planes[crtc->pipe]);
@@ -460,6 +460,16 @@ static void ilk_lut_10_pack(struct drm_color_lut *entry, u32 val)
        entry->blue = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_BLUE_MASK, val), 10);
}

static void icl_lut_multi_seg_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
{
        entry->red = REG_FIELD_GET(PAL_PREC_MULTI_SEG_RED_UDW_MASK, udw) << 6 |
                     REG_FIELD_GET(PAL_PREC_MULTI_SEG_RED_LDW_MASK, ldw);
        entry->green = REG_FIELD_GET(PAL_PREC_MULTI_SEG_GREEN_UDW_MASK, udw) << 6 |
                       REG_FIELD_GET(PAL_PREC_MULTI_SEG_GREEN_LDW_MASK, ldw);
        entry->blue = REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_UDW_MASK, udw) << 6 |
                      REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_LDW_MASK, ldw);
}

static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

@@ -893,7 +903,7 @@ icl_load_gcmax(const struct intel_crtc_state *crtc_state,
        struct intel_dsb *dsb = intel_dsb_get(crtc);
        enum pipe pipe = crtc->pipe;

        /* Fixme: LUT entries are 16 bit only, so we can prog 0xFFFF max */
        /* FIXME LUT entries are 16 bit only, so we can prog 0xFFFF max */
        intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 0), color->red);
        intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 1), color->green);
        intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 2), color->blue);

@@ -1630,6 +1640,24 @@ static int glk_gamma_precision(const struct intel_crtc_state *crtc_state)
        }
}

static int icl_gamma_precision(const struct intel_crtc_state *crtc_state)
{
        if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0)
                return 0;

        switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
        case GAMMA_MODE_MODE_8BIT:
                return 8;
        case GAMMA_MODE_MODE_10BIT:
                return 10;
        case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
                return 16;
        default:
                MISSING_CASE(crtc_state->gamma_mode);
                return 0;
        }
}

int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

@@ -1641,7 +1669,9 @@ int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state)
                else
                        return i9xx_gamma_precision(crtc_state);
        } else {
                if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
                if (INTEL_GEN(dev_priv) >= 11)
                        return icl_gamma_precision(crtc_state);
                else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
                        return glk_gamma_precision(crtc_state);
                else if (IS_IRONLAKE(dev_priv))
                        return ilk_gamma_precision(crtc_state);

@@ -1658,7 +1688,7 @@ static bool err_check(struct drm_color_lut *lut1,
                ((abs((long)lut2->green - lut1->green)) <= err);
}

static bool intel_color_lut_entry_equal(struct drm_color_lut *lut1,
static bool intel_color_lut_entries_equal(struct drm_color_lut *lut1,
                                          struct drm_color_lut *lut2,
                                          int lut_size, u32 err)
{

@@ -1690,16 +1720,8 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1,
        lut_size2 = drm_color_lut_size(blob2);

        /* check sw and hw lut size */
        switch (gamma_mode) {
        case GAMMA_MODE_MODE_8BIT:
        case GAMMA_MODE_MODE_10BIT:
                if (lut_size1 != lut_size2)
                        return false;
                break;
        default:
                MISSING_CASE(gamma_mode);
                return false;
        }

        lut1 = blob1->data;
        lut2 = blob2->data;

@@ -1707,13 +1729,18 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1,
        err = 0xffff >> bit_precision;

        /* check sw and hw lut entry to be equal */
        switch (gamma_mode) {
        switch (gamma_mode & GAMMA_MODE_MODE_MASK) {
        case GAMMA_MODE_MODE_8BIT:
        case GAMMA_MODE_MODE_10BIT:
                if (!intel_color_lut_entry_equal(lut1, lut2,
                if (!intel_color_lut_entries_equal(lut1, lut2,
                                                   lut_size2, err))
                        return false;
                break;
        case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
                if (!intel_color_lut_entries_equal(lut1, lut2,
                                                   9, err))
                        return false;
                break;
        default:
                MISSING_CASE(gamma_mode);
                return false;

@@ -1946,6 +1973,63 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state)
        crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
}

static struct drm_property_blob *
icl_read_lut_multi_segment(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
        enum pipe pipe = crtc->pipe;
        struct drm_property_blob *blob;
        struct drm_color_lut *lut;

        blob = drm_property_create_blob(&dev_priv->drm,
                                        sizeof(struct drm_color_lut) * lut_size,
                                        NULL);
        if (IS_ERR(blob))
                return NULL;

        lut = blob->data;

        intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe),
                       PAL_PREC_AUTO_INCREMENT);

        for (i = 0; i < 9; i++) {
                u32 ldw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe));
                u32 udw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe));

                icl_lut_multi_seg_pack(&lut[i], ldw, udw);
        }

        intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 0);

        /*
         * FIXME readouts from PAL_PREC_DATA register aren't giving
         * correct values in the case of fine and coarse segments.
         * Restricting readouts only for super fine segment as of now.
         */

        return blob;
}

static void icl_read_luts(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

        if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0)
                return;

        switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
        case GAMMA_MODE_MODE_8BIT:
                crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
                break;
        case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
                crtc_state->hw.gamma_lut = icl_read_lut_multi_segment(crtc);
                break;
        default:
                crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
        }
}

void intel_color_init(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

@@ -1989,6 +2073,7 @@ void intel_color_init(struct intel_crtc *crtc)

        if (INTEL_GEN(dev_priv) >= 11) {
                dev_priv->display.load_luts = icl_load_luts;
                dev_priv->display.read_luts = icl_read_luts;
        } else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
                dev_priv->display.load_luts = glk_load_luts;
                dev_priv->display.read_luts = glk_read_luts;
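To illustrate icl_lut_multi_seg_pack() above with assumed register values: each 16-bit channel is rebuilt from two consecutive PREC_PAL_MULTI_SEG_DATA reads, the low dword supplying bits [5:0] and the upper dword bits [15:6]:

/*
 * Assumed values, for illustration only:
 *   red field of ldw = 0x2a   (bits 5:0)
 *   red field of udw = 0x155  (bits 15:6)
 *   entry->red = (0x155 << 6) | 0x2a = 0x556a
 */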
@@ -290,7 +290,7 @@ intel_attach_colorspace_property(struct drm_connector *connector)
                        return;
                break;
        default:
                DRM_DEBUG_KMS("Colorspace property not supported\n");
                MISSING_CASE(connector->connector_type);
                return;
        }
@@ -203,27 +203,31 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
        intel_de_write(dev_priv, crt->adpa_reg, adpa);
}

static void intel_disable_crt(struct intel_encoder *encoder,
static void intel_disable_crt(struct intel_atomic_state *state,
                              struct intel_encoder *encoder,
                              const struct intel_crtc_state *old_crtc_state,
                              const struct drm_connector_state *old_conn_state)
{
        intel_crt_set_dpms(encoder, old_crtc_state, DRM_MODE_DPMS_OFF);
}

static void pch_disable_crt(struct intel_encoder *encoder,
static void pch_disable_crt(struct intel_atomic_state *state,
                            struct intel_encoder *encoder,
                            const struct intel_crtc_state *old_crtc_state,
                            const struct drm_connector_state *old_conn_state)
{
}

static void pch_post_disable_crt(struct intel_encoder *encoder,
static void pch_post_disable_crt(struct intel_atomic_state *state,
                                 struct intel_encoder *encoder,
                                 const struct intel_crtc_state *old_crtc_state,
                                 const struct drm_connector_state *old_conn_state)
{
        intel_disable_crt(encoder, old_crtc_state, old_conn_state);
        intel_disable_crt(state, encoder, old_crtc_state, old_conn_state);
}

static void hsw_disable_crt(struct intel_encoder *encoder,
static void hsw_disable_crt(struct intel_atomic_state *state,
                            struct intel_encoder *encoder,
                            const struct intel_crtc_state *old_crtc_state,
                            const struct drm_connector_state *old_conn_state)
{

@@ -234,7 +238,8 @@ static void hsw_disable_crt(struct intel_encoder *encoder,
        intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}

static void hsw_post_disable_crt(struct intel_encoder *encoder,
static void hsw_post_disable_crt(struct intel_atomic_state *state,
                                 struct intel_encoder *encoder,
                                 const struct intel_crtc_state *old_crtc_state,
                                 const struct drm_connector_state *old_conn_state)
{

@@ -250,19 +255,20 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,

        intel_ddi_disable_pipe_clock(old_crtc_state);

        pch_post_disable_crt(encoder, old_crtc_state, old_conn_state);
        pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state);

        lpt_disable_pch_transcoder(dev_priv);
        lpt_disable_iclkip(dev_priv);

        intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state);
        intel_ddi_fdi_post_disable(state, encoder, old_crtc_state, old_conn_state);

        drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);

        intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}

static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
static void hsw_pre_pll_enable_crt(struct intel_atomic_state *state,
                                   struct intel_encoder *encoder,
                                   const struct intel_crtc_state *crtc_state,
                                   const struct drm_connector_state *conn_state)
{

@@ -273,7 +279,8 @@ static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
        intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}

static void hsw_pre_enable_crt(struct intel_encoder *encoder,
static void hsw_pre_enable_crt(struct intel_atomic_state *state,
                               struct intel_encoder *encoder,
                               const struct intel_crtc_state *crtc_state,
                               const struct drm_connector_state *conn_state)
{

@@ -290,7 +297,8 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder,
        intel_ddi_enable_pipe_clock(crtc_state);
}

static void hsw_enable_crt(struct intel_encoder *encoder,
static void hsw_enable_crt(struct intel_atomic_state *state,
                           struct intel_encoder *encoder,
                           const struct intel_crtc_state *crtc_state,
                           const struct drm_connector_state *conn_state)
{

@@ -314,7 +322,8 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
        intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}

static void intel_enable_crt(struct intel_encoder *encoder,
static void intel_enable_crt(struct intel_atomic_state *state,
                             struct intel_encoder *encoder,
                             const struct intel_crtc_state *crtc_state,
                             const struct drm_connector_state *conn_state)
{

@@ -594,7 +603,8 @@ static struct edid *intel_crt_get_edid(struct drm_connector *connector,
        edid = drm_get_edid(connector, i2c);

        if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
                DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
                drm_dbg_kms(connector->dev,
                            "CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
                intel_gmbus_force_bit(i2c, true);
                edid = drm_get_edid(connector, i2c);
                intel_gmbus_force_bit(i2c, false);
@ -568,7 +568,7 @@ static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
|
||||
{ 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
|
||||
};
|
||||
|
||||
static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_hbr2_hbr3[] = {
|
||||
static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_dp[] = {
|
||||
/* NT mV Trans mV db */
|
||||
{ 0xA, 0x33, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
|
||||
{ 0xA, 0x47, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */
|
||||
@ -583,23 +583,51 @@ static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_hbr2_hbr3[]
|
||||
};
|
||||
|
||||
struct icl_mg_phy_ddi_buf_trans {
|
||||
u32 cri_txdeemph_override_5_0;
|
||||
u32 cri_txdeemph_override_11_6;
|
||||
u32 cri_txdeemph_override_5_0;
|
||||
u32 cri_txdeemph_override_17_12;
|
||||
};
|
||||
|
||||
static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations[] = {
|
||||
static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_rbr_hbr[] = {
|
||||
/* Voltage swing pre-emphasis */
|
||||
{ 0x0, 0x1B, 0x00 }, /* 0 0 */
|
||||
{ 0x0, 0x23, 0x08 }, /* 0 1 */
|
||||
{ 0x0, 0x2D, 0x12 }, /* 0 2 */
|
||||
{ 0x0, 0x00, 0x00 }, /* 0 3 */
|
||||
{ 0x0, 0x23, 0x00 }, /* 1 0 */
|
||||
{ 0x0, 0x2B, 0x09 }, /* 1 1 */
|
||||
{ 0x0, 0x2E, 0x11 }, /* 1 2 */
|
||||
{ 0x0, 0x2F, 0x00 }, /* 2 0 */
|
||||
{ 0x0, 0x33, 0x0C }, /* 2 1 */
|
||||
{ 0x0, 0x00, 0x00 }, /* 3 0 */
|
||||
{ 0x18, 0x00, 0x00 }, /* 0 0 */
|
||||
{ 0x1D, 0x00, 0x05 }, /* 0 1 */
|
||||
{ 0x24, 0x00, 0x0C }, /* 0 2 */
|
||||
{ 0x2B, 0x00, 0x14 }, /* 0 3 */
|
||||
{ 0x21, 0x00, 0x00 }, /* 1 0 */
|
||||
{ 0x2B, 0x00, 0x08 }, /* 1 1 */
|
||||
{ 0x30, 0x00, 0x0F }, /* 1 2 */
|
||||
{ 0x31, 0x00, 0x03 }, /* 2 0 */
|
||||
{ 0x34, 0x00, 0x0B }, /* 2 1 */
|
||||
{ 0x3F, 0x00, 0x00 }, /* 3 0 */
|
||||
};
|
||||
|
||||
static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hbr2_hbr3[] = {
|
||||
/* Voltage swing pre-emphasis */
|
||||
{ 0x18, 0x00, 0x00 }, /* 0 0 */
|
||||
{ 0x1D, 0x00, 0x05 }, /* 0 1 */
|
||||
{ 0x24, 0x00, 0x0C }, /* 0 2 */
|
||||
{ 0x2B, 0x00, 0x14 }, /* 0 3 */
|
||||
{ 0x26, 0x00, 0x00 }, /* 1 0 */
|
||||
{ 0x2C, 0x00, 0x07 }, /* 1 1 */
|
||||
{ 0x33, 0x00, 0x0C }, /* 1 2 */
|
||||
{ 0x2E, 0x00, 0x00 }, /* 2 0 */
|
||||
{ 0x36, 0x00, 0x09 }, /* 2 1 */
|
||||
{ 0x3F, 0x00, 0x00 }, /* 3 0 */
|
||||
};
|
||||
|
||||
static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations_hdmi[] = {
|
||||
/* HDMI Preset VS Pre-emph */
|
||||
{ 0x1A, 0x0, 0x0 }, /* 1 400mV 0dB */
|
||||
{ 0x20, 0x0, 0x0 }, /* 2 500mV 0dB */
|
||||
{ 0x29, 0x0, 0x0 }, /* 3 650mV 0dB */
|
||||
{ 0x32, 0x0, 0x0 }, /* 4 800mV 0dB */
|
||||
{ 0x3F, 0x0, 0x0 }, /* 5 1000mV 0dB */
|
||||
{ 0x3A, 0x0, 0x5 }, /* 6 Full -1.5 dB */
|
||||
{ 0x39, 0x0, 0x6 }, /* 7 Full -1.8 dB */
|
||||
{ 0x38, 0x0, 0x7 }, /* 8 Full -2 dB */
|
||||
{ 0x37, 0x0, 0x8 }, /* 9 Full -2.5 dB */
|
||||
{ 0x36, 0x0, 0x9 }, /* 10 Full -3 dB */
|
||||
};
|
||||
|
||||
struct tgl_dkl_phy_ddi_buf_trans {
|
||||
@ -943,14 +971,29 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
|
||||
return icl_combo_phy_ddi_translations_dp_hbr2;
|
||||
}
|
||||
|
||||
static const struct icl_mg_phy_ddi_buf_trans *
|
||||
icl_get_mg_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
|
||||
int *n_entries)
|
||||
{
|
||||
if (type == INTEL_OUTPUT_HDMI) {
|
||||
*n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hdmi);
|
||||
return icl_mg_phy_ddi_translations_hdmi;
|
||||
} else if (rate > 270000) {
|
||||
*n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_hbr2_hbr3);
|
||||
return icl_mg_phy_ddi_translations_hbr2_hbr3;
|
||||
}
|
||||
|
||||
*n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations_rbr_hbr);
|
||||
return icl_mg_phy_ddi_translations_rbr_hbr;
|
||||
}
|
||||
|
||||
static const struct cnl_ddi_buf_trans *
|
||||
ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
|
||||
int *n_entries)
|
||||
{
|
||||
if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP &&
|
||||
rate > 270000) {
|
||||
*n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_hbr2_hbr3);
|
||||
return ehl_combo_phy_ddi_translations_hbr2_hbr3;
|
||||
if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP) {
|
||||
*n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_dp);
|
||||
return ehl_combo_phy_ddi_translations_dp;
|
||||
}
|
||||
|
||||
return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
|
||||
@ -989,7 +1032,8 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder)
|
||||
icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
|
||||
0, &n_entries);
|
||||
else
|
||||
n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
|
||||
icl_get_mg_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0,
|
||||
&n_entries);
|
||||
default_entry = n_entries - 1;
|
||||
} else if (IS_CANNONLAKE(dev_priv)) {
|
||||
cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
|
||||
@ -1103,7 +1147,8 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
|
||||
if (intel_de_read(dev_priv, reg) & DDI_BUF_IS_IDLE)
|
||||
return;
|
||||
}
|
||||
DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
|
||||
drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c idle bit\n",
|
||||
port_name(port));
|
||||
}
|
||||
|
||||
static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
|
||||
@ -1250,7 +1295,8 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
|
||||
|
||||
temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
|
||||
if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
|
||||
DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"FDI link training done on step %d\n", i);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -1259,7 +1305,7 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
|
||||
* Results in less fireworks from the state checker.
|
||||
*/
|
||||
if (i == ARRAY_SIZE(hsw_ddi_translations_fdi) * 2 - 1) {
|
||||
DRM_ERROR("FDI link training failed!\n");
|
||||
drm_err(&dev_priv->drm, "FDI link training failed!\n");
|
||||
break;
|
||||
}
|
||||
|
||||
@ -1451,6 +1497,14 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
|
||||
intel_de_write(dev_priv, TRANS_MSA_MISC(cpu_transcoder), temp);
|
||||
}
|
||||
|
||||
static u32 bdw_trans_port_sync_master_select(enum transcoder master_transcoder)
|
||||
{
|
||||
if (master_transcoder == TRANSCODER_EDP)
|
||||
return 0;
|
||||
else
|
||||
return master_transcoder + 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns the TRANS_DDI_FUNC_CTL value based on CRTC state.
|
||||
*
|
||||
@ -1551,6 +1605,15 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
|
||||
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
|
||||
}
|
||||
|
||||
if (IS_GEN_RANGE(dev_priv, 8, 10) &&
|
||||
crtc_state->master_transcoder != INVALID_TRANSCODER) {
|
||||
u8 master_select =
|
||||
bdw_trans_port_sync_master_select(crtc_state->master_transcoder);
|
||||
|
||||
temp |= TRANS_DDI_PORT_SYNC_ENABLE |
|
||||
TRANS_DDI_PORT_SYNC_MASTER_SELECT(master_select);
|
||||
}
|
||||
|
||||
return temp;
|
||||
}
|
||||
|
||||
@ -1559,12 +1622,28 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
|
||||
u32 temp;
|
||||
u32 ctl;
|
||||
|
||||
temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
|
||||
if (INTEL_GEN(dev_priv) >= 11) {
|
||||
enum transcoder master_transcoder = crtc_state->master_transcoder;
|
||||
u32 ctl2 = 0;
|
||||
|
||||
if (master_transcoder != INVALID_TRANSCODER) {
|
||||
u8 master_select =
|
||||
bdw_trans_port_sync_master_select(master_transcoder);
|
||||
|
||||
ctl2 |= PORT_SYNC_MODE_ENABLE |
|
||||
PORT_SYNC_MODE_MASTER_SELECT(master_select);
|
||||
}
|
||||
|
||||
intel_de_write(dev_priv,
|
||||
TRANS_DDI_FUNC_CTL2(cpu_transcoder), ctl2);
|
||||
}
|
||||
|
||||
ctl = intel_ddi_transcoder_func_reg_val_get(crtc_state);
|
||||
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
|
||||
temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
|
||||
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
|
||||
ctl |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
|
||||
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1577,11 +1656,11 @@ intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
|
||||
u32 temp;
|
||||
u32 ctl;
|
||||
|
||||
temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
|
||||
temp &= ~TRANS_DDI_FUNC_ENABLE;
|
||||
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
|
||||
ctl = intel_ddi_transcoder_func_reg_val_get(crtc_state);
|
||||
ctl &= ~TRANS_DDI_FUNC_ENABLE;
|
||||
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
|
||||
}
|
||||
|
||||
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
|
||||
@ -1589,24 +1668,35 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
|
||||
u32 val;
|
||||
u32 ctl;
|
||||
|
||||
val = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
|
||||
val &= ~TRANS_DDI_FUNC_ENABLE;
|
||||
if (INTEL_GEN(dev_priv) >= 11)
|
||||
intel_de_write(dev_priv,
|
||||
TRANS_DDI_FUNC_CTL2(cpu_transcoder), 0);
|
||||
|
||||
ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
|
||||
|
||||
ctl &= ~TRANS_DDI_FUNC_ENABLE;
|
||||
|
||||
if (IS_GEN_RANGE(dev_priv, 8, 10))
|
||||
ctl &= ~(TRANS_DDI_PORT_SYNC_ENABLE |
|
||||
TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK);
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 12) {
|
||||
if (!intel_dp_mst_is_master_trans(crtc_state)) {
|
||||
val &= ~(TGL_TRANS_DDI_PORT_MASK |
|
||||
ctl &= ~(TGL_TRANS_DDI_PORT_MASK |
|
||||
TRANS_DDI_MODE_SELECT_MASK);
|
||||
}
|
||||
} else {
|
||||
val &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
|
||||
ctl &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
|
||||
}
|
||||
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
|
||||
|
||||
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
|
||||
|
||||
if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
|
||||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
|
||||
DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Quirk Increase DDI disabled time\n");
|
||||
/* Quirk time at 100ms for reliable operation */
|
||||
msleep(100);
|
||||
}
|
||||
@ -1667,7 +1757,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
|
||||
if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
|
||||
cpu_transcoder = TRANSCODER_EDP;
|
||||
else
|
||||
cpu_transcoder = (enum transcoder) pipe;
|
||||
@ -1729,7 +1819,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
|
||||
if (!(tmp & DDI_BUF_CTL_ENABLE))
|
||||
goto out;
|
||||
|
||||
if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) {
|
||||
if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A) {
|
||||
tmp = intel_de_read(dev_priv,
|
||||
TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
|
||||
|
||||
@ -1787,18 +1877,21 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
|
||||
}
|
||||
|
||||
if (!*pipe_mask)
|
||||
DRM_DEBUG_KMS("No pipe for [ENCODER:%d:%s] found\n",
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"No pipe for [ENCODER:%d:%s] found\n",
|
||||
encoder->base.base.id, encoder->base.name);
|
||||
|
||||
if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
|
||||
DRM_DEBUG_KMS("Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
|
||||
encoder->base.base.id, encoder->base.name,
|
||||
*pipe_mask);
|
||||
*pipe_mask = BIT(ffs(*pipe_mask) - 1);
|
||||
}
|
||||
|
||||
if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
|
||||
DRM_DEBUG_KMS("Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n",
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n",
|
||||
encoder->base.base.id, encoder->base.name,
|
||||
*pipe_mask, mst_pipe_mask);
|
||||
else
|
||||
@@ -1810,9 +1903,9 @@ out:
 	if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
		    BXT_PHY_LANE_POWERDOWN_ACK |
		    BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
-		DRM_ERROR("[ENCODER:%d:%s] enabled but PHY powered down? "
-			  "(PHY_CTL %08x)\n", encoder->base.base.id,
-			  encoder->base.name, tmp);
+		drm_err(&dev_priv->drm,
+			"[ENCODER:%d:%s] enabled but PHY powered down? (PHY_CTL %08x)\n",
+			encoder->base.base.id, encoder->base.name, tmp);
 	}

 	intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
@@ -1978,7 +2071,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,

 	/* Make sure that the requested I_boost is valid */
 	if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) {
-		DRM_ERROR("Invalid I_boost value %u\n", iboost);
+		drm_err(&dev_priv->drm, "Invalid I_boost value %u\n", iboost);
 		return;
 	}

@@ -2037,7 +2130,8 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
 			icl_get_combo_buf_trans(dev_priv, encoder->type,
						intel_dp->link_rate, &n_entries);
 		else
-			n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
+			icl_get_mg_buf_trans(dev_priv, encoder->type,
+					     intel_dp->link_rate, &n_entries);
 	} else if (IS_CANNONLAKE(dev_priv)) {
 		if (encoder->type == INTEL_OUTPUT_EDP)
 			cnl_get_buf_trans_edp(dev_priv, &n_entries);
@@ -2237,7 +2331,9 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
 		return;

 	if (level >= n_entries) {
-		DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.", level, n_entries - 1);
+		drm_dbg_kms(&dev_priv->drm,
+			    "DDI translation not found for level %d. Using %d instead.",
+			    level, n_entries - 1);
 		level = n_entries - 1;
 	}

@@ -2350,20 +2446,27 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 }

 static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
-					   int link_clock,
-					   u32 level)
+					   int link_clock, u32 level,
+					   enum intel_output_type type)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
 	const struct icl_mg_phy_ddi_buf_trans *ddi_translations;
 	u32 n_entries, val;
-	int ln;
+	int ln, rate = 0;

-	n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
-	ddi_translations = icl_mg_phy_ddi_translations;
+	if (type != INTEL_OUTPUT_HDMI) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		rate = intel_dp->link_rate;
+	}
+
+	ddi_translations = icl_get_mg_buf_trans(dev_priv, type, rate,
+						&n_entries);
 	/* The table does not have values for level 3 and level 9. */
 	if (level >= n_entries || level == 3 || level == 9) {
-		DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.",
+		drm_dbg_kms(&dev_priv->drm,
+			    "DDI translation not found for level %d. Using %d instead.",
			    level, n_entries - 2);
 		level = n_entries - 2;
 	}
@@ -2483,7 +2586,8 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
 	if (intel_phy_is_combo(dev_priv, phy))
 		icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
 	else
-		icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
+		icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level,
+					       type);
 }

 static void
@@ -2698,7 +2802,8 @@ static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
 		if (drm_WARN_ON(&dev_priv->drm, ddi_clk_needed))
 			continue;

-		DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+		drm_notice(&dev_priv->drm,
+			   "PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
			 phy_name(phy));
 		val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
 		intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
@@ -2936,11 +3041,14 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
 static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
					const struct intel_crtc_state *crtc_state)
 {
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
 	if (!crtc_state->fec_enable)
 		return;

 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0)
-		DRM_DEBUG_KMS("Failed to set FEC_READY in the sink\n");
+		drm_dbg_kms(&i915->drm,
+			    "Failed to set FEC_READY in the sink\n");
 }

 static void intel_ddi_enable_fec(struct intel_encoder *encoder,
@@ -2960,7 +3068,8 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,

 	if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
				  DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
-		DRM_ERROR("Timed out waiting for FEC Enable Status\n");
+		drm_err(&dev_priv->drm,
+			"Timed out waiting for FEC Enable Status\n");
 }

 static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
@@ -2980,7 +3089,8 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
 	intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl);
 }

-static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
+static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
+				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
 {
@@ -3120,7 +3230,8 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
 	intel_dsc_enable(encoder, crtc_state);
 }

-static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
+static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
+				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
 {
@@ -3193,16 +3304,17 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
 	intel_dsc_enable(encoder, crtc_state);
 }

-static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
+static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
+				    struct intel_encoder *encoder,
				    const struct intel_crtc_state *crtc_state,
				    const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

 	if (INTEL_GEN(dev_priv) >= 12)
-		tgl_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+		tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
 	else
-		hsw_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+		hsw_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);

 	/* MST will call a setting of MSA after an allocating of Virtual Channel
	 * from MST encoder pre_enable callback.
@@ -3214,7 +3326,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 	}
 }

-static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
+static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state,
+				      struct intel_encoder *encoder,
				      const struct intel_crtc_state *crtc_state,
				      const struct drm_connector_state *conn_state)
 {
@@ -3254,7 +3367,8 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
					  crtc_state, conn_state);
 }

-static void intel_ddi_pre_enable(struct intel_encoder *encoder,
+static void intel_ddi_pre_enable(struct intel_atomic_state *state,
+				 struct intel_encoder *encoder,
				 const struct intel_crtc_state *crtc_state,
				 const struct drm_connector_state *conn_state)
 {
@@ -3283,12 +3397,14 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
-		intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
+		intel_ddi_pre_enable_hdmi(state, encoder, crtc_state,
+					  conn_state);
 	} else {
 		struct intel_lspcon *lspcon =
				enc_to_intel_lspcon(encoder);

-		intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+		intel_ddi_pre_enable_dp(state, encoder, crtc_state,
+					conn_state);
 		if (lspcon->active) {
 			struct intel_digital_port *dig_port =
					enc_to_dig_port(encoder);
@@ -3331,7 +3447,8 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
 		intel_wait_ddi_buf_idle(dev_priv, port);
 }

-static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
+static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
+				      struct intel_encoder *encoder,
				      const struct intel_crtc_state *old_crtc_state,
				      const struct drm_connector_state *old_conn_state)
 {
@@ -3387,7 +3504,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
 	intel_ddi_clk_disable(encoder);
 }

-static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
+static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
+					struct intel_encoder *encoder,
					const struct intel_crtc_state *old_crtc_state,
					const struct drm_connector_state *old_conn_state)
 {
@@ -3410,22 +3528,8 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
 	intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
 }

-static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
-{
-	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-	if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
-		return;
-
-	DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
-		      transcoder_name(old_crtc_state->cpu_transcoder));
-
-	intel_de_write(dev_priv,
-		       TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
-}
-
-static void intel_ddi_post_disable(struct intel_encoder *encoder,
+static void intel_ddi_post_disable(struct intel_atomic_state *state,
+				   struct intel_encoder *encoder,
				   const struct intel_crtc_state *old_crtc_state,
				   const struct drm_connector_state *old_conn_state)
 {
@@ -3439,9 +3543,6 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,

 	intel_disable_pipe(old_crtc_state);

-	if (INTEL_GEN(dev_priv) >= 11)
-		icl_disable_transcoder_port_sync(old_crtc_state);
-
 	intel_ddi_disable_transcoder_func(old_crtc_state);

 	intel_dsc_disable(old_crtc_state);
@@ -3466,11 +3567,11 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
	 */

 	if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
-		intel_ddi_post_disable_hdmi(encoder,
-					    old_crtc_state, old_conn_state);
+		intel_ddi_post_disable_hdmi(state, encoder, old_crtc_state,
+					    old_conn_state);
 	else
-		intel_ddi_post_disable_dp(encoder,
-					  old_crtc_state, old_conn_state);
+		intel_ddi_post_disable_dp(state, encoder, old_crtc_state,
+					  old_conn_state);

 	if (INTEL_GEN(dev_priv) >= 11)
 		icl_unmap_plls_to_ports(encoder);
@@ -3483,7 +3584,8 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
 	intel_tc_port_put_link(dig_port);
 }

-void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
+void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
+				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
 {
@@ -3517,7 +3619,43 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
 	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
 }

-static void intel_enable_ddi_dp(struct intel_encoder *encoder,
+static void trans_port_sync_stop_link_train(struct intel_atomic_state *state,
+					    struct intel_encoder *encoder,
+					    const struct intel_crtc_state *crtc_state)
+{
+	const struct drm_connector_state *conn_state;
+	struct drm_connector *conn;
+	int i;
+
+	if (!crtc_state->sync_mode_slaves_mask)
+		return;
+
+	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
+		struct intel_encoder *slave_encoder =
+			to_intel_encoder(conn_state->best_encoder);
+		struct intel_crtc *slave_crtc = to_intel_crtc(conn_state->crtc);
+		const struct intel_crtc_state *slave_crtc_state;
+
+		if (!slave_crtc)
+			continue;
+
+		slave_crtc_state =
+			intel_atomic_get_new_crtc_state(state, slave_crtc);
+
+		if (slave_crtc_state->master_transcoder !=
+		    crtc_state->cpu_transcoder)
+			continue;
+
+		intel_dp_stop_link_train(enc_to_intel_dp(slave_encoder));
+	}
+
+	usleep_range(200, 400);
+
+	intel_dp_stop_link_train(enc_to_intel_dp(encoder));
+}
+
+static void intel_enable_ddi_dp(struct intel_atomic_state *state,
+				struct intel_encoder *encoder,
				const struct intel_crtc_state *crtc_state,
				const struct drm_connector_state *conn_state)
 {
@@ -3536,6 +3674,8 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,

 	if (crtc_state->has_audio)
 		intel_audio_codec_enable(encoder, crtc_state, conn_state);
+
+	trans_port_sync_stop_link_train(state, encoder, crtc_state);
 }

 static i915_reg_t
@@ -3558,7 +3698,8 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
 	return CHICKEN_TRANS(trans[port]);
 }

-static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
+static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
+				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
 {
@@ -3570,8 +3711,8 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
 	if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
					       crtc_state->hdmi_high_tmds_clock_ratio,
					       crtc_state->hdmi_scrambling))
-		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to configure sink "
-			      "scrambling/TMDS bit clock ratio\n",
+		drm_dbg_kms(&dev_priv->drm,
+			    "[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
			      connector->base.id, connector->name);

 	/* Display WA #1143: skl,kbl,cfl */
@@ -3620,7 +3761,8 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
 		intel_audio_codec_enable(encoder, crtc_state, conn_state);
 }

-static void intel_enable_ddi(struct intel_encoder *encoder,
+static void intel_enable_ddi(struct intel_atomic_state *state,
+			     struct intel_encoder *encoder,
			     const struct intel_crtc_state *crtc_state,
			     const struct drm_connector_state *conn_state)
 {
@@ -3631,9 +3773,9 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
 	intel_crtc_vblank_on(crtc_state);

 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
-		intel_enable_ddi_hdmi(encoder, crtc_state, conn_state);
+		intel_enable_ddi_hdmi(state, encoder, crtc_state, conn_state);
 	else
-		intel_enable_ddi_dp(encoder, crtc_state, conn_state);
+		intel_enable_ddi_dp(state, encoder, crtc_state, conn_state);

 	/* Enable hdcp if it's desired */
 	if (conn_state->content_protection ==
@@ -3643,7 +3785,8 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
				  (u8)conn_state->hdcp_content_type);
 }

-static void intel_disable_ddi_dp(struct intel_encoder *encoder,
+static void intel_disable_ddi_dp(struct intel_atomic_state *state,
+				 struct intel_encoder *encoder,
				 const struct intel_crtc_state *old_crtc_state,
				 const struct drm_connector_state *old_conn_state)
 {
@@ -3663,10 +3806,12 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder,
						     false);
 }

-static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
+static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
+				   struct intel_encoder *encoder,
				   const struct intel_crtc_state *old_crtc_state,
				   const struct drm_connector_state *old_conn_state)
 {
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
 	struct drm_connector *connector = old_conn_state->connector;

 	if (old_crtc_state->has_audio)
@@ -3675,23 +3820,28 @@ static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,

 	if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
					       false, false))
-		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n",
+		drm_dbg_kms(&i915->drm,
+			    "[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n",
			    connector->base.id, connector->name);
 }

-static void intel_disable_ddi(struct intel_encoder *encoder,
+static void intel_disable_ddi(struct intel_atomic_state *state,
+			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *old_crtc_state,
			      const struct drm_connector_state *old_conn_state)
 {
 	intel_hdcp_disable(to_intel_connector(old_conn_state->connector));

 	if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
-		intel_disable_ddi_hdmi(encoder, old_crtc_state, old_conn_state);
+		intel_disable_ddi_hdmi(state, encoder, old_crtc_state,
+				       old_conn_state);
 	else
-		intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
+		intel_disable_ddi_dp(state, encoder, old_crtc_state,
+				     old_conn_state);
 }

-static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
+static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state,
+				     struct intel_encoder *encoder,
				     const struct intel_crtc_state *crtc_state,
				     const struct drm_connector_state *conn_state)
 {
@@ -3702,18 +3852,20 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
 	intel_psr_update(intel_dp, crtc_state);
 	intel_edp_drrs_enable(intel_dp, crtc_state);

-	intel_panel_update_backlight(encoder, crtc_state, conn_state);
+	intel_panel_update_backlight(state, encoder, crtc_state, conn_state);
 }

-static void intel_ddi_update_pipe(struct intel_encoder *encoder,
+static void intel_ddi_update_pipe(struct intel_atomic_state *state,
+				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
 {

 	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
-		intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
+		intel_ddi_update_pipe_dp(state, encoder, crtc_state,
+					 conn_state);

-	intel_hdcp_update_pipe(encoder, crtc_state, conn_state);
+	intel_hdcp_update_pipe(state, encoder, crtc_state, conn_state);
 }

 static void
@@ -3742,7 +3894,8 @@ intel_ddi_update_complete(struct intel_atomic_state *state,
 }

 static void
-intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
+intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
+			 struct intel_encoder *encoder,
			 const struct intel_crtc_state *crtc_state,
			 const struct drm_connector_state *conn_state)
 {
@@ -3842,6 +3995,66 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
 		crtc_state->min_voltage_level = 2;
 }

+static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv,
+						     enum transcoder cpu_transcoder)
+{
+	u32 master_select;
+
+	if (INTEL_GEN(dev_priv) >= 11) {
+		u32 ctl2 = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL2(cpu_transcoder));
+
+		if ((ctl2 & PORT_SYNC_MODE_ENABLE) == 0)
+			return INVALID_TRANSCODER;
+
+		master_select = REG_FIELD_GET(PORT_SYNC_MODE_MASTER_SELECT_MASK, ctl2);
+	} else {
+		u32 ctl = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+		if ((ctl & TRANS_DDI_PORT_SYNC_ENABLE) == 0)
+			return INVALID_TRANSCODER;
+
+		master_select = REG_FIELD_GET(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, ctl);
+	}
+
+	if (master_select == 0)
+		return TRANSCODER_EDP;
+	else
+		return master_select - 1;
+}
+
+static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+	u32 transcoders = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
+	enum transcoder cpu_transcoder;
+
+	crtc_state->master_transcoder =
+		bdw_transcoder_master_readout(dev_priv, crtc_state->cpu_transcoder);
+
+	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+		enum intel_display_power_domain power_domain;
+		intel_wakeref_t trans_wakeref;
+
+		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+		trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
+								   power_domain);
+
+		if (!trans_wakeref)
+			continue;
+
+		if (bdw_transcoder_master_readout(dev_priv, cpu_transcoder) ==
+		    crtc_state->cpu_transcoder)
+			crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
+
+		intel_display_power_put(dev_priv, power_domain, trans_wakeref);
+	}
+
+	drm_WARN_ON(&dev_priv->drm,
+		    crtc_state->master_transcoder != INVALID_TRANSCODER &&
+		    crtc_state->sync_mode_slaves_mask);
+}
+
 void intel_ddi_get_config(struct intel_encoder *encoder,
			  struct intel_crtc_state *pipe_config)
 {
@@ -3927,7 +4140,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 		pipe_config->fec_enable =
			intel_de_read(dev_priv, dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE;

-		DRM_DEBUG_KMS("[ENCODER:%d:%s] Fec status: %u\n",
+		drm_dbg_kms(&dev_priv->drm,
+			    "[ENCODER:%d:%s] Fec status: %u\n",
			    encoder->base.base.id, encoder->base.name,
			    pipe_config->fec_enable);
 	}
@@ -3966,7 +4180,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
-		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+		drm_dbg_kms(&dev_priv->drm,
+			    "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			    pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
 		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
 	}
@@ -3993,6 +4208,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 		intel_read_infoframe(encoder, pipe_config,
				     HDMI_INFOFRAME_TYPE_DRM,
				     &pipe_config->infoframes.drm);
+
+	if (INTEL_GEN(dev_priv) >= 8)
+		bdw_get_trans_port_sync_config(pipe_config);
 }

 static enum intel_output_type
@@ -4022,7 +4240,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
 	enum port port = encoder->port;
 	int ret;

-	if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
+	if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP) && port == PORT_A)
 		pipe_config->cpu_transcoder = TRANSCODER_EDP;

 	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) {
@@ -4094,7 +4312,11 @@ intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
 	u8 transcoders = 0;
 	int i;

-	if (INTEL_GEN(dev_priv) < 11)
+	/*
+	 * We don't enable port sync on BDW due to missing w/as and
+	 * due to not having adjusted the modeset sequence appropriately.
+	 */
+	if (INTEL_GEN(dev_priv) < 9)
 		return 0;

 	if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP))
@@ -4126,10 +4348,11 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
					    struct intel_crtc_state *crtc_state,
					    struct drm_connector_state *conn_state)
 {
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
 	struct drm_connector *connector = conn_state->connector;
 	u8 port_sync_transcoders = 0;

-	DRM_DEBUG_KMS("[ENCODER:%d:%s] [CRTC:%d:%s]",
+	drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]",
		    encoder->base.base.id, encoder->base.name,
		    crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name);

@@ -4270,7 +4493,8 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,

 	ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
 	if (ret < 0) {
-		DRM_ERROR("Failed to read TMDS config: %d\n", ret);
+		drm_err(&dev_priv->drm, "Failed to read TMDS config: %d\n",
+			ret);
 		return 0;
 	}

@@ -4294,15 +4518,17 @@

 static enum intel_hotplug_state
 intel_ddi_hotplug(struct intel_encoder *encoder,
-		  struct intel_connector *connector,
-		  bool irq_received)
+		  struct intel_connector *connector)
 {
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+	enum phy phy = intel_port_to_phy(i915, encoder->port);
+	bool is_tc = intel_phy_is_tc(i915, phy);
 	struct drm_modeset_acquire_ctx ctx;
 	enum intel_hotplug_state state;
 	int ret;

-	state = intel_encoder_hotplug(encoder, connector, irq_received);
+	state = intel_encoder_hotplug(encoder, connector);

 	drm_modeset_acquire_init(&ctx, 0);

@@ -4340,8 +4566,15 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
	 * valid EDID. To solve this schedule another detection cycle if this
	 * time around we didn't detect any change in the sink's connection
	 * status.
+	 *
+	 * Type-c connectors which get their HPD signal deasserted then
+	 * reasserted, without unplugging/replugging the sink from the
+	 * connector, introduce a delay until the AUX channel communication
+	 * becomes functional. Retry the detection for 5 seconds on type-c
+	 * connectors to account for this delay.
	 */
-	if (state == INTEL_HOTPLUG_UNCHANGED && irq_received &&
+	if (state == INTEL_HOTPLUG_UNCHANGED &&
+	    connector->hotplug_retries < (is_tc ? 5 : 1) &&
	    !dig_port->dp.is_mst)
 		state = INTEL_HOTPLUG_RETRY;

@@ -4416,7 +4649,8 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
	 * so we use the proper lane count for our calculations.
	 */
 	if (intel_ddi_a_force_4_lanes(intel_dport)) {
-		DRM_DEBUG_KMS("Forcing DDI_A_4_LANES for port A\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Forcing DDI_A_4_LANES for port A\n");
 		intel_dport->saved_port_bits |= DDI_A_4_LANES;
 		max_lanes = 4;
 	}
@@ -4444,11 +4678,13 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 		init_dp = true;
 		init_lspcon = true;
 		init_hdmi = false;
-		DRM_DEBUG_KMS("VBT says port %c has lspcon\n", port_name(port));
+		drm_dbg_kms(&dev_priv->drm, "VBT says port %c has lspcon\n",
+			    port_name(port));
 	}

 	if (!init_dp && !init_hdmi) {
-		DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
+		drm_dbg_kms(&dev_priv->drm,
+			    "VBT says port %c is not DVI/HDMI/DP compatible, respect it\n",
			    port_name(port));
 		return;
 	}
@@ -4528,14 +4764,16 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 	if (init_lspcon) {
 		if (lspcon_init(intel_dig_port))
			/* TODO: handle hdmi info frame part */
-			DRM_DEBUG_KMS("LSPCON init success on port %c\n",
+			drm_dbg_kms(&dev_priv->drm,
+				    "LSPCON init success on port %c\n",
				    port_name(port));
 		else
			/*
			 * LSPCON init faied, but DP init was success, so
			 * lets try to drive as DP++ port.
			 */
-			DRM_ERROR("LSPCON init failed on port %c\n",
+			drm_err(&dev_priv->drm,
+				"LSPCON init failed on port %c\n",
				  port_name(port));
 	}

@@ -17,7 +17,8 @@ struct intel_dp;
 struct intel_dpll_hw_state;
 struct intel_encoder;

-void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
+void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
+				struct intel_encoder *intel_encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state);
 void hsw_fdi_link_train(struct intel_encoder *encoder,
@@ -525,7 +525,7 @@ skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
		       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
 }

-/* Wa_2006604312:icl */
+/* Wa_2006604312:icl,ehl */
 static void
 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
@@ -544,19 +544,25 @@ needs_modeset(const struct intel_crtc_state *state)
 	return drm_atomic_crtc_needs_modeset(&state->uapi);
 }

-bool
-is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
-{
-	return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
-		crtc_state->sync_mode_slaves_mask);
-}
-
 static bool
 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
 {
 	return crtc_state->master_transcoder != INVALID_TRANSCODER;
 }

+static bool
+is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
+{
+	return crtc_state->sync_mode_slaves_mask != 0;
+}
+
+bool
+is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
+{
+	return is_trans_port_sync_master(crtc_state) ||
+	       is_trans_port_sync_slave(crtc_state);
+}
+
 /*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
@@ -620,45 +626,43 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
 	return clock->dot / 5;
 }

-#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
-
 /*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
-static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
+static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
 {
 	if (clock->n < limit->n.min || limit->n.max < clock->n)
-		INTELPllInvalid("n out of range\n");
+		return false;
 	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
-		INTELPllInvalid("p1 out of range\n");
+		return false;
 	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
-		INTELPllInvalid("m2 out of range\n");
+		return false;
 	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
-		INTELPllInvalid("m1 out of range\n");
+		return false;

 	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
 		if (clock->m1 <= clock->m2)
-			INTELPllInvalid("m1 <= m2\n");
+			return false;

 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
 		if (clock->p < limit->p.min || limit->p.max < clock->p)
-			INTELPllInvalid("p out of range\n");
+			return false;
 		if (clock->m < limit->m.min || limit->m.max < clock->m)
-			INTELPllInvalid("m out of range\n");
+			return false;
 	}

 	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
-		INTELPllInvalid("vco out of range\n");
+		return false;
 	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
 	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
-		INTELPllInvalid("dot out of range\n");
+		return false;

 	return true;
 }
@@ -725,7 +729,7 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
			int this_err;

			i9xx_calc_dpll_params(refclk, &clock);
-			if (!intel_PLL_is_valid(to_i915(dev),
+			if (!intel_pll_is_valid(to_i915(dev),
						limit,
						&clock))
				continue;
@@ -781,7 +785,7 @@ pnv_find_best_dpll(const struct intel_limit *limit,
			int this_err;

			pnv_calc_dpll_params(refclk, &clock);
-			if (!intel_PLL_is_valid(to_i915(dev),
+			if (!intel_pll_is_valid(to_i915(dev),
						limit,
						&clock))
				continue;
@@ -842,7 +846,7 @@ g4x_find_best_dpll(const struct intel_limit *limit,
			int this_err;

			i9xx_calc_dpll_params(refclk, &clock);
-			if (!intel_PLL_is_valid(to_i915(dev),
+			if (!intel_pll_is_valid(to_i915(dev),
						limit,
						&clock))
				continue;
@@ -939,7 +943,7 @@ vlv_find_best_dpll(const struct intel_limit *limit,

			vlv_calc_dpll_params(refclk, &clock);

-			if (!intel_PLL_is_valid(to_i915(dev),
+			if (!intel_pll_is_valid(to_i915(dev),
						limit,
						&clock))
				continue;
@@ -1008,7 +1012,7 @@ chv_find_best_dpll(const struct intel_limit *limit,

			chv_calc_dpll_params(refclk, &clock);

-			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
+			if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
@@ -2910,6 +2914,7 @@ intel_fb_plane_get_subsampling(int *hsub, int *vsub,
 static int
 intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
 {
+	struct drm_i915_private *i915 = to_i915(fb->dev);
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 	int main_plane;
 	int hsub, vsub;
@@ -2938,7 +2943,8 @@ intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
	 * x/y offsets must match between CCS and the main surface.
	 */
 	if (main_x != ccs_x || main_y != ccs_y) {
-		DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
+		drm_dbg_kms(&i915->drm,
+			    "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
			      main_x, main_y,
			      ccs_x, ccs_y,
			      intel_fb->normal[main_plane].x,
@@ -3336,6 +3342,8 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
 		return DRM_FORMAT_RGB565;
 	case PLANE_CTL_FORMAT_NV12:
 		return DRM_FORMAT_NV12;
+	case PLANE_CTL_FORMAT_XYUV:
+		return DRM_FORMAT_XYUV8888;
 	case PLANE_CTL_FORMAT_P010:
 		return DRM_FORMAT_P010;
 	case PLANE_CTL_FORMAT_P012:
@@ -4580,6 +4588,8 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
 	case DRM_FORMAT_XRGB16161616F:
 	case DRM_FORMAT_ARGB16161616F:
 		return PLANE_CTL_FORMAT_XRGB_16161616F;
+	case DRM_FORMAT_XYUV8888:
+		return PLANE_CTL_FORMAT_XYUV;
 	case DRM_FORMAT_YUYV:
 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
 	case DRM_FORMAT_YVYU:
@@ -4998,37 +5008,6 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
 	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
 }

-static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
-{
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	u32 trans_ddi_func_ctl2_val;
-	u8 master_select;
-
-	/*
-	 * Configure the master select and enable Transcoder Port Sync for
-	 * Slave CRTCs transcoder.
-	 */
-	if (crtc_state->master_transcoder == INVALID_TRANSCODER)
-		return;
-
-	if (crtc_state->master_transcoder == TRANSCODER_EDP)
-		master_select = 0;
-	else
-		master_select = crtc_state->master_transcoder + 1;
-
-	/* Set the master select bits for Tranascoder Port Sync */
-	trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
-				   PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
-		PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
-	/* Enable Transcoder Port Sync */
-	trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;
-
-	intel_de_write(dev_priv,
-		       TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
-		       trans_ddi_func_ctl2_val);
-}
-
 static void intel_fdi_normal_train(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
@@ -6200,6 +6179,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
 	case DRM_FORMAT_UYVY:
 	case DRM_FORMAT_VYUY:
 	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_XYUV8888:
 	case DRM_FORMAT_P010:
 	case DRM_FORMAT_P012:
 	case DRM_FORMAT_P016:
@@ -6463,8 +6443,8 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

-	/* Wa_2006604312:icl */
-	if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
+	/* Wa_2006604312:icl,ehl */
+	if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11))
 		return true;

 	return false;
@@ -6534,7 +6514,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
	    needs_nv12_wa(new_crtc_state))
 		skl_wa_827(dev_priv, pipe, true);

-	/* Wa_2006604312:icl */
+	/* Wa_2006604312:icl,ehl */
 	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
 		icl_wa_scalerclkgating(dev_priv, pipe, true);
@@ -6720,7 +6700,8 @@ static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
			continue;

 		if (encoder->pre_pll_enable)
-			encoder->pre_pll_enable(encoder, crtc_state, conn_state);
+			encoder->pre_pll_enable(state, encoder,
+						crtc_state, conn_state);
 	}
 }

@@ -6741,7 +6722,8 @@ static void intel_encoders_pre_enable(struct intel_atomic_state *state,
			continue;

 		if (encoder->pre_enable)
-			encoder->pre_enable(encoder, crtc_state, conn_state);
+			encoder->pre_enable(state, encoder,
+					    crtc_state, conn_state);
 	}
 }

@@ -6762,7 +6744,8 @@ static void intel_encoders_enable(struct intel_atomic_state *state,
			continue;

 		if (encoder->enable)
-			encoder->enable(encoder, crtc_state, conn_state);
+			encoder->enable(state, encoder,
+					crtc_state, conn_state);
 		intel_opregion_notify_encoder(encoder, true);
 	}
 }
@@ -6785,7 +6768,8 @@ static void intel_encoders_disable(struct intel_atomic_state *state,

		intel_opregion_notify_encoder(encoder, false);
		if (encoder->disable)
-			encoder->disable(encoder, old_crtc_state, old_conn_state);
+			encoder->disable(state, encoder,
+					 old_crtc_state, old_conn_state);
	}
 }

@@ -6806,7 +6790,8 @@ static void intel_encoders_post_disable(struct intel_atomic_state *state,
			continue;

 		if (encoder->post_disable)
-			encoder->post_disable(encoder, old_crtc_state, old_conn_state);
+			encoder->post_disable(state, encoder,
+					      old_crtc_state, old_conn_state);
 	}
 }

@@ -6827,7 +6812,8 @@ static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
			continue;

 		if (encoder->post_pll_disable)
-			encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
+			encoder->post_pll_disable(state, encoder,
+						  old_crtc_state, old_conn_state);
 	}
 }

@@ -6848,7 +6834,8 @@ static void intel_encoders_update_pipe(struct intel_atomic_state *state,
			continue;

 		if (encoder->update_pipe)
-			encoder->update_pipe(encoder, crtc_state, conn_state);
+			encoder->update_pipe(state, encoder,
+					     crtc_state, conn_state);
 	}
 }

@@ -7037,9 +7024,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
 	if (!transcoder_is_dsi(cpu_transcoder))
 		intel_set_pipe_timings(new_crtc_state);

-	if (INTEL_GEN(dev_priv) >= 11)
-		icl_enable_trans_port_sync(new_crtc_state);
-
 	intel_set_pipe_src_size(new_crtc_state);

 	if (cpu_transcoder != TRANSCODER_EDP &&
@@ -9398,7 +9382,6 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
 	pipe_config->shared_dpll = NULL;
-	pipe_config->master_transcoder = INVALID_TRANSCODER;

 	ret = false;

@@ -10622,7 +10605,6 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,

 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
 	pipe_config->shared_dpll = NULL;
-	pipe_config->master_transcoder = INVALID_TRANSCODER;

 	ret = false;
 	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
@@ -10891,7 +10873,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
 		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

-	if (HAS_TRANSCODER_EDP(dev_priv))
+	if (HAS_TRANSCODER(dev_priv, TRANSCODER_EDP))
 		panel_transcoder_mask |= BIT(TRANSCODER_EDP);

 	/*
@@ -11085,61 +11067,6 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
 	}
 }

-static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
-						 enum transcoder cpu_transcoder)
-{
-	u32 trans_port_sync, master_select;
-
-	trans_port_sync = intel_de_read(dev_priv,
-					TRANS_DDI_FUNC_CTL2(cpu_transcoder));
-
-	if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
-		return INVALID_TRANSCODER;
-
-	master_select = trans_port_sync &
-			PORT_SYNC_MODE_MASTER_SELECT_MASK;
-	if (master_select == 0)
-		return TRANSCODER_EDP;
-	else
-		return master_select - 1;
-}
-
-static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-	u32 transcoders;
-	enum transcoder cpu_transcoder;
-
-	crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
-								  crtc_state->cpu_transcoder);
-
-	transcoders = BIT(TRANSCODER_A) |
-		      BIT(TRANSCODER_B) |
-		      BIT(TRANSCODER_C) |
-		      BIT(TRANSCODER_D);
-	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
-		enum intel_display_power_domain power_domain;
-		intel_wakeref_t trans_wakeref;
-
-		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
-		trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
-								   power_domain);
-
-		if (!trans_wakeref)
-			continue;
-
-		if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
-		    crtc_state->cpu_transcoder)
-			crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
-
-		intel_display_power_put(dev_priv, power_domain, trans_wakeref);
-	}
-
-	drm_WARN_ON(&dev_priv->drm,
-		    crtc_state->master_transcoder != INVALID_TRANSCODER &&
-		    crtc_state->sync_mode_slaves_mask);
-}
-
 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
 {
@@ -11271,10 +11198,6 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
 		pipe_config->pixel_multiplier = 1;
 	}

-	if (INTEL_GEN(dev_priv) >= 11 &&
-	    !transcoder_is_dsi(pipe_config->cpu_transcoder))
-		icl_get_trans_port_sync_config(pipe_config);
-
 out:
 	for_each_power_domain(power_domain, power_domain_mask)
 		intel_display_power_put(dev_priv,
@@ -12377,10 +12300,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
	 * only combine the results from all planes in the current place?
	 */
 	if (!is_crtc_enabled) {
-		plane_state->uapi.visible = visible = false;
-		crtc_state->active_planes &= ~BIT(plane->id);
-		crtc_state->data_rate[plane->id] = 0;
-		crtc_state->min_cdclk[plane->id] = 0;
+		intel_plane_set_invisible(crtc_state, plane_state);
+		visible = false;
 	}

 	if (!was_visible && !visible)
@@ -12886,9 +12807,10 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
 	return 0;
 }

-static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
+static void intel_dump_crtc_timings(struct drm_i915_private *i915,
+				    const struct drm_display_mode *mode)
 {
-	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
+	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
		    "type: 0x%x flags: 0x%x\n",
		    mode->crtc_clock,
		    mode->crtc_hdisplay, mode->crtc_hsync_start,
@@ -13042,6 +12964,11 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
		    transcoder_name(pipe_config->cpu_transcoder),
		    pipe_config->pipe_bpp, pipe_config->dither);

+	drm_dbg_kms(&dev_priv->drm,
+		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
+		    transcoder_name(pipe_config->master_transcoder),
+		    pipe_config->sync_mode_slaves_mask);
+
 	if (pipe_config->has_pch_encoder)
 		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
@@ -13079,7 +13006,7 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
 	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
 	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
 	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
-	intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
+	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
 	drm_dbg_kms(&dev_priv->drm,
		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		    pipe_config->port_clock,
@@ -14999,11 +14926,13 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
 }

 static void commit_pipe_config(struct intel_atomic_state *state,
-			       struct intel_crtc_state *old_crtc_state,
-			       struct intel_crtc_state *new_crtc_state)
+			       struct intel_crtc *crtc)
 {
-	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+	const struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
 	bool modeset = needs_modeset(new_crtc_state);

 	/*
@@ -15029,22 +14958,35 @@ static void commit_pipe_config(struct intel_atomic_state *state,
 		dev_priv->display.atomic_update_watermarks(state, crtc);
 }

-static void intel_update_crtc(struct intel_crtc *crtc,
-			      struct intel_atomic_state *state,
-			      struct intel_crtc_state *old_crtc_state,
-			      struct intel_crtc_state *new_crtc_state)
+static void intel_enable_crtc(struct intel_atomic_state *state,
+			      struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	bool modeset = needs_modeset(new_crtc_state);
+	const struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+
+	if (!needs_modeset(new_crtc_state))
+		return;

-	if (modeset) {
 	intel_crtc_update_active_timings(new_crtc_state);

 	dev_priv->display.crtc_enable(state, crtc);

 	/* vblanks work again, re-enable pipe CRC. */
 	intel_crtc_enable_pipe_crc(crtc);
-	} else {
+}
+
+static void intel_update_crtc(struct intel_atomic_state *state,
+			      struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+	struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	bool modeset = needs_modeset(new_crtc_state);
+
+	if (!modeset) {
 		if (new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
@@ -15064,7 +15006,7 @@ static void intel_update_crtc(struct intel_crtc *crtc,
 	/* Perform vblank evasion around commit operation */
 	intel_pipe_update_start(new_crtc_state);

-	commit_pipe_config(state, old_crtc_state, new_crtc_state);
+	commit_pipe_config(state, crtc);

 	if (INTEL_GEN(dev_priv) >= 9)
 		skl_update_planes_on_crtc(state, crtc);
@@ -15084,18 +15026,6 @@ static void intel_update_crtc(struct intel_crtc *crtc,
 	intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
 }

-static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
-{
-	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
-	enum transcoder slave_transcoder;
-
-	drm_WARN_ON(&dev_priv->drm,
-		    !is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
-
-	slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
-	return intel_get_crtc_for_pipe(dev_priv,
-				       (enum pipe)slave_transcoder);
-}
-
 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
					  struct intel_crtc_state *old_crtc_state,
@ -15171,129 +15101,19 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
|
||||
|
||||
static void intel_commit_modeset_enables(struct intel_atomic_state *state)
|
||||
{
|
||||
struct intel_crtc_state *new_crtc_state;
|
||||
struct intel_crtc *crtc;
|
||||
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
|
||||
int i;
|
||||
|
||||
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
|
||||
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
|
||||
if (!new_crtc_state->hw.active)
|
||||
continue;
|
||||
|
||||
intel_update_crtc(crtc, state, old_crtc_state,
|
||||
new_crtc_state);
|
||||
intel_enable_crtc(state, crtc);
|
||||
intel_update_crtc(state, crtc);
|
||||
}
|
||||
}
|
||||
|
||||
static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
|
||||
struct intel_atomic_state *state,
|
||||
struct intel_crtc_state *new_crtc_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
|
||||
|
||||
intel_crtc_update_active_timings(new_crtc_state);
|
||||
dev_priv->display.crtc_enable(state, crtc);
|
||||
intel_crtc_enable_pipe_crc(crtc);
|
||||
}
|
||||
|
||||
static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
|
||||
struct intel_atomic_state *state)
|
||||
{
|
||||
struct drm_connector *uninitialized_var(conn);
|
||||
struct drm_connector_state *conn_state;
|
||||
struct intel_dp *intel_dp;
|
||||
int i;
|
||||
|
||||
for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
|
||||
if (conn_state->crtc == &crtc->base)
|
||||
break;
|
||||
}
|
||||
intel_dp = intel_attached_dp(to_intel_connector(conn));
|
||||
intel_dp_stop_link_train(intel_dp);
|
||||
}
|
||||
|
||||
/*
|
||||
* TODO: This is only called from port sync and it is identical to what will be
|
||||
* executed again in intel_update_crtc() over port sync pipes
|
||||
*/
|
||||
static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
|
||||
struct intel_atomic_state *state)
|
||||
{
|
||||
struct intel_crtc_state *new_crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
struct intel_crtc_state *old_crtc_state =
|
||||
intel_atomic_get_old_crtc_state(state, crtc);
|
||||
bool modeset = needs_modeset(new_crtc_state);
|
||||
|
||||
if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
|
||||
intel_fbc_disable(crtc);
|
||||
else
|
||||
intel_fbc_enable(state, crtc);
|
||||
|
||||
/* Perform vblank evasion around commit operation */
|
||||
intel_pipe_update_start(new_crtc_state);
|
||||
commit_pipe_config(state, old_crtc_state, new_crtc_state);
|
||||
skl_update_planes_on_crtc(state, crtc);
|
||||
intel_pipe_update_end(new_crtc_state);
|
||||
|
||||
/*
|
||||
* We usually enable FIFO underrun interrupts as part of the
|
||||
* CRTC enable sequence during modesets. But when we inherit a
|
||||
* valid pipe configuration from the BIOS we need to take care
|
||||
* of enabling them on the CRTC's first fastset.
|
||||
*/
|
||||
if (new_crtc_state->update_pipe && !modeset &&
|
||||
old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
|
||||
intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
|
||||
}
|
||||
|
||||
static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
|
||||
struct intel_atomic_state *state,
|
||||
struct intel_crtc_state *old_crtc_state,
|
||||
struct intel_crtc_state *new_crtc_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
|
||||
struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
|
||||
struct intel_crtc_state *new_slave_crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, slave_crtc);
|
||||
struct intel_crtc_state *old_slave_crtc_state =
|
||||
intel_atomic_get_old_crtc_state(state, slave_crtc);
|
||||
|
||||
drm_WARN_ON(&i915->drm, !slave_crtc || !new_slave_crtc_state ||
|
||||
!old_slave_crtc_state);
|
||||
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
|
||||
crtc->base.base.id, crtc->base.name,
|
||||
slave_crtc->base.base.id, slave_crtc->base.name);
|
||||
|
||||
/* Enable seq for slave with with DP_TP_CTL left Idle until the
|
||||
* master is ready
|
||||
*/
|
||||
intel_crtc_enable_trans_port_sync(slave_crtc,
|
||||
state,
|
||||
new_slave_crtc_state);
|
||||
|
||||
/* Enable seq for master with with DP_TP_CTL left Idle */
|
||||
intel_crtc_enable_trans_port_sync(crtc,
|
||||
state,
|
||||
new_crtc_state);
|
||||
|
||||
/* Set Slave's DP_TP_CTL to Normal */
|
||||
intel_set_dp_tp_ctl_normal(slave_crtc,
|
||||
state);
|
||||
|
||||
/* Set Master's DP_TP_CTL To Normal */
|
||||
usleep_range(200, 400);
|
||||
intel_set_dp_tp_ctl_normal(crtc,
|
||||
state);
|
||||
|
||||
/* Now do the post crtc enable for all master and slaves */
|
||||
intel_post_crtc_enable_updates(slave_crtc,
|
||||
state);
|
||||
intel_post_crtc_enable_updates(crtc,
|
||||
state);
|
||||
}
|
||||
|
||||
static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
|
||||
@ -15365,8 +15185,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
|
||||
 		entries[pipe] = new_crtc_state->wm.skl.ddb;
 		update_pipes &= ~BIT(pipe);

-		intel_update_crtc(crtc, state, old_crtc_state,
-				  new_crtc_state);
+		intel_update_crtc(state, crtc);

 		/*
 		 * If this is an already active pipe, it's DDB changed,
@@ -15381,67 +15200,62 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
 		}
 	}

+	update_pipes = modeset_pipes;
+
 	/*
 	 * Enable all pipes that needs a modeset and do not depends on other
 	 * pipes
 	 */
-	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
-					    new_crtc_state, i) {
+	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
 		enum pipe pipe = crtc->pipe;

 		if ((modeset_pipes & BIT(pipe)) == 0)
 			continue;

 		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
-		    is_trans_port_sync_slave(new_crtc_state))
+		    is_trans_port_sync_master(new_crtc_state))
 			continue;

 		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
 									 entries, I915_MAX_PIPES, pipe));

 		entries[pipe] = new_crtc_state->wm.skl.ddb;
 		modeset_pipes &= ~BIT(pipe);

-		if (is_trans_port_sync_mode(new_crtc_state)) {
-			struct intel_crtc *slave_crtc;
-
-			intel_update_trans_port_sync_crtcs(crtc, state,
-							   old_crtc_state,
-							   new_crtc_state);
-
-			slave_crtc = intel_get_slave_crtc(new_crtc_state);
-			/* TODO: update entries[] of slave */
-			modeset_pipes &= ~BIT(slave_crtc->pipe);
-
-		} else {
-			intel_update_crtc(crtc, state, old_crtc_state,
-					  new_crtc_state);
-		}
+		intel_enable_crtc(state, crtc);
 	}

 	/*
-	 * Finally enable all pipes that needs a modeset and depends on
-	 * other pipes, right now it is only MST slaves as both port sync slave
-	 * and master are enabled together
+	 * Then we enable all remaining pipes that depend on other
+	 * pipes: MST slaves and port sync masters.
 	 */
-	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
-					    new_crtc_state, i) {
+	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
 		enum pipe pipe = crtc->pipe;

 		if ((modeset_pipes & BIT(pipe)) == 0)
 			continue;

 		modeset_pipes &= ~BIT(pipe);

 		intel_enable_crtc(state, crtc);
 	}

+	/*
+	 * Finally we do the plane updates/etc. for all pipes that got enabled.
+	 */
+	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+		enum pipe pipe = crtc->pipe;
+
+		if ((update_pipes & BIT(pipe)) == 0)
+			continue;
+
 		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
 									 entries, I915_MAX_PIPES, pipe));

 		entries[pipe] = new_crtc_state->wm.skl.ddb;
-		modeset_pipes &= ~BIT(pipe);
+		update_pipes &= ~BIT(pipe);

-		intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state);
+		intel_update_crtc(state, crtc);
 	}

 	drm_WARN_ON(&dev_priv->drm, modeset_pipes);

 	drm_WARN_ON(&dev_priv->drm, update_pipes);
 }
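The constraint driving this commit ordering is that a pipe may only move to its new DDB (display buffer) allocation once that allocation no longer overlaps a range some other pipe is still using; entries[] tracks what each pipe currently occupies. A toy model of committing such a reallocation without transient overlap (the range layout here is invented for illustration):

/* Commit new FIFO ranges without transient overlap: a pipe may switch to
 * its new range only when that range is free of every range still
 * recorded in entries[]. Ranges are half-open [start, end). */
#include <stdbool.h>
#include <stdio.h>

struct range { int start, end; };

static bool overlaps(struct range a, struct range b)
{
        return a.start < b.end && b.start < a.end;
}

int main(void)
{
        struct range entries[3] = { {0, 4}, {4, 8}, {8, 12} };  /* current */
        struct range target[3]  = { {0, 6}, {6, 9}, {9, 12} };  /* desired */
        unsigned int pending = 0x7;

        while (pending) {
                unsigned int progress = 0;

                for (int p = 0; p < 3; p++) {
                        bool blocked = false;

                        if (!(pending & (1u << p)))
                                continue;
                        for (int o = 0; o < 3; o++)
                                if (o != p && (pending & (1u << o)) &&
                                    overlaps(target[p], entries[o]))
                                        blocked = true;
                        if (blocked)
                                continue;
                        entries[p] = target[p];       /* commit pipe p */
                        pending &= ~(1u << p);
                        progress |= 1u << p;
                        printf("committed pipe %d\n", p);
                }
                if (!progress)
                        break;  /* would deadlock; the real code WARNs */
        }
        return 0;
}

Running this commits pipe 2 first (its new range is already free), which in turn unblocks pipe 1 and then pipe 0, mirroring how the loops above repeatedly peel off whichever pipes have become safe to update.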
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)

@@ -18261,11 +18075,12 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 	best_encoder = connector->base.state->best_encoder;
 	connector->base.state->best_encoder = &encoder->base;

+	/* FIXME NULL atomic state passed! */
 	if (encoder->disable)
-		encoder->disable(encoder, crtc_state,
+		encoder->disable(NULL, encoder, crtc_state,
 				 connector->base.state);
 	if (encoder->post_disable)
-		encoder->post_disable(encoder, crtc_state,
+		encoder->post_disable(NULL, encoder, crtc_state,
 				      connector->base.state);

 	connector->base.state->best_encoder = best_encoder;
@@ -18802,15 +18617,6 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)

 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

-static bool
-has_transcoder(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
-{
-	if (cpu_transcoder == TRANSCODER_EDP)
-		return HAS_TRANSCODER_EDP(dev_priv);
-	else
-		return INTEL_INFO(dev_priv)->pipe_mask & BIT(cpu_transcoder);
-}
-
 struct intel_display_error_state {

 	u32 power_well_driver;
@@ -18919,7 +18725,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
 	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
 		enum transcoder cpu_transcoder = transcoders[i];

-		if (!has_transcoder(dev_priv, cpu_transcoder))
+		if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
			continue;

 		error->transcoder[i].available = true;
@@ -320,9 +320,13 @@ enum phy_fia {
 	for_each_pipe(__dev_priv, __p) \
 		for_each_if((__mask) & BIT(__p))

-#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
+#define for_each_cpu_transcoder(__dev_priv, __t) \
 	for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++) \
-		for_each_if ((__mask) & (1 << (__t)))
+		for_each_if (INTEL_INFO(__dev_priv)->cpu_transcoder_mask & BIT(__t))
+
+#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
+	for_each_cpu_transcoder(__dev_priv, __t) \
+		for_each_if ((__mask) & BIT(__t))

 #define for_each_universal_plane(__dev_priv, __pipe, __p) \
 	for ((__p) = 0; \
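These macros compose a bounded for loop with for_each_if filters, so the loop body only runs for transcoders present in the device's capability mask (and, in the _masked variant, in the caller's mask as well). A standalone sketch of the same pattern, with all names hypothetical; the for_each_if definition matches the one drm uses:

#include <stdio.h>

/* Filter pattern used by the for_each_* macros: the following statement
 * is skipped unless the condition holds. */
#define for_each_if(cond) if (!(cond)) {} else

#define MAX_ID 8

/* Visit only the IDs present in both the device capability mask and the
 * caller-supplied mask. */
#define for_each_id_masked(__caps, __id, __mask) \
        for ((__id) = 0; (__id) < MAX_ID; (__id)++) \
                for_each_if ((__caps) & (1u << (__id))) \
                        for_each_if ((__mask) & (1u << (__id)))

int main(void)
{
        unsigned int caps = 0x2d;  /* hardware has IDs 0, 2, 3, 5 */
        int id;

        for_each_id_masked(caps, id, 0x0f)
                printf("id %d\n", id);  /* prints 0, 2, 3 */
        return 0;
}

Layering the new for_each_cpu_transcoder_masked on top of for_each_cpu_transcoder means callers can no longer iterate transcoders the hardware does not actually have, which is exactly what the HAS_TRANSCODER() conversion above relies on.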
@@ -1326,6 +1326,16 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
 			   intel_dp->compliance.test_data.vdisplay);
 		seq_printf(m, "bpc: %u\n",
 			   intel_dp->compliance.test_data.bpc);
+	} else if (intel_dp->compliance.test_type ==
+		   DP_TEST_LINK_PHY_TEST_PATTERN) {
+		seq_printf(m, "pattern: %d\n",
+			   intel_dp->compliance.test_data.phytest.phy_pattern);
+		seq_printf(m, "Number of lanes: %d\n",
+			   intel_dp->compliance.test_data.phytest.num_lanes);
+		seq_printf(m, "Link Rate: %d\n",
+			   intel_dp->compliance.test_data.phytest.link_rate);
+		seq_printf(m, "level: %02x\n",
+			   intel_dp->train_set[0]);
+	}
 	} else
 		seq_puts(m, "0");
@@ -1358,7 +1368,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)

 	if (encoder && connector->status == connector_status_connected) {
 		intel_dp = enc_to_intel_dp(encoder);
-		seq_printf(m, "%02lx", intel_dp->compliance.test_type);
+		seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
 	} else
 		seq_puts(m, "0");
 }
@@ -1873,11 +1873,14 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
 static void print_power_domains(struct i915_power_domains *power_domains,
 				const char *prefix, u64 mask)
 {
+	struct drm_i915_private *i915 = container_of(power_domains,
+						     struct drm_i915_private,
+						     power_domains);
 	enum intel_display_power_domain domain;

-	DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
+	drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
 	for_each_power_domain(domain, mask)
-		DRM_DEBUG_DRIVER("%s use_count %d\n",
+		drm_dbg(&i915->drm, "%s use_count %d\n",
			 intel_display_power_domain_str(domain),
			 power_domains->domain_use_count[domain]);
 }
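This hunk and the many like it below carry out the "convert various parts of driver to use drm_device based logging" item from the shortlog: instead of a driver-global DRM_DEBUG_DRIVER, messages go through drm_dbg(&i915->drm, ...), so on a multi-GPU system every line is tagged with the device it came from. A minimal userspace sketch of why that matters (names and macros are illustrative, compiled with the common GNU ##__VA_ARGS__ extension):

#include <stdio.h>

struct device { const char *name; };

/* Global-style logging: no way to tell two GPUs apart. */
#define DBG_GLOBAL(fmt, ...) printf("[drm] " fmt, ##__VA_ARGS__)

/* Device-based logging: every message is tagged with its source device. */
#define dev_dbg(dev, fmt, ...) \
        printf("[drm:%s] " fmt, (dev)->name, ##__VA_ARGS__)

int main(void)
{
        struct device card0 = { "0000:00:02.0" }, card1 = { "0000:03:00.0" };

        DBG_GLOBAL("use_count %d\n", 1);      /* ambiguous on multi-GPU */
        dev_dbg(&card0, "use_count %d\n", 1); /* unambiguous */
        dev_dbg(&card1, "use_count %d\n", 2);
        return 0;
}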
@@ -1885,7 +1888,11 @@ static void print_power_domains(struct i915_power_domains *power_domains,
 static void
 print_async_put_domains_state(struct i915_power_domains *power_domains)
 {
-	DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
+	struct drm_i915_private *i915 = container_of(power_domains,
+						     struct drm_i915_private,
+						     power_domains);
+
+	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
		 power_domains->async_put_wakeref);

 	print_power_domains(power_domains, "async_put_domains[0]",
@@ -4140,7 +4147,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX D TBT1",
 		.domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4151,7 +4158,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX E TBT2",
 		.domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4162,7 +4169,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX F TBT3",
 		.domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4173,7 +4180,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX G TBT4",
 		.domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4184,7 +4191,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX H TBT5",
 		.domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4195,7 +4202,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
 	{
 		.name = "AUX I TBT6",
 		.domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
-		.ops = &hsw_power_well_ops,
+		.ops = &icl_tc_phy_aux_power_well_ops,
 		.id = DISP_PW_ID_NONE,
 		{
 			.hsw.regs = &icl_aux_power_well_regs,
@@ -4480,7 +4487,8 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
 	drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
		 "Invalid number of dbuf slices requested\n");

-	DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices);
+	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
+		    req_slices);

 	/*
 	 * Might be running this in parallel to gen9_dc_off_power_well_enable
@@ -5016,7 +5024,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
 	const struct buddy_page_mask *table;
 	int i;

-	if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
+	if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
 		/* Wa_1409767108: tgl */
 		table = wa_1409767108_buddy_page_masks;
 	else
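The second hunk widens the stepping range of the Wa_1409767108 buddy-table workaround from A0 only to A0 through B0. IS_TGL_REVID is an inclusive since/until revision check used to gate per-stepping workarounds; a standalone sketch of that pattern (struct and enum values invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct gpu { int revid; };

/* Inclusive [since, until] revision check, the shape used to gate
 * per-stepping hardware workarounds. */
static bool revid_in_range(const struct gpu *gpu, int since, int until)
{
        return gpu->revid >= since && gpu->revid <= until;
}

enum { REVID_A0 = 0, REVID_B0 = 1, REVID_C0 = 2 };

int main(void)
{
        struct gpu b0 = { REVID_B0 };

        /* An A0-only gate misses B0; widening the range applies it. */
        printf("A0 only: %d\n", revid_in_range(&b0, REVID_A0, REVID_A0));
        printf("A0..B0:  %d\n", revid_in_range(&b0, REVID_A0, REVID_B0));
        return 0;
}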
@@ -132,8 +132,7 @@ struct intel_encoder {
 	u16 cloneable;
 	u8 pipe_mask;
 	enum intel_hotplug_state (*hotplug)(struct intel_encoder *encoder,
-					    struct intel_connector *connector,
-					    bool irq_received);
+					    struct intel_connector *connector);
 	enum intel_output_type (*compute_output_type)(struct intel_encoder *,
						      struct intel_crtc_state *,
						      struct drm_connector_state *);
@@ -146,28 +145,35 @@ struct intel_encoder {
 	void (*update_prepare)(struct intel_atomic_state *,
			       struct intel_encoder *,
			       struct intel_crtc *);
-	void (*pre_pll_enable)(struct intel_encoder *,
+	void (*pre_pll_enable)(struct intel_atomic_state *,
+			       struct intel_encoder *,
			       const struct intel_crtc_state *,
			       const struct drm_connector_state *);
-	void (*pre_enable)(struct intel_encoder *,
+	void (*pre_enable)(struct intel_atomic_state *,
+			   struct intel_encoder *,
			   const struct intel_crtc_state *,
			   const struct drm_connector_state *);
-	void (*enable)(struct intel_encoder *,
+	void (*enable)(struct intel_atomic_state *,
+		       struct intel_encoder *,
		       const struct intel_crtc_state *,
		       const struct drm_connector_state *);
 	void (*update_complete)(struct intel_atomic_state *,
				struct intel_encoder *,
				struct intel_crtc *);
-	void (*disable)(struct intel_encoder *,
+	void (*disable)(struct intel_atomic_state *,
+			struct intel_encoder *,
			const struct intel_crtc_state *,
			const struct drm_connector_state *);
-	void (*post_disable)(struct intel_encoder *,
+	void (*post_disable)(struct intel_atomic_state *,
+			     struct intel_encoder *,
			     const struct intel_crtc_state *,
			     const struct drm_connector_state *);
-	void (*post_pll_disable)(struct intel_encoder *,
+	void (*post_pll_disable)(struct intel_atomic_state *,
+				 struct intel_encoder *,
				 const struct intel_crtc_state *,
				 const struct drm_connector_state *);
-	void (*update_pipe)(struct intel_encoder *,
+	void (*update_pipe)(struct intel_atomic_state *,
+			    struct intel_encoder *,
			    const struct intel_crtc_state *,
			    const struct drm_connector_state *);
 	/* Read out the current hw state of this connector, returning true if
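Every enable/disable hook gains a leading struct intel_atomic_state * here, so an implementation can look up other objects belonging to the same commit instead of reaching for globals; it is also why intel_sanitize_encoder earlier has to pass NULL with a FIXME, since the BIOS-takeover path has no commit in flight. A generic sketch of this callback-signature migration (all types and names are illustrative):

#include <stdio.h>

struct atomic_state { int serial; };

struct encoder {
        const char *name;
        void (*enable)(struct atomic_state *, struct encoder *);
};

static void my_enable(struct atomic_state *state, struct encoder *enc)
{
        /* The hook can now consult the in-flight commit it is part of. */
        if (state)
                printf("%s: enabling as part of commit #%d\n",
                       enc->name, state->serial);
        else
                printf("%s: enabling with no atomic state (sanitize path)\n",
                       enc->name);
}

int main(void)
{
        struct encoder enc = { "DDI A", my_enable };
        struct atomic_state commit = { 42 };

        enc.enable(&commit, &enc);  /* normal modeset */
        enc.enable(NULL, &enc);     /* BIOS takeover, like the FIXME above */
        return 0;
}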
@@ -425,6 +431,9 @@ struct intel_connector {
 	struct edid *edid;
 	struct edid *detect_edid;

+	/* Number of times hotplug detection was tried after an HPD interrupt */
+	int hotplug_retries;
+
 	/* since POLL and HPD connectors may use the same HPD line keep the native
	   state of connector->polled in case hotplug storm detection changes it */
 	u8 polled;
@@ -640,6 +649,16 @@ struct intel_crtc_scaler_state {
 #define I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP (1<<1)
 /* Flag to use the scanline counter instead of the pixel counter */
 #define I915_MODE_FLAG_USE_SCANLINE_COUNTER (1<<2)
+/*
+ * TE0 or TE1 flag is set if the crtc has a DSI encoder which
+ * is operating in command mode.
+ * Flag to use TE from DSI0 instead of VBI in command mode
+ */
+#define I915_MODE_FLAG_DSI_USE_TE0 (1<<3)
+/* Flag to use TE from DSI1 instead of VBI in command mode */
+#define I915_MODE_FLAG_DSI_USE_TE1 (1<<4)
+/* Flag to indicate mipi dsi periodic command mode where we do not get TE */
+#define I915_MODE_FLAG_DSI_PERIODIC_CMD_MODE (1<<5)

 struct intel_wm_level {
 	bool enable;
@@ -1015,6 +1034,7 @@ struct intel_crtc_state {
		union hdmi_infoframe spd;
		union hdmi_infoframe hdmi;
		union hdmi_infoframe drm;
+		struct drm_dp_vsc_sdp vsc;
	} infoframes;

 	/* HDMI scrambling status */
@@ -1238,6 +1258,7 @@ struct intel_dp_compliance_data {
 	u8 video_pattern;
 	u16 hdisplay, vdisplay;
 	u8 bpc;
+	struct drm_dp_phy_test_params phytest;
 };

 struct intel_dp_compliance {
[diff of one file suppressed because it is too large]
@@ -114,7 +114,11 @@ void intel_dp_vsc_enable(struct intel_dp *intel_dp,
 void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state);
+void intel_dp_set_infoframes(struct intel_encoder *encoder, bool enable,
+			     const struct intel_crtc_state *crtc_state,
+			     const struct drm_connector_state *conn_state);
 bool intel_digital_port_connected(struct intel_encoder *encoder);
+void intel_dp_process_phy_request(struct intel_dp *intel_dp);

 static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
 {
@@ -27,6 +27,7 @@

 static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
 {
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 	u8 reg_val = 0;

 	/* Early return when display use other mechanism to enable backlight. */
@@ -35,7 +36,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)

 	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
			      &reg_val) < 0) {
-		DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
+		drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n",
			      DP_EDP_DISPLAY_CONTROL_REGISTER);
		return;
 	}
@@ -46,7 +47,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)

 	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
			       reg_val) != 1) {
-		DRM_DEBUG_KMS("Failed to %s aux backlight\n",
+		drm_dbg_kms(&i915->drm, "Failed to %s aux backlight\n",
			      enable ? "enable" : "disable");
 	}
 }
@@ -58,6 +59,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
 static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 	u8 read_val[2] = { 0x0 };
 	u8 mode_reg;
 	u16 level = 0;
@@ -65,7 +67,8 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
 	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
			      &mode_reg) != 1) {
-		DRM_DEBUG_KMS("Failed to read the DPCD register 0x%x\n",
+		drm_dbg_kms(&i915->drm,
+			    "Failed to read the DPCD register 0x%x\n",
			      DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
		return 0;
 	}
@@ -80,7 +83,7 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)

 	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
			     &read_val, sizeof(read_val)) < 0) {
-		DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
+		drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n",
			    DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
		return 0;
 	}
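The read above pulls two consecutive DPCD bytes starting at the MSB brightness register; the tail of the function, elided by this hunk, combines them into the 16-bit level that gets returned. A sketch of that combination, with the exact masking treated as an assumption rather than the driver's verbatim code:

#include <stdint.h>
#include <stdio.h>

/* Combine an MSB-first pair of brightness registers into one level. */
static uint16_t backlight_level(const uint8_t read_val[2], int byte_count_16)
{
        /* Panels supporting only the 8-bit interface use just the MSB. */
        if (!byte_count_16)
                return read_val[0];
        return ((uint16_t)read_val[0] << 8) | read_val[1];
}

int main(void)
{
        uint8_t regs[2] = { 0x12, 0x34 };

        printf("8-bit:  0x%04x\n", backlight_level(regs, 0));  /* 0x0012 */
        printf("16-bit: 0x%04x\n", backlight_level(regs, 1));  /* 0x1234 */
        return 0;
}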
@@ -100,6 +103,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
 {
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 	u8 vals[2] = { 0x0 };

 	vals[0] = level;
@@ -111,7 +115,8 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
 	}
 	if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
			      vals, sizeof(vals)) < 0) {
-		DRM_DEBUG_KMS("Failed to write aux backlight level\n");
+		drm_dbg_kms(&i915->drm,
+			    "Failed to write aux backlight level\n");
		return;
 	}
 }
@@ -133,7 +138,8 @@ static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)

 	freq = dev_priv->vbt.backlight.pwm_freq_hz;
 	if (!freq) {
-		DRM_DEBUG_KMS("Use panel default backlight frequency\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Use panel default backlight frequency\n");
		return false;
 	}

@@ -146,13 +152,14 @@ static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
 	fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);

 	if (fxp_min > fxp_actual || fxp_actual > fxp_max) {
-		DRM_DEBUG_KMS("Actual frequency out of range\n");
+		drm_dbg_kms(&dev_priv->drm, "Actual frequency out of range\n");
		return false;
 	}

 	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_EDP_BACKLIGHT_FREQ_SET, (u8) f) < 0) {
-		DRM_DEBUG_KMS("Failed to write aux backlight freq\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "Failed to write aux backlight freq\n");
		return false;
 	}
 	return true;
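The fxp_min/fxp_max computation above implements a tolerance window: the requested PWM frequency is converted into the panel's fixed-point divider, and the result is accepted only if the frequency that divider actually produces lands within -25%/+25% of the request (fxp * 3/4 through fxp * 5/4). A standalone arithmetic sketch of the check; note the real code works in fixed-point divider units rather than Hz, and the base clock below is invented:

#include <stdbool.h>
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

/* Accept a divider only if the frequency it really produces is within
 * 25% of the requested one, mirroring the fxp_min/fxp_max check. */
static bool freq_acceptable(long requested_hz, long base_hz, long divider)
{
        long actual_hz = DIV_ROUND_CLOSEST(base_hz, divider);
        long min_hz = DIV_ROUND_CLOSEST(requested_hz * 3, 4);
        long max_hz = DIV_ROUND_CLOSEST(requested_hz * 5, 4);

        return min_hz <= actual_hz && actual_hz <= max_hz;
}

int main(void)
{
        long base = 27000000;  /* hypothetical TCON base clock */

        printf("%d\n", freq_acceptable(200, base, 135000)); /* 200 Hz: ok */
        printf("%d\n", freq_acceptable(200, base, 54000));  /* 500 Hz: out */
        return 0;
}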
@@ -163,12 +170,13 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
 {
 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 	struct intel_panel *panel = &connector->panel;
 	u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;

 	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) {
-		DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
+		drm_dbg_kms(&i915->drm, "Failed to read DPCD register 0x%x\n",
			      DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
		return;
 	}
@@ -186,7 +194,8 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
		if (drm_dp_dpcd_writeb(&intel_dp->aux,
				       DP_EDP_PWMGEN_BIT_COUNT,
				       panel->backlight.pwmgen_bit_count) < 0)
-			DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+			drm_dbg_kms(&i915->drm,
+				    "Failed to write aux pwmgen bit count\n");

		break;

@@ -203,7 +212,8 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
 	if (new_dpcd_buf != dpcd_buf) {
		if (drm_dp_dpcd_writeb(&intel_dp->aux,
				       DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) {
-			DRM_DEBUG_KMS("Failed to write aux backlight mode\n");
+			drm_dbg_kms(&i915->drm,
+				    "Failed to write aux backlight mode\n");
		}
 	}

@@ -237,9 +247,11 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
 	 * minimum value will applied automatically. So no need to check that.
 	 */
 	freq = i915->vbt.backlight.pwm_freq_hz;
-	DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq);
+	drm_dbg_kms(&i915->drm, "VBT defined backlight frequency %u Hz\n",
+		    freq);
 	if (!freq) {
-		DRM_DEBUG_KMS("Use panel default backlight frequency\n");
+		drm_dbg_kms(&i915->drm,
+			    "Use panel default backlight frequency\n");
		return max_backlight;
 	}

@@ -254,12 +266,14 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
 	 */
 	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) {
-		DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n");
+		drm_dbg_kms(&i915->drm,
+			    "Failed to read pwmgen bit count cap min\n");
		return max_backlight;
 	}
 	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) {
-		DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n");
+		drm_dbg_kms(&i915->drm,
+			    "Failed to read pwmgen bit count cap max\n");
		return max_backlight;
 	}
 	pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
@@ -268,7 +282,8 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
 	fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
 	fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
 	if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) {
-		DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n");
+		drm_dbg_kms(&i915->drm,
+			    "VBT defined backlight frequency out of range\n");
		return max_backlight;
 	}
@@ -279,10 +294,11 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
			break;
 	}

-	DRM_DEBUG_KMS("Using eDP pwmgen bit count of %d\n", pn);
+	drm_dbg_kms(&i915->drm, "Using eDP pwmgen bit count of %d\n", pn);
 	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) {
-		DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+		drm_dbg_kms(&i915->drm,
+			    "Failed to write aux pwmgen bit count\n");
		return max_backlight;
 	}
 	panel->backlight.pwmgen_bit_count = pn;
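The selection loop itself is almost entirely elided by this hunk (only its break survives): it walks candidate pwmgen bit counts between the pn_min/pn_max caps read earlier and keeps the largest one whose quantized period still lands inside the tolerance window, with the earlier `fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max` test rejecting frequencies no bit count can hit. The following reconstruction is an assumption based on those visible checks, not the driver's verbatim loop:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static long clamp_long(long v, long lo, long hi)
{
        return v < lo ? lo : v > hi ? hi : v;
}

/* Walk bit counts from the panel's max down to its min and keep the
 * first (largest) pn whose quantized period f << pn, with f held in an
 * 8-bit register, still lands within +/-25% of the target period fxp. */
static int pick_bit_count(long fxp, int pn_min, int pn_max)
{
        long fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
        long fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
        int pn;

        for (pn = pn_max; pn >= pn_min; pn--) {
                long f = clamp_long(DIV_ROUND_CLOSEST(fxp, 1L << pn), 1, 255);
                long fxp_actual = f << pn;

                if (fxp_min <= fxp_actual && fxp_actual <= fxp_max)
                        return pn;
        }
        return -1; /* no usable bit count */
}

int main(void)
{
        printf("pn = %d\n", pick_bit_count(135000, 4, 14));
        return 0;
}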
@@ -312,6 +328,7 @@ static bool
 intel_dp_aux_display_control_capable(struct intel_connector *connector)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

 	/* Check the eDP Display control capabilities registers to determine if
 	 * the panel can support backlight control over the aux channel
@@ -319,7 +336,7 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
 	if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
	    (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) &&
	    !(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) {
-		DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
+		drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
		return true;
 	}
 	return false;
@@ -329,8 +346,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
 {
 	struct intel_panel *panel = &intel_connector->panel;
 	struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder);
-	struct drm_device *dev = intel_connector->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

 	if (i915_modparams.enable_dpcd_backlight == 0 ||
	    !intel_dp_aux_display_control_capable(intel_connector))
@@ -340,11 +356,11 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
 	 * There are a lot of machines that don't advertise the backlight
 	 * control interface to use properly in their VBIOS, :\
 	 */
-	if (dev_priv->vbt.backlight.type !=
+	if (i915->vbt.backlight.type !=
	    INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
	    !drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
			      DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
-		DRM_DEV_INFO(dev->dev,
+		drm_info(&i915->drm,
			 "Panel advertises DPCD backlight support, but "
			 "VBT disagrees. If your backlight controls "
			 "don't work try booting with "
@@ -34,8 +34,7 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
		      link_status[3], link_status[4], link_status[5]);
 }

-static void
-intel_get_adjust_train(struct intel_dp *intel_dp,
+void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
			       const u8 link_status[DP_LINK_STATUS_SIZE])
 {
 	u8 v = 0;
@@ -219,7 +218,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
-		intel_get_adjust_train(intel_dp, link_status);
+		intel_dp_get_adjust_train(intel_dp, link_status);
		if (!intel_dp_update_link_train(intel_dp)) {
			drm_err(&i915->drm,
				"failed to update link training\n");
@@ -338,7 +337,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
		}

		/* Update training set as requested by target */
-		intel_get_adjust_train(intel_dp, link_status);
+		intel_dp_get_adjust_train(intel_dp, link_status);
		if (!intel_dp_update_link_train(intel_dp)) {
			drm_err(&i915->drm,
				"failed to update link training\n");
@@ -6,8 +6,12 @@
 #ifndef __INTEL_DP_LINK_TRAINING_H__
 #define __INTEL_DP_LINK_TRAINING_H__

+#include <drm/drm_dp_helper.h>
+
 struct intel_dp;

+void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
+			       const u8 link_status[DP_LINK_STATUS_SIZE]);
 void intel_dp_start_link_train(struct intel_dp *intel_dp);
 void intel_dp_stop_link_train(struct intel_dp *intel_dp);
@@ -47,9 +47,9 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
 	struct intel_dp *intel_dp = &intel_mst->primary->dp;
 	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
-	void *port = connector->port;
 	bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
					   DP_DPCD_QUIRK_CONSTANT_N);
 	int bpp, slots = -EINVAL;
@@ -65,7 +65,8 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
						       false);

		slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
-						      port, crtc_state->pbn, 0);
+						      connector->port,
+						      crtc_state->pbn, 0);
		if (slots == -EDEADLK)
			return slots;
		if (slots >= 0)
@@ -73,7 +74,8 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
 	}

 	if (slots < 0) {
-		DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots);
+		drm_dbg_kms(&i915->drm, "failed finding vcpi slots:%d\n",
+			    slots);
		return slots;
 	}

@@ -88,56 +90,10 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
 	return 0;
 }
-/*
- * Iterate over all connectors and return the smallest transcoder in the MST
- * stream
- */
-static enum transcoder
-intel_dp_mst_master_trans_compute(struct intel_atomic_state *state,
-				  struct intel_dp *mst_port)
-{
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	struct intel_digital_connector_state *conn_state;
-	struct intel_connector *connector;
-	enum pipe ret = I915_MAX_PIPES;
-	int i;
-
-	if (INTEL_GEN(dev_priv) < 12)
-		return INVALID_TRANSCODER;
-
-	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
-		struct intel_crtc_state *crtc_state;
-		struct intel_crtc *crtc;
-
-		if (connector->mst_port != mst_port || !conn_state->base.crtc)
-			continue;
-
-		crtc = to_intel_crtc(conn_state->base.crtc);
-		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
-		if (!crtc_state->uapi.active)
-			continue;
-
-		/*
-		 * Using crtc->pipe because crtc_state->cpu_transcoder is
-		 * computed, so others CRTCs could have non-computed
-		 * cpu_transcoder
-		 */
-		if (crtc->pipe < ret)
-			ret = crtc->pipe;
-	}
-
-	if (ret == I915_MAX_PIPES)
-		return INVALID_TRANSCODER;
-
-	/* Simple cast works because TGL don't have a eDP transcoder */
-	return (enum transcoder)ret;
-}
-
 static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state)
 {
 	struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
 	struct intel_dp *intel_dp = &intel_mst->primary->dp;
@@ -147,7 +103,6 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
		to_intel_digital_connector_state(conn_state);
 	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
-	void *port = connector->port;
 	struct link_config_limits limits;
 	int ret;

@@ -159,7 +114,8 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,

 	if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio =
-			drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port);
+			drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
+						  connector->port);
	else
		pipe_config->has_audio =
			intel_conn_state->force_audio == HDMI_AUDIO_ON;
@@ -201,7 +157,56 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,

 	intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);

-	pipe_config->mst_master_transcoder = intel_dp_mst_master_trans_compute(state, intel_dp);
-
 	return 0;
 }

+/*
+ * Iterate over all connectors and return a mask of
+ * all CPU transcoders streaming over the same DP link.
+ */
+static unsigned int
+intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
+			     struct intel_dp *mst_port)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	const struct intel_digital_connector_state *conn_state;
+	struct intel_connector *connector;
+	u8 transcoders = 0;
+	int i;
+
+	if (INTEL_GEN(dev_priv) < 12)
+		return 0;
+
+	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
+		const struct intel_crtc_state *crtc_state;
+		struct intel_crtc *crtc;
+
+		if (connector->mst_port != mst_port || !conn_state->base.crtc)
+			continue;
+
+		crtc = to_intel_crtc(conn_state->base.crtc);
+		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+		if (!crtc_state->hw.active)
+			continue;
+
+		transcoders |= BIT(crtc_state->cpu_transcoder);
+	}
+
+	return transcoders;
+}
+
+static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
+					    struct intel_crtc_state *crtc_state,
+					    struct drm_connector_state *conn_state)
+{
+	struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
+	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+	struct intel_dp *intel_dp = &intel_mst->primary->dp;
+
+	/* lowest numbered transcoder will be designated master */
+	crtc_state->mst_master_transcoder =
+		ffs(intel_dp_mst_transcoder_mask(state, intel_dp)) - 1;
+
+	return 0;
+}
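The new compute_config_late hook can run after every CRTC's cpu_transcoder has been computed (the limitation the removed comment complained about), so it simply collects a bitmask of all transcoders on the same MST link and designates the lowest-numbered one master. ffs() returns the 1-based index of the lowest set bit, so `ffs(mask) - 1` is that transcoder, and -1 for an empty mask means no master. A standalone sketch:

#include <stdio.h>
#include <strings.h>  /* ffs() */

int main(void)
{
        unsigned int transcoders = (1u << 1) | (1u << 3); /* B and D active */

        /* ffs() is 1-based; subtracting one yields the lowest set bit,
         * and an empty mask yields -1 ("no master"). */
        int master = ffs(transcoders) - 1;

        printf("master transcoder: %d\n", master); /* 1 */
        printf("empty mask: %d\n", ffs(0) - 1);    /* -1 */
        return 0;
}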
@@ -313,7 +318,8 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
 	return ret;
 }

-static void intel_mst_disable_dp(struct intel_encoder *encoder,
+static void intel_mst_disable_dp(struct intel_atomic_state *state,
+				 struct intel_encoder *encoder,
				 const struct intel_crtc_state *old_crtc_state,
				 const struct drm_connector_state *old_conn_state)
 {
@@ -322,22 +328,25 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 	struct intel_connector *connector =
		to_intel_connector(old_conn_state->connector);
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 	int ret;

-	DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+	drm_dbg_kms(&i915->drm, "active links %d\n",
+		    intel_dp->active_mst_links);

 	drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);

 	ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
 	if (ret) {
-		DRM_DEBUG_KMS("failed to update payload %d\n", ret);
+		drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret);
 	}
 	if (old_crtc_state->has_audio)
		intel_audio_codec_disable(encoder,
					  old_crtc_state, old_conn_state);
 }

-static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
+static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
+				      struct intel_encoder *encoder,
				      const struct intel_crtc_state *old_crtc_state,
				      const struct drm_connector_state *old_conn_state)
 {
@@ -371,7 +380,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,

 	if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
				  DP_TP_STATUS_ACT_SENT, 1))
-		DRM_ERROR("Timed out waiting for ACT sent when disabling\n");
+		drm_err(&dev_priv->drm,
+			"Timed out waiting for ACT sent when disabling\n");
 	drm_dp_check_act_status(&intel_dp->mst_mgr);

 	drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
@@ -402,13 +412,15 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,

 	intel_mst->connector = NULL;
 	if (last_mst_stream)
-		intel_dig_port->base.post_disable(&intel_dig_port->base,
+		intel_dig_port->base.post_disable(state, &intel_dig_port->base,
						  old_crtc_state, NULL);

-	DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+	drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+		    intel_dp->active_mst_links);
 }

-static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
+static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
+					struct intel_encoder *encoder,
					const struct intel_crtc_state *pipe_config,
					const struct drm_connector_state *conn_state)
 {
@@ -417,11 +429,12 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
 	struct intel_dp *intel_dp = &intel_dig_port->dp;

 	if (intel_dp->active_mst_links == 0)
-		intel_dig_port->base.pre_pll_enable(&intel_dig_port->base,
+		intel_dig_port->base.pre_pll_enable(state, &intel_dig_port->base,
						    pipe_config, NULL);
 }

-static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
+static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
+				    struct intel_encoder *encoder,
				    const struct intel_crtc_state *pipe_config,
				    const struct drm_connector_state *conn_state)
 {
@@ -445,7 +458,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
		    INTEL_GEN(dev_priv) >= 12 && first_mst_stream &&
		    !intel_dp_mst_is_master_trans(pipe_config));

-	DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+	drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+		    intel_dp->active_mst_links);

 	if (first_mst_stream)
		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
@@ -453,7 +467,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
 	drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);

 	if (first_mst_stream)
-		intel_dig_port->base.pre_enable(&intel_dig_port->base,
+		intel_dig_port->base.pre_enable(state, &intel_dig_port->base,
						pipe_config, NULL);

 	ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
@@ -461,7 +475,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
				       pipe_config->pbn,
				       pipe_config->dp_m_n.tu);
 	if (!ret)
-		DRM_ERROR("failed to allocate vcpi\n");
+		drm_err(&dev_priv->drm, "failed to allocate vcpi\n");

 	intel_dp->active_mst_links++;
 	temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_status);
@@ -484,7 +498,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
 	intel_dp_set_m_n(pipe_config, M1_N1);
 }

-static void intel_mst_enable_dp(struct intel_encoder *encoder,
+static void intel_mst_enable_dp(struct intel_atomic_state *state,
+				struct intel_encoder *encoder,
				const struct intel_crtc_state *pipe_config,
				const struct drm_connector_state *conn_state)
 {
@@ -499,11 +514,12 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,

 	intel_crtc_vblank_on(pipe_config);

-	DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+	drm_dbg_kms(&dev_priv->drm, "active links %d\n",
+		    intel_dp->active_mst_links);

 	if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
				  DP_TP_STATUS_ACT_SENT, 1))
-		DRM_ERROR("Timed out waiting for ACT sent\n");
+		drm_err(&dev_priv->drm, "Timed out waiting for ACT sent\n");

 	drm_dp_check_act_status(&intel_dp->mst_mgr);

@@ -786,6 +802,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
 	intel_encoder->pipe_mask = ~0;

 	intel_encoder->compute_config = intel_dp_mst_compute_config;
+	intel_encoder->compute_config_late = intel_dp_mst_compute_config_late;
 	intel_encoder->disable = intel_mst_disable_dp;
 	intel_encoder->post_disable = intel_mst_post_disable_dp;
 	intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
@@ -31,20 +31,21 @@ int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi)

 int intel_dsi_get_modes(struct drm_connector *connector)
 {
+	struct drm_i915_private *i915 = to_i915(connector->dev);
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct drm_display_mode *mode;

-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&i915->drm, "\n");

 	if (!intel_connector->panel.fixed_mode) {
-		DRM_DEBUG_KMS("no fixed mode\n");
+		drm_dbg_kms(&i915->drm, "no fixed mode\n");
		return 0;
 	}

 	mode = drm_mode_duplicate(connector->dev,
				  intel_connector->panel.fixed_mode);
 	if (!mode) {
-		DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
+		drm_dbg_kms(&i915->drm, "drm_mode_duplicate failed\n");
		return 0;
 	}

@@ -60,7 +61,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
 	const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;

-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&dev_priv->drm, "\n");

 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;
@@ -453,8 +453,7 @@ static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,

 static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
 {
-	struct drm_device *drm_dev = intel_dsi->base.base.dev;
-	struct device *dev = &drm_dev->pdev->dev;
+	struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
 	struct i2c_adapter *adapter;
 	struct i2c_msg msg;
 	int ret;
@@ -471,7 +470,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)

 	adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
 	if (!adapter) {
-		DRM_DEV_ERROR(dev, "Cannot find a valid i2c bus for xfer\n");
+		drm_err(&i915->drm, "Cannot find a valid i2c bus for xfer\n");
		goto err_bus;
 	}

@@ -489,7 +488,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)

 	ret = i2c_transfer(adapter, &msg, 1);
 	if (ret < 0)
-		DRM_DEV_ERROR(dev,
+		drm_err(&i915->drm,
			"Failed to xfer payload of size (%u) to reg (%u)\n",
			payload_size, reg_offset);
@@ -183,7 +183,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
 	pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock;
 }

-static void intel_disable_dvo(struct intel_encoder *encoder,
+static void intel_disable_dvo(struct intel_atomic_state *state,
+			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *old_crtc_state,
			      const struct drm_connector_state *old_conn_state)
 {
@@ -197,7 +198,8 @@ static void intel_disable_dvo(struct intel_encoder *encoder,
 	intel_de_read(dev_priv, dvo_reg);
 }

-static void intel_enable_dvo(struct intel_encoder *encoder,
+static void intel_enable_dvo(struct intel_atomic_state *state,
+			     struct intel_encoder *encoder,
			     const struct intel_crtc_state *pipe_config,
			     const struct drm_connector_state *conn_state)
 {
@@ -272,7 +274,8 @@ static int intel_dvo_compute_config(struct intel_encoder *encoder,
 	return 0;
 }

-static void intel_dvo_pre_enable(struct intel_encoder *encoder,
+static void intel_dvo_pre_enable(struct intel_atomic_state *state,
+				 struct intel_encoder *encoder,
				 const struct intel_crtc_state *pipe_config,
				 const struct drm_connector_state *conn_state)
 {
@@ -104,7 +104,7 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
		/* Wait for compressing bit to clear */
		if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
					    FBC_STAT_COMPRESSING, 10)) {
-			DRM_DEBUG_KMS("FBC idle timed out\n");
+			drm_dbg_kms(&dev_priv->drm, "FBC idle timed out\n");
			return;
		}
 	}
@@ -485,7 +485,8 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
 	if (!ret)
		goto err_llb;
 	else if (ret > 1) {
-		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
+		drm_info(&dev_priv->drm,
+			 "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

 	}

@@ -521,7 +522,8 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
			  dev_priv->dsm.start + compressed_llb->start);
 	}

-	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
+	drm_dbg_kms(&dev_priv->drm,
+		    "reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
		      fbc->compressed_fb.size, fbc->threshold);

 	return 0;
@@ -531,7 +533,7 @@ err_fb:
 	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
 err_llb:
 	if (drm_mm_initialized(&dev_priv->mm.stolen))
-		pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
+		drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
 	return -ENOSPC;
 }
@@ -606,6 +608,19 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
 	}
 }

+static bool rotation_is_valid(struct drm_i915_private *dev_priv,
+			      u32 pixel_format, unsigned int rotation)
+{
+	if (INTEL_GEN(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 &&
+	    drm_rotation_90_or_270(rotation))
+		return false;
+	else if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
+		 rotation != DRM_MODE_ROTATE_0)
+		return false;
+
+	return true;
+}
+
 /*
  * For some reason, the hardware tracking starts looking at whatever we
  * programmed as the display plane base address register. It does not look at
@@ -640,6 +655,22 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
 	return effective_w <= max_w && effective_h <= max_h;
 }

+static bool tiling_is_valid(struct drm_i915_private *dev_priv,
+			    uint64_t modifier)
+{
+	switch (modifier) {
+	case DRM_FORMAT_MOD_LINEAR:
+		if (INTEL_GEN(dev_priv) >= 9)
+			return true;
+		return false;
+	case I915_FORMAT_MOD_X_TILED:
+	case I915_FORMAT_MOD_Y_TILED:
+		return true;
+	default:
+		return false;
+	}
+}
+
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
const struct intel_plane_state *plane_state)
|
||||
@ -673,6 +704,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
|
||||
|
||||
cache->fb.format = fb->format;
|
||||
cache->fb.stride = fb->pitches[0];
|
||||
cache->fb.modifier = fb->modifier;
|
||||
|
||||
drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
|
||||
!plane_state->vma->fence);
|
||||
@ -746,29 +778,39 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
|
||||
return false;
|
||||
}
|
||||
|
||||
/* The use of a CPU fence is mandatory in order to detect writes
|
||||
* by the CPU to the scanout and trigger updates to the FBC.
|
||||
/* The use of a CPU fence is one of two ways to detect writes by the
|
||||
* CPU to the scanout and trigger updates to the FBC.
|
||||
*
|
||||
* The other method is by software tracking (see
|
||||
* intel_fbc_invalidate/flush()), it will manually notify FBC and nuke
|
||||
* the current compressed buffer and recompress it.
|
||||
*
|
||||
* Note that is possible for a tiled surface to be unmappable (and
|
||||
* so have no fence associated with it) due to aperture constaints
|
||||
* so have no fence associated with it) due to aperture constraints
|
||||
* at the time of pinning.
|
||||
*
|
||||
* FIXME with 90/270 degree rotation we should use the fence on
|
||||
* the normal GTT view (the rotated view doesn't even have a
|
||||
* fence). Would need changes to the FBC fence Y offset as well.
|
||||
* For now this will effecively disable FBC with 90/270 degree
|
||||
* For now this will effectively disable FBC with 90/270 degree
|
||||
* rotation.
|
||||
*/
|
||||
if (cache->fence_id < 0) {
|
||||
if (INTEL_GEN(dev_priv) < 9 && cache->fence_id < 0) {
|
||||
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
|
||||
return false;
|
||||
}
|
||||
if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
|
||||
cache->plane.rotation != DRM_MODE_ROTATE_0) {
|
||||
|
||||
if (!rotation_is_valid(dev_priv, cache->fb.format->format,
|
||||
cache->plane.rotation)) {
|
||||
fbc->no_fbc_reason = "rotation unsupported";
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!tiling_is_valid(dev_priv, cache->fb.modifier)) {
|
||||
fbc->no_fbc_reason = "tiling unsupported";
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!stride_is_valid(dev_priv, cache->fb.stride)) {
|
||||
fbc->no_fbc_reason = "framebuffer stride not supported";
|
||||
return false;
|
||||
@ -948,7 +990,8 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
|
||||
drm_WARN_ON(&dev_priv->drm, !fbc->crtc);
|
||||
drm_WARN_ON(&dev_priv->drm, fbc->active);
|
||||
|
||||
DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
|
||||
drm_dbg_kms(&dev_priv->drm, "Disabling FBC on pipe %c\n",
|
||||
pipe_name(crtc->pipe));
|
||||
|
||||
__intel_fbc_cleanup_cfb(dev_priv);
|
||||
|
||||
@ -1176,7 +1219,8 @@ void intel_fbc_enable(struct intel_atomic_state *state,
|
||||
else
|
||||
cache->gen9_wa_cfb_stride = 0;
|
||||
|
||||
DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
|
||||
drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n",
|
||||
pipe_name(crtc->pipe));
|
||||
fbc->no_fbc_reason = "FBC enabled but not active yet\n";
|
||||
|
||||
fbc->crtc = crtc;
|
||||
@ -1238,7 +1282,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work)
|
||||
if (fbc->underrun_detected || !fbc->crtc)
|
||||
goto out;
|
||||
|
||||
DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
|
||||
drm_dbg_kms(&dev_priv->drm, "Disabling FBC due to FIFO underrun.\n");
|
||||
fbc->underrun_detected = true;
|
||||
|
||||
intel_fbc_deactivate(dev_priv, "FIFO underrun");
|
||||
@ -1264,7 +1308,8 @@ int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
|
||||
return ret;
|
||||
|
||||
if (dev_priv->fbc.underrun_detected) {
|
||||
DRM_DEBUG_KMS("Re-allowing FBC after fifo underrun\n");
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Re-allowing FBC after fifo underrun\n");
|
||||
dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
|
||||
}
|
||||
|
||||
@ -1335,7 +1380,8 @@ static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
|
||||
/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
|
||||
if (intel_vtd_active() &&
|
||||
(IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
|
||||
DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
|
||||
drm_info(&dev_priv->drm,
|
||||
"Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1363,7 +1409,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
|
||||
mkwrite_device_info(dev_priv)->display.has_fbc = false;
|
||||
|
||||
i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
|
||||
DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
|
||||
drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n",
|
||||
i915_modparams.enable_fbc);
|
||||
|
||||
if (!HAS_FBC(dev_priv)) {
|
||||
|
@@ -146,7 +146,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
 	if (IS_ERR(obj))
		obj = i915_gem_object_create_shmem(dev_priv, size);
 	if (IS_ERR(obj)) {
-		DRM_ERROR("failed to allocate framebuffer\n");
+		drm_err(&dev_priv->drm, "failed to allocate framebuffer\n");
		return PTR_ERR(obj);
 	}

@@ -183,7 +183,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	if (intel_fb &&
	    (sizes->fb_width > intel_fb->base.width ||
	     sizes->fb_height > intel_fb->base.height)) {
-		DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
+		drm_dbg_kms(&dev_priv->drm,
+			    "BIOS fb too small (%dx%d), we require (%dx%d),"
			      " releasing it\n",
			      intel_fb->base.width, intel_fb->base.height,
			      sizes->fb_width, sizes->fb_height);
@@ -191,13 +192,14 @@ static int intelfb_create(struct drm_fb_helper *helper,
		intel_fb = ifbdev->fb = NULL;
 	}
 	if (!intel_fb || drm_WARN_ON(dev, !intel_fb_obj(&intel_fb->base))) {
-		DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
+		drm_dbg_kms(&dev_priv->drm,
+			    "no BIOS fb, allocating a new one\n");
		ret = intelfb_alloc(helper, sizes);
		if (ret)
			return ret;
		intel_fb = ifbdev->fb;
 	} else {
-		DRM_DEBUG_KMS("re-using BIOS fb\n");
+		drm_dbg_kms(&dev_priv->drm, "re-using BIOS fb\n");
		prealloc = true;
		sizes->fb_width = intel_fb->base.width;
		sizes->fb_height = intel_fb->base.height;
@@ -220,7 +222,7 @@ static int intelfb_create(struct drm_fb_helper *helper,

 	info = drm_fb_helper_alloc_fbi(helper);
 	if (IS_ERR(info)) {
-		DRM_ERROR("Failed to allocate fb_info\n");
+		drm_err(&dev_priv->drm, "Failed to allocate fb_info\n");
		ret = PTR_ERR(info);
		goto out_unpin;
 	}
@@ -240,7 +242,8 @@ static int intelfb_create(struct drm_fb_helper *helper,

 	vaddr = i915_vma_pin_iomap(vma);
 	if (IS_ERR(vaddr)) {
-		DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
+		drm_err(&dev_priv->drm,
+			"Failed to remap framebuffer into virtual memory\n");
		ret = PTR_ERR(vaddr);
		goto out_unpin;
 	}
@@ -258,7 +261,7 @@ static int intelfb_create(struct drm_fb_helper *helper,

 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

-	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
+	drm_dbg_kms(&dev_priv->drm, "allocated %dx%d fb: 0x%08x\n",
		      ifbdev->fb->base.width, ifbdev->fb->base.height,
		      i915_ggtt_offset(vma));
 	ifbdev->vma = vma;
@@ -309,6 +312,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
 static bool intel_fbdev_init_bios(struct drm_device *dev,
				  struct intel_fbdev *ifbdev)
 {
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct intel_framebuffer *fb = NULL;
 	struct drm_crtc *crtc;
 	struct intel_crtc *intel_crtc;
@@ -321,13 +325,15 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc->state->active || !obj) {
-			DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
+			drm_dbg_kms(&i915->drm,
+				    "pipe %c not active or no fb, skipping\n",
				      pipe_name(intel_crtc->pipe));
			continue;
		}

		if (obj->base.size > max_size) {
-			DRM_DEBUG_KMS("found possible fb from plane %c\n",
+			drm_dbg_kms(&i915->drm,
+				    "found possible fb from plane %c\n",
				      pipe_name(intel_crtc->pipe));
			fb = to_intel_framebuffer(crtc->primary->state->fb);
			max_size = obj->base.size;
@@ -335,7 +341,8 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
 	}

 	if (!fb) {
-		DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n");
+		drm_dbg_kms(&i915->drm,
+			    "no active fbs found, not using BIOS config\n");
		goto out;
 	}

@@ -346,12 +353,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc->state->active) {
-			DRM_DEBUG_KMS("pipe %c not active, skipping\n",
+			drm_dbg_kms(&i915->drm,
+				    "pipe %c not active, skipping\n",
				      pipe_name(intel_crtc->pipe));
			continue;
		}

-		DRM_DEBUG_KMS("checking plane %c for BIOS fb\n",
+		drm_dbg_kms(&i915->drm, "checking plane %c for BIOS fb\n",
			      pipe_name(intel_crtc->pipe));

		/*
@@ -362,7 +370,8 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
		cur_size = crtc->state->adjusted_mode.crtc_hdisplay;
		cur_size = cur_size * fb->base.format->cpp[0];
		if (fb->base.pitches[0] < cur_size) {
-			DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
+			drm_dbg_kms(&i915->drm,
+				    "fb not wide enough for plane %c (%d vs %d)\n",
				      pipe_name(intel_crtc->pipe),
				      cur_size, fb->base.pitches[0]);
			fb = NULL;
@@ -372,7 +381,8 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
		cur_size = crtc->state->adjusted_mode.crtc_vdisplay;
		cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
		cur_size *= fb->base.pitches[0];
-		DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
+		drm_dbg_kms(&i915->drm,
+			    "pipe %c area: %dx%d, bpp: %d, size: %d\n",
			      pipe_name(intel_crtc->pipe),
			      crtc->state->adjusted_mode.crtc_hdisplay,
			      crtc->state->adjusted_mode.crtc_vdisplay,
@@ -380,20 +390,23 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
			      cur_size);

		if (cur_size > max_size) {
-			DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
+			drm_dbg_kms(&i915->drm,
+				    "fb not big enough for plane %c (%d vs %d)\n",
				      pipe_name(intel_crtc->pipe),
				      cur_size, max_size);
			fb = NULL;
			break;
		}

-		DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n",
+		drm_dbg_kms(&i915->drm,
+			    "fb big enough for plane %c (%d >= %d)\n",
			      pipe_name(intel_crtc->pipe),
			      max_size, cur_size);
 	}

 	if (!fb) {
-		DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n");
+		drm_dbg_kms(&i915->drm,
+			    "BIOS fb not suitable for all pipes, not using\n");
		goto out;
 	}

@@ -415,7 +428,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
 	}

-	DRM_DEBUG_KMS("using BIOS fb for initial console\n");
+	drm_dbg_kms(&i915->drm, "using BIOS fb for initial console\n");
 	return true;

 out:
@@ -522,8 +535,9 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv)
  * processing, fbdev will perform a full connector reprobe if a hotplug event
  * was received while HPD was suspended.
  */
-static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
+static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state)
 {
+	struct intel_fbdev *ifbdev = i915->fbdev;
 	bool send_hpd = false;

 	mutex_lock(&ifbdev->hpd_lock);
@@ -533,7 +547,7 @@ static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
 	mutex_unlock(&ifbdev->hpd_lock);

 	if (send_hpd) {
-		DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
+		drm_dbg_kms(&i915->drm, "Handling delayed fbcon HPD event\n");
		drm_fb_helper_hotplug_event(&ifbdev->helper);
 	}
 }
@@ -588,7 +602,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
 	drm_fb_helper_set_suspend(&ifbdev->helper, state);
 	console_unlock();

-	intel_fbdev_hpd_set_suspend(ifbdev, state);
+	intel_fbdev_hpd_set_suspend(dev_priv, state);
 }

 void intel_fbdev_output_poll_changed(struct drm_device *dev)
@@ -71,6 +71,7 @@ struct intel_global_state *
intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
struct intel_global_obj *obj)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
int index, num_objs, i;
size_t size;
struct __intel_global_objs_state *arr;
@@ -106,7 +107,7 @@ intel_atomic_get_global_obj_state(struct intel_atomic_state *state,

state->num_global_objs = num_objs;

DRM_DEBUG_ATOMIC("Added new global object %p state %p to %p\n",
drm_dbg_atomic(&i915->drm, "Added new global object %p state %p to %p\n",
obj, obj_state, state);

return obj_state;

@@ -1391,6 +1391,7 @@ static
int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_rep_stream_manage stream_manage;
@@ -1431,7 +1432,7 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
hdcp->seq_num_m++;

if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
DRM_DEBUG_KMS("seq_num_m roll over.\n");
drm_dbg_kms(&i915->drm, "seq_num_m roll over.\n");
return -1;
}

@@ -2075,7 +2076,8 @@ int intel_hdcp_disable(struct intel_connector *connector)
return ret;
}

void intel_hdcp_update_pipe(struct intel_encoder *encoder,
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{

@@ -11,6 +11,7 @@
struct drm_connector;
struct drm_connector_state;
struct drm_i915_private;
struct intel_atomic_state;
struct intel_connector;
struct intel_crtc_state;
struct intel_encoder;
@@ -26,7 +27,8 @@ int intel_hdcp_init(struct intel_connector *connector,
int intel_hdcp_enable(struct intel_connector *connector,
enum transcoder cpu_transcoder, u8 content_type);
int intel_hdcp_disable(struct intel_connector *connector);
void intel_hdcp_update_pipe(struct intel_encoder *encoder,
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);

@@ -707,12 +707,14 @@ void intel_read_infoframe(struct intel_encoder *encoder,
/* see comment above for the reason for this offset */
ret = hdmi_infoframe_unpack(frame, buffer + 1, sizeof(buffer) - 1);
if (ret) {
DRM_DEBUG_KMS("Failed to unpack infoframe type 0x%02x\n", type);
drm_dbg_kms(encoder->base.dev,
"Failed to unpack infoframe type 0x%02x\n", type);
return;
}

if (frame->any.type != type)
DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
drm_dbg_kms(encoder->base.dev,
"Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
frame->any.type, type);
}

@@ -853,7 +855,8 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder,

ret = drm_hdmi_infoframe_set_hdr_metadata(frame, conn_state);
if (ret < 0) {
DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n");
drm_dbg_kms(&dev_priv->drm,
"couldn't set HDR metadata in infoframe\n");
return false;
}

@@ -893,7 +896,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
if (!(val & VIDEO_DIP_ENABLE))
return;
if (port != (val & VIDEO_DIP_PORT_MASK)) {
DRM_DEBUG_KMS("video DIP still enabled on port %c\n",
drm_dbg_kms(&dev_priv->drm,
"video DIP still enabled on port %c\n",
(val & VIDEO_DIP_PORT_MASK) >> 29);
return;
}
@@ -906,7 +910,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,

if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
DRM_DEBUG_KMS("video DIP already enabled on port %c\n",
drm_dbg_kms(&dev_priv->drm,
"video DIP already enabled on port %c\n",
(val & VIDEO_DIP_PORT_MASK) >> 29);
return;
}
@@ -1264,7 +1269,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return;

DRM_DEBUG_KMS("%s DP dual mode adaptor TMDS output\n",
drm_dbg_kms(&dev_priv->drm, "%s DP dual mode adaptor TMDS output\n",
enable ? "Enabling" : "Disabling");

drm_dp_dual_mode_set_tmds_output(hdmi->dp_dual_mode.type,
@@ -1346,13 +1351,14 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an,
DRM_HDCP_AN_LEN);
if (ret) {
DRM_DEBUG_KMS("Write An over DDC failed (%d)\n", ret);
drm_dbg_kms(&i915->drm, "Write An over DDC failed (%d)\n",
ret);
return ret;
}

ret = intel_gmbus_output_aksv(adapter);
if (ret < 0) {
DRM_DEBUG_KMS("Failed to output aksv (%d)\n", ret);
drm_dbg_kms(&i915->drm, "Failed to output aksv (%d)\n", ret);
return ret;
}
return 0;
@@ -1361,11 +1367,14 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
u8 *bksv)
{
struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);

int ret;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret)
DRM_DEBUG_KMS("Read Bksv over DDC failed (%d)\n", ret);
drm_dbg_kms(&i915->drm, "Read Bksv over DDC failed (%d)\n",
ret);
return ret;
}

@@ -1373,11 +1382,14 @@ static
int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
u8 *bstatus)
{
struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);

int ret;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret)
DRM_DEBUG_KMS("Read bstatus over DDC failed (%d)\n", ret);
drm_dbg_kms(&i915->drm, "Read bstatus over DDC failed (%d)\n",
ret);
return ret;
}

@@ -1385,12 +1397,14 @@ static
int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
bool *repeater_present)
{
struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret;
u8 val;

ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
ret);
return ret;
}
*repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT;
@@ -1401,11 +1415,14 @@ static
int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
u8 *ri_prime)
{
struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);

int ret;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret)
DRM_DEBUG_KMS("Read Ri' over DDC failed (%d)\n", ret);
drm_dbg_kms(&i915->drm, "Read Ri' over DDC failed (%d)\n",
ret);
return ret;
}

@@ -1413,12 +1430,14 @@ static
int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
bool *ksv_ready)
{
struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret;
u8 val;

ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
ret);
return ret;
}
*ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY;
@@ -1429,11 +1448,13 @@ static
int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
int num_downstream, u8 *ksv_fifo)
{
struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret;
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO,
ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
if (ret) {
DRM_DEBUG_KMS("Read ksv fifo over DDC failed (%d)\n", ret);
drm_dbg_kms(&i915->drm,
"Read ksv fifo over DDC failed (%d)\n", ret);
return ret;
}
return 0;
@@ -1443,6 +1464,7 @@ static
int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
int i, u32 *part)
{
struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret;

if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
@@ -1451,7 +1473,8 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i),
part, DRM_HDCP_V_PRIME_PART_LEN);
if (ret)
DRM_DEBUG_KMS("Read V'[%d] over DDC failed (%d)\n", i, ret);
drm_dbg_kms(&i915->drm, "Read V'[%d] over DDC failed (%d)\n",
i, ret);
return ret;
}

@@ -1474,12 +1497,14 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)

ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, false);
if (ret) {
DRM_ERROR("Disable HDCP signalling failed (%d)\n", ret);
drm_err(&dev_priv->drm,
"Disable HDCP signalling failed (%d)\n", ret);
return ret;
}
ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, true);
if (ret) {
DRM_ERROR("Enable HDCP signalling failed (%d)\n", ret);
drm_err(&dev_priv->drm,
"Enable HDCP signalling failed (%d)\n", ret);
return ret;
}

@@ -1500,7 +1525,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,

ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, enable);
if (ret) {
DRM_ERROR("%s HDCP signalling failed (%d)\n",
drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n",
enable ? "Enable" : "Disable", ret);
return ret;
}
@@ -1536,10 +1561,13 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
intel_de_write(i915, HDCP_RPRIME(i915, cpu_transcoder, port), ri.reg);

/* Wait for Ri prime match */
if (wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) ==
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n",
intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)));
drm_err(&i915->drm,
"Ri' mismatch detected, link check failed (%x)\n",
intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder,
port)));
return false;
}
return true;
@@ -1588,16 +1616,18 @@ static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
}

static inline
int hdcp2_detect_msg_availability(struct intel_digital_port *intel_digital_port,
int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
u8 msg_id, bool *msg_ready,
ssize_t *msg_sz)
{
struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
int ret;

ret = intel_hdmi_hdcp2_read_rx_status(intel_digital_port, rx_status);
ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status);
if (ret < 0) {
DRM_DEBUG_KMS("rx_status read failed. Err %d\n", ret);
drm_dbg_kms(&i915->drm, "rx_status read failed. Err %d\n",
ret);
return ret;
}

@@ -1617,6 +1647,7 @@ static ssize_t
intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
u8 msg_id, bool paired)
{
struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
bool msg_ready = false;
int timeout, ret;
ssize_t msg_sz = 0;
@@ -1631,7 +1662,7 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
!ret && msg_ready && msg_sz, timeout * 1000,
1000, 5 * 1000);
if (ret)
DRM_DEBUG_KMS("msg_id: %d, ret: %d, timeout: %d\n",
drm_dbg_kms(&i915->drm, "msg_id: %d, ret: %d, timeout: %d\n",
msg_id, ret, timeout);

return ret ? ret : msg_sz;
@@ -1651,6 +1682,7 @@ static
int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
u8 msg_id, void *buf, size_t size)
{
struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp;
unsigned int offset;
@@ -1666,7 +1698,8 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
* available buffer.
*/
if (ret > size) {
DRM_DEBUG_KMS("msg_sz(%zd) is more than exp size(%zu)\n",
drm_dbg_kms(&i915->drm,
"msg_sz(%zd) is more than exp size(%zu)\n",
ret, size);
return -1;
}
@@ -1674,7 +1707,8 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET;
ret = intel_hdmi_hdcp_read(intel_dig_port, offset, buf, ret);
if (ret)
DRM_DEBUG_KMS("Failed to read msg_id: %d(%zd)\n", msg_id, ret);
drm_dbg_kms(&i915->drm, "Failed to read msg_id: %d(%zd)\n",
msg_id, ret);

return ret;
}
@@ -1870,15 +1904,17 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);

drm_WARN_ON(encoder->base.dev, !pipe_config->has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
drm_WARN_ON(&i915->drm, !pipe_config->has_hdmi_sink);
drm_dbg_kms(&i915->drm, "Enabling HDMI audio on pipe %c\n",
pipe_name(crtc->pipe));
intel_audio_codec_enable(encoder, pipe_config, conn_state);
}

static void g4x_enable_hdmi(struct intel_encoder *encoder,
static void g4x_enable_hdmi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -1900,7 +1936,8 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}

static void ibx_enable_hdmi(struct intel_encoder *encoder,
static void ibx_enable_hdmi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -1951,7 +1988,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}

static void cpt_enable_hdmi(struct intel_encoder *encoder,
static void cpt_enable_hdmi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2004,13 +2042,15 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
}

static void vlv_enable_hdmi(struct intel_encoder *encoder,
static void vlv_enable_hdmi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
}

static void intel_disable_hdmi(struct intel_encoder *encoder,
static void intel_disable_hdmi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -2068,7 +2108,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}

static void g4x_disable_hdmi(struct intel_encoder *encoder,
static void g4x_disable_hdmi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -2076,10 +2117,11 @@ static void g4x_disable_hdmi(struct intel_encoder *encoder,
intel_audio_codec_disable(encoder,
old_crtc_state, old_conn_state);

intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
}

static void pch_disable_hdmi(struct intel_encoder *encoder,
static void pch_disable_hdmi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -2088,11 +2130,12 @@ static void pch_disable_hdmi(struct intel_encoder *encoder,
old_crtc_state, old_conn_state);
}

static void pch_post_disable_hdmi(struct intel_encoder *encoder,
static void pch_post_disable_hdmi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
}

static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
@@ -2289,10 +2332,12 @@ static bool
intel_hdmi_ycbcr420_config(struct drm_connector *connector,
struct intel_crtc_state *config)
{
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(config->uapi.crtc);

if (!connector->ycbcr_420_allowed) {
DRM_ERROR("Platform doesn't support YCBCR420 output\n");
drm_err(&i915->drm,
"Platform doesn't support YCBCR420 output\n");
return false;
}

@@ -2300,7 +2345,8 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,

/* YCBCR 420 output conversion needs a scaler */
if (skl_update_scaler_crtc(config)) {
DRM_DEBUG_KMS("Scaler allocation for output failed\n");
drm_dbg_kms(&i915->drm,
"Scaler allocation for output failed\n");
return false;
}

@@ -2341,6 +2387,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -2365,12 +2412,14 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
if (crtc_state->pipe_bpp > bpc * 3)
crtc_state->pipe_bpp = bpc * 3;

DRM_DEBUG_KMS("picking %d bpc for HDMI output (pipe bpp: %d)\n",
drm_dbg_kms(&i915->drm,
"picking %d bpc for HDMI output (pipe bpp: %d)\n",
bpc, crtc_state->pipe_bpp);

if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock,
false, crtc_state->has_hdmi_sink) != MODE_OK) {
DRM_DEBUG_KMS("unsupported HDMI clock (%d kHz), rejecting mode\n",
drm_dbg_kms(&i915->drm,
"unsupported HDMI clock (%d kHz), rejecting mode\n",
crtc_state->port_clock);
return -EINVAL;
}
@@ -2434,7 +2483,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,

if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) {
if (!intel_hdmi_ycbcr420_config(connector, pipe_config)) {
DRM_ERROR("Can't support YCBCR420 output\n");
drm_err(&dev_priv->drm,
"Can't support YCBCR420 output\n");
return -EINVAL;
}
}
@@ -2474,25 +2524,26 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
}
}

intel_hdmi_compute_gcp_infoframe(encoder, pipe_config, conn_state);
intel_hdmi_compute_gcp_infoframe(encoder, pipe_config,
conn_state);

if (!intel_hdmi_compute_avi_infoframe(encoder, pipe_config, conn_state)) {
DRM_DEBUG_KMS("bad AVI infoframe\n");
drm_dbg_kms(&dev_priv->drm, "bad AVI infoframe\n");
return -EINVAL;
}

if (!intel_hdmi_compute_spd_infoframe(encoder, pipe_config, conn_state)) {
DRM_DEBUG_KMS("bad SPD infoframe\n");
drm_dbg_kms(&dev_priv->drm, "bad SPD infoframe\n");
return -EINVAL;
}

if (!intel_hdmi_compute_hdmi_infoframe(encoder, pipe_config, conn_state)) {
DRM_DEBUG_KMS("bad HDMI infoframe\n");
drm_dbg_kms(&dev_priv->drm, "bad HDMI infoframe\n");
return -EINVAL;
}

if (!intel_hdmi_compute_drm_infoframe(encoder, pipe_config, conn_state)) {
DRM_DEBUG_KMS("bad DRM infoframe\n");
drm_dbg_kms(&dev_priv->drm, "bad DRM infoframe\n");
return -EINVAL;
}

@@ -2542,7 +2593,8 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
*/
if (has_edid && !connector->override_edid &&
intel_bios_is_port_dp_dual_mode(dev_priv, port)) {
DRM_DEBUG_KMS("Assuming DP dual mode adaptor presence based on VBT\n");
drm_dbg_kms(&dev_priv->drm,
"Assuming DP dual mode adaptor presence based on VBT\n");
type = DRM_DP_DUAL_MODE_TYPE1_DVI;
} else {
type = DRM_DP_DUAL_MODE_NONE;
@@ -2556,7 +2608,8 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
hdmi->dp_dual_mode.max_tmds_clock =
drm_dp_dual_mode_max_tmds_clock(type, adapter);

DRM_DEBUG_KMS("DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
drm_dbg_kms(&dev_priv->drm,
"DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
drm_dp_get_dual_mode_type_name(type),
hdmi->dp_dual_mode.max_tmds_clock);
}
@@ -2578,7 +2631,8 @@ intel_hdmi_set_edid(struct drm_connector *connector)
edid = drm_get_edid(connector, i2c);

if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
DRM_DEBUG_KMS("HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
drm_dbg_kms(&dev_priv->drm,
"HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
intel_gmbus_force_bit(i2c, true);
edid = drm_get_edid(connector, i2c);
intel_gmbus_force_bit(i2c, false);
@@ -2610,7 +2664,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
intel_wakeref_t wakeref;

DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);

wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
@@ -2642,7 +2696,9 @@ out:
static void
intel_hdmi_force(struct drm_connector *connector)
{
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
struct drm_i915_private *i915 = to_i915(connector->dev);

drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);

intel_hdmi_unset_edid(connector);
@@ -2664,7 +2720,8 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
return intel_connector_update_modes(connector, edid);
}

static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
static void intel_hdmi_pre_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2678,7 +2735,8 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
pipe_config, conn_state);
}

static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2695,12 +2753,13 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);

g4x_enable_hdmi(encoder, pipe_config, conn_state);
g4x_enable_hdmi(state, encoder, pipe_config, conn_state);

vlv_wait_port_ready(dev_priv, dport, 0x0);
}

static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
static void vlv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2709,7 +2768,8 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
vlv_phy_pre_pll_enable(encoder, pipe_config);
}

static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
static void chv_hdmi_pre_pll_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2718,14 +2778,16 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
chv_phy_pre_pll_enable(encoder, pipe_config);
}

static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder,
static void chv_hdmi_post_pll_disable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
chv_phy_post_pll_disable(encoder, old_crtc_state);
}

static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
static void vlv_hdmi_post_disable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -2733,7 +2795,8 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
vlv_phy_reset_lanes(encoder, old_crtc_state);
}

static void chv_hdmi_post_disable(struct intel_encoder *encoder,
static void chv_hdmi_post_disable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -2748,7 +2811,8 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder,
vlv_dpio_put(dev_priv);
}

static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -2766,7 +2830,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);

g4x_enable_hdmi(encoder, pipe_config, conn_state);
g4x_enable_hdmi(state, encoder, pipe_config, conn_state);

vlv_wait_port_ready(dev_priv, dport, 0x0);

@@ -2785,6 +2849,7 @@ intel_hdmi_get_i2c_adapter(struct drm_connector *connector)

static void intel_hdmi_create_i2c_symlink(struct drm_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->dev);
struct i2c_adapter *adapter = intel_hdmi_get_i2c_adapter(connector);
struct kobject *i2c_kobj = &adapter->dev.kobj;
struct kobject *connector_kobj = &connector->kdev->kobj;
@@ -2792,7 +2857,7 @@ static void intel_hdmi_create_i2c_symlink(struct drm_connector *connector)

ret = sysfs_create_link(connector_kobj, i2c_kobj, i2c_kobj->name);
if (ret)
DRM_ERROR("Failed to create i2c symlink (%d)\n", ret);
drm_err(&i915->drm, "Failed to create i2c symlink (%d)\n", ret);
}

static void intel_hdmi_remove_i2c_symlink(struct drm_connector *connector)
@@ -2921,7 +2986,8 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
if (!sink_scrambling->supported)
return true;

DRM_DEBUG_KMS("[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
drm_dbg_kms(&dev_priv->drm,
"[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
connector->base.id, connector->name,
yesno(scrambling), high_tmds_clock_ratio ? 40 : 10);

@@ -3065,7 +3131,8 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)

ddc_pin = intel_bios_alternate_ddc_pin(encoder);
if (ddc_pin) {
DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
drm_dbg_kms(&dev_priv->drm,
"Using DDC pin 0x%x for port %c (VBT)\n",
ddc_pin, port_name(port));
return ddc_pin;
}
@@ -3083,7 +3150,8 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
else
ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);

DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
drm_dbg_kms(&dev_priv->drm,
"Using DDC pin 0x%x for port %c (platform default)\n",
ddc_pin, port_name(port));

return ddc_pin;
@@ -3141,7 +3209,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_encoder->port;
struct cec_connector_info conn_info;

DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n",
drm_dbg_kms(&dev_priv->drm,
"Adding HDMI connector on [ENCODER:%d:%s]\n",
intel_encoder->base.base.id, intel_encoder->base.name);

if (INTEL_GEN(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A))
@@ -3186,7 +3255,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
int ret = intel_hdcp_init(intel_connector,
&intel_hdmi_hdcp_shim);
if (ret)
DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
drm_dbg_kms(&dev_priv->drm,
"HDCP init failed, skipping.\n");
}

/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -3205,16 +3275,16 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
cec_notifier_conn_register(dev->dev, port_identifier(port),
&conn_info);
if (!intel_hdmi->cec_notifier)
DRM_DEBUG_KMS("CEC notifier get failed\n");
drm_dbg_kms(&dev_priv->drm, "CEC notifier get failed\n");
}

static enum intel_hotplug_state
intel_hdmi_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector, bool irq_received)
struct intel_connector *connector)
{
enum intel_hotplug_state state;

state = intel_encoder_hotplug(encoder, connector, irq_received);
state = intel_encoder_hotplug(encoder, connector);

/*
* On many platforms the HDMI live state signal is known to be
@@ -3228,7 +3298,7 @@ intel_hdmi_hotplug(struct intel_encoder *encoder,
* time around we didn't detect any change in the sink's connection
* status.
*/
if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
state = INTEL_HOTPLUG_RETRY;

return state;

@@ -270,8 +270,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)

enum intel_hotplug_state
intel_encoder_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector,
bool irq_received)
struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
enum drm_connector_status old_status;
@@ -392,12 +391,17 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct intel_encoder *encoder =
intel_attached_encoder(connector);

drm_dbg_kms(&dev_priv->drm,
"Connector %s (pin %i) received hotplug event.\n",
connector->base.name, pin);
if (hpd_event_bits & hpd_bit)
connector->hotplug_retries = 0;
else
connector->hotplug_retries++;

switch (encoder->hotplug(encoder, connector,
hpd_event_bits & hpd_bit)) {
drm_dbg_kms(&dev_priv->drm,
"Connector %s (pin %i) received hotplug event. (retry %d)\n",
connector->base.name, pin,
connector->hotplug_retries);

switch (encoder->hotplug(encoder, connector)) {
case INTEL_HOTPLUG_UNCHANGED:
break;
case INTEL_HOTPLUG_CHANGED:

@@ -15,8 +15,7 @@ enum port;

void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector,
bool irq_received);
struct intel_connector *connector);
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);

@@ -220,7 +220,8 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1));
}

static void intel_pre_enable_lvds(struct intel_encoder *encoder,
static void intel_pre_enable_lvds(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -301,7 +302,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
/*
* Sets the power state for the panel.
*/
static void intel_enable_lvds(struct intel_encoder *encoder,
static void intel_enable_lvds(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -323,7 +325,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
intel_panel_enable_backlight(pipe_config, conn_state);
}

static void intel_disable_lvds(struct intel_encoder *encoder,
static void intel_disable_lvds(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -341,28 +344,31 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, lvds_encoder->reg);
}

static void gmch_disable_lvds(struct intel_encoder *encoder,
static void gmch_disable_lvds(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)

{
intel_panel_disable_backlight(old_conn_state);

intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state);
}

static void pch_disable_lvds(struct intel_encoder *encoder,
static void pch_disable_lvds(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_panel_disable_backlight(old_conn_state);
}

static void pch_post_disable_lvds(struct intel_encoder *encoder,
static void pch_post_disable_lvds(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state);
}

static enum drm_mode_status

@@ -1342,7 +1342,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
if (!HAS_OVERLAY(dev_priv))
return;

engine = dev_priv->engine[RCS0];
engine = dev_priv->gt.engine[RCS0];
if (!engine || !engine->kernel_context)
return;

@@ -684,9 +684,10 @@ static void
intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;

DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", level);

level = intel_panel_compute_brightness(connector, level);
panel->backlight.set(conn_state, level);
@@ -867,7 +868,7 @@ void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_st
* another client is not activated.
*/
if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
drm_dbg(&dev_priv->drm,
drm_dbg_kms(&dev_priv->drm,
"Skipping backlight disable on vga switch\n");
return;
}
@@ -1244,7 +1245,7 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)

mutex_unlock(&dev_priv->backlight_lock);

drm_dbg(&dev_priv->drm, "get backlight PWM = %d\n", val);
drm_dbg_kms(&dev_priv->drm, "get backlight PWM = %d\n", val);
return val;
}

@@ -1335,6 +1336,7 @@ static const struct backlight_ops intel_backlight_device_ops = {

int intel_backlight_device_register(struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
struct backlight_properties props;

@@ -1374,13 +1376,14 @@ int intel_backlight_device_register(struct intel_connector *connector)
&intel_backlight_device_ops, &props);

if (IS_ERR(panel->backlight.device)) {
DRM_ERROR("Failed to register backlight: %ld\n",
drm_err(&i915->drm, "Failed to register backlight: %ld\n",
PTR_ERR(panel->backlight.device));
panel->backlight.device = NULL;
return -ENODEV;
}

DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n",
drm_dbg_kms(&i915->drm,
"Connector %s backlight sysfs interface registered\n",
connector->base.name);

return 0;
@@ -1931,7 +1934,8 @@ static int pwm_setup_backlight(struct intel_connector *connector,
return 0;
}

void intel_panel_update_backlight(struct intel_encoder *encoder,
void intel_panel_update_backlight(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{

@@ -37,7 +37,8 @@ int intel_panel_setup_backlight(struct drm_connector *connector,
enum pipe pipe);
void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_panel_update_backlight(struct intel_encoder *encoder,
void intel_panel_update_backlight(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);

@@ -137,41 +137,42 @@ static void psr_irq_control(struct drm_i915_private *dev_priv)
intel_de_write(dev_priv, imr_reg, val);
}

static void psr_event_print(u32 val, bool psr2_enabled)
static void psr_event_print(struct drm_i915_private *i915,
u32 val, bool psr2_enabled)
{
DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
DRM_DEBUG_KMS("\tPSR2 disabled\n");
drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
if (val & PSR_EVENT_GRAPHICS_RESET)
DRM_DEBUG_KMS("\tGraphics reset\n");
drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
if (val & PSR_EVENT_PCH_INTERRUPT)
DRM_DEBUG_KMS("\tPCH interrupt\n");
drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
if (val & PSR_EVENT_MEMORY_UP)
DRM_DEBUG_KMS("\tMemory up\n");
drm_dbg_kms(&i915->drm, "\tMemory up\n");
if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
DRM_DEBUG_KMS("\tFront buffer modification\n");
drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
if (val & PSR_EVENT_WD_TIMER_EXPIRE)
DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
DRM_DEBUG_KMS("\tPIPE registers updated\n");
drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
if (val & PSR_EVENT_REGISTER_UPDATE)
DRM_DEBUG_KMS("\tRegister updated\n");
drm_dbg_kms(&i915->drm, "\tRegister updated\n");
if (val & PSR_EVENT_HDCP_ENABLE)
DRM_DEBUG_KMS("\tHDCP enabled\n");
drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
DRM_DEBUG_KMS("\tKVMR session enabled\n");
drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
if (val & PSR_EVENT_VBI_ENABLE)
DRM_DEBUG_KMS("\tVBI enabled\n");
drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
if (val & PSR_EVENT_LPSP_MODE_EXIT)
DRM_DEBUG_KMS("\tLPSP mode exited\n");
drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
DRM_DEBUG_KMS("\tPSR disabled\n");
drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
}

void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
@@ -209,7 +210,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)

intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
val);
psr_event_print(val, psr2_enabled);
psr_event_print(dev_priv, val, psr2_enabled);
}
}

@@ -249,18 +250,21 @@ static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 val = 8; /* assume the worst if we can't read the value */

if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
else
DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
drm_dbg_kms(&i915->drm,
"Unable to get sink synchronization latency, assuming 8 frames\n");
return val;
}

static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u16 val;
ssize_t r;

@@ -273,7 +277,8 @@ static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)

r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
if (r != 2)
DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");
drm_dbg_kms(&i915->drm,
"Unable to read DP_PSR2_SU_X_GRANULARITY\n");

/*
* Spec says that if the value read is 0 the default granularity should

@@ -1430,7 +1430,8 @@ static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo,
#undef UPDATE_PROPERTY
}

static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
static void intel_sdvo_pre_enable(struct intel_atomic_state *state,
struct intel_encoder *intel_encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -1727,7 +1728,8 @@ static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
SDVO_AUDIO_PRESENCE_DETECT);
}

static void intel_disable_sdvo(struct intel_encoder *encoder,
static void intel_disable_sdvo(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -1775,20 +1777,23 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
}
}

static void pch_disable_sdvo(struct intel_encoder *encoder,
static void pch_disable_sdvo(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
}

static void pch_post_disable_sdvo(struct intel_encoder *encoder,
static void pch_post_disable_sdvo(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_disable_sdvo(encoder, old_crtc_state, old_conn_state);
intel_disable_sdvo(state, encoder, old_crtc_state, old_conn_state);
}

static void intel_enable_sdvo(struct intel_encoder *encoder,
static void intel_enable_sdvo(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
@@ -1934,12 +1939,11 @@ static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)

static enum intel_hotplug_state
intel_sdvo_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector,
bool irq_received)
struct intel_connector *connector)
{
intel_sdvo_enable_hotplug(encoder);

return intel_encoder_hotplug(encoder, connector, irq_received);
return intel_encoder_hotplug(encoder, connector);
}

static bool

@@ -2503,6 +2503,7 @@ static const u32 skl_plane_formats[] = {
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_XYUV8888,
};

static const u32 skl_planar_formats[] = {
@@ -2521,6 +2522,7 @@ static const u32 skl_planar_formats[] = {
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_NV12,
DRM_FORMAT_XYUV8888,
};

static const u32 glk_planar_formats[] = {
@@ -2539,6 +2541,7 @@ static const u32 glk_planar_formats[] = {
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_NV12,
DRM_FORMAT_XYUV8888,
DRM_FORMAT_P010,
DRM_FORMAT_P012,
DRM_FORMAT_P016,
@@ -2562,6 +2565,7 @@ static const u32 icl_sdr_y_plane_formats[] = {
DRM_FORMAT_Y210,
DRM_FORMAT_Y212,
DRM_FORMAT_Y216,
DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
DRM_FORMAT_XVYU12_16161616,
DRM_FORMAT_XVYU16161616,
@@ -2589,6 +2593,7 @@ static const u32 icl_sdr_uv_plane_formats[] = {
DRM_FORMAT_Y210,
DRM_FORMAT_Y212,
DRM_FORMAT_Y216,
DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
DRM_FORMAT_XVYU12_16161616,
DRM_FORMAT_XVYU16161616,
@@ -2620,6 +2625,7 @@ static const u32 icl_hdr_plane_formats[] = {
DRM_FORMAT_Y210,
DRM_FORMAT_Y212,
DRM_FORMAT_Y216,
DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
DRM_FORMAT_XVYU12_16161616,
DRM_FORMAT_XVYU16161616,
@@ -2790,6 +2796,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
case DRM_FORMAT_XYUV8888:
case DRM_FORMAT_P010:
case DRM_FORMAT_P012:
case DRM_FORMAT_P016:
@@ -2817,19 +2824,25 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
}
}

static bool gen12_plane_supports_mc_ccs(enum plane_id plane_id)
static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv,
enum plane_id plane_id)
{
/* Wa_14010477008:tgl[a0..c0] */
if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0))
return false;

return plane_id < PLANE_SPRITE4;
}

static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
struct drm_i915_private *dev_priv = to_i915(_plane->dev);
struct intel_plane *plane = to_intel_plane(_plane);

switch (modifier) {
case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
if (!gen12_plane_supports_mc_ccs(plane->id))
if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id))
return false;
/* fall through */
case DRM_FORMAT_MOD_LINEAR:
@@ -2854,6 +2867,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
case DRM_FORMAT_XYUV8888:
case DRM_FORMAT_P010:
case DRM_FORMAT_P012:
case DRM_FORMAT_P016:
@@ -2998,9 +3012,10 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
}
}

static const u64 *gen12_get_plane_modifiers(enum plane_id plane_id)
static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv,
enum plane_id plane_id)
{
if (gen12_plane_supports_mc_ccs(plane_id))
if (gen12_plane_supports_mc_ccs(dev_priv, plane_id))
return gen12_plane_format_modifiers_mc_ccs;
else
return gen12_plane_format_modifiers_rc_ccs;
@@ -3070,7 +3085,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,

plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
if (INTEL_GEN(dev_priv) >= 12) {
modifiers = gen12_get_plane_modifiers(plane_id);
modifiers = gen12_get_plane_modifiers(dev_priv, plane_id);
plane_funcs = &gen12_plane_funcs;
} else {
if (plane->has_ccs)

@@ -152,6 +152,7 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
 static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
 				      u32 live_status_mask)
 {
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	u32 valid_hpd_mask;
 
 	if (dig_port->tc_legacy_port)
@@ -164,7 +165,8 @@ static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
 		return;
 
 	/* If live status mismatches the VBT flag, trust the live status. */
-	DRM_ERROR("Port %s: live status %08x mismatches the legacy port flag, fix flag\n",
-		  dig_port->tc_port_name, live_status_mask);
+	drm_err(&i915->drm,
+		"Port %s: live status %08x mismatches the legacy port flag, fix flag\n",
+		dig_port->tc_port_name, live_status_mask);
 
 	dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
@@ -233,8 +235,7 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
 	if (val == 0xffffffff) {
 		drm_dbg_kms(&i915->drm,
 			    "Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
-			    dig_port->tc_port_name,
-			    enableddisabled(enable));
+			    dig_port->tc_port_name, enableddisabled(enable));
 
 		return false;
 	}
@@ -286,10 +287,11 @@ static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
 static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
 			       int required_lanes)
 {
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	int max_lanes;
 
 	if (!icl_tc_phy_status_complete(dig_port)) {
-		DRM_DEBUG_KMS("Port %s: PHY not ready\n",
-			      dig_port->tc_port_name);
+		drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
+			    dig_port->tc_port_name);
 		goto out_set_tbt_alt_mode;
 	}
@@ -311,13 +313,14 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
 	 * became disconnected. Not necessary for legacy mode.
 	 */
 	if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
-		DRM_DEBUG_KMS("Port %s: PHY sudden disconnect\n",
-			      dig_port->tc_port_name);
+		drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
+			    dig_port->tc_port_name);
 		goto out_set_safe_mode;
 	}
 
 	if (max_lanes < required_lanes) {
-		DRM_DEBUG_KMS("Port %s: PHY max lanes %d < required lanes %d\n",
-			      dig_port->tc_port_name,
-			      max_lanes, required_lanes);
+		drm_dbg_kms(&i915->drm,
+			    "Port %s: PHY max lanes %d < required lanes %d\n",
+			    dig_port->tc_port_name,
+			    max_lanes, required_lanes);
 		goto out_set_safe_mode;
@@ -357,14 +360,16 @@ static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
 
 static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
 {
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
 	if (!icl_tc_phy_status_complete(dig_port)) {
-		DRM_DEBUG_KMS("Port %s: PHY status not complete\n",
-			      dig_port->tc_port_name);
+		drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
+			    dig_port->tc_port_name);
 		return dig_port->tc_mode == TC_PORT_TBT_ALT;
 	}
 
 	if (icl_tc_phy_is_in_safe_mode(dig_port)) {
-		DRM_DEBUG_KMS("Port %s: PHY still in safe mode\n",
-			      dig_port->tc_port_name);
+		drm_dbg_kms(&i915->drm, "Port %s: PHY still in safe mode\n",
+			    dig_port->tc_port_name);
 
 		return false;
@@ -438,6 +443,7 @@ intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
 
 void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
 {
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
 	struct intel_encoder *encoder = &dig_port->base;
 	int active_links = 0;
 
@@ -451,7 +457,8 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
 
 	if (active_links) {
 		if (!icl_tc_phy_is_connected(dig_port))
-			DRM_DEBUG_KMS("Port %s: PHY disconnected with %d active link(s)\n",
-				      dig_port->tc_port_name, active_links);
+			drm_dbg_kms(&i915->drm,
+				    "Port %s: PHY disconnected with %d active link(s)\n",
+				    dig_port->tc_port_name, active_links);
 		intel_tc_port_link_init_refcount(dig_port, active_links);
 
@@ -462,7 +469,7 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
 		icl_tc_phy_connect(dig_port, 1);
 
 out:
-	DRM_DEBUG_KMS("Port %s: sanitize mode (%s)\n",
-		      dig_port->tc_port_name,
-		      tc_port_mode_name(dig_port->tc_mode));
+	drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
+		    dig_port->tc_port_name,
+		    tc_port_mode_name(dig_port->tc_mode));

@@ -914,7 +914,8 @@ intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
 }
 
 static void
-intel_enable_tv(struct intel_encoder *encoder,
+intel_enable_tv(struct intel_atomic_state *state,
+		struct intel_encoder *encoder,
 		const struct intel_crtc_state *pipe_config,
 		const struct drm_connector_state *conn_state)
 {
@@ -930,7 +931,8 @@ intel_enable_tv(struct intel_encoder *encoder,
 }
 
 static void
-intel_disable_tv(struct intel_encoder *encoder,
+intel_disable_tv(struct intel_atomic_state *state,
+		 struct intel_encoder *encoder,
 		 const struct intel_crtc_state *old_crtc_state,
 		 const struct drm_connector_state *old_conn_state)
 {
@@ -1414,7 +1416,8 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
 		       (color_conversion->bv << 16) | color_conversion->av);
 }
 
-static void intel_tv_pre_enable(struct intel_encoder *encoder,
+static void intel_tv_pre_enable(struct intel_atomic_state *state,
+				struct intel_encoder *encoder,
 				const struct intel_crtc_state *pipe_config,
 				const struct drm_connector_state *conn_state)
 {
@@ -1698,13 +1701,13 @@ intel_tv_detect(struct drm_connector *connector,
 		struct drm_modeset_acquire_ctx *ctx,
 		bool force)
 {
+	struct drm_i915_private *i915 = to_i915(connector->dev);
 	struct intel_tv *intel_tv = intel_attached_tv(to_intel_connector(connector));
 	enum drm_connector_status status;
 	int type;
 
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
-		      connector->base.id, connector->name,
-		      force);
+	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] force=%d\n",
+		    connector->base.id, connector->name, force);
 
 	if (force) {
 		struct intel_load_detect_pipe tmp;

@@ -759,7 +759,8 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder);
  * DSI port enable has to be done before pipe and plane enable, so we do it in
  * the pre_enable hook instead of the enable hook.
  */
-static void intel_dsi_pre_enable(struct intel_encoder *encoder,
+static void intel_dsi_pre_enable(struct intel_atomic_state *state,
+				 struct intel_encoder *encoder,
 				 const struct intel_crtc_state *pipe_config,
 				 const struct drm_connector_state *conn_state)
 {
@@ -858,7 +859,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
 }
 
-static void bxt_dsi_enable(struct intel_encoder *encoder,
+static void bxt_dsi_enable(struct intel_atomic_state *state,
+			   struct intel_encoder *encoder,
 			   const struct intel_crtc_state *crtc_state,
 			   const struct drm_connector_state *conn_state)
 {
@@ -871,14 +873,16 @@ static void bxt_dsi_enable(struct intel_encoder *encoder,
  * DSI port disable has to be done after pipe and plane disable, so we do it in
  * the post_disable hook.
  */
-static void intel_dsi_disable(struct intel_encoder *encoder,
+static void intel_dsi_disable(struct intel_atomic_state *state,
+			      struct intel_encoder *encoder,
 			      const struct intel_crtc_state *old_crtc_state,
 			      const struct drm_connector_state *old_conn_state)
 {
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	enum port port;
 
-	DRM_DEBUG_KMS("\n");
+	drm_dbg_kms(&i915->drm, "\n");
 
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
 	intel_panel_disable_backlight(old_conn_state);
@@ -906,7 +910,8 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
 		vlv_dsi_clear_device_ready(encoder);
 }
 
-static void intel_dsi_post_disable(struct intel_encoder *encoder,
+static void intel_dsi_post_disable(struct intel_atomic_state *state,
+				   struct intel_encoder *encoder,
 				   const struct intel_crtc_state *old_crtc_state,
 				   const struct drm_connector_state *old_conn_state)
 {

@@ -570,23 +570,19 @@ static void engines_idle_release(struct i915_gem_context *ctx,
 	engines->ctx = i915_gem_context_get(ctx);
 
 	for_each_gem_engine(ce, engines, it) {
-		struct dma_fence *fence;
-		int err = 0;
+		int err;
 
 		/* serialises with execbuf */
 		set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
 		if (!intel_context_pin_if_active(ce))
 			continue;
 
-		fence = i915_active_fence_get(&ce->timeline->last_request);
-		if (fence) {
-			err = i915_sw_fence_await_dma_fence(&engines->fence,
-							    fence, 0,
-							    GFP_KERNEL);
-			dma_fence_put(fence);
-		}
+		/* Wait until context is finally scheduled out and retired */
+		err = i915_sw_fence_await_active(&engines->fence,
+						 &ce->active,
+						 I915_ACTIVE_AWAIT_BARRIER);
 		intel_context_unpin(ce);
-		if (err < 0)
+		if (err)
 			goto kill;
 	}
@@ -757,21 +753,46 @@ err_free:
 	return ERR_PTR(err);
 }
 
+static inline struct i915_gem_engines *
+__context_engines_await(const struct i915_gem_context *ctx)
+{
+	struct i915_gem_engines *engines;
+
+	rcu_read_lock();
+	do {
+		engines = rcu_dereference(ctx->engines);
+		GEM_BUG_ON(!engines);
+
+		if (unlikely(!i915_sw_fence_await(&engines->fence)))
+			continue;
+
+		if (likely(engines == rcu_access_pointer(ctx->engines)))
+			break;
+
+		i915_sw_fence_complete(&engines->fence);
+	} while (1);
+	rcu_read_unlock();
+
+	return engines;
+}
+
 static int
 context_apply_all(struct i915_gem_context *ctx,
 		  int (*fn)(struct intel_context *ce, void *data),
 		  void *data)
 {
 	struct i915_gem_engines_iter it;
+	struct i915_gem_engines *e;
 	struct intel_context *ce;
 	int err = 0;
 
-	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+	e = __context_engines_await(ctx);
+	for_each_gem_engine(ce, e, it) {
 		err = fn(ce, data);
 		if (err)
 			break;
 	}
-	i915_gem_context_unlock_engines(ctx);
+	i915_sw_fence_complete(&e->fence);
 
 	return err;
 }
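
Note: __context_engines_await() above is a self-contained instance of a common
RCU idiom: take a reference on an RCU-protected pointer only if, after the
reference is taken, the pointer can still be observed as current. A minimal
sketch of the same loop, assuming a hypothetical kref-counted object in place
of the i915_sw_fence await/complete pair:

	#include <linux/kref.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	/* Hypothetical stand-in for i915_gem_engines; the kref plays the
	 * role that engines->fence plays in the driver. */
	struct engines_stub {
		struct kref ref;
		struct rcu_head rcu;
	};

	static void engines_stub_release(struct kref *ref)
	{
		struct engines_stub *e =
			container_of(ref, struct engines_stub, ref);

		kfree_rcu(e, rcu);	/* defer the free past all RCU readers */
	}

	static struct engines_stub *
	engines_await(struct engines_stub __rcu **slot)
	{
		struct engines_stub *e;

		rcu_read_lock();
		do {
			e = rcu_dereference(*slot);

			/* Already dying? Re-read the slot and try again. */
			if (unlikely(!kref_get_unless_zero(&e->ref)))
				continue;

			/* Still the current pointer? Our reference is good. */
			if (likely(e == rcu_access_pointer(*slot)))
				break;

			/* Raced with an update: drop the stale ref, retry. */
			kref_put(&e->ref, engines_stub_release);
		} while (1);
		rcu_read_unlock();

		return e;
	}
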
@@ -786,11 +807,13 @@ static int __apply_ppgtt(struct intel_context *ce, void *vm)
 static struct i915_address_space *
 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
 {
-	struct i915_address_space *old = i915_gem_context_vm(ctx);
+	struct i915_address_space *old;
+
+	old = rcu_replace_pointer(ctx->vm,
+				  i915_vm_open(vm),
+				  lockdep_is_held(&ctx->mutex));
 	GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
 
-	rcu_assign_pointer(ctx->vm, i915_vm_open(vm));
 	context_apply_all(ctx, __apply_ppgtt, vm);
 
 	return old;
@@ -1069,30 +1092,6 @@ static void cb_retire(struct i915_active *base)
 	kfree(cb);
 }
 
-static inline struct i915_gem_engines *
-__context_engines_await(const struct i915_gem_context *ctx)
-{
-	struct i915_gem_engines *engines;
-
-	rcu_read_lock();
-	do {
-		engines = rcu_dereference(ctx->engines);
-		if (unlikely(!engines))
-			break;
-
-		if (unlikely(!i915_sw_fence_await(&engines->fence)))
-			continue;
-
-		if (likely(engines == rcu_access_pointer(ctx->engines)))
-			break;
-
-		i915_sw_fence_complete(&engines->fence);
-	} while (1);
-	rcu_read_unlock();
-
-	return engines;
-}
-
 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
 static int context_barrier_task(struct i915_gem_context *ctx,
 				intel_engine_mask_t engines,
@@ -1401,8 +1400,8 @@ static int get_ringsize(struct i915_gem_context *ctx,
 	return 0;
 }
 
-static int
-user_to_context_sseu(struct drm_i915_private *i915,
+int
+i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
 			      const struct drm_i915_gem_context_param_sseu *user,
 			      struct intel_sseu *context)
 {
@@ -1539,7 +1538,7 @@ static int set_sseu(struct i915_gem_context *ctx,
 		goto out_ce;
 	}
 
-	ret = user_to_context_sseu(i915, &user_sseu, &sseu);
+	ret = i915_gem_user_to_context_sseu(i915, &user_sseu, &sseu);
 	if (ret)
 		goto out_ce;
 

@@ -225,4 +225,8 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);
 struct i915_lut_handle *i915_lut_handle_alloc(void);
 void i915_lut_handle_free(struct i915_lut_handle *lut);
 
+int i915_gem_user_to_context_sseu(struct drm_i915_private *i915,
+				  const struct drm_i915_gem_context_param_sseu *user,
+				  struct intel_sseu *context);
+
 #endif /* !__I915_GEM_CONTEXT_H__ */

@@ -369,7 +369,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 	struct i915_vma *vma;
 
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-	if (!atomic_read(&obj->bind_count))
+	if (list_empty(&obj->vma.list))
 		return;
 
 	mutex_lock(&i915->ggtt.vm.mutex);

@@ -40,6 +40,11 @@ struct eb_vma {
 	u32 handle;
 };
 
+struct eb_vma_array {
+	struct kref kref;
+	struct eb_vma vma[];
+};
+
 enum {
 	FORCE_CPU_RELOC = 1,
 	FORCE_GTT_RELOC,
@@ -52,7 +57,6 @@ enum {
 #define __EXEC_OBJECT_NEEDS_MAP		BIT(29)
 #define __EXEC_OBJECT_NEEDS_BIAS	BIT(28)
 #define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 28) /* all of the above */
-#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
 
 #define __EXEC_HAS_RELOC	BIT(31)
 #define __EXEC_INTERNAL_FLAGS	(~0u << 31)
@@ -283,6 +287,7 @@ struct i915_execbuffer {
 	 */
 	int lut_size;
 	struct hlist_head *buckets; /** ht for relocation handles */
+	struct eb_vma_array *array;
 };
 
 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
@@ -292,8 +297,62 @@ static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
 		 eb->args->batch_len);
 }
 
+static struct eb_vma_array *eb_vma_array_create(unsigned int count)
+{
+	struct eb_vma_array *arr;
+
+	arr = kvmalloc(struct_size(arr, vma, count), GFP_KERNEL | __GFP_NOWARN);
+	if (!arr)
+		return NULL;
+
+	kref_init(&arr->kref);
+	arr->vma[0].vma = NULL;
+
+	return arr;
+}
+
+static inline void eb_unreserve_vma(struct eb_vma *ev)
+{
+	struct i915_vma *vma = ev->vma;
+
+	if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE))
+		__i915_vma_unpin_fence(vma);
+
+	if (ev->flags & __EXEC_OBJECT_HAS_PIN)
+		__i915_vma_unpin(vma);
+
+	ev->flags &= ~(__EXEC_OBJECT_HAS_PIN |
+		       __EXEC_OBJECT_HAS_FENCE);
+}
+
+static void eb_vma_array_destroy(struct kref *kref)
+{
+	struct eb_vma_array *arr = container_of(kref, typeof(*arr), kref);
+	struct eb_vma *ev = arr->vma;
+
+	while (ev->vma) {
+		eb_unreserve_vma(ev);
+		i915_vma_put(ev->vma);
+		ev++;
+	}
+
+	kvfree(arr);
+}
+
+static void eb_vma_array_put(struct eb_vma_array *arr)
+{
+	kref_put(&arr->kref, eb_vma_array_destroy);
+}
+
 static int eb_create(struct i915_execbuffer *eb)
 {
+	/* Allocate an extra slot for use by the command parser + sentinel */
+	eb->array = eb_vma_array_create(eb->buffer_count + 2);
+	if (!eb->array)
+		return -ENOMEM;
+
+	eb->vma = eb->array->vma;
+
 	if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
 		unsigned int size = 1 + ilog2(eb->buffer_count);
 
@@ -327,8 +386,10 @@ static int eb_create(struct i915_execbuffer *eb)
 			break;
 		} while (--size);
 
-		if (unlikely(!size))
+		if (unlikely(!size)) {
+			eb_vma_array_put(eb->array);
 			return -ENOMEM;
+		}
 
 		eb->lut_size = size;
 	} else {
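
Note: the eb_vma_array added above is the stock recipe for a kref-managed
flexible-array allocation. The same skeleton in isolation, with an invented
struct item as the element type:

	#include <linux/kref.h>
	#include <linux/mm.h>
	#include <linux/overflow.h>

	struct item {
		void *payload;
	};

	struct item_array {
		struct kref kref;
		struct item item[];	/* sized at allocation time */
	};

	static struct item_array *item_array_create(unsigned int count)
	{
		struct item_array *arr;

		/* struct_size() guards the header + count * elem overflow */
		arr = kvmalloc(struct_size(arr, item, count), GFP_KERNEL);
		if (!arr)
			return NULL;

		kref_init(&arr->kref);	/* creator holds the first reference */
		return arr;
	}

	static void item_array_destroy(struct kref *kref)
	{
		kvfree(container_of(kref, struct item_array, kref));
	}

	static void item_array_put(struct item_array *arr)
	{
		kref_put(&arr->kref, item_array_destroy);
	}
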
@@ -368,6 +429,32 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
 	return false;
 }
 
+static u64 eb_pin_flags(const struct drm_i915_gem_exec_object2 *entry,
+			unsigned int exec_flags)
+{
+	u64 pin_flags = 0;
+
+	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
+		pin_flags |= PIN_GLOBAL;
+
+	/*
+	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
+	 * limit address to the first 4GBs for unflagged objects.
+	 */
+	if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
+		pin_flags |= PIN_ZONE_4G;
+
+	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
+		pin_flags |= PIN_MAPPABLE;
+
+	if (exec_flags & EXEC_OBJECT_PINNED)
+		pin_flags |= entry->offset | PIN_OFFSET_FIXED;
+	else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
+		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+
+	return pin_flags;
+}
+
 static inline bool
 eb_pin_vma(struct i915_execbuffer *eb,
 	   const struct drm_i915_gem_exec_object2 *entry,
@@ -385,9 +472,20 @@ eb_pin_vma(struct i915_execbuffer *eb,
 	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
 		pin_flags |= PIN_GLOBAL;
 
-	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
+	/* Attempt to reuse the current location if available */
+	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags))) {
+		if (entry->flags & EXEC_OBJECT_PINNED)
+			return false;
+
+		/* Failing that pick any _free_ space if suitable */
+		if (unlikely(i915_vma_pin(vma,
+					  entry->pad_to_size,
+					  entry->alignment,
+					  eb_pin_flags(entry, ev->flags) |
+					  PIN_USER | PIN_NOEVICT)))
 			return false;
+	}
 
 	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
 		if (unlikely(i915_vma_pin_fence(vma))) {
 			i915_vma_unpin(vma);
@@ -402,26 +500,6 @@ eb_pin_vma(struct i915_execbuffer *eb,
 	return !eb_vma_misplaced(entry, vma, ev->flags);
 }
 
-static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
-{
-	GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));
-
-	if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
-		__i915_vma_unpin_fence(vma);
-
-	__i915_vma_unpin(vma);
-}
-
-static inline void
-eb_unreserve_vma(struct eb_vma *ev)
-{
-	if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
-		return;
-
-	__eb_unreserve_vma(ev->vma, ev->flags);
-	ev->flags &= ~__EXEC_OBJECT_RESERVED;
-}
-
 static int
 eb_validate_vma(struct i915_execbuffer *eb,
 		struct drm_i915_gem_exec_object2 *entry,
@@ -481,7 +559,7 @@ eb_add_vma(struct i915_execbuffer *eb,
 
 	GEM_BUG_ON(i915_vma_is_closed(vma));
 
-	ev->vma = i915_vma_get(vma);
+	ev->vma = vma;
 	ev->exec = entry;
 	ev->flags = entry->flags;
 
@@ -547,28 +625,9 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 			  u64 pin_flags)
 {
 	struct drm_i915_gem_exec_object2 *entry = ev->exec;
-	unsigned int exec_flags = ev->flags;
 	struct i915_vma *vma = ev->vma;
 	int err;
 
-	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
-		pin_flags |= PIN_GLOBAL;
-
-	/*
-	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
-	 * limit address to the first 4GBs for unflagged objects.
-	 */
-	if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
-		pin_flags |= PIN_ZONE_4G;
-
-	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
-		pin_flags |= PIN_MAPPABLE;
-
-	if (exec_flags & EXEC_OBJECT_PINNED)
-		pin_flags |= entry->offset | PIN_OFFSET_FIXED;
-	else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
-		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
-
 	if (drm_mm_node_allocated(&vma->node) &&
 	    eb_vma_misplaced(entry, vma, ev->flags)) {
 		err = i915_vma_unbind(vma);
@@ -578,7 +637,7 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 
 	err = i915_vma_pin(vma,
 			   entry->pad_to_size, entry->alignment,
-			   pin_flags);
+			   eb_pin_flags(entry, ev->flags) | pin_flags);
 	if (err)
 		return err;
 
@@ -587,7 +646,7 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 		eb->args->flags |= __EXEC_HAS_RELOC;
 	}
 
-	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
+	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
 		err = i915_vma_pin_fence(vma);
 		if (unlikely(err)) {
 			i915_vma_unpin(vma);
@@ -595,10 +654,10 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
 	}
 
 		if (vma->fence)
-			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
+			ev->flags |= __EXEC_OBJECT_HAS_FENCE;
 	}
 
-	ev->flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
+	ev->flags |= __EXEC_OBJECT_HAS_PIN;
 	GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));
 
 	return 0;
@@ -728,77 +787,117 @@ static int eb_select_context(struct i915_execbuffer *eb)
 	return 0;
 }
 
-static int eb_lookup_vmas(struct i915_execbuffer *eb)
+static int __eb_add_lut(struct i915_execbuffer *eb,
+			u32 handle, struct i915_vma *vma)
 {
-	struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
-	struct drm_i915_gem_object *obj;
-	unsigned int i, batch;
+	struct i915_gem_context *ctx = eb->gem_context;
+	struct i915_lut_handle *lut;
 	int err;
 
-	if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
-		return -ENOENT;
+	lut = i915_lut_handle_alloc();
+	if (unlikely(!lut))
+		return -ENOMEM;
 
+	i915_vma_get(vma);
+	if (!atomic_fetch_inc(&vma->open_count))
+		i915_vma_reopen(vma);
+	lut->handle = handle;
+	lut->ctx = ctx;
+
+	/* Check that the context hasn't been closed in the meantime */
+	err = -EINTR;
+	if (!mutex_lock_interruptible(&ctx->mutex)) {
+		err = -ENOENT;
+		if (likely(!i915_gem_context_is_closed(ctx)))
+			err = radix_tree_insert(&ctx->handles_vma, handle, vma);
+		if (err == 0) { /* And nor has this handle */
+			struct drm_i915_gem_object *obj = vma->obj;
+
+			i915_gem_object_lock(obj);
+			if (idr_find(&eb->file->object_idr, handle) == obj) {
+				list_add(&lut->obj_link, &obj->lut_list);
+			} else {
+				radix_tree_delete(&ctx->handles_vma, handle);
+				err = -ENOENT;
+			}
+			i915_gem_object_unlock(obj);
+		}
+		mutex_unlock(&ctx->mutex);
+	}
+	if (unlikely(err))
+		goto err;
+
+	return 0;
+
+err:
+	atomic_dec(&vma->open_count);
+	i915_vma_put(vma);
+	i915_lut_handle_free(lut);
+	return err;
+}
+
+static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
+{
+	do {
+		struct drm_i915_gem_object *obj;
+		struct i915_vma *vma;
+		int err;
+
+		rcu_read_lock();
+		vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle);
+		if (likely(vma))
+			vma = i915_vma_tryget(vma);
+		rcu_read_unlock();
+		if (likely(vma))
+			return vma;
+
+		obj = i915_gem_object_lookup(eb->file, handle);
+		if (unlikely(!obj))
+			return ERR_PTR(-ENOENT);
+
+		vma = i915_vma_instance(obj, eb->context->vm, NULL);
+		if (IS_ERR(vma)) {
+			i915_gem_object_put(obj);
+			return vma;
+		}
+
+		err = __eb_add_lut(eb, handle, vma);
+		if (likely(!err))
+			return vma;
+
+		i915_gem_object_put(obj);
+		if (err != -EEXIST)
+			return ERR_PTR(err);
+	} while (1);
+}
+
+static int eb_lookup_vmas(struct i915_execbuffer *eb)
+{
+	unsigned int batch = eb_batch_index(eb);
+	unsigned int i;
+	int err = 0;
+
 	INIT_LIST_HEAD(&eb->relocs);
 	INIT_LIST_HEAD(&eb->unbound);
 
-	batch = eb_batch_index(eb);
-
 	for (i = 0; i < eb->buffer_count; i++) {
-		u32 handle = eb->exec[i].handle;
-		struct i915_lut_handle *lut;
 		struct i915_vma *vma;
 
-		vma = radix_tree_lookup(handles_vma, handle);
-		if (likely(vma))
-			goto add_vma;
-
-		obj = i915_gem_object_lookup(eb->file, handle);
-		if (unlikely(!obj)) {
-			err = -ENOENT;
-			goto err_vma;
-		}
-
-		vma = i915_vma_instance(obj, eb->context->vm, NULL);
+		vma = eb_lookup_vma(eb, eb->exec[i].handle);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
-			goto err_obj;
+			break;
 		}
 
-		lut = i915_lut_handle_alloc();
-		if (unlikely(!lut)) {
-			err = -ENOMEM;
-			goto err_obj;
-		}
-
-		err = radix_tree_insert(handles_vma, handle, vma);
-		if (unlikely(err)) {
-			i915_lut_handle_free(lut);
-			goto err_obj;
-		}
-
-		/* transfer ref to lut */
-		if (!atomic_fetch_inc(&vma->open_count))
-			i915_vma_reopen(vma);
-		lut->handle = handle;
-		lut->ctx = eb->gem_context;
-
-		i915_gem_object_lock(obj);
-		list_add(&lut->obj_link, &obj->lut_list);
-		i915_gem_object_unlock(obj);
-
-add_vma:
 		err = eb_validate_vma(eb, &eb->exec[i], vma);
-		if (unlikely(err))
-			goto err_vma;
+		if (unlikely(err)) {
+			i915_vma_put(vma);
+			break;
		}
 
 		eb_add_vma(eb, i, batch, vma);
 	}
 
-	return 0;
-
-err_obj:
-	i915_gem_object_put(obj);
-err_vma:
 	eb->vma[i].vma = NULL;
 	return err;
 }
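
Note: eb_lookup_vma() above follows the lockless-lookup-with-locked-insert
shape: probe the radix tree under rcu_read_lock() first, and only on a miss
create the object and insert it under the mutex, retrying when -EEXIST says
another thread won the race. A condensed sketch, where cache_find(),
cache_insert(), obj_tryget(), obj_put() and make_obj() are all hypothetical
helpers standing in for the radix-tree and i915_vma calls:

	#include <linux/err.h>
	#include <linux/rcupdate.h>
	#include <linux/types.h>

	struct cache;
	struct obj;

	struct obj *cache_find(struct cache *c, u32 handle);	/* RCU-safe */
	int cache_insert(struct cache *c, u32 handle, struct obj *o);
	struct obj *obj_tryget(struct obj *o);	/* NULL if already dying */
	void obj_put(struct obj *o);
	struct obj *make_obj(u32 handle);	/* ERR_PTR() on failure */

	static struct obj *lookup_or_create(struct cache *c, u32 handle)
	{
		do {
			struct obj *o;
			int err;

			rcu_read_lock();
			o = cache_find(c, handle);	/* fast path, no lock */
			if (o)
				o = obj_tryget(o);
			rcu_read_unlock();
			if (o)
				return o;

			o = make_obj(handle);		/* slow path */
			if (IS_ERR(o))
				return o;

			err = cache_insert(c, handle, o); /* locks internally */
			if (!err)
				return o;

			obj_put(o);
			if (err != -EEXIST)
				return ERR_PTR(err);
			/* Lost the insertion race: retry the fast path. */
		} while (1);
	}
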
@@ -823,31 +922,13 @@ eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
 	}
 }
 
-static void eb_release_vmas(const struct i915_execbuffer *eb)
-{
-	const unsigned int count = eb->buffer_count;
-	unsigned int i;
-
-	for (i = 0; i < count; i++) {
-		struct eb_vma *ev = &eb->vma[i];
-		struct i915_vma *vma = ev->vma;
-
-		if (!vma)
-			break;
-
-		eb->vma[i].vma = NULL;
-
-		if (ev->flags & __EXEC_OBJECT_HAS_PIN)
-			__eb_unreserve_vma(vma, ev->flags);
-
-		i915_vma_put(vma);
-	}
-}
-
 static void eb_destroy(const struct i915_execbuffer *eb)
 {
 	GEM_BUG_ON(eb->reloc_cache.rq);
 
+	if (eb->array)
+		eb_vma_array_put(eb->array);
+
 	if (eb->lut_size > 0)
 		kfree(eb->buckets);
 }
@@ -1220,6 +1301,17 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
 	return cmd;
 }
 
+static inline bool use_reloc_gpu(struct i915_vma *vma)
+{
+	if (DBG_FORCE_RELOC == FORCE_GPU_RELOC)
+		return true;
+
+	if (DBG_FORCE_RELOC)
+		return false;
+
+	return !dma_resv_test_signaled_rcu(vma->resv, true);
+}
+
 static u64
 relocate_entry(struct i915_vma *vma,
 	       const struct drm_i915_gem_relocation_entry *reloc,
@@ -1231,9 +1323,7 @@ relocate_entry(struct i915_vma *vma,
 	bool wide = eb->reloc_cache.use_64bit_reloc;
 	void *vaddr;
 
-	if (!eb->reloc_cache.vaddr &&
-	    (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
-	     !dma_resv_test_signaled_rcu(vma->resv, true))) {
+	if (!eb->reloc_cache.vaddr && use_reloc_gpu(vma)) {
 		const unsigned int gen = eb->reloc_cache.gen;
 		unsigned int len;
 		u32 *batch;
@@ -1411,12 +1501,11 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
 {
 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
 	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
-	struct drm_i915_gem_relocation_entry __user *urelocs;
 	const struct drm_i915_gem_exec_object2 *entry = ev->exec;
-	unsigned int remain;
+	struct drm_i915_gem_relocation_entry __user *urelocs =
+		u64_to_user_ptr(entry->relocs_ptr);
+	unsigned long remain = entry->relocation_count;
 
-	urelocs = u64_to_user_ptr(entry->relocs_ptr);
-	remain = entry->relocation_count;
 	if (unlikely(remain > N_RELOC(ULONG_MAX)))
 		return -EINVAL;
 
@@ -1431,7 +1520,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
 	do {
 		struct drm_i915_gem_relocation_entry *r = stack;
 		unsigned int count =
-			min_t(unsigned int, remain, ARRAY_SIZE(stack));
+			min_t(unsigned long, remain, ARRAY_SIZE(stack));
 		unsigned int copied;
 
 		/*
@@ -1494,9 +1583,7 @@ static int eb_relocate(struct i915_execbuffer *eb)
 {
 	int err;
 
-	mutex_lock(&eb->gem_context->mutex);
 	err = eb_lookup_vmas(eb);
-	mutex_unlock(&eb->gem_context->mutex);
 	if (err)
 		return err;
 
@@ -1597,19 +1684,15 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 			err = i915_vma_move_to_active(vma, eb->request, flags);
 
 		i915_vma_unlock(vma);
-
-		__eb_unreserve_vma(vma, flags);
-		i915_vma_put(vma);
-
-		ev->vma = NULL;
+		eb_unreserve_vma(ev);
 	}
 	ww_acquire_fini(&acquire);
 
+	eb_vma_array_put(fetch_and_zero(&eb->array));
+
 	if (unlikely(err))
 		goto err_skip;
 
-	eb->exec = NULL;
-
 	/* Unconditionally flush any chipset caches (for streaming writes). */
 	intel_gt_chipset_flush(eb->engine->gt);
 	return 0;
@@ -1784,7 +1867,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
 	dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
 	dma_resv_unlock(shadow->resv);
 
-	dma_fence_work_commit(&pw->base);
+	dma_fence_work_commit_imm(&pw->base);
 	return 0;
 
 err_batch_unlock:
@@ -1861,6 +1944,7 @@ static int eb_parse(struct i915_execbuffer *eb)
 	eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
 	eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
 	eb->batch = &eb->vma[eb->buffer_count++];
+	eb->vma[eb->buffer_count].vma = NULL;
 
 	eb->trampoline = trampoline;
 	eb->batch_start_offset = 0;
@@ -2348,9 +2432,7 @@ static void eb_request_add(struct i915_execbuffer *eb)
 		__i915_request_skip(rq);
 	}
 
-	local_bh_disable();
 	__i915_request_queue(rq, &attr);
-	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
 
 	/* Try to clean up the client's timeline after submitting the request */
 	if (prev)
@@ -2386,8 +2468,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		args->flags |= __EXEC_HAS_RELOC;
 
 	eb.exec = exec;
-	eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
-	eb.vma[0].vma = NULL;
 
 	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
 	reloc_cache_init(&eb.reloc_cache, eb.i915);
@@ -2594,8 +2674,6 @@ err_parse:
 	if (batch->private)
 		intel_engine_pool_put(batch->private);
 err_vma:
-	if (eb.exec)
-		eb_release_vmas(&eb);
 	if (eb.trampoline)
 		i915_vma_unpin(eb.trampoline);
 	eb_unpin_engine(&eb);
@@ -2615,7 +2693,7 @@ err_in_fence:
 
 static size_t eb_element_size(void)
 {
-	return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma);
+	return sizeof(struct drm_i915_gem_exec_object2);
 }
 
 static bool check_buffer_count(size_t count)
@@ -2671,7 +2749,7 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
 	/* Copy in the exec list from userland */
 	exec_list = kvmalloc_array(count, sizeof(*exec_list),
 				   __GFP_NOWARN | GFP_KERNEL);
-	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
+	exec2_list = kvmalloc_array(count, eb_element_size(),
 				    __GFP_NOWARN | GFP_KERNEL);
 	if (exec_list == NULL || exec2_list == NULL) {
 		drm_dbg(&i915->drm,
@@ -2749,8 +2827,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 	if (err)
 		return err;
 
-	/* Allocate an extra slot for use by the command parser */
-	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
+	exec2_list = kvmalloc_array(count, eb_element_size(),
 				    __GFP_NOWARN | GFP_KERNEL);
 	if (exec2_list == NULL) {
 		drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",

@@ -206,7 +206,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 	}
 	obj->mmo.offsets = RB_ROOT;
 
-	GEM_BUG_ON(atomic_read(&obj->bind_count));
 	GEM_BUG_ON(obj->userfault_count);
 	GEM_BUG_ON(!list_empty(&obj->lut_list));
 

@@ -179,9 +179,6 @@ struct drm_i915_gem_object {
 #define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
 #define STRIDE_MASK (~TILING_MASK)
 
-	/** Count of VMA actually bound by this object */
-	atomic_t bind_count;
-
 	struct {
 		/*
 		 * Protects the pages and their use. Do not use directly, but

@@ -199,8 +199,6 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	if (i915_gem_object_has_pinned_pages(obj))
 		return -EBUSY;
 
-	GEM_BUG_ON(atomic_read(&obj->bind_count));
-
 	/* May be called by shrinker from within get_pages() (on another bo) */
 	mutex_lock(&obj->mm.lock);
 	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {

@@ -26,18 +26,6 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
 	if (!i915_gem_object_is_shrinkable(obj))
 		return false;
 
-	/*
-	 * Only report true if by unbinding the object and putting its pages
-	 * we can actually make forward progress towards freeing physical
-	 * pages.
-	 *
-	 * If the pages are pinned for any other reason than being bound
-	 * to the GPU, simply unbinding from the GPU is not going to succeed
-	 * in releasing our pin count on the pages themselves.
-	 */
-	if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
-		return false;
-
 	/*
 	 * We can only return physical pages to the system if we can either
 	 * discard the contents (because the user has marked them as being
@@ -54,6 +42,8 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
 	flags = 0;
 	if (shrink & I915_SHRINK_ACTIVE)
 		flags = I915_GEM_OBJECT_UNBIND_ACTIVE;
+	if (!(shrink & I915_SHRINK_BOUND))
+		flags = I915_GEM_OBJECT_UNBIND_TEST;
 
 	if (i915_gem_object_unbind(obj, flags) == 0)
 		__i915_gem_object_put_pages(obj);
@@ -194,10 +184,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
 		    i915_gem_object_is_framebuffer(obj))
 			continue;
 
-		if (!(shrink & I915_SHRINK_BOUND) &&
-		    atomic_read(&obj->bind_count))
-			continue;
-
 		if (!can_release_pages(obj))
 			continue;
 

@@ -381,14 +381,14 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
 	mutex_init(&i915->mm.stolen_lock);
 
 	if (intel_vgpu_active(i915)) {
-		dev_notice(i915->drm.dev,
+		drm_notice(&i915->drm,
 			   "%s, disabling use of stolen memory\n",
 			   "iGVT-g active");
 		return 0;
 	}
 
 	if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
-		dev_notice(i915->drm.dev,
+		drm_notice(&i915->drm,
 			   "%s, disabling use of stolen memory\n",
 			   "DMAR active");
 		return 0;

@@ -88,8 +88,7 @@ static void huge_put_pages(struct drm_i915_gem_object *obj,
 }
 
 static const struct drm_i915_gem_object_ops huge_ops = {
-	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
-		 I915_GEM_OBJECT_IS_SHRINKABLE,
+	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
 	.get_pages = huge_get_pages,
 	.put_pages = huge_put_pages,
 };

@@ -1925,7 +1925,7 @@ static int mock_context_barrier(void *arg)
 		goto out;
 	}
 
-	rq = igt_request_alloc(ctx, i915->engine[RCS0]);
+	rq = igt_request_alloc(ctx, i915->gt.engine[RCS0]);
 	if (IS_ERR(rq)) {
 		pr_err("Request allocation failed!\n");
 		goto out;

@@ -1156,9 +1156,6 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
 	if (err)
 		goto out_unmap;
 
-	GEM_BUG_ON(mmo->mmap_type == I915_MMAP_TYPE_GTT &&
-		   !atomic_read(&obj->bind_count));
-
 	err = check_present(addr, obj->base.size);
 	if (err) {
 		pr_err("%s: was not present\n", obj->mm.region->name);
@@ -1175,7 +1172,6 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
 		pr_err("Failed to unbind object!\n");
 		goto out_unmap;
 	}
-	GEM_BUG_ON(atomic_read(&obj->bind_count));
 
 	if (type != I915_MMAP_TYPE_GTT) {
 		__i915_gem_object_put_pages(obj);

@@ -14,7 +14,7 @@ static int igt_gem_object(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct drm_i915_gem_object *obj;
-	int err = -ENOMEM;
+	int err;
 
 	/* Basic test to ensure we can create an object */
 

@@ -32,5 +32,5 @@ void debugfs_engines_register(struct intel_gt *gt, struct dentry *root)
 		{ "engines", &engines_fops },
 	};
 
-	debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
+	intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
 }

@@ -9,6 +9,7 @@
 #include "debugfs_engines.h"
 #include "debugfs_gt.h"
 #include "debugfs_gt_pm.h"
+#include "uc/intel_uc_debugfs.h"
 #include "i915_drv.h"
 
 void debugfs_gt_register(struct intel_gt *gt)
@@ -24,17 +25,19 @@ void debugfs_gt_register(struct intel_gt *gt)
 
 	debugfs_engines_register(gt, root);
 	debugfs_gt_pm_register(gt, root);
+
+	intel_uc_debugfs_register(&gt->uc, root);
 }
 
-void debugfs_gt_register_files(struct intel_gt *gt,
-			       struct dentry *root,
-			       const struct debugfs_gt_file *files,
-			       unsigned long count)
+void intel_gt_debugfs_register_files(struct dentry *root,
+				     const struct debugfs_gt_file *files,
+				     unsigned long count, void *data)
 {
 	while (count--) {
-		if (!files->eval || files->eval(gt))
+		umode_t mode = files->fops->write ? 0644 : 0444;
+
+		if (!files->eval || files->eval(data))
 			debugfs_create_file(files->name,
-					    0444, root, gt,
+					    mode, root, data,
 					    files->fops);
 
 		files++;

@@ -28,12 +28,11 @@ void debugfs_gt_register(struct intel_gt *gt);
 struct debugfs_gt_file {
 	const char *name;
 	const struct file_operations *fops;
-	bool (*eval)(const struct intel_gt *gt);
+	bool (*eval)(void *data);
 };
 
-void debugfs_gt_register_files(struct intel_gt *gt,
-			       struct dentry *root,
-			       const struct debugfs_gt_file *files,
-			       unsigned long count);
+void intel_gt_debugfs_register_files(struct dentry *root,
+				     const struct debugfs_gt_file *files,
+				     unsigned long count, void *data);
 
 #endif /* DEBUGFS_GT_H */

@@ -506,8 +506,10 @@ static int llc_show(struct seq_file *m, void *data)
 	return 0;
 }
 
-static bool llc_eval(const struct intel_gt *gt)
+static bool llc_eval(void *data)
 {
+	struct intel_gt *gt = data;
+
 	return HAS_LLC(gt->i915);
 }
 
@@ -580,8 +582,10 @@ static int rps_boost_show(struct seq_file *m, void *data)
 	return 0;
 }
 
-static bool rps_eval(const struct intel_gt *gt)
+static bool rps_eval(void *data)
 {
+	struct intel_gt *gt = data;
+
 	return HAS_RPS(gt->i915);
 }
 
@@ -597,5 +601,5 @@ void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root)
 		{ "rps_boost", &rps_boost_fops, rps_eval },
 	};
 
-	debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
+	intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
 }

@@ -64,7 +64,7 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
 	if (!--b->irq_enabled)
 		irq_disable(engine);
 
-	b->irq_armed = false;
+	WRITE_ONCE(b->irq_armed, false);
 	intel_gt_pm_put_async(engine->gt);
 }
 
@@ -73,7 +73,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
 	unsigned long flags;
 
-	if (!b->irq_armed)
+	if (!READ_ONCE(b->irq_armed))
 		return;
 
 	spin_lock_irqsave(&b->irq_lock, flags);
@@ -233,7 +233,7 @@ static bool __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
 	 * which we can add a new waiter and avoid the cost of re-enabling
 	 * the irq.
 	 */
-	b->irq_armed = true;
+	WRITE_ONCE(b->irq_armed, true);
 
 	/*
 	 * Since we are waiting on a request, the GPU should be busy
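
Note: the conversions above to WRITE_ONCE()/READ_ONCE() are the standard cure
for a flag that is written under a spinlock but tested locklessly on a fast
path; the macros keep each access whole and stop the compiler from tearing or
refetching it. A minimal sketch of that split, assuming a hypothetical
breadcrumbs-like structure:

	#include <linux/compiler.h>
	#include <linux/spinlock.h>

	struct flag_guard {
		spinlock_t lock;
		bool armed;
	};

	static void guard_arm(struct flag_guard *g)
	{
		lockdep_assert_held(&g->lock);
		WRITE_ONCE(g->armed, true);	/* paired with lockless read */
	}

	static void guard_disarm(struct flag_guard *g)
	{
		unsigned long flags;

		if (!READ_ONCE(g->armed))	/* cheap lockless early-out */
			return;

		spin_lock_irqsave(&g->lock, flags);
		if (g->armed)			/* re-check under the lock */
			WRITE_ONCE(g->armed, false);
		spin_unlock_irqrestore(&g->lock, flags);
	}
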
@@ -114,6 +114,11 @@ int __intel_context_do_pin(struct intel_context *ce)
 		goto out_release;
 	}
 
+	if (unlikely(intel_context_is_closed(ce))) {
+		err = -ENOENT;
+		goto out_unlock;
+	}
+
 	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
 		err = intel_context_active_acquire(ce);
 		if (unlikely(err))

@@ -199,6 +199,8 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
 int intel_engines_init_mmio(struct intel_gt *gt);
 int intel_engines_init(struct intel_gt *gt);
 
+void intel_engine_free_request_pool(struct intel_engine_cs *engine);
+
 void intel_engines_release(struct intel_gt *gt);
 void intel_engines_free(struct intel_gt *gt);
 

@@ -347,8 +347,6 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 	gt->engine_class[info->class][info->instance] = engine;
 	gt->engine[id] = engine;
 
-	i915->engine[id] = engine;
-
 	return 0;
 }
 
@@ -425,17 +423,27 @@ void intel_engines_release(struct intel_gt *gt)
 		engine->release = NULL;
 
 		memset(&engine->reset, 0, sizeof(engine->reset));
-
-		gt->i915->engine[id] = NULL;
 	}
 }
 
+void intel_engine_free_request_pool(struct intel_engine_cs *engine)
+{
+	if (!engine->request_pool)
+		return;
+
+	kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
+}
+
 void intel_engines_free(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
+	/* Free the requests! dma-resv keeps fences around for an eternity */
+	rcu_barrier();
+
 	for_each_engine(engine, gt, id) {
+		intel_engine_free_request_pool(engine);
 		kfree(engine);
 		gt->engine[id] = NULL;
 	}
@@ -1225,6 +1233,49 @@ static void print_request(struct drm_printer *m,
 		   name);
 }
 
+static struct intel_timeline *get_timeline(struct i915_request *rq)
+{
+	struct intel_timeline *tl;
+
+	/*
+	 * Even though we are holding the engine->active.lock here, there
+	 * is no control over the submission queue per-se and we are
+	 * inspecting the active state at a random point in time, with an
+	 * unknown queue. Play safe and make sure the timeline remains valid.
+	 * (Only being used for pretty printing, one extra kref shouldn't
+	 * cause a camel stampede!)
+	 */
+	rcu_read_lock();
+	tl = rcu_dereference(rq->timeline);
+	if (!kref_get_unless_zero(&tl->kref))
+		tl = NULL;
+	rcu_read_unlock();
+
+	return tl;
+}
+
+static int print_ring(char *buf, int sz, struct i915_request *rq)
+{
+	int len = 0;
+
+	if (!i915_request_signaled(rq)) {
+		struct intel_timeline *tl = get_timeline(rq);
+
+		len = scnprintf(buf, sz,
+				"ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
+				i915_ggtt_offset(rq->ring->vma),
+				tl ? tl->hwsp_offset : 0,
+				hwsp_seqno(rq),
+				DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
+						      1000 * 1000));
+
+		if (tl)
+			intel_timeline_put(tl);
+	}
+
+	return len;
+}
+
 static void hexdump(struct drm_printer *m, const void *buf, size_t len)
 {
 	const size_t rowsize = 8 * sizeof(u32);
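
Note: print_ring() above leans on a property of scnprintf() that makes
incremental formatting into a fixed buffer safe: unlike snprintf(), it
returns the number of characters actually written, so "buf + len, sz - len"
never steps past the end of the buffer even after truncation. In isolation:

	#include <linux/kernel.h>

	static void build_header(char *buf, size_t sz)
	{
		int len;

		len = scnprintf(buf, sz, "Active[%d]: ", 0);
		len += scnprintf(buf + len, sz - len, "ring:{start:%08x}, ",
				 0x1000u);
		scnprintf(buf + len, sz - len, "rq: ");	/* never overflows */
	}
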
@@ -1254,27 +1305,6 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
 	}
 }
 
-static struct intel_timeline *get_timeline(struct i915_request *rq)
-{
-	struct intel_timeline *tl;
-
-	/*
-	 * Even though we are holding the engine->active.lock here, there
-	 * is no control over the submission queue per-se and we are
-	 * inspecting the active state at a random point in time, with an
-	 * unknown queue. Play safe and make sure the timeline remains valid.
-	 * (Only being used for pretty printing, one extra kref shouldn't
-	 * cause a camel stampede!)
-	 */
-	rcu_read_lock();
-	tl = rcu_dereference(rq->timeline);
-	if (!kref_get_unless_zero(&tl->kref))
-		tl = NULL;
-	rcu_read_unlock();
-
-	return tl;
-}
-
 static const char *repr_timer(const struct timer_list *t)
 {
 	if (!READ_ONCE(t->expires))
@@ -1295,6 +1325,12 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 
 	if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
 		drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
+	if (HAS_EXECLISTS(dev_priv)) {
+		drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
+			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
+		drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
+			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
+	}
 	drm_printf(m, "\tRING_START: 0x%08x\n",
 		   ENGINE_READ(engine, RING_START));
 	drm_printf(m, "\tRING_HEAD: 0x%08x\n",
@@ -1387,39 +1423,24 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 			int len;
 
 			len = scnprintf(hdr, sizeof(hdr),
-					"\t\tActive[%d]: ",
-					(int)(port - execlists->active));
-			if (!i915_request_signaled(rq)) {
-				struct intel_timeline *tl = get_timeline(rq);
-
-				len += scnprintf(hdr + len, sizeof(hdr) - len,
-						 "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
-						 i915_ggtt_offset(rq->ring->vma),
-						 tl ? tl->hwsp_offset : 0,
-						 hwsp_seqno(rq),
-						 DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
-								       1000 * 1000));
-
-				if (tl)
-					intel_timeline_put(tl);
-			}
+					"\t\tActive[%d]: ccid:%08x, ",
+					(int)(port - execlists->active),
+					upper_32_bits(rq->context->lrc_desc));
+			len += print_ring(hdr + len, sizeof(hdr) - len, rq);
 			scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
 			print_request(m, rq, hdr);
 		}
 		for (port = execlists->pending; (rq = *port); port++) {
-			struct intel_timeline *tl = get_timeline(rq);
-			char hdr[80];
+			char hdr[160];
 			int len;
 
-			snprintf(hdr, sizeof(hdr),
-				 "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
-				 (int)(port - execlists->pending),
-				 i915_ggtt_offset(rq->ring->vma),
-				 tl ? tl->hwsp_offset : 0,
-				 hwsp_seqno(rq));
+			len = scnprintf(hdr, sizeof(hdr),
+					"\t\tPending[%d]: ccid:%08x, ",
+					(int)(port - execlists->pending),
+					upper_32_bits(rq->context->lrc_desc));
+			len += print_ring(hdr + len, sizeof(hdr) - len, rq);
+			scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
 			print_request(m, rq, hdr);
-
-			if (tl)
-				intel_timeline_put(tl);
 		}
 		rcu_read_unlock();
 		execlists_active_unlock_bh(execlists);

@@ -31,7 +31,7 @@ static bool next_heartbeat(struct intel_engine_cs *engine)
 	delay = msecs_to_jiffies_timeout(delay);
 	if (delay >= HZ)
 		delay = round_jiffies_up_relative(delay);
-	schedule_delayed_work(&engine->heartbeat.work, delay);
+	mod_delayed_work(system_wq, &engine->heartbeat.work, delay);
 
 	return true;
 }

@@ -181,7 +181,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
 	 * Ergo, if we put ourselves on the timelines.active_list
 	 * (se intel_timeline_enter()) before we increment the
 	 * engine->wakeref.count, we may see the request completion and retire
-	 * it causing an undeflow of the engine->wakeref.
+	 * it causing an underflow of the engine->wakeref.
 	 */
 	flags = __timeline_mark_lock(ce);
 	GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);

@@ -37,6 +37,12 @@ static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine)
 	intel_wakeref_put_async(&engine->wakeref);
 }
 
+static inline void intel_engine_pm_put_delay(struct intel_engine_cs *engine,
+					     unsigned long delay)
+{
+	intel_wakeref_put_delay(&engine->wakeref, delay);
+}
+
 static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
 {
 	intel_wakeref_unlock_wait(&engine->wakeref);

@@ -156,6 +156,15 @@ struct intel_engine_execlists {
 	 */
 	struct i915_priolist default_priolist;
 
+	/**
+	 * @yield: CCID at the time of the last semaphore-wait interrupt.
+	 *
+	 * Instead of leaving a semaphore busy-spinning on an engine, we would
+	 * like to switch to another ready context, i.e. yielding the semaphore
+	 * timeslice.
+	 */
+	u32 yield;
+
 	/**
 	 * @error_interrupt: CS Master EIR
 	 *
@@ -308,6 +317,9 @@ struct intel_engine_cs {
 		struct list_head hold; /* ready requests, but on hold */
 	} active;
 
+	/* keep a request in reserve for a [pm] barrier under oom */
+	struct i915_request *request_pool;
+
 	struct llist_head barrier_tasks;
 
 	struct intel_context *kernel_context; /* pinned */

@@ -65,7 +65,7 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt)
 				   ggtt->mappable_end);
 	}
 
-	i915_ggtt_init_fences(ggtt);
+	intel_ggtt_init_fences(ggtt);
 
 	return 0;
 }
@@ -715,11 +715,13 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
  */
 void i915_ggtt_driver_release(struct drm_i915_private *i915)
 {
+	struct i915_ggtt *ggtt = &i915->ggtt;
 	struct pagevec *pvec;
 
-	fini_aliasing_ppgtt(&i915->ggtt);
+	fini_aliasing_ppgtt(ggtt);
 
-	ggtt_cleanup_hw(&i915->ggtt);
+	intel_ggtt_fini_fences(ggtt);
+	ggtt_cleanup_hw(ggtt);
 
 	pvec = &i915->mm.wc_stash.pvec;
 	if (pvec->nr) {
@@ -784,13 +786,13 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 	else
 		ggtt->gsm = ioremap_wc(phys_addr, size);
 	if (!ggtt->gsm) {
-		DRM_ERROR("Failed to map the ggtt page table\n");
+		drm_err(&i915->drm, "Failed to map the ggtt page table\n");
 		return -ENOMEM;
 	}
 
 	ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
 	if (ret) {
-		DRM_ERROR("Scratch setup failed\n");
+		drm_err(&i915->drm, "Scratch setup failed\n");
 		/* iounmap will also get called at remove, but meh */
 		iounmap(ggtt->gsm);
 		return ret;
@@ -850,7 +852,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 	if (!err)
 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
 	if (err)
-		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
+		drm_err(&i915->drm,
+			"Can't set DMA mask/consistent mask (%d)\n", err);
 
 	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 	if (IS_CHERRYVIEW(i915))
@@ -997,7 +1000,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
 	 * just a coarse sanity check.
 	 */
 	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
-		DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
+		drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
+			&ggtt->mappable_end);
 		return -ENXIO;
 	}
 
@@ -1005,7 +1009,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
 	if (!err)
 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
 	if (err)
-		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
+		drm_err(&i915->drm,
+			"Can't set DMA mask/consistent mask (%d)\n", err);
 	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
 	size = gen6_get_total_gtt_size(snb_gmch_ctl);
@@ -1052,7 +1057,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
 
 	ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL);
 	if (!ret) {
-		DRM_ERROR("failed to set up gmch\n");
+		drm_err(&i915->drm, "failed to set up gmch\n");
 		return -EIO;
 	}
 
@@ -1075,7 +1080,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
 	ggtt->vm.vma_ops.clear_pages = clear_pages;
 
 	if (unlikely(ggtt->do_idle_maps))
-		dev_notice(i915->drm.dev,
+		drm_notice(&i915->drm,
 			   "Applying Ironlake quirks for intel_iommu\n");
 
 	return 0;
@@ -1100,7 +1105,8 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
 		return ret;
 
 	if ((ggtt->vm.total - 1) >> 32) {
-		DRM_ERROR("We never expected a Global GTT with more than 32bits"
-			  " of address space! Found %lldM!\n",
-			  ggtt->vm.total >> 20);
+		drm_err(&i915->drm,
+			"We never expected a Global GTT with more than 32bits"
+			" of address space! Found %lldM!\n",
+			ggtt->vm.total >> 20);
 		ggtt->vm.total = 1ULL << 32;
@@ -1109,16 +1115,18 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
 	}
 
 	if (ggtt->mappable_end > ggtt->vm.total) {
-		DRM_ERROR("mappable aperture extends past end of GGTT,"
-			  " aperture=%pa, total=%llx\n",
-			  &ggtt->mappable_end, ggtt->vm.total);
+		drm_err(&i915->drm,
+			"mappable aperture extends past end of GGTT,"
+			" aperture=%pa, total=%llx\n",
+			&ggtt->mappable_end, ggtt->vm.total);
 		ggtt->mappable_end = ggtt->vm.total;
 	}
 
 	/* GMADR is the PCI mmio aperture into the global GTT. */
-	DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
-	DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
-	DRM_DEBUG_DRIVER("DSM size = %lluM\n",
-			 (u64)resource_size(&intel_graphics_stolen_res) >> 20);
+	drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
+	drm_dbg(&i915->drm, "GMADR size = %lluM\n",
+		(u64)ggtt->mappable_end >> 20);
+	drm_dbg(&i915->drm, "DSM size = %lluM\n",
+		(u64)resource_size(&intel_graphics_stolen_res) >> 20);
 
 	return 0;
@@ -1137,7 +1145,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
 		return ret;
 
 	if (intel_vtd_active())
-		dev_info(i915->drm.dev, "VT-d active for gfx access\n");
+		drm_info(&i915->drm, "VT-d active for gfx access\n");
 
 	return 0;
 }
@@ -1212,6 +1220,8 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
 
 	if (INTEL_GEN(ggtt->vm.i915) >= 8)
 		setup_private_pat(ggtt->vm.gt->uncore);
+
+	intel_ggtt_restore_fences(ggtt);
 }
 
 static struct scatterlist *

@@ -68,8 +68,7 @@ static struct intel_uncore *fence_to_uncore(struct i915_fence_reg *fence)
 	return fence->ggtt->vm.gt->uncore;
 }

-static void i965_write_fence_reg(struct i915_fence_reg *fence,
-				 struct i915_vma *vma)
+static void i965_write_fence_reg(struct i915_fence_reg *fence)
 {
 	i915_reg_t fence_reg_lo, fence_reg_hi;
 	int fence_pitch_shift;
@@ -87,18 +86,16 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence,
 	}

 	val = 0;
-	if (vma) {
-		unsigned int stride = i915_gem_object_get_stride(vma->obj);
+	if (fence->tiling) {
+		unsigned int stride = fence->stride;

-		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
-		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
-		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));
 		GEM_BUG_ON(!IS_ALIGNED(stride, 128));

-		val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
-		val |= vma->node.start;
+		val = fence->start + fence->size - I965_FENCE_PAGE;
+		val <<= 32;
+		val |= fence->start;
 		val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
-		if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
+		if (fence->tiling == I915_TILING_Y)
 			val |= BIT(I965_FENCE_TILING_Y_SHIFT);
 		val |= I965_FENCE_REG_VALID;
 	}
@@ -125,21 +122,15 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence,
 	}
 }

-static void i915_write_fence_reg(struct i915_fence_reg *fence,
-				 struct i915_vma *vma)
+static void i915_write_fence_reg(struct i915_fence_reg *fence)
 {
 	u32 val;

 	val = 0;
-	if (vma) {
-		unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
+	if (fence->tiling) {
+		unsigned int stride = fence->stride;
+		unsigned int tiling = fence->tiling;
 		bool is_y_tiled = tiling == I915_TILING_Y;
-		unsigned int stride = i915_gem_object_get_stride(vma->obj);

-		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
-		GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK);
-		GEM_BUG_ON(!is_power_of_2(vma->fence_size));
-		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));

 		if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence_to_i915(fence)))
 			stride /= 128;
@@ -147,10 +138,10 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence,
 			stride /= 512;
 		GEM_BUG_ON(!is_power_of_2(stride));

-		val = vma->node.start;
+		val = fence->start;
 		if (is_y_tiled)
 			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
-		val |= I915_FENCE_SIZE_BITS(vma->fence_size);
+		val |= I915_FENCE_SIZE_BITS(fence->size);
 		val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT;

 		val |= I830_FENCE_REG_VALID;
@@ -165,25 +156,18 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence,
 	}
 }

-static void i830_write_fence_reg(struct i915_fence_reg *fence,
-				 struct i915_vma *vma)
+static void i830_write_fence_reg(struct i915_fence_reg *fence)
 {
 	u32 val;

 	val = 0;
-	if (vma) {
-		unsigned int stride = i915_gem_object_get_stride(vma->obj);
+	if (fence->tiling) {
+		unsigned int stride = fence->stride;

-		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
-		GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK);
-		GEM_BUG_ON(!is_power_of_2(vma->fence_size));
 		GEM_BUG_ON(!is_power_of_2(stride / 128));
-		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));

-		val = vma->node.start;
-		if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
+		val = fence->start;
+		if (fence->tiling == I915_TILING_Y)
 			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
-		val |= I830_FENCE_SIZE_BITS(vma->fence_size);
+		val |= I830_FENCE_SIZE_BITS(fence->size);
 		val |= ilog2(stride / 128) << I830_FENCE_PITCH_SHIFT;
 		val |= I830_FENCE_REG_VALID;
 	}
@@ -197,8 +181,7 @@ static void i830_write_fence_reg(struct i915_fence_reg *fence,
 	}
 }

-static void fence_write(struct i915_fence_reg *fence,
-			struct i915_vma *vma)
+static void fence_write(struct i915_fence_reg *fence)
 {
 	struct drm_i915_private *i915 = fence_to_i915(fence);

@@ -209,18 +192,21 @@ static void fence_write(struct i915_fence_reg *fence,
 	 */

 	if (IS_GEN(i915, 2))
-		i830_write_fence_reg(fence, vma);
+		i830_write_fence_reg(fence);
 	else if (IS_GEN(i915, 3))
-		i915_write_fence_reg(fence, vma);
+		i915_write_fence_reg(fence);
 	else
-		i965_write_fence_reg(fence, vma);
+		i965_write_fence_reg(fence);

 	/*
 	 * Access through the fenced region afterwards is
 	 * ordered by the posting reads whilst writing the registers.
 	 */
+}

-	fence->dirty = false;
+static bool gpu_uses_fence_registers(struct i915_fence_reg *fence)
+{
+	return INTEL_GEN(fence_to_i915(fence)) < 4;
 }

 static int fence_update(struct i915_fence_reg *fence,
@@ -232,27 +218,32 @@ static int fence_update(struct i915_fence_reg *fence,
 	struct i915_vma *old;
 	int ret;

+	fence->tiling = 0;
 	if (vma) {
-		GEM_BUG_ON(!i915_gem_object_get_stride(vma->obj) ||
-			   !i915_gem_object_get_tiling(vma->obj));
-
 		if (!i915_vma_is_map_and_fenceable(vma))
 			return -EINVAL;

+		if (drm_WARN(&uncore->i915->drm,
+			     !i915_gem_object_get_stride(vma->obj) ||
+			     !i915_gem_object_get_tiling(vma->obj),
+			     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+			     i915_gem_object_get_stride(vma->obj),
+			     i915_gem_object_get_tiling(vma->obj)))
+			return -EINVAL;
+
+		if (gpu_uses_fence_registers(fence)) {
+			/* implicit 'unfenced' GPU blits */
+			ret = i915_vma_sync(vma);
+			if (ret)
+				return ret;
+		}
+
+		fence->start = vma->node.start;
+		fence->size = vma->fence_size;
+		fence->stride = i915_gem_object_get_stride(vma->obj);
+		fence->tiling = i915_gem_object_get_tiling(vma->obj);
 	}
+	WRITE_ONCE(fence->dirty, false);

 	old = xchg(&fence->vma, NULL);
 	if (old) {
 		/* XXX Ideally we would move the waiting to outside the mutex */
-		ret = i915_vma_sync(old);
+		ret = i915_active_wait(&fence->active);
 		if (ret) {
 			fence->vma = old;
 			return ret;
@@ -276,7 +267,7 @@ static int fence_update(struct i915_fence_reg *fence,
 	/*
 	 * We only need to update the register itself if the device is awake.
 	 * If the device is currently powered down, we will defer the write
-	 * to the runtime resume, see i915_gem_restore_fences().
+	 * to the runtime resume, see intel_ggtt_restore_fences().
 	 *
 	 * This only works for removing the fence register, on acquisition
 	 * the caller must hold the rpm wakeref. The fence register must
@@ -290,7 +281,7 @@ static int fence_update(struct i915_fence_reg *fence,
 	}

 	WRITE_ONCE(fence->vma, vma);
-	fence_write(fence, vma);
+	fence_write(fence);

 	if (vma) {
 		vma->fence = fence;
@@ -307,23 +298,26 @@ static int fence_update(struct i915_fence_reg *fence,
 *
 * This function force-removes any fence from the given object, which is useful
 * if the kernel wants to do untiled GTT access.
- *
- * Returns:
- *
- * 0 on success, negative error code on failure.
 */
-int i915_vma_revoke_fence(struct i915_vma *vma)
+void i915_vma_revoke_fence(struct i915_vma *vma)
 {
 	struct i915_fence_reg *fence = vma->fence;
+	intel_wakeref_t wakeref;

 	lockdep_assert_held(&vma->vm->mutex);
 	if (!fence)
-		return 0;
+		return;

-	if (atomic_read(&fence->pin_count))
-		return -EBUSY;
+	GEM_BUG_ON(fence->vma != vma);
+	GEM_BUG_ON(!i915_active_is_idle(&fence->active));
+	GEM_BUG_ON(atomic_read(&fence->pin_count));

-	return fence_update(fence, NULL);
+	fence->tiling = 0;
+	WRITE_ONCE(fence->vma, NULL);
+	vma->fence = NULL;
+
+	with_intel_runtime_pm_if_in_use(fence_to_uncore(fence)->rpm, wakeref)
+		fence_write(fence);
 }

 static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
@@ -487,34 +481,19 @@ void i915_unreserve_fence(struct i915_fence_reg *fence)
 }

 /**
- * i915_gem_restore_fences - restore fence state
+ * intel_ggtt_restore_fences - restore fence state
 * @ggtt: Global GTT
 *
 * Restore the hw fence state to match the software tracking again, to be called
 * after a gpu reset and on resume. Note that on runtime suspend we only cancel
 * the fences, to be reacquired by the user later.
 */
-void i915_gem_restore_fences(struct i915_ggtt *ggtt)
+void intel_ggtt_restore_fences(struct i915_ggtt *ggtt)
 {
 	int i;

-	rcu_read_lock(); /* keep obj alive as we dereference */
-	for (i = 0; i < ggtt->num_fences; i++) {
-		struct i915_fence_reg *reg = &ggtt->fence_regs[i];
-		struct i915_vma *vma = READ_ONCE(reg->vma);
-
-		GEM_BUG_ON(vma && vma->fence != reg);
-
-		/*
-		 * Commit delayed tiling changes if we have an object still
-		 * attached to the fence, otherwise just clear the fence.
-		 */
-		if (vma && !i915_gem_object_is_tiled(vma->obj))
-			vma = NULL;
-
-		fence_write(reg, vma);
-	}
-	rcu_read_unlock();
+	for (i = 0; i < ggtt->num_fences; i++)
+		fence_write(&ggtt->fence_regs[i]);
 }

 /**
@@ -746,7 +725,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
 * bit 17 of its physical address and therefore being interpreted differently
 * by the GPU.
 */
-static void i915_gem_swizzle_page(struct page *page)
+static void swizzle_page(struct page *page)
 {
 	char temp[64];
 	char *vaddr;
@@ -791,7 +770,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
 	for_each_sgt_page(page, sgt_iter, pages) {
 		char new_bit_17 = page_to_phys(page) >> 17;
 		if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
-			i915_gem_swizzle_page(page);
+			swizzle_page(page);
 			set_page_dirty(page);
 		}
 		i++;
@@ -836,7 +815,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
 	}
 }

-void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
+void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
 {
 	struct drm_i915_private *i915 = ggtt->vm.i915;
 	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
@@ -864,18 +843,37 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
 	if (intel_vgpu_active(i915))
 		num_fences = intel_uncore_read(uncore,
 					       vgtif_reg(avail_rs.fence_num));
+	ggtt->fence_regs = kcalloc(num_fences,
+				   sizeof(*ggtt->fence_regs),
+				   GFP_KERNEL);
+	if (!ggtt->fence_regs)
+		num_fences = 0;

 	/* Initialize fence registers to zero */
 	for (i = 0; i < num_fences; i++) {
 		struct i915_fence_reg *fence = &ggtt->fence_regs[i];

+		i915_active_init(&fence->active, NULL, NULL);
 		fence->ggtt = ggtt;
 		fence->id = i;
 		list_add_tail(&fence->link, &ggtt->fence_list);
 	}
 	ggtt->num_fences = num_fences;

-	i915_gem_restore_fences(ggtt);
+	intel_ggtt_restore_fences(ggtt);
+}
+
+void intel_ggtt_fini_fences(struct i915_ggtt *ggtt)
+{
+	int i;
+
+	for (i = 0; i < ggtt->num_fences; i++) {
+		struct i915_fence_reg *fence = &ggtt->fence_regs[i];
+
+		i915_active_fini(&fence->active);
+	}
+
+	kfree(ggtt->fence_regs);
 }

 void intel_gt_init_swizzling(struct intel_gt *gt)
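
The fence rework above drops the (fence, vma) pair passed to the per-generation register writers in favour of parameters snapshotted onto the fence itself. The idea, reduced to a sketch (helper name hypothetical; fields as in the diff):

	/*
	 * Sketch: capture the vma's fencing parameters once, at update
	 * time, so later register writes (e.g. intel_ggtt_restore_fences()
	 * on resume) only need the fence and never chase a stale vma.
	 */
	static void fence_snapshot(struct i915_fence_reg *fence,
				   const struct i915_vma *vma)
	{
		fence->start = vma->node.start;
		fence->size = vma->fence_size;
		fence->stride = i915_gem_object_get_stride(vma->obj);
		fence->tiling = i915_gem_object_get_tiling(vma->obj);
	}
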
@@ -22,12 +22,14 @@
 *
 */

-#ifndef __I915_FENCE_REG_H__
-#define __I915_FENCE_REG_H__
+#ifndef __INTEL_GGTT_FENCING_H__
+#define __INTEL_GGTT_FENCING_H__

 #include <linux/list.h>
 #include <linux/types.h>

+#include "i915_active.h"
+
 struct drm_i915_gem_object;
 struct i915_ggtt;
 struct i915_vma;
@@ -41,6 +43,7 @@ struct i915_fence_reg {
 	struct i915_ggtt *ggtt;
 	struct i915_vma *vma;
 	atomic_t pin_count;
+	struct i915_active active;
 	int id;
 	/**
 	 * Whether the tiling parameters for the currently
@@ -51,20 +54,24 @@ struct i915_fence_reg {
 	 * command (such as BLT on gen2/3), as a "fence".
 	 */
 	bool dirty;
+	u32 start;
+	u32 size;
+	u32 tiling;
+	u32 stride;
 };

-/* i915_gem_fence_reg.c */
 struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt);
 void i915_unreserve_fence(struct i915_fence_reg *fence);

-void i915_gem_restore_fences(struct i915_ggtt *ggtt);
+void intel_ggtt_restore_fences(struct i915_ggtt *ggtt);

 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
 				       struct sg_table *pages);
 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
 					 struct sg_table *pages);

-void i915_ggtt_init_fences(struct i915_ggtt *ggtt);
+void intel_ggtt_init_fences(struct i915_ggtt *ggtt);
+void intel_ggtt_fini_fences(struct i915_ggtt *ggtt);

 void intel_gt_init_swizzling(struct intel_gt *gt);
@@ -635,8 +635,7 @@ void intel_gt_driver_remove(struct intel_gt *gt)
 {
 	__intel_gt_disable(gt);

-	intel_uc_fini_hw(&gt->uc);
-	intel_uc_fini(&gt->uc);
+	intel_uc_driver_remove(&gt->uc);

 	intel_engines_release(gt);
 }
@@ -39,6 +39,15 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
 		}
 	}

+	if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
+		WRITE_ONCE(engine->execlists.yield,
+			   ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
+		ENGINE_TRACE(engine, "semaphore yield: %08x\n",
+			     engine->execlists.yield);
+		if (del_timer(&engine->execlists.timer))
+			tasklet = true;
+	}
+
 	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
 		tasklet = true;

@@ -228,7 +237,8 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
 	const u32 irqs =
 		GT_CS_MASTER_ERROR_INTERRUPT |
 		GT_RENDER_USER_INTERRUPT |
-		GT_CONTEXT_SWITCH_INTERRUPT;
+		GT_CONTEXT_SWITCH_INTERRUPT |
+		GT_WAIT_SEMAPHORE_INTERRUPT;
 	struct intel_uncore *uncore = gt->uncore;
 	const u32 dmask = irqs << 16 | irqs;
 	const u32 smask = irqs << 16;
@@ -366,7 +376,8 @@ void gen8_gt_irq_postinstall(struct intel_gt *gt)
 	const u32 irqs =
 		GT_CS_MASTER_ERROR_INTERRUPT |
 		GT_RENDER_USER_INTERRUPT |
-		GT_CONTEXT_SWITCH_INTERRUPT;
+		GT_CONTEXT_SWITCH_INTERRUPT |
+		GT_WAIT_SEMAPHORE_INTERRUPT;
 	const u32 gt_interrupts[] = {
 		irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
 		irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
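
The new GT_WAIT_SEMAPHORE_INTERRUPT plumbing latches the context id of a request caught busy-waiting on a semaphore; the scheduler half (in the execlists changes further down) compares that id against the active context to end its timeslice early. A compressed sketch of the two halves (helper names are illustrative, not from the diff):

	/* irq half (sketch): record which context is spinning */
	static void note_semaphore_wait(struct intel_engine_cs *engine)
	{
		WRITE_ONCE(engine->execlists.yield,
			   ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
	}

	/* scheduler half (sketch): a matching id expires the timeslice */
	static bool caught_spinning(const struct intel_engine_execlists *el,
				    const struct i915_request *rq)
	{
		return upper_32_bits(rq->context->lrc_desc) ==
		       READ_ONCE(el->yield);
	}
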
@@ -204,7 +204,7 @@ int intel_gt_resume(struct intel_gt *gt)
 	/* Only when the HW is re-initialised, can we replay the requests */
 	err = intel_gt_init_hw(gt);
 	if (err) {
-		dev_err(gt->i915->drm.dev,
+		drm_err(&gt->i915->drm,
 			"Failed to initialize GPU, declaring it wedged!\n");
 		goto err_wedged;
 	}
@@ -220,7 +220,7 @@ int intel_gt_resume(struct intel_gt *gt)

 		intel_engine_pm_put(engine);
 		if (err) {
-			dev_err(gt->i915->drm.dev,
+			drm_err(&gt->i915->drm,
 				"Failed to restart %s (%d)\n",
 				engine->name, err);
 			goto err_wedged;
@@ -324,6 +324,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
 {
 	GT_TRACE(gt, "\n");
 	intel_gt_init_swizzling(gt);
+	intel_ggtt_restore_fences(gt->ggtt);

 	return intel_uc_runtime_resume(&gt->uc);
 }
@@ -38,7 +38,7 @@ static bool flush_submission(struct intel_gt *gt)
 	for_each_engine(engine, gt, id) {
 		intel_engine_flush_submission(engine);
 		active |= flush_work(&engine->retire_work);
-		active |= flush_work(&engine->wakeref.work);
+		active |= flush_delayed_work(&engine->wakeref.work);
 	}

 	return active;
@@ -26,7 +26,6 @@
 #include <drm/drm_mm.h>

 #include "gt/intel_reset.h"
-#include "i915_gem_fence_reg.h"
 #include "i915_selftest.h"
 #include "i915_vma_types.h"

@@ -135,6 +134,8 @@ typedef u64 gen8_pte_t;
 #define GEN8_PDE_IPS_64K BIT(11)
 #define GEN8_PDE_PS_2M   BIT(7)

+struct i915_fence_reg;
+
 #define for_each_sgt_daddr(__dp, __iter, __sgt) \
 	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)

@@ -333,7 +334,7 @@ struct i915_ggtt {
 	u32 pin_bias;

 	unsigned int num_fences;
-	struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
+	struct i915_fence_reg *fence_regs;
 	struct list_head fence_list;

 	/**
@@ -238,6 +238,17 @@ __execlists_update_reg_state(const struct intel_context *ce,
 			     const struct intel_engine_cs *engine,
 			     u32 head);

+static u32 intel_context_get_runtime(const struct intel_context *ce)
+{
+	/*
+	 * We can use either ppHWSP[16] which is recorded before the context
+	 * switch (and so excludes the cost of context switches) or use the
+	 * value from the context image itself, which is saved/restored earlier
+	 * and so includes the cost of the save.
+	 */
+	return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
+}
+
 static void mark_eio(struct i915_request *rq)
 {
 	if (i915_request_completed(rq))
@@ -1154,6 +1165,7 @@ static void restore_default_state(struct intel_context *ce,
 			engine->context_size - PAGE_SIZE);

 	execlists_init_reg_state(regs, ce, engine, ce->ring, false);
+	ce->runtime.last = intel_context_get_runtime(ce);
 }

 static void reset_active(struct i915_request *rq,
@@ -1195,17 +1207,6 @@
 	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
 }

-static u32 intel_context_get_runtime(const struct intel_context *ce)
-{
-	/*
-	 * We can use either ppHWSP[16] which is recorded before the context
-	 * switch (and so excludes the cost of context switches) or use the
-	 * value from the context image itself, which is saved/restored earlier
-	 * and so includes the cost of the save.
-	 */
-	return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
-}
-
 static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
 {
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
@@ -1415,6 +1416,23 @@ static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc
 	}
 }

+static __maybe_unused char *
+dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
+{
+	if (!rq)
+		return "";
+
+	snprintf(buf, buflen, "%s%llx:%lld%s prio %d",
+		 prefix,
+		 rq->fence.context, rq->fence.seqno,
+		 i915_request_completed(rq) ? "!" :
+		 i915_request_started(rq) ? "*" :
+		 "",
+		 rq_prio(rq));
+
+	return buf;
+}
+
 static __maybe_unused void
 trace_ports(const struct intel_engine_execlists *execlists,
 	    const char *msg,
@@ -1422,18 +1440,14 @@
 {
 	const struct intel_engine_cs *engine =
 		container_of(execlists, typeof(*engine), execlists);
+	char __maybe_unused p0[40], p1[40];

 	if (!ports[0])
 		return;

-	ENGINE_TRACE(engine, "%s { %llx:%lld%s, %llx:%lld }\n", msg,
-		     ports[0]->fence.context,
-		     ports[0]->fence.seqno,
-		     i915_request_completed(ports[0]) ? "!" :
-		     i915_request_started(ports[0]) ? "*" :
-		     "",
-		     ports[1] ? ports[1]->fence.context : 0,
-		     ports[1] ? ports[1]->fence.seqno : 0);
+	ENGINE_TRACE(engine, "%s { %s%s }\n", msg,
+		     dump_port(p0, sizeof(p0), "", ports[0]),
+		     dump_port(p1, sizeof(p1), ", ", ports[1]));
 }

 static inline bool
@@ -1754,7 +1768,8 @@ static void defer_active(struct intel_engine_cs *engine)
 }

 static bool
-need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
+need_timeslice(const struct intel_engine_cs *engine,
+	       const struct i915_request *rq)
 {
 	int hint;

@@ -1768,6 +1783,32 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
 	return hint >= effective_prio(rq);
 }

+static bool
+timeslice_yield(const struct intel_engine_execlists *el,
+		const struct i915_request *rq)
+{
+	/*
+	 * Once bitten, forever smitten!
+	 *
+	 * If the active context ever busy-waited on a semaphore,
+	 * it will be treated as a hog until the end of its timeslice (i.e.
+	 * until it is scheduled out and replaced by a new submission,
+	 * possibly even its own lite-restore). The HW only sends an interrupt
+	 * on the first miss, and we do not know if that semaphore has been
+	 * signaled, or even if it is now stuck on another semaphore. Play
+	 * safe, yield if it might be stuck -- it will be given a fresh
+	 * timeslice in the near future.
+	 */
+	return upper_32_bits(rq->context->lrc_desc) == READ_ONCE(el->yield);
+}
+
+static bool
+timeslice_expired(const struct intel_engine_execlists *el,
+		  const struct i915_request *rq)
+{
+	return timer_expired(&el->timer) || timeslice_yield(el, rq);
+}
+
 static int
 switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
 {
@@ -1783,8 +1824,7 @@ timeslice(const struct intel_engine_cs *engine)
 	return READ_ONCE(engine->props.timeslice_duration_ms);
 }

-static unsigned long
-active_timeslice(const struct intel_engine_cs *engine)
+static unsigned long active_timeslice(const struct intel_engine_cs *engine)
 {
 	const struct intel_engine_execlists *execlists = &engine->execlists;
 	const struct i915_request *rq = *execlists->active;
@@ -1800,16 +1840,25 @@

 static void set_timeslice(struct intel_engine_cs *engine)
 {
+	unsigned long duration;
+
 	if (!intel_engine_has_timeslices(engine))
 		return;

-	set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
+	duration = active_timeslice(engine);
+	ENGINE_TRACE(engine, "bump timeslicing, interval:%lu", duration);
+
+	set_timer_ms(&engine->execlists.timer, duration);
 }

 static void start_timeslice(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists *execlists = &engine->execlists;
-	int prio = queue_prio(execlists);
+	const int prio = queue_prio(execlists);
+	unsigned long duration;

 	if (!intel_engine_has_timeslices(engine))
 		return;

 	WRITE_ONCE(execlists->switch_priority_hint, prio);
 	if (prio == INT_MIN)
@@ -1818,7 +1867,12 @@ static void start_timeslice(struct intel_engine_cs *engine)
 	if (timer_pending(&execlists->timer))
 		return;

-	set_timer_ms(&execlists->timer, timeslice(engine));
+	duration = timeslice(engine);
+	ENGINE_TRACE(engine,
+		     "start timeslicing, prio:%d, interval:%lu",
+		     prio, duration);
+
+	set_timer_ms(&execlists->timer, duration);
 }

 static void record_preemption(struct intel_engine_execlists *execlists)
@@ -1915,11 +1969,26 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * of trouble.
 	 */
 	active = READ_ONCE(execlists->active);
-	while ((last = *active) && i915_request_completed(last))
-		active++;
-
-	if (last) {
+
+	/*
+	 * In theory we can skip over completed contexts that have not
+	 * yet been processed by events (as those events are in flight):
+	 *
+	 * while ((last = *active) && i915_request_completed(last))
+	 *	active++;
+	 *
+	 * However, the GPU cannot handle this as it will ultimately
+	 * find itself trying to jump back into a context it has just
+	 * completed and barf.
+	 */
+
+	if ((last = *active)) {
 		if (need_preempt(engine, last, rb)) {
+			if (i915_request_completed(last)) {
+				tasklet_hi_schedule(&execlists->tasklet);
+				return;
+			}
+
 			ENGINE_TRACE(engine,
 				     "preempting last=%llx:%lld, prio=%d, hint=%d\n",
 				     last->fence.context,
@@ -1946,13 +2015,19 @@ static void execlists_dequeue(struct intel_engine_cs *engine)

 			last = NULL;
 		} else if (need_timeslice(engine, last) &&
-			   timer_expired(&engine->execlists.timer)) {
+			   timeslice_expired(execlists, last)) {
+			if (i915_request_completed(last)) {
+				tasklet_hi_schedule(&execlists->tasklet);
+				return;
+			}
+
 			ENGINE_TRACE(engine,
-				     "expired last=%llx:%lld, prio=%d, hint=%d\n",
+				     "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
 				     last->fence.context,
 				     last->fence.seqno,
 				     last->sched.attr.priority,
-				     execlists->queue_priority_hint);
+				     execlists->queue_priority_hint,
+				     yesno(timeslice_yield(execlists, last)));

 			ring_set_paused(engine, 1);
 			defer_active(engine);
@@ -2213,6 +2288,7 @@ done:
 		}
 		clear_ports(port + 1, last_port - port);

+		WRITE_ONCE(execlists->yield, -1);
 		execlists_submit_ports(engine);
 		set_preempt_timeout(engine, *active);
 	} else {
@@ -2308,6 +2384,13 @@ gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
 	return *csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
 }

+static inline void flush_hwsp(const struct i915_request *rq)
+{
+	mb();
+	clflush((void *)READ_ONCE(rq->hwsp_seqno));
+	mb();
+}
+
 static void process_csb(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -2384,8 +2467,6 @@ static void process_csb(struct intel_engine_cs *engine)
 		if (promote) {
 			struct i915_request * const *old = execlists->active;

-			GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
-
 			ring_set_paused(engine, 0);

 			/* Point active to the new ELSP; prevent overwriting */
@@ -2398,6 +2479,7 @@ static void process_csb(struct intel_engine_cs *engine)
 				execlists_schedule_out(*old++);

 			/* switch pending to inflight */
+			GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
 			memcpy(execlists->inflight,
 			       execlists->pending,
 			       execlists_num_ports(execlists) *
@@ -2419,13 +2501,24 @@ static void process_csb(struct intel_engine_cs *engine)
 			 * user interrupt and CSB is processed.
 			 */
 			if (GEM_SHOW_DEBUG() &&
-			    !i915_request_completed(*execlists->active) &&
-			    !reset_in_progress(execlists)) {
-				struct i915_request *rq __maybe_unused =
-					*execlists->active;
+			    !i915_request_completed(*execlists->active)) {
+				struct i915_request *rq = *execlists->active;
 				const u32 *regs __maybe_unused =
 					rq->context->lrc_reg_state;

+				/*
+				 * Flush the breadcrumb before crying foul.
+				 *
+				 * Since we have hit this on icl and seen the
+				 * breadcrumb advance as we print out the debug
+				 * info (so the problem corrected itself without
+				 * lasting damage), and we know that icl suffers
+				 * from missing global observation points in
+				 * execlists, presume that affects even more
+				 * coherency.
+				 */
+				flush_hwsp(rq);
+
 				ENGINE_TRACE(engine,
 					     "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
 					     ENGINE_READ(engine, RING_START),
@@ -2446,6 +2539,9 @@ static void process_csb(struct intel_engine_cs *engine)
 					     regs[CTX_RING_HEAD],
 					     regs[CTX_RING_TAIL]);

+				/* Still? Declare it caput! */
+				if (!i915_request_completed(rq) &&
+				    !reset_in_progress(execlists))
 					GEM_BUG_ON("context completed before request");
 			}

@@ -2736,6 +2832,45 @@ err_cap:
 	return NULL;
 }

+static struct i915_request *
+active_context(struct intel_engine_cs *engine, u32 ccid)
+{
+	const struct intel_engine_execlists * const el = &engine->execlists;
+	struct i915_request * const *port, *rq;
+
+	/*
+	 * Use the most recent result from process_csb(), but just in case
+	 * we trigger an error (via interrupt) before the first CS event has
+	 * been written, peek at the next submission.
+	 */
+
+	for (port = el->active; (rq = *port); port++) {
+		if (upper_32_bits(rq->context->lrc_desc) == ccid) {
+			ENGINE_TRACE(engine,
+				     "ccid found at active:%zd\n",
+				     port - el->active);
+			return rq;
+		}
+	}
+
+	for (port = el->pending; (rq = *port); port++) {
+		if (upper_32_bits(rq->context->lrc_desc) == ccid) {
+			ENGINE_TRACE(engine,
+				     "ccid found at pending:%zd\n",
+				     port - el->pending);
+			return rq;
+		}
+	}
+
+	ENGINE_TRACE(engine, "ccid:%x not found\n", ccid);
+	return NULL;
+}
+
+static u32 active_ccid(struct intel_engine_cs *engine)
+{
+	return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
+}
+
 static bool execlists_capture(struct intel_engine_cs *engine)
 {
 	struct execlists_capture *cap;
@@ -2753,7 +2888,7 @@ static bool execlists_capture(struct intel_engine_cs *engine)
 		return true;

 	spin_lock_irq(&engine->active.lock);
-	cap->rq = execlists_active(&engine->execlists);
+	cap->rq = active_context(engine, active_ccid(engine));
 	if (cap->rq) {
 		cap->rq = active_request(cap->rq->context->timeline, cap->rq);
 		cap->rq = i915_request_get_rcu(cap->rq);
@@ -2901,10 +3036,14 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
 	if (reset_in_progress(execlists))
 		return; /* defer until we restart the engine following reset */

-	if (execlists->tasklet.func == execlists_submission_tasklet)
+	/* Hopefully we clear execlists->pending[] to let us through */
+	if (READ_ONCE(execlists->pending[0]) &&
+	    tasklet_trylock(&execlists->tasklet)) {
+		process_csb(engine);
+		tasklet_unlock(&execlists->tasklet);
+	}
+
 	__execlists_submission_tasklet(engine);
-	else
-		tasklet_hi_schedule(&execlists->tasklet);
 }

 static void submit_queue(struct intel_engine_cs *engine,
@@ -2990,7 +3129,7 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
 	vaddr += engine->context_size;

 	if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
-		dev_err_once(engine->i915->drm.dev,
+		drm_err_once(&engine->i915->drm,
 			     "%s context redzone overwritten!\n",
 			     engine->name);
 }
@@ -3442,7 +3581,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)

 	ret = lrc_setup_wa_ctx(engine);
 	if (ret) {
-		DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
+		drm_dbg(&engine->i915->drm,
+			"Failed to setup context WA page: %d\n", ret);
 		return ret;
 	}

@@ -3485,7 +3625,7 @@ static void enable_error_interrupt(struct intel_engine_cs *engine)

 	status = ENGINE_READ(engine, RING_ESR);
 	if (unlikely(status)) {
-		dev_err(engine->i915->drm.dev,
+		drm_err(&engine->i915->drm,
 			"engine '%s' resumed still in error: %08x\n",
 			engine->name, status);
 		__intel_gt_reset(engine->gt, engine->mask);
@@ -3549,7 +3689,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
 	bool unexpected = false;

 	if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
-		DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n");
+		drm_dbg(&engine->i915->drm,
+			"STOP_RING still set in RING_MI_MODE\n");
 		unexpected = true;
 	}

@@ -3609,6 +3750,7 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
 	 *
 	 * FIXME: Wa for more modern gens needs to be validated
 	 */
+	ring_set_paused(engine, 1);
 	intel_engine_stop_cs(engine);
 }

@@ -4449,6 +4591,7 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
 	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
 	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
 	engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
+	engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
 }

 static void rcs_submission_override(struct intel_engine_cs *engine)
@@ -4493,7 +4636,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 		 * because we only expect rare glitches but nothing
 		 * critical to prevent us from using GPU
 		 */
-		DRM_ERROR("WA batch buffer initialization failed\n");
+		drm_err(&i915->drm, "WA batch buffer initialization failed\n");

 	if (HAS_LOGICAL_RING_ELSQ(i915)) {
 		execlists->submit_reg = uncore->regs +
@@ -4575,6 +4718,7 @@ static void init_common_reg_state(u32 * const regs,
 	regs[CTX_CONTEXT_CONTROL] = ctl;

 	regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+	regs[CTX_TIMESTAMP] = 0;
 }

 static void init_wa_bb_reg_state(u32 * const regs,
@@ -4668,7 +4812,8 @@ populate_lr_context(struct intel_context *ce,
 	vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
 		ret = PTR_ERR(vaddr);
-		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
+		drm_dbg(&engine->i915->drm,
+			"Could not map object pages! (%d)\n", ret);
 		return ret;
 	}

@@ -4761,7 +4906,8 @@ static int __execlists_context_alloc(struct intel_context *ce,

 	ret = populate_lr_context(ce, ctx_obj, engine, ring);
 	if (ret) {
-		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
+		drm_dbg(&engine->i915->drm,
+			"Failed to populate LRC: %d\n", ret);
 		goto error_ring_free;
 	}

@@ -4814,6 +4960,8 @@ static void virtual_context_destroy(struct kref *kref)
 	__execlists_context_fini(&ve->context);
 	intel_context_fini(&ve->context);

+	intel_engine_free_request_pool(&ve->base);
+
 	kfree(ve->bonds);
 	kfree(ve);
 }
@@ -4994,10 +5142,8 @@ static void virtual_submission_tasklet(unsigned long data)
 submit_engine:
 		GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
 		node->prio = prio;
-		if (first && prio > sibling->execlists.queue_priority_hint) {
-			sibling->execlists.queue_priority_hint = prio;
+		if (first && prio > sibling->execlists.queue_priority_hint)
 			tasklet_hi_schedule(&sibling->execlists.tasklet);
-		}

 		spin_unlock(&sibling->active.lock);
 	}
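
Among the execlists changes above, __submit_queue_imm() now relies on direct submission: rather than checking which tasklet function is installed, it opportunistically drains pending CSB events itself and then submits inline. The locking idiom is worth noting; a reduced sketch (assumed simplification of the diff, not a drop-in):

	/*
	 * Sketch: process any pending CSB events on this cpu, but only
	 * if the tasklet is not already running -- tasklet_trylock()
	 * makes the race with the softirq path benign.
	 */
	static void submit_direct(struct intel_engine_cs *engine)
	{
		struct intel_engine_execlists *el = &engine->execlists;

		if (READ_ONCE(el->pending[0]) &&
		    tasklet_trylock(&el->tasklet)) {
			process_csb(engine);	/* retire completed contexts */
			tasklet_unlock(&el->tasklet);
		}

		__execlists_submission_tasklet(engine);	/* submit inline */
	}
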
@@ -246,16 +246,18 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
 	ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
 				     &rc6vids, NULL);
 	if (IS_GEN(i915, 6) && ret) {
-		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+		drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n");
 	} else if (IS_GEN(i915, 6) &&
 		   (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
-		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+		drm_dbg(&i915->drm,
+			"You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
 			GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
 		rc6vids &= 0xffff00;
 		rc6vids |= GEN6_ENCODE_RC6_VID(450);
 		ret = sandybridge_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
 		if (ret)
-			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+			drm_err(&i915->drm,
+				"Couldn't fix incorrect rc6 voltage\n");
 	}
 }

@@ -263,14 +265,15 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
 static int chv_rc6_init(struct intel_rc6 *rc6)
 {
 	struct intel_uncore *uncore = rc6_to_uncore(rc6);
+	struct drm_i915_private *i915 = rc6_to_i915(rc6);
 	resource_size_t pctx_paddr, paddr;
 	resource_size_t pctx_size = 32 * SZ_1K;
 	u32 pcbr;

 	pcbr = intel_uncore_read(uncore, VLV_PCBR);
 	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
-		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
-		paddr = rc6_to_i915(rc6)->dsm.end + 1 - pctx_size;
+		drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n");
+		paddr = i915->dsm.end + 1 - pctx_size;
 		GEM_BUG_ON(paddr > U32_MAX);

 		pctx_paddr = (paddr & ~4095);
@@ -304,7 +307,7 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
 		goto out;
 	}

-	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+	drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n");

 	/*
 	 * From the Gunit register HAS:
@@ -316,7 +319,8 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
 	 */
 	pctx = i915_gem_object_create_stolen(i915, pctx_size);
 	if (IS_ERR(pctx)) {
-		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
+		drm_dbg(&i915->drm,
+			"not enough stolen space for PCTX, disabling\n");
 		return PTR_ERR(pctx);
 	}

@@ -398,14 +402,14 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
 	rc_sw_target = intel_uncore_read(uncore, GEN6_RC_STATE);
 	rc_sw_target &= RC_SW_TARGET_STATE_MASK;
 	rc_sw_target >>= RC_SW_TARGET_STATE_SHIFT;
-	DRM_DEBUG_DRIVER("BIOS enabled RC states: "
+	drm_dbg(&i915->drm, "BIOS enabled RC states: "
 			 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
 			 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
 			 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
 			 rc_sw_target);

 	if (!(intel_uncore_read(uncore, RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
-		DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
+		drm_dbg(&i915->drm, "RC6 Base location not set properly.\n");
 		enable_rc6 = false;
 	}

@@ -417,7 +421,7 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
 		intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
 	if (!(rc6_ctx_base >= i915->dsm_reserved.start &&
 	      rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end)) {
-		DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
+		drm_dbg(&i915->drm, "RC6 Base address not as expected.\n");
 		enable_rc6 = false;
 	}

@@ -425,24 +429,25 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
 	      (intel_uncore_read(uncore, PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1 &&
 	      (intel_uncore_read(uncore, PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1 &&
 	      (intel_uncore_read(uncore, PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1)) {
-		DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
+		drm_dbg(&i915->drm,
+			"Engine Idle wait time not set properly.\n");
 		enable_rc6 = false;
 	}

 	if (!intel_uncore_read(uncore, GEN8_PUSHBUS_CONTROL) ||
 	    !intel_uncore_read(uncore, GEN8_PUSHBUS_ENABLE) ||
 	    !intel_uncore_read(uncore, GEN8_PUSHBUS_SHIFT)) {
-		DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
+		drm_dbg(&i915->drm, "Pushbus not setup properly.\n");
 		enable_rc6 = false;
 	}

 	if (!intel_uncore_read(uncore, GEN6_GFXPAUSE)) {
-		DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
+		drm_dbg(&i915->drm, "GFX pause not setup properly.\n");
 		enable_rc6 = false;
 	}

 	if (!intel_uncore_read(uncore, GEN8_MISC_CTRL0)) {
-		DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
+		drm_dbg(&i915->drm, "GPM control not setup properly.\n");
 		enable_rc6 = false;
 	}

@@ -463,7 +468,7 @@ static bool rc6_supported(struct intel_rc6 *rc6)
 		return false;

 	if (IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(rc6)) {
-		dev_notice(i915->drm.dev,
+		drm_notice(&i915->drm,
 			   "RC6 and powersaving disabled by BIOS\n");
 		return false;
 	}
@@ -495,7 +500,7 @@ static bool pctx_corrupted(struct intel_rc6 *rc6)
 	if (intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO))
 		return false;

-	dev_notice(i915->drm.dev,
+	drm_notice(&i915->drm,
 		   "RC6 context corruption, disabling runtime power management\n");
 	return true;
 }
@@ -102,7 +102,7 @@ static int render_state_setup(struct intel_renderstate *so,
 	}

 	if (rodata->reloc[reloc_index] != -1) {
-		DRM_ERROR("only %d relocs resolved\n", reloc_index);
+		drm_err(&i915->drm, "only %d relocs resolved\n", reloc_index);
 		goto err;
 	}
@@ -109,7 +109,7 @@ static bool mark_guilty(struct i915_request *rq)
 		goto out;
 	}

-	dev_notice(ctx->i915->drm.dev,
+	drm_notice(&ctx->i915->drm,
 		   "%s context reset due to GPU hang\n",
 		   ctx->name);

@@ -755,7 +755,7 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
 	for_each_engine(engine, gt, id)
 		__intel_engine_reset(engine, stalled_mask & engine->mask);

-	i915_gem_restore_fences(gt->ggtt);
+	intel_ggtt_restore_fences(gt->ggtt);

 	return err;
 }
@@ -1031,7 +1031,7 @@ void intel_gt_reset(struct intel_gt *gt,
 		goto unlock;

 	if (reason)
-		dev_notice(gt->i915->drm.dev,
+		drm_notice(&gt->i915->drm,
 			   "Resetting chip for %s\n", reason);
 	atomic_inc(&gt->i915->gpu_error.reset_count);

@@ -1039,7 +1039,7 @@ void intel_gt_reset(struct intel_gt *gt,

 	if (!intel_has_gpu_reset(gt)) {
 		if (i915_modparams.reset)
-			dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
+			drm_err(&gt->i915->drm, "GPU reset not supported\n");
 		else
 			drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
 		goto error;
@@ -1049,7 +1049,7 @@ void intel_gt_reset(struct intel_gt *gt,
 	intel_runtime_pm_disable_interrupts(gt->i915);

 	if (do_reset(gt, stalled_mask)) {
-		dev_err(gt->i915->drm.dev, "Failed to reset chip\n");
+		drm_err(&gt->i915->drm, "Failed to reset chip\n");
 		goto taint;
 	}

@@ -1111,7 +1111,7 @@ static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
 /**
 * intel_engine_reset - reset GPU engine to recover from a hang
 * @engine: engine to reset
- * @msg: reason for GPU reset; or NULL for no dev_notice()
+ * @msg: reason for GPU reset; or NULL for no drm_notice()
 *
 * Reset a specific GPU engine. Useful if a hang is detected.
 * Returns zero on successful reset or otherwise an error code.
@@ -1136,7 +1136,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
 	reset_prepare_engine(engine);

 	if (msg)
-		dev_notice(engine->i915->drm.dev,
+		drm_notice(&engine->i915->drm,
 			   "Resetting %s for %s\n", engine->name, msg);
 	atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);

@@ -1381,7 +1381,7 @@ static void intel_wedge_me(struct work_struct *work)
 {
 	struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);

-	dev_err(w->gt->i915->drm.dev,
+	drm_err(&w->gt->i915->drm,
 		"%s timed out, cancelling all in-flight rendering.\n",
 		w->name);
 	intel_gt_set_wedged(w->gt);
@@ -88,6 +88,8 @@ static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
 static inline void
 assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
 {
+	unsigned int head = READ_ONCE(ring->head);
+
 	GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));

 	/*
@@ -105,8 +107,7 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
 	 * into the same cacheline as ring->head.
 	 */
 #define cacheline(a) round_down(a, CACHELINE_BYTES)
-	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
-		   tail < ring->head);
+	GEM_BUG_ON(cacheline(tail) == cacheline(head) && tail < head);
 #undef cacheline
 }
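
The assert_ring_tail_valid() change snapshots ring->head once with READ_ONCE() so both halves of the check use the same value. A worked example of the condition being rejected (the numbers are made up for illustration):

	/*
	 * With CACHELINE_BYTES == 64, head == 0x70 and tail == 0x40:
	 *
	 *   round_down(0x40, 64) == 0x40
	 *   round_down(0x70, 64) == 0x40	-> same cacheline
	 *   0x40 < 0x70			-> tail wrapped behind head
	 *
	 * i.e. the new TAIL write would land in the cacheline the CS may
	 * still be updating HEAD in -- exactly the combination the
	 * GEM_BUG_ON rejects.
	 */
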
@@ -577,7 +577,8 @@ static void flush_cs_tlb(struct intel_engine_cs *engine)
 				    RING_INSTPM(engine->mmio_base),
 				    INSTPM_SYNC_FLUSH, 0,
 				    1000))
-		DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+		drm_err(&dev_priv->drm,
+			"%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
 			engine->name);
 }

@@ -601,7 +602,8 @@ static bool stop_ring(struct intel_engine_cs *engine)
 					    MODE_IDLE,
 					    MODE_IDLE,
 					    1000)) {
-			DRM_ERROR("%s : timed out trying to stop ring\n",
+			drm_err(&dev_priv->drm,
+				"%s : timed out trying to stop ring\n",
 				engine->name);

 			/*
@@ -661,7 +663,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
 	/* WaClearRingBufHeadRegAtInit:ctg,elk */
 	if (!stop_ring(engine)) {
 		/* G45 ring initialization often fails to reset head to zero */
-		DRM_DEBUG_DRIVER("%s head not reset to zero "
+		drm_dbg(&dev_priv->drm, "%s head not reset to zero "
 			"ctl %08x head %08x tail %08x start %08x\n",
 			engine->name,
 			ENGINE_READ(engine, RING_CTL),
@@ -670,7 +672,8 @@ static int xcs_resume(struct intel_engine_cs *engine)
 			ENGINE_READ(engine, RING_START));

 		if (!stop_ring(engine)) {
-			DRM_ERROR("failed to set %s head to zero "
+			drm_err(&dev_priv->drm,
+				"failed to set %s head to zero "
 				"ctl %08x head %08x tail %08x start %08x\n",
 				engine->name,
 				ENGINE_READ(engine, RING_CTL),
@@ -719,7 +722,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
 				    RING_CTL(engine->mmio_base),
 				    RING_VALID, RING_VALID,
 				    50)) {
-		DRM_ERROR("%s initialization failed "
+		drm_err(&dev_priv->drm, "%s initialization failed "
 			  "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
 			  engine->name,
 			  ENGINE_READ(engine, RING_CTL),
@@ -81,13 +81,14 @@ static void rps_enable_interrupts(struct intel_rps *rps)
 	events = (GEN6_PM_RP_UP_THRESHOLD |
 		  GEN6_PM_RP_DOWN_THRESHOLD |
 		  GEN6_PM_RP_DOWN_TIMEOUT);
-
 	WRITE_ONCE(rps->pm_events, events);
+
 	spin_lock_irq(&gt->irq_lock);
 	gen6_gt_pm_enable_irq(gt, rps->pm_events);
 	spin_unlock_irq(&gt->irq_lock);

-	set(gt->uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, rps->cur_freq));
+	intel_uncore_write(gt->uncore,
+			   GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
 }

 static void gen6_rps_reset_interrupts(struct intel_rps *rps)
@@ -120,7 +121,9 @@ static void rps_disable_interrupts(struct intel_rps *rps)
 	struct intel_gt *gt = rps_to_gt(rps);

 	WRITE_ONCE(rps->pm_events, 0);
-	set(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
+
+	intel_uncore_write(gt->uncore,
+			   GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));

 	spin_lock_irq(&gt->irq_lock);
 	gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
@@ -183,14 +186,12 @@ static void gen5_rps_init(struct intel_rps *rps)
 	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
 	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
 		MEMMODE_FSTART_SHIFT;
-	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+	drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n",
 		fmax, fmin, fstart);

 	rps->min_freq = fmax;
 	rps->efficient_freq = fstart;
 	rps->max_freq = fmin;
-
-	rps->idle_freq = rps->min_freq;
-	rps->cur_freq = rps->idle_freq;
 }

 static unsigned long
@@ -453,7 +454,8 @@ static bool gen5_rps_enable(struct intel_rps *rps)

 	if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
 			     MEMCTL_CMD_STS) == 0, 10))
-		DRM_ERROR("stuck trying to change perf mode\n");
+		drm_err(&uncore->i915->drm,
+			"stuck trying to change perf mode\n");
 	mdelay(1);

 	gen5_rps_set(rps, rps->cur_freq);
@@ -712,8 +714,6 @@ static int rps_set(struct intel_rps *rps, u8 val, bool update)

 void intel_rps_unpark(struct intel_rps *rps)
 {
-	u8 freq;
-
 	if (!rps->enabled)
 		return;

@@ -725,9 +725,10 @@ void intel_rps_unpark(struct intel_rps *rps)

 	WRITE_ONCE(rps->active, true);

-	freq = max(rps->cur_freq, rps->efficient_freq),
-	freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit);
-	intel_rps_set(rps, freq);
+	intel_rps_set(rps,
+		      clamp(rps->cur_freq,
+			    rps->min_freq_softlimit,
+			    rps->max_freq_softlimit));

 	rps->last_adj = 0;

@@ -893,12 +894,13 @@ static void gen6_rps_init(struct intel_rps *rps)

 static bool rps_reset(struct intel_rps *rps)
 {
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+
 	/* force a reset */
 	rps->power.mode = -1;
 	rps->last_freq = -1;

 	if (rps_set(rps, rps->min_freq, true)) {
-		DRM_ERROR("Failed to reset RPS to initial values\n");
+		drm_err(&i915->drm, "Failed to reset RPS to initial values\n");
 		return false;
 	}

@@ -1049,8 +1051,8 @@ static bool chv_rps_enable(struct intel_rps *rps)
 	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
 		      "GPLL not enabled\n");

-	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
-	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+	drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+	drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);

 	return rps_reset(rps);
 }
@@ -1147,8 +1149,8 @@ static bool vlv_rps_enable(struct intel_rps *rps)
 	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
 		      "GPLL not enabled\n");

-	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
-	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+	drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+	drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);

 	return rps_reset(rps);
 }
@@ -1305,7 +1307,8 @@ static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
 			CCK_GPLL_CLOCK_CONTROL,
 			i915->czclk_freq);

-	DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", rps->gpll_ref_freq);
+	drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
+		rps->gpll_ref_freq);
 }

 static void vlv_rps_init(struct intel_rps *rps)
@@ -1333,28 +1336,24 @@ static void vlv_rps_init(struct intel_rps *rps)
 		i915->mem_freq = 1333;
 		break;
 	}
-	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
+	drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);

 	rps->max_freq = vlv_rps_max_freq(rps);
 	rps->rp0_freq = rps->max_freq;
-	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(rps, rps->max_freq),
-			 rps->max_freq);
+	drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
+		intel_gpu_freq(rps, rps->max_freq), rps->max_freq);

 	rps->efficient_freq = vlv_rps_rpe_freq(rps);
-	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(rps, rps->efficient_freq),
-			 rps->efficient_freq);
+	drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
+		intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);

 	rps->rp1_freq = vlv_rps_guar_freq(rps);
-	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(rps, rps->rp1_freq),
-			 rps->rp1_freq);
+	drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+		intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);

 	rps->min_freq = vlv_rps_min_freq(rps);
-	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(rps, rps->min_freq),
-			 rps->min_freq);
+	drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
+		intel_gpu_freq(rps, rps->min_freq), rps->min_freq);

 	vlv_iosf_sb_put(i915,
 			BIT(VLV_IOSF_SB_PUNIT) |
@@ -1384,28 +1383,24 @@ static void chv_rps_init(struct intel_rps *rps)
 		i915->mem_freq = 1600;
 		break;
 	}
-	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
+	drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);

 	rps->max_freq = chv_rps_max_freq(rps);
 	rps->rp0_freq = rps->max_freq;
-	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(rps, rps->max_freq),
-			 rps->max_freq);
+	drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
+		intel_gpu_freq(rps, rps->max_freq), rps->max_freq);

 	rps->efficient_freq = chv_rps_rpe_freq(rps);
-	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(rps, rps->efficient_freq),
-			 rps->efficient_freq);
+	drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
+		intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);

 	rps->rp1_freq = chv_rps_guar_freq(rps);
-	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(rps, rps->rp1_freq),
-			 rps->rp1_freq);
+	drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n",
+		intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);

 	rps->min_freq = chv_rps_min_freq(rps);
-	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
-			 intel_gpu_freq(rps, rps->min_freq),
-			 rps->min_freq);
+	drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
+		intel_gpu_freq(rps, rps->min_freq), rps->min_freq);

 	vlv_iosf_sb_put(i915,
 			BIT(VLV_IOSF_SB_PUNIT) |
@@ -1468,6 +1463,7 @@ static void rps_work(struct work_struct *work)
 {
 	struct intel_rps *rps = container_of(work, typeof(*rps), work);
 	struct intel_gt *gt = rps_to_gt(rps);
+	struct drm_i915_private *i915 = rps_to_i915(rps);
 	bool client_boost = false;
 	int new_freq, adj, min, max;
 	u32 pm_iir = 0;
@@ -1543,7 +1539,7 @@ static void rps_work(struct work_struct *work)
 	new_freq = clamp_t(int, new_freq, min, max);

 	if (intel_rps_set(rps, new_freq)) {
-		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
+		drm_dbg(&i915->drm, "Failed to set new GPU frequency\n");
 		rps->last_adj = 0;
 	}

@@ -1665,7 +1661,8 @@ void intel_rps_init(struct intel_rps *rps)
 		sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
 				       &params, NULL);
 		if (params & BIT(31)) { /* OC supported */
-			DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
+			drm_dbg(&i915->drm,
+				"Overclocking supported, max: %dMHz, overclock: %dMHz\n",
 				(rps->max_freq & 0xff) * 50,
 				(params & 0xff) * 50);
 			rps->max_freq = params & 0xff;
@@ -1675,7 +1672,9 @@ void intel_rps_init(struct intel_rps *rps)
 	/* Finally allow us to boost to max by default */
 	rps->boost_freq = rps->max_freq;
 	rps->idle_freq = rps->min_freq;
-	rps->cur_freq = rps->idle_freq;

+	/* Start in the middle, from here we will autotune based on workload */
+	rps->cur_freq = rps->efficient_freq;
+
 	rps->pm_intrmsk_mbz = 0;

@@ -1927,3 +1926,7 @@ bool i915_gpu_turbo_disable(void)
 	return ret;
 }
 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_rps.c"
+#endif
@@ -65,7 +65,6 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
 {
 	const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
 	bool subslice_pg = sseu->has_subslice_pg;
-	struct intel_sseu ctx_sseu;
 	u8 slices, subslices;
 	u32 rpcs = 0;
 
@@ -78,31 +77,13 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
 
 	/*
 	 * If i915/perf is active, we want a stable powergating configuration
-	 * on the system.
-	 *
-	 * We could choose full enablement, but on ICL we know there are use
-	 * cases which disable slices for functional, apart for performance
-	 * reasons. So in this case we select a known stable subset.
+	 * on the system. Use the configuration pinned by i915/perf.
 	 */
-	if (!i915->perf.exclusive_stream) {
-		ctx_sseu = *req_sseu;
-	} else {
-		ctx_sseu = intel_sseu_from_device_info(sseu);
-
-		if (IS_GEN(i915, 11)) {
-			/*
-			 * We only need subslice count so it doesn't matter
-			 * which ones we select - just turn off low bits in the
-			 * amount of half of all available subslices per slice.
-			 */
-			ctx_sseu.subslice_mask =
-				~(~0 << (hweight8(ctx_sseu.subslice_mask) / 2));
-			ctx_sseu.slice_mask = 0x1;
-		}
-	}
+	if (i915->perf.exclusive_stream)
+		req_sseu = &i915->perf.sseu;
 
-	slices = hweight8(ctx_sseu.slice_mask);
-	subslices = hweight8(ctx_sseu.subslice_mask);
+	slices = hweight8(req_sseu->slice_mask);
+	subslices = hweight8(req_sseu->subslice_mask);
 
 	/*
 	 * Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits
@@ -175,13 +156,13 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
 	if (sseu->has_eu_pg) {
 		u32 val;
 
-		val = ctx_sseu.min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
+		val = req_sseu->min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
 		GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
 		val &= GEN8_RPCS_EU_MIN_MASK;
 
 		rpcs |= val;
 
-		val = ctx_sseu.max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
+		val = req_sseu->max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
 		GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
 		val &= GEN8_RPCS_EU_MAX_MASK;
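This hunk collapses the perf special-case: instead of mutating a local copy of the context's sseu, the code now simply points req_sseu at the perf-pinned configuration and derives the slice/subslice counts from it with hweight8(). A standalone sketch of that counting-and-packing step; popcount8() stands in for the kernel's hweight8() and the bit layout is illustrative, not the exact GEN8_R_PWR_CLK_STATE format:

/* Standalone sketch of deriving RPCS-style count fields from enable
 * masks. The shifts below are hypothetical placeholders.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int popcount8(uint8_t v)
{
	unsigned int n = 0;

	for (; v; v &= v - 1)	/* clear lowest set bit per iteration */
		n++;
	return n;
}

int main(void)
{
	uint8_t slice_mask = 0x1;	/* one slice enabled */
	uint8_t subslice_mask = 0x3f;	/* six subslices enabled */
	uint32_t rpcs = 0;

	rpcs |= popcount8(slice_mask) << 3;	/* hypothetical slice-count field */
	rpcs |= popcount8(subslice_mask) << 8;	/* hypothetical subslice-count field */

	printf("rpcs = 0x%08x\n", rpcs);
	return 0;
}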
@@ -119,6 +119,15 @@ static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline)
 	spin_unlock_irqrestore(&gt->hwsp_lock, flags);
 }
 
+static void __rcu_cacheline_free(struct rcu_head *rcu)
+{
+	struct intel_timeline_cacheline *cl =
+		container_of(rcu, typeof(*cl), rcu);
+
+	i915_active_fini(&cl->active);
+	kfree(cl);
+}
+
 static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
 {
 	GEM_BUG_ON(!i915_active_is_idle(&cl->active));
@@ -127,8 +136,7 @@ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
 	i915_vma_put(cl->hwsp->vma);
 	__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
 
-	i915_active_fini(&cl->active);
-	kfree_rcu(cl, rcu);
+	call_rcu(&cl->rcu, __rcu_cacheline_free);
 }
 
 __i915_active_call
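This is the "Mark timeline->cacheline as destroyed after rcu grace period" fix from the changelog: kfree_rcu() can only free the memory, so i915_active_fini() used to run immediately, while RCU readers could still observe the cacheline. Switching to call_rcu() defers the whole destructor past the grace period. A generic kernel-style sketch of the pattern; the names here are illustrative, not i915 API:

/* Kernel-style sketch: deferring a destructor, not just kfree(),
 * past an RCU grace period.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct widget {
	struct rcu_head rcu;
	/* ...state that readers may touch under rcu_read_lock()... */
};

static void widget_free_rcu(struct rcu_head *rcu)
{
	struct widget *w = container_of(rcu, struct widget, rcu);

	/* Safe: no RCU reader can still hold a reference to w here,
	 * so teardown that must not race readers belongs in this
	 * callback, followed by the kfree().
	 */
	kfree(w);
}

static void widget_release(struct widget *w)
{
	/* kfree_rcu(w, rcu) would free w after the grace period, but
	 * any finalizer would have to run *now*, racing readers;
	 * call_rcu() runs the whole destructor after the grace period.
	 */
	call_rcu(&w->rcu, widget_free_rcu);
}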
@@ -837,7 +837,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
 			  intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) &
 			  GEN10_L3BANK_MASK;
 
-		DRM_DEBUG_DRIVER("L3 fuse = %x\n", l3_fuse);
+		drm_dbg(&i915->drm, "L3 fuse = %x\n", l3_fuse);
 		l3_en = ~(l3_fuse << GEN10_L3BANK_PAIR_COUNT | l3_fuse);
 	} else {
 		l3_en = ~0;
@@ -846,7 +846,8 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
 	slice = fls(sseu->slice_mask) - 1;
 	subslice = fls(l3_en & intel_sseu_get_subslices(sseu, slice));
 	if (!subslice) {
-		DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n",
+		drm_warn(&i915->drm,
+			 "No common index found between subslice mask %x and L3 bank mask %x!\n",
 			 intel_sseu_get_subslices(sseu, slice), l3_en);
 		subslice = fls(l3_en);
 		drm_WARN_ON(&i915->drm, !subslice);
@@ -861,7 +862,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
 		mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
 	}
 
-	DRM_DEBUG_DRIVER("MCR slice/subslice = %x\n", mcr);
+	drm_dbg(&i915->drm, "MCR slice/subslice = %x\n", mcr);
 
 	wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
 }
@@ -942,6 +943,8 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 static void
 tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
+	wa_init_mcr(i915, wal);
+
 	/* Wa_1409420604:tgl */
 	if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
 		wa_write_or(wal,
@@ -1379,12 +1382,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 			    GEN7_FF_THREAD_MODE,
 			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
 
-		/*
-		 * Wa_1409085225:tgl
-		 * Wa_14010229206:tgl
-		 */
-		wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
-
 		/* Wa_1408615072:tgl */
 		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
 			    VSUNIT_CLKGATE_DIS_TGL);
@@ -1402,6 +1399,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 		wa_masked_en(wal,
 			     GEN9_CS_DEBUG_MODE1,
 			     FF_DOP_CLOCK_GATE_DISABLE);
+
+		/*
+		 * Wa_1409085225:tgl
+		 * Wa_14010229206:tgl
+		 */
+		wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
 	}
 
 	if (IS_GEN(i915, 11)) {
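Several of the wa_masked_en() entries above target "masked" registers, where the top 16 bits of a write select which of the bottom 16 bits actually change; this lets a workaround set a bit without a read-modify-write cycle. A minimal sketch of that convention, mirroring i915's _MASKED_BIT_ENABLE-style helpers (names here are illustrative):

/* Sketch of the masked-register write convention assumed by
 * wa_masked_en(): bits 31:16 act as a write-enable mask for bits 15:0.
 */
#include <stdint.h>

static inline uint32_t masked_bit_enable(uint32_t bit)
{
	return (bit << 16) | bit;	/* select 'bit' and write it as 1 */
}

static inline uint32_t masked_bit_disable(uint32_t bit)
{
	return bit << 16;		/* select 'bit' and write it as 0 */
}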
@@ -7,6 +7,7 @@
 
 #include "selftest_llc.h"
 #include "selftest_rc6.h"
+#include "selftest_rps.h"
 
 static int live_gt_resume(void *arg)
 {
@@ -52,6 +53,7 @@ int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(live_rc6_manual),
+		SUBTEST(live_rps_interrupt),
 		SUBTEST(live_gt_resume),
 	};
@@ -68,26 +68,41 @@ static void engine_heartbeat_enable(struct intel_engine_cs *engine,
 	engine->props.heartbeat_interval_ms = saved;
 }
 
+static bool is_active(struct i915_request *rq)
+{
+	if (i915_request_is_active(rq))
+		return true;
+
+	if (i915_request_on_hold(rq))
+		return true;
+
+	if (i915_request_started(rq))
+		return true;
+
+	return false;
+}
+
 static int wait_for_submit(struct intel_engine_cs *engine,
 			   struct i915_request *rq,
 			   unsigned long timeout)
 {
 	timeout += jiffies;
 	do {
-		cond_resched();
-		intel_engine_flush_submission(engine);
-
-		if (READ_ONCE(engine->execlists.pending[0]))
-			continue;
-
-		if (i915_request_is_active(rq))
-			return 0;
+		bool done = time_after(jiffies, timeout);
 
-		if (i915_request_started(rq)) /* that was quick! */
+		if (i915_request_completed(rq)) /* that was quick! */
 			return 0;
-	} while (time_before(jiffies, timeout));
 
-	return -ETIME;
+		/* Wait until the HW has acknowledged the submission (or err) */
+		intel_engine_flush_submission(engine);
+		if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
+			return 0;
+
+		if (done)
+			return -ETIME;
+
+		cond_resched();
+	} while (1);
 }
 
 static int wait_for_reset(struct intel_engine_cs *engine,
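Note the ordering in the rewritten wait_for_submit(): the deadline is sampled into done *before* the state checks, so a submission that lands just after the timeout expires is still reported as success rather than -ETIME. The same race-free shape in a generic poll helper (illustrative, not i915 API):

/* Generic sketch of the "sample deadline first, then test" poll shape
 * used by wait_for_submit(); cond() and now() are placeholders.
 */
#include <stdbool.h>
#include <errno.h>

static int poll_until(bool (*cond)(void *), void *data,
		      unsigned long (*now)(void), unsigned long deadline)
{
	do {
		/* Sample the clock before testing the condition... */
		bool done = now() > deadline;

		/* ...so a success observed after the deadline still wins. */
		if (cond(data))
			return 0;

		if (done)
			return -ETIME;
	} while (1);
}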
@@ -634,7 +649,7 @@ static int live_error_interrupt(void *arg)
 				       error_repr(p->error[i]));
 
 			if (!i915_request_started(client[i])) {
-				pr_debug("%s: %s request not stated!\n",
+				pr_err("%s: %s request not started!\n",
 				       engine->name,
 				       error_repr(p->error[i]));
 				err = -ETIME;
@@ -644,9 +659,10 @@ static int live_error_interrupt(void *arg)
 			/* Kick the tasklet to process the error */
 			intel_engine_flush_submission(engine);
 			if (client[i]->fence.error != p->error[i]) {
-				pr_err("%s: %s request completed with wrong error code: %d\n",
+				pr_err("%s: %s request (%s) with wrong error code: %d\n",
 				       engine->name,
 				       error_repr(p->error[i]),
+				       i915_request_completed(client[i]) ? "completed" : "running",
 				       client[i]->fence.error);
 				err = -EINVAL;
 				goto out;
@@ -929,7 +945,7 @@ create_rewinder(struct intel_context *ce,
 		goto err;
 	}
 
-	cs = intel_ring_begin(rq, 10);
+	cs = intel_ring_begin(rq, 14);
 	if (IS_ERR(cs)) {
 		err = PTR_ERR(cs);
 		goto err;
@@ -941,8 +957,8 @@ create_rewinder(struct intel_context *ce,
 	*cs++ = MI_SEMAPHORE_WAIT |
 		MI_SEMAPHORE_GLOBAL_GTT |
 		MI_SEMAPHORE_POLL |
-		MI_SEMAPHORE_SAD_NEQ_SDD;
-	*cs++ = 0;
+		MI_SEMAPHORE_SAD_GTE_SDD;
+	*cs++ = idx;
 	*cs++ = offset;
 	*cs++ = 0;
 
@@ -951,6 +967,11 @@ create_rewinder(struct intel_context *ce,
 	*cs++ = offset + idx * sizeof(u32);
 	*cs++ = 0;
 
+	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+	*cs++ = offset;
+	*cs++ = 0;
+	*cs++ = idx + 1;
+
 	intel_ring_advance(rq, cs);
 
 	rq->sched.attr.priority = I915_PRIORITY_MASK;
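With SAD_GTE_SDD the rewinder batch now blocks until the dword at offset reaches idx, then (via the added MI_STORE_DWORD_IMM) bumps it to idx + 1, handing the baton to the next request. A CPU-side analogue of that wait-then-advance step, for illustration only; the real wait is executed by the command streamer against a GGTT address:

/* CPU-side sketch of the GPU semaphore protocol emitted above:
 * wait for *sema >= idx (MI_SEMAPHORE_SAD_GTE_SDD), then store
 * idx + 1 (MI_STORE_DWORD_IMM) so the next waiter can proceed.
 */
#include <stdatomic.h>
#include <stdint.h>

static void sema_wait_gte_then_bump(_Atomic uint32_t *sema, uint32_t idx)
{
	while (atomic_load_explicit(sema, memory_order_acquire) < idx)
		;	/* the CS polls memory instead of spinning a CPU */

	atomic_store_explicit(sema, idx + 1, memory_order_release);
}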
@@ -984,7 +1005,7 @@ static int live_timeslice_rewind(void *arg)
 
 	for_each_engine(engine, gt, id) {
 		enum { A1, A2, B1 };
-		enum { X = 1, Y, Z };
+		enum { X = 1, Z, Y };
 		struct i915_request *rq[3] = {};
 		struct intel_context *ce;
 		unsigned long heartbeat;
@@ -1017,13 +1038,13 @@ static int live_timeslice_rewind(void *arg)
 			goto err;
 		}
 
-		rq[0] = create_rewinder(ce, NULL, slot, 1);
+		rq[0] = create_rewinder(ce, NULL, slot, X);
 		if (IS_ERR(rq[0])) {
 			intel_context_put(ce);
 			goto err;
 		}
 
-		rq[1] = create_rewinder(ce, NULL, slot, 2);
+		rq[1] = create_rewinder(ce, NULL, slot, Y);
 		intel_context_put(ce);
 		if (IS_ERR(rq[1]))
 			goto err;
@@ -1041,7 +1062,7 @@ static int live_timeslice_rewind(void *arg)
 			goto err;
 		}
 
-		rq[2] = create_rewinder(ce, rq[0], slot, 3);
+		rq[2] = create_rewinder(ce, rq[0], slot, Z);
 		intel_context_put(ce);
 		if (IS_ERR(rq[2]))
 			goto err;
@@ -1052,18 +1073,14 @@ static int live_timeslice_rewind(void *arg)
 			       engine->name);
 			goto err;
 		}
-		GEM_BUG_ON(!timer_pending(&engine->execlists.timer));
 
 		/* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
 		GEM_BUG_ON(!i915_request_is_active(rq[A1]));
-		GEM_BUG_ON(!i915_request_is_active(rq[A2]));
 		GEM_BUG_ON(!i915_request_is_active(rq[B1]));
-
-		/* Wait for the timeslice to kick in */
-		del_timer(&engine->execlists.timer);
-		tasklet_hi_schedule(&engine->execlists.tasklet);
-		intel_engine_flush_submission(engine);
-
+		if (i915_request_is_active(rq[A2])) { /* semaphore yielded! */
+			/* Wait for the timeslice to kick in */
+			del_timer(&engine->execlists.timer);
+			tasklet_hi_schedule(&engine->execlists.tasklet);
+			intel_engine_flush_submission(engine);
+		}
 		/* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
 		GEM_BUG_ON(!i915_request_is_active(rq[A1]));
 		GEM_BUG_ON(!i915_request_is_active(rq[B1]));
@@ -1228,8 +1245,14 @@ static int live_timeslice_queue(void *arg)
 		if (err)
 			goto err_rq;
 
+		/* Wait until we ack the release_queue and start timeslicing */
+		do {
+			cond_resched();
+			intel_engine_flush_submission(engine);
+		} while (READ_ONCE(engine->execlists.pending[0]));
+
 		if (!READ_ONCE(engine->execlists.timer.expires) &&
 		    execlists_active(&engine->execlists) == rq &&
 		    !i915_request_completed(rq)) {
 			struct drm_printer p =
 				drm_info_printer(gt->i915->drm.dev);
@@ -2030,6 +2053,9 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
 	if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
 		return 0;
 
+	if (!intel_has_reset_engine(arg->engine->gt))
+		return 0;
+
 	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
 	rq = spinner_create_request(&arg->a.spin,
 				    arg->a.ctx, arg->engine,
@@ -2630,7 +2656,7 @@ static int create_gang(struct intel_engine_cs *engine,
 	if (IS_ERR(rq))
 		goto err_obj;
 
-	rq->batch = vma;
+	rq->batch = i915_vma_get(vma);
 	i915_request_get(rq);
 
 	i915_vma_lock(vma);
@@ -2654,6 +2680,7 @@ static int create_gang(struct intel_engine_cs *engine,
 	return 0;
 
 err_rq:
+	i915_vma_put(rq->batch);
 	i915_request_put(rq);
 err_obj:
 	i915_gem_object_put(obj);
@@ -2750,6 +2777,7 @@ static int live_preempt_gang(void *arg)
 			err = -ETIME;
 		}
 
+		i915_vma_put(rq->batch);
 		i915_request_put(rq);
 		rq = n;
 	}
@@ -5155,7 +5183,6 @@ static int compare_isolation(struct intel_engine_cs *engine,
 			       A[0][x], B[0][x], B[1][x],
 			       poison, lrc[dw + 1]);
 			err = -EINVAL;
-			break;
 		}
 	}
 	dw += 2;
@@ -5294,6 +5321,7 @@ static int live_lrc_isolation(void *arg)
 		0xffffffff,
 		0xffff0000,
 	};
+	int err = 0;
 
 	/*
 	 * Our goal is try and verify that per-context state cannot be
@@ -5304,7 +5332,6 @@ static int live_lrc_isolation(void *arg)
 	 */
 
 	for_each_engine(engine, gt, id) {
-		int err = 0;
 		int i;
 
 		/* Just don't even ask */
@@ -5315,23 +5342,25 @@ static int live_lrc_isolation(void *arg)
 		intel_engine_pm_get(engine);
 		if (engine->pinned_default_state) {
 			for (i = 0; i < ARRAY_SIZE(poison); i++) {
-				err = __lrc_isolation(engine, poison[i]);
-				if (err)
-					break;
+				int result;
 
-				err = __lrc_isolation(engine, ~poison[i]);
-				if (err)
-					break;
+				result = __lrc_isolation(engine, poison[i]);
+				if (result && !err)
+					err = result;
+
+				result = __lrc_isolation(engine, ~poison[i]);
+				if (result && !err)
+					err = result;
 			}
 		}
 		intel_engine_pm_put(engine);
-		if (igt_flush_test(gt->i915))
+		if (igt_flush_test(gt->i915)) {
 			err = -EIO;
-		if (err)
-			return err;
+			break;
+		}
 	}
 
-	return 0;
+	return err;
 }
 
 static void garbage_reset(struct intel_engine_cs *engine,
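The live_lrc_isolation() rework above switches from "bail on the first failure" to "record the first error but keep exercising every poison pattern", so one run surfaces all failing combinations. The shape of that pattern in isolation (illustrative, not i915 API):

/* Sketch: remember the first error but keep running all cases,
 * so a single CI run reports every failing input.
 */
static int run_all_cases(int (*test)(unsigned int), unsigned int n)
{
	int err = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		int result = test(i);

		if (result && !err)
			err = result;	/* keep only the first failure */
	}

	return err;
}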
@@ -12,6 +12,22 @@
 
 #include "selftests/i915_random.h"
 
+static u64 energy_uJ(struct intel_rc6 *rc6)
+{
+	unsigned long long power;
+	u32 units;
+
+	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
+		return 0;
+
+	units = (power & 0x1f00) >> 8;
+
+	if (rdmsrl_safe(MSR_PP1_ENERGY_STATUS, &power))
+		return 0;
+
+	return (1000000 * power) >> units; /* convert to uJ */
+}
+
 static u64 rc6_residency(struct intel_rc6 *rc6)
 {
 	u64 result;
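energy_uJ() reads the RAPL energy-status-unit field (bits 12:8 of MSR_RAPL_POWER_UNIT); the PP1 (graphics) energy counter then ticks in units of 1/2^ESU joules. Worked example under the common ESU of 14: one tick is 2^-14 J, roughly 61 uJ, so microjoules = raw * 10^6 >> 14. The same arithmetic restated as a userspace sketch:

/* Userspace sketch of the RAPL unit conversion used by energy_uJ().
 * Raw counter ticks are 1/2^esu joules; esu comes from bits 12:8 of
 * MSR_RAPL_POWER_UNIT (often 14, i.e. ~61 uJ per tick).
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t rapl_ticks_to_uJ(uint64_t raw, unsigned int esu)
{
	return (1000000ull * raw) >> esu;	/* raw / 2^esu joules, in uJ */
}

int main(void)
{
	/* 16384 ticks at esu=14 is exactly 1 J = 1,000,000 uJ */
	printf("%llu uJ\n",
	       (unsigned long long)rapl_ticks_to_uJ(16384, 14));
	return 0;
}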
@@ -31,7 +47,9 @@ int live_rc6_manual(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct intel_rc6 *rc6 = &gt->rc6;
+	u64 rc0_power, rc6_power;
 	intel_wakeref_t wakeref;
+	ktime_t dt;
 	u64 res[2];
 	int err = 0;
 
@@ -54,7 +72,12 @@ int live_rc6_manual(void *arg)
 	msleep(1); /* wakeup is not immediate, takes about 100us on icl */
 
 	res[0] = rc6_residency(rc6);
+	dt = ktime_get();
+	rc0_power = energy_uJ(rc6);
 	msleep(250);
+	rc0_power = energy_uJ(rc6) - rc0_power;
+	dt = ktime_sub(ktime_get(), dt);
 	res[1] = rc6_residency(rc6);
 	if ((res[1] - res[0]) >> 10) {
 		pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n",
@@ -63,13 +86,24 @@ int live_rc6_manual(void *arg)
 		goto out_unlock;
 	}
 
+	rc0_power = div64_u64(NSEC_PER_SEC * rc0_power, ktime_to_ns(dt));
+	if (!rc0_power) {
+		pr_err("No power measured while in RC0\n");
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
 	/* Manually enter RC6 */
 	intel_rc6_park(rc6);
 
 	res[0] = rc6_residency(rc6);
+	intel_uncore_forcewake_flush(rc6_to_uncore(rc6), FORCEWAKE_ALL);
+	dt = ktime_get();
+	rc6_power = energy_uJ(rc6);
 	msleep(100);
+	rc6_power = energy_uJ(rc6) - rc6_power;
+	dt = ktime_sub(ktime_get(), dt);
 	res[1] = rc6_residency(rc6);
 
 	if (res[1] == res[0]) {
 		pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x, residency=%lld\n",
 		       intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE),
@@ -78,6 +112,15 @@ int live_rc6_manual(void *arg)
 		err = -EINVAL;
 	}
 
+	rc6_power = div64_u64(NSEC_PER_SEC * rc6_power, ktime_to_ns(dt));
+	pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n",
+		rc0_power, rc6_power);
+	if (2 * rc6_power > rc0_power) {
+		pr_err("GPU leaked energy while in RC6!\n");
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
 	/* Restore what should have been the original state! */
 	intel_rc6_unpark(rc6);
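The div64_u64() lines convert an energy delta into average power: the delta is in uJ and the elapsed time in ns, so (10^9 ns/s * uJ) / ns yields uJ/s, i.e. uW. For example, 50,000 uJ burned over 250 ms works out to 200,000 uW (0.2 W); the test then demands that RC6 power be less than half of RC0 power. The same arithmetic in plain C:

/* Sketch of the power computation in live_rc6_manual():
 * energy delta (uJ) over a time delta (ns) -> average power in uW.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ull

static uint64_t avg_power_uW(uint64_t energy_uJ, uint64_t dt_ns)
{
	return NSEC_PER_SEC * energy_uJ / dt_ns;	/* uJ/s == uW */
}

int main(void)
{
	/* 50,000 uJ over 250 ms -> 200,000 uW (0.2 W) */
	printf("%llu uW\n",
	       (unsigned long long)avg_power_uW(50000, 250000000ull));
	return 0;
}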
drivers/gpu/drm/i915/gt/selftest_rps.c (new file, 223 lines)
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "intel_engine_pm.h"
+#include "intel_gt_pm.h"
+#include "intel_rc6.h"
+#include "selftest_rps.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_spinner.h"
+
+static void dummy_rps_work(struct work_struct *wrk)
+{
+}
+
+static int __rps_up_interrupt(struct intel_rps *rps,
+			      struct intel_engine_cs *engine,
+			      struct igt_spinner *spin)
+{
+	struct intel_uncore *uncore = engine->uncore;
+	struct i915_request *rq;
+	u32 timeout;
+
+	if (!intel_engine_can_store_dword(engine))
+		return 0;
+
+	intel_gt_pm_wait_for_idle(engine->gt);
+	GEM_BUG_ON(rps->active);
+
+	rps->pm_iir = 0;
+	rps->cur_freq = rps->min_freq;
+
+	rq = igt_spinner_create_request(spin, engine->kernel_context, MI_NOOP);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+
+	if (!igt_wait_for_spinner(spin, rq)) {
+		pr_err("%s: RPS spinner did not start\n",
+		       engine->name);
+		i915_request_put(rq);
+		intel_gt_set_wedged(engine->gt);
+		return -EIO;
+	}
+
+	if (!rps->active) {
+		pr_err("%s: RPS not enabled on starting spinner\n",
+		       engine->name);
+		igt_spinner_end(spin);
+		i915_request_put(rq);
+		return -EINVAL;
+	}
+
+	if (!(rps->pm_events & GEN6_PM_RP_UP_THRESHOLD)) {
+		pr_err("%s: RPS did not register UP interrupt\n",
+		       engine->name);
+		i915_request_put(rq);
+		return -EINVAL;
+	}
+
+	if (rps->last_freq != rps->min_freq) {
+		pr_err("%s: RPS did not program min frequency\n",
+		       engine->name);
+		i915_request_put(rq);
+		return -EINVAL;
+	}
+
+	timeout = intel_uncore_read(uncore, GEN6_RP_UP_EI);
+	timeout = GT_PM_INTERVAL_TO_US(engine->i915, timeout);
+
+	usleep_range(2 * timeout, 3 * timeout);
+	GEM_BUG_ON(i915_request_completed(rq));
+
+	igt_spinner_end(spin);
+	i915_request_put(rq);
+
+	if (rps->cur_freq != rps->min_freq) {
+		pr_err("%s: Frequency unexpectedly changed [up], now %d!\n",
+		       engine->name, intel_rps_read_actual_frequency(rps));
+		return -EINVAL;
+	}
+
+	if (!(rps->pm_iir & GEN6_PM_RP_UP_THRESHOLD)) {
+		pr_err("%s: UP interrupt not recorded for spinner, pm_iir:%x, prev_up:%x, up_threshold:%x, up_ei:%x\n",
+		       engine->name, rps->pm_iir,
+		       intel_uncore_read(uncore, GEN6_RP_PREV_UP),
+		       intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD),
+		       intel_uncore_read(uncore, GEN6_RP_UP_EI));
+		return -EINVAL;
+	}
+
+	intel_gt_pm_wait_for_idle(engine->gt);
+	return 0;
+}
+
+static int __rps_down_interrupt(struct intel_rps *rps,
+				struct intel_engine_cs *engine)
+{
+	struct intel_uncore *uncore = engine->uncore;
+	u32 timeout;
+
+	mutex_lock(&rps->lock);
+	GEM_BUG_ON(!rps->active);
+	intel_rps_set(rps, rps->max_freq);
+	mutex_unlock(&rps->lock);
+
+	if (!(rps->pm_events & GEN6_PM_RP_DOWN_THRESHOLD)) {
+		pr_err("%s: RPS did not register DOWN interrupt\n",
+		       engine->name);
+		return -EINVAL;
+	}
+
+	if (rps->last_freq != rps->max_freq) {
+		pr_err("%s: RPS did not program max frequency\n",
+		       engine->name);
+		return -EINVAL;
+	}
+
+	timeout = intel_uncore_read(uncore, GEN6_RP_DOWN_EI);
+	timeout = GT_PM_INTERVAL_TO_US(engine->i915, timeout);
+
+	/* Flush any previous EI */
+	usleep_range(timeout, 2 * timeout);
+
+	/* Reset the interrupt status */
+	rps_disable_interrupts(rps);
+	GEM_BUG_ON(rps->pm_iir);
+	rps_enable_interrupts(rps);
+
+	/* And then wait for the timeout, for real this time */
+	usleep_range(2 * timeout, 3 * timeout);
+
+	if (rps->cur_freq != rps->max_freq) {
+		pr_err("%s: Frequency unexpectedly changed [down], now %d!\n",
+		       engine->name,
+		       intel_rps_read_actual_frequency(rps));
+		return -EINVAL;
+	}
+
+	if (!(rps->pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT))) {
+		pr_err("%s: DOWN interrupt not recorded for idle, pm_iir:%x, prev_down:%x, down_threshold:%x, down_ei:%x [prev_up:%x, up_threshold:%x, up_ei:%x]\n",
+		       engine->name, rps->pm_iir,
+		       intel_uncore_read(uncore, GEN6_RP_PREV_DOWN),
+		       intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD),
+		       intel_uncore_read(uncore, GEN6_RP_DOWN_EI),
+		       intel_uncore_read(uncore, GEN6_RP_PREV_UP),
+		       intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD),
+		       intel_uncore_read(uncore, GEN6_RP_UP_EI));
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int live_rps_interrupt(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_rps *rps = &gt->rps;
+	void (*saved_work)(struct work_struct *wrk);
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	struct igt_spinner spin;
+	u32 pm_events;
+	int err = 0;
+
+	/*
+	 * First, let's check whether or not we are receiving interrupts.
+	 */
+
+	if (!rps->enabled || rps->max_freq <= rps->min_freq)
+		return 0;
+
+	intel_gt_pm_get(gt);
+	pm_events = rps->pm_events;
+	intel_gt_pm_put(gt);
+	if (!pm_events) {
+		pr_err("No RPS PM events registered, but RPS is enabled?\n");
+		return -ENODEV;
+	}
+
+	if (igt_spinner_init(&spin, gt))
+		return -ENOMEM;
+
+	intel_gt_pm_wait_for_idle(gt);
+	saved_work = rps->work.func;
+	rps->work.func = dummy_rps_work;
+
+	for_each_engine(engine, gt, id) {
+		/* Keep the engine busy with a spinner; expect an UP! */
+		if (pm_events & GEN6_PM_RP_UP_THRESHOLD) {
+			err = __rps_up_interrupt(rps, engine, &spin);
+			if (err)
+				goto out;
+		}
+
+		/* Keep the engine awake but idle and check for DOWN */
+		if (pm_events & GEN6_PM_RP_DOWN_THRESHOLD) {
+			intel_engine_pm_get(engine);
+			intel_rc6_disable(&gt->rc6);
+
+			err = __rps_down_interrupt(rps, engine);
+
+			intel_rc6_enable(&gt->rc6);
+			intel_engine_pm_put(engine);
+			if (err)
+				goto out;
+		}
+	}
+
+out:
+	if (igt_flush_test(gt->i915))
+		err = -EIO;
+
+	igt_spinner_fini(&spin);
+
+	intel_gt_pm_wait_for_idle(gt);
+	rps->work.func = saved_work;
+
+	return err;
+}
drivers/gpu/drm/i915/gt/selftest_rps.h (new file, 11 lines)
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef SELFTEST_RPS_H
+#define SELFTEST_RPS_H
+
+int live_rps_interrupt(void *arg);
+
+#endif /* SELFTEST_RPS_H */
@@ -169,7 +169,7 @@ void intel_guc_init_early(struct intel_guc *guc)
 {
 	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
 
-	intel_guc_fw_init_early(guc);
+	intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC);
 	intel_guc_ct_init_early(&guc->ct);
 	intel_guc_log_init_early(&guc->log);
 	intel_guc_submission_init_early(guc);
@@ -723,3 +723,47 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
 
 	return 0;
 }
+
+/**
+ * intel_guc_load_status - dump information about GuC load status
+ * @guc: the GuC
+ * @p: the &drm_printer
+ *
+ * Pretty printer for GuC load status.
+ */
+void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
+{
+	struct intel_gt *gt = guc_to_gt(guc);
+	struct intel_uncore *uncore = gt->uncore;
+	intel_wakeref_t wakeref;
+
+	if (!intel_guc_is_supported(guc)) {
+		drm_printf(p, "GuC not supported\n");
+		return;
+	}
+
+	if (!intel_guc_is_wanted(guc)) {
+		drm_printf(p, "GuC disabled\n");
+		return;
+	}
+
+	intel_uc_fw_dump(&guc->fw, p);
+
+	with_intel_runtime_pm(uncore->rpm, wakeref) {
+		u32 status = intel_uncore_read(uncore, GUC_STATUS);
+		u32 i;
+
+		drm_printf(p, "\nGuC status 0x%08x:\n", status);
+		drm_printf(p, "\tBootrom status = 0x%x\n",
+			   (status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
+		drm_printf(p, "\tuKernel status = 0x%x\n",
+			   (status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
+		drm_printf(p, "\tMIA Core status = 0x%x\n",
+			   (status & GS_MIA_MASK) >> GS_MIA_SHIFT);
+		drm_puts(p, "\nScratch registers:\n");
+		for (i = 0; i < 16; i++) {
+			drm_printf(p, "\t%2d: \t0x%x\n",
+				   i, intel_uncore_read(uncore, SOFT_SCRATCH(i)));
+		}
+	}
+}
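Because intel_guc_load_status() takes a drm_printer, the same dump can be routed to debugfs (as the new intel_guc_debugfs.c below does with drm_seq_file_printer()), to dmesg, or anywhere else a printer can point. A hedged usage sketch with the dmesg-backed printer from <drm/drm_print.h>; the wrapper function here is illustrative:

/* Sketch: routing the GuC status dump to the kernel log via
 * drm_info_printer(). Illustrative call site only; 'guc' must come
 * from a real intel_gt.
 */
#include <drm/drm_print.h>

static void dump_guc_to_dmesg(struct intel_guc *guc, struct device *dev)
{
	struct drm_printer p = drm_info_printer(dev);

	intel_guc_load_status(guc, &p);
}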
@@ -74,6 +74,11 @@ struct intel_guc {
 	struct mutex send_mutex;
 };
 
+static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
+{
+	return container_of(log, struct intel_guc, log);
+}
+
 static
 inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
 {
@@ -190,4 +195,6 @@ static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
 int intel_guc_reset_engine(struct intel_guc *guc,
 			   struct intel_engine_cs *engine);
 
+void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);
+
 #endif
drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c (new file, 42 lines)
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "gt/debugfs_gt.h"
+#include "intel_guc.h"
+#include "intel_guc_debugfs.h"
+#include "intel_guc_log_debugfs.h"
+
+static int guc_info_show(struct seq_file *m, void *data)
+{
+	struct intel_guc *guc = m->private;
+	struct drm_printer p = drm_seq_file_printer(m);
+
+	if (!intel_guc_is_supported(guc))
+		return -ENODEV;
+
+	intel_guc_load_status(guc, &p);
+	drm_puts(&p, "\n");
+	intel_guc_log_info(&guc->log, &p);
+
+	/* Add more as required ... */
+
+	return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_info);
+
+void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root)
+{
+	static const struct debugfs_gt_file files[] = {
+		{ "guc_info", &guc_info_fops, NULL },
+	};
+
+	if (!intel_guc_is_supported(guc))
+		return;
+
+	intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), guc);
+	intel_guc_log_debugfs_register(&guc->log, root);
+}
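DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_info) is what produces the guc_info_fops referenced in the files[] table. Its definition in gt/debugfs_gt.h follows the familiar seq_file single_open() idiom; roughly as below (a sketch from memory of that era's helper, not a verbatim copy):

/* Approximate shape of the DEFINE_GT_DEBUGFS_ATTRIBUTE() helper:
 * wires a foo_show() into debugfs via single_open(). Treat this as a
 * sketch, not the exact i915 macro body.
 */
#define DEFINE_GT_DEBUGFS_ATTRIBUTE(__name)				\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
static const struct file_operations __name ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __name ## _open,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
	.release = single_release,					\
}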
drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.h (new file, 14 lines)
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef DEBUGFS_GUC_H
+#define DEBUGFS_GUC_H
+
+struct intel_guc;
+struct dentry;
+
+void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root);
+
+#endif /* DEBUGFS_GUC_H */
@@ -13,20 +13,6 @@
 #include "intel_guc_fw.h"
 #include "i915_drv.h"
 
-/**
- * intel_guc_fw_init_early() - initializes GuC firmware struct
- * @guc: intel_guc struct
- *
- * On platforms with GuC selects firmware for uploading
- */
-void intel_guc_fw_init_early(struct intel_guc *guc)
-{
-	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
-
-	intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, HAS_GT_UC(i915),
-			       INTEL_INFO(i915)->platform, INTEL_REVID(i915));
-}
-
 static void guc_prepare_xfer(struct intel_uncore *uncore)
 {
 	u32 shim_flags = GUC_DISABLE_SRAM_INIT_TO_ZEROES |
Some files were not shown because too many files have changed in this diff.