Merge tag 'drm-intel-next-2019-04-17' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
UAPI Changes:

- uAPI "Fixes:" patch for the upcoming kernel 5.1, included here too. We have
  an Ack from the media folks (the only current user) for this late tweak.

Cross-subsystem Changes:

- ALSA: hda: Fix racy display power access (Takashi, Chris)

Driver Changes:

- DDI and MIPI-DSI clock fixes for Icelake (Vandita)
- Fix Icelake frequency change/locking (RPS) (Mika)
- Temporarily disable ppGTT read-only bit on Icelake (Mika)
- Add missing Icelake W/As (Mika)
- Enable 12 deep CSB status FIFO on Icelake (Mika)
- Inherit more Icelake code for Elkhartlake (Bob, Jani)
- Handle catastrophic error on engine reset (Mika)
- Shortcut readiness to reset check (Mika)
- Regression fix for GEM_BUSY causing us to report a mixed uabi-class request as not busy (Chris)
- Revert back to max link rate and lane count on eDP (Jani)
- Fix pipe BPP readout for BXT/GLK DSI (Ville)
- Set DP min_bpp to 8*3 for non-RGB output formats (Ville)
- Enable coarse preemption boundaries for Gen8 (Chris)
- Do not enable FEC without DSC (Ville)
- Restore correct BXT DDI latency optim setting calculation (Ville)
- Always reset context's RING registers to avoid running workload twice during reset (Chris)
- Set GPU wedged on driver unload (Janusz)
- Consolidate two similar barriers from timeline into one (Chris)
- Only reset the pinned kernel contexts on resume (Chris)
- Wakeref tracking improvements (Chris, Imre)
- Lockdep fixes for shrinker interactions (Chris)
- Bump ready tasks ahead of busywaits in prep of semaphore use (Chris)
- Huge step in splitting display code into fine grained files (Jani)
- Refactor the IRQ init/reset macros for code saving (Paulo)
- Convert IRQ initialization code to uncore MMIO access (Paulo)
- Convert workarounds code to use uncore MMIO access (Chris)
- Nuke drm_crtc_state and use intel_atomic_state instead (Manasi)
- Update SKL clock-gating WA (Radhakrishna, Ville)
- Isolate GuC reset code flow (Chris)
- Expose force_dsc_enable through debugfs (Manasi)
- Header standalone compile testing framework (Jani)
- Code cleanups to reduce driver footprint (Chris)
- PSR code fixes and cleanups (Jose)
- Sparse and kerneldoc updates (Chris)
- Suppress spurious combo PHY B warning (Ville)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190418080426.GA6409@jlahtine-desk.ger.corp.intel.com
commit b1c4f7fead
arch/x86/kernel/early-quirks.c
@@ -525,7 +525,8 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
 	INTEL_I945G_IDS(&gen3_early_ops),
 	INTEL_I945GM_IDS(&gen3_early_ops),
 	INTEL_VLV_IDS(&gen6_early_ops),
-	INTEL_PINEVIEW_IDS(&gen3_early_ops),
+	INTEL_PINEVIEW_G_IDS(&gen3_early_ops),
+	INTEL_PINEVIEW_M_IDS(&gen3_early_ops),
 	INTEL_I965G_IDS(&gen3_early_ops),
 	INTEL_G33_IDS(&gen3_early_ops),
 	INTEL_I965GM_IDS(&gen3_early_ops),
@@ -547,6 +548,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
 	INTEL_GLK_IDS(&gen9_early_ops),
 	INTEL_CNL_IDS(&gen9_early_ops),
 	INTEL_ICL_11_IDS(&gen11_early_ops),
+	INTEL_EHL_IDS(&gen11_early_ops),
 };
 
 struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0);
drivers/gpu/drm/i915/.gitignore (vendored, new file)
@@ -0,0 +1 @@
+header_test_*.c
drivers/gpu/drm/i915/Makefile
@@ -32,10 +32,13 @@ CFLAGS_intel_fbdev.o = $(call cc-disable-warning, override-init)
 subdir-ccflags-y += \
 	$(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
 
+# Extra header tests
+include $(src)/Makefile.header-test
+
 # Please keep these build lists sorted!
 
 # core driver code
-i915-y := i915_drv.o \
+i915-y += i915_drv.o \
 	  i915_irq.o \
 	  i915_memcpy.o \
 	  i915_mm.o \
@@ -57,15 +60,6 @@ i915-$(CONFIG_COMPAT) += i915_ioc32.o
 i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
 i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
 
-# Test the headers are compilable as standalone units
-i915-$(CONFIG_DRM_I915_WERROR) += \
-	test_i915_active_types_standalone.o \
-	test_i915_gem_context_types_standalone.o \
-	test_i915_timeline_types_standalone.o \
-	test_intel_context_types_standalone.o \
-	test_intel_engine_types_standalone.o \
-	test_intel_workarounds_types_standalone.o
-
 # GEM code
 i915-y += \
 	  i915_active.o \
drivers/gpu/drm/i915/Makefile.header-test (new file)
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: MIT
+# Copyright © 2019 Intel Corporation
+
+# Test the headers are compilable as standalone units
+header_test := \
+	i915_active_types.h \
+	i915_gem_context_types.h \
+	i915_priolist_types.h \
+	i915_scheduler_types.h \
+	i915_timeline_types.h \
+	intel_atomic_plane.h \
+	intel_audio.h \
+	intel_cdclk.h \
+	intel_color.h \
+	intel_connector.h \
+	intel_context_types.h \
+	intel_crt.h \
+	intel_csr.h \
+	intel_ddi.h \
+	intel_dp.h \
+	intel_dvo.h \
+	intel_engine_types.h \
+	intel_fbc.h \
+	intel_fbdev.h \
+	intel_frontbuffer.h \
+	intel_hdcp.h \
+	intel_hdmi.h \
+	intel_lspcon.h \
+	intel_lvds.h \
+	intel_panel.h \
+	intel_pipe_crc.h \
+	intel_pm.h \
+	intel_psr.h \
+	intel_sdvo.h \
+	intel_sprite.h \
+	intel_tv.h \
+	intel_workarounds_types.h
+
+quiet_cmd_header_test = HDRTEST $@
+      cmd_header_test = echo "\#include \"$(<F)\"" > $@
+
+header_test_%.c: %.h
+	$(call cmd,header_test)
+
+i915-$(CONFIG_DRM_I915_WERROR) += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
+
+clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
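Note: taken together, the pattern rule and command above mean each header listed in $(header_test) yields a one-line generated source file that does nothing but include that header, so compiling it standalone (under CONFIG_DRM_I915_WERROR) proves the header is self-contained. As a sketch, the generated file for intel_dp.h would be:

	/* header_test_intel_dp.c, emitted by cmd_header_test and removed by clean-files */
	#include "intel_dp.h"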
drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1077,6 +1077,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
 	bool index_mode = false;
 	unsigned int post_sync;
 	int ret = 0;
+	u32 hws_pga, val;
 
 	post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;
 
@@ -1100,6 +1101,15 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
 				index_mode = true;
 			ret |= cmd_address_audit(s, gma, sizeof(u64),
 					index_mode);
+			if (ret)
+				return ret;
+			if (index_mode) {
+				hws_pga = s->vgpu->hws_pga[s->ring_id];
+				gma = hws_pga + gma;
+				patch_value(s, cmd_ptr(s, 2), gma);
+				val = cmd_val(s, 1) & (~(1 << 21));
+				patch_value(s, cmd_ptr(s, 1), val);
+			}
 		}
 	}
 }
@@ -1317,8 +1327,14 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
 				info->tile_val << 10);
 	}
 
 	vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
-	intel_vgpu_trigger_virtual_event(vgpu, info->event);
+	if (info->plane == PLANE_PRIMARY)
+		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(info->pipe))++;
+
+	if (info->async_flip)
+		intel_vgpu_trigger_virtual_event(vgpu, info->event);
+	else
+		set_bit(info->event, vgpu->irq.flip_done_event[info->pipe]);
 
 	return 0;
 }
@@ -1563,6 +1579,7 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
 	unsigned long gma;
 	bool index_mode = false;
 	int ret = 0;
+	u32 hws_pga, val;
 
 	/* Check post-sync and ppgtt bit */
 	if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
@@ -1573,6 +1590,15 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
 		if (cmd_val(s, 0) & (1 << 21))
 			index_mode = true;
 		ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
+		if (ret)
+			return ret;
+		if (index_mode) {
+			hws_pga = s->vgpu->hws_pga[s->ring_id];
+			gma = hws_pga + gma;
+			patch_value(s, cmd_ptr(s, 1), gma);
+			val = cmd_val(s, 0) & (~(1 << 21));
+			patch_value(s, cmd_ptr(s, 0), val);
+		}
 	}
 	/* Check notify bit */
 	if ((cmd_val(s, 0) & (1 << 8)))
drivers/gpu/drm/i915/gvt/display.c
@@ -407,7 +407,6 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
 		if (!pipe_is_enabled(vgpu, pipe))
 			continue;
 
-		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
 		intel_vgpu_trigger_virtual_event(vgpu, event);
 	}
 
drivers/gpu/drm/i915/gvt/execlist.c
@@ -526,12 +526,13 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
-static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
+static void clean_execlist(struct intel_vgpu *vgpu,
+			   intel_engine_mask_t engine_mask)
 {
-	unsigned int tmp;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine;
 	struct intel_vgpu_submission *s = &vgpu->submission;
+	intel_engine_mask_t tmp;
 
 	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
 		kfree(s->ring_scan_buffer[engine->id]);
@@ -541,18 +542,18 @@ static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
 }
 
 static void reset_execlist(struct intel_vgpu *vgpu,
-			   unsigned long engine_mask)
+			   intel_engine_mask_t engine_mask)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine;
-	unsigned int tmp;
+	intel_engine_mask_t tmp;
 
 	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
 		init_vgpu_execlist(vgpu, engine->id);
 }
 
 static int init_execlist(struct intel_vgpu *vgpu,
-			 unsigned long engine_mask)
+			 intel_engine_mask_t engine_mask)
 {
 	reset_execlist(vgpu, engine_mask);
 	return 0;

drivers/gpu/drm/i915/gvt/execlist.h
@@ -180,6 +180,6 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
 int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
 
 void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
-		unsigned long engine_mask);
+		intel_engine_mask_t engine_mask);
 
 #endif /*_GVT_EXECLIST_H_*/
drivers/gpu/drm/i915/gvt/gtt.c
@@ -2504,6 +2504,7 @@ static void clean_spt_oos(struct intel_gvt *gvt)
 	list_for_each_safe(pos, n, &gvt->oos_page_free_list_head) {
 		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
 		list_del(&oos_page->list);
+		free_page((unsigned long)oos_page->mem);
 		kfree(oos_page);
 	}
 }
@@ -2524,6 +2525,12 @@ static int setup_spt_oos(struct intel_gvt *gvt)
 			ret = -ENOMEM;
 			goto fail;
 		}
+		oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
+		if (!oos_page->mem) {
+			ret = -ENOMEM;
+			kfree(oos_page);
+			goto fail;
+		}
 
 		INIT_LIST_HEAD(&oos_page->list);
 		INIT_LIST_HEAD(&oos_page->vm_list);
drivers/gpu/drm/i915/gvt/gtt.h
@@ -222,7 +222,7 @@ struct intel_vgpu_oos_page {
 	struct list_head list;
 	struct list_head vm_list;
 	int id;
-	unsigned char mem[I915_GTT_PAGE_SIZE];
+	void *mem;
 };
 
 #define GTT_ENTRY_NUM_IN_ONE_PAGE 512
drivers/gpu/drm/i915/gvt/gvt.h
@@ -94,7 +94,6 @@ struct intel_vgpu_fence {
 
 struct intel_vgpu_mmio {
 	void *vreg;
-	void *sreg;
 };
 
 #define INTEL_GVT_MAX_BAR_NUM 4
@@ -111,11 +110,9 @@ struct intel_vgpu_cfg_space {
 
 #define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
 
-#define INTEL_GVT_MAX_PIPE 4
-
 struct intel_vgpu_irq {
 	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
-	DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
+	DECLARE_BITMAP(flip_done_event[I915_MAX_PIPES],
 		       INTEL_GVT_EVENT_MAX);
 };
 
@@ -144,9 +141,9 @@ enum {
 
 struct intel_vgpu_submission_ops {
 	const char *name;
-	int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
-	void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
-	void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+	int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
+	void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
+	void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
 };
 
 struct intel_vgpu_submission {
@@ -449,10 +446,6 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
 	(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
 #define vgpu_vreg64(vgpu, offset) \
 	(*(u64 *)(vgpu->mmio.vreg + (offset)))
-#define vgpu_sreg_t(vgpu, reg) \
-	(*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
-#define vgpu_sreg(vgpu, offset) \
-	(*(u32 *)(vgpu->mmio.sreg + (offset)))
 
 #define for_each_active_vgpu(gvt, vgpu, id) \
 	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
@@ -488,7 +481,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
-				 unsigned int engine_mask);
+				 intel_engine_mask_t engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
drivers/gpu/drm/i915/gvt/handlers.c
@@ -311,7 +311,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
 static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 			    void *p_data, unsigned int bytes)
 {
-	unsigned int engine_mask = 0;
+	intel_engine_mask_t engine_mask = 0;
 	u32 data;
 
 	write_vreg(vgpu, offset, p_data, bytes);
@@ -750,18 +750,19 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	unsigned int index = DSPSURF_TO_PIPE(offset);
-	i915_reg_t surflive_reg = DSPSURFLIVE(index);
-	int flip_event[] = {
-		[PIPE_A] = PRIMARY_A_FLIP_DONE,
-		[PIPE_B] = PRIMARY_B_FLIP_DONE,
-		[PIPE_C] = PRIMARY_C_FLIP_DONE,
-	};
+	u32 pipe = DSPSURF_TO_PIPE(offset);
+	int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
 
 	write_vreg(vgpu, offset, p_data, bytes);
-	vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+	vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+
+	vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+
+	if (vgpu_vreg_t(vgpu, DSPCNTR(pipe)) & PLANE_CTL_ASYNC_FLIP)
+		intel_vgpu_trigger_virtual_event(vgpu, event);
+	else
+		set_bit(event, vgpu->irq.flip_done_event[pipe]);
 
-	set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
 	return 0;
 }
 
@@ -771,18 +772,42 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	unsigned int index = SPRSURF_TO_PIPE(offset);
-	i915_reg_t surflive_reg = SPRSURFLIVE(index);
-	int flip_event[] = {
-		[PIPE_A] = SPRITE_A_FLIP_DONE,
-		[PIPE_B] = SPRITE_B_FLIP_DONE,
-		[PIPE_C] = SPRITE_C_FLIP_DONE,
-	};
+	u32 pipe = SPRSURF_TO_PIPE(offset);
+	int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0);
 
 	write_vreg(vgpu, offset, p_data, bytes);
-	vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+	vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+
+	if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP)
+		intel_vgpu_trigger_virtual_event(vgpu, event);
+	else
+		set_bit(event, vgpu->irq.flip_done_event[pipe]);
+
+	return 0;
+}
+
+static int reg50080_mmio_write(struct intel_vgpu *vgpu,
+			       unsigned int offset, void *p_data,
+			       unsigned int bytes)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	enum pipe pipe = REG_50080_TO_PIPE(offset);
+	enum plane_id plane = REG_50080_TO_PLANE(offset);
+	int event = SKL_FLIP_EVENT(pipe, plane);
+
+	write_vreg(vgpu, offset, p_data, bytes);
+	if (plane == PLANE_PRIMARY) {
+		vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+	} else {
+		vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+	}
+
+	if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC)
+		intel_vgpu_trigger_virtual_event(vgpu, event);
+	else
+		set_bit(event, vgpu->irq.flip_done_event[pipe]);
 
-	set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
 	return 0;
 }
 
@@ -1969,6 +1994,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
 	MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
 	MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
+	MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
+		reg50080_mmio_write);
 
 	MMIO_D(DSPCNTR(PIPE_B), D_ALL);
 	MMIO_D(DSPADDR(PIPE_B), D_ALL);
@@ -1978,6 +2005,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
 	MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
 	MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
+	MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
+		reg50080_mmio_write);
 
 	MMIO_D(DSPCNTR(PIPE_C), D_ALL);
 	MMIO_D(DSPADDR(PIPE_C), D_ALL);
@@ -1987,6 +2016,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
 	MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
 	MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
+	MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
+		reg50080_mmio_write);
 
 	MMIO_D(SPRCTL(PIPE_A), D_ALL);
 	MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
@@ -2000,6 +2031,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(SPROFFSET(PIPE_A), D_ALL);
 	MMIO_D(SPRSCALE(PIPE_A), D_ALL);
 	MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
+	MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
+		reg50080_mmio_write);
 
 	MMIO_D(SPRCTL(PIPE_B), D_ALL);
 	MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
@@ -2013,6 +2046,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(SPROFFSET(PIPE_B), D_ALL);
 	MMIO_D(SPRSCALE(PIPE_B), D_ALL);
 	MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
+	MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
+		reg50080_mmio_write);
 
 	MMIO_D(SPRCTL(PIPE_C), D_ALL);
 	MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
@@ -2026,6 +2061,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(SPROFFSET(PIPE_C), D_ALL);
 	MMIO_D(SPRSCALE(PIPE_C), D_ALL);
 	MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
+	MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
+		reg50080_mmio_write);
 
 	MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
 	MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
@@ -2827,26 +2864,26 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 
 	MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
 
-	MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
+	MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS);
 	MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
 	MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
 	MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DH(_MMIO(0x4ddc), D_SKL_PLUS, NULL, NULL);
-	MMIO_DH(_MMIO(0x42080), D_SKL_PLUS, NULL, NULL);
-	MMIO_D(_MMIO(0x45504), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x45520), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x46000), D_SKL_PLUS);
-	MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write);
-	MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write);
-	MMIO_D(_MMIO(0x6C040), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x6C048), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x6C050), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x6C044), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x6C054), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x6c058), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS);
-	MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL);
+	MMIO_DH(MMCD_MISC_CTRL, D_SKL_PLUS, NULL, NULL);
+	MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
+	MMIO_D(DC_STATE_EN, D_SKL_PLUS);
+	MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS);
+	MMIO_D(CDCLK_CTL, D_SKL_PLUS);
+	MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
+	MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
+	MMIO_D(_MMIO(_DPLL1_CFGCR1), D_SKL_PLUS);
+	MMIO_D(_MMIO(_DPLL2_CFGCR1), D_SKL_PLUS);
+	MMIO_D(_MMIO(_DPLL3_CFGCR1), D_SKL_PLUS);
+	MMIO_D(_MMIO(_DPLL1_CFGCR2), D_SKL_PLUS);
+	MMIO_D(_MMIO(_DPLL2_CFGCR2), D_SKL_PLUS);
+	MMIO_D(_MMIO(_DPLL3_CFGCR2), D_SKL_PLUS);
+	MMIO_D(DPLL_CTRL1, D_SKL_PLUS);
+	MMIO_D(DPLL_CTRL2, D_SKL_PLUS);
+	MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL);
 
 	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
 	MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
@@ -2965,40 +3002,41 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
 	MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
 
-	MMIO_D(_MMIO(0x70380), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x71380), D_SKL_PLUS);
+	MMIO_D(_MMIO(_PLANE_CTL_3_A), D_SKL_PLUS);
+	MMIO_D(_MMIO(_PLANE_CTL_3_B), D_SKL_PLUS);
 	MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
 	MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
+	MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
 
-	MMIO_D(_MMIO(0x8f074), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x8f004), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x8f034), D_SKL_PLUS);
+	MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
+	MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
+	MMIO_D(CSR_LAST_WRITE, D_SKL_PLUS);
 
-	MMIO_D(_MMIO(0xb11c), D_SKL_PLUS);
+	MMIO_D(BDW_SCRATCH1, D_SKL_PLUS);
 
-	MMIO_D(_MMIO(0x51000), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS);
+	MMIO_D(SKL_DFSM, D_SKL_PLUS);
+	MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS);
 
-	MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+	MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
 	       NULL, NULL);
-	MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+	MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
 	       NULL, NULL);
 
 	MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
 	MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
 	MMIO_D(RC6_LOCATION, D_SKL_PLUS);
-	MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
-	MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+	MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS, F_MODE_MASK,
+		 NULL, NULL);
+	MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
 		 NULL, NULL);
 
 	/* TRTT */
-	MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS,
+	MMIO_DFH(TRVATTL3PTRDW(0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(TRVATTL3PTRDW(1), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS,
 		 NULL, gen9_trtte_write);
 	MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
 
@@ -3011,7 +3049,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
 
 	MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
-	MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS);
+	MMIO_D(GEN6_STOLEN_RESERVED, D_SKL_PLUS);
 	MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
 	MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
 	MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
@@ -3042,8 +3080,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
 		 NULL, NULL);
 
-	MMIO_D(_MMIO(0x4ab8), D_KBL | D_CFL);
-	MMIO_D(_MMIO(0x2248), D_SKL_PLUS);
+	MMIO_D(GAMT_CHKN_BIT_REG, D_KBL);
+	MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL);
 
 	return 0;
 }
@@ -3265,7 +3303,7 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
 /* Special MMIO blocks. */
 static struct gvt_mmio_block mmio_blocks[] = {
 	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
-	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
+	{D_ALL, MCHBAR_MIRROR_REG_BASE, 0x4000, NULL, NULL},
 	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
 		pvinfo_mmio_read, pvinfo_mmio_write},
 	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
@@ -3489,12 +3527,11 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
 		return mmio_info->read(vgpu, offset, pdata, bytes);
 	else {
 		u64 ro_mask = mmio_info->ro_mask;
-		u32 old_vreg = 0, old_sreg = 0;
+		u32 old_vreg = 0;
 		u64 data = 0;
 
 		if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
 			old_vreg = vgpu_vreg(vgpu, offset);
-			old_sreg = vgpu_sreg(vgpu, offset);
 		}
 
 		if (likely(!ro_mask))
@@ -3516,8 +3553,6 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
 
 			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
 					| (vgpu_vreg(vgpu, offset) & mask);
-			vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
-					| (vgpu_sreg(vgpu, offset) & mask);
 		}
 	}
 
drivers/gpu/drm/i915/gvt/mmio.c
@@ -239,7 +239,6 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
 
 	if (dmlr) {
 		memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
-		memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);
 
 		vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
 
@@ -280,7 +279,6 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
 		 * touched
 		 */
 		memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
-		memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
 	}
 
 }
@@ -296,12 +294,10 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
 {
 	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
 
-	vgpu->mmio.vreg = vzalloc(array_size(info->mmio_size, 2));
+	vgpu->mmio.vreg = vzalloc(info->mmio_size);
 	if (!vgpu->mmio.vreg)
 		return -ENOMEM;
 
-	vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
-
 	intel_vgpu_reset_mmio(vgpu, true);
 
 	return 0;
@@ -315,5 +311,5 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
 void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
 {
 	vfree(vgpu->mmio.vreg);
-	vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
+	vgpu->mmio.vreg = NULL;
 }
drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -68,7 +68,7 @@ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
 	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
 	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
 	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
-	{BCS0, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
+	{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
 	{RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
 };
 
@@ -119,7 +119,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
 	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
 	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
-	{BCS0, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
+	{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
 
 	{VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
 
drivers/gpu/drm/i915/gvt/reg.h
@@ -60,6 +60,37 @@
 #define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100)
 #define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100)
 
+#define SKL_FLIP_EVENT(pipe, plane) (PRIMARY_A_FLIP_DONE + (plane) * 3 + (pipe))
+
+#define PLANE_CTL_ASYNC_FLIP		(1 << 9)
+#define REG50080_FLIP_TYPE_MASK		0x3
+#define REG50080_FLIP_TYPE_ASYNC	0x1
+
+#define REG_50080(_pipe, _plane) ({ \
+	typeof(_pipe) (p) = (_pipe); \
+	typeof(_plane) (q) = (_plane); \
+	(((p) == PIPE_A) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50080)) : \
+		(_MMIO(0x50090))) : \
+	(((p) == PIPE_B) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50088)) : \
+		(_MMIO(0x50098))) : \
+	(((p) == PIPE_C) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x5008C)) : \
+		(_MMIO(0x5009C))) : \
+		(_MMIO(0x50080))))); })
+
+#define REG_50080_TO_PIPE(_reg) ({ \
+	typeof(_reg) (reg) = (_reg); \
+	(((reg) == 0x50080 || (reg) == 0x50090) ? (PIPE_A) : \
+	(((reg) == 0x50088 || (reg) == 0x50098) ? (PIPE_B) : \
+	(((reg) == 0x5008C || (reg) == 0x5009C) ? (PIPE_C) : \
+		(INVALID_PIPE)))); })
+
+#define REG_50080_TO_PLANE(_reg) ({ \
+	typeof(_reg) (reg) = (_reg); \
+	(((reg) == 0x50080 || (reg) == 0x50088 || (reg) == 0x5008C) ? \
+		(PLANE_PRIMARY) : \
+	(((reg) == 0x50090 || (reg) == 0x50098 || (reg) == 0x5009C) ? \
+		(PLANE_SPRITE0) : (I915_MAX_PLANES))); })
+
 #define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
 		((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
 
@@ -95,4 +126,7 @@
 #define RING_GFX_MODE(base)	_MMIO((base) + 0x29c)
 #define VF_GUARDBAND		_MMIO(0x83a4)
 
+/* define the effective range of MCHBAR register on Sandybridge+ */
+#define MCHBAR_MIRROR_REG_BASE	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4000)
+
 #endif
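A quick worked example of the flip-event helpers above (a sketch; it assumes the GVT event enum orders PRIMARY_A/B/C_FLIP_DONE directly before SPRITE_A/B/C_FLIP_DONE, and that PIPE_B == 1 and PLANE_SPRITE0 == 1):

	/* SKL_FLIP_EVENT(pipe, plane) = PRIMARY_A_FLIP_DONE + plane * 3 + pipe */
	int event = SKL_FLIP_EVENT(PIPE_B, PLANE_SPRITE0);
	/* = PRIMARY_A_FLIP_DONE + 3 + 1 = SPRITE_B_FLIP_DONE */

	/* The REG_50080 helpers invert each other: 0x50098 is pipe B's sprite
	 * flip register, so REG_50080(PIPE_B, PLANE_SPRITE0) -> _MMIO(0x50098),
	 * REG_50080_TO_PIPE(0x50098) -> PIPE_B, and
	 * REG_50080_TO_PLANE(0x50098) -> PLANE_SPRITE0.
	 */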
drivers/gpu/drm/i915/gvt/scheduler.c
@@ -850,13 +850,13 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 }
 
 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
-				unsigned long engine_mask)
+				intel_engine_mask_t engine_mask)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine;
 	struct intel_vgpu_workload *pos, *n;
-	unsigned int tmp;
+	intel_engine_mask_t tmp;
 
 	/* free the unsubmited workloads in the queues. */
 	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
@@ -1149,7 +1149,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 *
 */
 void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
-				 unsigned long engine_mask)
+				 intel_engine_mask_t engine_mask)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 
@@ -1239,7 +1239,7 @@ out_shadow_ctx:
 *
 */
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
-				     unsigned long engine_mask,
+				     intel_engine_mask_t engine_mask,
 				     unsigned int interface)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;

drivers/gpu/drm/i915/gvt/scheduler.h
@@ -142,12 +142,12 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);
 
 void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
-				 unsigned long engine_mask);
+				 intel_engine_mask_t engine_mask);
 
 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
 
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
-				     unsigned long engine_mask,
+				     intel_engine_mask_t engine_mask,
 				     unsigned int interface);
 
 extern const struct intel_vgpu_submission_ops
@@ -160,6 +160,6 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
 
 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
-				unsigned long engine_mask);
+				intel_engine_mask_t engine_mask);
 
 #endif
drivers/gpu/drm/i915/gvt/vgpu.c
@@ -526,11 +526,11 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 * GPU engines. For FLR, engine_mask is ignored.
 */
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
-				 unsigned int engine_mask)
+				 intel_engine_mask_t engine_mask)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
+	intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
 
 	gvt_dbg_core("------------------------------------------\n");
 	gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
drivers/gpu/drm/i915/i915_debugfs.c
@@ -26,14 +26,21 @@
 *
 */
 
-#include <linux/sort.h>
 #include <linux/sched/mm.h>
+#include <linux/sort.h>
 
 #include <drm/drm_debugfs.h>
 #include <drm/drm_fourcc.h>
-#include "intel_drv.h"
-#include "intel_guc_submission.h"
 
 #include "i915_reset.h"
+#include "intel_dp.h"
+#include "intel_drv.h"
+#include "intel_fbc.h"
+#include "intel_guc_submission.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
 
 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
 {
@@ -826,11 +833,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 
 	} else if (!HAS_PCH_SPLIT(dev_priv)) {
 		seq_printf(m, "Interrupt enable:    %08x\n",
-			   I915_READ(IER));
+			   I915_READ(GEN2_IER));
 		seq_printf(m, "Interrupt identity:  %08x\n",
-			   I915_READ(IIR));
+			   I915_READ(GEN2_IIR));
 		seq_printf(m, "Interrupt mask:      %08x\n",
-			   I915_READ(IMR));
+			   I915_READ(GEN2_IMR));
 		for_each_pipe(dev_priv, pipe)
 			seq_printf(m, "Pipe %c stat:         %08x\n",
 				   pipe_name(pipe),
@@ -2087,8 +2094,8 @@ static int i915_llc(struct seq_file *m, void *data)
 	const bool edram = INTEL_GEN(dev_priv) > 8;
 
 	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
-	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
-		   intel_uncore_edram_size(dev_priv)/1024/1024);
+	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
+		   dev_priv->edram_size_mb);
 
 	return 0;
 }
@@ -2245,7 +2252,7 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
 	const struct intel_guc *guc = &dev_priv->guc;
 	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
 	struct intel_guc_client *client = guc->execbuf_client;
-	unsigned int tmp;
+	intel_engine_mask_t tmp;
 	int index;
 
 	if (!USES_GUC_SUBMISSION(dev_priv))
@@ -4814,6 +4821,8 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
 			   yesno(crtc_state->dsc_params.compression_enable));
 		seq_printf(m, "DSC_Sink_Support: %s\n",
 			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
+		seq_printf(m, "Force_DSC_Enable: %s\n",
+			   yesno(intel_dp->force_dsc_en));
 		if (!intel_dp_is_edp(intel_dp))
 			seq_printf(m, "FEC_Sink_Support: %s\n",
 				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
drivers/gpu/drm/i915/i915_drv.c
@@ -48,12 +48,19 @@
 #include <drm/i915_drm.h>
 
 #include "i915_drv.h"
-#include "i915_trace.h"
 #include "i915_pmu.h"
-#include "i915_reset.h"
 #include "i915_query.h"
+#include "i915_reset.h"
+#include "i915_trace.h"
 #include "i915_vgpu.h"
+#include "intel_audio.h"
+#include "intel_cdclk.h"
+#include "intel_csr.h"
+#include "intel_dp.h"
 #include "intel_drv.h"
+#include "intel_fbdev.h"
+#include "intel_pm.h"
+#include "intel_sprite.h"
 #include "intel_uc.h"
+#include "intel_workarounds.h"
 
@@ -868,10 +875,13 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
 	if (i915_inject_load_failure())
 		return -ENODEV;
 
+	intel_device_info_subplatform_init(dev_priv);
+
+	intel_uncore_init_early(&dev_priv->uncore);
+
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->gpu_error.lock);
 	mutex_init(&dev_priv->backlight_lock);
-	spin_lock_init(&dev_priv->uncore.lock);
 
 	mutex_init(&dev_priv->sb_lock);
 	mutex_init(&dev_priv->av_mutex);
@@ -954,7 +964,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 	if (i915_get_bridge_dev(dev_priv))
 		return -EIO;
 
-	ret = intel_uncore_init(&dev_priv->uncore);
+	ret = intel_uncore_init_mmio(&dev_priv->uncore);
 	if (ret < 0)
 		goto err_bridge;
 
@@ -963,7 +973,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 
 	intel_device_info_init_mmio(dev_priv);
 
-	intel_uncore_prune(&dev_priv->uncore);
+	intel_uncore_prune_mmio_domains(&dev_priv->uncore);
 
 	intel_uc_init_mmio(dev_priv);
 
@@ -977,7 +987,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 
 err_uncore:
 	intel_teardown_mchbar(dev_priv);
-	intel_uncore_fini(&dev_priv->uncore);
+	intel_uncore_fini_mmio(&dev_priv->uncore);
 err_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
 
@@ -991,7 +1001,7 @@ err_bridge:
 static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 {
 	intel_teardown_mchbar(dev_priv);
-	intel_uncore_fini(&dev_priv->uncore);
+	intel_uncore_fini_mmio(&dev_priv->uncore);
 	pci_dev_put(dev_priv->bridge_dev);
 }
 
@@ -1441,6 +1451,45 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
 			 dram_info->ranks, yesno(dram_info->is_16gb_dimm));
 }
 
+static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
+{
+	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
+	const unsigned int sets[4] = { 1, 1, 2, 2 };
+
+	return EDRAM_NUM_BANKS(cap) *
+		ways[EDRAM_WAYS_IDX(cap)] *
+		sets[EDRAM_SETS_IDX(cap)];
+}
+
+static void edram_detect(struct drm_i915_private *dev_priv)
+{
+	u32 edram_cap = 0;
+
+	if (!(IS_HASWELL(dev_priv) ||
+	      IS_BROADWELL(dev_priv) ||
+	      INTEL_GEN(dev_priv) >= 9))
+		return;
+
+	edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);
+
+	/* NB: We can't write IDICR yet because we don't have gt funcs set up */
+
+	if (!(edram_cap & EDRAM_ENABLED))
+		return;
+
+	/*
+	 * The needed capability bits for size calculation are not there with
+	 * pre gen9 so return 128MB always.
+	 */
+	if (INTEL_GEN(dev_priv) < 9)
+		dev_priv->edram_size_mb = 128;
+	else
+		dev_priv->edram_size_mb =
+			gen9_edram_size_mb(dev_priv, edram_cap);
+
+	DRM_INFO("Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
+}
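To make the table lookup concrete: if HSW_EDRAM_CAP were to decode to 2 banks, a ways index of 1, and a sets index of 2 (the field positions themselves are not shown in this hunk and are assumptions here), gen9_edram_size_mb() would return 2 * ways[1] * sets[2] = 2 * 8 * 2 = 32, i.e. 32MB, matching the %uMB units of dev_priv->edram_size_mb. Pre-gen9 parts always report 128MB because their capability register lacks these fields.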
 
/**
 * i915_driver_init_hw - setup state requiring device access
 * @dev_priv: device private
@@ -1483,6 +1532,9 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
 	intel_sanitize_options(dev_priv);
 
+	/* needs to be done before ggtt probe */
+	edram_detect(dev_priv);
+
 	i915_perf_init(dev_priv);
 
 	ret = i915_ggtt_probe_hw(dev_priv);
@@ -1718,10 +1770,12 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
 	if (drm_debug & DRM_UT_DRIVER) {
 		struct drm_printer p = drm_debug_printer("i915 device info:");
 
-		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
+		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
 			   INTEL_DEVID(dev_priv),
 			   INTEL_REVID(dev_priv),
 			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
+			   intel_subplatform(RUNTIME_INFO(dev_priv),
+					     INTEL_INFO(dev_priv)->platform),
 			   INTEL_GEN(dev_priv));
 
 		intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
@@ -1764,8 +1818,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
 	memcpy(device_info, match_info, sizeof(*device_info));
 	RUNTIME_INFO(i915)->device_id = pdev->device;
 
-	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
-		     BITS_PER_TYPE(device_info->platform_mask));
 	BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
 
 	return i915;
@@ -1862,6 +1914,13 @@ void i915_driver_unload(struct drm_device *dev)
 
 	i915_driver_unregister(dev_priv);
 
+	/*
+	 * After unregistering the device to prevent any new users, cancel
+	 * all in-flight requests so that we can quickly unbind the active
+	 * resources.
+	 */
+	i915_gem_set_wedged(dev_priv);
+
 	/* Flush any external code that still may be under the RCU lock */
 	synchronize_rcu();
 
drivers/gpu/drm/i915/i915_drv.h
@@ -66,13 +66,14 @@
 #include "intel_device_info.h"
 #include "intel_display.h"
 #include "intel_dpll_mgr.h"
+#include "intel_frontbuffer.h"
 #include "intel_lrc.h"
 #include "intel_opregion.h"
 #include "intel_ringbuffer.h"
+#include "intel_uc.h"
 #include "intel_uncore.h"
 #include "intel_wopcm.h"
 #include "intel_workarounds.h"
-#include "intel_uc.h"
 
 #include "i915_gem.h"
 #include "i915_gem_context.h"
@@ -92,8 +93,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20190328"
-#define DRIVER_TIMESTAMP	1553776914
+#define DRIVER_DATE		"20190417"
+#define DRIVER_TIMESTAMP	1555492067
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -282,7 +283,8 @@ struct drm_i915_display_funcs {
 	void (*get_cdclk)(struct drm_i915_private *dev_priv,
 			  struct intel_cdclk_state *cdclk_state);
 	void (*set_cdclk)(struct drm_i915_private *dev_priv,
-			  const struct intel_cdclk_state *cdclk_state);
+			  const struct intel_cdclk_state *cdclk_state,
+			  enum pipe pipe);
 	int (*get_fifo_size)(struct drm_i915_private *dev_priv,
 			     enum i9xx_plane_id i9xx_plane);
 	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
@@ -325,6 +327,7 @@ struct drm_i915_display_funcs {
 	/* display clock increase/decrease */
 	/* pll clock increase/decrease */
 
+	int (*color_check)(struct intel_crtc_state *crtc_state);
 	/*
	 * Program double buffered color management registers during
	 * vblank evasion. The registers should then latch during the
@@ -373,14 +376,6 @@ enum i915_cache_level {
 
 #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
 
-enum fb_op_origin {
-	ORIGIN_GTT,
-	ORIGIN_CPU,
-	ORIGIN_CS,
-	ORIGIN_FLIP,
-	ORIGIN_DIRTYFB,
-};
-
 struct intel_fbc {
 	/* This is always the inner lock when overlapping with struct_mutex and
	 * it's the outer lock when overlapping with stolen_lock. */
@@ -1628,6 +1623,8 @@ struct drm_i915_private {
 		struct intel_cdclk_state actual;
 		/* The current hardware cdclk state */
 		struct intel_cdclk_state hw;
+
+		int force_min_cdclk;
 	} cdclk;
 
 	/**
@@ -1706,8 +1703,11 @@ struct drm_i915_private {
 
 	struct intel_l3_parity l3_parity;
 
-	/* Cannot be determined by PCIID. You must always read a register. */
-	u32 edram_cap;
+	/*
+	 * edram size in MB.
+	 * Cannot be determined by PCIID. You must always read a register.
+	 */
+	u32 edram_size_mb;
 
 	/*
	 * Protects RPS/RC6 register access and PCU communication.
@@ -1747,6 +1747,7 @@ struct drm_i915_private {
 	 *
	 */
 	struct mutex av_mutex;
+	int audio_power_refcount;
 
 	struct {
 		struct mutex mutex;
@@ -1994,7 +1995,6 @@ struct drm_i915_private {
 
 	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
 	struct {
-		void (*resume)(struct drm_i915_private *);
 		void (*cleanup_engine)(struct intel_engine_cs *engine);
 
 		struct i915_gt_timelines {
@@ -2298,7 +2298,69 @@ static inline unsigned int i915_sg_segment_size(void)
 #define IS_REVID(p, since, until) \
	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
 
-#define IS_PLATFORM(dev_priv, p) (INTEL_INFO(dev_priv)->platform_mask & BIT(p))
+static __always_inline unsigned int
+__platform_mask_index(const struct intel_runtime_info *info,
+		      enum intel_platform p)
+{
+	const unsigned int pbits =
+		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
+
+	/* Expand the platform_mask array if this fails. */
+	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
+		     pbits * ARRAY_SIZE(info->platform_mask));
+
+	return p / pbits;
+}
+
+static __always_inline unsigned int
+__platform_mask_bit(const struct intel_runtime_info *info,
+		    enum intel_platform p)
+{
+	const unsigned int pbits =
+		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
+
+	return p % pbits + INTEL_SUBPLATFORM_BITS;
+}
+
+static inline u32
+intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
+{
+	const unsigned int pi = __platform_mask_index(info, p);
+
+	return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS;
+}
+
+static __always_inline bool
+IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
+{
+	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
+	const unsigned int pi = __platform_mask_index(info, p);
+	const unsigned int pb = __platform_mask_bit(info, p);
+
+	BUILD_BUG_ON(!__builtin_constant_p(p));
+
+	return info->platform_mask[pi] & BIT(pb);
+}
+
+static __always_inline bool
+IS_SUBPLATFORM(const struct drm_i915_private *i915,
+	       enum intel_platform p, unsigned int s)
+{
+	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
+	const unsigned int pi = __platform_mask_index(info, p);
+	const unsigned int pb = __platform_mask_bit(info, p);
+	const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
+	const u32 mask = info->platform_mask[pi];
+
+	BUILD_BUG_ON(!__builtin_constant_p(p));
+	BUILD_BUG_ON(!__builtin_constant_p(s));
+	BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);
+
+	/* Shift and test on the MSB position so sign flag can be used. */
+	return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
+}
+
+#define IS_MOBILE(dev_priv)	(INTEL_INFO(dev_priv)->is_mobile)
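For orientation, a sketch of how the split mask works, assuming INTEL_SUBPLATFORM_BITS == 3 and 32-bit mask words (so pbits = 32 - 3 = 29 platform bits per word):

	/* e.g. platform enum value p = 30:                           */
	/*   __platform_mask_index() -> 30 / 29 = 1                   */
	/*   __platform_mask_bit()   -> 30 % 29 + 3 = 4               */
	/* so IS_PLATFORM() tests platform_mask[1] & BIT(4), while    */
	/* the low bits of that same word hold the subplatform id.    */
	/* IS_SUBPLATFORM() shifts both the platform bit (pb) and the */
	/* subplatform bit (s) up to the MSB and ANDs them, so it is  */
	/* true only when both bits are set in the same mask word.    */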
 
 #define IS_I830(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I830)
 #define IS_I845G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I845G)
@@ -2313,11 +2375,11 @@ static inline unsigned int i915_sg_segment_size(void)
 #define IS_G45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G45)
 #define IS_GM45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GM45)
 #define IS_G4X(dev_priv)	(IS_G45(dev_priv) || IS_GM45(dev_priv))
-#define IS_PINEVIEW_G(dev_priv)	(INTEL_DEVID(dev_priv) == 0xa001)
-#define IS_PINEVIEW_M(dev_priv)	(INTEL_DEVID(dev_priv) == 0xa011)
 #define IS_PINEVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
 #define IS_G33(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G33)
-#define IS_IRONLAKE_M(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0046)
+#define IS_IRONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
+#define IS_IRONLAKE_M(dev_priv) \
+	(IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
 #define IS_IVYBRIDGE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
 #define IS_IVB_GT1(dev_priv)	(IS_IVYBRIDGE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 1)
@@ -2333,46 +2395,34 @@ static inline unsigned int i915_sg_segment_size(void)
 #define IS_CANNONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
 #define IS_ICELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ICELAKE)
 #define IS_ELKHARTLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
-#define IS_MOBILE(dev_priv)	(INTEL_INFO(dev_priv)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
-#define IS_BDW_ULT(dev_priv)	(IS_BROADWELL(dev_priv) && \
-				 ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \
-				 (INTEL_DEVID(dev_priv) & 0xf) == 0xb || \
-				 (INTEL_DEVID(dev_priv) & 0xf) == 0xe))
-/* ULX machines are also considered ULT. */
-#define IS_BDW_ULX(dev_priv)	(IS_BROADWELL(dev_priv) && \
-				 (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
+#define IS_BDW_ULT(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
+#define IS_BDW_ULX(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
 #define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
-#define IS_HSW_ULT(dev_priv)	(IS_HASWELL(dev_priv) && \
-				 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
+#define IS_HSW_ULT(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
 #define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
 #define IS_HSW_GT1(dev_priv)	(IS_HASWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 1)
 /* ULX machines are also considered ULT. */
-#define IS_HSW_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0A0E || \
-				 INTEL_DEVID(dev_priv) == 0x0A1E)
-#define IS_SKL_ULT(dev_priv)	(INTEL_DEVID(dev_priv) == 0x1906 || \
-				 INTEL_DEVID(dev_priv) == 0x1913 || \
-				 INTEL_DEVID(dev_priv) == 0x1916 || \
-				 INTEL_DEVID(dev_priv) == 0x1921 || \
-				 INTEL_DEVID(dev_priv) == 0x1926)
-#define IS_SKL_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x190E || \
-				 INTEL_DEVID(dev_priv) == 0x1915 || \
-				 INTEL_DEVID(dev_priv) == 0x191E)
-#define IS_KBL_ULT(dev_priv)	(INTEL_DEVID(dev_priv) == 0x5906 || \
-				 INTEL_DEVID(dev_priv) == 0x5913 || \
-				 INTEL_DEVID(dev_priv) == 0x5916 || \
-				 INTEL_DEVID(dev_priv) == 0x5921 || \
-				 INTEL_DEVID(dev_priv) == 0x5926)
-#define IS_KBL_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x590E || \
-				 INTEL_DEVID(dev_priv) == 0x5915 || \
-				 INTEL_DEVID(dev_priv) == 0x591E)
-#define IS_AML_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x591C || \
-				 INTEL_DEVID(dev_priv) == 0x87C0 || \
-				 INTEL_DEVID(dev_priv) == 0x87CA)
+#define IS_HSW_ULX(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
+#define IS_SKL_ULT(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_SKL_ULX(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
+#define IS_KBL_ULT(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_KBL_ULX(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
+#define IS_AML_ULX(dev_priv) \
+	(IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_AML) || \
+	 IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_AML))
 #define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
 #define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
@@ -2383,16 +2433,16 @@ static inline unsigned int i915_sg_segment_size(void)
				 INTEL_INFO(dev_priv)->gt == 2)
 #define IS_KBL_GT3(dev_priv)	(IS_KABYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
-#define IS_CFL_ULT(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
-				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
+#define IS_CFL_ULT(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
 #define IS_CFL_GT2(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
 #define IS_CFL_GT3(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
-#define IS_CNL_WITH_PORT_F(dev_priv)   (IS_CANNONLAKE(dev_priv) && \
-					(INTEL_DEVID(dev_priv) & 0x0004) == 0x0004)
-#define IS_ICL_WITH_PORT_F(dev_priv)   (IS_ICELAKE(dev_priv) && \
-					INTEL_DEVID(dev_priv) != 0x8A51)
+#define IS_CNL_WITH_PORT_F(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
+#define IS_ICL_WITH_PORT_F(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
 
 #define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
 
@@ -2451,7 +2501,6 @@ static inline unsigned int i915_sg_segment_size(void)
 #define IS_GEN9_LP(dev_priv)	(IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
 #define IS_GEN9_BC(dev_priv)	(IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
 
-#define ALL_ENGINES	(~0u)
 #define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id))
 
 #define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({ \
@@ -2467,7 +2516,7 @@ static inline unsigned int i915_sg_segment_size(void)
 
 #define HAS_LLC(dev_priv)	(INTEL_INFO(dev_priv)->has_llc)
 #define HAS_SNOOP(dev_priv)	(INTEL_INFO(dev_priv)->has_snoop)
-#define HAS_EDRAM(dev_priv)	(!!((dev_priv)->edram_cap & EDRAM_ENABLED))
+#define HAS_EDRAM(dev_priv)	((dev_priv)->edram_size_mb)
 #define HAS_WT(dev_priv)	((IS_HASWELL(dev_priv) || \
				  IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
 
@@ -2860,6 +2909,7 @@ static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
 	int pass = 2;
 	do {
		rcu_barrier();
+		i915_gem_drain_freed_objects(i915);
		drain_workqueue(i915->wq);
 	} while (--pass);
 }
drivers/gpu/drm/i915/i915_gem.c
@@ -50,6 +50,7 @@
 #include "intel_drv.h"
 #include "intel_frontbuffer.h"
 #include "intel_mocs.h"
+#include "intel_pm.h"
 #include "intel_workarounds.h"
 
 static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
@@ -308,7 +309,7 @@ static void __start_cpu_write(struct drm_i915_gem_object *obj)
 	obj->cache_dirty = true;
 }
 
-static void
+void
 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
@@ -2202,7 +2203,6 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
 	struct page *page;
 
 	__i915_gem_object_release_shmem(obj, pages, true);
-
 	i915_gem_gtt_finish_pages(obj, pages);
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
@ -2789,7 +2789,11 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
|
||||
u64 remain, offset;
|
||||
unsigned int pg;
|
||||
|
||||
/* Before we instantiate/pin the backing store for our use, we
|
||||
/* Caller already validated user args */
|
||||
GEM_BUG_ON(!access_ok(user_data, arg->size));
|
||||
|
||||
/*
|
||||
* Before we instantiate/pin the backing store for our use, we
|
||||
* can prepopulate the shmemfs filp efficiently using a write into
|
||||
* the pagecache. We avoid the penalty of instantiating all the
|
||||
* pages, important if the user is just writing to a few and never
|
||||
@ -2803,7 +2807,8 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
|
||||
if (obj->mm.madv != I915_MADV_WILLNEED)
|
||||
return -EFAULT;
|
||||
|
||||
/* Before the pages are instantiated the object is treated as being
|
||||
/*
|
||||
* Before the pages are instantiated the object is treated as being
|
||||
* in the CPU domain. The pages will be clflushed as required before
|
||||
* use, and we can freely write into the pages directly. If userspace
|
||||
* races pwrite with any other operation; corruption will ensue -
|
||||
@ -2819,20 +2824,32 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
|
||||
struct page *page;
|
||||
void *data, *vaddr;
|
||||
int err;
|
||||
char c;
|
||||
|
||||
len = PAGE_SIZE - pg;
|
||||
if (len > remain)
|
||||
len = remain;
|
||||
|
||||
/* Prefault the user page to reduce potential recursion */
|
||||
err = __get_user(c, user_data);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = __get_user(c, user_data + len - 1);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = pagecache_write_begin(obj->base.filp, mapping,
|
||||
offset, len, 0,
|
||||
&page, &data);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
vaddr = kmap(page);
|
||||
unwritten = copy_from_user(vaddr + pg, user_data, len);
|
||||
kunmap(page);
|
||||
vaddr = kmap_atomic(page);
|
||||
unwritten = __copy_from_user_inatomic(vaddr + pg,
|
||||
user_data,
|
||||
len);
|
||||
kunmap_atomic(vaddr);
|
||||
|
||||
err = pagecache_write_end(obj->base.filp, mapping,
|
||||
offset, len, len - unwritten,
|
||||
@ -2840,8 +2857,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
/* We don't handle -EFAULT, leave it to the caller to check */
|
||||
if (unwritten)
|
||||
return -EFAULT;
|
||||
return -ENODEV;
|
||||
|
||||
remain -= len;
|
||||
user_data += len;
|
||||
@ -3824,16 +3842,16 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
|
||||
return vma;
|
||||
}
|
||||
|
||||
static __always_inline unsigned int __busy_read_flag(unsigned int id)
|
||||
static __always_inline u32 __busy_read_flag(u8 id)
|
||||
{
|
||||
if (id == I915_ENGINE_CLASS_INVALID)
|
||||
return 0xffff0000;
|
||||
if (id == (u8)I915_ENGINE_CLASS_INVALID)
|
||||
return 0xffff0000u;
|
||||
|
||||
GEM_BUG_ON(id >= 16);
|
||||
return 0x10000 << id;
|
||||
return 0x10000u << id;
|
||||
}
|
||||
|
||||
static __always_inline unsigned int __busy_write_id(unsigned int id)
|
||||
static __always_inline u32 __busy_write_id(u8 id)
|
||||
{
|
||||
/*
|
||||
* The uABI guarantees an active writer is also amongst the read
|
||||
@ -3844,15 +3862,14 @@ static __always_inline unsigned int __busy_write_id(unsigned int id)
|
||||
* last_read - hence we always set both read and write busy for
|
||||
* last_write.
|
||||
*/
|
||||
if (id == I915_ENGINE_CLASS_INVALID)
|
||||
return 0xffffffff;
|
||||
if (id == (u8)I915_ENGINE_CLASS_INVALID)
|
||||
return 0xffffffffu;
|
||||
|
||||
return (id + 1) | __busy_read_flag(id);
|
||||
}
|
||||
|
||||
static __always_inline unsigned int
|
||||
__busy_set_if_active(const struct dma_fence *fence,
|
||||
unsigned int (*flag)(unsigned int id))
|
||||
__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
|
||||
{
|
||||
const struct i915_request *rq;
|
||||
|
||||
@ -3872,6 +3889,8 @@ __busy_set_if_active(const struct dma_fence *fence,
|
||||
if (i915_request_completed(rq))
|
||||
return 0;
|
||||
|
||||
/* Beware type-expansion follies! */
|
||||
BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
|
||||
return flag(rq->engine->uabi_class);
|
||||
}
|
||||
|
||||
@ -4494,7 +4513,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
|
||||
* guarantee that the context image is complete. So let's just reset
|
||||
* it and start again.
|
||||
*/
|
||||
i915->gt.resume(i915);
|
||||
intel_gt_resume(i915);
|
||||
|
||||
if (i915_gem_init_hw(i915))
|
||||
goto err_wedged;
|
||||
@ -4834,13 +4853,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
|
||||
|
||||
dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
|
||||
|
||||
if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
|
||||
dev_priv->gt.resume = intel_lr_context_resume;
|
||||
if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
|
||||
dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
|
||||
} else {
|
||||
dev_priv->gt.resume = intel_legacy_submission_resume;
|
||||
else
|
||||
dev_priv->gt.cleanup_engine = intel_engine_cleanup;
|
||||
}
|
||||
|
||||
i915_timelines_init(dev_priv);
|
||||
|
||||
|
@ -73,8 +73,6 @@ struct drm_i915_private;
|
||||
#define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
|
||||
#endif
|
||||
|
||||
#define I915_NUM_ENGINES 8
|
||||
|
||||
#define I915_GEM_IDLE_TIMEOUT (HZ / 5)
|
||||
|
||||
void i915_gem_park(struct drm_i915_private *i915);
|
||||
|
@ -562,7 +562,7 @@ static void init_contexts(struct drm_i915_private *i915)
|
||||
|
||||
static bool needs_preempt_context(struct drm_i915_private *i915)
|
||||
{
|
||||
return HAS_LOGICAL_RING_PREEMPTION(i915);
|
||||
return HAS_EXECLISTS(i915);
|
||||
}
|
||||
|
||||
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
|
||||
@ -858,9 +858,9 @@ static void cb_retire(struct i915_active *base)
|
||||
kfree(cb);
|
||||
}
|
||||
|
||||
I915_SELFTEST_DECLARE(static unsigned long context_barrier_inject_fault);
|
||||
I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
|
||||
static int context_barrier_task(struct i915_gem_context *ctx,
|
||||
unsigned long engines,
|
||||
intel_engine_mask_t engines,
|
||||
int (*emit)(struct i915_request *rq, void *data),
|
||||
void (*task)(void *data),
|
||||
void *data)
|
||||
@ -922,7 +922,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
|
||||
}
|
||||
|
||||
int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
|
||||
unsigned long mask)
|
||||
intel_engine_mask_t mask)
|
||||
{
|
||||
struct intel_engine_cs *engine;
|
||||
|
||||
@ -969,10 +969,10 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int get_ppgtt(struct i915_gem_context *ctx,
|
||||
static int get_ppgtt(struct drm_i915_file_private *file_priv,
|
||||
struct i915_gem_context *ctx,
|
||||
struct drm_i915_gem_context_param *args)
|
||||
{
|
||||
struct drm_i915_file_private *file_priv = ctx->file_priv;
|
||||
struct i915_hw_ppgtt *ppgtt;
|
||||
int ret;
|
||||
|
||||
@ -1028,6 +1028,7 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
|
||||
{
|
||||
struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
|
||||
struct intel_engine_cs *engine = rq->engine;
|
||||
u32 base = engine->mmio_base;
|
||||
u32 *cs;
|
||||
int i;
|
||||
|
||||
@ -1040,9 +1041,9 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
|
||||
|
||||
*cs++ = MI_LOAD_REGISTER_IMM(2);
|
||||
|
||||
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, 0));
|
||||
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
|
||||
*cs++ = upper_32_bits(pd_daddr);
|
||||
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, 0));
|
||||
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
|
||||
*cs++ = lower_32_bits(pd_daddr);
|
||||
|
||||
*cs++ = MI_NOOP;
|
||||
@ -1056,9 +1057,9 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
|
||||
for (i = GEN8_3LVL_PDPES; i--; ) {
|
||||
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
|
||||
|
||||
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
|
||||
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
|
||||
*cs++ = upper_32_bits(pd_daddr);
|
||||
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
|
||||
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
|
||||
*cs++ = lower_32_bits(pd_daddr);
|
||||
}
|
||||
*cs++ = MI_NOOP;
|
||||
@ -1071,10 +1072,10 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int set_ppgtt(struct i915_gem_context *ctx,
|
||||
static int set_ppgtt(struct drm_i915_file_private *file_priv,
|
||||
struct i915_gem_context *ctx,
|
||||
struct drm_i915_gem_context_param *args)
|
||||
{
|
||||
struct drm_i915_file_private *file_priv = ctx->file_priv;
|
||||
struct i915_hw_ppgtt *ppgtt, *old;
|
||||
int err;
|
||||
|
||||
@ -1166,7 +1167,7 @@ static int
|
||||
gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
|
||||
{
|
||||
struct drm_i915_private *i915 = ce->engine->i915;
|
||||
struct i915_request *rq, *prev;
|
||||
struct i915_request *rq;
|
||||
intel_wakeref_t wakeref;
|
||||
int ret;
|
||||
|
||||
@ -1191,16 +1192,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
|
||||
}
|
||||
|
||||
/* Queue this switch after all other activity by this context. */
|
||||
prev = i915_active_request_raw(&ce->ring->timeline->last_request,
|
||||
&i915->drm.struct_mutex);
|
||||
if (prev && !i915_request_completed(prev)) {
|
||||
ret = i915_request_await_dma_fence(rq, &prev->fence);
|
||||
if (ret < 0)
|
||||
goto out_add;
|
||||
}
|
||||
|
||||
/* Order all following requests to be after. */
|
||||
ret = i915_timeline_set_barrier(ce->ring->timeline, rq);
|
||||
ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
|
||||
if (ret)
|
||||
goto out_add;
|
||||
|
||||
@ -1394,8 +1386,8 @@ static int set_sseu(struct i915_gem_context *ctx,
|
||||
return -EINVAL;
|
||||
|
||||
engine = intel_engine_lookup_user(i915,
|
||||
user_sseu.engine_class,
|
||||
user_sseu.engine_instance);
|
||||
user_sseu.engine.engine_class,
|
||||
user_sseu.engine.engine_instance);
|
||||
if (!engine)
|
||||
return -EINVAL;
|
||||
|
||||
@ -1416,7 +1408,8 @@ static int set_sseu(struct i915_gem_context *ctx,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ctx_setparam(struct i915_gem_context *ctx,
|
||||
static int ctx_setparam(struct drm_i915_file_private *fpriv,
|
||||
struct i915_gem_context *ctx,
|
||||
struct drm_i915_gem_context_param *args)
|
||||
{
|
||||
int ret = 0;
|
||||
@ -1485,7 +1478,7 @@ static int ctx_setparam(struct i915_gem_context *ctx,
|
||||
break;
|
||||
|
||||
case I915_CONTEXT_PARAM_VM:
|
||||
ret = set_ppgtt(ctx, args);
|
||||
ret = set_ppgtt(fpriv, ctx, args);
|
||||
break;
|
||||
|
||||
case I915_CONTEXT_PARAM_BAN_PERIOD:
|
||||
@ -1513,7 +1506,7 @@ static int create_setparam(struct i915_user_extension __user *ext, void *data)
|
||||
if (local.param.ctx_id)
|
||||
return -EINVAL;
|
||||
|
||||
return ctx_setparam(arg->ctx, &local.param);
|
||||
return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
|
||||
}
|
||||
|
||||
static const i915_user_extension_fn create_extensions[] = {
|
||||
@ -1633,8 +1626,8 @@ static int get_sseu(struct i915_gem_context *ctx,
|
||||
return -EINVAL;
|
||||
|
||||
engine = intel_engine_lookup_user(ctx->i915,
|
||||
user_sseu.engine_class,
|
||||
user_sseu.engine_instance);
|
||||
user_sseu.engine.engine_class,
|
||||
user_sseu.engine.engine_instance);
|
||||
if (!engine)
|
||||
return -EINVAL;
|
||||
|
||||
@ -1712,7 +1705,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
|
||||
break;
|
||||
|
||||
case I915_CONTEXT_PARAM_VM:
|
||||
ret = get_ppgtt(ctx, args);
|
||||
ret = get_ppgtt(file_priv, ctx, args);
|
||||
break;
|
||||
|
||||
case I915_CONTEXT_PARAM_BAN_PERIOD:
|
||||
@ -1737,7 +1730,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
|
||||
if (!ctx)
|
||||
return -ENOENT;
|
||||
|
||||
ret = ctx_setparam(ctx, args);
|
||||
ret = ctx_setparam(file_priv, ctx, args);
|
||||
|
||||
i915_gem_context_put(ctx);
|
||||
return ret;
|
||||
|
@ -142,7 +142,7 @@ void i915_gem_context_close(struct drm_file *file);
|
||||
|
||||
int i915_switch_context(struct i915_request *rq);
|
||||
int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
|
||||
unsigned long engine_mask);
|
||||
intel_engine_mask_t engine_mask);
|
||||
|
||||
void i915_gem_context_release(struct kref *ctx_ref);
|
||||
struct i915_gem_context *
|
||||
|
@ -1228,7 +1228,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
|
||||
vm->scratch_pte =
|
||||
gen8_pte_encode(vm->scratch_page.daddr,
|
||||
I915_CACHE_LLC,
|
||||
PTE_READ_ONLY);
|
||||
vm->has_read_only);
|
||||
|
||||
vm->scratch_pt = alloc_pt(vm);
|
||||
if (IS_ERR(vm->scratch_pt)) {
|
||||
@ -1548,8 +1548,13 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
|
||||
|
||||
ppgtt_init(i915, ppgtt);
|
||||
|
||||
/* From bdw, there is support for read-only pages in the PPGTT. */
|
||||
ppgtt->vm.has_read_only = true;
|
||||
/*
|
||||
* From bdw, there is hw support for read-only pages in the PPGTT.
|
||||
*
|
||||
* Gen11 has HSDES#:1807136187 unresolved. Disable ro support
|
||||
* for now.
|
||||
*/
|
||||
ppgtt->vm.has_read_only = INTEL_GEN(i915) != 11;
|
||||
|
||||
/* There are only few exceptions for gen >=6. chv and bxt.
|
||||
* And we are not sure about the latter so play safe for now.
|
||||
|
@ -390,7 +390,7 @@ struct i915_hw_ppgtt {
|
||||
struct i915_address_space vm;
|
||||
struct kref ref;
|
||||
|
||||
unsigned long pd_dirty_engines;
|
||||
intel_engine_mask_t pd_dirty_engines;
|
||||
union {
|
||||
struct i915_pml4 pml4; /* GEN8+ & 48b PPGTT */
|
||||
struct i915_page_directory_pointer pdp; /* GEN8+ */
|
||||
|
@ -502,4 +502,8 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
|
||||
unsigned int cache_level);
|
||||
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
|
||||
|
||||
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
|
||||
struct sg_table *pages,
|
||||
bool needs_clflush);
|
||||
|
||||
#endif
|
||||
|
@ -673,9 +673,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
|
||||
if (!pages)
|
||||
return;
|
||||
|
||||
if (obj->mm.madv != I915_MADV_WILLNEED)
|
||||
obj->mm.dirty = false;
|
||||
|
||||
__i915_gem_object_release_shmem(obj, pages, true);
|
||||
i915_gem_gtt_finish_pages(obj, pages);
|
||||
|
||||
for_each_sgt_page(page, sgt_iter, pages) {
|
||||
|
@ -17,6 +17,33 @@
|
||||
|
||||
static LIST_HEAD(globals);
|
||||
|
||||
static atomic_t active;
|
||||
static atomic_t epoch;
|
||||
static struct park_work {
|
||||
struct rcu_work work;
|
||||
int epoch;
|
||||
} park;
|
||||
|
||||
static void i915_globals_shrink(void)
|
||||
{
|
||||
struct i915_global *global;
|
||||
|
||||
/*
|
||||
* kmem_cache_shrink() discards empty slabs and reorders partially
|
||||
* filled slabs to prioritise allocating from the mostly full slabs,
|
||||
* with the aim of reducing fragmentation.
|
||||
*/
|
||||
list_for_each_entry(global, &globals, link)
|
||||
global->shrink();
|
||||
}
|
||||
|
||||
static void __i915_globals_park(struct work_struct *work)
|
||||
{
|
||||
/* Confirm nothing woke up in the last grace period */
|
||||
if (park.epoch == atomic_read(&epoch))
|
||||
i915_globals_shrink();
|
||||
}
|
||||
|
||||
void __init i915_global_register(struct i915_global *global)
|
||||
{
|
||||
GEM_BUG_ON(!global->shrink);
|
||||
@ -57,44 +84,12 @@ int __init i915_globals_init(void)
|
||||
}
|
||||
}
|
||||
|
||||
INIT_RCU_WORK(&park.work, __i915_globals_park);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void i915_globals_shrink(void)
|
||||
{
|
||||
struct i915_global *global;
|
||||
|
||||
/*
|
||||
* kmem_cache_shrink() discards empty slabs and reorders partially
|
||||
* filled slabs to prioritise allocating from the mostly full slabs,
|
||||
* with the aim of reducing fragmentation.
|
||||
*/
|
||||
list_for_each_entry(global, &globals, link)
|
||||
global->shrink();
|
||||
}
|
||||
|
||||
static atomic_t active;
|
||||
static atomic_t epoch;
|
||||
struct park_work {
|
||||
struct rcu_work work;
|
||||
int epoch;
|
||||
};
|
||||
|
||||
static void __i915_globals_park(struct work_struct *work)
|
||||
{
|
||||
struct park_work *wrk = container_of(work, typeof(*wrk), work.work);
|
||||
|
||||
/* Confirm nothing woke up in the last grace period */
|
||||
if (wrk->epoch == atomic_read(&epoch))
|
||||
i915_globals_shrink();
|
||||
|
||||
kfree(wrk);
|
||||
}
|
||||
|
||||
void i915_globals_park(void)
|
||||
{
|
||||
struct park_work *wrk;
|
||||
|
||||
/*
|
||||
* Defer shrinking the global slab caches (and other work) until
|
||||
* after a RCU grace period has completed with no activity. This
|
||||
@ -107,13 +102,8 @@ void i915_globals_park(void)
|
||||
if (!atomic_dec_and_test(&active))
|
||||
return;
|
||||
|
||||
wrk = kmalloc(sizeof(*wrk), GFP_KERNEL);
|
||||
if (!wrk)
|
||||
return;
|
||||
|
||||
wrk->epoch = atomic_inc_return(&epoch);
|
||||
INIT_RCU_WORK(&wrk->work, __i915_globals_park);
|
||||
queue_rcu_work(system_wq, &wrk->work);
|
||||
park.epoch = atomic_inc_return(&epoch);
|
||||
queue_rcu_work(system_wq, &park.work);
|
||||
}
|
||||
|
||||
void i915_globals_unpark(void)
|
||||
@ -125,8 +115,8 @@ void i915_globals_unpark(void)
|
||||
void __exit i915_globals_exit(void)
|
||||
{
|
||||
/* Flush any residual park_work */
|
||||
rcu_barrier();
|
||||
flush_scheduled_work();
|
||||
atomic_inc(&epoch);
|
||||
flush_rcu_work(&park.work);
|
||||
|
||||
__i915_globals_cleanup();
|
||||
|
||||
|
@ -677,6 +677,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
|
||||
err_printf(m, "Reset count: %u\n", error->reset_count);
|
||||
err_printf(m, "Suspend count: %u\n", error->suspend_count);
|
||||
err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
|
||||
err_printf(m, "Subplatform: 0x%x\n",
|
||||
intel_subplatform(&error->runtime_info,
|
||||
error->device_info.platform));
|
||||
err_print_pciid(m, m->i915);
|
||||
|
||||
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
|
||||
@ -1093,7 +1096,7 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
|
||||
* It's only a small step better than a random number in its current form.
|
||||
*/
|
||||
static u32 i915_error_generate_code(struct i915_gpu_state *error,
|
||||
unsigned long engine_mask)
|
||||
intel_engine_mask_t engine_mask)
|
||||
{
|
||||
/*
|
||||
* IPEHR would be an ideal way to detect errors, as it's the gross
|
||||
@ -1212,20 +1215,23 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
|
||||
|
||||
ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
|
||||
|
||||
if (IS_GEN(dev_priv, 6))
|
||||
if (IS_GEN(dev_priv, 6)) {
|
||||
ee->vm_info.pp_dir_base =
|
||||
ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
|
||||
else if (IS_GEN(dev_priv, 7))
|
||||
} else if (IS_GEN(dev_priv, 7)) {
|
||||
ee->vm_info.pp_dir_base =
|
||||
ENGINE_READ(engine, RING_PP_DIR_BASE);
|
||||
else if (INTEL_GEN(dev_priv) >= 8)
|
||||
ENGINE_READ(engine, RING_PP_DIR_BASE);
|
||||
} else if (INTEL_GEN(dev_priv) >= 8) {
|
||||
u32 base = engine->mmio_base;
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
ee->vm_info.pdp[i] =
|
||||
I915_READ(GEN8_RING_PDP_UDW(engine, i));
|
||||
I915_READ(GEN8_RING_PDP_UDW(base, i));
|
||||
ee->vm_info.pdp[i] <<= 32;
|
||||
ee->vm_info.pdp[i] |=
|
||||
I915_READ(GEN8_RING_PDP_LDW(engine, i));
|
||||
I915_READ(GEN8_RING_PDP_LDW(base, i));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -1629,16 +1635,17 @@ static void capture_reg_state(struct i915_gpu_state *error)
|
||||
error->gtier[0] = I915_READ(GTIER);
|
||||
error->ngtier = 1;
|
||||
} else if (IS_GEN(dev_priv, 2)) {
|
||||
error->ier = I915_READ16(IER);
|
||||
error->ier = I915_READ16(GEN2_IER);
|
||||
} else if (!IS_VALLEYVIEW(dev_priv)) {
|
||||
error->ier = I915_READ(IER);
|
||||
error->ier = I915_READ(GEN2_IER);
|
||||
}
|
||||
error->eir = I915_READ(EIR);
|
||||
error->pgtbl_er = I915_READ(PGTBL_ER);
|
||||
}
|
||||
|
||||
static const char *
|
||||
error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
|
||||
error_msg(struct i915_gpu_state *error,
|
||||
intel_engine_mask_t engines, const char *msg)
|
||||
{
|
||||
int len;
|
||||
int i;
|
||||
@ -1648,7 +1655,7 @@ error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
|
||||
engines &= ~BIT(i);
|
||||
|
||||
len = scnprintf(error->error_msg, sizeof(error->error_msg),
|
||||
"GPU HANG: ecode %d:%lx:0x%08x",
|
||||
"GPU HANG: ecode %d:%x:0x%08x",
|
||||
INTEL_GEN(error->i915), engines,
|
||||
i915_error_generate_code(error, engines));
|
||||
if (engines) {
|
||||
@ -1787,7 +1794,7 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
|
||||
* to pick up.
|
||||
*/
|
||||
void i915_capture_error_state(struct drm_i915_private *i915,
|
||||
unsigned long engine_mask,
|
||||
intel_engine_mask_t engine_mask,
|
||||
const char *msg)
|
||||
{
|
||||
static bool warned;
|
||||
|
@ -263,7 +263,7 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
|
||||
|
||||
struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
|
||||
void i915_capture_error_state(struct drm_i915_private *dev_priv,
|
||||
unsigned long engine_mask,
|
||||
intel_engine_mask_t engine_mask,
|
||||
const char *error_msg);
|
||||
|
||||
static inline struct i915_gpu_state *
|
||||
|
@ -28,16 +28,19 @@
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/sysrq.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/cpuidle.h>
|
||||
#include <linux/circ_buf.h>
|
||||
#include <drm/drm_irq.h>
|
||||
#include <linux/cpuidle.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/sysrq.h>
|
||||
|
||||
#include <drm/drm_drv.h>
|
||||
#include <drm/drm_irq.h>
|
||||
#include <drm/i915_drm.h>
|
||||
|
||||
#include "i915_drv.h"
|
||||
#include "i915_trace.h"
|
||||
#include "intel_drv.h"
|
||||
#include "intel_psr.h"
|
||||
|
||||
/**
|
||||
* DOC: interrupt handling
|
||||
@ -133,92 +136,120 @@ static const u32 hpd_icp[HPD_NUM_PINS] = {
|
||||
[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
|
||||
};
|
||||
|
||||
/* IIR can theoretically queue up two events. Be paranoid. */
|
||||
#define GEN8_IRQ_RESET_NDX(type, which) do { \
|
||||
I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
|
||||
POSTING_READ(GEN8_##type##_IMR(which)); \
|
||||
I915_WRITE(GEN8_##type##_IER(which), 0); \
|
||||
I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
|
||||
POSTING_READ(GEN8_##type##_IIR(which)); \
|
||||
I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
|
||||
POSTING_READ(GEN8_##type##_IIR(which)); \
|
||||
} while (0)
|
||||
static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
|
||||
i915_reg_t iir, i915_reg_t ier)
|
||||
{
|
||||
intel_uncore_write(uncore, imr, 0xffffffff);
|
||||
intel_uncore_posting_read(uncore, imr);
|
||||
|
||||
#define GEN3_IRQ_RESET(type) do { \
|
||||
I915_WRITE(type##IMR, 0xffffffff); \
|
||||
POSTING_READ(type##IMR); \
|
||||
I915_WRITE(type##IER, 0); \
|
||||
I915_WRITE(type##IIR, 0xffffffff); \
|
||||
POSTING_READ(type##IIR); \
|
||||
I915_WRITE(type##IIR, 0xffffffff); \
|
||||
POSTING_READ(type##IIR); \
|
||||
} while (0)
|
||||
intel_uncore_write(uncore, ier, 0);
|
||||
|
||||
#define GEN2_IRQ_RESET(type) do { \
|
||||
I915_WRITE16(type##IMR, 0xffff); \
|
||||
POSTING_READ16(type##IMR); \
|
||||
I915_WRITE16(type##IER, 0); \
|
||||
I915_WRITE16(type##IIR, 0xffff); \
|
||||
POSTING_READ16(type##IIR); \
|
||||
I915_WRITE16(type##IIR, 0xffff); \
|
||||
POSTING_READ16(type##IIR); \
|
||||
} while (0)
|
||||
/* IIR can theoretically queue up two events. Be paranoid. */
|
||||
intel_uncore_write(uncore, iir, 0xffffffff);
|
||||
intel_uncore_posting_read(uncore, iir);
|
||||
intel_uncore_write(uncore, iir, 0xffffffff);
|
||||
intel_uncore_posting_read(uncore, iir);
|
||||
}
|
||||
|
||||
static void gen2_irq_reset(struct intel_uncore *uncore)
|
||||
{
|
||||
intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
|
||||
intel_uncore_posting_read16(uncore, GEN2_IMR);
|
||||
|
||||
intel_uncore_write16(uncore, GEN2_IER, 0);
|
||||
|
||||
/* IIR can theoretically queue up two events. Be paranoid. */
|
||||
intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
|
||||
intel_uncore_posting_read16(uncore, GEN2_IIR);
|
||||
intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
|
||||
intel_uncore_posting_read16(uncore, GEN2_IIR);
|
||||
}
|
||||
|
||||
#define GEN8_IRQ_RESET_NDX(uncore, type, which) \
|
||||
({ \
|
||||
unsigned int which_ = which; \
|
||||
gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
|
||||
GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
|
||||
})
|
||||
|
||||
#define GEN3_IRQ_RESET(uncore, type) \
|
||||
gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
|
||||
|
||||
#define GEN2_IRQ_RESET(uncore) \
|
||||
gen2_irq_reset(uncore)
|
||||
|
||||
/*
|
||||
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
|
||||
*/
|
||||
static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
|
||||
i915_reg_t reg)
|
||||
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
|
||||
{
|
||||
u32 val = I915_READ(reg);
|
||||
u32 val = intel_uncore_read(uncore, reg);
|
||||
|
||||
if (val == 0)
|
||||
return;
|
||||
|
||||
WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
|
||||
i915_mmio_reg_offset(reg), val);
|
||||
I915_WRITE(reg, 0xffffffff);
|
||||
POSTING_READ(reg);
|
||||
I915_WRITE(reg, 0xffffffff);
|
||||
POSTING_READ(reg);
|
||||
intel_uncore_write(uncore, reg, 0xffffffff);
|
||||
intel_uncore_posting_read(uncore, reg);
|
||||
intel_uncore_write(uncore, reg, 0xffffffff);
|
||||
intel_uncore_posting_read(uncore, reg);
|
||||
}
|
||||
|
||||
static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
|
||||
i915_reg_t reg)
|
||||
static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
|
||||
{
|
||||
u16 val = I915_READ16(reg);
|
||||
u16 val = intel_uncore_read16(uncore, GEN2_IIR);
|
||||
|
||||
if (val == 0)
|
||||
return;
|
||||
|
||||
WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
|
||||
i915_mmio_reg_offset(reg), val);
|
||||
I915_WRITE16(reg, 0xffff);
|
||||
POSTING_READ16(reg);
|
||||
I915_WRITE16(reg, 0xffff);
|
||||
POSTING_READ16(reg);
|
||||
i915_mmio_reg_offset(GEN2_IIR), val);
|
||||
intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
|
||||
intel_uncore_posting_read16(uncore, GEN2_IIR);
|
||||
intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
|
||||
intel_uncore_posting_read16(uncore, GEN2_IIR);
|
||||
}
|
||||
|
||||
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
|
||||
gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
|
||||
I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
|
||||
I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
|
||||
POSTING_READ(GEN8_##type##_IMR(which)); \
|
||||
} while (0)
|
||||
static void gen3_irq_init(struct intel_uncore *uncore,
|
||||
i915_reg_t imr, u32 imr_val,
|
||||
i915_reg_t ier, u32 ier_val,
|
||||
i915_reg_t iir)
|
||||
{
|
||||
gen3_assert_iir_is_zero(uncore, iir);
|
||||
|
||||
#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
|
||||
gen3_assert_iir_is_zero(dev_priv, type##IIR); \
|
||||
I915_WRITE(type##IER, (ier_val)); \
|
||||
I915_WRITE(type##IMR, (imr_val)); \
|
||||
POSTING_READ(type##IMR); \
|
||||
} while (0)
|
||||
intel_uncore_write(uncore, ier, ier_val);
|
||||
intel_uncore_write(uncore, imr, imr_val);
|
||||
intel_uncore_posting_read(uncore, imr);
|
||||
}
|
||||
|
||||
#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
|
||||
gen2_assert_iir_is_zero(dev_priv, type##IIR); \
|
||||
I915_WRITE16(type##IER, (ier_val)); \
|
||||
I915_WRITE16(type##IMR, (imr_val)); \
|
||||
POSTING_READ16(type##IMR); \
|
||||
} while (0)
|
||||
static void gen2_irq_init(struct intel_uncore *uncore,
|
||||
u32 imr_val, u32 ier_val)
|
||||
{
|
||||
gen2_assert_iir_is_zero(uncore);
|
||||
|
||||
intel_uncore_write16(uncore, GEN2_IER, ier_val);
|
||||
intel_uncore_write16(uncore, GEN2_IMR, imr_val);
|
||||
intel_uncore_posting_read16(uncore, GEN2_IMR);
|
||||
}
|
||||
|
||||
#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
|
||||
({ \
|
||||
unsigned int which_ = which; \
|
||||
gen3_irq_init((uncore), \
|
||||
GEN8_##type##_IMR(which_), imr_val, \
|
||||
GEN8_##type##_IER(which_), ier_val, \
|
||||
GEN8_##type##_IIR(which_)); \
|
||||
})
|
||||
|
||||
#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
|
||||
gen3_irq_init((uncore), \
|
||||
type##IMR, imr_val, \
|
||||
type##IER, ier_val, \
|
||||
type##IIR)
|
||||
|
||||
#define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
|
||||
gen2_irq_init((uncore), imr_val, ier_val)
|
||||
|
||||
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
|
||||
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
|
||||
@ -366,24 +397,41 @@ static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
|
||||
return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
|
||||
}
|
||||
|
||||
static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
|
||||
static void write_pm_imr(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
if (INTEL_GEN(dev_priv) >= 11)
|
||||
return GEN11_GPM_WGBOXPERF_INTR_MASK;
|
||||
else if (INTEL_GEN(dev_priv) >= 8)
|
||||
return GEN8_GT_IMR(2);
|
||||
else
|
||||
return GEN6_PMIMR;
|
||||
i915_reg_t reg;
|
||||
u32 mask = dev_priv->pm_imr;
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 11) {
|
||||
reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
|
||||
/* pm is in upper half */
|
||||
mask = mask << 16;
|
||||
} else if (INTEL_GEN(dev_priv) >= 8) {
|
||||
reg = GEN8_GT_IMR(2);
|
||||
} else {
|
||||
reg = GEN6_PMIMR;
|
||||
}
|
||||
|
||||
I915_WRITE(reg, mask);
|
||||
POSTING_READ(reg);
|
||||
}
|
||||
|
||||
static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
|
||||
static void write_pm_ier(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
if (INTEL_GEN(dev_priv) >= 11)
|
||||
return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
|
||||
else if (INTEL_GEN(dev_priv) >= 8)
|
||||
return GEN8_GT_IER(2);
|
||||
else
|
||||
return GEN6_PMIER;
|
||||
i915_reg_t reg;
|
||||
u32 mask = dev_priv->pm_ier;
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 11) {
|
||||
reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
|
||||
/* pm is in upper half */
|
||||
mask = mask << 16;
|
||||
} else if (INTEL_GEN(dev_priv) >= 8) {
|
||||
reg = GEN8_GT_IER(2);
|
||||
} else {
|
||||
reg = GEN6_PMIER;
|
||||
}
|
||||
|
||||
I915_WRITE(reg, mask);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -408,8 +456,7 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
|
||||
|
||||
if (new_val != dev_priv->pm_imr) {
|
||||
dev_priv->pm_imr = new_val;
|
||||
I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
|
||||
POSTING_READ(gen6_pm_imr(dev_priv));
|
||||
write_pm_imr(dev_priv);
|
||||
}
|
||||
}
|
||||
|
||||
@ -450,7 +497,7 @@ static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mas
|
||||
lockdep_assert_held(&dev_priv->irq_lock);
|
||||
|
||||
dev_priv->pm_ier |= enable_mask;
|
||||
I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
|
||||
write_pm_ier(dev_priv);
|
||||
gen6_unmask_pm_irq(dev_priv, enable_mask);
|
||||
/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
|
||||
}
|
||||
@ -461,7 +508,7 @@ static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_m
|
||||
|
||||
dev_priv->pm_ier &= ~disable_mask;
|
||||
__gen6_mask_pm_irq(dev_priv, disable_mask);
|
||||
I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
|
||||
write_pm_ier(dev_priv);
|
||||
/* though a barrier is missing here, but don't really need a one */
|
||||
}
|
||||
|
||||
@ -1470,7 +1517,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
|
||||
|
||||
if (iir & GT_RENDER_USER_INTERRUPT) {
|
||||
intel_engine_breadcrumbs_irq(engine);
|
||||
tasklet |= USES_GUC_SUBMISSION(engine->i915);
|
||||
tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
|
||||
}
|
||||
|
||||
if (tasklet)
|
||||
@ -1793,6 +1840,25 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
|
||||
/* The RPS events need forcewake, so we add them to a work queue and mask their
|
||||
* IMR bits until the work is done. Other interrupts can be processed without
|
||||
* the work queue. */
|
||||
static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
|
||||
{
|
||||
struct intel_rps *rps = &i915->gt_pm.rps;
|
||||
const u32 events = i915->pm_rps_events & pm_iir;
|
||||
|
||||
lockdep_assert_held(&i915->irq_lock);
|
||||
|
||||
if (unlikely(!events))
|
||||
return;
|
||||
|
||||
gen6_mask_pm_irq(i915, events);
|
||||
|
||||
if (!rps->interrupts_enabled)
|
||||
return;
|
||||
|
||||
rps->pm_iir |= events;
|
||||
schedule_work(&rps->work);
|
||||
}
|
||||
|
||||
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
|
||||
{
|
||||
struct intel_rps *rps = &dev_priv->gt_pm.rps;
|
||||
@ -2946,7 +3012,7 @@ gen11_other_irq_handler(struct drm_i915_private * const i915,
|
||||
const u8 instance, const u16 iir)
|
||||
{
|
||||
if (instance == OTHER_GTPM_INSTANCE)
|
||||
return gen6_rps_irq_handler(i915, iir);
|
||||
return gen11_rps_irq_handler(i915, iir);
|
||||
|
||||
WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
|
||||
instance, iir);
|
||||
@ -3003,14 +3069,8 @@ gen11_gt_bank_handler(struct drm_i915_private * const i915,
|
||||
|
||||
intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
|
||||
|
||||
if (unlikely(!intr_dw)) {
|
||||
DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
|
||||
return;
|
||||
}
|
||||
|
||||
for_each_set_bit(bit, &intr_dw, 32) {
|
||||
const u32 ident = gen11_gt_engine_identity(i915,
|
||||
bank, bit);
|
||||
const u32 ident = gen11_gt_engine_identity(i915, bank, bit);
|
||||
|
||||
gen11_gt_identity_handler(i915, ident);
|
||||
}
|
||||
@ -3305,10 +3365,12 @@ static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
|
||||
|
||||
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
|
||||
if (HAS_PCH_NOP(dev_priv))
|
||||
return;
|
||||
|
||||
GEN3_IRQ_RESET(SDE);
|
||||
GEN3_IRQ_RESET(uncore, SDE);
|
||||
|
||||
if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
|
||||
I915_WRITE(SERR_INT, 0xffffffff);
|
||||
@ -3336,13 +3398,17 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
|
||||
|
||||
static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
GEN3_IRQ_RESET(GT);
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
|
||||
GEN3_IRQ_RESET(uncore, GT);
|
||||
if (INTEL_GEN(dev_priv) >= 6)
|
||||
GEN3_IRQ_RESET(GEN6_PM);
|
||||
GEN3_IRQ_RESET(uncore, GEN6_PM);
|
||||
}
|
||||
|
||||
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
|
||||
if (IS_CHERRYVIEW(dev_priv))
|
||||
I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
|
||||
else
|
||||
@ -3353,12 +3419,14 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
|
||||
|
||||
i9xx_pipestat_irq_reset(dev_priv);
|
||||
|
||||
GEN3_IRQ_RESET(VLV_);
|
||||
GEN3_IRQ_RESET(uncore, VLV_);
|
||||
dev_priv->irq_mask = ~0u;
|
||||
}
|
||||
|
||||
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
|
||||
u32 pipestat_mask;
|
||||
u32 enable_mask;
|
||||
enum pipe pipe;
|
||||
@ -3383,7 +3451,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
|
||||
dev_priv->irq_mask = ~enable_mask;
|
||||
|
||||
GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
|
||||
GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
|
||||
}
|
||||
|
||||
/* drm_dma.h hooks
|
||||
@ -3391,8 +3459,9 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
static void ironlake_irq_reset(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
|
||||
GEN3_IRQ_RESET(DE);
|
||||
GEN3_IRQ_RESET(uncore, DE);
|
||||
if (IS_GEN(dev_priv, 7))
|
||||
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
|
||||
|
||||
@ -3423,15 +3492,18 @@ static void valleyview_irq_reset(struct drm_device *dev)
|
||||
|
||||
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
GEN8_IRQ_RESET_NDX(GT, 0);
|
||||
GEN8_IRQ_RESET_NDX(GT, 1);
|
||||
GEN8_IRQ_RESET_NDX(GT, 2);
|
||||
GEN8_IRQ_RESET_NDX(GT, 3);
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
|
||||
GEN8_IRQ_RESET_NDX(uncore, GT, 0);
|
||||
GEN8_IRQ_RESET_NDX(uncore, GT, 1);
|
||||
GEN8_IRQ_RESET_NDX(uncore, GT, 2);
|
||||
GEN8_IRQ_RESET_NDX(uncore, GT, 3);
|
||||
}
|
||||
|
||||
static void gen8_irq_reset(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
int pipe;
|
||||
|
||||
gen8_master_intr_disable(dev_priv->uncore.regs);
|
||||
@ -3444,11 +3516,11 @@ static void gen8_irq_reset(struct drm_device *dev)
|
||||
for_each_pipe(dev_priv, pipe)
|
||||
if (intel_display_power_is_enabled(dev_priv,
|
||||
POWER_DOMAIN_PIPE(pipe)))
|
||||
GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
|
||||
GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
|
||||
|
||||
GEN3_IRQ_RESET(GEN8_DE_PORT_);
|
||||
GEN3_IRQ_RESET(GEN8_DE_MISC_);
|
||||
GEN3_IRQ_RESET(GEN8_PCU_);
|
||||
GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
|
||||
GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
|
||||
GEN3_IRQ_RESET(uncore, GEN8_PCU_);
|
||||
|
||||
if (HAS_PCH_SPLIT(dev_priv))
|
||||
ibx_irq_reset(dev_priv);
|
||||
@ -3474,6 +3546,7 @@ static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
|
||||
static void gen11_irq_reset(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
int pipe;
|
||||
|
||||
gen11_master_intr_disable(dev_priv->uncore.regs);
|
||||
@ -3488,21 +3561,23 @@ static void gen11_irq_reset(struct drm_device *dev)
|
||||
for_each_pipe(dev_priv, pipe)
|
||||
if (intel_display_power_is_enabled(dev_priv,
|
||||
POWER_DOMAIN_PIPE(pipe)))
|
||||
GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
|
||||
GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
|
||||
|
||||
GEN3_IRQ_RESET(GEN8_DE_PORT_);
|
||||
GEN3_IRQ_RESET(GEN8_DE_MISC_);
|
||||
GEN3_IRQ_RESET(GEN11_DE_HPD_);
|
||||
GEN3_IRQ_RESET(GEN11_GU_MISC_);
|
||||
GEN3_IRQ_RESET(GEN8_PCU_);
|
||||
GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
|
||||
GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
|
||||
GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
|
||||
GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
|
||||
GEN3_IRQ_RESET(uncore, GEN8_PCU_);
|
||||
|
||||
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
|
||||
GEN3_IRQ_RESET(SDE);
|
||||
GEN3_IRQ_RESET(uncore, SDE);
|
||||
}
|
||||
|
||||
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
|
||||
u8 pipe_mask)
|
||||
{
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
|
||||
u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
|
||||
enum pipe pipe;
|
||||
|
||||
@ -3514,7 +3589,7 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
|
||||
}
|
||||
|
||||
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
|
||||
GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
|
||||
GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
|
||||
dev_priv->de_irq_mask[pipe],
|
||||
~dev_priv->de_irq_mask[pipe] | extra_ier);
|
||||
|
||||
@ -3524,6 +3599,7 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
|
||||
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
|
||||
u8 pipe_mask)
|
||||
{
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
enum pipe pipe;
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
@ -3534,7 +3610,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
|
||||
}
|
||||
|
||||
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
|
||||
GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
|
||||
GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
|
||||
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
@ -3545,13 +3621,14 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
|
||||
static void cherryview_irq_reset(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
|
||||
I915_WRITE(GEN8_MASTER_IRQ, 0);
|
||||
POSTING_READ(GEN8_MASTER_IRQ);
|
||||
|
||||
gen8_gt_irq_reset(dev_priv);
|
||||
|
||||
GEN3_IRQ_RESET(GEN8_PCU_);
|
||||
GEN3_IRQ_RESET(uncore, GEN8_PCU_);
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
if (dev_priv->display_irqs_enabled)
|
||||
@ -3823,7 +3900,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
|
||||
else
|
||||
mask = SDE_GMBUS_CPT;
|
||||
|
||||
gen3_assert_iir_is_zero(dev_priv, SDEIIR);
|
||||
gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
|
||||
I915_WRITE(SDEIMR, ~mask);
|
||||
|
||||
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
|
||||
@ -3836,6 +3913,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
|
||||
static void gen5_gt_irq_postinstall(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
u32 pm_irqs, gt_irqs;
|
||||
|
||||
pm_irqs = gt_irqs = 0;
|
||||
@ -3854,7 +3932,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
|
||||
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
|
||||
}
|
||||
|
||||
GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
|
||||
GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs);
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 6) {
|
||||
/*
|
||||
@ -3867,13 +3945,14 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
|
||||
}
|
||||
|
||||
dev_priv->pm_imr = 0xffffffff;
|
||||
GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
|
||||
GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->pm_imr, pm_irqs);
|
||||
}
|
||||
}
|
||||
|
||||
static int ironlake_irq_postinstall(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
u32 display_mask, extra_mask;
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 7) {
|
||||
@ -3892,7 +3971,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
|
||||
}
|
||||
|
||||
if (IS_HASWELL(dev_priv)) {
|
||||
gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
|
||||
gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
|
||||
intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
|
||||
display_mask |= DE_EDP_PSR_INT_HSW;
|
||||
}
|
||||
@ -3901,7 +3980,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
|
||||
|
||||
ibx_irq_pre_postinstall(dev);
|
||||
|
||||
GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
|
||||
GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
|
||||
display_mask | extra_mask);
|
||||
|
||||
gen5_gt_irq_postinstall(dev);
|
||||
|
||||
@ -3971,6 +4051,8 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
|
||||
|
||||
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
|
||||
/* These are interrupts we'll toggle with the ring mask register */
|
||||
u32 gt_interrupts[] = {
|
||||
(GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
|
||||
@ -3991,18 +4073,20 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
|
||||
dev_priv->pm_ier = 0x0;
|
||||
dev_priv->pm_imr = ~dev_priv->pm_ier;
|
||||
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
|
||||
GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
|
||||
GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
|
||||
GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
|
||||
/*
|
||||
* RPS interrupts will get enabled/disabled on demand when RPS itself
|
||||
* is enabled/disabled. Same wil be the case for GuC interrupts.
|
||||
*/
|
||||
GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
|
||||
GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
|
||||
GEN8_IRQ_INIT_NDX(uncore, GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
|
||||
GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
|
||||
}
|
||||
|
||||
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
|
||||
u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
|
||||
u32 de_pipe_enables;
|
||||
u32 de_port_masked = GEN8_AUX_CHANNEL_A;
|
||||
@ -4038,7 +4122,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
else if (IS_BROADWELL(dev_priv))
|
||||
de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
|
||||
|
||||
gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
|
||||
gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
|
||||
intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
|
||||
|
||||
for_each_pipe(dev_priv, pipe) {
|
||||
@ -4046,20 +4130,21 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
|
||||
|
||||
if (intel_display_power_is_enabled(dev_priv,
|
||||
POWER_DOMAIN_PIPE(pipe)))
|
||||
GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
|
||||
GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
|
||||
dev_priv->de_irq_mask[pipe],
|
||||
de_pipe_enables);
|
||||
}
|
||||
|
||||
GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
|
||||
GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
|
||||
GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
|
||||
GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 11) {
|
||||
u32 de_hpd_masked = 0;
|
||||
u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
|
||||
GEN11_DE_TBT_HOTPLUG_MASK;
|
||||
|
||||
GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
|
||||
GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
|
||||
de_hpd_enables);
|
||||
gen11_hpd_detection_setup(dev_priv);
|
||||
} else if (IS_GEN9_LP(dev_priv)) {
|
||||
bxt_hpd_detection_setup(dev_priv);
|
||||
@ -4122,7 +4207,7 @@ static void icp_irq_postinstall(struct drm_device *dev)
|
||||
I915_WRITE(SDEIER, 0xffffffff);
|
||||
POSTING_READ(SDEIER);
|
||||
|
||||
gen3_assert_iir_is_zero(dev_priv, SDEIIR);
|
||||
gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
|
||||
I915_WRITE(SDEIMR, ~mask);
|
||||
|
||||
icp_hpd_detection_setup(dev_priv);
|
||||
@ -4131,6 +4216,7 @@ static void icp_irq_postinstall(struct drm_device *dev)
|
||||
static int gen11_irq_postinstall(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
|
||||
|
||||
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
|
||||
@ -4139,7 +4225,7 @@ static int gen11_irq_postinstall(struct drm_device *dev)
|
||||
gen11_gt_irq_postinstall(dev_priv);
|
||||
gen8_de_irq_postinstall(dev_priv);
|
||||
|
||||
GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
|
||||
GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
|
||||
|
||||
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
|
||||
|
||||
@ -4169,15 +4255,17 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
|
||||
static void i8xx_irq_reset(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
|
||||
i9xx_pipestat_irq_reset(dev_priv);
|
||||
|
||||
GEN2_IRQ_RESET();
|
||||
GEN2_IRQ_RESET(uncore);
|
||||
}
|
||||
|
||||
static int i8xx_irq_postinstall(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
u16 enable_mask;
|
||||
|
||||
I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
|
||||
@ -4195,7 +4283,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
|
||||
I915_MASTER_ERROR_INTERRUPT |
|
||||
I915_USER_INTERRUPT;
|
||||
|
||||
GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
|
||||
GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
|
||||
|
||||
/* Interrupt setup is already guaranteed to be single-threaded, this is
|
||||
* just to make the assert_spin_locked check happy. */
|
||||
@ -4299,7 +4387,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
|
||||
u16 eir = 0, eir_stuck = 0;
|
||||
u16 iir;
|
||||
|
||||
iir = I915_READ16(IIR);
|
||||
iir = I915_READ16(GEN2_IIR);
|
||||
if (iir == 0)
|
||||
break;
|
||||
|
||||
@ -4312,7 +4400,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
|
||||
if (iir & I915_MASTER_ERROR_INTERRUPT)
|
||||
i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
|
||||
|
||||
I915_WRITE16(IIR, iir);
|
||||
I915_WRITE16(GEN2_IIR, iir);
|
||||
|
||||
if (iir & I915_USER_INTERRUPT)
|
||||
intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
|
||||
@ -4331,6 +4419,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
|
||||
static void i915_irq_reset(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
|
||||
if (I915_HAS_HOTPLUG(dev_priv)) {
|
||||
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
|
||||
@ -4339,12 +4428,13 @@ static void i915_irq_reset(struct drm_device *dev)
|
||||
|
||||
i9xx_pipestat_irq_reset(dev_priv);
|
||||
|
||||
GEN3_IRQ_RESET();
|
||||
GEN3_IRQ_RESET(uncore, GEN2_);
|
||||
}
|
||||
|
||||
static int i915_irq_postinstall(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
u32 enable_mask;
|
||||
|
||||
I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
|
||||
@ -4371,7 +4461,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
|
||||
dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
|
||||
}
|
||||
|
||||
GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
|
||||
GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
|
||||
|
||||
/* Interrupt setup is already guaranteed to be single-threaded, this is
|
||||
* just to make the assert_spin_locked check happy. */
|
||||
@ -4403,7 +4493,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
|
||||
u32 hotplug_status = 0;
|
||||
u32 iir;
|
||||
|
||||
iir = I915_READ(IIR);
|
||||
iir = I915_READ(GEN2_IIR);
|
||||
if (iir == 0)
|
||||
break;
|
||||
|
||||
@ -4420,7 +4510,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
|
||||
if (iir & I915_MASTER_ERROR_INTERRUPT)
|
||||
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
|
||||
|
||||
I915_WRITE(IIR, iir);
|
||||
I915_WRITE(GEN2_IIR, iir);
|
||||
|
||||
if (iir & I915_USER_INTERRUPT)
|
||||
intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
|
||||
@ -4442,18 +4532,20 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
|
||||
static void i965_irq_reset(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
|
||||
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
|
||||
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
|
||||
|
||||
i9xx_pipestat_irq_reset(dev_priv);
|
||||
|
||||
GEN3_IRQ_RESET();
|
||||
GEN3_IRQ_RESET(uncore, GEN2_);
|
||||
}
|
||||
|
||||
static int i965_irq_postinstall(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_uncore *uncore = &dev_priv->uncore;
|
||||
u32 enable_mask;
|
||||
u32 error_mask;
|
||||
|
||||
@ -4491,7 +4583,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
|
||||
if (IS_G4X(dev_priv))
|
||||
enable_mask |= I915_BSD_USER_INTERRUPT;
|
||||
|
||||
GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
|
||||
GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
|
||||
|
||||
/* Interrupt setup is already guaranteed to be single-threaded, this is
|
||||
* just to make the assert_spin_locked check happy. */
|
||||
@ -4549,7 +4641,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
|
||||
u32 hotplug_status = 0;
|
||||
u32 iir;
|
||||
|
||||
iir = I915_READ(IIR);
|
||||
iir = I915_READ(GEN2_IIR);
|
||||
if (iir == 0)
|
||||
break;
|
||||
|
||||
@ -4565,7 +4657,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
|
||||
if (iir & I915_MASTER_ERROR_INTERRUPT)
|
||||
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
|
||||
|
||||
I915_WRITE(IIR, iir);
|
||||
I915_WRITE(GEN2_IIR, iir);
|
||||
|
||||
if (iir & I915_USER_INTERRUPT)
|
||||
intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
|
||||
@ -4623,6 +4715,10 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
|
||||
GEN6_PM_RP_DOWN_THRESHOLD |
|
||||
GEN6_PM_RP_DOWN_TIMEOUT);
|
||||
|
||||
/* We share the register with other engine */
|
||||
if (INTEL_GEN(dev_priv) > 9)
|
||||
GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);
|
||||
|
||||
rps->pm_intrmsk_mbz = 0;
|
||||
|
||||
/*
|
||||
|
@ -31,8 +31,9 @@
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_selftest.h"
#include "intel_fbdev.h"

#define PLATFORM(x) .platform = (x), .platform_mask = BIT(x)
#define PLATFORM(x) .platform = (x)
#define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)

#define I845_PIPE_OFFSETS \
@ -116,8 +117,16 @@
	[PIPE_C] = IVB_CURSOR_C_OFFSET, \
}

#define BDW_COLORS \
	.color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
#define I9XX_COLORS \
	.color = { .gamma_lut_size = 256 }
#define I965_COLORS \
	.color = { .gamma_lut_size = 129, \
		   .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
	}
#define ILK_COLORS \
	.color = { .gamma_lut_size = 1024 }
#define IVB_COLORS \
	.color = { .degamma_lut_size = 1024, .gamma_lut_size = 1024 }
#define CHV_COLORS \
	.color = { .degamma_lut_size = 65, .gamma_lut_size = 257, \
		   .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
@ -150,6 +159,7 @@
	.has_coherent_ggtt = false, \
	I9XX_PIPE_OFFSETS, \
	I9XX_CURSOR_OFFSETS, \
	I9XX_COLORS, \
	GEN_DEFAULT_PAGE_SIZES

#define I845_FEATURES \
@ -166,6 +176,7 @@
	.has_coherent_ggtt = false, \
	I845_PIPE_OFFSETS, \
	I845_CURSOR_OFFSETS, \
	I9XX_COLORS, \
	GEN_DEFAULT_PAGE_SIZES

static const struct intel_device_info intel_i830_info = {
@ -199,6 +210,7 @@ static const struct intel_device_info intel_i865g_info = {
	.has_coherent_ggtt = true, \
	I9XX_PIPE_OFFSETS, \
	I9XX_CURSOR_OFFSETS, \
	I9XX_COLORS, \
	GEN_DEFAULT_PAGE_SIZES

static const struct intel_device_info intel_i915g_info = {
@ -257,7 +269,14 @@ static const struct intel_device_info intel_g33_info = {
	.display.has_overlay = 1,
};

static const struct intel_device_info intel_pineview_info = {
static const struct intel_device_info intel_pineview_g_info = {
	GEN3_FEATURES,
	PLATFORM(INTEL_PINEVIEW),
	.display.has_hotplug = 1,
	.display.has_overlay = 1,
};

static const struct intel_device_info intel_pineview_m_info = {
	GEN3_FEATURES,
	PLATFORM(INTEL_PINEVIEW),
	.is_mobile = 1,
@ -276,6 +295,7 @@ static const struct intel_device_info intel_pineview_info = {
	.has_coherent_ggtt = true, \
	I9XX_PIPE_OFFSETS, \
	I9XX_CURSOR_OFFSETS, \
	I965_COLORS, \
	GEN_DEFAULT_PAGE_SIZES

static const struct intel_device_info intel_i965g_info = {
@ -325,6 +345,7 @@ static const struct intel_device_info intel_gm45_info = {
	.has_rc6 = 0, \
	I9XX_PIPE_OFFSETS, \
	I9XX_CURSOR_OFFSETS, \
	ILK_COLORS, \
	GEN_DEFAULT_PAGE_SIZES

static const struct intel_device_info intel_ironlake_d_info = {
@ -353,6 +374,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
	.ppgtt_size = 31, \
	I9XX_PIPE_OFFSETS, \
	I9XX_CURSOR_OFFSETS, \
	ILK_COLORS, \
	GEN_DEFAULT_PAGE_SIZES

#define SNB_D_PLATFORM \
@ -399,6 +421,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
	.ppgtt_size = 31, \
	IVB_PIPE_OFFSETS, \
	IVB_CURSOR_OFFSETS, \
	IVB_COLORS, \
	GEN_DEFAULT_PAGE_SIZES

#define IVB_D_PLATFORM \
@ -457,6 +480,7 @@ static const struct intel_device_info intel_valleyview_info = {
	.display_mmio_offset = VLV_DISPLAY_BASE,
	I9XX_PIPE_OFFSETS,
	I9XX_CURSOR_OFFSETS,
	I965_COLORS,
	GEN_DEFAULT_PAGE_SIZES,
};

@ -494,7 +518,6 @@ static const struct intel_device_info intel_haswell_gt3_info = {
#define GEN8_FEATURES \
	G75_FEATURES, \
	GEN(8), \
	BDW_COLORS, \
	.page_sizes = I915_GTT_PAGE_SIZE_4K | \
		      I915_GTT_PAGE_SIZE_2M, \
	.has_logical_ring_contexts = 1, \
@ -629,7 +652,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
	.display.has_ipc = 1, \
	HSW_PIPE_OFFSETS, \
	IVB_CURSOR_OFFSETS, \
	BDW_COLORS, \
	IVB_COLORS, \
	GEN9_DEFAULT_PAGE_SIZES

static const struct intel_device_info intel_broxton_info = {
@ -761,7 +784,8 @@ static const struct pci_device_id pciidlist[] = {
	INTEL_I965GM_IDS(&intel_i965gm_info),
	INTEL_GM45_IDS(&intel_gm45_info),
	INTEL_G45_IDS(&intel_g45_info),
	INTEL_PINEVIEW_IDS(&intel_pineview_info),
	INTEL_PINEVIEW_G_IDS(&intel_pineview_g_info),
	INTEL_PINEVIEW_M_IDS(&intel_pineview_m_info),
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
	INTEL_SNB_D_GT1_IDS(&intel_sandybridge_d_gt1_info),
42
drivers/gpu/drm/i915/i915_priolist_types.h
Normal file
@ -0,0 +1,42 @@
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#ifndef _I915_PRIOLIST_TYPES_H_
#define _I915_PRIOLIST_TYPES_H_

#include <linux/list.h>
#include <linux/rbtree.h>

#include <uapi/drm/i915_drm.h>

enum {
	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,

	I915_PRIORITY_INVALID = INT_MIN
};

#define I915_USER_PRIORITY_SHIFT 3
#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)

#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)

#define I915_PRIORITY_WAIT		((u8)BIT(0))
#define I915_PRIORITY_NEWCLIENT		((u8)BIT(1))
#define I915_PRIORITY_NOSEMAPHORE	((u8)BIT(2))

#define __NO_PREEMPTION (I915_PRIORITY_WAIT)

struct i915_priolist {
	struct list_head requests[I915_PRIORITY_COUNT];
	struct rb_node node;
	unsigned long used;
	int priority;
};

#endif /* _I915_PRIOLIST_TYPES_H_ */
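The split-out header keeps the priority encoding in one place: the low I915_USER_PRIORITY_SHIFT bits carry internal boost hints (WAIT, NEWCLIENT, NOSEMAPHORE) and everything above them is the user-visible priority, so each i915_priolist buckets requests by those low bits. A minimal user-space model of that packing (stand-alone C, not the kernel code; BIT() and the u8 casts are replaced by plain arithmetic):

#include <stdio.h>

#define USER_PRIORITY_SHIFT 3
#define USER_PRIORITY(x)    ((x) << USER_PRIORITY_SHIFT)
#define PRIORITY_COUNT      (1u << USER_PRIORITY_SHIFT)
#define PRIORITY_MASK       (PRIORITY_COUNT - 1)
#define PRIORITY_WAIT       (1u << 0)
#define PRIORITY_NEWCLIENT  (1u << 1)

int main(void)
{
	/* user level 2, boosted because it is a fresh client */
	int prio = USER_PRIORITY(2) | PRIORITY_NEWCLIENT;

	printf("user level : %d\n", prio >> USER_PRIORITY_SHIFT);      /* 2 */
	printf("bucket     : %u\n", (unsigned)prio & PRIORITY_MASK);   /* index into requests[] */
	printf("has WAIT   : %d\n", !!(prio & PRIORITY_WAIT));         /* 0 */
	return 0;
}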
@ -439,8 +439,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RING_PP_DIR_DCLV(base)	_MMIO((base) + 0x220)
#define PP_DIR_DCLV_2G		0xffffffff

#define GEN8_RING_PDP_UDW(engine, n)	_MMIO((engine)->mmio_base + 0x270 + (n) * 8 + 4)
#define GEN8_RING_PDP_LDW(engine, n)	_MMIO((engine)->mmio_base + 0x270 + (n) * 8)
#define GEN8_RING_PDP_UDW(base, n)	_MMIO((base) + 0x270 + (n) * 8 + 4)
#define GEN8_RING_PDP_LDW(base, n)	_MMIO((base) + 0x270 + (n) * 8)

#define GEN8_R_PWR_CLK_STATE	_MMIO(0x20C8)
#define GEN8_RPCS_ENABLE	(1 << 31)
@ -2446,8 +2446,10 @@ enum i915_power_well_id {
#define RING_HWS_PGA(base)	_MMIO((base) + 0x80)
#define RING_HWS_PGA_GEN6(base)	_MMIO((base) + 0x2080)
#define RING_RESET_CTL(base)	_MMIO((base) + 0xd0)
#define RESET_CTL_REQUEST_RESET		(1 << 0)
#define RESET_CTL_READY_TO_RESET	(1 << 1)
#define RESET_CTL_CAT_ERROR		REG_BIT(2)
#define RESET_CTL_READY_TO_RESET	REG_BIT(1)
#define RESET_CTL_REQUEST_RESET		REG_BIT(0)

#define RING_SEMA_WAIT_POLL(base)	_MMIO((base) + 0x24c)

#define HSW_GTT_CACHE_EN	_MMIO(0x4024)
@ -2713,10 +2715,10 @@ enum i915_power_well_id {
#define VLV_GU_CTL0	_MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1	_MMIO(VLV_DISPLAY_BASE + 0x2034)
#define SCPD0		_MMIO(0x209c) /* 915+ only */
#define IER		_MMIO(0x20a0)
#define IIR		_MMIO(0x20a4)
#define IMR		_MMIO(0x20a8)
#define ISR		_MMIO(0x20ac)
#define GEN2_IER	_MMIO(0x20a0)
#define GEN2_IIR	_MMIO(0x20a4)
#define GEN2_IMR	_MMIO(0x20a8)
#define GEN2_ISR	_MMIO(0x20ac)
#define VLV_GUNIT_CLOCK_GATE	_MMIO(VLV_DISPLAY_BASE + 0x2060)
#define GINT_DIS	(1 << 22)
#define GCFG_DIS	(1 << 8)
@ -4209,42 +4211,6 @@ enum {
#define PIPESRC(trans)		_MMIO_TRANS2(trans, _PIPEASRC)
#define PIPE_MULT(trans)	_MMIO_TRANS2(trans, _PIPE_MULT_A)

/* VLV eDP PSR registers */
#define _PSRCTLA	(VLV_DISPLAY_BASE + 0x60090)
#define _PSRCTLB	(VLV_DISPLAY_BASE + 0x61090)
#define VLV_EDP_PSR_ENABLE		(1 << 0)
#define VLV_EDP_PSR_RESET		(1 << 1)
#define VLV_EDP_PSR_MODE_MASK		(7 << 2)
#define VLV_EDP_PSR_MODE_HW_TIMER	(1 << 3)
#define VLV_EDP_PSR_MODE_SW_TIMER	(1 << 2)
#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE	(1 << 7)
#define VLV_EDP_PSR_ACTIVE_ENTRY	(1 << 8)
#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE	(1 << 9)
#define VLV_EDP_PSR_DBL_FRAME		(1 << 10)
#define VLV_EDP_PSR_FRAME_COUNT_MASK	(0xff << 16)
#define VLV_EDP_PSR_IDLE_FRAME_SHIFT	16
#define VLV_PSRCTL(pipe)	_MMIO_PIPE(pipe, _PSRCTLA, _PSRCTLB)

#define _VSCSDPA	(VLV_DISPLAY_BASE + 0x600a0)
#define _VSCSDPB	(VLV_DISPLAY_BASE + 0x610a0)
#define VLV_EDP_PSR_SDP_FREQ_MASK	(3 << 30)
#define VLV_EDP_PSR_SDP_FREQ_ONCE	(1 << 31)
#define VLV_EDP_PSR_SDP_FREQ_EVFRAME	(1 << 30)
#define VLV_VSCSDP(pipe)	_MMIO_PIPE(pipe, _VSCSDPA, _VSCSDPB)

#define _PSRSTATA	(VLV_DISPLAY_BASE + 0x60094)
#define _PSRSTATB	(VLV_DISPLAY_BASE + 0x61094)
#define VLV_EDP_PSR_LAST_STATE_MASK	(7 << 3)
#define VLV_EDP_PSR_CURR_STATE_MASK	7
#define VLV_EDP_PSR_DISABLED		(0 << 0)
#define VLV_EDP_PSR_INACTIVE		(1 << 0)
#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE	(2 << 0)
#define VLV_EDP_PSR_ACTIVE_NORFB_UP	(3 << 0)
#define VLV_EDP_PSR_ACTIVE_SF_UPDATE	(4 << 0)
#define VLV_EDP_PSR_EXIT		(5 << 0)
#define VLV_EDP_PSR_IN_TRANS		(1 << 7)
#define VLV_PSRSTAT(pipe)	_MMIO_PIPE(pipe, _PSRSTATA, _PSRSTATB)

/* HSW+ eDP PSR registers */
#define HSW_EDP_PSR_BASE	0x64800
#define BDW_EDP_PSR_BASE	0x6f800
@ -5795,6 +5761,10 @@ enum {
#define PIPEFRAMEPIXEL(pipe)	_MMIO_PIPE2(pipe, _PIPEAFRAMEPIXEL)
#define PIPESTAT(pipe)		_MMIO_PIPE2(pipe, _PIPEASTAT)

#define _PIPEAGCMAX	0x70010
#define _PIPEBGCMAX	0x71010
#define PIPEGCMAX(pipe, i)	_MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4)

#define _PIPE_MISC_A	0x70030
#define _PIPE_MISC_B	0x71030
#define PIPEMISC_YUV420_ENABLE	(1 << 27)
@ -7209,11 +7179,21 @@ enum {
#define _LGC_PALETTE_B	0x4a800
#define LGC_PALETTE(pipe, i)	_MMIO(_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4)

/* ilk/snb precision palette */
#define _PREC_PALETTE_A	0x4b000
#define _PREC_PALETTE_B	0x4c000
#define PREC_PALETTE(pipe, i)	_MMIO(_PIPE(pipe, _PREC_PALETTE_A, _PREC_PALETTE_B) + (i) * 4)

#define _PREC_PIPEAGCMAX	0x4d000
#define _PREC_PIPEBGCMAX	0x4d010
#define PREC_PIPEGCMAX(pipe, i)	_MMIO(_PIPE(pipe, _PIPEAGCMAX, _PIPEBGCMAX) + (i) * 4)

#define _GAMMA_MODE_A	0x4a480
#define _GAMMA_MODE_B	0x4ac80
#define GAMMA_MODE(pipe)	_MMIO_PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
#define PRE_CSC_GAMMA_ENABLE	(1 << 31)
#define POST_CSC_GAMMA_ENABLE	(1 << 30)
#define GAMMA_MODE_MODE_MASK	(3 << 0)
#define GAMMA_MODE_MODE_8BIT	(0 << 0)
#define GAMMA_MODE_MODE_10BIT	(1 << 0)
#define GAMMA_MODE_MODE_12BIT	(2 << 0)
@ -8709,8 +8689,9 @@ enum {
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS	_MMIO(0xA0C4)
#define GEN9_RENDER_PG_IDLE_HYSTERESIS	_MMIO(0xA0C8)
#define GEN9_PG_ENABLE	_MMIO(0xA210)
#define GEN9_RENDER_PG_ENABLE	(1 << 0)
#define GEN9_MEDIA_PG_ENABLE	(1 << 1)
#define GEN9_RENDER_PG_ENABLE	REG_BIT(0)
#define GEN9_MEDIA_PG_ENABLE	REG_BIT(1)
#define GEN11_MEDIA_SAMPLER_PG_ENABLE	REG_BIT(2)
#define GEN8_PUSHBUS_CONTROL	_MMIO(0xA248)
#define GEN8_PUSHBUS_ENABLE	_MMIO(0xA250)
#define GEN8_PUSHBUS_SHIFT	_MMIO(0xA25C)
@ -8725,6 +8706,11 @@ enum {
#define GEN6_PMIER	_MMIO(0x4402C)
#define GEN6_PM_MBOX_EVENT	(1 << 25)
#define GEN6_PM_THERMAL_EVENT	(1 << 24)

/*
 * For Gen11 these are in the upper word of the GPM_WGBOXPERF
 * registers. Shifting is handled on accessing the imr and ier.
 */
#define GEN6_PM_RP_DOWN_TIMEOUT		(1 << 6)
#define GEN6_PM_RP_UP_THRESHOLD	(1 << 5)
#define GEN6_PM_RP_DOWN_THRESHOLD	(1 << 4)
@ -10127,6 +10113,7 @@ enum skl_power_gate {
#define PAL_PREC_SPLIT_MODE	(1 << 31)
#define PAL_PREC_AUTO_INCREMENT	(1 << 15)
#define PAL_PREC_INDEX_VALUE_MASK	(0x3ff << 0)
#define PAL_PREC_INDEX_VALUE(x)	((x) << 0)
#define _PAL_PREC_DATA_A	0x4A404
#define _PAL_PREC_DATA_B	0x4AC04
#define _PAL_PREC_DATA_C	0x4B404
@ -10144,6 +10131,7 @@ enum skl_power_gate {
#define PREC_PAL_DATA(pipe)	_MMIO_PIPE(pipe, _PAL_PREC_DATA_A, _PAL_PREC_DATA_B)
#define PREC_PAL_GC_MAX(pipe, i)	_MMIO(_PIPE(pipe, _PAL_PREC_GC_MAX_A, _PAL_PREC_GC_MAX_B) + (i) * 4)
#define PREC_PAL_EXT_GC_MAX(pipe, i)	_MMIO(_PIPE(pipe, _PAL_PREC_EXT_GC_MAX_A, _PAL_PREC_EXT_GC_MAX_B) + (i) * 4)
#define PREC_PAL_EXT2_GC_MAX(pipe, i)	_MMIO(_PIPE(pipe, _PAL_PREC_EXT2_GC_MAX_A, _PAL_PREC_EXT2_GC_MAX_B) + (i) * 4)

#define _PRE_CSC_GAMC_INDEX_A	0x4A484
#define _PRE_CSC_GAMC_INDEX_B	0x4AC84
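The RESET_CTL bits above are written through a masked register: the upper 16 bits of the written value select which of the low 16 bits the write actually affects, which is what the kernel's _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() helpers encode. A stand-alone model of that convention (plain C, the MMIO register replaced by a variable; illustrative, not the driver's code):

#include <stdio.h>
#include <stdint.h>

/* Same bit layout as the kernel's _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE(). */
#define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a) ((a) << 16)

static uint32_t reg; /* stands in for a masked MMIO register */

static void masked_write(uint32_t val)
{
	uint32_t mask = val >> 16; /* only these low bits are touched */

	reg = (reg & ~mask) | (val & mask);
}

int main(void)
{
	masked_write(MASKED_BIT_ENABLE(1u << 0));  /* request reset */
	masked_write(MASKED_BIT_DISABLE(1u << 0)); /* cancel it */
	printf("reg = %#x\n", reg);                /* 0x0 */
	return 0;
}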
@ -29,10 +29,11 @@
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>

#include "i915_drv.h"
#include "i915_active.h"
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_reset.h"
#include "intel_pm.h"

struct execute_cb {
	struct list_head link;
@ -100,6 +101,7 @@ static void i915_fence_release(struct dma_fence *fence)
	 * caught trying to reuse dead objects.
	 */
	i915_sw_fence_fini(&rq->submit);
	i915_sw_fence_fini(&rq->semaphore);

	kmem_cache_free(global.slab_requests, rq);
}
@ -551,6 +553,36 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
	return NOTIFY_DONE;
}

static int __i915_sw_fence_call
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_request *request =
		container_of(fence, typeof(*request), semaphore);

	switch (state) {
	case FENCE_COMPLETE:
		/*
		 * We only check a small portion of our dependencies
		 * and so cannot guarantee that there remains no
		 * semaphore chain across all. Instead of opting
		 * for the full NOSEMAPHORE boost, we go for the
		 * smaller (but still preempting) boost of
		 * NEWCLIENT. This will be enough to boost over
		 * a busywaiting request (as that cannot be
		 * NEWCLIENT) without accidentally boosting
		 * a busywait over real work elsewhere.
		 */
		i915_schedule_bump_priority(request, I915_PRIORITY_NEWCLIENT);
		break;

	case FENCE_FREE:
		i915_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}

static void ring_retire_requests(struct intel_ring *ring)
{
	struct i915_request *rq, *rn;
@ -583,11 +615,6 @@ out:
	return kmem_cache_alloc(global.slab_requests, GFP_KERNEL);
}

static int add_timeline_barrier(struct i915_request *rq)
{
	return i915_request_await_active_request(rq, &rq->timeline->barrier);
}

/**
 * i915_request_alloc - allocate a request structure
 *
@ -706,6 +733,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)

	/* We bump the ref for the fence chain */
	i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
	i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify);

	i915_sched_node_init(&rq->sched);

@ -737,10 +765,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
	 */
	rq->head = rq->ring->emit;

	ret = add_timeline_barrier(rq);
	if (ret)
		goto err_unwind;

	ret = engine->request_alloc(rq);
	if (ret)
		goto err_unwind;
@ -751,7 +775,10 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
	rq->infix = rq->ring->emit; /* end of header; start of user payload */

	/* Check that we didn't interrupt ourselves with a new request */
	lockdep_assert_held(&rq->timeline->mutex);
	GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
	rq->cookie = lockdep_pin_lock(&rq->timeline->mutex);

	return rq;

err_unwind:
@ -783,6 +810,18 @@ emit_semaphore_wait(struct i915_request *to,
	GEM_BUG_ON(!from->timeline->has_initial_breadcrumb);
	GEM_BUG_ON(INTEL_GEN(to->i915) < 8);

	/* Just emit the first semaphore we see as request space is limited. */
	if (to->sched.semaphores & from->engine->mask)
		return i915_sw_fence_await_dma_fence(&to->submit,
						     &from->fence, 0,
						     I915_FENCE_GFP);

	err = i915_sw_fence_await_dma_fence(&to->semaphore,
					    &from->fence, 0,
					    I915_FENCE_GFP);
	if (err < 0)
		return err;

	/* We need to pin the signaler's HWSP until we are finished reading. */
	err = i915_timeline_read_hwsp(from, to, &hwsp_offset);
	if (err)
@ -814,7 +853,8 @@ emit_semaphore_wait(struct i915_request *to,
	*cs++ = 0;

	intel_ring_advance(to, cs);
	to->sched.flags |= I915_SCHED_HAS_SEMAPHORE;
	to->sched.semaphores |= from->engine->mask;
	to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
	return 0;
}

@ -1063,6 +1103,8 @@ void i915_request_add(struct i915_request *request)
		  engine->name, request->fence.context, request->fence.seqno);

	lockdep_assert_held(&request->timeline->mutex);
	lockdep_unpin_lock(&request->timeline->mutex, request->cookie);

	trace_i915_request_add(request);

	/*
@ -1110,6 +1152,7 @@ void i915_request_add(struct i915_request *request)
	 * run at the earliest possible convenience.
	 */
	local_bh_disable();
	i915_sw_fence_commit(&request->semaphore);
	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
	if (engine->schedule) {
		struct i915_sched_attr attr = request->gem_context->sched;
@ -1126,7 +1169,7 @@ void i915_request_add(struct i915_request *request)
	 * far in the distance past over useful work, we keep a history
	 * of any semaphore use along our dependency chain.
	 */
	if (!(request->sched.flags & I915_SCHED_HAS_SEMAPHORE))
	if (!(request->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
		attr.priority |= I915_PRIORITY_NOSEMAPHORE;

	/*
@ -1316,7 +1359,9 @@ long i915_request_wait(struct i915_request *rq,
	if (flags & I915_WAIT_PRIORITY) {
		if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
			gen6_rps_boost(rq);
		local_bh_disable(); /* suspend tasklets for reprioritisation */
		i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
		local_bh_enable(); /* kick tasklets en masse */
	}

	wait.tsk = current;
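The NOSEMAPHORE bookkeeping above reduces to a small decision at submit time: if nothing in the request's dependency chain ever armed a semaphore, the request may be bumped above semaphore-busywaiting work. A stand-alone sketch of that decision (plain C; the flag and bit values mirror the definitions earlier in this commit, everything else is illustrative):

#include <stdio.h>

#define PRIORITY_NOSEMAPHORE      (1u << 2)
#define SCHED_HAS_SEMAPHORE_CHAIN (1u << 0)

struct sched_node {
	unsigned int flags;
	int priority;
};

/* Mirror of the i915_request_add() logic: boost semaphore-free work. */
static void apply_submit_boost(struct sched_node *node)
{
	if (!(node->flags & SCHED_HAS_SEMAPHORE_CHAIN))
		node->priority |= PRIORITY_NOSEMAPHORE;
}

int main(void)
{
	struct sched_node plain = { .flags = 0, .priority = 0 };
	struct sched_node chained = { .flags = SCHED_HAS_SEMAPHORE_CHAIN, .priority = 0 };

	apply_submit_boost(&plain);
	apply_submit_boost(&chained);
	printf("plain=%d chained=%d\n", plain.priority, chained.priority); /* 4 0 */
	return 0;
}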
@ -26,6 +26,7 @@
#define I915_REQUEST_H

#include <linux/dma-fence.h>
#include <linux/lockdep.h>

#include "i915_gem.h"
#include "i915_scheduler.h"
@ -120,6 +121,15 @@ struct i915_request {
	 */
	unsigned long rcustate;

	/*
	 * We pin the timeline->mutex while constructing the request to
	 * ensure that no caller accidentally drops it during construction.
	 * The timeline->mutex must be held to ensure that only this caller
	 * can use the ring and manipulate the associated timeline during
	 * construction.
	 */
	struct pin_cookie cookie;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
@ -133,6 +143,7 @@ struct i915_request {
		struct i915_sw_dma_fence_cb dmaq;
	};
	struct list_head execute_cb;
	struct i915_sw_fence semaphore;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
@ -18,6 +18,26 @@
/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0

static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw(uncore, reg, 0, set);
}

static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw(uncore, reg, clr, 0);
}

static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw_fw(uncore, reg, 0, set);
}

static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw_fw(uncore, reg, clr, 0);
}

static void engine_skip_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
@ -119,7 +139,7 @@ void i915_reset_request(struct i915_request *rq, bool guilty)

static void gen3_stop_engine(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;

	GEM_TRACE("%s\n", engine->name);
@ -127,32 +147,35 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
	if (intel_engine_stop_cs(engine))
		GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);

	I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
	POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
	intel_uncore_write_fw(uncore,
			      RING_HEAD(base),
			      intel_uncore_read_fw(uncore, RING_TAIL(base)));
	intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */

	I915_WRITE_FW(RING_HEAD(base), 0);
	I915_WRITE_FW(RING_TAIL(base), 0);
	POSTING_READ_FW(RING_TAIL(base));
	intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
	intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
	intel_uncore_posting_read_fw(uncore, RING_TAIL(base));

	/* The ring must be empty before it is disabled */
	I915_WRITE_FW(RING_CTL(base), 0);
	intel_uncore_write_fw(uncore, RING_CTL(base), 0);

	/* Check acts as a post */
	if (I915_READ_FW(RING_HEAD(base)))
	if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
		GEM_TRACE("%s: ring head [%x] not parked\n",
			  engine->name, I915_READ_FW(RING_HEAD(base)));
			  engine->name,
			  intel_uncore_read_fw(uncore, RING_HEAD(base)));
}

static void i915_stop_engines(struct drm_i915_private *i915,
			      unsigned int engine_mask)
			      intel_engine_mask_t engine_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_engine_mask_t tmp;

	if (INTEL_GEN(i915) < 3)
		return;

	for_each_engine_masked(engine, i915, engine_mask, id)
	for_each_engine_masked(engine, i915, engine_mask, tmp)
		gen3_stop_engine(engine);
}

@ -165,7 +188,7 @@ static bool i915_in_reset(struct pci_dev *pdev)
}

static int i915_do_reset(struct drm_i915_private *i915,
			 unsigned int engine_mask,
			 intel_engine_mask_t engine_mask,
			 unsigned int retry)
{
	struct pci_dev *pdev = i915->drm.pdev;
@ -194,7 +217,7 @@ static bool g4x_reset_complete(struct pci_dev *pdev)
}

static int g33_do_reset(struct drm_i915_private *i915,
			unsigned int engine_mask,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = i915->drm.pdev;
@ -203,17 +226,17 @@ static int g33_do_reset(struct drm_i915_private *i915,
	return wait_for_atomic(g4x_reset_complete(pdev), 50);
}

static int g4x_do_reset(struct drm_i915_private *dev_priv,
			unsigned int engine_mask,
static int g4x_do_reset(struct drm_i915_private *i915,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct pci_dev *pdev = i915->drm.pdev;
	struct intel_uncore *uncore = &i915->uncore;
	int ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE_FW(VDECCLK_GATE_D,
		      I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ_FW(VDECCLK_GATE_D);
	rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
@ -234,18 +257,17 @@ static int g4x_do_reset(struct drm_i915_private *dev_priv,
out:
	pci_write_config_byte(pdev, I915_GDRST, 0);

	I915_WRITE_FW(VDECCLK_GATE_D,
		      I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ_FW(VDECCLK_GATE_D);
	rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	return ret;
}

static int ironlake_do_reset(struct drm_i915_private *dev_priv,
			     unsigned int engine_mask,
static int ironlake_do_reset(struct drm_i915_private *i915,
			     intel_engine_mask_t engine_mask,
			     unsigned int retry)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_uncore *uncore = &i915->uncore;
	int ret;

	intel_uncore_write_fw(uncore, ILK_GDSR,
@ -277,10 +299,10 @@ out:
}

/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
static int gen6_hw_domain_reset(struct drm_i915_private *i915,
				u32 hw_domain_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_uncore *uncore = &i915->uncore;
	int err;

	/*
@ -303,7 +325,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
}

static int gen6_reset_engines(struct drm_i915_private *i915,
			      unsigned int engine_mask,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
@ -319,7 +341,7 @@ static int gen6_reset_engines(struct drm_i915_private *i915,
	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		unsigned int tmp;
		intel_engine_mask_t tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, i915, engine_mask, tmp) {
@ -331,11 +353,10 @@ static int gen6_reset_engines(struct drm_i915_private *i915,
	return gen6_hw_domain_reset(i915, hw_mask);
}

static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
			  struct intel_engine_cs *engine)
static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
	u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
	i915_reg_t sfc_usage;
@ -382,7 +403,7 @@ static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
	 * ends up being locked to the engine we want to reset, we have to reset
	 * it as well (we will unlock it once the reset sequence is completed).
	 */
	intel_uncore_rmw_or_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
	rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);

	if (__intel_wait_for_register_fw(uncore,
					 sfc_forced_lock_ack,
@ -399,10 +420,10 @@ static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
	return 0;
}

static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
			     struct intel_engine_cs *engine)
static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
	u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
	i915_reg_t sfc_forced_lock;
	u32 sfc_forced_lock_bit;

@ -424,12 +445,11 @@ static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
		return;
	}

	I915_WRITE_FW(sfc_forced_lock,
		      I915_READ_FW(sfc_forced_lock) & ~sfc_forced_lock_bit);
	rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
}

static int gen11_reset_engines(struct drm_i915_private *i915,
			       unsigned int engine_mask,
			       intel_engine_mask_t engine_mask,
			       unsigned int retry)
{
	const u32 hw_engine_mask[] = {
@ -443,7 +463,7 @@ static int gen11_reset_engines(struct drm_i915_private *i915,
		[VECS1] = GEN11_GRDOM_VECS2,
	};
	struct intel_engine_cs *engine;
	unsigned int tmp;
	intel_engine_mask_t tmp;
	u32 hw_mask;
	int ret;

@ -454,7 +474,7 @@ static int gen11_reset_engines(struct drm_i915_private *i915,
		for_each_engine_masked(engine, i915, engine_mask, tmp) {
			GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
			hw_mask |= hw_engine_mask[engine->id];
			hw_mask |= gen11_lock_sfc(i915, engine);
			hw_mask |= gen11_lock_sfc(engine);
		}
	}

@ -462,46 +482,62 @@ static int gen11_reset_engines(struct drm_i915_private *i915,

	if (engine_mask != ALL_ENGINES)
		for_each_engine_masked(engine, i915, engine_mask, tmp)
			gen11_unlock_sfc(i915, engine);
			gen11_unlock_sfc(engine);

	return ret;
}

static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = &engine->i915->uncore;
	struct intel_uncore *uncore = engine->uncore;
	const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
	u32 request, mask, ack;
	int ret;

	intel_uncore_write_fw(uncore, RING_RESET_CTL(engine->mmio_base),
			      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
	ack = intel_uncore_read_fw(uncore, reg);
	if (ack & RESET_CTL_CAT_ERROR) {
		/*
		 * For catastrophic errors, ready-for-reset sequence
		 * needs to be bypassed: HAS#396813
		 */
		request = RESET_CTL_CAT_ERROR;
		mask = RESET_CTL_CAT_ERROR;

	ret = __intel_wait_for_register_fw(uncore,
					   RING_RESET_CTL(engine->mmio_base),
					   RESET_CTL_READY_TO_RESET,
					   RESET_CTL_READY_TO_RESET,
					   700, 0,
					   NULL);
		/* Catastrophic errors need to be cleared by HW */
		ack = 0;
	} else if (!(ack & RESET_CTL_READY_TO_RESET)) {
		request = RESET_CTL_REQUEST_RESET;
		mask = RESET_CTL_READY_TO_RESET;
		ack = RESET_CTL_READY_TO_RESET;
	} else {
		return 0;
	}

	intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
	ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
					   700, 0, NULL);
	if (ret)
		DRM_ERROR("%s: reset request timeout\n", engine->name);
		DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
			  engine->name, request,
			  intel_uncore_read_fw(uncore, reg));

	return ret;
}

static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
	intel_uncore_write_fw(engine->uncore,
			      RING_RESET_CTL(engine->mmio_base),
			      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct drm_i915_private *i915,
			      unsigned int engine_mask,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const bool reset_non_ready = retry >= 1;
	unsigned int tmp;
	intel_engine_mask_t tmp;
	int ret;

	for_each_engine_masked(engine, i915, engine_mask, tmp) {
@ -537,7 +573,7 @@ skip_reset:
}

typedef int (*reset_func)(struct drm_i915_private *,
			  unsigned int engine_mask,
			  intel_engine_mask_t engine_mask,
			  unsigned int retry);

static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
@ -558,7 +594,8 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
	return NULL;
}

int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
int intel_gpu_reset(struct drm_i915_private *i915,
		    intel_engine_mask_t engine_mask)
{
	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
	reset_func reset;
@ -646,7 +683,7 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_uncore_forcewake_get(&engine->i915->uncore, FORCEWAKE_ALL);
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
	engine->reset.prepare(engine);
}

@ -692,7 +729,8 @@ static void gt_revoke(struct drm_i915_private *i915)
	revoke_mmaps(i915);
}

static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
static int gt_reset(struct drm_i915_private *i915,
		    intel_engine_mask_t stalled_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
@ -717,7 +755,7 @@ static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
static void reset_finish_engine(struct intel_engine_cs *engine)
{
	engine->reset.finish(engine);
	intel_uncore_forcewake_put(&engine->i915->uncore, FORCEWAKE_ALL);
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}

struct i915_gpu_restart {
@ -951,7 +989,8 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
	return result;
}

static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
static int do_reset(struct drm_i915_private *i915,
		    intel_engine_mask_t stalled_mask)
{
	int err, i;

@ -986,7 +1025,7 @@ static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
 * - re-init display
 */
void i915_reset(struct drm_i915_private *i915,
		unsigned int stalled_mask,
		intel_engine_mask_t stalled_mask,
		const char *reason)
{
	struct i915_gpu_error *error = &i915->gpu_error;
@ -1173,49 +1212,50 @@ static void i915_reset_device(struct drm_i915_private *i915,
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}

static void clear_register(struct drm_i915_private *dev_priv, i915_reg_t reg)
static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
	I915_WRITE(reg, I915_READ(reg));
	intel_uncore_rmw(uncore, reg, 0, 0);
}

void i915_clear_error_registers(struct drm_i915_private *dev_priv)
void i915_clear_error_registers(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	u32 eir;

	if (!IS_GEN(dev_priv, 2))
		clear_register(dev_priv, PGTBL_ER);
	if (!IS_GEN(i915, 2))
		clear_register(uncore, PGTBL_ER);

	if (INTEL_GEN(dev_priv) < 4)
		clear_register(dev_priv, IPEIR(RENDER_RING_BASE));
	if (INTEL_GEN(i915) < 4)
		clear_register(uncore, IPEIR(RENDER_RING_BASE));
	else
		clear_register(dev_priv, IPEIR_I965);
		clear_register(uncore, IPEIR_I965);

	clear_register(dev_priv, EIR);
	eir = I915_READ(EIR);
	clear_register(uncore, EIR);
	eir = intel_uncore_read(uncore, EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
		rmw_set(uncore, EMR, eir);
		intel_uncore_write(uncore, GEN2_IIR,
				   I915_MASTER_ERROR_INTERRUPT);
	}

	if (INTEL_GEN(dev_priv) >= 8) {
		I915_WRITE(GEN8_RING_FAULT_REG,
			   I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID);
		POSTING_READ(GEN8_RING_FAULT_REG);
	} else if (INTEL_GEN(dev_priv) >= 6) {
	if (INTEL_GEN(i915) >= 8) {
		rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
	} else if (INTEL_GEN(i915) >= 6) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine(engine, dev_priv, id) {
			I915_WRITE(RING_FAULT_REG(engine),
				   I915_READ(RING_FAULT_REG(engine)) &
				   ~RING_FAULT_VALID);
		for_each_engine(engine, i915, id) {
			rmw_clear(uncore,
				  RING_FAULT_REG(engine), RING_FAULT_VALID);
			intel_uncore_posting_read(uncore,
						  RING_FAULT_REG(engine));
		}
		POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS0]));
	}
}

@ -1233,14 +1273,14 @@ void i915_clear_error_registers(struct drm_i915_private *dev_priv)
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_i915_private *i915,
		       u32 engine_mask,
		       intel_engine_mask_t engine_mask,
		       unsigned long flags,
		       const char *fmt, ...)
{
	struct i915_gpu_error *error = &i915->gpu_error;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	unsigned int tmp;
	intel_engine_mask_t tmp;
	char error_msg[80];
	char *msg = NULL;
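The new rmw_set()/rmw_clear() wrappers introduced at the top of this file are thin conveniences over intel_uncore_rmw(). Their effect is easiest to see in a stand-alone model where the MMIO access is replaced by a plain variable (illustrative C, not the driver code):

#include <stdio.h>
#include <stdint.h>

static uint32_t reg; /* stands in for an MMIO register */

/* Model of intel_uncore_rmw(): read, clear 'clr', set 'set', write back. */
static void rmw(uint32_t clr, uint32_t set)
{
	reg = (reg & ~clr) | set;
}

static void rmw_set(uint32_t set)   { rmw(0, set); }
static void rmw_clear(uint32_t clr) { rmw(clr, 0); }

int main(void)
{
	rmw_set(1u << 4);   /* e.g. a clock-gate-disable bit */
	rmw_clear(1u << 4); /* restore it */
	printf("reg = %#x\n", reg); /* back to 0x0 */
	return 0;
}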
@ -11,13 +11,16 @@
#include <linux/types.h>
#include <linux/srcu.h>

#include "intel_engine_types.h"

struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;
struct intel_guc;

__printf(4, 5)
void i915_handle_error(struct drm_i915_private *i915,
		       u32 engine_mask,
		       intel_engine_mask_t engine_mask,
		       unsigned long flags,
		       const char *fmt, ...);
#define I915_ERROR_CAPTURE BIT(0)
@ -25,7 +28,7 @@ void i915_handle_error(struct drm_i915_private *i915,
void i915_clear_error_registers(struct drm_i915_private *i915);

void i915_reset(struct drm_i915_private *i915,
		unsigned int stalled_mask,
		intel_engine_mask_t stalled_mask,
		const char *reason);
int i915_reset_engine(struct intel_engine_cs *engine,
		      const char *reason);
@ -41,7 +44,8 @@ int i915_terminally_wedged(struct drm_i915_private *i915);
bool intel_has_gpu_reset(struct drm_i915_private *i915);
bool intel_has_reset_engine(struct drm_i915_private *i915);

int intel_gpu_reset(struct drm_i915_private *i915, u32 engine_mask);
int intel_gpu_reset(struct drm_i915_private *i915,
		    intel_engine_mask_t engine_mask);

int intel_reset_guc(struct drm_i915_private *i915);
@ -41,6 +41,7 @@ void i915_sched_node_init(struct i915_sched_node *node)
	INIT_LIST_HEAD(&node->waiters_list);
	INIT_LIST_HEAD(&node->link);
	node->attr.priority = I915_PRIORITY_INVALID;
	node->semaphores = 0;
	node->flags = 0;
}

@ -63,7 +64,7 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
{
	bool ret = false;

	spin_lock(&schedule_lock);
	spin_lock_irq(&schedule_lock);

	if (!node_signaled(signal)) {
		INIT_LIST_HEAD(&dep->dfs_link);
@ -73,14 +74,14 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
		dep->flags = flags;

		/* Keep track of whether anyone on this chain has a semaphore */
		if (signal->flags & I915_SCHED_HAS_SEMAPHORE &&
		if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
		    !node_started(signal))
			node->flags |= I915_SCHED_HAS_SEMAPHORE;
			node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;

		ret = true;
	}

	spin_unlock(&schedule_lock);
	spin_unlock_irq(&schedule_lock);

	return ret;
}
@ -107,7 +108,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)

	GEM_BUG_ON(!list_empty(&node->link));

	spin_lock(&schedule_lock);
	spin_lock_irq(&schedule_lock);

	/*
	 * Everyone we depended upon (the fences we wait to be signaled)
@ -134,7 +135,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
		i915_dependency_free(dep);
	}

	spin_unlock(&schedule_lock);
	spin_unlock_irq(&schedule_lock);
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
@ -355,7 +356,7 @@ static void __i915_schedule(struct i915_request *rq,

	memset(&cache, 0, sizeof(cache));
	engine = rq->engine;
	spin_lock_irq(&engine->timeline.lock);
	spin_lock(&engine->timeline.lock);

	/* Fifo and depth-first replacement ensure our deps execute before us */
	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
@ -406,32 +407,33 @@ static void __i915_schedule(struct i915_request *rq,
			tasklet_hi_schedule(&engine->execlists.tasklet);
	}

	spin_unlock_irq(&engine->timeline.lock);
	spin_unlock(&engine->timeline.lock);
}

void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
	spin_lock(&schedule_lock);
	spin_lock_irq(&schedule_lock);
	__i915_schedule(rq, attr);
	spin_unlock(&schedule_lock);
	spin_unlock_irq(&schedule_lock);
}

void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
	struct i915_sched_attr attr;
	unsigned long flags;

	GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);

	if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
		return;

	spin_lock_bh(&schedule_lock);
	spin_lock_irqsave(&schedule_lock, flags);

	attr = rq->sched.attr;
	attr.priority |= bump;
	__i915_schedule(rq, &attr);

	spin_unlock_bh(&schedule_lock);
	spin_unlock_irqrestore(&schedule_lock, flags);
}

void __i915_priolist_free(struct i915_priolist *p)
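__i915_priolist_free() and to_priolist() operate on the per-priority buckets declared in i915_priolist_types.h: the low bits of a request's effective priority pick one of the requests[] lists, and the 'used' bitmap records which buckets are non-empty so the dequeue path can skip empty ones. A stand-alone sketch of that bitmap bookkeeping (plain C with GCC/Clang's __builtin_ctzl; the index-to-priority ordering in the real driver differs, this only models the mechanism):

#include <stdio.h>

#define PRIORITY_SHIFT 3
#define PRIORITY_COUNT (1u << PRIORITY_SHIFT)
#define PRIORITY_MASK  (PRIORITY_COUNT - 1)

struct priolist {
	unsigned long used;        /* bitmap of non-empty buckets */
	int count[PRIORITY_COUNT]; /* stand-in for the request lists */
};

static void add_request(struct priolist *p, int effective_prio)
{
	unsigned int idx = effective_prio & PRIORITY_MASK;

	p->count[idx]++;
	p->used |= 1ul << idx;
}

int main(void)
{
	struct priolist p = { 0 };
	unsigned long used;

	add_request(&p, (1 << PRIORITY_SHIFT) | 1); /* internal WAIT bit set */
	add_request(&p, 1 << PRIORITY_SHIFT);       /* no internal bits */

	/* Visit only the buckets flagged in the bitmap. */
	for (used = p.used; used; used &= used - 1) {
		unsigned int idx = (unsigned int)__builtin_ctzl(used);

		printf("bucket %u: %d request(s)\n", idx, p.count[idx]);
	}
	return 0;
}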
@ -8,92 +8,10 @@
#define _I915_SCHEDULER_H_

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/kernel.h>

#include <uapi/drm/i915_drm.h>

struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;

enum {
	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,

	I915_PRIORITY_INVALID = INT_MIN
};

#define I915_USER_PRIORITY_SHIFT 3
#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)

#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)

#define I915_PRIORITY_WAIT		((u8)BIT(0))
#define I915_PRIORITY_NEWCLIENT		((u8)BIT(1))
#define I915_PRIORITY_NOSEMAPHORE	((u8)BIT(2))

#define __NO_PREEMPTION (I915_PRIORITY_WAIT)

struct i915_sched_attr {
	/**
	 * @priority: execution and service priority
	 *
	 * All clients are equal, but some are more equal than others!
	 *
	 * Requests from a context with a greater (more positive) value of
	 * @priority will be executed before those with a lower @priority
	 * value, forming a simple QoS.
	 *
	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
	 */
	int priority;
};

/*
 * "People assume that time is a strict progression of cause to effect, but
 * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
 * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
 *
 * Requests exist in a complex web of interdependencies. Each request
 * has to wait for some other request to complete before it is ready to be run
 * (e.g. we have to wait until the pixels have been rendering into a texture
 * before we can copy from it). We track the readiness of a request in terms
 * of fences, but we also need to keep the dependency tree for the lifetime
 * of the request (beyond the life of an individual fence). We use the tree
 * at various points to reorder the requests whilst keeping the requests
 * in order with respect to their various dependencies.
 *
 * There is no active component to the "scheduler". As we know the dependency
 * DAG of each request, we are able to insert it into a sorted queue when it
 * is ready, and are able to reorder its portion of the graph to accommodate
 * dynamic priority changes.
 */
struct i915_sched_node {
	struct list_head signalers_list; /* those before us, we depend upon */
	struct list_head waiters_list; /* those after us, they depend upon us */
	struct list_head link;
	struct i915_sched_attr attr;
	unsigned int flags;
#define I915_SCHED_HAS_SEMAPHORE BIT(0)
};

struct i915_dependency {
	struct i915_sched_node *signaler;
	struct list_head signal_link;
	struct list_head wait_link;
	struct list_head dfs_link;
	unsigned long flags;
#define I915_DEPENDENCY_ALLOC BIT(0)
};

struct i915_priolist {
	struct list_head requests[I915_PRIORITY_COUNT];
	struct rb_node node;
	unsigned long used;
	int priority;
};
#include "i915_scheduler_types.h"

#define priolist_for_each_request(it, plist, idx) \
	for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
72
drivers/gpu/drm/i915/i915_scheduler_types.h
Normal file
@ -0,0 +1,72 @@
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#ifndef _I915_SCHEDULER_TYPES_H_
#define _I915_SCHEDULER_TYPES_H_

#include <linux/list.h>

#include "i915_priolist_types.h"
#include "intel_engine_types.h"

struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;

struct i915_sched_attr {
	/**
	 * @priority: execution and service priority
	 *
	 * All clients are equal, but some are more equal than others!
	 *
	 * Requests from a context with a greater (more positive) value of
	 * @priority will be executed before those with a lower @priority
	 * value, forming a simple QoS.
	 *
	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
	 */
	int priority;
};

/*
 * "People assume that time is a strict progression of cause to effect, but
 * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
 * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
 *
 * Requests exist in a complex web of interdependencies. Each request
 * has to wait for some other request to complete before it is ready to be run
 * (e.g. we have to wait until the pixels have been rendering into a texture
 * before we can copy from it). We track the readiness of a request in terms
 * of fences, but we also need to keep the dependency tree for the lifetime
 * of the request (beyond the life of an individual fence). We use the tree
 * at various points to reorder the requests whilst keeping the requests
 * in order with respect to their various dependencies.
 *
 * There is no active component to the "scheduler". As we know the dependency
 * DAG of each request, we are able to insert it into a sorted queue when it
 * is ready, and are able to reorder its portion of the graph to accommodate
 * dynamic priority changes.
 */
struct i915_sched_node {
	struct list_head signalers_list; /* those before us, we depend upon */
	struct list_head waiters_list; /* those after us, they depend upon us */
	struct list_head link;
	struct i915_sched_attr attr;
	unsigned int flags;
#define I915_SCHED_HAS_SEMAPHORE_CHAIN BIT(0)
	intel_engine_mask_t semaphores;
};

struct i915_dependency {
	struct i915_sched_node *signaler;
	struct list_head signal_link;
	struct list_head wait_link;
	struct list_head dfs_link;
	unsigned long flags;
#define I915_DEPENDENCY_ALLOC BIT(0)
};

#endif /* _I915_SCHEDULER_TYPES_H_ */
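The new 'semaphores' field is an engine bitmask: emit_semaphore_wait() ORs in the signalling engine so that at most one semaphore per engine is emitted for a request, falling back to a plain fence wait afterwards. A stand-alone model of that shortcut (plain C; the mask type and values are illustrative):

#include <stdio.h>

typedef unsigned char engine_mask_t; /* models intel_engine_mask_t */

struct sched_node {
	engine_mask_t semaphores;
};

/* Returns 1 if a semaphore was (notionally) emitted, 0 if we fell back. */
static int emit_semaphore_wait(struct sched_node *to, engine_mask_t from_engine)
{
	if (to->semaphores & from_engine)
		return 0; /* already waiting on that engine: use a plain fence */

	to->semaphores |= from_engine;
	return 1;
}

int main(void)
{
	struct sched_node rq = { 0 };

	printf("%d\n", emit_semaphore_wait(&rq, 1 << 0)); /* 1: first wait on engine 0 */
	printf("%d\n", emit_semaphore_wait(&rq, 1 << 0)); /* 0: request space is limited */
	return 0;
}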
@ -25,8 +25,10 @@
 */

#include <drm/i915_drm.h>
#include "intel_drv.h"

#include "i915_reg.h"
#include "intel_drv.h"
#include "intel_fbc.h"

static void i915_save_display(struct drm_i915_private *dev_priv)
{
@ -253,7 +253,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
	spin_lock_init(&timeline->lock);
	mutex_init(&timeline->mutex);

	INIT_ACTIVE_REQUEST(&timeline->barrier);
	INIT_ACTIVE_REQUEST(&timeline->last_request);
	INIT_LIST_HEAD(&timeline->requests);

@ -326,7 +325,6 @@ void i915_timeline_fini(struct i915_timeline *timeline)
{
	GEM_BUG_ON(timeline->pin_count);
	GEM_BUG_ON(!list_empty(&timeline->requests));
	GEM_BUG_ON(i915_active_request_isset(&timeline->barrier));

	i915_syncmap_free(&timeline->sync);

@ -27,6 +27,7 @@

#include <linux/lockdep.h>

#include "i915_active.h"
#include "i915_syncmap.h"
#include "i915_timeline_types.h"

@ -109,19 +110,4 @@ void i915_timelines_init(struct drm_i915_private *i915);
void i915_timelines_park(struct drm_i915_private *i915);
void i915_timelines_fini(struct drm_i915_private *i915);

/**
 * i915_timeline_set_barrier - orders submission between different timelines
 * @timeline: timeline to set the barrier on
 * @rq: request after which new submissions can proceed
 *
 * Sets the passed in request as the serialization point for all subsequent
 * submissions on @timeline. Subsequent requests will not be submitted to GPU
 * until the barrier has been completed.
 */
static inline int
i915_timeline_set_barrier(struct i915_timeline *tl, struct i915_request *rq)
{
	return i915_active_request_set(&tl->barrier, rq);
}

#endif
@ -9,9 +9,10 @@

#include <linux/list.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/types.h>

#include "i915_active.h"
#include "i915_active_types.h"

struct drm_i915_private;
struct i915_vma;
@ -60,16 +61,6 @@ struct i915_timeline {
	 */
	struct i915_syncmap *sync;

	/**
	 * Barrier provides the ability to serialize ordering between different
	 * timelines.
	 *
	 * Users can call i915_timeline_set_barrier which will make all
	 * subsequent submissions to this timeline be executed only after the
	 * barrier has been completed.
	 */
	struct i915_active_request barrier;

	struct list_head link;
	struct drm_i915_private *i915;

@ -25,9 +25,13 @@
 * Jani Nikula <jani.nikula@intel.com>
 */

#include <drm/drm_mipi_dsi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_mipi_dsi.h>

#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_dsi.h"
#include "intel_panel.h"

static inline int header_credits_available(struct drm_i915_private *dev_priv,
					   enum transcoder dsi_trans)
@ -1148,13 +1152,11 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
	intel_wakeref_t wakeref;

	wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]);
	if (wakeref) {
		intel_display_power_put(dev_priv,
					port == PORT_A ?
					POWER_DOMAIN_PORT_DDI_A_IO :
					POWER_DOMAIN_PORT_DDI_B_IO,
					wakeref);
	}
	intel_display_power_put(dev_priv,
				port == PORT_A ?
				POWER_DOMAIN_PORT_DDI_A_IO :
				POWER_DOMAIN_PORT_DDI_B_IO,
				wakeref);
}

/* set mode to DDI */
@ -35,6 +35,8 @@
#include <drm/drm_plane_helper.h>

#include "intel_drv.h"
#include "intel_hdcp.h"
#include "intel_sprite.h"

/**
 * intel_digital_connector_atomic_get_property - hook for connector->atomic_get_property.
@ -35,7 +35,10 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "intel_atomic_plane.h"
#include "intel_drv.h"
#include "intel_pm.h"
#include "intel_sprite.h"

struct intel_plane *intel_plane_alloc(void)
{
40
drivers/gpu/drm/i915/intel_atomic_plane.h
Normal file
@ -0,0 +1,40 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_ATOMIC_PLANE_H__
#define __INTEL_ATOMIC_PLANE_H__

struct drm_plane;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_plane;
struct intel_plane_state;

extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;

void intel_update_plane(struct intel_plane *plane,
			const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state);
void intel_update_slave(struct intel_plane *plane,
			const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state);
void intel_disable_plane(struct intel_plane *plane,
			 const struct intel_crtc_state *crtc_state);
struct intel_plane *intel_plane_alloc(void);
void intel_plane_free(struct intel_plane *plane);
struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
			       struct drm_plane_state *state);
void skl_update_planes_on_crtc(struct intel_atomic_state *state,
			       struct intel_crtc *crtc);
void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
				struct intel_crtc *crtc);
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
					struct intel_crtc_state *crtc_state,
					const struct intel_plane_state *old_plane_state,
					struct intel_plane_state *intel_state);

#endif /* __INTEL_ATOMIC_PLANE_H__ */
drivers/gpu/drm/i915/intel_audio.c

@@ -21,14 +21,16 @@
  * DEALINGS IN THE SOFTWARE.
  */

-#include <linux/kernel.h>
 #include <linux/component.h>
+#include <linux/kernel.h>
+
+#include <drm/drm_edid.h>
 #include <drm/i915_component.h>
 #include <drm/intel_lpe_audio.h>
-#include "intel_drv.h"
+
+#include "i915_drv.h"
+#include "intel_audio.h"
+#include "intel_drv.h"

 /**
  * DOC: High Definition Audio over HDMI and Display Port
@@ -741,18 +743,78 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
 	}
 }

+static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
+				  bool enable)
+{
+	struct drm_modeset_acquire_ctx ctx;
+	struct drm_atomic_state *state;
+	int ret;
+
+	drm_modeset_acquire_init(&ctx, 0);
+	state = drm_atomic_state_alloc(&dev_priv->drm);
+	if (WARN_ON(!state))
+		return;
+
+	state->acquire_ctx = &ctx;
+
+retry:
+	to_intel_atomic_state(state)->cdclk.force_min_cdclk_changed = true;
+	to_intel_atomic_state(state)->cdclk.force_min_cdclk =
+		enable ? 2 * 96000 : 0;
+
+	/*
+	 * Protects dev_priv->cdclk.force_min_cdclk
+	 * Need to lock this here in case we have no active pipes
+	 * and thus wouldn't lock it during the commit otherwise.
+	 */
+	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+			       &ctx);
+	if (!ret)
+		ret = drm_atomic_commit(state);
+
+	if (ret == -EDEADLK) {
+		drm_atomic_state_clear(state);
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	}
+
+	WARN_ON(ret);
+
+	drm_atomic_state_put(state);
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+}
+
 static unsigned long i915_audio_component_get_power(struct device *kdev)
 {
+	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+	intel_wakeref_t ret;
+
 	/* Catch potential impedance mismatches before they occur! */
 	BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long));

-	return intel_display_power_get(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
+	ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+
+	/* Force CDCLK to 2*BCLK as long as we need audio to be powered. */
+	if (dev_priv->audio_power_refcount++ == 0)
+		if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+			glk_force_audio_cdclk(dev_priv, true);
+
+	return ret;
 }

 static void i915_audio_component_put_power(struct device *kdev,
 					   unsigned long cookie)
 {
-	intel_display_power_put(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO, cookie);
+	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+
+	/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
+	if (--dev_priv->audio_power_refcount == 0)
+		if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+			glk_force_audio_cdclk(dev_priv, false);
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie);
 }

 static void i915_audio_component_codec_wake_override(struct device *kdev,
@@ -985,7 +1047,7 @@ static const struct component_ops i915_audio_component_bind_ops = {
  * We ignore any error during registration and continue with reduced
  * functionality (i.e. without HDMI audio).
  */
-void i915_audio_component_init(struct drm_i915_private *dev_priv)
+static void i915_audio_component_init(struct drm_i915_private *dev_priv)
 {
 	int ret;

@@ -1008,7 +1070,7 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
  * Deregisters the audio component, breaking any existing binding to the
  * corresponding snd_hda_intel driver's master component.
  */
-void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
+static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
 {
 	if (!dev_priv->audio_component_registered)
 		return;
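The get/put pair above is a plain refcount guard around an expensive side effect: the first audio power reference forces CDCLK up to 2*BCLK, and dropping the last reference releases it. A minimal sketch of that pattern, with illustrative names that are not the driver's:

	struct audio_power {
		int refcount;		/* plays the role of audio_power_refcount */
		bool cdclk_forced;
	};

	static void audio_power_get(struct audio_power *p)
	{
		if (p->refcount++ == 0)		/* 0 -> 1: first user pays the cost */
			p->cdclk_forced = true;	/* i.e. glk_force_audio_cdclk(true) */
	}

	static void audio_power_put(struct audio_power *p)
	{
		if (--p->refcount == 0)		/* 1 -> 0: last user undoes it */
			p->cdclk_forced = false;	/* i.e. glk_force_audio_cdclk(false) */
	}

Note the asymmetry in the driver code: the post-increment test fires on the 0->1 transition and the pre-decrement test on the 1->0 transition, so the force is applied and removed exactly once per outermost user.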
drivers/gpu/drm/i915/intel_audio.h (new file, 24 lines)

@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_AUDIO_H__
+#define __INTEL_AUDIO_H__
+
+struct drm_connector_state;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_encoder;
+
+void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
+void intel_audio_codec_enable(struct intel_encoder *encoder,
+			      const struct intel_crtc_state *crtc_state,
+			      const struct drm_connector_state *conn_state);
+void intel_audio_codec_disable(struct intel_encoder *encoder,
+			       const struct intel_crtc_state *old_crtc_state,
+			       const struct drm_connector_state *old_conn_state);
+void intel_audio_init(struct drm_i915_private *dev_priv);
+void intel_audio_deinit(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_AUDIO_H__ */
drivers/gpu/drm/i915/intel_breadcrumbs.c

@@ -27,8 +27,6 @@

 #include "i915_drv.h"

-#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq)
-
 static void irq_enable(struct intel_engine_cs *engine)
 {
 	if (!engine->irq_enable)
@@ -82,7 +80,7 @@ static inline bool __request_completed(const struct i915_request *rq)
 	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
 }

-bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
+void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
 {
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
 	struct intel_context *ce, *cn;
@@ -146,19 +144,13 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
 		dma_fence_signal(&rq->fence);
 		i915_request_put(rq);
 	}
-
-	return !list_empty(&signal);
 }

-bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
+void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
 {
-	bool result;
-
 	local_irq_disable();
-	result = intel_engine_breadcrumbs_irq(engine);
+	intel_engine_breadcrumbs_irq(engine);
 	local_irq_enable();
-
-	return result;
 }

 static void signal_irq_work(struct irq_work *work)
drivers/gpu/drm/i915/intel_cdclk.c

@@ -21,6 +21,7 @@
  * DEALINGS IN THE SOFTWARE.
  */

+#include "intel_cdclk.h"
 #include "intel_drv.h"

 /**
@@ -517,7 +518,8 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
 }

 static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
-			  const struct intel_cdclk_state *cdclk_state)
+			  const struct intel_cdclk_state *cdclk_state,
+			  enum pipe pipe)
 {
 	int cdclk = cdclk_state->cdclk;
 	u32 val, cmd = cdclk_state->voltage_level;
@@ -599,7 +601,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
 }

 static void chv_set_cdclk(struct drm_i915_private *dev_priv,
-			  const struct intel_cdclk_state *cdclk_state)
+			  const struct intel_cdclk_state *cdclk_state,
+			  enum pipe pipe)
 {
 	int cdclk = cdclk_state->cdclk;
 	u32 val, cmd = cdclk_state->voltage_level;
@@ -698,7 +701,8 @@ static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
 }

 static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
-			  const struct intel_cdclk_state *cdclk_state)
+			  const struct intel_cdclk_state *cdclk_state,
+			  enum pipe pipe)
 {
 	int cdclk = cdclk_state->cdclk;
 	u32 val;
@@ -988,7 +992,8 @@ static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
 }

 static void skl_set_cdclk(struct drm_i915_private *dev_priv,
-			  const struct intel_cdclk_state *cdclk_state)
+			  const struct intel_cdclk_state *cdclk_state,
+			  enum pipe pipe)
 {
 	int cdclk = cdclk_state->cdclk;
 	int vco = cdclk_state->vco;
@@ -1124,16 +1129,7 @@ sanitize:
 	dev_priv->cdclk.hw.vco = -1;
 }

-/**
- * skl_init_cdclk - Initialize CDCLK on SKL
- * @dev_priv: i915 device
- *
- * Initialize CDCLK for SKL and derivatives. This is generally
- * done only during the display core initialization sequence,
- * after which the DMC will take care of turning CDCLK off/on
- * as needed.
- */
-void skl_init_cdclk(struct drm_i915_private *dev_priv)
+static void skl_init_cdclk(struct drm_i915_private *dev_priv)
 {
 	struct intel_cdclk_state cdclk_state;

@@ -1159,17 +1155,10 @@ void skl_init_cdclk(struct drm_i915_private *dev_priv)
 	cdclk_state.cdclk = skl_calc_cdclk(0, cdclk_state.vco);
 	cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);

-	skl_set_cdclk(dev_priv, &cdclk_state);
+	skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
 }

-/**
- * skl_uninit_cdclk - Uninitialize CDCLK on SKL
- * @dev_priv: i915 device
- *
- * Uninitialize CDCLK for SKL and derivatives. This is done only
- * during the display core uninitialization sequence.
- */
-void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
 {
 	struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;

@@ -1177,7 +1166,7 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
 	cdclk_state.vco = 0;
 	cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);

-	skl_set_cdclk(dev_priv, &cdclk_state);
+	skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
 }

 static int bxt_calc_cdclk(int min_cdclk)
@@ -1356,7 +1345,8 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
 }

 static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
-			  const struct intel_cdclk_state *cdclk_state)
+			  const struct intel_cdclk_state *cdclk_state,
+			  enum pipe pipe)
 {
 	int cdclk = cdclk_state->cdclk;
 	int vco = cdclk_state->vco;
@@ -1409,11 +1399,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 	bxt_de_pll_enable(dev_priv, vco);

 	val = divider | skl_cdclk_decimal(cdclk);
-	/*
-	 * FIXME if only the cd2x divider needs changing, it could be done
-	 * without shutting off the pipe (if only one pipe is active).
-	 */
-	val |= BXT_CDCLK_CD2X_PIPE_NONE;
+	if (pipe == INVALID_PIPE)
+		val |= BXT_CDCLK_CD2X_PIPE_NONE;
+	else
+		val |= BXT_CDCLK_CD2X_PIPE(pipe);
 	/*
 	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
 	 * enable otherwise.
@@ -1422,6 +1411,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 		val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
 	I915_WRITE(CDCLK_CTL, val);

+	if (pipe != INVALID_PIPE)
+		intel_wait_for_vblank(dev_priv, pipe);
+
 	mutex_lock(&dev_priv->pcu_lock);
 	/*
 	 * The timeout isn't specified, the 2ms used here is based on
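The bxt_set_cdclk() hunk above is the heart of the series: the old code always wrote BXT_CDCLK_CD2X_PIPE_NONE (the FIXME it deletes), while the new code can tie a cd2x-divider-only change to one active pipe, in which case the hardware latches the new divider at that pipe's vblank and the function waits for it. A condensed sketch of the control flow, with simplified stand-ins for the register macros and helpers:

	/* pipe < 0 stands in for INVALID_PIPE */
	u32 cd2x_pipe_field(int pipe)
	{
		if (pipe < 0)
			return CD2X_PIPE_NONE;	/* takes effect immediately */
		return CD2X_PIPE(pipe);		/* latched on that pipe's next vblank */
	}

	void program_cd2x(int pipe, u32 divider_bits)
	{
		write_cdclk_ctl(divider_bits | cd2x_pipe_field(pipe));
		if (pipe >= 0)
			wait_for_vblank(pipe);	/* divider switch completes here */
	}

write_cdclk_ctl() and wait_for_vblank() are placeholders for I915_WRITE(CDCLK_CTL, ...) and intel_wait_for_vblank(); the point is the pairing of the pipe-select field with the vblank wait.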
@@ -1491,16 +1483,7 @@ sanitize:
 	dev_priv->cdclk.hw.vco = -1;
 }

-/**
- * bxt_init_cdclk - Initialize CDCLK on BXT
- * @dev_priv: i915 device
- *
- * Initialize CDCLK for BXT and derivatives. This is generally
- * done only during the display core initialization sequence,
- * after which the DMC will take care of turning CDCLK off/on
- * as needed.
- */
-void bxt_init_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
 {
 	struct intel_cdclk_state cdclk_state;

@@ -1526,17 +1509,10 @@ void bxt_init_cdclk(struct drm_i915_private *dev_priv)
 	}
 	cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);

-	bxt_set_cdclk(dev_priv, &cdclk_state);
+	bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
 }

-/**
- * bxt_uninit_cdclk - Uninitialize CDCLK on BXT
- * @dev_priv: i915 device
- *
- * Uninitialize CDCLK for BXT and derivatives. This is done only
- * during the display core uninitialization sequence.
- */
-void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
 {
 	struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;

@@ -1544,7 +1520,7 @@ void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
 	cdclk_state.vco = 0;
 	cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);

-	bxt_set_cdclk(dev_priv, &cdclk_state);
+	bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
 }

 static int cnl_calc_cdclk(int min_cdclk)
@@ -1664,7 +1640,8 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
 }

 static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
-			  const struct intel_cdclk_state *cdclk_state)
+			  const struct intel_cdclk_state *cdclk_state,
+			  enum pipe pipe)
 {
 	int cdclk = cdclk_state->cdclk;
 	int vco = cdclk_state->vco;
@@ -1705,13 +1682,15 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
 	cnl_cdclk_pll_enable(dev_priv, vco);

 	val = divider | skl_cdclk_decimal(cdclk);
-	/*
-	 * FIXME if only the cd2x divider needs changing, it could be done
-	 * without shutting off the pipe (if only one pipe is active).
-	 */
-	val |= BXT_CDCLK_CD2X_PIPE_NONE;
+	if (pipe == INVALID_PIPE)
+		val |= BXT_CDCLK_CD2X_PIPE_NONE;
+	else
+		val |= BXT_CDCLK_CD2X_PIPE(pipe);
 	I915_WRITE(CDCLK_CTL, val);

+	if (pipe != INVALID_PIPE)
+		intel_wait_for_vblank(dev_priv, pipe);
+
 	/* inform PCU of the change */
 	mutex_lock(&dev_priv->pcu_lock);
 	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
@@ -1848,7 +1827,8 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
 }

 static void icl_set_cdclk(struct drm_i915_private *dev_priv,
-			  const struct intel_cdclk_state *cdclk_state)
+			  const struct intel_cdclk_state *cdclk_state,
+			  enum pipe pipe)
 {
 	unsigned int cdclk = cdclk_state->cdclk;
 	unsigned int vco = cdclk_state->vco;
@@ -1873,6 +1853,11 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
 	if (dev_priv->cdclk.hw.vco != vco)
 		cnl_cdclk_pll_enable(dev_priv, vco);

+	/*
+	 * On ICL CD2X_DIV can only be 1, so we'll never end up changing the
+	 * divider here synchronized to a pipe while CDCLK is on, nor will we
+	 * need the corresponding vblank wait.
+	 */
 	I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE |
 			      skl_cdclk_decimal(cdclk));

@@ -1960,16 +1945,7 @@ out:
 		icl_calc_voltage_level(cdclk_state->cdclk);
 }

-/**
- * icl_init_cdclk - Initialize CDCLK on ICL
- * @dev_priv: i915 device
- *
- * Initialize CDCLK for ICL. This consists mainly of initializing
- * dev_priv->cdclk.hw and sanitizing the state of the hardware if needed. This
- * is generally done only during the display core initialization sequence, after
- * which the DMC will take care of turning CDCLK off/on as needed.
- */
-void icl_init_cdclk(struct drm_i915_private *dev_priv)
+static void icl_init_cdclk(struct drm_i915_private *dev_priv)
 {
 	struct intel_cdclk_state sanitized_state;
 	u32 val;
@@ -2003,17 +1979,10 @@ sanitize:
 	sanitized_state.voltage_level =
 		icl_calc_voltage_level(sanitized_state.cdclk);

-	icl_set_cdclk(dev_priv, &sanitized_state);
+	icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE);
 }

-/**
- * icl_uninit_cdclk - Uninitialize CDCLK on ICL
- * @dev_priv: i915 device
- *
- * Uninitialize CDCLK for ICL. This is done only during the display core
- * uninitialization sequence.
- */
-void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
 {
 	struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;

@@ -2021,19 +1990,10 @@ void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
 	cdclk_state.vco = 0;
 	cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk);

-	icl_set_cdclk(dev_priv, &cdclk_state);
+	icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
 }

-/**
- * cnl_init_cdclk - Initialize CDCLK on CNL
- * @dev_priv: i915 device
- *
- * Initialize CDCLK for CNL. This is generally
- * done only during the display core initialization sequence,
- * after which the DMC will take care of turning CDCLK off/on
- * as needed.
- */
-void cnl_init_cdclk(struct drm_i915_private *dev_priv)
+static void cnl_init_cdclk(struct drm_i915_private *dev_priv)
 {
 	struct intel_cdclk_state cdclk_state;

@@ -2049,17 +2009,10 @@ void cnl_init_cdclk(struct drm_i915_private *dev_priv)
 	cdclk_state.vco = cnl_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
 	cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);

-	cnl_set_cdclk(dev_priv, &cdclk_state);
+	cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
 }

-/**
- * cnl_uninit_cdclk - Uninitialize CDCLK on CNL
- * @dev_priv: i915 device
- *
- * Uninitialize CDCLK for CNL. This is done only
- * during the display core uninitialization sequence.
- */
-void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
 {
 	struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;

@@ -2067,7 +2020,47 @@ void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
 	cdclk_state.vco = 0;
 	cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);

-	cnl_set_cdclk(dev_priv, &cdclk_state);
+	cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
 }

+/**
+ * intel_cdclk_init - Initialize CDCLK
+ * @i915: i915 device
+ *
+ * Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and
+ * sanitizing the state of the hardware if needed. This is generally done only
+ * during the display core initialization sequence, after which the DMC will
+ * take care of turning CDCLK off/on as needed.
+ */
+void intel_cdclk_init(struct drm_i915_private *i915)
+{
+	if (INTEL_GEN(i915) >= 11)
+		icl_init_cdclk(i915);
+	else if (IS_CANNONLAKE(i915))
+		cnl_init_cdclk(i915);
+	else if (IS_GEN9_BC(i915))
+		skl_init_cdclk(i915);
+	else if (IS_GEN9_LP(i915))
+		bxt_init_cdclk(i915);
+}
+
+/**
+ * intel_cdclk_uninit - Uninitialize CDCLK
+ * @i915: i915 device
+ *
+ * Uninitialize CDCLK. This is done only during the display core
+ * uninitialization sequence.
+ */
+void intel_cdclk_uninit(struct drm_i915_private *i915)
+{
+	if (INTEL_GEN(i915) >= 11)
+		icl_uninit_cdclk(i915);
+	else if (IS_CANNONLAKE(i915))
+		cnl_uninit_cdclk(i915);
+	else if (IS_GEN9_BC(i915))
+		skl_uninit_cdclk(i915);
+	else if (IS_GEN9_LP(i915))
+		bxt_uninit_cdclk(i915);
+}
+
 /**
@@ -2086,6 +2079,28 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
 	       a->ref != b->ref;
 }

+/**
+ * intel_cdclk_needs_cd2x_update - Determine if two CDCLK states require a cd2x divider update
+ * @dev_priv: Not a CDCLK state, it's the drm_i915_private!
+ * @a: first CDCLK state
+ * @b: second CDCLK state
+ *
+ * Returns:
+ * True if the CDCLK states require just a cd2x divider update, false if not.
+ */
+bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
+				   const struct intel_cdclk_state *a,
+				   const struct intel_cdclk_state *b)
+{
+	/* Older hw doesn't have the capability */
+	if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv))
+		return false;
+
+	return a->cdclk != b->cdclk &&
+		a->vco == b->vco &&
+		a->ref == b->ref;
+}
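intel_cdclk_needs_cd2x_update() isolates the fast path: if two states share the VCO and reference clock and differ only in cdclk, only the cd2x post-divider needs reprogramming and the full modeset can be skipped. A worked example with BXT-style numbers (taken from that platform's published cdclk tables, not from this diff; frequencies in kHz):

	/* same 1152 MHz VCO, 19.2 MHz reference; only the divide changes */
	struct intel_cdclk_state a = { .cdclk = 288000, .vco = 1152000, .ref = 19200 };
	struct intel_cdclk_state b = { .cdclk = 576000, .vco = 1152000, .ref = 19200 };
	/* a -> b: the overall VCO divide goes from /4 to /2, so
	 * intel_cdclk_needs_cd2x_update(i915, &a, &b) returns true;
	 * any change to .vco or .ref instead forces the full modeset path. */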
 /**
  * intel_cdclk_changed - Determine if two CDCLK states are different
  * @a: first CDCLK state
@@ -2101,6 +2116,26 @@ bool intel_cdclk_changed(const struct intel_cdclk_state *a,
 		a->voltage_level != b->voltage_level;
 }

+/**
+ * intel_cdclk_swap_state - make atomic CDCLK configuration effective
+ * @state: atomic state
+ *
+ * This is the CDCLK version of drm_atomic_helper_swap_state() since the
+ * helper does not handle driver-specific global state.
+ *
+ * Similarly to the atomic helpers this function does a complete swap,
+ * i.e. it also puts the old state into @state. This is used by the commit
+ * code to determine how CDCLK has changed (for instance did it increase or
+ * decrease).
+ */
+void intel_cdclk_swap_state(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+	swap(state->cdclk.logical, dev_priv->cdclk.logical);
+	swap(state->cdclk.actual, dev_priv->cdclk.actual);
+}
+
 void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
 			    const char *context)
 {
@@ -2114,12 +2149,14 @@ void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
  * intel_set_cdclk - Push the CDCLK state to the hardware
  * @dev_priv: i915 device
  * @cdclk_state: new CDCLK state
+ * @pipe: pipe with which to synchronize the update
  *
  * Program the hardware based on the passed in CDCLK state,
  * if necessary.
  */
-void intel_set_cdclk(struct drm_i915_private *dev_priv,
-		     const struct intel_cdclk_state *cdclk_state)
+static void intel_set_cdclk(struct drm_i915_private *dev_priv,
+			    const struct intel_cdclk_state *cdclk_state,
+			    enum pipe pipe)
 {
 	if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state))
 		return;
@@ -2129,7 +2166,7 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,

 	intel_dump_cdclk_state(cdclk_state, "Changing CDCLK to");

-	dev_priv->display.set_cdclk(dev_priv, cdclk_state);
+	dev_priv->display.set_cdclk(dev_priv, cdclk_state, pipe);

 	if (WARN(intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state),
 		 "cdclk state doesn't match!\n")) {
@@ -2138,6 +2175,46 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
 	}
 }

+/**
+ * intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware
+ * @dev_priv: i915 device
+ * @old_state: old CDCLK state
+ * @new_state: new CDCLK state
+ * @pipe: pipe with which to synchronize the update
+ *
+ * Program the hardware before updating the HW plane state based on the passed
+ * in CDCLK state, if necessary.
+ */
+void
+intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
+				 const struct intel_cdclk_state *old_state,
+				 const struct intel_cdclk_state *new_state,
+				 enum pipe pipe)
+{
+	if (pipe == INVALID_PIPE || old_state->cdclk <= new_state->cdclk)
+		intel_set_cdclk(dev_priv, new_state, pipe);
+}
+
+/**
+ * intel_set_cdclk_post_plane_update - Push the CDCLK state to the hardware
+ * @dev_priv: i915 device
+ * @old_state: old CDCLK state
+ * @new_state: new CDCLK state
+ * @pipe: pipe with which to synchronize the update
+ *
+ * Program the hardware after updating the HW plane state based on the passed
+ * in CDCLK state, if necessary.
+ */
+void
+intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
+				  const struct intel_cdclk_state *old_state,
+				  const struct intel_cdclk_state *new_state,
+				  enum pipe pipe)
+{
+	if (pipe != INVALID_PIPE && old_state->cdclk > new_state->cdclk)
+		intel_set_cdclk(dev_priv, new_state, pipe);
+}
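The pre/post pair encodes one ordering rule: when CDCLK goes up the new frequency must be in place before the planes are reprogrammed for it, and when it goes down the old frequency must be kept until the planes are done. With a valid pipe exactly one of the two calls programs the hardware; with INVALID_PIPE everything happens in the pre-plane step. A compressed view of the same predicate (a sketch, not driver code):

	void commit_cdclk(bool pre, int pipe, int old_cdclk, int new_cdclk)
	{
		if (pre) {
			/* raising (or no pipe to sync with): program early */
			if (pipe < 0 || old_cdclk <= new_cdclk)
				set_cdclk(new_cdclk, pipe);
		} else {
			/* lowering with a pipe to sync with: program late */
			if (pipe >= 0 && old_cdclk > new_cdclk)
				set_cdclk(new_cdclk, pipe);
		}
	}

Here pipe < 0 plays the role of INVALID_PIPE and set_cdclk() stands in for intel_set_cdclk(); the two branches are mutually exclusive, so CDCLK is pushed exactly once per commit.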
 static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
 				     int pixel_rate)
 {
@@ -2188,19 +2265,8 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
 	/*
 	 * According to BSpec, "The CD clock frequency must be at least twice
 	 * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
-	 *
-	 * FIXME: Check the actual, not default, BCLK being used.
-	 *
-	 * FIXME: This does not depend on ->has_audio because the higher CDCLK
-	 * is required for audio probe, also when there are no audio capable
-	 * displays connected at probe time. This leads to unnecessarily high
-	 * CDCLK when audio is not required.
-	 *
-	 * FIXME: This limit is only applied when there are displays connected
-	 * at probe time. If we probe without displays, we'll still end up using
-	 * the platform minimum CDCLK, failing audio probe.
-	 */
-	if (INTEL_GEN(dev_priv) >= 9)
+	 */
+	if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
 		min_cdclk = max(2 * 96000, min_cdclk);

 	/*
@@ -2240,7 +2306,7 @@ static int intel_compute_min_cdclk(struct drm_atomic_state *state)
 		intel_state->min_cdclk[i] = min_cdclk;
 	}

-	min_cdclk = 0;
+	min_cdclk = intel_state->cdclk.force_min_cdclk;
 	for_each_pipe(dev_priv, pipe)
 		min_cdclk = max(intel_state->min_cdclk[pipe], min_cdclk);

@@ -2301,7 +2367,8 @@ static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
 		vlv_calc_voltage_level(dev_priv, cdclk);

 	if (!intel_state->active_crtcs) {
-		cdclk = vlv_calc_cdclk(dev_priv, 0);
+		cdclk = vlv_calc_cdclk(dev_priv,
+				       intel_state->cdclk.force_min_cdclk);

 		intel_state->cdclk.actual.cdclk = cdclk;
 		intel_state->cdclk.actual.voltage_level =
@@ -2334,7 +2401,7 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
 		bdw_calc_voltage_level(cdclk);

 	if (!intel_state->active_crtcs) {
-		cdclk = bdw_calc_cdclk(0);
+		cdclk = bdw_calc_cdclk(intel_state->cdclk.force_min_cdclk);

 		intel_state->cdclk.actual.cdclk = cdclk;
 		intel_state->cdclk.actual.voltage_level =
@@ -2406,7 +2473,7 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
 		skl_calc_voltage_level(cdclk);

 	if (!intel_state->active_crtcs) {
-		cdclk = skl_calc_cdclk(0, vco);
+		cdclk = skl_calc_cdclk(intel_state->cdclk.force_min_cdclk, vco);

 		intel_state->cdclk.actual.vco = vco;
 		intel_state->cdclk.actual.cdclk = cdclk;
@@ -2445,10 +2512,10 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)

 	if (!intel_state->active_crtcs) {
 		if (IS_GEMINILAKE(dev_priv)) {
-			cdclk = glk_calc_cdclk(0);
+			cdclk = glk_calc_cdclk(intel_state->cdclk.force_min_cdclk);
 			vco = glk_de_pll_vco(dev_priv, cdclk);
 		} else {
-			cdclk = bxt_calc_cdclk(0);
+			cdclk = bxt_calc_cdclk(intel_state->cdclk.force_min_cdclk);
 			vco = bxt_de_pll_vco(dev_priv, cdclk);
 		}

@@ -2484,7 +2551,7 @@ static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
 			cnl_compute_min_voltage_level(intel_state));

 	if (!intel_state->active_crtcs) {
-		cdclk = cnl_calc_cdclk(0);
+		cdclk = cnl_calc_cdclk(intel_state->cdclk.force_min_cdclk);
 		vco = cnl_cdclk_pll_vco(dev_priv, cdclk);

 		intel_state->cdclk.actual.vco = vco;
@@ -2520,7 +2587,7 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
 			cnl_compute_min_voltage_level(intel_state));

 	if (!intel_state->active_crtcs) {
-		cdclk = icl_calc_cdclk(0, ref);
+		cdclk = icl_calc_cdclk(intel_state->cdclk.force_min_cdclk, ref);
 		vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);

 		intel_state->cdclk.actual.vco = vco;
drivers/gpu/drm/i915/intel_cdclk.h (new file, 46 lines)

@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CDCLK_H__
+#define __INTEL_CDCLK_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_cdclk_state;
+struct intel_crtc_state;
+
+int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
+void intel_cdclk_init(struct drm_i915_private *i915);
+void intel_cdclk_uninit(struct drm_i915_private *i915);
+void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
+void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
+void intel_update_cdclk(struct drm_i915_private *dev_priv);
+void intel_update_rawclk(struct drm_i915_private *dev_priv);
+bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
+				   const struct intel_cdclk_state *a,
+				   const struct intel_cdclk_state *b);
+bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
+			       const struct intel_cdclk_state *b);
+bool intel_cdclk_changed(const struct intel_cdclk_state *a,
+			 const struct intel_cdclk_state *b);
+void intel_cdclk_swap_state(struct intel_atomic_state *state);
+void
+intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
+				 const struct intel_cdclk_state *old_state,
+				 const struct intel_cdclk_state *new_state,
+				 enum pipe pipe);
+void
+intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
+				  const struct intel_cdclk_state *old_state,
+				  const struct intel_cdclk_state *new_state,
+				  enum pipe pipe);
+void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
+			    const char *context);
+
+#endif /* __INTEL_CDCLK_H__ */
drivers/gpu/drm/i915/intel_color.c

@@ -22,6 +22,7 @@
  *
  */

+#include "intel_color.h"
 #include "intel_drv.h"

 #define CTM_COEFF_SIGN (1ULL << 63)
@@ -273,6 +274,14 @@ static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
 				    ilk_csc_coeff_limited_range,
 				    ilk_csc_postoff_limited_range);
 	} else if (crtc_state->csc_enable) {
+		/*
+		 * On GLK+ both pipe CSC and degamma LUT are controlled
+		 * by csc_enable. Hence for the cases where the degama
+		 * LUT is needed but CSC is not we need to load an
+		 * identity matrix.
+		 */
+		WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_GEMINILAKE(dev_priv));
+
 		ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
 				    ilk_csc_coeff_identity,
 				    ilk_csc_off_zero);
@@ -351,6 +360,29 @@ static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state)
 	I915_WRITE(CGM_PIPE_MODE(pipe), crtc_state->cgm_mode);
 }

+/* i965+ "10.6" bit interpolated format "even DW" (low 8 bits) */
+static u32 i965_lut_10p6_ldw(const struct drm_color_lut *color)
+{
+	return (color->red & 0xff) << 16 |
+		(color->green & 0xff) << 8 |
+		(color->blue & 0xff);
+}
+
+/* i965+ "10.6" interpolated format "odd DW" (high 8 bits) */
+static u32 i965_lut_10p6_udw(const struct drm_color_lut *color)
+{
+	return (color->red >> 8) << 16 |
+		(color->green >> 8) << 8 |
+		(color->blue >> 8);
+}
+
+static u32 ilk_lut_10(const struct drm_color_lut *color)
+{
+	return drm_color_lut_extract(color->red, 10) << 20 |
+		drm_color_lut_extract(color->green, 10) << 10 |
+		drm_color_lut_extract(color->blue, 10);
+}
+
 /* Loads the legacy palette/gamma unit for the CRTC. */
 static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
 				    const struct drm_property_blob *blob)
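The two 10p6 helpers split each 16-bit channel of a drm_color_lut entry across a pair of palette dwords: the even dword carries the low 8 bits of each channel, the odd dword the high 8 bits, each channel shifted into its own byte lane. A quick worked example for a single channel (the value is arbitrary):

	u16 red = 0xabcd;
	u32 ldw = (red & 0xff) << 16;	/* even DW, red lane: 0x00cd0000 */
	u32 udw = (red >> 8) << 16;	/* odd DW, red lane:  0x00ab0000 */

ilk_lut_10() instead packs all three channels, truncated to 10 bits via drm_color_lut_extract(), into one dword (red at bit 20, green at bit 10, blue at bit 0).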
@@ -376,15 +408,6 @@ static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
 				(drm_color_lut_extract(lut[i].green, 8) << 8) |
 				drm_color_lut_extract(lut[i].blue, 8);

-			if (HAS_GMCH(dev_priv))
-				I915_WRITE(PALETTE(pipe, i), word);
-			else
-				I915_WRITE(LGC_PALETTE(pipe, i), word);
-		}
-	} else {
-		for (i = 0; i < 256; i++) {
-			u32 word = (i << 16) | (i << 8) | i;
-
 			if (HAS_GMCH(dev_priv))
 				I915_WRITE(PALETTE(pipe, i), word);
 			else
@@ -422,6 +445,8 @@ static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
 	val &= ~PIPECONF_GAMMA_MODE_MASK_ILK;
 	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
 	I915_WRITE(PIPECONF(pipe), val);
+
+	ilk_load_csc_matrix(crtc_state);
 }

 static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
@@ -460,84 +485,90 @@ static void skl_color_commit(const struct intel_crtc_state *crtc_state)
 	ilk_load_csc_matrix(crtc_state);
 }

-static void bdw_load_degamma_lut(const struct intel_crtc_state *crtc_state)
+static void i965_load_lut_10p6(struct intel_crtc *crtc,
+			       const struct drm_property_blob *blob)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
-	u32 i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+	const struct drm_color_lut *lut = blob->data;
+	int i, lut_size = drm_color_lut_size(blob);
 	enum pipe pipe = crtc->pipe;

-	I915_WRITE(PREC_PAL_INDEX(pipe),
-		   PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT);
-
-	if (degamma_lut) {
-		const struct drm_color_lut *lut = degamma_lut->data;
-
-		for (i = 0; i < lut_size; i++) {
-			u32 word =
-			drm_color_lut_extract(lut[i].red, 10) << 20 |
-			drm_color_lut_extract(lut[i].green, 10) << 10 |
-			drm_color_lut_extract(lut[i].blue, 10);
-
-			I915_WRITE(PREC_PAL_DATA(pipe), word);
-		}
-	} else {
-		for (i = 0; i < lut_size; i++) {
-			u32 v = (i * ((1 << 10) - 1)) / (lut_size - 1);
-
-			I915_WRITE(PREC_PAL_DATA(pipe),
-				   (v << 20) | (v << 10) | v);
-		}
+	for (i = 0; i < lut_size - 1; i++) {
+		I915_WRITE(PALETTE(pipe, 2 * i + 0),
+			   i965_lut_10p6_ldw(&lut[i]));
+		I915_WRITE(PALETTE(pipe, 2 * i + 1),
+			   i965_lut_10p6_udw(&lut[i]));
 	}
+
+	I915_WRITE(PIPEGCMAX(pipe, 0), lut[i].red);
+	I915_WRITE(PIPEGCMAX(pipe, 1), lut[i].green);
+	I915_WRITE(PIPEGCMAX(pipe, 2), lut[i].blue);
 }

-static void bdw_load_gamma_lut(const struct intel_crtc_state *crtc_state, u32 offset)
+static void i965_load_luts(const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
-	u32 i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
-	enum pipe pipe = crtc->pipe;

-	WARN_ON(offset & ~PAL_PREC_INDEX_VALUE_MASK);
+	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+		i9xx_load_luts(crtc_state);
+	else
+		i965_load_lut_10p6(crtc, gamma_lut);
+}

-	I915_WRITE(PREC_PAL_INDEX(pipe),
-		   (offset ? PAL_PREC_SPLIT_MODE : 0) |
-		   PAL_PREC_AUTO_INCREMENT |
-		   offset);
+static void ilk_load_lut_10(struct intel_crtc *crtc,
+			    const struct drm_property_blob *blob)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct drm_color_lut *lut = blob->data;
+	int i, lut_size = drm_color_lut_size(blob);
+	enum pipe pipe = crtc->pipe;

-	if (gamma_lut) {
-		const struct drm_color_lut *lut = gamma_lut->data;
+	for (i = 0; i < lut_size; i++)
+		I915_WRITE(PREC_PALETTE(pipe, i), ilk_lut_10(&lut[i]));
+}

-		for (i = 0; i < lut_size; i++) {
-			u32 word =
-			(drm_color_lut_extract(lut[i].red, 10) << 20) |
-			(drm_color_lut_extract(lut[i].green, 10) << 10) |
-			drm_color_lut_extract(lut[i].blue, 10);
+static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;

-			I915_WRITE(PREC_PAL_DATA(pipe), word);
-		}
+	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+		i9xx_load_luts(crtc_state);
+	else
+		ilk_load_lut_10(crtc, gamma_lut);
+}

-		/* Program the max register to clamp values > 1.0. */
-		i = lut_size - 1;
-		I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
-			   drm_color_lut_extract(lut[i].red, 16));
-		I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
-			   drm_color_lut_extract(lut[i].green, 16));
-		I915_WRITE(PREC_PAL_GC_MAX(pipe, 2),
-			   drm_color_lut_extract(lut[i].blue, 16));
-	} else {
-		for (i = 0; i < lut_size; i++) {
-			u32 v = (i * ((1 << 10) - 1)) / (lut_size - 1);
+static int ivb_lut_10_size(u32 prec_index)
+{
+	if (prec_index & PAL_PREC_SPLIT_MODE)
+		return 512;
+	else
+		return 1024;
+}

-			I915_WRITE(PREC_PAL_DATA(pipe),
-				   (v << 20) | (v << 10) | v);
-		}
-
-		I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), (1 << 16) - 1);
-		I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), (1 << 16) - 1);
-		I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), (1 << 16) - 1);
+/*
+ * IVB/HSW Bspec / PAL_PREC_INDEX:
+ * "Restriction : Index auto increment mode is not
+ *  supported and must not be enabled."
+ */
+static void ivb_load_lut_10(struct intel_crtc *crtc,
+			    const struct drm_property_blob *blob,
+			    u32 prec_index)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	int hw_lut_size = ivb_lut_10_size(prec_index);
+	const struct drm_color_lut *lut = blob->data;
+	int i, lut_size = drm_color_lut_size(blob);
+	enum pipe pipe = crtc->pipe;
+
+	for (i = 0; i < hw_lut_size; i++) {
+		/* We discard half the user entries in split gamma mode */
+		const struct drm_color_lut *entry =
+			&lut[i * (lut_size - 1) / (hw_lut_size - 1)];
+
+		I915_WRITE(PREC_PAL_INDEX(pipe), prec_index++);
+		I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry));
 	}

 	/*
 	 * Reset the index, otherwise it prevents the legacy palette to be
 	 * written properly.
 	 */
@@ -547,22 +578,143 @@ static void bdw_load_gamma_lut(const struct intel_crtc_state *crtc_state, u32 offset)
 	I915_WRITE(PREC_PAL_INDEX(pipe), 0);
 }

-/* Loads the palette/gamma unit for the CRTC on Broadwell+. */
-static void broadwell_load_luts(const struct intel_crtc_state *crtc_state)
+/* On BDW+ the index auto increment mode actually works */
+static void bdw_load_lut_10(struct intel_crtc *crtc,
+			    const struct drm_property_blob *blob,
+			    u32 prec_index)
 {
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	int hw_lut_size = ivb_lut_10_size(prec_index);
+	const struct drm_color_lut *lut = blob->data;
+	int i, lut_size = drm_color_lut_size(blob);
+	enum pipe pipe = crtc->pipe;
+
+	I915_WRITE(PREC_PAL_INDEX(pipe), prec_index |
+		   PAL_PREC_AUTO_INCREMENT);
+
+	for (i = 0; i < hw_lut_size; i++) {
+		/* We discard half the user entries in split gamma mode */
+		const struct drm_color_lut *entry =
+			&lut[i * (lut_size - 1) / (hw_lut_size - 1)];
+
+		I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry));
+	}
+
+	/*
+	 * Reset the index, otherwise it prevents the legacy palette to be
+	 * written properly.
+	 */
+	I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+}
+
+static void ivb_load_lut_10_max(struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
+
+	/* Program the max register to clamp values > 1.0. */
+	I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
+	I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
+	I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
+
+	/*
+	 * Program the gc max 2 register to clamp values > 1.0.
+	 * ToDo: Extend the ABI to be able to program values
+	 * from 3.0 to 7.0
+	 */
+	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+		I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16);
+		I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16);
+		I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16);
+	}
+}
+
+static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
+{
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+	const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;

-	if (crtc_state_is_legacy_gamma(crtc_state)) {
+	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
 		i9xx_load_luts(crtc_state);
+	} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
+		ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
+				PAL_PREC_INDEX_VALUE(0));
+		ivb_load_lut_10_max(crtc);
+		ivb_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
+				PAL_PREC_INDEX_VALUE(512));
 	} else {
-		bdw_load_degamma_lut(crtc_state);
-		bdw_load_gamma_lut(crtc_state,
-				   INTEL_INFO(dev_priv)->color.degamma_lut_size);
+		const struct drm_property_blob *blob = gamma_lut ?: degamma_lut;
+
+		ivb_load_lut_10(crtc, blob,
+				PAL_PREC_INDEX_VALUE(0));
+		ivb_load_lut_10_max(crtc);
 	}
 }

+static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+	const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+
+	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
+		i9xx_load_luts(crtc_state);
+	} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
+		bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
+				PAL_PREC_INDEX_VALUE(0));
+		ivb_load_lut_10_max(crtc);
+		bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
+				PAL_PREC_INDEX_VALUE(512));
+	} else {
+		const struct drm_property_blob *blob = gamma_lut ?: degamma_lut;
+
+		bdw_load_lut_10(crtc, blob,
+				PAL_PREC_INDEX_VALUE(0));
+		ivb_load_lut_10_max(crtc);
+	}
+}
+
 static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
 {
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
+	const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+	const struct drm_color_lut *lut = crtc_state->base.degamma_lut->data;
+	u32 i;
+
+	/*
+	 * When setting the auto-increment bit, the hardware seems to
+	 * ignore the index bits, so we need to reset it to index 0
+	 * separately.
+	 */
+	I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
+	I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
+
+	for (i = 0; i < lut_size; i++) {
+		/*
+		 * First 33 entries represent range from 0 to 1.0
+		 * 34th and 35th entry will represent extended range
+		 * inputs 3.0 and 7.0 respectively, currently clamped
+		 * at 1.0. Since the precision is 16bit, the user
+		 * value can be directly filled to register.
+		 * The pipe degamma table in GLK+ onwards doesn't
+		 * support different values per channel, so this just
+		 * programs green value which will be equal to Red and
+		 * Blue into the lut registers.
+		 * ToDo: Extend to max 7.0. Enable 32 bit input value
+		 * as compared to just 16 to achieve this.
+		 */
+		I915_WRITE(PRE_CSC_GAMC_DATA(pipe), lut[i].green);
+	}
+
+	/* Clamp values > 1.0. */
+	while (i++ < 35)
+		I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+}
+
+static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
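Both ivb_load_lut_10() and bdw_load_lut_10() resample the user blob when the hardware table is shallower than the blob: in split gamma mode each half of the LUT has hw_lut_size = 512 slots, and slot i reads user entry i * (lut_size - 1) / (hw_lut_size - 1), which is what the "We discard half the user entries" comment refers to. Checking the endpoints with hypothetical sizes shows the mapping hits both ends of the blob exactly:

	int lut_size = 1024;		/* user entries in the blob */
	int hw_lut_size = 512;		/* hardware slots in split mode */
	int first = 0 * (lut_size - 1) / (hw_lut_size - 1);			/* 0 */
	int last  = (hw_lut_size - 1) * (lut_size - 1) / (hw_lut_size - 1);	/* 1023 */

Intermediate slots land on every second entry (rounded down), so half the user entries are skipped, as advertised.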
@@ -578,58 +730,58 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
 	I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
 	I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);

-	if (crtc_state->base.degamma_lut) {
-		struct drm_color_lut *lut = crtc_state->base.degamma_lut->data;
+	for (i = 0; i < lut_size; i++) {
+		u32 v = (i << 16) / (lut_size - 1);

-		for (i = 0; i < lut_size; i++) {
-			/*
-			 * First 33 entries represent range from 0 to 1.0
-			 * 34th and 35th entry will represent extended range
-			 * inputs 3.0 and 7.0 respectively, currently clamped
-			 * at 1.0. Since the precision is 16bit, the user
-			 * value can be directly filled to register.
-			 * The pipe degamma table in GLK+ onwards doesn't
-			 * support different values per channel, so this just
-			 * programs green value which will be equal to Red and
-			 * Blue into the lut registers.
-			 * ToDo: Extend to max 7.0. Enable 32 bit input value
-			 * as compared to just 16 to achieve this.
-			 */
-			I915_WRITE(PRE_CSC_GAMC_DATA(pipe), lut[i].green);
-		}
-	} else {
-		/* load a linear table. */
-		for (i = 0; i < lut_size; i++) {
-			u32 v = (i * (1 << 16)) / (lut_size - 1);
-
-			I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
-		}
+		I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
 	}

 	/* Clamp values > 1.0. */
 	while (i++ < 35)
-		I915_WRITE(PRE_CSC_GAMC_DATA(pipe), (1 << 16));
+		I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16);
 }

 static void glk_load_luts(const struct intel_crtc_state *crtc_state)
 {
-	glk_load_degamma_lut(crtc_state);
+	const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

-	if (crtc_state_is_legacy_gamma(crtc_state))
-		i9xx_load_luts(crtc_state);
+	/*
+	 * On GLK+ both pipe CSC and degamma LUT are controlled
+	 * by csc_enable. Hence for the cases where the CSC is
+	 * needed but degamma LUT is not we need to load a
+	 * linear degamma LUT. In fact we'll just always load
+	 * the degama LUT so that we don't have to reload
+	 * it every time the pipe CSC is being enabled.
+	 */
+	if (crtc_state->base.degamma_lut)
+		glk_load_degamma_lut(crtc_state);
 	else
-		bdw_load_gamma_lut(crtc_state, 0);
+		glk_load_degamma_lut_linear(crtc_state);
+
+	if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
+		i9xx_load_luts(crtc_state);
+	} else {
+		bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
+		ivb_load_lut_10_max(crtc);
+	}
 }

 static void icl_load_luts(const struct intel_crtc_state *crtc_state)
 {
-	glk_load_degamma_lut(crtc_state);
+	const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

-	if (crtc_state_is_legacy_gamma(crtc_state))
+	if (crtc_state->base.degamma_lut)
+		glk_load_degamma_lut(crtc_state);
+
+	if ((crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) ==
+	    GAMMA_MODE_MODE_8BIT) {
 		i9xx_load_luts(crtc_state);
-	else
-		/* ToDo: Add support for multi segment gamma LUT */
-		bdw_load_gamma_lut(crtc_state, 0);
+	} else {
+		bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
+		ivb_load_lut_10_max(crtc);
+	}
 }

 static void cherryview_load_luts(const struct intel_crtc_state *crtc_state)
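glk_load_degamma_lut_linear() replaces the old else-branch with a standalone identity ramp: entry i gets (i << 16) / (lut_size - 1), so the table runs from 0 to exactly 1.0 (1 << 16 in the hardware's 16-bit fixed-point format). A worked check with the 33-entry table the comments above describe:

	unsigned int lut_size = 33;
	/* i = 0  -> (0  << 16) / 32 = 0x0000  (0.0) */
	/* i = 16 -> (16 << 16) / 32 = 0x8000  (0.5) */
	/* i = 32 -> (32 << 16) / 32 = 0x10000 (1.0) */

The remaining two of the 35 hardware entries are then clamped to 1.0 by the trailing while loop, matching glk_load_degamma_lut() proper.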
@@ -643,7 +795,7 @@ static void cherryview_load_luts(const struct intel_crtc_state *crtc_state)
 	cherryview_load_csc_matrix(crtc_state);

 	if (crtc_state_is_legacy_gamma(crtc_state)) {
-		i9xx_load_luts_internal(crtc_state, gamma_lut);
+		i9xx_load_luts(crtc_state);
 		return;
 	}

@@ -682,12 +834,6 @@ static void cherryview_load_luts(const struct intel_crtc_state *crtc_state)
 			I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1), word1);
 		}
 	}
-
-	/*
-	 * Also program a linear LUT in the legacy block (behind the
-	 * CGM block).
-	 */
-	i9xx_load_luts_internal(crtc_state, NULL);
 }

 void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
@@ -704,6 +850,13 @@ void intel_color_commit(const struct intel_crtc_state *crtc_state)
 	dev_priv->display.color_commit(crtc_state);
 }

+int intel_color_check(struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+	return dev_priv->display.color_check(crtc_state);
+}
+
 static bool need_plane_update(struct intel_plane *plane,
 			      const struct intel_crtc_state *crtc_state)
 {
@@ -771,6 +924,68 @@ static int check_lut_size(const struct drm_property_blob *lut, int expected)
 	return 0;
 }

+static int check_luts(const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+	const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+	const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+	int gamma_length, degamma_length;
+	u32 gamma_tests, degamma_tests;
+
+	/* Always allow legacy gamma LUT with no further checking. */
+	if (crtc_state_is_legacy_gamma(crtc_state))
+		return 0;
+
+	/* C8 relies on its palette being stored in the legacy LUT */
+	if (crtc_state->c8_planes)
+		return -EINVAL;
+
+	degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+	gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+	degamma_tests = INTEL_INFO(dev_priv)->color.degamma_lut_tests;
+	gamma_tests = INTEL_INFO(dev_priv)->color.gamma_lut_tests;
+
+	if (check_lut_size(degamma_lut, degamma_length) ||
+	    check_lut_size(gamma_lut, gamma_length))
+		return -EINVAL;
+
+	if (drm_color_lut_check(degamma_lut, degamma_tests) ||
+	    drm_color_lut_check(gamma_lut, gamma_tests))
+		return -EINVAL;
+
+	return 0;
+}
+
+static u32 i9xx_gamma_mode(struct intel_crtc_state *crtc_state)
+{
+	if (!crtc_state->gamma_enable ||
+	    crtc_state_is_legacy_gamma(crtc_state))
+		return GAMMA_MODE_MODE_8BIT;
+	else
+		return GAMMA_MODE_MODE_10BIT; /* i965+ only */
+}
+
+static int i9xx_color_check(struct intel_crtc_state *crtc_state)
+{
+	int ret;
+
+	ret = check_luts(crtc_state);
+	if (ret)
+		return ret;
+
+	crtc_state->gamma_enable =
+		crtc_state->base.gamma_lut &&
+		!crtc_state->c8_planes;
+
+	crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state);
+
+	ret = intel_color_add_affected_planes(crtc_state);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state)
 {
 	u32 cgm_mode = 0;
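The remaining hunks complete the pattern established by check_luts() and i9xx_color_check(): the single cross-platform intel_color_check() body is dissolved into per-platform ->color_check() hooks (chv, ilk, ivb, glk, icl below), each of which first runs the shared LUT validation and then derives gamma_enable/csc_enable and the mode words for its own hardware. The dispatch shape, reduced to a sketch with simplified stand-in types:

	struct color_funcs {
		int (*color_check)(struct intel_crtc_state *crtc_state);
	};

	int color_check(const struct color_funcs *funcs,
			struct intel_crtc_state *crtc_state)
	{
		/* each hook begins with the shared check_luts() validation */
		return funcs->color_check(crtc_state);
	}

The hook is chosen once at init time (see the intel_color_init() hunk at the end), so per-commit checks cost one indirect call instead of a platform if-ladder.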
@ -788,83 +1003,220 @@ static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state)
|
||||
return cgm_mode;
|
||||
}
|
||||
|
||||
int intel_color_check(struct intel_crtc_state *crtc_state)
|
||||
/*
|
||||
* CHV color pipeline:
|
||||
* u0.10 -> CGM degamma -> u0.14 -> CGM csc -> u0.14 -> CGM gamma ->
|
||||
* u0.10 -> WGC csc -> u0.10 -> pipe gamma -> u0.10
|
||||
*
|
||||
* We always bypass the WGC csc and use the CGM csc
|
||||
* instead since it has degamma and better precision.
|
||||
*/
|
||||
static int chv_color_check(struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
|
||||
const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
|
||||
const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
|
||||
bool limited_color_range = false;
|
||||
int gamma_length, degamma_length;
|
||||
u32 gamma_tests, degamma_tests;
|
||||
int ret;
|
||||
|
||||
degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
|
||||
gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size;
|
||||
degamma_tests = INTEL_INFO(dev_priv)->color.degamma_lut_tests;
|
||||
gamma_tests = INTEL_INFO(dev_priv)->color.gamma_lut_tests;
|
||||
ret = check_luts(crtc_state);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* C8 needs the legacy LUT all to itself */
|
||||
if (crtc_state->c8_planes &&
|
||||
!crtc_state_is_legacy_gamma(crtc_state))
|
||||
return -EINVAL;
|
||||
|
||||
crtc_state->gamma_enable = (gamma_lut || degamma_lut) &&
|
||||
/*
|
||||
* Pipe gamma will be used only for the legacy LUT.
|
||||
* Otherwise we bypass it and use the CGM gamma instead.
|
||||
*/
|
||||
crtc_state->gamma_enable =
|
||||
crtc_state_is_legacy_gamma(crtc_state) &&
|
||||
!crtc_state->c8_planes;
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 9 ||
|
||||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
|
||||
limited_color_range = crtc_state->limited_color_range;
|
||||
crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
|
||||
|
||||
crtc_state->csc_enable =
|
||||
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
|
||||
crtc_state->base.ctm || limited_color_range;
|
||||
crtc_state->cgm_mode = chv_cgm_mode(crtc_state);
|
||||
|
||||
ret = intel_color_add_affected_planes(crtc_state);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u32 ilk_gamma_mode(const struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
if (!crtc_state->gamma_enable ||
|
||||
crtc_state_is_legacy_gamma(crtc_state))
|
||||
return GAMMA_MODE_MODE_8BIT;
|
||||
else
|
||||
return GAMMA_MODE_MODE_10BIT;
|
||||
}
|
||||
|
||||
static int ilk_color_check(struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = check_luts(crtc_state);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
crtc_state->gamma_enable =
|
||||
crtc_state->base.gamma_lut &&
|
||||
!crtc_state->c8_planes;
|
||||
|
||||
/*
|
||||
* We don't expose the ctm on ilk/snb currently,
|
||||
* nor do we enable YCbCr output. Also RGB limited
|
||||
* range output is handled by the hw automagically.
|
||||
*/
|
||||
crtc_state->csc_enable = false;
|
||||
|
||||
crtc_state->gamma_mode = ilk_gamma_mode(crtc_state);
|
||||
|
||||
crtc_state->csc_mode = 0;
|
||||
|
||||
if (IS_CHERRYVIEW(dev_priv))
|
||||
crtc_state->cgm_mode = chv_cgm_mode(crtc_state);
|
||||
ret = intel_color_add_affected_planes(crtc_state);
if (ret)
return ret;

/* Always allow legacy gamma LUT with no further checking. */
return 0;
}

static u32 ivb_gamma_mode(const struct intel_crtc_state *crtc_state)
{
if (!crtc_state->gamma_enable ||
crtc_state_is_legacy_gamma(crtc_state)) {
crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
if (INTEL_GEN(dev_priv) >= 11 &&
crtc_state->gamma_enable)
crtc_state->gamma_mode |= POST_CSC_GAMMA_ENABLE;
return 0;
}

if (check_lut_size(degamma_lut, degamma_length) ||
check_lut_size(gamma_lut, gamma_length))
return -EINVAL;

if (drm_color_lut_check(degamma_lut, degamma_tests) ||
drm_color_lut_check(gamma_lut, gamma_tests))
return -EINVAL;

if (INTEL_GEN(dev_priv) >= 11)
crtc_state->gamma_mode = GAMMA_MODE_MODE_10BIT |
PRE_CSC_GAMMA_ENABLE |
POST_CSC_GAMMA_ENABLE;
else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
crtc_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
else if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
crtc_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
crtc_state_is_legacy_gamma(crtc_state))
return GAMMA_MODE_MODE_8BIT;
else if (crtc_state->base.gamma_lut &&
crtc_state->base.degamma_lut)
return GAMMA_MODE_MODE_SPLIT;
else
crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
return GAMMA_MODE_MODE_10BIT;
}

if (INTEL_GEN(dev_priv) >= 11) {
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
crtc_state->limited_color_range)
crtc_state->csc_mode |= ICL_OUTPUT_CSC_ENABLE;
static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state)
{
bool limited_color_range = ilk_csc_limited_range(crtc_state);

if (crtc_state->base.ctm)
crtc_state->csc_mode |= ICL_CSC_ENABLE;
}
/*
* CSC comes after the LUT in degamma, RGB->YCbCr,
* and RGB full->limited range mode.
*/
if (crtc_state->base.degamma_lut ||
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
limited_color_range)
return 0;

return CSC_POSITION_BEFORE_GAMMA;
}

static int ivb_color_check(struct intel_crtc_state *crtc_state)
{
bool limited_color_range = ilk_csc_limited_range(crtc_state);
int ret;

ret = check_luts(crtc_state);
if (ret)
return ret;

crtc_state->gamma_enable =
(crtc_state->base.gamma_lut ||
crtc_state->base.degamma_lut) &&
!crtc_state->c8_planes;

crtc_state->csc_enable =
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
crtc_state->base.ctm || limited_color_range;

crtc_state->gamma_mode = ivb_gamma_mode(crtc_state);

crtc_state->csc_mode = ivb_csc_mode(crtc_state);

ret = intel_color_add_affected_planes(crtc_state);
if (ret)
return ret;

return 0;
}

static u32 glk_gamma_mode(const struct intel_crtc_state *crtc_state)
{
if (!crtc_state->gamma_enable ||
crtc_state_is_legacy_gamma(crtc_state))
return GAMMA_MODE_MODE_8BIT;
else
return GAMMA_MODE_MODE_10BIT;
}

static int glk_color_check(struct intel_crtc_state *crtc_state)
{
int ret;

ret = check_luts(crtc_state);
if (ret)
return ret;

crtc_state->gamma_enable =
crtc_state->base.gamma_lut &&
!crtc_state->c8_planes;

/* On GLK+ degamma LUT is controlled by csc_enable */
crtc_state->csc_enable =
crtc_state->base.degamma_lut ||
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
crtc_state->base.ctm || crtc_state->limited_color_range;

crtc_state->gamma_mode = glk_gamma_mode(crtc_state);

crtc_state->csc_mode = 0;

ret = intel_color_add_affected_planes(crtc_state);
if (ret)
return ret;

return 0;
}

static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
{
u32 gamma_mode = 0;

if (crtc_state->base.degamma_lut)
gamma_mode |= PRE_CSC_GAMMA_ENABLE;

if (crtc_state->base.gamma_lut &&
!crtc_state->c8_planes)
gamma_mode |= POST_CSC_GAMMA_ENABLE;

if (!crtc_state->base.gamma_lut ||
crtc_state_is_legacy_gamma(crtc_state))
gamma_mode |= GAMMA_MODE_MODE_8BIT;
else
gamma_mode |= GAMMA_MODE_MODE_10BIT;

return gamma_mode;
}

static u32 icl_csc_mode(const struct intel_crtc_state *crtc_state)
{
u32 csc_mode = 0;

if (crtc_state->base.ctm)
csc_mode |= ICL_CSC_ENABLE;

if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
crtc_state->limited_color_range)
csc_mode |= ICL_OUTPUT_CSC_ENABLE;

return csc_mode;
}

static int icl_color_check(struct intel_crtc_state *crtc_state)
{
int ret;

ret = check_luts(crtc_state);
if (ret)
return ret;

crtc_state->gamma_mode = icl_gamma_mode(crtc_state);

crtc_state->csc_mode = icl_csc_mode(crtc_state);

return 0;
}
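For reference, these per-platform hooks are selected in intel_color_init() below and then invoked through a single entry point. A minimal sketch of that dispatch, assuming the hook table has been populated (illustrative, not the verbatim upstream body):

/* Sketch: intel_color_check() forwards to the per-platform hook. */
int intel_color_check(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);

	return dev_priv->display.color_check(crtc_state);
}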
@ -872,25 +1224,33 @@ int intel_color_check(struct intel_crtc_state *crtc_state)
void intel_color_init(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool has_ctm = INTEL_INFO(dev_priv)->color.degamma_lut_size != 0;

drm_mode_crtc_set_gamma_size(&crtc->base, 256);

if (HAS_GMCH(dev_priv)) {
if (IS_CHERRYVIEW(dev_priv))
if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.color_check = chv_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
dev_priv->display.load_luts = cherryview_load_luts;
else
} else if (INTEL_GEN(dev_priv) >= 4) {
dev_priv->display.color_check = i9xx_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
dev_priv->display.load_luts = i965_load_luts;
} else {
dev_priv->display.color_check = i9xx_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
dev_priv->display.load_luts = i9xx_load_luts;

dev_priv->display.color_commit = i9xx_color_commit;
}
} else {
if (INTEL_GEN(dev_priv) >= 11)
dev_priv->display.load_luts = icl_load_luts;
else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
dev_priv->display.load_luts = glk_load_luts;
else if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
dev_priv->display.load_luts = broadwell_load_luts;
dev_priv->display.color_check = icl_color_check;
else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
dev_priv->display.color_check = glk_color_check;
else if (INTEL_GEN(dev_priv) >= 7)
dev_priv->display.color_check = ivb_color_check;
else
dev_priv->display.load_luts = i9xx_load_luts;
dev_priv->display.color_check = ilk_color_check;

if (INTEL_GEN(dev_priv) >= 9)
dev_priv->display.color_commit = skl_color_commit;
@ -898,13 +1258,21 @@ void intel_color_init(struct intel_crtc *crtc)
dev_priv->display.color_commit = hsw_color_commit;
else
dev_priv->display.color_commit = ilk_color_commit;

if (INTEL_GEN(dev_priv) >= 11)
dev_priv->display.load_luts = icl_load_luts;
else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
dev_priv->display.load_luts = glk_load_luts;
else if (INTEL_GEN(dev_priv) >= 8)
dev_priv->display.load_luts = bdw_load_luts;
else if (INTEL_GEN(dev_priv) >= 7)
dev_priv->display.load_luts = ivb_load_luts;
else
dev_priv->display.load_luts = ilk_load_luts;
}

/* Enable color management support when we have degamma & gamma LUTs. */
if (INTEL_INFO(dev_priv)->color.degamma_lut_size != 0 &&
INTEL_INFO(dev_priv)->color.gamma_lut_size != 0)
drm_crtc_enable_color_mgmt(&crtc->base,
INTEL_INFO(dev_priv)->color.degamma_lut_size,
true,
INTEL_INFO(dev_priv)->color.gamma_lut_size);
drm_crtc_enable_color_mgmt(&crtc->base,
INTEL_INFO(dev_priv)->color.degamma_lut_size,
has_ctm,
INTEL_INFO(dev_priv)->color.gamma_lut_size);
}
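The change above stops claiming CTM support unconditionally: has_ctm is derived from the degamma LUT size, and drm_crtc_enable_color_mgmt() only exposes the CTM property when it is true. A hedged usage sketch, with hypothetical LUT sizes:

/* Hypothetical sizes, for illustration only. */
drm_mode_crtc_set_gamma_size(&crtc->base, 256);
drm_crtc_enable_color_mgmt(&crtc->base,
			   33,    /* degamma LUT entries */
			   false, /* do not expose the CTM property */
			   1024   /* gamma LUT entries */);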
17
drivers/gpu/drm/i915/intel_color.h
Normal file
@ -0,0 +1,17 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/

#ifndef __INTEL_COLOR_H__
#define __INTEL_COLOR_H__

struct intel_crtc_state;
struct intel_crtc;

void intel_color_init(struct intel_crtc *crtc);
int intel_color_check(struct intel_crtc_state *crtc_state);
void intel_color_commit(const struct intel_crtc_state *crtc_state);
void intel_color_load_luts(const struct intel_crtc_state *crtc_state);

#endif /* __INTEL_COLOR_H__ */
@ -239,7 +239,8 @@ void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
for_each_combo_port_reverse(dev_priv, port) {
u32 val;

if (!icl_combo_phy_verify_state(dev_priv, port))
if (port == PORT_A &&
!icl_combo_phy_verify_state(dev_priv, port))
DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
port_name(port));

@ -23,12 +23,17 @@
* DEALINGS IN THE SOFTWARE.
*/

#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"

#include "i915_drv.h"
#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_hdcp.h"
#include "intel_panel.h"

int intel_connector_init(struct intel_connector *connector)
{
35
drivers/gpu/drm/i915/intel_connector.h
Normal file
@ -0,0 +1,35 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/

#ifndef __INTEL_CONNECTOR_H__
#define __INTEL_CONNECTOR_H__

#include "intel_display.h"

struct drm_connector;
struct edid;
struct i2c_adapter;
struct intel_connector;
struct intel_encoder;

int intel_connector_init(struct intel_connector *connector);
struct intel_connector *intel_connector_alloc(void);
void intel_connector_free(struct intel_connector *connector);
void intel_connector_destroy(struct drm_connector *connector);
int intel_connector_register(struct drm_connector *connector);
void intel_connector_unregister(struct drm_connector *connector);
void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
bool intel_connector_get_hw_state(struct intel_connector *connector);
enum pipe intel_connector_get_pipe(struct intel_connector *connector);
int intel_connector_update_modes(struct drm_connector *connector,
struct edid *edid);
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
void intel_attach_force_audio_property(struct drm_connector *connector);
void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
void intel_attach_aspect_ratio_property(struct drm_connector *connector);
void intel_attach_colorspace_property(struct drm_connector *connector);

#endif /* __INTEL_CONNECTOR_H__ */
@ -24,6 +24,7 @@ struct intel_context_ops {
int (*pin)(struct intel_context *ce);
void (*unpin)(struct intel_context *ce);

void (*reset)(struct intel_context *ce);
void (*destroy)(struct kref *kref);
};

@ -27,13 +27,18 @@
#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_connector.h"
#include "intel_crt.h"
#include "intel_ddi.h"
#include "intel_drv.h"

/* Here's the desired hotplug mode */
#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
21
drivers/gpu/drm/i915/intel_crt.h
Normal file
@ -0,0 +1,21 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/

#ifndef __INTEL_CRT_H__
#define __INTEL_CRT_H__

#include "i915_reg.h"

enum pipe;
struct drm_encoder;
struct drm_i915_private;
struct drm_i915_private;

bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t adpa_reg, enum pipe *pipe);
void intel_crt_init(struct drm_i915_private *dev_priv);
void intel_crt_reset(struct drm_encoder *encoder);

#endif /* __INTEL_CRT_H__ */
@ -21,9 +21,12 @@
* IN THE SOFTWARE.
*
*/

#include <linux/firmware.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_csr.h"

/**
* DOC: csr support for dmc
17
drivers/gpu/drm/i915/intel_csr.h
Normal file
@ -0,0 +1,17 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/

#ifndef __INTEL_CSR_H__
#define __INTEL_CSR_H__

struct drm_i915_private;

void intel_csr_ucode_init(struct drm_i915_private *i915);
void intel_csr_load_program(struct drm_i915_private *i915);
void intel_csr_ucode_fini(struct drm_i915_private *i915);
void intel_csr_ucode_suspend(struct drm_i915_private *i915);
void intel_csr_ucode_resume(struct drm_i915_private *i915);

#endif /* __INTEL_CSR_H__ */
@ -26,9 +26,19 @@
*/

#include <drm/drm_scdc_helper.h>

#include "i915_drv.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_lspcon.h"
#include "intel_panel.h"
#include "intel_psr.h"

struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
@ -3847,14 +3857,16 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
else
ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
if (ret)
return ret;

if (IS_GEN9_LP(dev_priv) && ret)
if (IS_GEN9_LP(dev_priv))
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);

intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);

return ret;
return 0;

}

53
drivers/gpu/drm/i915/intel_ddi.h
Normal file
@ -0,0 +1,53 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/

#ifndef __INTEL_DDI_H__
#define __INTEL_DDI_H__

#include <drm/i915_drm.h>

#include "intel_display.h"

struct drm_connector_state;
struct drm_i915_private;
struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
struct intel_dp;
struct intel_dpll_hw_state;
struct intel_encoder;

void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
void hsw_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
bool state);
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state);
u32 bxt_signal_levels(struct intel_dp *intel_dp);
u32 ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
u8 voltage_swing);
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable);
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *state);

#endif /* __INTEL_DDI_H__ */
@ -714,6 +714,99 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
return 0;
}

#undef INTEL_VGA_DEVICE
#define INTEL_VGA_DEVICE(id, info) (id)
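The #undef/#define pair above reuses the PCI ID list macros from <drm/i915_pciids.h> to build bare u16 arrays: each INTEL_VGA_DEVICE(id, info) entry collapses to just (id). A small illustration with a hypothetical list macro:

/* Hypothetical list macro, mirroring the i915_pciids.h pattern: */
#define EXAMPLE_IDS(info) \
	INTEL_VGA_DEVICE(0x1234, info), \
	INTEL_VGA_DEVICE(0x5678, info)
/* With INTEL_VGA_DEVICE(id, info) defined as (id), this becomes: */
static const u16 example_ids[] = { EXAMPLE_IDS(0) }; /* { 0x1234, 0x5678 } */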

static const u16 subplatform_ult_ids[] = {
INTEL_HSW_ULT_GT1_IDS(0),
INTEL_HSW_ULT_GT2_IDS(0),
INTEL_HSW_ULT_GT3_IDS(0),
INTEL_BDW_ULT_GT1_IDS(0),
INTEL_BDW_ULT_GT2_IDS(0),
INTEL_BDW_ULT_GT3_IDS(0),
INTEL_BDW_ULT_RSVD_IDS(0),
INTEL_SKL_ULT_GT1_IDS(0),
INTEL_SKL_ULT_GT2_IDS(0),
INTEL_SKL_ULT_GT3_IDS(0),
INTEL_KBL_ULT_GT1_IDS(0),
INTEL_KBL_ULT_GT2_IDS(0),
INTEL_KBL_ULT_GT3_IDS(0),
INTEL_CFL_U_GT2_IDS(0),
INTEL_CFL_U_GT3_IDS(0),
INTEL_WHL_U_GT1_IDS(0),
INTEL_WHL_U_GT2_IDS(0),
INTEL_WHL_U_GT3_IDS(0)
};

static const u16 subplatform_ulx_ids[] = {
INTEL_HSW_ULX_GT1_IDS(0),
INTEL_HSW_ULX_GT2_IDS(0),
INTEL_BDW_ULX_GT1_IDS(0),
INTEL_BDW_ULX_GT2_IDS(0),
INTEL_BDW_ULX_GT3_IDS(0),
INTEL_BDW_ULX_RSVD_IDS(0),
INTEL_SKL_ULX_GT1_IDS(0),
INTEL_SKL_ULX_GT2_IDS(0),
INTEL_KBL_ULX_GT1_IDS(0),
INTEL_KBL_ULX_GT2_IDS(0)
};

static const u16 subplatform_aml_ids[] = {
INTEL_AML_KBL_GT2_IDS(0),
INTEL_AML_CFL_GT2_IDS(0)
};

static const u16 subplatform_portf_ids[] = {
INTEL_CNL_PORT_F_IDS(0),
INTEL_ICL_PORT_F_IDS(0)
};

static bool find_devid(u16 id, const u16 *p, unsigned int num)
{
for (; num; num--, p++) {
if (*p == id)
return true;
}

return false;
}

void intel_device_info_subplatform_init(struct drm_i915_private *i915)
{
const struct intel_device_info *info = INTEL_INFO(i915);
const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
const unsigned int pi = __platform_mask_index(rinfo, info->platform);
const unsigned int pb = __platform_mask_bit(rinfo, info->platform);
u16 devid = INTEL_DEVID(i915);
u32 mask = 0;

/* Make sure IS_<platform> checks are working. */
RUNTIME_INFO(i915)->platform_mask[pi] = BIT(pb);

/* Find and mark subplatform bits based on the PCI device id. */
if (find_devid(devid, subplatform_ult_ids,
ARRAY_SIZE(subplatform_ult_ids))) {
mask = BIT(INTEL_SUBPLATFORM_ULT);
} else if (find_devid(devid, subplatform_ulx_ids,
ARRAY_SIZE(subplatform_ulx_ids))) {
mask = BIT(INTEL_SUBPLATFORM_ULX);
if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
/* ULX machines are also considered ULT. */
mask |= BIT(INTEL_SUBPLATFORM_ULT);
}
} else if (find_devid(devid, subplatform_aml_ids,
ARRAY_SIZE(subplatform_aml_ids))) {
mask = BIT(INTEL_SUBPLATFORM_AML);
} else if (find_devid(devid, subplatform_portf_ids,
ARRAY_SIZE(subplatform_portf_ids))) {
mask = BIT(INTEL_SUBPLATFORM_PORTF);
}

GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS);

RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
}
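Consumers then test the platform bit and the subplatform bit in the same mask word. A sketch of such a check on top of the masks written above (upstream wraps this in an IS_SUBPLATFORM()-style macro; the exact form may differ):

/* Sketch only: is this device a ULT variant of platform p? */
static inline bool example_is_ult(struct drm_i915_private *i915,
				  enum intel_platform p)
{
	const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
	u32 mask = rinfo->platform_mask[__platform_mask_index(rinfo, p)];

	/* the platform bit and its subplatform bits share one word */
	return (mask & BIT(__platform_mask_bit(rinfo, p))) &&
	       (mask & BIT(INTEL_SUBPLATFORM_ULT));
}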

/**
* intel_device_info_runtime_init - initialize runtime info
* @dev_priv: the i915 device
@ -27,6 +27,7 @@

#include <uapi/drm/i915_drm.h>

#include "intel_engine_types.h"
#include "intel_display.h"

struct drm_printer;
@ -77,6 +78,21 @@ enum intel_platform {
INTEL_MAX_PLATFORMS
};

/*
* Subplatform bits share the same namespace per parent platform. In other words
* it is fine for the same bit to be used on multiple parent platforms.
*/

#define INTEL_SUBPLATFORM_BITS (3)

/* HSW/BDW/SKL/KBL/CFL */
#define INTEL_SUBPLATFORM_ULT (0)
#define INTEL_SUBPLATFORM_ULX (1)
#define INTEL_SUBPLATFORM_AML (2)

/* CNL/ICL */
#define INTEL_SUBPLATFORM_PORTF (0)
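Because the parent platform is always checked first, bit 0 can safely mean ULT on one platform family and Port F on another. A hedged sketch (the mask-word index is simplified to 0 here):

/* Sketch: bit 0 is reused, the parent platform disambiguates. */
bool has_ult = false, has_portf = false;
u32 mask = RUNTIME_INFO(i915)->platform_mask[0]; /* index simplified */

if (IS_HASWELL(i915))
	has_ult = mask & BIT(INTEL_SUBPLATFORM_ULT);     /* bit 0 on HSW */
else if (IS_ICELAKE(i915))
	has_portf = mask & BIT(INTEL_SUBPLATFORM_PORTF); /* also bit 0 */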

enum intel_ppgtt_type {
INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
@ -150,8 +166,6 @@ struct sseu_dev_info {
u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES];
};

typedef u8 intel_engine_mask_t;

struct intel_device_info {
u16 gen_mask;

@ -160,7 +174,6 @@ struct intel_device_info {
intel_engine_mask_t engine_mask; /* Engines supported by the HW */

enum intel_platform platform;
u32 platform_mask;

enum intel_ppgtt_type ppgtt_type;
unsigned int ppgtt_size; /* log2, e.g. 31/32/48 bits */
@ -197,6 +210,16 @@ struct intel_device_info {
};

struct intel_runtime_info {
/*
* Platform mask is used for optimizing or-ed IS_PLATFORM calls into
* single runtime conditionals, and also to provide groundwork for
* future per-platform or per-SKU build optimizations.
*
* Array can be extended when necessary if the corresponding
* BUILD_BUG_ON is hit.
*/
u32 platform_mask[2];

u16 device_id;

u8 num_sprites[I915_MAX_PIPES];
@ -267,6 +290,7 @@ static inline void sseu_set_eus(struct sseu_dev_info *sseu,

const char *intel_platform_name(enum intel_platform platform);

void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv);
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
void intel_device_info_dump_flags(const struct intel_device_info *info,
struct drm_printer *p);
@ -44,21 +44,31 @@
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_frontbuffer.h"

#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_frontbuffer.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_reset.h"
#include "i915_trace.h"
#include "intel_atomic_plane.h"
#include "intel_color.h"
#include "intel_cdclk.h"
#include "intel_crt.h"
#include "intel_ddi.h"
#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_dvo.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_lvds.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sdvo.h"
#include "intel_sprite.h"
#include "intel_tv.h"

/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
@ -115,8 +125,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
@ -467,10 +477,11 @@ static const struct intel_limit intel_limits_bxt = {
};

static void
skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
if (enable)
I915_WRITE(CLKGATE_DIS_PSL(pipe),
I915_READ(CLKGATE_DIS_PSL(pipe)) |
DUPS1_GATING_DIS | DUPS2_GATING_DIS);
else
I915_WRITE(CLKGATE_DIS_PSL(pipe),
@ -5530,7 +5541,7 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
/* Display WA 827 */
if (needs_nv12_wa(dev_priv, old_crtc_state) &&
!needs_nv12_wa(dev_priv, pipe_config)) {
skl_wa_clkgate(dev_priv, crtc->pipe, false);
skl_wa_827(dev_priv, crtc->pipe, false);
}
}

@ -5569,7 +5580,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
/* Display WA 827 */
if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
needs_nv12_wa(dev_priv, pipe_config)) {
skl_wa_clkgate(dev_priv, crtc->pipe, true);
skl_wa_827(dev_priv, crtc->pipe, true);
}

/*
@ -6180,6 +6191,9 @@ bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
if (port == PORT_NONE)
return false;

if (IS_ELKHARTLAKE(dev_priv))
return port <= PORT_C;

if (INTEL_GEN(dev_priv) >= 11)
return port <= PORT_B;

@ -6188,7 +6202,7 @@ bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)

bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
{
if (INTEL_GEN(dev_priv) >= 11)
if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
return port >= PORT_C && port <= PORT_F;

return false;
@ -9751,7 +9765,8 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,

static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
u64 *power_domain_mask)
u64 *power_domain_mask,
intel_wakeref_t *wakerefs)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@ -9759,6 +9774,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
unsigned long panel_transcoder_mask = 0;
unsigned long enabled_panel_transcoders = 0;
enum transcoder panel_transcoder;
intel_wakeref_t wf;
u32 tmp;

if (INTEL_GEN(dev_priv) >= 11)
@ -9824,10 +9840,13 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
enabled_panel_transcoders != BIT(TRANSCODER_EDP));

power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wf)
return false;

WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
wakerefs[power_domain] = wf;
*power_domain_mask |= BIT_ULL(power_domain);

tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
@ -9837,13 +9856,15 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,

static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
u64 *power_domain_mask)
u64 *power_domain_mask,
intel_wakeref_t *wakerefs)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
enum port port;
enum transcoder cpu_transcoder;
intel_wakeref_t wf;
enum port port;
u32 tmp;

for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
@ -9853,10 +9874,13 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
cpu_transcoder = TRANSCODER_DSI_C;

power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wf)
continue;

WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
wakerefs[power_domain] = wf;
*power_domain_mask |= BIT_ULL(power_domain);

/*
@ -9935,6 +9959,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
enum intel_display_power_domain power_domain;
u64 power_domain_mask;
bool active;
@ -9942,16 +9967,21 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
intel_crtc_init_scalers(crtc, pipe_config);

power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wf)
return false;

wakerefs[power_domain] = wf;
power_domain_mask = BIT_ULL(power_domain);

pipe_config->shared_dpll = NULL;

active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
active = hsw_get_transcoder_state(crtc, pipe_config,
&power_domain_mask, wakerefs);

if (IS_GEN9_LP(dev_priv) &&
bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
bxt_get_dsi_transcoder_state(crtc, pipe_config,
&power_domain_mask, wakerefs)) {
WARN_ON(active);
active = true;
}
@ -9985,8 +10015,11 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
}

power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
WARN_ON(power_domain_mask & BIT_ULL(power_domain));
WARN_ON(power_domain_mask & BIT_ULL(power_domain));

wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (wf) {
wakerefs[power_domain] = wf;
power_domain_mask |= BIT_ULL(power_domain);

if (INTEL_GEN(dev_priv) >= 9)
@ -10018,7 +10051,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,

out:
for_each_power_domain(power_domain, power_domain_mask)
intel_display_power_put_unchecked(dev_priv, power_domain);
intel_display_power_put(dev_priv,
power_domain, wakerefs[power_domain]);

return active;
}
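The conversion above replaces the untracked release with a tracked one: each intel_display_power_get_if_enabled() returns an intel_wakeref_t cookie, stashed per domain and handed back to intel_display_power_put(), so debug builds can attribute leaked references. The pairing in miniature (a sketch, not the upstream body):

/* Sketch of the tracked acquire/release pairing used above. */
intel_wakeref_t wf;

wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (wf) {
	/* ... read the hardware state ... */
	intel_display_power_put(dev_priv, power_domain, wf);
}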
@ -12990,10 +13024,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
return -EINVAL;
}

/* keep the current setting */
if (!intel_state->cdclk.force_min_cdclk_changed)
intel_state->cdclk.force_min_cdclk =
dev_priv->cdclk.force_min_cdclk;

intel_state->modeset = true;
intel_state->active_crtcs = dev_priv->active_crtcs;
intel_state->cdclk.logical = dev_priv->cdclk.logical;
intel_state->cdclk.actual = dev_priv->cdclk.actual;
intel_state->cdclk.pipe = INVALID_PIPE;

for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (new_crtc_state->active)
@ -13013,6 +13053,8 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
* adjusted_mode bits in the crtc directly.
*/
if (dev_priv->display.modeset_calc_cdclk) {
enum pipe pipe;

ret = dev_priv->display.modeset_calc_cdclk(state);
if (ret < 0)
return ret;
@ -13029,12 +13071,36 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
return ret;
}

if (is_power_of_2(intel_state->active_crtcs)) {
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;

pipe = ilog2(intel_state->active_crtcs);
crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (crtc_state && needs_modeset(crtc_state))
pipe = INVALID_PIPE;
} else {
pipe = INVALID_PIPE;
}

/* All pipes must be switched off while we change the cdclk. */
if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
&intel_state->cdclk.actual)) {
if (pipe != INVALID_PIPE &&
intel_cdclk_needs_cd2x_update(dev_priv,
&dev_priv->cdclk.actual,
&intel_state->cdclk.actual)) {
ret = intel_lock_all_pipes(state);
if (ret < 0)
return ret;

intel_state->cdclk.pipe = pipe;
} else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
&intel_state->cdclk.actual)) {
ret = intel_modeset_all_pipes(state);
if (ret < 0)
return ret;

intel_state->cdclk.pipe = INVALID_PIPE;
}

DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
@ -13043,8 +13109,6 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
intel_state->cdclk.logical.voltage_level,
intel_state->cdclk.actual.voltage_level);
} else {
to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
}

intel_modeset_clear_plls(state);
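The new branch above avoids a full modeset for cd2x-only cdclk changes when exactly one pipe is active: is_power_of_2() on the active-CRTC bitmask detects the single-pipe case, and the update is then synchronized to that pipe with the pipes merely locked rather than disabled. A worked example of the bitmask test:

/* Worked example of the single-active-pipe test used above. */
enum pipe pipe = INVALID_PIPE;
u8 active_crtcs = 0x4;             /* only pipe C enabled */
if (is_power_of_2(active_crtcs))   /* true: exactly one bit set */
	pipe = ilog2(active_crtcs); /* 2, i.e. PIPE_C */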
@ -13085,7 +13149,7 @@ static int intel_atomic_check(struct drm_device *dev,
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *crtc_state;
int ret, i;
bool any_ms = false;
bool any_ms = intel_state->cdclk.force_min_cdclk_changed;

/* Catch I915_MODE_FLAG_INHERITED */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
@ -13210,14 +13274,14 @@ static void intel_update_crtc(struct drm_crtc *crtc,
else if (new_plane_state)
intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);

intel_begin_crtc_commit(crtc, old_crtc_state);
intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc);

if (INTEL_GEN(dev_priv) >= 9)
skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
else
i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);

intel_finish_crtc_commit(crtc, old_crtc_state);
intel_finish_crtc_commit(to_intel_atomic_state(state), intel_crtc);
}

static void intel_update_crtcs(struct drm_atomic_state *state)
@ -13445,7 +13509,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (intel_state->modeset) {
drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);
intel_set_cdclk_pre_plane_update(dev_priv,
&intel_state->cdclk.actual,
&dev_priv->cdclk.actual,
intel_state->cdclk.pipe);

/*
* SKL workaround: bspec recommends we disable the SAGV when we
@ -13474,6 +13541,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
dev_priv->display.update_crtcs(state);

if (intel_state->modeset)
intel_set_cdclk_post_plane_update(dev_priv,
&intel_state->cdclk.actual,
&dev_priv->cdclk.actual,
intel_state->cdclk.pipe);

/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
* already, but still need the state for the delayed optimization. To
* fix this:
@ -13675,8 +13748,10 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_state->min_voltage_level,
sizeof(intel_state->min_voltage_level));
dev_priv->active_crtcs = intel_state->active_crtcs;
dev_priv->cdclk.logical = intel_state->cdclk.logical;
dev_priv->cdclk.actual = intel_state->cdclk.actual;
dev_priv->cdclk.force_min_cdclk =
intel_state->cdclk.force_min_cdclk;

intel_cdclk_swap_state(intel_state);
}

drm_atomic_state_get(state);
@ -13996,39 +14071,35 @@ skl_max_scale(const struct intel_crtc_state *crtc_state,
return max_scale;
}

static void intel_begin_crtc_commit(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
static void intel_begin_crtc_commit(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *old_intel_cstate =
to_intel_crtc_state(old_crtc_state);
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_crtc_state->state);
struct intel_crtc_state *intel_cstate =
intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
bool modeset = needs_modeset(&intel_cstate->base);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
bool modeset = needs_modeset(&new_crtc_state->base);

/* Perform vblank evasion around commit operation */
intel_pipe_update_start(intel_cstate);
intel_pipe_update_start(new_crtc_state);

if (modeset)
goto out;

if (intel_cstate->base.color_mgmt_changed ||
intel_cstate->update_pipe)
intel_color_commit(intel_cstate);
if (new_crtc_state->base.color_mgmt_changed ||
new_crtc_state->update_pipe)
intel_color_commit(new_crtc_state);

if (intel_cstate->update_pipe)
intel_update_pipe_config(old_intel_cstate, intel_cstate);
if (new_crtc_state->update_pipe)
intel_update_pipe_config(old_crtc_state, new_crtc_state);
else if (INTEL_GEN(dev_priv) >= 9)
skl_detach_scalers(intel_cstate);
skl_detach_scalers(new_crtc_state);

out:
if (dev_priv->display.atomic_update_watermarks)
dev_priv->display.atomic_update_watermarks(old_intel_state,
intel_cstate);
dev_priv->display.atomic_update_watermarks(state,
new_crtc_state);
}

void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
@ -14047,21 +14118,20 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
}
}

static void intel_finish_crtc_commit(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
static void intel_finish_crtc_commit(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_crtc_state->state);
struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
intel_atomic_get_new_crtc_state(state, crtc);

intel_pipe_update_end(new_crtc_state);

if (new_crtc_state->update_pipe &&
!needs_modeset(&new_crtc_state->base) &&
old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED)
intel_crtc_arm_fifo_underrun(intel_crtc, new_crtc_state);
old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}

/**
@ -25,22 +25,34 @@
*
*/

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"

#define DP_DPRX_ESI_LEN 14

@ -1883,6 +1895,9 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
int pipe_bpp;
int ret;

pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
intel_dp_supports_fec(intel_dp, pipe_config);

if (!intel_dp_supports_dsc(intel_dp, pipe_config))
return -EINVAL;

@ -1966,6 +1981,14 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
return 0;
}

int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state)
{
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
return 6 * 3;
else
return 8 * 3;
}
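RGB output can dither down to 6 bpc, hence the 18 bpp floor, while YCbCr output formats need at least 8 bpc per channel, hence 24 bpp. Worked through:

/* RGB:      6 bpc * 3 channels = 18 bpp minimum */
/* YCbCr444: 8 bpc * 3 channels = 24 bpp minimum */
int min_bpp = intel_dp_min_bpp(pipe_config); /* 18 or 24 */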

static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@ -1989,7 +2012,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
limits.min_lane_count = 1;
limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

limits.min_bpp = 6 * 3;
limits.min_bpp = intel_dp_min_bpp(pipe_config);
limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);

if (intel_dp_is_edp(intel_dp)) {
@ -2091,7 +2114,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
to_intel_digital_connector_state(conn_state);
bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_CONSTANT_N);
int ret;
int ret, output_bpp;

if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
pipe_config->has_pch_encoder = true;
@ -2136,9 +2159,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return -EINVAL;

pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
intel_dp_supports_fec(intel_dp, pipe_config);

ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
if (ret < 0)
return ret;
@ -2146,25 +2166,22 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);

if (!pipe_config->dsc_params.compression_enable)
intel_link_compute_m_n(pipe_config->pipe_bpp,
pipe_config->lane_count,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
&pipe_config->dp_m_n,
constant_n);
if (pipe_config->dsc_params.compression_enable)
output_bpp = pipe_config->dsc_params.compressed_bpp;
else
intel_link_compute_m_n(pipe_config->dsc_params.compressed_bpp,
pipe_config->lane_count,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
&pipe_config->dp_m_n,
constant_n);
output_bpp = pipe_config->pipe_bpp;

intel_link_compute_m_n(output_bpp,
pipe_config->lane_count,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
&pipe_config->dp_m_n,
constant_n);

if (intel_connector->panel.downclock_mode != NULL &&
dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
pipe_config->has_drrs = true;
intel_link_compute_m_n(pipe_config->pipe_bpp,
intel_link_compute_m_n(output_bpp,
pipe_config->lane_count,
intel_connector->panel.downclock_mode->clock,
pipe_config->port_clock,
122
drivers/gpu/drm/i915/intel_dp.h
Normal file
@ -0,0 +1,122 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/

#ifndef __INTEL_DP_H__
#define __INTEL_DP_H__

#include <linux/types.h>

#include <drm/i915_drm.h>

#include "i915_reg.h"

enum pipe;
struct drm_connector_state;
struct drm_encoder;
struct drm_i915_private;
struct drm_modeset_acquire_ctx;
struct intel_connector;
struct intel_crtc_state;
struct intel_digital_port;
struct intel_dp;
struct intel_encoder;

struct link_config_limits {
int min_clock, max_clock;
int min_lane_count, max_lane_count;
int min_bpp, max_bpp;
};

void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct link_config_limits *limits);
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state);
bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t dp_reg, enum port port,
enum pipe *pipe);
bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
int link_rate, u8 lane_count,
bool link_mst);
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
int link_rate, u8 lane_count);
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool enable);
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
int intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
bool intel_dp_is_edp(struct intel_dp *intel_dp);
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
bool long_hpd);
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
int intel_dp_max_lane_count(struct intel_dp *intel_dp);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
u32 intel_dp_pack_aux(const u8 *src, int src_bytes);

void intel_edp_drrs_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);

void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
u8 dp_train_pat);
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp);
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
u8
intel_dp_voltage_max(struct intel_dp *intel_dp);
u8
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status);
u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
int mode_clock, int mode_hdisplay);
u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
int mode_hdisplay);

bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
bool intel_digital_port_connected(struct intel_encoder *encoder);
void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
struct intel_digital_port *dig_port);

static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
return ~((1 << lane_count) - 1) & 0xf;
}
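A worked example of the helper just above, for a 4-lane port driven with 2 lanes:

/* lane_count = 2: (1 << 2) - 1 = 0x3; ~0x3 & 0xf = 0xc */
unsigned int mask = intel_dp_unused_lane_mask(2); /* lanes 2 and 3 unused */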
|
||||
|
||||
#endif /* __INTEL_DP_H__ */
|
@ -21,6 +21,7 @@
|
||||
* IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "intel_dp.h"
|
||||
#include "intel_drv.h"
|
||||
|
||||
static void
|
||||
|
@ -23,12 +23,17 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include "i915_drv.h"
|
||||
#include "intel_drv.h"
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_edid.h>
|
||||
#include <drm/drm_probe_helper.h>
|
||||
|
||||
#include "i915_drv.h"
|
||||
#include "intel_audio.h"
|
||||
#include "intel_connector.h"
|
||||
#include "intel_ddi.h"
|
||||
#include "intel_dp.h"
|
||||
#include "intel_drv.h"
|
||||
|
||||
static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *crtc_state,
|
||||
struct drm_connector_state *conn_state,
|
||||
@ -119,7 +124,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
|
||||
limits.min_lane_count =
|
||||
limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
|
||||
|
||||
limits.min_bpp = 6 * 3;
|
||||
limits.min_bpp = intel_dp_min_bpp(pipe_config);
|
||||
limits.max_bpp = pipe_config->pipe_bpp;
|
||||
|
||||
intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
|
||||
|
@ -21,6 +21,7 @@
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "intel_dp.h"
|
||||
#include "intel_drv.h"
|
||||
|
||||
/**
|
||||
|
@ -27,23 +27,24 @@
|
||||
|
||||
#include <linux/async.h>
|
||||
#include <linux/i2c.h>
|
||||
#include <linux/hdmi.h>
|
||||
#include <linux/sched/clock.h>
|
||||
#include <linux/stackdepot.h>
|
||||
#include <drm/i915_drm.h>
|
||||
#include "i915_drv.h"
|
||||
|
||||
#include <drm/drm_atomic.h>
|
||||
#include <drm/drm_crtc.h>
|
||||
#include <drm/drm_encoder.h>
|
||||
#include <drm/drm_fb_helper.h>
|
||||
#include <drm/drm_dp_dual_mode_helper.h>
|
||||
#include <drm/drm_dp_mst_helper.h>
|
||||
#include <drm/drm_encoder.h>
|
||||
#include <drm/drm_fb_helper.h>
|
||||
#include <drm/drm_probe_helper.h>
|
||||
#include <drm/drm_rect.h>
|
||||
#include <drm/drm_vblank.h>
|
||||
#include <drm/drm_atomic.h>
|
||||
#include <drm/i915_drm.h>
|
||||
#include <drm/i915_mei_hdcp_interface.h>
|
||||
#include <media/cec-notifier.h>
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
struct drm_printer;
|
||||
|
||||
/**
|
||||
@ -558,6 +559,11 @@ struct intel_atomic_state {
|
||||
* state only when all crtc's are DPMS off.
|
||||
*/
|
||||
struct intel_cdclk_state actual;
|
||||
|
||||
int force_min_cdclk;
|
||||
bool force_min_cdclk_changed;
|
||||
/* pipe to which cd2x update is synchronized */
|
||||
enum pipe pipe;
|
||||
} cdclk;
|
||||
|
||||
bool dpll_set, modeset;
|
||||
@ -1597,6 +1603,7 @@ void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
|
||||
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
|
||||
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
|
||||
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
|
||||
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
|
||||
|
||||
static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
|
||||
u32 mask)
|
||||
@ -1624,85 +1631,8 @@ void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
|
||||
void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
|
||||
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
|
||||
|
||||
/* intel_crt.c */
|
||||
bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
|
||||
i915_reg_t adpa_reg, enum pipe *pipe);
|
||||
void intel_crt_init(struct drm_i915_private *dev_priv);
|
||||
void intel_crt_reset(struct drm_encoder *encoder);
|
||||
|
||||
/* intel_ddi.c */
|
||||
void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
|
||||
const struct intel_crtc_state *old_crtc_state,
|
||||
const struct drm_connector_state *old_conn_state);
|
||||
void hsw_fdi_link_train(struct intel_crtc *crtc,
|
||||
const struct intel_crtc_state *crtc_state);
|
||||
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
|
||||
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
|
||||
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_get_config(struct intel_encoder *encoder,
	struct intel_crtc_state *pipe_config);

void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
	bool state);
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
	struct intel_crtc_state *crtc_state);
u32 bxt_signal_levels(struct intel_dp *intel_dp);
u32 ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
	u8 voltage_swing);
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
	bool enable);
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
	struct intel_dpll_hw_state *state);

unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
	int color_plane, unsigned int height);

/* intel_audio.c */
void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
void intel_audio_codec_enable(struct intel_encoder *encoder,
	const struct intel_crtc_state *crtc_state,
	const struct drm_connector_state *conn_state);
void intel_audio_codec_disable(struct intel_encoder *encoder,
	const struct intel_crtc_state *old_crtc_state,
	const struct drm_connector_state *old_conn_state);
void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
void intel_audio_init(struct drm_i915_private *dev_priv);
void intel_audio_deinit(struct drm_i915_private *dev_priv);

/* intel_cdclk.c */
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
void skl_init_cdclk(struct drm_i915_private *dev_priv);
void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
void cnl_init_cdclk(struct drm_i915_private *dev_priv);
void cnl_uninit_cdclk(struct drm_i915_private *dev_priv);
void bxt_init_cdclk(struct drm_i915_private *dev_priv);
void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
void icl_init_cdclk(struct drm_i915_private *dev_priv);
void icl_uninit_cdclk(struct drm_i915_private *dev_priv);
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
void intel_update_cdclk(struct drm_i915_private *dev_priv);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
	const struct intel_cdclk_state *b);
bool intel_cdclk_changed(const struct intel_cdclk_state *a,
	const struct intel_cdclk_state *b);
void intel_set_cdclk(struct drm_i915_private *dev_priv,
	const struct intel_cdclk_state *cdclk_state);
void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
	const char *context);

/* intel_display.c */
void intel_plane_destroy(struct drm_plane *plane);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
@ -1717,6 +1647,8 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv);
unsigned int intel_fb_xy_to_linear(int x, int y,
	const struct intel_plane_state *state,
	int plane);
unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
	int color_plane, unsigned int height);
void intel_add_fb_offsets(int *x, int *y,
	const struct intel_plane_state *state, int plane);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
@ -1884,117 +1816,9 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
	u32 pixel_format, u64 modifier,
	unsigned int rotation);

/* intel_connector.c */
int intel_connector_init(struct intel_connector *connector);
struct intel_connector *intel_connector_alloc(void);
void intel_connector_free(struct intel_connector *connector);
void intel_connector_destroy(struct drm_connector *connector);
int intel_connector_register(struct drm_connector *connector);
void intel_connector_unregister(struct drm_connector *connector);
void intel_connector_attach_encoder(struct intel_connector *connector,
	struct intel_encoder *encoder);
bool intel_connector_get_hw_state(struct intel_connector *connector);
enum pipe intel_connector_get_pipe(struct intel_connector *connector);
int intel_connector_update_modes(struct drm_connector *connector,
	struct edid *edid);
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
void intel_attach_force_audio_property(struct drm_connector *connector);
void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
void intel_attach_aspect_ratio_property(struct drm_connector *connector);
void intel_attach_colorspace_property(struct drm_connector *connector);

/* intel_csr.c */
void intel_csr_ucode_init(struct drm_i915_private *);
void intel_csr_load_program(struct drm_i915_private *);
void intel_csr_ucode_fini(struct drm_i915_private *);
void intel_csr_ucode_suspend(struct drm_i915_private *);
void intel_csr_ucode_resume(struct drm_i915_private *);

/* intel_dp.c */
struct link_config_limits {
	int min_clock, max_clock;
	int min_lane_count, max_lane_count;
	int min_bpp, max_bpp;
};
void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
	struct intel_crtc_state *pipe_config,
	struct link_config_limits *limits);
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
	const struct drm_connector_state *conn_state);
bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
	i915_reg_t dp_reg, enum port port,
	enum pipe *pipe);
bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
	enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
	struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
	int link_rate, u8 lane_count,
	bool link_mst);
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
	int link_rate, u8 lane_count);
/* intel_dp_link_training.c */
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
int intel_dp_retrain_link(struct intel_encoder *encoder,
	struct drm_modeset_acquire_ctx *ctx);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
	const struct intel_crtc_state *crtc_state,
	bool enable);
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
int intel_dp_compute_config(struct intel_encoder *encoder,
	struct intel_crtc_state *pipe_config,
	struct drm_connector_state *conn_state);
bool intel_dp_is_edp(struct intel_dp *intel_dp);
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
	bool long_hpd);
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
	const struct drm_connector_state *conn_state);
void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
int intel_dp_max_lane_count(struct intel_dp *intel_dp);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
void intel_plane_destroy(struct drm_plane *plane);
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
	const struct intel_crtc_state *crtc_state);
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
	const struct intel_crtc_state *crtc_state);
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
	unsigned int frontbuffer_bits);
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
	unsigned int frontbuffer_bits);

void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
	u8 dp_train_pat);
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp);
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
u8
intel_dp_voltage_max(struct intel_dp *intel_dp);
u8
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
	u8 *link_bw, u8 *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]);
u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
	int mode_clock, int mode_hdisplay);
u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
	int mode_hdisplay);

/* intel_vdsc.c */
int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
@ -2002,18 +1826,6 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
enum intel_display_power_domain
intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);

static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}

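A worked example of the lane-mask arithmetic above; the wrapper below is hypothetical, added only to illustrate the math, assuming this header's context.

/* Sketch, not part of the original file. */
static inline void intel_dp_unused_lane_mask_example(void)
{
	/* ~((1 << 2) - 1) & 0xf  ->  ~0x3 & 0xf  ->  0xc: lanes 2 and 3 idle */
	WARN_ON(intel_dp_unused_lane_mask(2) != 0xc);

	/* all four lanes driven -> nothing unused in the 4-bit mask */
	WARN_ON(intel_dp_unused_lane_mask(4) != 0x0);
}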
bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
bool intel_digital_port_connected(struct intel_encoder *encoder);
void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
	struct intel_digital_port *dig_port);

/* intel_dp_aux_backlight.c */
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);

@ -2029,109 +1841,11 @@ void icl_dsi_init(struct drm_i915_private *dev_priv);
/* intel_dsi_dcs_backlight.c */
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);

/* intel_dvo.c */
void intel_dvo_init(struct drm_i915_private *dev_priv);
/* intel_hotplug.c */
void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
bool intel_encoder_hotplug(struct intel_encoder *encoder,
	struct intel_connector *connector);

/* legacy fbdev emulation in intel_fbdev.c */
#ifdef CONFIG_DRM_FBDEV_EMULATION
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config_async(struct drm_device *dev);
extern void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
extern void intel_fbdev_fini(struct drm_i915_private *dev_priv);
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
extern void intel_fbdev_restore_mode(struct drm_device *dev);
#else
static inline int intel_fbdev_init(struct drm_device *dev)
{
	return 0;
}

static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
{
}

static inline void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
{
}

static inline void intel_fbdev_fini(struct drm_i915_private *dev_priv)
{
}

static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
}

static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}

static inline void intel_fbdev_restore_mode(struct drm_device *dev)
{
}
#endif

/* intel_fbc.c */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
	struct intel_atomic_state *state);
bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
void intel_fbc_pre_update(struct intel_crtc *crtc,
	struct intel_crtc_state *crtc_state,
	struct intel_plane_state *plane_state);
void intel_fbc_post_update(struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
void intel_fbc_enable(struct intel_crtc *crtc,
	struct intel_crtc_state *crtc_state,
	struct intel_plane_state *plane_state);
void intel_fbc_disable(struct intel_crtc *crtc);
void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
	unsigned int frontbuffer_bits,
	enum fb_op_origin origin);
void intel_fbc_flush(struct drm_i915_private *dev_priv,
	unsigned int frontbuffer_bits, enum fb_op_origin origin);
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv);

/* intel_hdmi.c */
void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
	enum port port);
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
	struct intel_connector *intel_connector);
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
int intel_hdmi_compute_config(struct intel_encoder *encoder,
	struct intel_crtc_state *pipe_config,
	struct drm_connector_state *conn_state);
bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
	struct drm_connector *connector,
	bool high_tmds_clock_ratio,
	bool scrambling);
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
	const struct intel_crtc_state *crtc_state);
u32 intel_hdmi_infoframe_enable(unsigned int type);
void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
	struct intel_crtc_state *crtc_state);
void intel_read_infoframe(struct intel_encoder *encoder,
	const struct intel_crtc_state *crtc_state,
	enum hdmi_infoframe_type type,
	union hdmi_infoframe *frame);

/* intel_lvds.c */
bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
	i915_reg_t lvds_reg, enum pipe *pipe);
void intel_lvds_init(struct drm_i915_private *dev_priv);
struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv);
bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv);

/* intel_overlay.c */
void intel_overlay_setup(struct drm_i915_private *dev_priv);
void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
@ -2142,92 +1856,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file_priv);
void intel_overlay_reset(struct drm_i915_private *dev_priv);


/* intel_panel.c */
int intel_panel_init(struct intel_panel *panel,
	struct drm_display_mode *fixed_mode,
	struct drm_display_mode *downclock_mode);
void intel_panel_fini(struct intel_panel *panel);
void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
	struct drm_display_mode *adjusted_mode);
void intel_pch_panel_fitting(struct intel_crtc *crtc,
	struct intel_crtc_state *pipe_config,
	int fitting_mode);
void intel_gmch_panel_fitting(struct intel_crtc *crtc,
	struct intel_crtc_state *pipe_config,
	int fitting_mode);
void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state,
	u32 level, u32 max);
int intel_panel_setup_backlight(struct drm_connector *connector,
	enum pipe pipe);
void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
	const struct drm_connector_state *conn_state);
void intel_panel_update_backlight(struct intel_encoder *encoder,
	const struct intel_crtc_state *crtc_state,
	const struct drm_connector_state *conn_state);
void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
struct drm_display_mode *
intel_panel_edid_downclock_mode(struct intel_connector *connector,
	const struct drm_display_mode *fixed_mode);
struct drm_display_mode *
intel_panel_edid_fixed_mode(struct intel_connector *connector);
struct drm_display_mode *
intel_panel_vbt_fixed_mode(struct intel_connector *connector);

#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
int intel_backlight_device_register(struct intel_connector *connector);
void intel_backlight_device_unregister(struct intel_connector *connector);
#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
static inline int intel_backlight_device_register(struct intel_connector *connector)
{
	return 0;
}
static inline void intel_backlight_device_unregister(struct intel_connector *connector)
{
}
#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */

/* intel_hdcp.c */
void intel_hdcp_atomic_check(struct drm_connector *connector,
	struct drm_connector_state *old_state,
	struct drm_connector_state *new_state);
int intel_hdcp_init(struct intel_connector *connector,
	const struct intel_hdcp_shim *hdcp_shim);
int intel_hdcp_enable(struct intel_connector *connector);
int intel_hdcp_disable(struct intel_connector *connector);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
bool intel_hdcp_capable(struct intel_connector *connector);
void intel_hdcp_component_init(struct drm_i915_private *dev_priv);
void intel_hdcp_component_fini(struct drm_i915_private *dev_priv);
void intel_hdcp_cleanup(struct intel_connector *connector);
void intel_hdcp_handle_cp_irq(struct intel_connector *connector);

/* intel_psr.c */
#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
void intel_psr_enable(struct intel_dp *intel_dp,
	const struct intel_crtc_state *crtc_state);
void intel_psr_disable(struct intel_dp *intel_dp,
	const struct intel_crtc_state *old_crtc_state);
void intel_psr_update(struct intel_dp *intel_dp,
	const struct intel_crtc_state *crtc_state);
int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 value);
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
	unsigned frontbuffer_bits,
	enum fb_op_origin origin);
void intel_psr_flush(struct drm_i915_private *dev_priv,
	unsigned frontbuffer_bits,
	enum fb_op_origin origin);
void intel_psr_init(struct drm_i915_private *dev_priv);
void intel_psr_compute_config(struct intel_dp *intel_dp,
	struct intel_crtc_state *crtc_state);
void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug);
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
	u32 *out_value);
bool intel_psr_enabled(struct intel_dp *intel_dp);

/* intel_quirks.c */
void intel_init_quirks(struct drm_i915_private *dev_priv);

@ -2377,102 +2005,6 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
	enum dpio_channel ch, bool override);


/* intel_pm.c */
void intel_init_clock_gating(struct drm_i915_private *dev_priv);
void intel_suspend_hw(struct drm_i915_private *dev_priv);
int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
void intel_update_watermarks(struct intel_crtc *crtc);
void intel_init_pm(struct drm_i915_private *dev_priv);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_i915_private *dev_priv);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct i915_request *rq);
void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
	struct skl_ddb_entry *ddb_y,
	struct skl_ddb_entry *ddb_uv);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
	struct skl_ddb_allocation *ddb /* out */);
void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
	struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
bool intel_can_enable_sagv(struct drm_atomic_state *state);
int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
bool skl_wm_level_equals(const struct skl_wm_level *l1,
	const struct skl_wm_level *l2);
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
	const struct skl_ddb_entry entries[],
	int num_entries, int ignore_idx);
void skl_write_plane_wm(struct intel_plane *plane,
	const struct intel_crtc_state *crtc_state);
void skl_write_cursor_wm(struct intel_plane *plane,
	const struct intel_crtc_state *crtc_state);
bool ilk_disable_lp_wm(struct drm_device *dev);
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
	struct intel_crtc_state *cstate);
void intel_init_ipc(struct drm_i915_private *dev_priv);
void intel_enable_ipc(struct drm_i915_private *dev_priv);

/* intel_sdvo.c */
bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
	i915_reg_t sdvo_reg, enum pipe *pipe);
bool intel_sdvo_init(struct drm_i915_private *dev_priv,
	i915_reg_t reg, enum port port);


/* intel_sprite.c */
bool is_planar_yuv_format(u32 pixelformat);
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
	int usecs);
struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
	enum pipe pipe, int plane);
int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file_priv);
void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
int intel_plane_check_stride(const struct intel_plane_state *plane_state);
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
struct intel_plane *
skl_universal_plane_create(struct drm_i915_private *dev_priv,
	enum pipe pipe, enum plane_id plane_id);

static inline bool icl_is_nv12_y_plane(enum plane_id id)
{
	/* Don't need to do a gen check, these planes are only available on gen11 */
	if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5)
		return true;

	return false;
}

static inline bool icl_is_hdr_plane(struct drm_i915_private *dev_priv,
	enum plane_id plane_id)
{
	if (INTEL_GEN(dev_priv) < 11)
		return false;

	return plane_id < PLANE_SPRITE2;
}

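Worked values for the two gen11 plane helpers above, taken from the plane_id enum ordering (a sketch, not patch text):

/*
 * icl_is_hdr_plane(i915, PLANE_PRIMARY) -> true: primary, sprite 0 and
 * sprite 1 sit below PLANE_SPRITE2 in the enum and are the HDR-capable
 * planes on gen11.
 * icl_is_hdr_plane(i915, PLANE_SPRITE4) -> false.
 * icl_is_nv12_y_plane(PLANE_SPRITE4)    -> true: sprites 4 and 5 double
 * as the Y (luma) planes for planar NV12 scanout.
 */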
/* intel_tv.c */
void intel_tv_init(struct drm_i915_private *dev_priv);

/* intel_atomic.c */
int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
	const struct drm_connector_state *state,
@ -2509,76 +2041,4 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
	struct intel_crtc *intel_crtc,
	struct intel_crtc_state *crtc_state);

/* intel_atomic_plane.c */
void intel_update_plane(struct intel_plane *plane,
	const struct intel_crtc_state *crtc_state,
	const struct intel_plane_state *plane_state);
void intel_update_slave(struct intel_plane *plane,
	const struct intel_crtc_state *crtc_state,
	const struct intel_plane_state *plane_state);
void intel_disable_plane(struct intel_plane *plane,
	const struct intel_crtc_state *crtc_state);
struct intel_plane *intel_plane_alloc(void);
void intel_plane_free(struct intel_plane *plane);
struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
	struct drm_plane_state *state);
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
void skl_update_planes_on_crtc(struct intel_atomic_state *state,
	struct intel_crtc *crtc);
void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
	struct intel_crtc *crtc);
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
	struct intel_crtc_state *crtc_state,
	const struct intel_plane_state *old_plane_state,
	struct intel_plane_state *intel_state);

/* intel_color.c */
void intel_color_init(struct intel_crtc *crtc);
int intel_color_check(struct intel_crtc_state *crtc_state);
void intel_color_commit(const struct intel_crtc_state *crtc_state);
void intel_color_load_luts(const struct intel_crtc_state *crtc_state);

/* intel_lspcon.c */
bool lspcon_init(struct intel_digital_port *intel_dig_port);
void lspcon_resume(struct intel_lspcon *lspcon);
void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
void lspcon_write_infoframe(struct intel_encoder *encoder,
	const struct intel_crtc_state *crtc_state,
	unsigned int type,
	const void *buf, ssize_t len);
void lspcon_read_infoframe(struct intel_encoder *encoder,
	const struct intel_crtc_state *crtc_state,
	unsigned int type,
	void *frame, ssize_t len);
void lspcon_set_infoframes(struct intel_encoder *encoder,
	bool enable,
	const struct intel_crtc_state *crtc_state,
	const struct drm_connector_state *conn_state);
u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
	const struct intel_crtc_state *pipe_config);
void lspcon_ycbcr420_config(struct drm_connector *connector,
	struct intel_crtc_state *crtc_state);

/* intel_pipe_crc.c */
#ifdef CONFIG_DEBUG_FS
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
	const char *source_name, size_t *values_cnt);
const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
	size_t *count);
void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
#else
#define intel_crtc_set_crc_source NULL
#define intel_crtc_verify_crc_source NULL
#define intel_crtc_get_crc_sources NULL
static inline void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
{
}

static inline void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
{
}
#endif
#endif /* __INTEL_DRV_H__ */

@ -24,14 +24,20 @@
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */

#include <linux/i2c.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#include "dvo.h"
#include "i915_drv.h"
#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_dvo.h"
#include "intel_panel.h"

#define SIL164_ADDR 0x38
#define CH7xxx_ADDR 0x76
13 drivers/gpu/drm/i915/intel_dvo.h Normal file
@ -0,0 +1,13 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_DVO_H__
#define __INTEL_DVO_H__

struct drm_i915_private;

void intel_dvo_init(struct drm_i915_private *dev_priv);

#endif /* __INTEL_DVO_H__ */
@ -753,6 +753,30 @@ err_unpin:
	return ret;
}

void intel_gt_resume(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * After resume, we may need to poke into the pinned kernel
	 * contexts to paper over any damage caused by the sudden suspend.
	 * Only the kernel contexts should remain pinned over suspend,
	 * allowing us to fixup the user contexts on their first pin.
	 */
	for_each_engine(engine, i915, id) {
		struct intel_context *ce;

		ce = engine->kernel_context;
		if (ce)
			ce->ops->reset(ce);

		ce = engine->preempt_context;
		if (ce)
			ce->ops->reset(ce);
	}
}

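A brief cross-reference for the reset hook used above (a gloss, not patch text):

/*
 * The ce->ops->reset() calls above land in the execlists_context_reset()
 * hook introduced later in this diff, which rewinds the ring via
 * intel_ring_reset(ce->ring, 0) and rewrites the register state, so a
 * stale RING_HEAD left over from suspend is never executed.
 */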
/**
 * intel_engines_cleanup_common - cleans up the engine state created by
 * the common initializers.
@ -1381,40 +1405,33 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
	if (HAS_EXECLISTS(dev_priv)) {
		const u32 *hws =
			&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
		const u8 num_entries = execlists->csb_size;
		unsigned int idx;
		u8 read, write;

		drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
		drm_printf(m, "\tExeclist status: 0x%08x %08x, entries %u\n",
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
			   num_entries);

		read = execlists->csb_head;
		write = READ_ONCE(*execlists->csb_write);

		drm_printf(m, "\tExeclist CSB read %d, write %d [mmio:%d], tasklet queued? %s (%s)\n",
		drm_printf(m, "\tExeclist CSB read %d, write %d, tasklet queued? %s (%s)\n",
			   read, write,
			   GEN8_CSB_WRITE_PTR(ENGINE_READ(engine, RING_CONTEXT_STATUS_PTR)),
			   yesno(test_bit(TASKLET_STATE_SCHED,
					  &engine->execlists.tasklet.state)),
			   enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
		if (read >= GEN8_CSB_ENTRIES)
		if (read >= num_entries)
			read = 0;
		if (write >= GEN8_CSB_ENTRIES)
		if (write >= num_entries)
			write = 0;
		if (read > write)
			write += GEN8_CSB_ENTRIES;
			write += num_entries;
		while (read < write) {
			idx = ++read % GEN8_CSB_ENTRIES;
			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [mmio:0x%08x], context: %d [mmio:%d]\n",
				   idx,
				   hws[idx * 2],
				   ENGINE_READ_IDX(engine,
						   RING_CONTEXT_STATUS_BUF_LO,
						   idx),
				   hws[idx * 2 + 1],
				   ENGINE_READ_IDX(engine,
						   RING_CONTEXT_STATUS_BUF_HI,
						   idx));
			idx = ++read % num_entries;
			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
				   idx, hws[idx * 2], hws[idx * 2 + 1]);
		}

		rcu_read_lock();
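A sketch of the wrap-around walk in the CSB dump above, with assumed example values:

/*
 * With the new Icelake FIFO (csb_size == 12), suppose read == 10 and
 * write == 2: read > write bumps write to 14, and "idx = ++read %
 * num_entries" visits 11, 0, 1 and 2, i.e. the four pending entries in
 * the order the hardware wrote them. The pre-gen11 six-entry FIFO walks
 * the same way with num_entries == 6.
 */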
@ -13,8 +13,10 @@
#include <linux/list.h>
#include <linux/types.h>

#include "i915_gem.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
#include "i915_timeline_types.h"
#include "intel_device_info.h"
#include "intel_workarounds_types.h"

#include "i915_gem_batch_pool.h"
@ -25,12 +27,16 @@

#define I915_CMD_HASH_ORDER 9

struct dma_fence;
struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
struct intel_uncore;

typedef u8 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)

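A note on the new mask type above, with a compile-time guard one could add (a sketch, not part of the patch):

/*
 * Casting ~0ul to u8 makes ALL_ENGINES 0xff, one bit per engine for at
 * most eight engines; hangcheck below swaps its "unsigned int tmp"
 * iterator for this type for the same reason. A guard one could drop
 * into any function that iterates the mask:
 *
 *	BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_engine_mask_t));
 */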
struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *addr;
@ -105,8 +111,9 @@ enum intel_engine_id {
	VCS3,
#define _VCS(n) (VCS0 + (n))
	VECS0,
	VECS1
	VECS1,
#define _VECS(n) (VECS0 + (n))
	I915_NUM_ENGINES
};

struct st_preempt_hang {
@ -239,6 +246,11 @@ struct intel_engine_execlists {
	 */
	u32 preempt_complete_status;

	/**
	 * @csb_size: context status buffer FIFO size
	 */
	u8 csb_size;

	/**
	 * @csb_head: context status buffer head
	 */
@ -425,6 +437,7 @@ struct intel_engine_cs {
#define I915_ENGINE_SUPPORTS_STATS BIT(1)
#define I915_ENGINE_HAS_PREEMPTION BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
	unsigned int flags;

	/*
@ -508,6 +521,12 @@ intel_engine_has_semaphores(const struct intel_engine_cs *engine)
	return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
}

static inline bool
intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}

#define instdone_slice_mask(dev_priv__) \
	(IS_GEN(dev_priv__, 7) ? \
	 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
@ -40,8 +40,10 @@

#include <drm/drm_fourcc.h>

#include "intel_drv.h"
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"

static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
42 drivers/gpu/drm/i915/intel_fbc.h Normal file
@ -0,0 +1,42 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_FBC_H__
#define __INTEL_FBC_H__

#include <linux/types.h>

#include "intel_frontbuffer.h"

struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_plane_state;

void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
	struct intel_atomic_state *state);
bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
void intel_fbc_pre_update(struct intel_crtc *crtc,
	struct intel_crtc_state *crtc_state,
	struct intel_plane_state *plane_state);
void intel_fbc_post_update(struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
void intel_fbc_enable(struct intel_crtc *crtc,
	struct intel_crtc_state *crtc_state,
	struct intel_plane_state *plane_state);
void intel_fbc_disable(struct intel_crtc *crtc);
void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
	unsigned int frontbuffer_bits,
	enum fb_op_origin origin);
void intel_fbc_flush(struct drm_i915_private *dev_priv,
	unsigned int frontbuffer_bits, enum fb_op_origin origin);
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv);

#endif /* __INTEL_FBC_H__ */
@ -25,26 +25,27 @@
 */

#include <linux/async.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/vga_switcheroo.h>

#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>

#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_fbdev.h"
#include "intel_frontbuffer.h"

static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
{
53 drivers/gpu/drm/i915/intel_fbdev.h Normal file
@ -0,0 +1,53 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_FBDEV_H__
#define __INTEL_FBDEV_H__

#include <linux/types.h>

struct drm_device;
struct drm_i915_private;

#ifdef CONFIG_DRM_FBDEV_EMULATION
int intel_fbdev_init(struct drm_device *dev);
void intel_fbdev_initial_config_async(struct drm_device *dev);
void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
void intel_fbdev_fini(struct drm_i915_private *dev_priv);
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
void intel_fbdev_output_poll_changed(struct drm_device *dev);
void intel_fbdev_restore_mode(struct drm_device *dev);
#else
static inline int intel_fbdev_init(struct drm_device *dev)
{
	return 0;
}

static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
{
}

static inline void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
{
}

static inline void intel_fbdev_fini(struct drm_i915_private *dev_priv)
{
}

static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
}

static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}

static inline void intel_fbdev_restore_mode(struct drm_device *dev)
{
}
#endif

#endif /* __INTEL_FBDEV_H__ */
@ -27,6 +27,7 @@

#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_fbc.h"

/**
 * DOC: fifo underrun handling
@ -61,9 +61,12 @@
 */


#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "i915_drv.h"
#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
#include "intel_psr.h"

void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
	enum fb_op_origin origin,
@ -24,9 +24,19 @@
#ifndef __INTEL_FRONTBUFFER_H__
#define __INTEL_FRONTBUFFER_H__

#include "i915_gem_object.h"

struct drm_i915_private;
struct drm_i915_gem_object;

enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
};

void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
	unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
@ -567,7 +567,7 @@ static void inject_preempt_context(struct work_struct *work)
					     preempt_work[engine->id]);
	struct intel_guc_client *client = guc->preempt_client;
	struct guc_stage_desc *stage_desc = __get_stage_desc(client);
	struct intel_context *ce = intel_context_lookup(client->owner, engine);
	struct intel_context *ce = engine->preempt_context;
	u32 data[7];

	if (!ce->ring->emit) { /* recreate upon load/resume */
@ -650,9 +650,10 @@ static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
	struct guc_ctx_report *report =
		&data->preempt_ctx_report[engine->guc_id];

	WARN_ON(wait_for_atomic(report->report_return_status ==
				INTEL_GUC_REPORT_STATUS_COMPLETE,
				GUC_PREEMPT_POSTPROCESS_DELAY_MS));
	if (wait_for_atomic(report->report_return_status ==
			    INTEL_GUC_REPORT_STATUS_COMPLETE,
			    GUC_PREEMPT_POSTPROCESS_DELAY_MS))
		DRM_ERROR("Timed out waiting for GuC preemption report\n");
	/*
	 * GuC is expecting that we're also going to clear the affected context
	 * counter, let's also reset the return status to not depend on GuC
@ -871,6 +872,104 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
		flush_workqueue(engine->i915->guc.preempt_wq);
}

static void guc_reset(struct intel_engine_cs *engine, bool stalled)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request *rq;
	unsigned long flags;

	spin_lock_irqsave(&engine->timeline.lock, flags);

	execlists_cancel_port_requests(execlists);

	/* Push back any incomplete requests for replay after the reset. */
	rq = execlists_unwind_incomplete_requests(execlists);
	if (!rq)
		goto out_unlock;

	if (!i915_request_started(rq))
		stalled = false;

	i915_reset_request(rq, stalled);
	intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);

out_unlock:
	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

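A short summary of the replay logic above (my reading of the flow):

/*
 * The unwind pushes every request the HW had in flight back onto the
 * priority queue and hands back the oldest incomplete one; only if that
 * request had actually started executing is the hang blamed on it
 * (i915_reset_request() with stalled set), and the context image is
 * rewritten so replay resumes from rq->head instead of re-running
 * already-completed commands.
 */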
static void guc_cancel_requests(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request *rq, *rn;
	struct rb_node *rb;
	unsigned long flags;

	GEM_TRACE("%s\n", engine->name);

	/*
	 * Before we call engine->cancel_requests(), we should have exclusive
	 * access to the submission state. This is arranged for us by the
	 * caller disabling the interrupt generation, the tasklet and other
	 * threads that may then access the same state, giving us a free hand
	 * to reset state. However, we still need to let lockdep be aware that
	 * we know this state may be accessed in hardirq context, so we
	 * disable the irq around this manipulation and we want to keep
	 * the spinlock focused on its duties and not accidentally conflate
	 * coverage to the submission's irq state. (Similarly, although we
	 * shouldn't need to disable irq around the manipulation of the
	 * submission's irq state, we also wish to remind ourselves that
	 * it is irq state.)
	 */
	spin_lock_irqsave(&engine->timeline.lock, flags);

	/* Cancel the requests on the HW and clear the ELSP tracker. */
	execlists_cancel_port_requests(execlists);

	/* Mark all executing requests as skipped. */
	list_for_each_entry(rq, &engine->timeline.requests, link) {
		if (!i915_request_signaled(rq))
			dma_fence_set_error(&rq->fence, -EIO);

		i915_request_mark_complete(rq);
	}

	/* Flush the queued requests to the timeline list (for retiring). */
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
			list_del_init(&rq->sched.link);
			__i915_request_submit(rq);
			dma_fence_set_error(&rq->fence, -EIO);
			i915_request_mark_complete(rq);
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}

	/* Remaining _unready_ requests will be nop'ed when submitted */

	execlists->queue_priority_hint = INT_MIN;
	execlists->queue = RB_ROOT_CACHED;
	GEM_BUG_ON(port_isset(execlists->port));

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

static void guc_reset_finish(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	if (__tasklet_enable(&execlists->tasklet))
		/* And kick in case we missed a new request submission. */
		tasklet_hi_schedule(&execlists->tasklet);

	GEM_TRACE("%s: depth->%d\n", engine->name,
		  atomic_read(&execlists->tasklet.count));
}

/*
 * Everything below here is concerned with setup & teardown, and is
 * therefore not part of the somewhat time-critical batch-submission
@ -1262,10 +1361,12 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
static void guc_submission_park(struct intel_engine_cs *engine)
{
	intel_engine_unpin_breadcrumbs_irq(engine);
	engine->flags &= ~I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}

static void guc_submission_unpark(struct intel_engine_cs *engine)
{
	engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
	intel_engine_pin_breadcrumbs_irq(engine);
}

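A note on the park/unpark flag dance above (a gloss, not patch text):

/*
 * Parking releases the breadcrumb interrupt and then clears
 * I915_ENGINE_NEEDS_BREADCRUMB_TASKLET; unparking sets the flag before
 * re-pinning the irq. The flag is therefore only observed set while the
 * engine is awake and the GuC backend needs the tasklet to signal
 * breadcrumbs.
 */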
@ -1290,6 +1391,10 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
	engine->unpark = guc_submission_unpark;

	engine->reset.prepare = guc_reset_prepare;
	engine->reset.reset = guc_reset;
	engine->reset.finish = guc_reset_finish;

	engine->cancel_requests = guc_cancel_requests;

	engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
}
@ -29,6 +29,7 @@

#include "i915_gem.h"
#include "i915_selftest.h"
#include "intel_engine_types.h"

struct drm_i915_private;

@ -221,8 +221,8 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915,
	unsigned int stuck)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;
	char msg[80];
	unsigned int tmp;
	int len;

	/* If some rings hung but others were still busy, only
@ -6,14 +6,16 @@
 * Sean Paul <seanpaul@chromium.org>
 */

#include <drm/drm_hdcp.h>
#include <drm/i915_component.h>
#include <linux/component.h>
#include <linux/i2c.h>
#include <linux/random.h>
#include <linux/component.h>

#include "intel_drv.h"
#include <drm/drm_hdcp.h>
#include <drm/i915_component.h>

#include "i915_reg.h"
#include "intel_drv.h"
#include "intel_hdcp.h"

#define KEY_LOAD_TRIES 5
#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
33 drivers/gpu/drm/i915/intel_hdcp.h Normal file
@ -0,0 +1,33 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_HDCP_H__
#define __INTEL_HDCP_H__

#include <linux/types.h>

#include <drm/i915_drm.h>

struct drm_connector;
struct drm_connector_state;
struct drm_i915_private;
struct intel_connector;
struct intel_hdcp_shim;

void intel_hdcp_atomic_check(struct drm_connector *connector,
	struct drm_connector_state *old_state,
	struct drm_connector_state *new_state);
int intel_hdcp_init(struct intel_connector *connector,
	const struct intel_hdcp_shim *hdcp_shim);
int intel_hdcp_enable(struct intel_connector *connector);
int intel_hdcp_disable(struct intel_connector *connector);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
bool intel_hdcp_capable(struct intel_connector *connector);
void intel_hdcp_component_init(struct drm_i915_private *dev_priv);
void intel_hdcp_component_fini(struct drm_i915_private *dev_priv);
void intel_hdcp_cleanup(struct intel_connector *connector);
void intel_hdcp_handle_cp_irq(struct intel_connector *connector);

#endif /* __INTEL_HDCP_H__ */
@ -26,19 +26,30 @@
 * Jesse Barnes <jesse.barnes@intel.com>
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_scdc_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include <drm/intel_lpe_audio.h>

#include "i915_drv.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_lspcon.h"
#include "intel_sdvo.h"
#include "intel_panel.h"

static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
{
51 drivers/gpu/drm/i915/intel_hdmi.h Normal file
@ -0,0 +1,51 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_HDMI_H__
#define __INTEL_HDMI_H__

#include <linux/hdmi.h>
#include <linux/types.h>

#include <drm/i915_drm.h>

#include "i915_reg.h"

struct drm_connector;
struct drm_encoder;
struct drm_i915_private;
struct intel_connector;
struct intel_digital_port;
struct intel_encoder;
struct intel_crtc_state;
struct intel_hdmi;
struct drm_connector_state;
union hdmi_infoframe;

void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
	enum port port);
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
	struct intel_connector *intel_connector);
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
int intel_hdmi_compute_config(struct intel_encoder *encoder,
	struct intel_crtc_state *pipe_config,
	struct drm_connector_state *conn_state);
bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
	struct drm_connector *connector,
	bool high_tmds_clock_ratio,
	bool scrambling);
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
	const struct intel_crtc_state *crtc_state);
u32 intel_hdmi_infoframe_enable(unsigned int type);
void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
	struct intel_crtc_state *crtc_state);
void intel_read_infoframe(struct intel_encoder *encoder,
	const struct intel_crtc_state *crtc_state,
	enum hdmi_infoframe_type type,
	union hdmi_infoframe *frame);

#endif /* __INTEL_HDMI_H__ */
@ -233,7 +233,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
{
	int last_prio;

	if (!intel_engine_has_preemption(engine))
	if (!engine->preempt_context)
		return false;

	if (i915_request_completed(rq))
@ -429,13 +429,13 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
	return active;
}

void
struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
{
	struct intel_engine_cs *engine =
		container_of(execlists, typeof(*engine), execlists);

	__unwind_incomplete_requests(engine);
	return __unwind_incomplete_requests(engine);
}

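A note on the changed return type above (my reading):

/*
 * Returning the oldest incomplete request from the unwind lets the new
 * guc_reset() added earlier in this diff pick its replay point directly
 * instead of re-scanning the timeline; the execlists reset path obtains
 * the same information from the identical helper.
 */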
static inline void
@ -893,96 +893,6 @@ invalidate_csb_entries(const u32 *first, const u32 *last)
	clflush((void *)last);
}

static void reset_csb_pointers(struct intel_engine_execlists *execlists)
{
	const unsigned int reset_value = GEN8_CSB_ENTRIES - 1;

	/*
	 * After a reset, the HW starts writing into CSB entry [0]. We
	 * therefore have to set our HEAD pointer back one entry so that
	 * the *first* entry we check is entry 0. To complicate this further,
	 * as we don't wait for the first interrupt after reset, we have to
	 * fake the HW write to point back to the last entry so that our
	 * inline comparison of our cached head position against the last HW
	 * write works even before the first interrupt.
	 */
	execlists->csb_head = reset_value;
	WRITE_ONCE(*execlists->csb_write, reset_value);

	invalidate_csb_entries(&execlists->csb_status[0],
			       &execlists->csb_status[GEN8_CSB_ENTRIES - 1]);
}

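A gloss on the head-pointer math in the block removed above (not patch text):

/*
 * Parking csb_head at the last entry means the very first "++head" wrap
 * in process_csb() lands on entry 0, the slot the hardware writes first
 * after a reset; faking the HW write pointer to the same value keeps the
 * cached-head comparison valid before the first interrupt. The
 * hard-coded GEN8_CSB_ENTRIES (6) sizing here is what the series
 * replaces with the per-engine execlists->csb_size.
 */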
static void nop_submission_tasklet(unsigned long data)
{
	/* The driver is wedged; don't process any more events. */
}

static void execlists_cancel_requests(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request *rq, *rn;
	struct rb_node *rb;
	unsigned long flags;

	GEM_TRACE("%s\n", engine->name);

	/*
	 * Before we call engine->cancel_requests(), we should have exclusive
	 * access to the submission state. This is arranged for us by the
	 * caller disabling the interrupt generation, the tasklet and other
	 * threads that may then access the same state, giving us a free hand
	 * to reset state. However, we still need to let lockdep be aware that
	 * we know this state may be accessed in hardirq context, so we
	 * disable the irq around this manipulation and we want to keep
	 * the spinlock focused on its duties and not accidentally conflate
	 * coverage to the submission's irq state. (Similarly, although we
	 * shouldn't need to disable irq around the manipulation of the
	 * submission's irq state, we also wish to remind ourselves that
	 * it is irq state.)
	 */
	spin_lock_irqsave(&engine->timeline.lock, flags);

	/* Cancel the requests on the HW and clear the ELSP tracker. */
	execlists_cancel_port_requests(execlists);
	execlists_user_end(execlists);

	/* Mark all executing requests as skipped. */
	list_for_each_entry(rq, &engine->timeline.requests, link) {
		if (!i915_request_signaled(rq))
			dma_fence_set_error(&rq->fence, -EIO);

		i915_request_mark_complete(rq);
	}

	/* Flush the queued requests to the timeline list (for retiring). */
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
			list_del_init(&rq->sched.link);
			__i915_request_submit(rq);
			dma_fence_set_error(&rq->fence, -EIO);
			i915_request_mark_complete(rq);
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}

	/* Remaining _unready_ requests will be nop'ed when submitted */

	execlists->queue_priority_hint = INT_MIN;
	execlists->queue = RB_ROOT_CACHED;
	GEM_BUG_ON(port_isset(execlists->port));

	GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
	execlists->tasklet.func = nop_submission_tasklet;

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

static inline bool
reset_in_progress(const struct intel_engine_execlists *execlists)
{
@ -994,6 +904,7 @@ static void process_csb(struct intel_engine_cs *engine)
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct execlist_port *port = execlists->port;
	const u32 * const buf = execlists->csb_status;
	const u8 num_entries = execlists->csb_size;
	u8 head, tail;

	lockdep_assert_held(&engine->timeline.lock);
@ -1029,7 +940,7 @@ static void process_csb(struct intel_engine_cs *engine)
		unsigned int status;
		unsigned int count;

		if (++head == GEN8_CSB_ENTRIES)
		if (++head == num_entries)
			head = 0;

/*
|
||||
@ -1151,7 +1062,7 @@ static void process_csb(struct intel_engine_cs *engine)
|
||||
* the wash as hardware, working or not, will need to do the
|
||||
* invalidation before.
|
||||
*/
|
||||
invalidate_csb_entries(&buf[0], &buf[GEN8_CSB_ENTRIES - 1]);
|
||||
invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
|
||||
}
|
||||
|
||||
static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
|
||||
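The GEN8_CSB_ENTRIES to num_entries change above is plain modular ring-buffer indexing, with the FIFO depth now chosen per gen. A minimal stand-alone C sketch of that consumption loop; the status dword values and the consume_csb name are invented for illustration, only the wrap logic mirrors process_csb():

#include <stdio.h>

/* Depths taken from the GEN8/GEN11 defines in this series. */
#define GEN8_CSB_ENTRIES  6
#define GEN11_CSB_ENTRIES 12

/* Walk the entries "written by HW" between head and tail, wrapping
 * at num_entries exactly as process_csb() does. */
static void consume_csb(const unsigned int *buf, unsigned int num_entries,
			unsigned int head, unsigned int tail)
{
	while (head != tail) {
		if (++head == num_entries)
			head = 0;
		printf("entry %u: status 0x%08x\n", head, buf[head]);
	}
}

int main(void)
{
	unsigned int csb[GEN11_CSB_ENTRIES] = { 0x8002, 0x0001 };

	/* After reset, head starts at num_entries - 1 so that the first
	 * entry examined is entry 0 (see reset_csb_pointers() below). */
	consume_csb(csb, GEN11_CSB_ENTRIES, GEN11_CSB_ENTRIES - 1, 1);
	return 0;
}

Starting head one slot before entry 0 is what lets the cached-head versus last-HW-write comparison work even before the first interrupt after a reset.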
@@ -1379,9 +1290,33 @@ static int execlists_context_pin(struct intel_context *ce)
 	return __execlists_context_pin(ce, ce->engine);
 }
 
+static void execlists_context_reset(struct intel_context *ce)
+{
+	/*
+	 * Because we emit WA_TAIL_DWORDS there may be a disparity
+	 * between our bookkeeping in ce->ring->head and ce->ring->tail and
+	 * that stored in context. As we only write new commands from
+	 * ce->ring->tail onwards, everything before that is junk. If the GPU
+	 * starts reading from its RING_HEAD from the context, it may try to
+	 * execute that junk and die.
+	 *
+	 * The contexts that are still pinned on resume belong to the
+	 * kernel, and are local to each engine. All other contexts will
+	 * have their head/tail sanitized upon pinning before use, so they
+	 * will never see garbage.
+	 *
+	 * So to avoid that we reset the context images upon resume. For
+	 * simplicity, we just zero everything out.
+	 */
+	intel_ring_reset(ce->ring, 0);
+	__execlists_update_reg_state(ce, ce->engine);
+}
+
 static const struct intel_context_ops execlists_context_ops = {
 	.pin = execlists_context_pin,
 	.unpin = execlists_context_unpin,
 
+	.reset = execlists_context_reset,
 	.destroy = execlists_context_destroy,
 };
@@ -1451,10 +1386,11 @@ static int emit_pdps(struct i915_request *rq)
 	*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
 	for (i = GEN8_3LVL_PDPES; i--; ) {
 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+		u32 base = engine->mmio_base;
 
-		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
+		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
 		*cs++ = upper_32_bits(pd_daddr);
-		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
+		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
 		*cs++ = lower_32_bits(pd_daddr);
 	}
 	*cs++ = MI_NOOP;
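MI_LOAD_REGISTER_IMM consumes its payload as (register offset, value) dword pairs, which is why each PDP entry above emits an offset dword followed by one half of the 64-bit page-directory address. A stand-alone sketch of that pairing; the PDP_LDW/PDP_UDW offsets, base and address values here are made up for illustration, modelled on but not guaranteed to match GEN8_RING_PDP_*:

#include <stdint.h>
#include <stdio.h>

/* Illustrative offsets relative to a fake engine mmio base. */
#define PDP_LDW(base, n) ((base) + 0x270 + (n) * 8)
#define PDP_UDW(base, n) ((base) + 0x270 + (n) * 8 + 4)

int main(void)
{
	uint32_t cmds[8], *cs = cmds;
	const uint64_t pd_daddr = 0x12345000ull; /* fake page-directory address */
	const uint32_t base = 0x2000;            /* fake engine mmio_base */
	const int n = 3;

	/* Each LRI entry is an offset dword followed by a value dword. */
	*cs++ = PDP_UDW(base, n);
	*cs++ = (uint32_t)(pd_daddr >> 32); /* upper_32_bits() */
	*cs++ = PDP_LDW(base, n);
	*cs++ = (uint32_t)pd_daddr;         /* lower_32_bits() */

	for (uint32_t *p = cmds; p < cs; p += 2)
		printf("reg 0x%05x <- 0x%08x\n", (unsigned)p[0], (unsigned)p[1]);
	return 0;
}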
@@ -1823,17 +1759,9 @@ static void enable_execlists(struct intel_engine_cs *engine)
 
 	intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
 
-	/*
-	 * Make sure we're not enabling the new 12-deep CSB
-	 * FIFO as that requires a slightly updated handling
-	 * in the ctx switch irq. Since we're currently only
-	 * using only 2 elements of the enhanced execlists the
-	 * deeper FIFO it's not needed and it's not worth adding
-	 * more statements to the irq handler to support it.
-	 */
 	if (INTEL_GEN(dev_priv) >= 11)
 		I915_WRITE(RING_MODE_GEN7(engine),
-			   _MASKED_BIT_DISABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
+			   _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
 	else
 		I915_WRITE(RING_MODE_GEN7(engine),
 			   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
@@ -1903,7 +1831,6 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
 
 	/* And flush any current direct submission. */
 	spin_lock_irqsave(&engine->timeline.lock, flags);
-	process_csb(engine); /* drain preemption events */
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
@@ -1924,14 +1851,48 @@ static bool lrc_regs_ok(const struct i915_request *rq)
 	return true;
 }
 
-static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
+static void reset_csb_pointers(struct intel_engine_execlists *execlists)
+{
+	const unsigned int reset_value = execlists->csb_size - 1;
+
+	/*
+	 * After a reset, the HW starts writing into CSB entry [0]. We
+	 * therefore have to set our HEAD pointer back one entry so that
+	 * the *first* entry we check is entry 0. To complicate this further,
+	 * as we don't wait for the first interrupt after reset, we have to
+	 * fake the HW write to point back to the last entry so that our
+	 * inline comparison of our cached head position against the last HW
+	 * write works even before the first interrupt.
+	 */
+	execlists->csb_head = reset_value;
+	WRITE_ONCE(*execlists->csb_write, reset_value);
+	wmb(); /* Make sure this is visible to HW (paranoia?) */
+
+	invalidate_csb_entries(&execlists->csb_status[0],
+			       &execlists->csb_status[reset_value]);
+}
+
+static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct intel_context *ce;
 	struct i915_request *rq;
-	unsigned long flags;
 	u32 *regs;
 
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	process_csb(engine); /* drain preemption events */
 
+	/* Following the reset, we need to reload the CSB read/write pointers */
+	reset_csb_pointers(&engine->execlists);
+
+	/*
+	 * Save the currently executing context, even if we completed
+	 * its request, it was still running at the time of the
+	 * reset and will have been clobbered.
+	 */
+	if (!port_isset(execlists->port))
+		goto out_clear;
+
+	ce = port_request(execlists->port)->hw_context;
 
 	/*
 	 * Catch up with any missed context-switch interrupts.
@@ -1946,12 +1907,13 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
 
 	/* Push back any incomplete requests for replay after the reset. */
 	rq = __unwind_incomplete_requests(engine);
 
-	/* Following the reset, we need to reload the CSB read/write pointers */
-	reset_csb_pointers(&engine->execlists);
-
 	if (!rq)
-		goto out_unlock;
+		goto out_replay;
 
+	if (rq->hw_context != ce) { /* caught just before a CS event */
+		rq = NULL;
+		goto out_replay;
+	}
 
 	/*
 	 * If this request hasn't started yet, e.g. it is waiting on a
@@ -1966,7 +1928,7 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	 * perfectly and we do not need to flag the result as being erroneous.
 	 */
 	if (!i915_request_started(rq) && lrc_regs_ok(rq))
-		goto out_unlock;
+		goto out_replay;
 
 	/*
 	 * If the request was innocent, we leave the request in the ELSP
@@ -1981,7 +1943,7 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	 */
 	i915_reset_request(rq, stalled);
 	if (!stalled && lrc_regs_ok(rq))
-		goto out_unlock;
+		goto out_replay;
 
 	/*
 	 * We want a simple context + ring to execute the breadcrumb update.
@@ -1991,21 +1953,103 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	 * future request will be after userspace has had the opportunity
 	 * to recreate its own state.
 	 */
-	regs = rq->hw_context->lrc_reg_state;
+	regs = ce->lrc_reg_state;
 	if (engine->pinned_default_state) {
 		memcpy(regs, /* skip restoring the vanilla PPHWSP */
 		       engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
 		       engine->context_size - PAGE_SIZE);
 	}
+	execlists_init_reg_state(regs, ce, engine, ce->ring);
 
 	/* Rerun the request; its payload has been neutered (if guilty). */
-	rq->ring->head = intel_ring_wrap(rq->ring, rq->head);
-	intel_ring_update_space(rq->ring);
+out_replay:
+	ce->ring->head =
+		rq ? intel_ring_wrap(ce->ring, rq->head) : ce->ring->tail;
+	intel_ring_update_space(ce->ring);
+	__execlists_update_reg_state(ce, engine);
 
-	execlists_init_reg_state(regs, rq->hw_context, engine, rq->ring);
-	__execlists_update_reg_state(rq->hw_context, engine);
+out_clear:
+	execlists_clear_all_active(execlists);
+}
+
+static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
+{
+	unsigned long flags;
+
+	GEM_TRACE("%s\n", engine->name);
+
+	spin_lock_irqsave(&engine->timeline.lock, flags);
+
+	__execlists_reset(engine, stalled);
+
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+}
+
+static void nop_submission_tasklet(unsigned long data)
+{
+	/* The driver is wedged; don't process any more events. */
+}
+
+static void execlists_cancel_requests(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct i915_request *rq, *rn;
+	struct rb_node *rb;
+	unsigned long flags;
+
+	GEM_TRACE("%s\n", engine->name);
+
+	/*
+	 * Before we call engine->cancel_requests(), we should have exclusive
+	 * access to the submission state. This is arranged for us by the
+	 * caller disabling the interrupt generation, the tasklet and other
+	 * threads that may then access the same state, giving us a free hand
+	 * to reset state. However, we still need to let lockdep be aware that
+	 * we know this state may be accessed in hardirq context, so we
+	 * disable the irq around this manipulation and we want to keep
+	 * the spinlock focused on its duties and not accidentally conflate
+	 * coverage to the submission's irq state. (Similarly, although we
+	 * shouldn't need to disable irq around the manipulation of the
+	 * submission's irq state, we also wish to remind ourselves that
+	 * it is irq state.)
+	 */
+	spin_lock_irqsave(&engine->timeline.lock, flags);
+
+	__execlists_reset(engine, true);
+
+	/* Mark all executing requests as skipped. */
+	list_for_each_entry(rq, &engine->timeline.requests, link) {
+		if (!i915_request_signaled(rq))
+			dma_fence_set_error(&rq->fence, -EIO);
+
+		i915_request_mark_complete(rq);
+	}
+
+	/* Flush the queued requests to the timeline list (for retiring). */
+	while ((rb = rb_first_cached(&execlists->queue))) {
+		struct i915_priolist *p = to_priolist(rb);
+		int i;
+
+		priolist_for_each_request_consume(rq, rn, p, i) {
+			list_del_init(&rq->sched.link);
+			__i915_request_submit(rq);
+			dma_fence_set_error(&rq->fence, -EIO);
+			i915_request_mark_complete(rq);
+		}
+
+		rb_erase_cached(&p->node, &execlists->queue);
+		i915_priolist_free(p);
+	}
+
+	/* Remaining _unready_ requests will be nop'ed when submitted */
+
+	execlists->queue_priority_hint = INT_MIN;
+	execlists->queue = RB_ROOT_CACHED;
+	GEM_BUG_ON(port_isset(execlists->port));
+
+	GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
+	execlists->tasklet.func = nop_submission_tasklet;
 
-out_unlock:
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
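The out_replay path above boils down to: if there is a request to replay, rewind the ring head to where that request's commands start, wrapped into the ring; otherwise leave the ring empty by setting head equal to tail. A stand-alone sketch of that decision, assuming a power-of-two ring size as intel_ring_wrap() relies on; ring_wrap and replay_head are illustrative names:

#include <stdio.h>

#define RING_SIZE 4096 /* power of two, as i915 rings are */

/* Equivalent of intel_ring_wrap(): fold an offset into the ring. */
static unsigned int ring_wrap(unsigned int pos)
{
	return pos & (RING_SIZE - 1);
}

/* Head after reset: rewind to the replayed request, or mark empty. */
static unsigned int replay_head(int have_rq, unsigned int rq_head,
				unsigned int tail)
{
	return have_rq ? ring_wrap(rq_head) : tail;
}

int main(void)
{
	/* A request whose commands started at byte 5000 wraps to 904. */
	printf("replay head = %u\n", replay_head(1, 5000, 128));
	/* No request to replay: ring is left empty (head == tail). */
	printf("empty head  = %u\n", replay_head(0, 0, 128));
	return 0;
}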
@@ -2035,7 +2079,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
 {
 	u32 *cs;
 
-	cs = intel_ring_begin(rq, 6);
+	cs = intel_ring_begin(rq, 4);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
@@ -2046,19 +2090,37 @@ static int gen8_emit_bb_start(struct i915_request *rq,
 	 * particular all the gen that do not need the w/a at all!), if we
 	 * took care to make sure that on every switch into this context
 	 * (both ordinary and for preemption) that arbitration was enabled
-	 * we would be fine. However, there doesn't seem to be a downside to
-	 * being paranoid and making sure it is set before each batch and
-	 * every context-switch.
-	 *
-	 * Note that if we fail to enable arbitration before the request
-	 * is complete, then we do not see the context-switch interrupt and
-	 * the engine hangs (with RING_HEAD == RING_TAIL).
-	 *
-	 * That satisfies both the GPGPU w/a and our heavy-handed paranoia.
+	 * we would be fine. However, for gen8 there is another w/a that
+	 * requires us to not preempt inside GPGPU execution, so we keep
+	 * arbitration disabled for gen8 batches. Arbitration will be
+	 * re-enabled before we close the request
+	 * (engine->emit_fini_breadcrumb).
 	 */
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+	/* FIXME(BDW+): Address space and security selectors. */
+	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
+		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+	*cs++ = lower_32_bits(offset);
+	*cs++ = upper_32_bits(offset);
+
+	intel_ring_advance(rq, cs);
+
+	return 0;
+}
+
+static int gen9_emit_bb_start(struct i915_request *rq,
+			      u64 offset, u32 len,
+			      const unsigned int flags)
+{
+	u32 *cs;
+
+	cs = intel_ring_begin(rq, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
 	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
 
 	/* FIXME(BDW): Address space and security selectors. */
 	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
 		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
 	*cs++ = lower_32_bits(offset);
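Both emitters finish with the same three-dword MI_BATCH_BUFFER_START_GEN8 command: a header dword whose bit 8 is set for non-secure batches, followed by the low and high halves of the 64-bit batch address. A stand-alone sketch of the packing; the opcode encoding below is treated as illustrative rather than authoritative:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Illustrative encoding: MI opcode in the high bits, length field low. */
#define MI_BATCH_BUFFER_START_GEN8 ((0x31u << 23) | 1)
#define I915_DISPATCH_SECURE BIT(0)

static uint32_t *emit_bb_start(uint32_t *cs, uint64_t offset,
			       unsigned int flags)
{
	/* Non-secure batches set bit 8 of the header dword. */
	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = (uint32_t)offset;	  /* lower_32_bits(offset) */
	*cs++ = (uint32_t)(offset >> 32); /* upper_32_bits(offset) */
	return cs;
}

int main(void)
{
	uint32_t ring[4];
	uint32_t *end = emit_bb_start(ring, 0x1234abcd5678ull, 0);

	for (uint32_t *p = ring; p < end; p++)
		printf("0x%08x\n", (unsigned)*p);
	return 0;
}

The only differences between the gen8 and gen9 variants are the MI_ARB_DISABLE versus MI_ARB_ENABLE dword in front of this command and the matching ring allocation (4 versus 6 dwords).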
@@ -2309,6 +2371,8 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
 	engine->execlists.tasklet.func = execlists_submission_tasklet;
 
+	engine->reset.prepare = execlists_reset_prepare;
+	engine->reset.reset = execlists_reset;
 	engine->reset.finish = execlists_reset_finish;
 
 	engine->park = NULL;
 	engine->unpark = NULL;
@@ -2316,7 +2380,8 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
 	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
 	if (!intel_vgpu_active(engine->i915))
 		engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
-	if (engine->preempt_context)
+	if (engine->preempt_context &&
+	    HAS_LOGICAL_RING_PREEMPTION(engine->i915))
 		engine->flags |= I915_ENGINE_HAS_PREEMPTION;
 }
@@ -2350,7 +2415,10 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 	 * until a more refined solution exists.
 	 */
 	}
-	engine->emit_bb_start = gen8_emit_bb_start;
+	if (IS_GEN(engine->i915, 8))
+		engine->emit_bb_start = gen8_emit_bb_start;
+	else
+		engine->emit_bb_start = gen9_emit_bb_start;
 }
 
 static inline void
@@ -2429,6 +2497,11 @@ static int logical_ring_init(struct intel_engine_cs *engine)
 	execlists->csb_write =
 		&engine->status_page.addr[intel_hws_csb_write_index(i915)];
 
+	if (INTEL_GEN(engine->i915) < 11)
+		execlists->csb_size = GEN8_CSB_ENTRIES;
+	else
+		execlists->csb_size = GEN11_CSB_ENTRIES;
+
 	reset_csb_pointers(execlists);
 
 	return 0;
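This is the single place the FIFO depth is chosen; everything else in the series derives from csb_size. A one-function sketch of the same rule, using the entry counts from this patch (GEN8_CSB_ENTRIES is 6, matching the "wraps around after 5h" note in intel_lrc.h):

#include <stdio.h>

#define GEN8_CSB_ENTRIES  6  /* write pointer wraps after 5h */
#define GEN11_CSB_ENTRIES 12 /* deeper FIFO enabled by this series */

static unsigned int csb_size(int gen)
{
	return gen < 11 ? GEN8_CSB_ENTRIES : GEN11_CSB_ENTRIES;
}

int main(void)
{
	printf("gen9: %u entries, gen11: %u entries\n",
	       csb_size(9), csb_size(11));
	return 0;
}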
@@ -2707,14 +2780,14 @@ static void execlists_init_reg_state(u32 *regs,
 
 	CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0);
 	/* PDP values will be assigned later if needed */
-	CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3), 0);
-	CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3), 0);
-	CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2), 0);
-	CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2), 0);
-	CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1), 0);
-	CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1), 0);
-	CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
-	CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
+	CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(base, 3), 0);
+	CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(base, 3), 0);
+	CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(base, 2), 0);
+	CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(base, 2), 0);
+	CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(base, 1), 0);
+	CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(base, 1), 0);
+	CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(base, 0), 0);
+	CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(base, 0), 0);
 
 	if (i915_vm_is_4lvl(&ppgtt->vm)) {
 		/* 64b PPGTT (48bit canonical)
@@ -2872,31 +2945,6 @@ error_deref_obj:
 	return ret;
 }
 
-void intel_lr_context_resume(struct drm_i915_private *i915)
-{
-	struct i915_gem_context *ctx;
-	struct intel_context *ce;
-
-	/*
-	 * Because we emit WA_TAIL_DWORDS there may be a disparity
-	 * between our bookkeeping in ce->ring->head and ce->ring->tail and
-	 * that stored in context. As we only write new commands from
-	 * ce->ring->tail onwards, everything before that is junk. If the GPU
-	 * starts reading from its RING_HEAD from the context, it may try to
-	 * execute that junk and die.
-	 *
-	 * So to avoid that we reset the context images upon resume. For
-	 * simplicity, we just zero everything out.
-	 */
-	list_for_each_entry(ctx, &i915->contexts.list, link) {
-		list_for_each_entry(ce, &ctx->active_engines, active_link) {
-			GEM_BUG_ON(!ce->ring);
-			intel_ring_reset(ce->ring, 0);
-			__execlists_update_reg_state(ce, ce->engine);
-		}
-	}
-}
-
 void intel_execlists_show_requests(struct intel_engine_cs *engine,
 				   struct drm_printer *m,
 				   void (*show_request)(struct drm_printer *m,
@@ -2957,6 +3005,37 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
+void intel_lr_context_reset(struct intel_engine_cs *engine,
+			    struct intel_context *ce,
+			    u32 head,
+			    bool scrub)
+{
+	/*
+	 * We want a simple context + ring to execute the breadcrumb update.
+	 * We cannot rely on the context being intact across the GPU hang,
+	 * so clear it and rebuild just what we need for the breadcrumb.
+	 * All pending requests for this context will be zapped, and any
+	 * future request will be after userspace has had the opportunity
+	 * to recreate its own state.
+	 */
+	if (scrub) {
+		u32 *regs = ce->lrc_reg_state;
+
+		if (engine->pinned_default_state) {
+			memcpy(regs, /* skip restoring the vanilla PPHWSP */
+			       engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
+			       engine->context_size - PAGE_SIZE);
+		}
+		execlists_init_reg_state(regs, ce, engine, ce->ring);
+	}
+
+	/* Rerun the request; its payload has been neutered (if guilty). */
+	ce->ring->head = head;
+	intel_ring_update_space(ce->ring);
+
+	__execlists_update_reg_state(ce, engine);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/intel_lrc.c"
 #endif
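intel_lr_context_reset() above is an overwrite-then-rebuild pattern: when the context image cannot be trusted after a hang (scrub), copy the pristine default image over it and re-derive the register state, then point the ring head at the replay position. A toy stand-alone version of that pattern, with every structure invented purely for illustration:

#include <stdio.h>
#include <string.h>

#define IMG_SIZE 8

struct toy_ctx {
	unsigned int image[IMG_SIZE]; /* stand-in for ce->lrc_reg_state */
	unsigned int head;            /* stand-in for ce->ring->head */
};

static const unsigned int default_image[IMG_SIZE]; /* pristine image */

static void toy_context_reset(struct toy_ctx *ce, unsigned int head, int scrub)
{
	if (scrub) /* image cannot be trusted after a hang: rebuild it */
		memcpy(ce->image, default_image, sizeof(ce->image));

	ce->head = head; /* replay from here; payload already neutered */
}

int main(void)
{
	struct toy_ctx ce = { .image = { 7, 7, 7 }, .head = 42 };

	toy_context_reset(&ce, 0, 1);
	printf("head=%u image[0]=%u\n", ce.head, ce.image[0]);
	return 0;
}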
@@ -36,12 +36,10 @@
 #define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1 << 0)
 #define CTX_CTRL_RS_CTX_ENABLE			(1 << 1)
 #define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT	(1 << 2)
-#define RING_CONTEXT_STATUS_BUF_BASE(base)	_MMIO((base) + 0x370)
-#define RING_CONTEXT_STATUS_BUF_LO(base, i)	_MMIO((base) + 0x370 + (i) * 8)
-#define RING_CONTEXT_STATUS_BUF_HI(base, i)	_MMIO((base) + 0x370 + (i) * 8 + 4)
 #define RING_CONTEXT_STATUS_PTR(base)		_MMIO((base) + 0x3a0)
 #define RING_EXECLIST_SQ_CONTENTS(base)		_MMIO((base) + 0x510)
 #define RING_EXECLIST_CONTROL(base)		_MMIO((base) + 0x550)
 
 #define	EL_CTRL_LOAD				(1 << 0)
 
 /* The docs specify that the write pointer wraps around after 5h, "After status
@@ -55,10 +53,11 @@
 #define GEN8_CSB_PTR_MASK 0x7
 #define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
 #define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
-#define GEN8_CSB_WRITE_PTR(csb_status) \
-	(((csb_status) & GEN8_CSB_WRITE_PTR_MASK) >> 0)
-#define GEN8_CSB_READ_PTR(csb_status) \
-	(((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)
+
+#define GEN11_CSB_ENTRIES 12
+#define GEN11_CSB_PTR_MASK 0xf
+#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8)
+#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0)
 
 enum {
 	INTEL_CONTEXT_SCHEDULE_IN = 0,
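The pointer dword packs the read pointer in the byte above the write pointer: masks shifted by 8 and 0 respectively, 3 bits wide on gen8 and widened to 4 bits (0xf) for the 12-entry gen11 FIFO. A stand-alone sketch of extracting both fields with the gen8 masks above; the helper macros here are equivalent rearrangements, not the kernel's exact definitions:

#include <stdio.h>

#define GEN8_CSB_PTR_MASK 0x7
#define GEN8_CSB_READ_PTR(s)  (((s) >> 8) & GEN8_CSB_PTR_MASK)
#define GEN8_CSB_WRITE_PTR(s) (((s) >> 0) & GEN8_CSB_PTR_MASK)

int main(void)
{
	unsigned int status = 0x0205; /* hypothetical pointer dword */

	printf("read=%u write=%u\n",
	       GEN8_CSB_READ_PTR(status), GEN8_CSB_WRITE_PTR(status));
	return 0;
}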
@@ -102,9 +101,13 @@ struct drm_printer;
 struct drm_i915_private;
 struct i915_gem_context;
 
-void intel_lr_context_resume(struct drm_i915_private *dev_priv);
 void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
 
+void intel_lr_context_reset(struct intel_engine_cs *engine,
+			    struct intel_context *ce,
+			    u32 head,
+			    bool scrub);
+
 void intel_execlists_show_requests(struct intel_engine_cs *engine,
 				   struct drm_printer *m,
 				   void (*show_request)(struct drm_printer *m,
@@ -22,10 +22,14 @@
  *
  *
  */
-#include <drm/drm_edid.h>
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_dp_dual_mode_helper.h>
+#include <drm/drm_edid.h>
 
 #include "intel_dp.h"
 #include "intel_drv.h"
+#include "intel_lspcon.h"
 
 /* LSPCON OUI Vendor ID(signatures) */
 #define LSPCON_VENDOR_PARADE_OUI 0x001CF8
38
drivers/gpu/drm/i915/intel_lspcon.h
Normal file
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_LSPCON_H__
+#define __INTEL_LSPCON_H__
+
+#include <linux/types.h>
+
+struct drm_connector;
+struct drm_connector_state;
+struct intel_crtc_state;
+struct intel_digital_port;
+struct intel_encoder;
+struct intel_lspcon;
+
+bool lspcon_init(struct intel_digital_port *intel_dig_port);
+void lspcon_resume(struct intel_lspcon *lspcon);
+void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
+void lspcon_write_infoframe(struct intel_encoder *encoder,
+			    const struct intel_crtc_state *crtc_state,
+			    unsigned int type,
+			    const void *buf, ssize_t len);
+void lspcon_read_infoframe(struct intel_encoder *encoder,
+			   const struct intel_crtc_state *crtc_state,
+			   unsigned int type,
+			   void *frame, ssize_t len);
+void lspcon_set_infoframes(struct intel_encoder *encoder,
+			   bool enable,
+			   const struct intel_crtc_state *crtc_state,
+			   const struct drm_connector_state *conn_state);
+u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
+			      const struct intel_crtc_state *pipe_config);
+void lspcon_ycbcr420_config(struct drm_connector *connector,
+			    struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_LSPCON_H__ */
@@ -28,17 +28,22 @@
  */
 
 #include <acpi/button.h>
+#include <linux/acpi.h>
 #include <linux/dmi.h>
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/vga_switcheroo.h>
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
-#include "intel_drv.h"
 #include <drm/i915_drm.h>
 
 #include "i915_drv.h"
-#include <linux/acpi.h>
+#include "intel_connector.h"
+#include "intel_drv.h"
+#include "intel_lvds.h"
+#include "intel_panel.h"
 
 /* Private structure for the integrated LVDS support */
 struct intel_lvds_pps {
Some files were not shown because too many files have changed in this diff