commit 5c37daf5dd

Merge tag 'drm-intel-next-2017-01-09' of git://anongit.freedesktop.org/git/drm-intel into drm-next

More 4.11 stuff, holidays edition (i.e. not much):

- docs and cleanups for shared dpll code (Ander)
- some kerneldoc work (Chris)
- fbc by default on gen9+ too, yeah! (Paulo)
- fixes, polish and other small things all over gem code (Chris)
- and a few small things on top

Plus a backmerge, because Dave was enjoying time off too.

* tag 'drm-intel-next-2017-01-09' of git://anongit.freedesktop.org/git/drm-intel: (275 commits)
  drm/i915: Update DRIVER_DATE to 20170109
  drm/i915: Drain freed objects for mmap space exhaustion
  drm/i915: Purge loose pages if we run out of DMA remap space
  drm/i915: Fix phys pwrite for struct_mutex-less operation
  drm/i915: Simplify testing for am-I-the-kernel-context?
  drm/i915: Use range_overflows()
  drm/i915: Use fixed-sized types for stolen
  drm/i915: Use phys_addr_t for the address of stolen memory
  drm/i915: Consolidate checks for memcpy-from-wc support
  drm/i915: Only skip requests once a context is banned
  drm/i915: Move a few more utility macros to i915_utils.h
  drm/i915: Clear ret before unbinding in i915_gem_evict_something()
  drm/i915/guc: Exclude the upper end of the Global GTT for the GuC
  drm/i915: Move a few utility macros into a separate header
  drm/i915/execlists: Reorder execlists register enabling
  drm/i915: Assert that we do create the deferred context
  drm/i915: Assert all timeline requests are gone before fini
  drm/i915: Revoke fenced GTT mmapings across GPU reset
  drm/i915: enable FBC on gen9+ too
  drm/i915: actually drive the BDW reserved IDs
  ...
Documentation/gpu/i915.rst
@@ -213,6 +213,18 @@ Video BIOS Table (VBT)
 .. kernel-doc:: drivers/gpu/drm/i915/intel_vbt_defs.h
    :internal:
 
+Display PLLs
+------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dpll_mgr.c
+   :doc: Display PLLs
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dpll_mgr.c
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dpll_mgr.h
+   :internal:
+
 Memory Management and Command Submission
 ========================================
 
@@ -356,4 +368,95 @@ switch_mm
 .. kernel-doc:: drivers/gpu/drm/i915/i915_trace.h
    :doc: switch_mm tracepoint
 
+Perf
+====
+
+Overview
+--------
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :doc: i915 Perf Overview
+
+Comparison with Core Perf
+-------------------------
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :doc: i915 Perf History and Comparison with Core Perf
+
+i915 Driver Entry Points
+------------------------
+
+This section covers the entrypoints exported outside of i915_perf.c to
+integrate with drm/i915 and to handle the `DRM_I915_PERF_OPEN` ioctl.
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_init
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_fini
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_register
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_unregister
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_open_ioctl
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_release
+
+i915 Perf Stream
+----------------
+
+This section covers the stream-semantics-agnostic structures and functions
+for representing an i915 perf stream FD and associated file operations.
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_drv.h
+   :functions: i915_perf_stream
+.. kernel-doc:: drivers/gpu/drm/i915/i915_drv.h
+   :functions: i915_perf_stream_ops
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: read_properties_unlocked
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_open_ioctl_locked
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_destroy_locked
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_read
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_ioctl
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_enable_locked
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_disable_locked
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_poll
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_perf_poll_locked
+
+i915 Perf Observation Architecture Stream
+-----------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_drv.h
+   :functions: i915_oa_ops
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_oa_stream_init
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_oa_read
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_oa_stream_enable
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_oa_stream_disable
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_oa_wait_unlocked
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :functions: i915_oa_poll_wait
+
+All i915 Perf Internals
+-----------------------
+
+This section simply includes all currently documented i915 perf internals, in
+no particular order, but may include some more minor utilities or platform
+specific details than found in the more high-level sections.
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+   :internal:
+
 .. WARNING: DOCPROC directive not supported: !Cdrivers/gpu/drm/i915/i915_irq.c
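The entry points documented above are reached from userspace through the DRM_I915_PERF_OPEN ioctl that this series adds. As a hypothetical illustration (not part of the patch itself), opening and enabling an OA stream looks roughly like the sketch below; the property and flag names come from the uapi introduced by this work, while the metrics set, OA format and exponent are placeholder values, and the include path depends on your libdrm setup — check include/uapi/drm/i915_drm.h for the authoritative definitions.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Sketch: open an i915 perf OA stream on an already-open DRM fd. */
static int open_oa_stream(int drm_fd, uint64_t metrics_set)
{
	uint64_t properties[] = {
		/* include raw OA reports in read() data */
		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
		/* which counter configuration to use (placeholder value) */
		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set,
		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A45_B8_C8,
		/* periodic sampling; the period is derived from this exponent */
		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
	};
	struct drm_i915_perf_open_param param = {
		.flags = I915_PERF_FLAG_FD_CLOEXEC |
			 I915_PERF_FLAG_DISABLED, /* enable explicitly below */
		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
		.properties_ptr = (uintptr_t)properties,
	};
	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);

	if (stream_fd >= 0)
		ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
	return stream_fd; /* OA reports are then read() from this fd */
}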
drivers/char/agp/intel-gtt.c
@@ -1420,8 +1420,10 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 }
 EXPORT_SYMBOL(intel_gmch_probe);
 
-void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
-		   phys_addr_t *mappable_base, u64 *mappable_end)
+void intel_gtt_get(u64 *gtt_total,
+		   u32 *stolen_size,
+		   phys_addr_t *mappable_base,
+		   u64 *mappable_end)
 {
 	*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
 	*stolen_size = intel_private.stolen_size;
drivers/gpu/drm/i915/Kconfig.debug
@@ -19,9 +19,12 @@ config DRM_I915_DEBUG
 	bool "Enable additional driver debugging"
 	depends on DRM_I915
 	select PREEMPT_COUNT
 	select I2C_CHARDEV
 	select DRM_DP_AUX_CHARDEV
+	select X86_MSR # used by igt/pm_rpm
+	select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
 	select DRM_DEBUG_MM if DRM=y
+	select DRM_I915_SW_FENCE_DEBUG_OBJECTS
 	default n
 	help
 	  Choose this option to turn on extra driver debugging that may affect
@@ -43,3 +46,15 @@ config DRM_I915_DEBUG_GEM
 
 	  If in doubt, say "N".
 
+config DRM_I915_SW_FENCE_DEBUG_OBJECTS
+	bool "Enable additional driver debugging for fence objects"
+	depends on DRM_I915
+	select DEBUG_OBJECTS
+	default n
+	help
+	  Choose this option to turn on extra driver debugging that may affect
+	  performance but will catch some internal issues.
+
+	  Recommended for driver developers only.
+
+	  If in doubt, say "N".
drivers/gpu/drm/i915/Makefile
@@ -24,7 +24,7 @@ i915-y := i915_drv.o \
 	  intel_runtime_pm.o
 
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
-i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
 
 # GEM code
 i915-y += i915_cmd_parser.o \
@@ -55,7 +55,8 @@ i915-y += i915_cmd_parser.o \
 	  intel_uncore.o
 
 # general-purpose microcontroller (GuC) support
-i915-y += intel_guc_loader.o \
+i915-y += intel_uc.o \
+	  intel_guc_loader.o \
 	  i915_guc_submission.o
 
 # autogenerated null render state
@@ -117,6 +118,10 @@ i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
 # virtual gpu code
 i915-y += i915_vgpu.o
 
+# perf code
+i915-y += i915_perf.o \
+	  i915_oa_hsw.o
+
 ifeq ($(CONFIG_DRM_I915_GVT),y)
 i915-y += intel_gvt.o
 include $(src)/gvt/Makefile
drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -73,12 +73,15 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 	mutex_lock(&dev_priv->drm.struct_mutex);
 search_again:
 	ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
-						  node, size, 4096, 0,
+						  node, size, 4096,
+						  I915_COLOR_UNEVICTABLE,
 						  start, end, search_flag,
 						  alloc_flag);
 	if (ret) {
 		ret = i915_gem_evict_something(&dev_priv->ggtt.base,
-					       size, 4096, 0, start, end, 0);
+					       size, 4096,
+					       I915_COLOR_UNEVICTABLE,
+					       start, end, 0);
 		if (ret == 0 && ++retried < 3)
 			goto search_again;
 
drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1602,7 +1602,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 		return -ENOMEM;
 
 	entry_obj->obj =
-		i915_gem_object_create(&(s->vgpu->gvt->dev_priv->drm),
+		i915_gem_object_create(s->vgpu->gvt->dev_priv,
 				       roundup(bb_size, PAGE_SIZE));
 	if (IS_ERR(entry_obj->obj)) {
 		ret = PTR_ERR(entry_obj->obj);
@@ -2665,14 +2665,13 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 
 static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
-	struct drm_device *dev = &wa_ctx->workload->vgpu->gvt->dev_priv->drm;
 	int ctx_size = wa_ctx->indirect_ctx.size;
 	unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
 	struct drm_i915_gem_object *obj;
 	int ret = 0;
 	void *map;
 
-	obj = i915_gem_object_create(dev,
+	obj = i915_gem_object_create(wa_ctx->workload->vgpu->gvt->dev_priv,
 				     roundup(ctx_size + CACHELINE_BYTES,
 					     PAGE_SIZE));
 	if (IS_ERR(obj))
drivers/gpu/drm/i915/gvt/handlers.c
@@ -2200,7 +2200,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
 
 	MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL);
-	MMIO_D(OACONTROL, D_HSW);
+	MMIO_D(GEN7_OACONTROL, D_HSW);
 	MMIO_D(0x2b00, D_BDW_PLUS);
 	MMIO_D(0x2360, D_BDW_PLUS);
 	MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL);
drivers/gpu/drm/i915/gvt/scheduler.c
@@ -549,18 +549,10 @@ err:
 
 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-
 	atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
 					 &vgpu->shadow_ctx_notifier_block);
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
-
-	/* a little hacky to mark as ctx closed */
-	vgpu->shadow_ctx->closed = true;
-	i915_gem_context_put(vgpu->shadow_ctx);
-
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	i915_gem_context_put_unlocked(vgpu->shadow_ctx);
 }
 
 int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -86,6 +86,102 @@
  * general bitmasking mechanism.
  */
 
+/*
+ * A command that requires special handling by the command parser.
+ */
+struct drm_i915_cmd_descriptor {
+	/*
+	 * Flags describing how the command parser processes the command.
+	 *
+	 * CMD_DESC_FIXED: The command has a fixed length if this is set,
+	 *                 a length mask if not set
+	 * CMD_DESC_SKIP: The command is allowed but does not follow the
+	 *                standard length encoding for the opcode range in
+	 *                which it falls
+	 * CMD_DESC_REJECT: The command is never allowed
+	 * CMD_DESC_REGISTER: The command should be checked against the
+	 *                    register whitelist for the appropriate ring
+	 * CMD_DESC_MASTER: The command is allowed if the submitting process
+	 *                  is the DRM master
+	 */
+	u32 flags;
+#define CMD_DESC_FIXED    (1<<0)
+#define CMD_DESC_SKIP     (1<<1)
+#define CMD_DESC_REJECT   (1<<2)
+#define CMD_DESC_REGISTER (1<<3)
+#define CMD_DESC_BITMASK  (1<<4)
+#define CMD_DESC_MASTER   (1<<5)
+
+	/*
+	 * The command's unique identification bits and the bitmask to get them.
+	 * This isn't strictly the opcode field as defined in the spec and may
+	 * also include type, subtype, and/or subop fields.
+	 */
+	struct {
+		u32 value;
+		u32 mask;
+	} cmd;
+
+	/*
+	 * The command's length. The command is either fixed length (i.e. does
+	 * not include a length field) or has a length field mask. The flag
+	 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
+	 * a length mask. All command entries in a command table must include
+	 * length information.
+	 */
+	union {
+		u32 fixed;
+		u32 mask;
+	} length;
+
+	/*
+	 * Describes where to find a register address in the command to check
+	 * against the ring's register whitelist. Only valid if flags has the
+	 * CMD_DESC_REGISTER bit set.
+	 *
+	 * A non-zero step value implies that the command may access multiple
+	 * registers in sequence (e.g. LRI), in that case step gives the
+	 * distance in dwords between individual offset fields.
+	 */
+	struct {
+		u32 offset;
+		u32 mask;
+		u32 step;
+	} reg;
+
+#define MAX_CMD_DESC_BITMASKS 3
+	/*
+	 * Describes command checks where a particular dword is masked and
+	 * compared against an expected value. If the command does not match
+	 * the expected value, the parser rejects it. Only valid if flags has
+	 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
+	 * are valid.
+	 *
+	 * If the check specifies a non-zero condition_mask then the parser
+	 * only performs the check when the bits specified by condition_mask
+	 * are non-zero.
+	 */
+	struct {
+		u32 offset;
+		u32 mask;
+		u32 expected;
+		u32 condition_offset;
+		u32 condition_mask;
+	} bits[MAX_CMD_DESC_BITMASKS];
+};
+
+/*
+ * A table of commands requiring special handling by the command parser.
+ *
+ * Each engine has an array of tables. Each table consists of an array of
+ * command descriptors, which must be sorted with command opcodes in
+ * ascending order.
+ */
+struct drm_i915_cmd_table {
+	const struct drm_i915_cmd_descriptor *table;
+	int count;
+};
+
 #define STD_MI_OPCODE_SHIFT  (32 - 9)
 #define STD_3D_OPCODE_SHIFT  (32 - 16)
 #define STD_2D_OPCODE_SHIFT  (32 - 10)
@@ -450,7 +546,6 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
 	REG64(PS_INVOCATION_COUNT),
 	REG64(PS_DEPTH_COUNT),
 	REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
-	REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */
 	REG64(MI_PREDICATE_SRC0),
 	REG64(MI_PREDICATE_SRC1),
 	REG32(GEN7_3DPRIM_END_OFFSET),
@@ -559,7 +654,7 @@ static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
 
 static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
 {
-	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+	u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
 	u32 subclient =
 		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
 
@@ -578,7 +673,7 @@ static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
 
 static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
 {
-	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+	u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
 	u32 subclient =
 		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
 	u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;
@@ -601,7 +696,7 @@ static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
 
 static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
 {
-	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+	u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
 
 	if (client == INSTR_MI_CLIENT)
 		return 0x3F;
@@ -984,7 +1079,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 
 	src = ERR_PTR(-ENODEV);
 	if (src_needs_clflush &&
-	    i915_memcpy_from_wc((void *)(uintptr_t)batch_start_offset, NULL, 0)) {
+	    i915_can_memcpy_from_wc(NULL, batch_start_offset, 0)) {
 		src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
 		if (!IS_ERR(src)) {
 			i915_memcpy_from_wc(dst,
@@ -1036,32 +1131,10 @@ unpin_src:
 	return dst;
 }
 
-/**
- * intel_engine_needs_cmd_parser() - should a given engine use software
- * command parsing?
- * @engine: the engine in question
- *
- * Only certain platforms require software batch buffer command parsing, and
- * only when enabled via module parameter.
- *
- * Return: true if the engine requires software command parsing
- */
-bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine)
-{
-	if (!engine->needs_cmd_parser)
-		return false;
-
-	if (!USES_PPGTT(engine->i915))
-		return false;
-
-	return (i915.enable_cmd_parser == 1);
-}
-
 static bool check_cmd(const struct intel_engine_cs *engine,
 		      const struct drm_i915_cmd_descriptor *desc,
 		      const u32 *cmd, u32 length,
-		      const bool is_master,
-		      bool *oacontrol_set)
+		      const bool is_master)
 {
 	if (desc->flags & CMD_DESC_SKIP)
 		return true;
@@ -1098,31 +1171,6 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 				return false;
 			}
 
-			/*
-			 * OACONTROL requires some special handling for
-			 * writes. We want to make sure that any batch which
-			 * enables OA also disables it before the end of the
-			 * batch. The goal is to prevent one process from
-			 * snooping on the perf data from another process. To do
-			 * that, we need to check the value that will be written
-			 * to the register. Hence, limit OACONTROL writes to
-			 * only MI_LOAD_REGISTER_IMM commands.
-			 */
-			if (reg_addr == i915_mmio_reg_offset(OACONTROL)) {
-				if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
-					DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
-					return false;
-				}
-
-				if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
-					DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n");
-					return false;
-				}
-
-				if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
-					*oacontrol_set = (cmd[offset + 1] != 0);
-			}
-
 			/*
 			 * Check the value written to the register against the
 			 * allowed mask/value pair given in the whitelist entry.
@@ -1214,7 +1262,6 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 	u32 *cmd, *batch_end;
 	struct drm_i915_cmd_descriptor default_desc = noop_desc;
 	const struct drm_i915_cmd_descriptor *desc = &default_desc;
-	bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
 	bool needs_clflush_after = false;
 	int ret = 0;
 
@@ -1270,20 +1317,14 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 			break;
 		}
 
-		if (!check_cmd(engine, desc, cmd, length, is_master,
-			       &oacontrol_set)) {
-			ret = -EINVAL;
+		if (!check_cmd(engine, desc, cmd, length, is_master)) {
+			ret = -EACCES;
 			break;
 		}
 
 		cmd += length;
 	}
 
-	if (oacontrol_set) {
-		DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
-		ret = -EINVAL;
-	}
-
 	if (cmd >= batch_end) {
 		DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
 		ret = -EINVAL;
@@ -1313,7 +1354,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
 
 	/* If the command parser is not enabled, report 0 - unsupported */
 	for_each_engine(engine, dev_priv, id) {
-		if (intel_engine_needs_cmd_parser(engine)) {
+		if (engine->needs_cmd_parser) {
 			active = true;
 			break;
 		}
@@ -1333,6 +1374,11 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
 	 * 5. GPGPU dispatch compute indirect registers.
 	 * 6. TIMESTAMP register and Haswell CS GPR registers
	 * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
+	 * 8. Don't report cmd_check() failures as EINVAL errors to userspace;
+	 *    rely on the HW to NOOP disallowed commands as it would without
+	 *    the parser enabled.
+	 * 9. Don't whitelist or handle oacontrol specially, as ownership
+	 *    for oacontrol state is moving to i915-perf.
 	 */
-	return 7;
+	return 9;
 }
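For orientation (this block is illustrative, not part of the diff): a command table built on the descriptor struct above pairs each command's identification bits with its length rule and register-check rule. With made-up opcode, mask and step values, an LRI-style entry could look like this:

/* Hypothetical descriptor: a variable-length MI command whose register
 * writes must be checked against the whitelist. The opcode/mask/step
 * values here are illustrative only; the real tables use the CMD() helper. */
static const struct drm_i915_cmd_descriptor example_lri = {
	.flags = CMD_DESC_REGISTER,
	.cmd = {
		.value = 0x22 << STD_MI_OPCODE_SHIFT, /* example opcode bits */
		.mask  = 0x3f << STD_MI_OPCODE_SHIFT, /* identification mask */
	},
	.length = { .mask = 0xff }, /* length field in DW0 bits 7:0 */
	/* register offset in DW1, then every 2 dwords (offset/value pairs) */
	.reg = { .offset = 1, .mask = 0x007ffffc, .step = 2 },
};

static const struct drm_i915_cmd_table example_table = {
	.table = &example_lri,
	.count = 1, /* entries must be sorted by ascending opcode */
};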
File diff suppressed because it is too large
drivers/gpu/drm/i915/i915_drv.c
@@ -142,9 +142,8 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
 	return ret;
 }
 
-static void intel_detect_pch(struct drm_device *dev)
+static void intel_detect_pch(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct pci_dev *pch = NULL;
 
 	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
@@ -361,10 +360,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	return 0;
 }
 
-static int i915_get_bridge_dev(struct drm_device *dev)
+static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
 	if (!dev_priv->bridge_dev) {
 		DRM_ERROR("bridge device not found\n");
@@ -375,9 +372,8 @@ static int i915_get_bridge_dev(struct drm_device *dev)
 
 /* Allocate space for the MCH regs if needed, return nonzero on error */
 static int
-intel_alloc_mchbar_resource(struct drm_device *dev)
+intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 	u32 temp_lo, temp_hi = 0;
 	u64 mchbar_addr;
@@ -421,9 +417,8 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
 
 /* Setup MCHBAR if possible, return true if we should disable it again */
 static void
-intel_setup_mchbar(struct drm_device *dev)
+intel_setup_mchbar(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 	u32 temp;
 	bool enabled;
@@ -445,7 +440,7 @@ intel_setup_mchbar(struct drm_device *dev)
 	if (enabled)
 		return;
 
-	if (intel_alloc_mchbar_resource(dev))
+	if (intel_alloc_mchbar_resource(dev_priv))
 		return;
 
 	dev_priv->mchbar_need_disable = true;
@@ -461,9 +456,8 @@ intel_setup_mchbar(struct drm_device *dev)
 }
 
 static void
-intel_teardown_mchbar(struct drm_device *dev)
+intel_teardown_mchbar(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
 
 	if (dev_priv->mchbar_need_disable) {
@@ -493,9 +487,9 @@ intel_teardown_mchbar(struct drm_device *dev)
 /* true = enable decode, false = disable decoder */
 static unsigned int i915_vga_set_decode(void *cookie, bool state)
 {
-	struct drm_device *dev = cookie;
+	struct drm_i915_private *dev_priv = cookie;
 
-	intel_modeset_vga_set_state(to_i915(dev), state);
+	intel_modeset_vga_set_state(dev_priv, state);
 	if (state)
 		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
 		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -503,6 +497,9 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
 	return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 }
 
+static int i915_resume_switcheroo(struct drm_device *dev);
+static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
+
 static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
@@ -544,12 +541,11 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
 static void i915_gem_fini(struct drm_i915_private *dev_priv)
 {
 	mutex_lock(&dev_priv->drm.struct_mutex);
-	i915_gem_cleanup_engines(&dev_priv->drm);
-	i915_gem_context_fini(&dev_priv->drm);
+	i915_gem_cleanup_engines(dev_priv);
+	i915_gem_context_fini(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	rcu_barrier();
-	flush_work(&dev_priv->mm.free_work);
+	i915_gem_drain_freed_objects(dev_priv);
 
 	WARN_ON(!list_empty(&dev_priv->context_list));
 }
@@ -574,7 +570,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	 * then we do not take part in VGA arbitration and the
 	 * vga_client_register() fails with -ENODEV.
 	 */
-	ret = vga_client_register(pdev, dev, NULL, i915_vga_set_decode);
+	ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
 	if (ret && ret != -ENODEV)
 		goto out;
 
@@ -595,7 +591,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		goto cleanup_csr;
 
-	intel_setup_gmbus(dev);
+	intel_setup_gmbus(dev_priv);
 
 	/* Important: The output setup functions called by modeset_init need
 	 * working irqs for e.g. gmbus and dp aux transfers. */
@@ -603,9 +599,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		goto cleanup_irq;
 
-	intel_guc_init(dev);
+	intel_guc_init(dev_priv);
 
-	ret = i915_gem_init(dev);
+	ret = i915_gem_init(dev_priv);
 	if (ret)
 		goto cleanup_irq;
 
@@ -626,13 +622,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	return 0;
 
 cleanup_gem:
-	if (i915_gem_suspend(dev))
+	if (i915_gem_suspend(dev_priv))
 		DRM_ERROR("failed to idle hardware; continuing to unload!\n");
 	i915_gem_fini(dev_priv);
 cleanup_irq:
-	intel_guc_fini(dev);
+	intel_guc_fini(dev_priv);
 	drm_irq_uninstall(dev);
-	intel_teardown_gmbus(dev);
+	intel_teardown_gmbus(dev_priv);
 cleanup_csr:
 	intel_csr_ucode_fini(dev_priv);
 	intel_power_domains_fini(dev_priv);
@@ -643,7 +639,6 @@ out:
 	return ret;
 }
 
-#if IS_ENABLED(CONFIG_FB)
 static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
 	struct apertures_struct *ap;
@@ -668,12 +663,6 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 
 	return ret;
 }
-#else
-static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
-{
-	return 0;
-}
-#endif
 
 #if !defined(CONFIG_VGA_CONSOLE)
 static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
@@ -811,12 +800,15 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 	spin_lock_init(&dev_priv->uncore.lock);
 	spin_lock_init(&dev_priv->mm.object_stat_lock);
 	spin_lock_init(&dev_priv->mmio_flip_lock);
+	spin_lock_init(&dev_priv->wm.dsparb_lock);
 	mutex_init(&dev_priv->sb_lock);
 	mutex_init(&dev_priv->modeset_restore_lock);
 	mutex_init(&dev_priv->av_mutex);
 	mutex_init(&dev_priv->wm.wm_mutex);
 	mutex_init(&dev_priv->pps_mutex);
 
+	intel_uc_init_early(dev_priv);
+
 	i915_memcpy_init_early(dev_priv);
 
 	ret = i915_workqueues_init(dev_priv);
@@ -828,9 +820,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 		goto err_workqueues;
 
 	/* This must be called before any calls to HAS_PCH_* */
-	intel_detect_pch(&dev_priv->drm);
+	intel_detect_pch(dev_priv);
 
-	intel_pm_setup(&dev_priv->drm);
+	intel_pm_setup(dev_priv);
 	intel_init_dpio(dev_priv);
 	intel_power_domains_init(dev_priv);
 	intel_irq_init(dev_priv);
@@ -838,7 +830,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 	intel_init_display_hooks(dev_priv);
 	intel_init_clock_gating_hooks(dev_priv);
 	intel_init_audio_hooks(dev_priv);
-	ret = i915_gem_load_init(&dev_priv->drm);
+	ret = i915_gem_load_init(dev_priv);
 	if (ret < 0)
 		goto err_gvt;
 
@@ -848,6 +840,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 
 	intel_detect_preproduction_hw(dev_priv);
 
+	i915_perf_init(dev_priv);
+
 	return 0;
 
 err_gvt:
@@ -863,13 +857,13 @@ err_workqueues:
  */
 static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
 {
-	i915_gem_load_cleanup(&dev_priv->drm);
+	i915_perf_fini(dev_priv);
+	i915_gem_load_cleanup(dev_priv);
 	i915_workqueues_cleanup(dev_priv);
 }
 
-static int i915_mmio_setup(struct drm_device *dev)
+static int i915_mmio_setup(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	int mmio_bar;
 	int mmio_size;
@@ -895,17 +889,16 @@ static int i915_mmio_setup(struct drm_device *dev)
 	}
 
 	/* Try to make sure MCHBAR is enabled before poking at it */
-	intel_setup_mchbar(dev);
+	intel_setup_mchbar(dev_priv);
 
 	return 0;
 }
 
-static void i915_mmio_cleanup(struct drm_device *dev)
+static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 
-	intel_teardown_mchbar(dev);
+	intel_teardown_mchbar(dev_priv);
 	pci_iounmap(pdev, dev_priv->regs);
 }
 
@@ -920,16 +913,15 @@ static void i915_mmio_cleanup(struct drm_device *dev)
  */
 static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = &dev_priv->drm;
 	int ret;
 
 	if (i915_inject_load_failure())
 		return -ENODEV;
 
-	if (i915_get_bridge_dev(dev))
+	if (i915_get_bridge_dev(dev_priv))
 		return -EIO;
 
-	ret = i915_mmio_setup(dev);
+	ret = i915_mmio_setup(dev_priv);
 	if (ret < 0)
 		goto put_bridge;
 
@@ -949,10 +941,8 @@ put_bridge:
  */
 static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = &dev_priv->drm;
-
 	intel_uncore_fini(dev_priv);
-	i915_mmio_cleanup(dev);
+	i915_mmio_cleanup(dev_priv);
 	pci_dev_put(dev_priv->bridge_dev);
 }
 
@@ -1043,7 +1033,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	 * behaviour if any general state is accessed within a page above 4GB,
 	 * which also needs to be handled carefully.
 	 */
-	if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv)) {
+	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
 		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 
 		if (ret) {
@@ -1126,6 +1116,9 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 		i915_debugfs_register(dev_priv);
 		i915_guc_register(dev_priv);
 		i915_setup_sysfs(dev_priv);
+
+		/* Depends on sysfs having been initialized */
+		i915_perf_register(dev_priv);
 	} else
 		DRM_ERROR("Failed to register driver for userspace access!\n");
 
@@ -1162,6 +1155,8 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
 	acpi_video_unregister();
 	intel_opregion_unregister(dev_priv);
 
+	i915_perf_unregister(dev_priv);
+
 	i915_teardown_sysfs(dev_priv);
 	i915_guc_unregister(dev_priv);
 	i915_debugfs_unregister(dev_priv);
@@ -1194,8 +1189,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (dev_priv)
 		ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
 	if (ret) {
-		dev_printk(KERN_ERR, &pdev->dev,
-			   "[" DRM_NAME ":%s] allocation failed\n", __func__);
+		DRM_DEV_ERROR(&pdev->dev, "allocation failed\n");
 		kfree(dev_priv);
 		return ret;
 	}
@@ -1243,6 +1237,8 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	intel_runtime_pm_enable(dev_priv);
 
+	dev_priv->ipc_enabled = false;
+
 	/* Everything is in place, we can now relax! */
 	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
 		 driver.name, driver.major, driver.minor, driver.patchlevel,
@@ -1280,7 +1276,7 @@ void i915_driver_unload(struct drm_device *dev)
 
 	intel_fbdev_fini(dev);
 
-	if (i915_gem_suspend(dev))
+	if (i915_gem_suspend(dev_priv))
 		DRM_ERROR("failed to idle hardware; continuing to unload!\n");
 
 	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
@@ -1312,12 +1308,12 @@ void i915_driver_unload(struct drm_device *dev)
 
 	/* Free error state after interrupts are fully disabled. */
 	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-	i915_destroy_error_state(dev);
+	i915_destroy_error_state(dev_priv);
 
 	/* Flush any outstanding unpin_work. */
 	drain_workqueue(dev_priv->wq);
 
-	intel_guc_fini(dev);
+	intel_guc_fini(dev_priv);
 	i915_gem_fini(dev_priv);
 	intel_fbc_cleanup_cfb(dev_priv);
 
@@ -1422,14 +1418,14 @@ static int i915_drm_suspend(struct drm_device *dev)
 
 	pci_save_state(pdev);
 
-	error = i915_gem_suspend(dev);
+	error = i915_gem_suspend(dev_priv);
 	if (error) {
 		dev_err(&pdev->dev,
 			"GEM idle failed, resume might fail\n");
 		goto out;
 	}
 
-	intel_guc_suspend(dev);
+	intel_guc_suspend(dev_priv);
 
 	intel_display_suspend(dev);
 
@@ -1444,7 +1440,7 @@ static int i915_drm_suspend(struct drm_device *dev)
 
 	i915_gem_suspend_gtt_mappings(dev_priv);
 
-	i915_save_state(dev);
+	i915_save_state(dev_priv);
 
 	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
 	intel_opregion_notify_adapter(dev_priv, opregion_target_state);
@@ -1527,7 +1523,7 @@ out:
 	return ret;
 }
 
-int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
+static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
 {
 	int error;
 
@@ -1565,33 +1561,36 @@ static int i915_drm_resume(struct drm_device *dev)
 
 	intel_csr_ucode_resume(dev_priv);
 
-	i915_gem_resume(dev);
+	i915_gem_resume(dev_priv);
 
-	i915_restore_state(dev);
+	i915_restore_state(dev_priv);
 	intel_pps_unlock_regs_wa(dev_priv);
 	intel_opregion_setup(dev_priv);
 
-	intel_init_pch_refclk(dev);
-	drm_mode_config_reset(dev);
+	intel_init_pch_refclk(dev_priv);
 
 	/*
 	 * Interrupts have to be enabled before any batches are run. If not the
 	 * GPU will hang. i915_gem_init_hw() will initiate batches to
 	 * update/restore the context.
 	 *
+	 * drm_mode_config_reset() needs AUX interrupts.
+	 *
 	 * Modeset enabling in intel_modeset_init_hw() also needs working
 	 * interrupts.
 	 */
 	intel_runtime_pm_enable_interrupts(dev_priv);
 
+	drm_mode_config_reset(dev);
+
 	mutex_lock(&dev->struct_mutex);
-	if (i915_gem_init_hw(dev)) {
+	if (i915_gem_init_hw(dev_priv)) {
 		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
 		i915_gem_set_wedged(dev_priv);
 	}
 	mutex_unlock(&dev->struct_mutex);
 
-	intel_guc_resume(dev);
+	intel_guc_resume(dev_priv);
 
 	intel_modeset_init_hw(dev);
 
@@ -1715,7 +1714,7 @@ out:
 	return ret;
 }
 
-int i915_resume_switcheroo(struct drm_device *dev)
+static int i915_resume_switcheroo(struct drm_device *dev)
 {
 	int ret;
 
@@ -1764,11 +1763,10 @@ static void enable_engines_irq(struct drm_i915_private *dev_priv)
  */
 void i915_reset(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = &dev_priv->drm;
 	struct i915_gpu_error *error = &dev_priv->gpu_error;
 	int ret;
 
-	lockdep_assert_held(&dev->struct_mutex);
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
 	if (!test_and_clear_bit(I915_RESET_IN_PROGRESS, &error->flags))
 		return;
@@ -1778,6 +1776,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
 	error->reset_count++;
 
 	pr_notice("drm/i915: Resetting chip after gpu hang\n");
+	i915_gem_reset_prepare(dev_priv);
 
 	disable_engines_irq(dev_priv);
 	ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
@@ -1791,7 +1790,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
 		goto error;
 	}
 
-	i915_gem_reset(dev_priv);
+	i915_gem_reset_finish(dev_priv);
 	intel_overlay_reset(dev_priv);
 
 	/* Ok, now get things going again... */
@@ -1808,12 +1807,14 @@ void i915_reset(struct drm_i915_private *dev_priv)
 	 * was running at the time of the reset (i.e. we weren't VT
 	 * switched away).
 	 */
-	ret = i915_gem_init_hw(dev);
+	ret = i915_gem_init_hw(dev_priv);
 	if (ret) {
 		DRM_ERROR("Failed hw init on reset %d\n", ret);
 		goto error;
 	}
 
+	i915_queue_hangcheck(dev_priv);
+
 wakeup:
 	wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
 	return;
@@ -2320,7 +2321,7 @@ static int intel_runtime_suspend(struct device *kdev)
 	 */
 	i915_gem_runtime_suspend(dev_priv);
 
-	intel_guc_suspend(dev);
+	intel_guc_suspend(dev_priv);
 
 	intel_runtime_pm_disable_interrupts(dev_priv);
 
@@ -2405,10 +2406,10 @@ static int intel_runtime_resume(struct device *kdev)
 	if (intel_uncore_unclaimed_mmio(dev_priv))
 		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
 
-	intel_guc_resume(dev);
+	intel_guc_resume(dev_priv);
 
 	if (IS_GEN6(dev_priv))
-		intel_init_pch_refclk(dev);
+		intel_init_pch_refclk(dev_priv);
 
 	if (IS_BROXTON(dev_priv)) {
 		bxt_disable_dc9(dev_priv);
@@ -2565,6 +2566,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
 };
 
 static struct drm_driver driver = {
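Nearly all of the i915_drv.c changes above follow one mechanical refactor: internal helpers take struct drm_i915_private directly instead of re-deriving it from struct drm_device. A minimal before/after sketch of that pattern, with generic names that are not functions from the patch:

/* before: every helper re-derives dev_priv from the drm_device */
static void example_helper_old(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	/* ... operate on dev_priv ... */
}

/* after: callers pass dev_priv; the drm_device remains reachable as
 * &dev_priv->drm on the rare paths that still need it */
static void example_helper_new(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	/* ... operate on dev_priv ... */
}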
File diff suppressed because it is too large
@ -38,6 +38,7 @@
|
||||
#include <linux/reservation.h>
|
||||
#include <linux/shmem_fs.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/stop_machine.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/dma-buf.h>
|
||||
@ -69,7 +70,8 @@ insert_mappable_node(struct i915_ggtt *ggtt,
|
||||
{
|
||||
memset(node, 0, sizeof(*node));
|
||||
return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
|
||||
size, 0, -1,
|
||||
size, 0,
|
||||
I915_COLOR_UNEVICTABLE,
|
||||
0, ggtt->mappable_end,
|
||||
DRM_MM_SEARCH_DEFAULT,
|
||||
DRM_MM_CREATE_DEFAULT);
|
||||
@ -595,52 +597,25 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
|
||||
struct drm_i915_gem_pwrite *args,
|
||||
struct drm_file *file)
|
||||
{
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
void *vaddr = obj->phys_handle->vaddr + args->offset;
|
||||
char __user *user_data = u64_to_user_ptr(args->data_ptr);
|
||||
int ret;
|
||||
|
||||
/* We manually control the domain here and pretend that it
|
||||
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
|
||||
*/
|
||||
lockdep_assert_held(&obj->base.dev->struct_mutex);
|
||||
ret = i915_gem_object_wait(obj,
|
||||
I915_WAIT_INTERRUPTIBLE |
|
||||
I915_WAIT_LOCKED |
|
||||
I915_WAIT_ALL,
|
||||
MAX_SCHEDULE_TIMEOUT,
|
||||
to_rps_client(file));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
|
||||
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
|
||||
unsigned long unwritten;
|
||||
|
||||
/* The physical object once assigned is fixed for the lifetime
|
||||
* of the obj, so we can safely drop the lock and continue
|
||||
* to access vaddr.
|
||||
*/
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
unwritten = copy_from_user(vaddr, user_data, args->size);
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
if (unwritten) {
|
||||
ret = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
if (copy_from_user(vaddr, user_data, args->size))
|
||||
return -EFAULT;
|
||||
|
||||
drm_clflush_virt_range(vaddr, args->size);
|
||||
i915_gem_chipset_flush(to_i915(dev));
|
||||
i915_gem_chipset_flush(to_i915(obj->base.dev));
|
||||
|
||||
out:
|
||||
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void *i915_gem_object_alloc(struct drm_device *dev)
|
||||
void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
|
||||
}
|
||||
|
||||
@ -652,7 +627,7 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
|
||||
|
||||
static int
|
||||
i915_gem_create(struct drm_file *file,
|
||||
struct drm_device *dev,
|
||||
struct drm_i915_private *dev_priv,
|
||||
uint64_t size,
|
||||
uint32_t *handle_p)
|
||||
{
|
||||
@ -665,7 +640,7 @@ i915_gem_create(struct drm_file *file,
|
||||
return -EINVAL;
|
||||
|
||||
/* Allocate the new object */
|
||||
obj = i915_gem_object_create(dev, size);
|
||||
obj = i915_gem_object_create(dev_priv, size);
|
||||
if (IS_ERR(obj))
|
||||
return PTR_ERR(obj);
|
||||
|
||||
@ -687,7 +662,7 @@ i915_gem_dumb_create(struct drm_file *file,
|
||||
/* have to work out size/pitch and return them */
|
||||
args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
|
||||
args->size = args->pitch * args->height;
|
||||
return i915_gem_create(file, dev,
|
||||
return i915_gem_create(file, to_i915(dev),
|
||||
args->size, &args->handle);
|
||||
}
|
||||
|
||||
@ -701,11 +676,12 @@ int
|
||||
i915_gem_create_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_i915_gem_create *args = data;
|
||||
|
||||
i915_gem_flush_free_objects(to_i915(dev));
|
||||
i915_gem_flush_free_objects(dev_priv);
|
||||
|
||||
return i915_gem_create(file, dev,
|
||||
return i915_gem_create(file, dev_priv,
|
||||
args->size, &args->handle);
|
||||
}
|
||||
|
||||
@ -1140,8 +1116,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
|
||||
return -ENOENT;
|
||||
|
||||
/* Bounds check source. */
|
||||
if (args->offset > obj->base.size ||
|
||||
args->size > obj->base.size - args->offset) {
|
||||
if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
@ -1454,8 +1429,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
|
||||
return -ENOENT;
|
||||
|
||||
/* Bounds check destination. */
|
||||
if (args->offset > obj->base.size ||
|
||||
args->size > obj->base.size - args->offset) {
|
||||
if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
|
||||
ret = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
@ -1517,7 +1491,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
|
||||
|
||||
list_for_each_entry(vma, &obj->vma_list, obj_link) {
|
||||
if (!i915_vma_is_ggtt(vma))
|
||||
continue;
|
||||
break;
|
||||
|
||||
if (i915_vma_is_active(vma))
|
||||
continue;
|
||||
@ -2098,7 +2072,8 @@ u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
|
||||
* Minimum alignment is 4k (GTT page size), but might be greater
|
||||
* if a fence register is needed for the object.
|
||||
*/
|
||||
if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
|
||||
if (INTEL_GEN(dev_priv) >= 4 ||
|
||||
(!fenced && (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))) ||
|
||||
tiling_mode == I915_TILING_NONE)
|
||||
return 4096;
|
||||
|
||||
@ -2115,23 +2090,21 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
|
||||
int err;
|
||||
|
||||
err = drm_gem_create_mmap_offset(&obj->base);
|
||||
if (!err)
|
||||
if (likely(!err))
|
||||
return 0;
|
||||
|
||||
/* We can idle the GPU locklessly to flush stale objects, but in order
|
||||
* to claim that space for ourselves, we need to take the big
|
||||
* struct_mutex to free the requests+objects and allocate our slot.
|
||||
*/
|
||||
err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
|
||||
if (err)
|
||||
return err;
|
||||
/* Attempt to reap some mmap space from dead objects */
|
||||
do {
|
||||
err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
|
||||
if (err)
|
||||
break;
|
||||
|
||||
err = i915_mutex_lock_interruptible(&dev_priv->drm);
|
||||
if (!err) {
|
||||
i915_gem_retire_requests(dev_priv);
|
||||
i915_gem_drain_freed_objects(dev_priv);
|
||||
err = drm_gem_create_mmap_offset(&obj->base);
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
}
|
||||
if (!err)
|
||||
break;
|
||||
|
||||
} while (flush_delayed_work(&dev_priv->gt.retire_work));
|
||||
|
||||
return err;
|
||||
}
|
||||
@ -2324,6 +2297,7 @@ static void i915_sg_trim(struct sg_table *orig_st)
|
||||
/* called before being DMA mapped, no need to copy sg->dma_* */
|
||||
new_sg = sg_next(new_sg);
|
||||
}
|
||||
GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
|
||||
|
||||
sg_free_table(orig_st);
|
||||
|
||||
@ -2645,35 +2619,34 @@ err_unlock:
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
static bool i915_context_is_banned(const struct i915_gem_context *ctx)
|
||||
static bool ban_context(const struct i915_gem_context *ctx)
|
||||
{
|
||||
unsigned long elapsed;
|
||||
|
||||
if (ctx->hang_stats.banned)
|
||||
return true;
|
||||
|
||||
elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
|
||||
if (ctx->hang_stats.ban_period_seconds &&
|
||||
elapsed <= ctx->hang_stats.ban_period_seconds) {
|
||||
DRM_DEBUG("context hanging too fast, banning!\n");
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
return (i915_gem_context_is_bannable(ctx) &&
|
||||
ctx->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD);
|
||||
}
|
||||
|
||||
static void i915_set_reset_status(struct i915_gem_context *ctx,
|
||||
const bool guilty)
|
||||
static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
|
||||
{
|
||||
struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
|
||||
ctx->guilty_count++;
|
||||
ctx->ban_score += CONTEXT_SCORE_GUILTY;
|
||||
if (ban_context(ctx))
|
||||
i915_gem_context_set_banned(ctx);
|
||||
|
||||
if (guilty) {
|
||||
hs->banned = i915_context_is_banned(ctx);
|
||||
hs->batch_active++;
|
||||
hs->guilty_ts = get_seconds();
|
||||
} else {
|
||||
hs->batch_pending++;
|
||||
}
|
||||
DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
|
||||
ctx->name, ctx->ban_score,
|
||||
yesno(i915_gem_context_is_banned(ctx)));
|
||||
|
||||
if (!i915_gem_context_is_banned(ctx) || IS_ERR_OR_NULL(ctx->file_priv))
|
||||
return;
|
||||
|
||||
ctx->file_priv->context_bans++;
|
||||
DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
|
||||
ctx->name, ctx->file_priv->context_bans);
|
||||
}
|
||||
|
||||
static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
|
||||
{
|
||||
ctx->active_count++;
|
||||
}
|
||||
|
||||
struct drm_i915_gem_request *
|
||||
@ -2716,10 +2689,15 @@ static void reset_request(struct drm_i915_gem_request *request)
|
||||
memset(vaddr + head, 0, request->postfix - head);
|
||||
}
|
||||
|
||||
void i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
i915_gem_revoke_fences(dev_priv);
|
||||
}
|
||||
|
||||
static void i915_gem_reset_engine(struct intel_engine_cs *engine)
|
||||
{
|
||||
struct drm_i915_gem_request *request;
|
||||
struct i915_gem_context *incomplete_ctx;
|
||||
struct i915_gem_context *hung_ctx;
|
||||
struct intel_timeline *timeline;
|
||||
unsigned long flags;
|
||||
bool ring_hung;
|
||||
@ -2731,11 +2709,21 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
|
||||
if (!request)
|
||||
return;
|
||||
|
||||
ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
|
||||
if (engine->hangcheck.seqno != intel_engine_get_seqno(engine))
|
||||
ring_hung = false;
|
||||
hung_ctx = request->ctx;
|
||||
|
||||
ring_hung = engine->hangcheck.stalled;
|
||||
if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
|
||||
DRM_DEBUG_DRIVER("%s pardoned, was guilty? %s\n",
|
||||
engine->name,
|
||||
yesno(ring_hung));
|
||||
ring_hung = false;
|
||||
}
|
||||
|
||||
if (ring_hung)
|
||||
i915_gem_context_mark_guilty(hung_ctx);
|
||||
else
|
||||
i915_gem_context_mark_innocent(hung_ctx);
|
||||
|
||||
i915_set_reset_status(request->ctx, ring_hung);
|
||||
if (!ring_hung)
|
||||
return;
|
||||
|
||||
@ -2745,6 +2733,10 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
|
||||
/* Setup the CS to resume from the breadcrumb of the hung request */
|
||||
engine->reset_hw(engine, request);
|
||||
|
||||
/* If this context is now banned, skip all of its pending requests. */
|
||||
if (!i915_gem_context_is_banned(hung_ctx))
|
||||
return;
|
||||
|
||||
/* Users of the default context do not rely on logical state
|
||||
* preserved between batches. They have to emit full state on
|
||||
* every batch and so it is safe to execute queued requests following
|
||||
@ -2753,17 +2745,16 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
|
||||
* Other contexts preserve state, now corrupt. We want to skip all
|
||||
* queued requests that reference the corrupt context.
|
||||
*/
|
||||
incomplete_ctx = request->ctx;
|
||||
if (i915_gem_context_is_default(incomplete_ctx))
|
||||
if (i915_gem_context_is_default(hung_ctx))
|
||||
return;
|
||||
|
||||
timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
|
||||
timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);
|
||||
|
||||
spin_lock_irqsave(&engine->timeline->lock, flags);
|
||||
spin_lock(&timeline->lock);
|
||||
|
||||
list_for_each_entry_continue(request, &engine->timeline->requests, link)
|
||||
if (request->ctx == incomplete_ctx)
|
||||
if (request->ctx == hung_ctx)
|
||||
reset_request(request);
|
||||
|
||||
list_for_each_entry(request, &timeline->requests, link)
|
||||
@ -2773,7 +2764,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
|
||||
spin_unlock_irqrestore(&engine->timeline->lock, flags);
|
||||
}
|
||||
|
||||
void i915_gem_reset(struct drm_i915_private *dev_priv)
|
||||
void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_engine_cs *engine;
|
||||
enum intel_engine_id id;
|
||||
@ -2803,6 +2794,12 @@ static void nop_submit_request(struct drm_i915_gem_request *request)
|
||||
|
||||
static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
|
||||
{
|
||||
/* We need to be sure that no thread is running the old callback as
|
||||
* we install the nop handler (otherwise we would submit a request
|
||||
* to hardware that will never complete). In order to prevent this
|
||||
* race, we wait until the machine is idle before making the swap
|
||||
* (using stop_machine()).
|
||||
*/
|
||||
engine->submit_request = nop_submit_request;
|
||||
|
||||
/* Mark all pending requests as complete so that any concurrent
|
||||
@ -2833,20 +2830,29 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
|
||||
}
|
||||
}
|
||||
|
||||
void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
|
||||
static int __i915_gem_set_wedged_BKL(void *data)
|
||||
{
|
||||
struct drm_i915_private *i915 = data;
|
||||
struct intel_engine_cs *engine;
|
||||
enum intel_engine_id id;
|
||||
|
||||
for_each_engine(engine, i915, id)
|
||||
i915_gem_cleanup_engine(engine);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
lockdep_assert_held(&dev_priv->drm.struct_mutex);
|
||||
set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
|
||||
|
||||
i915_gem_context_lost(dev_priv);
|
||||
for_each_engine(engine, dev_priv, id)
|
||||
i915_gem_cleanup_engine(engine);
|
||||
mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
|
||||
stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
|
||||
|
||||
i915_gem_context_lost(dev_priv);
|
||||
i915_gem_retire_requests(dev_priv);
|
||||
|
||||
mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -3532,7 +3538,7 @@ err_unpin_display:
|
||||
void
|
||||
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
|
||||
{
|
||||
lockdep_assert_held(&vma->vm->dev->struct_mutex);
|
||||
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
|
||||
|
||||
if (WARN_ON(vma->obj->pin_display == 0))
|
||||
return;
|
||||
@ -3966,14 +3972,9 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
|
||||
.put_pages = i915_gem_object_put_pages_gtt,
|
||||
};
|
||||
|
||||
/* Note we don't consider signbits :| */
|
||||
#define overflows_type(x, T) \
|
||||
(sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))

struct drm_i915_gem_object *
i915_gem_object_create(struct drm_device *dev, u64 size)
i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
struct address_space *mapping;
gfp_t mask;
@ -3990,16 +3991,16 @@ i915_gem_object_create(struct drm_device *dev, u64 size)
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);

obj = i915_gem_object_alloc(dev);
obj = i915_gem_object_alloc(dev_priv);
if (obj == NULL)
return ERR_PTR(-ENOMEM);

ret = drm_gem_object_init(dev, &obj->base, size);
ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size);
if (ret)
goto fail;

mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
if (IS_CRESTLINE(dev_priv) || IS_BROADWATER(dev_priv)) {
if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
/* 965gm cannot relocate objects above 4GiB. */
mask &= ~__GFP_HIGHMEM;
mask |= __GFP_DMA32;
@ -4192,12 +4193,12 @@ static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
enum intel_engine_id id;

for_each_engine(engine, dev_priv, id)
GEM_BUG_ON(engine->last_context != dev_priv->kernel_context);
GEM_BUG_ON(!i915_gem_context_is_kernel(engine->last_retired_context));
}

int i915_gem_suspend(struct drm_device *dev)
int i915_gem_suspend(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_device *dev = &dev_priv->drm;
int ret;

intel_suspend_gt_powersave(dev_priv);
@ -4231,8 +4232,14 @@ int i915_gem_suspend(struct drm_device *dev)

cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_delayed_work_sync(&dev_priv->gt.retire_work);
flush_delayed_work(&dev_priv->gt.idle_work);
flush_work(&dev_priv->mm.free_work);

/* As the idle_work is rearming if it detects a race, play safe and
 * repeat the flush until it is definitely idle.
 */
while (flush_delayed_work(&dev_priv->gt.idle_work))
;

i915_gem_drain_freed_objects(dev_priv);

/* Assert that we successfully flushed all the work and
 * reset the GPU back to its idle, low power state.
@ -4271,9 +4278,9 @@ err:
return ret;
}

void i915_gem_resume(struct drm_device *dev)
void i915_gem_resume(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_device *dev = &dev_priv->drm;

WARN_ON(dev_priv->gt.awake);

@ -4338,9 +4345,8 @@ static void init_unused_rings(struct drm_i915_private *dev_priv)
}

int
i915_gem_init_hw(struct drm_device *dev)
i915_gem_init_hw(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret;
@ -4394,10 +4400,10 @@ i915_gem_init_hw(struct drm_device *dev)
goto out;
}

intel_mocs_init_l3cc_table(dev);
intel_mocs_init_l3cc_table(dev_priv);

/* We can't enable contexts until all firmware is loaded */
ret = intel_guc_setup(dev);
ret = intel_guc_setup(dev_priv);
if (ret)
goto out;

@ -4427,12 +4433,11 @@ bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
return true;
}

int i915_gem_init(struct drm_device *dev)
int i915_gem_init(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = to_i915(dev);
int ret;

mutex_lock(&dev->struct_mutex);
mutex_lock(&dev_priv->drm.struct_mutex);

if (!i915.enable_execlists) {
dev_priv->gt.resume = intel_legacy_submission_resume;
@ -4456,15 +4461,15 @@ int i915_gem_init(struct drm_device *dev)
if (ret)
goto out_unlock;

ret = i915_gem_context_init(dev);
ret = i915_gem_context_init(dev_priv);
if (ret)
goto out_unlock;

ret = intel_engines_init(dev);
ret = intel_engines_init(dev_priv);
if (ret)
goto out_unlock;

ret = i915_gem_init_hw(dev);
ret = i915_gem_init_hw(dev_priv);
if (ret == -EIO) {
/* Allow engine initialisation to fail by marking the GPU as
 * wedged. But we only want to do this where the GPU is angry,
@ -4477,15 +4482,14 @@ int i915_gem_init(struct drm_device *dev)

out_unlock:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev_priv->drm.struct_mutex);

return ret;
}

void
i915_gem_cleanup_engines(struct drm_device *dev)
i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
enum intel_engine_id id;

@ -4501,8 +4505,9 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
!IS_CHERRYVIEW(dev_priv))
dev_priv->num_fence_regs = 32;
else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
IS_I945GM(dev_priv) || IS_G33(dev_priv))
else if (INTEL_INFO(dev_priv)->gen >= 4 ||
IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
dev_priv->num_fence_regs = 16;
else
dev_priv->num_fence_regs = 8;
@ -4525,9 +4530,8 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
}

int
i915_gem_load_init(struct drm_device *dev)
i915_gem_load_init(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = to_i915(dev);
int err = -ENOMEM;

dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
@ -4596,10 +4600,8 @@ err_out:
return err;
}

void i915_gem_load_cleanup(struct drm_device *dev)
void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = to_i915(dev);

WARN_ON(!llist_empty(&dev_priv->mm.free_list));

mutex_lock(&dev_priv->drm.struct_mutex);
@ -4750,7 +4752,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
const void *data, size_t size)
{
struct drm_i915_gem_object *obj;
@ -4758,7 +4760,7 @@ i915_gem_object_create_from_data(struct drm_device *dev,
size_t bytes;
int ret;

obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
if (IS_ERR(obj))
return obj;

@ -27,8 +27,10 @@

#ifdef CONFIG_DRM_I915_DEBUG_GEM
#define GEM_BUG_ON(expr) BUG_ON(expr)
#define GEM_WARN_ON(expr) WARN_ON(expr)
#else
#define GEM_BUG_ON(expr) do { } while (0)
#define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#define GEM_WARN_ON(expr) (BUILD_BUG_ON_INVALID(expr), 0)
#endif
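
The new !DEBUG definitions keep the asserted expression visible to the compiler instead of discarding it, so type errors and typos in the expression still break the build even when no runtime check is emitted. A hedged userspace sketch of the same compile-away trick (DEBUG_GEM and the harness are made-up names, not the kernel's):

    #include <assert.h>
    #include <stdio.h>

    /* Evaluate expr only inside sizeof(): type-checked, never executed. */
    #define CHECK_EXPR(expr) ((void)sizeof((long)(expr)))

    #ifdef DEBUG_GEM
    #define GEM_BUG_ON(expr) assert(!(expr))
    #else
    #define GEM_BUG_ON(expr) CHECK_EXPR(expr)
    #endif

    int main(void)
    {
        int pin_count = 1;

        GEM_BUG_ON(pin_count < 0); /* free in release builds */
        printf("pin_count=%d\n", pin_count);
        return 0;
    }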

#define I915_NUM_ENGINES 5

@ -141,7 +141,7 @@ void i915_gem_context_free(struct kref *ctx_ref)

lockdep_assert_held(&ctx->i915->drm.struct_mutex);
trace_i915_context_free(ctx);
GEM_BUG_ON(!ctx->closed);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

i915_ppgtt_put(ctx->ppgtt);

@ -166,15 +166,15 @@ void i915_gem_context_free(struct kref *ctx_ref)
kfree(ctx);
}

struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
static struct drm_i915_gem_object *
alloc_context_obj(struct drm_i915_private *dev_priv, u64 size)
{
struct drm_i915_gem_object *obj;
int ret;

lockdep_assert_held(&dev->struct_mutex);
lockdep_assert_held(&dev_priv->drm.struct_mutex);

obj = i915_gem_object_create(dev, size);
obj = i915_gem_object_create(dev_priv, size);
if (IS_ERR(obj))
return obj;

@ -193,7 +193,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
 * This is only applicable for Ivy Bridge devices since
 * later platforms don't have L3 control bits in the PTE.
 */
if (IS_IVYBRIDGE(to_i915(dev))) {
if (IS_IVYBRIDGE(dev_priv)) {
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret)) {
@ -228,8 +228,7 @@ static void i915_ppgtt_close(struct i915_address_space *vm)

static void context_close(struct i915_gem_context *ctx)
{
GEM_BUG_ON(ctx->closed);
ctx->closed = true;
i915_gem_context_set_closed(ctx);
if (ctx->ppgtt)
i915_ppgtt_close(&ctx->ppgtt->base);
ctx->file_priv = ERR_PTR(-EBADF);
@ -259,10 +258,9 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
}

static struct i915_gem_context *
__create_hw_context(struct drm_device *dev,
__create_hw_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_gem_context *ctx;
int ret;

@ -286,8 +284,7 @@ __create_hw_context(struct drm_device *dev,
struct drm_i915_gem_object *obj;
struct i915_vma *vma;

obj = i915_gem_alloc_context_obj(dev,
dev_priv->hw_context_size);
obj = alloc_context_obj(dev_priv, dev_priv->hw_context_size);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto err_out;
@ -331,12 +328,21 @@ __create_hw_context(struct drm_device *dev,
 * is no remap info, it will be a NOP. */
ctx->remap_slice = ALL_L3_SLICES(dev_priv);

ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
i915_gem_context_set_bannable(ctx);
ctx->ring_size = 4 * PAGE_SIZE;
ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);

/* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
 * present or not in use we still need a small bias as ring wraparound
 * at offset 0 sometimes hangs. No idea why.
 */
if (HAS_GUC(dev_priv) && i915.enable_guc_loading)
ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
else
ctx->ggtt_offset_bias = 4096;

return ctx;

err_pid:
@ -353,21 +359,21 @@ err_out:
 * well as an idle case.
 */
static struct i915_gem_context *
i915_gem_create_context(struct drm_device *dev,
i915_gem_create_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
{
struct i915_gem_context *ctx;

lockdep_assert_held(&dev->struct_mutex);
lockdep_assert_held(&dev_priv->drm.struct_mutex);

ctx = __create_hw_context(dev, file_priv);
ctx = __create_hw_context(dev_priv, file_priv);
if (IS_ERR(ctx))
return ctx;

if (USES_FULL_PPGTT(dev)) {
if (USES_FULL_PPGTT(dev_priv)) {
struct i915_hw_ppgtt *ppgtt;

ppgtt = i915_ppgtt_create(to_i915(dev), file_priv, ctx->name);
ppgtt = i915_ppgtt_create(dev_priv, file_priv, ctx->name);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@ -407,35 +413,24 @@ i915_gem_context_create_gvt(struct drm_device *dev)
if (ret)
return ERR_PTR(ret);

ctx = i915_gem_create_context(dev, NULL);
ctx = __create_hw_context(to_i915(dev), NULL);
if (IS_ERR(ctx))
goto out;

ctx->execlists_force_single_submission = true;
ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_set_closed(ctx); /* not user accessible */
i915_gem_context_clear_bannable(ctx);
i915_gem_context_set_force_single_submission(ctx);
ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */

GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
out:
mutex_unlock(&dev->struct_mutex);
return ctx;
}

static void i915_gem_context_unpin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
int i915_gem_context_init(struct drm_i915_private *dev_priv)
{
if (i915.enable_execlists) {
intel_lr_context_unpin(ctx, engine);
} else {
struct intel_context *ce = &ctx->engine[engine->id];

if (ce->state)
i915_vma_unpin(ce->state);

i915_gem_context_put(ctx);
}
}

int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_gem_context *ctx;

/* Init should only be called once per module load. Eventually the
@ -469,16 +464,19 @@ int i915_gem_context_init(struct drm_device *dev)
}
}

ctx = i915_gem_create_context(dev, NULL);
ctx = i915_gem_create_context(dev_priv, NULL);
if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default global context (error %ld)\n",
PTR_ERR(ctx));
return PTR_ERR(ctx);
}

i915_gem_context_clear_bannable(ctx);
ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */
dev_priv->kernel_context = ctx;

GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

DRM_DEBUG_DRIVER("%s context support initialized\n",
i915.enable_execlists ? "LR" :
dev_priv->hw_context_size ? "HW" : "fake");
@ -493,10 +491,13 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
lockdep_assert_held(&dev_priv->drm.struct_mutex);

for_each_engine(engine, dev_priv, id) {
if (engine->last_context) {
i915_gem_context_unpin(engine->last_context, engine);
engine->last_context = NULL;
}
engine->legacy_active_context = NULL;

if (!engine->last_retired_context)
continue;

engine->context_unpin(engine, engine->last_retired_context);
engine->last_retired_context = NULL;
}

/* Force the GPU state to be restored on enabling */
@ -522,12 +523,13 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
}
}

void i915_gem_context_fini(struct drm_device *dev)
void i915_gem_context_fini(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_gem_context *dctx = dev_priv->kernel_context;

lockdep_assert_held(&dev->struct_mutex);
lockdep_assert_held(&dev_priv->drm.struct_mutex);

GEM_BUG_ON(!i915_gem_context_is_kernel(dctx));

context_close(dctx);
dev_priv->kernel_context = NULL;
@ -551,9 +553,11 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
idr_init(&file_priv->context_idr);

mutex_lock(&dev->struct_mutex);
ctx = i915_gem_create_context(dev, file_priv);
ctx = i915_gem_create_context(to_i915(dev), file_priv);
mutex_unlock(&dev->struct_mutex);

GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

if (IS_ERR(ctx)) {
idr_destroy(&file_priv->context_idr);
return PTR_ERR(ctx);
@ -719,7 +723,7 @@ static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
return false;

return to == engine->last_context;
return to == engine->legacy_active_context;
}

static bool
@ -731,11 +735,11 @@ needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
return false;

/* Always load the ppgtt on first use */
if (!engine->last_context)
if (!engine->legacy_active_context)
return true;

/* Same context without new entries, skip */
if (engine->last_context == to &&
if (engine->legacy_active_context == to &&
!(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
return false;

@ -765,57 +769,20 @@ needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
return false;
}

struct i915_vma *
i915_gem_context_pin_legacy(struct i915_gem_context *ctx,
unsigned int flags)
{
struct i915_vma *vma = ctx->engine[RCS].state;
int ret;

/* Clear this page out of any CPU caches for coherent swap-in/out.
 * We only want to do this on the first bind so that we do not stall
 * on an active context (which by nature is already on the GPU).
 */
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
if (ret)
return ERR_PTR(ret);
}

ret = i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | flags);
if (ret)
return ERR_PTR(ret);

return vma;
}

static int do_rcs_switch(struct drm_i915_gem_request *req)
{
struct i915_gem_context *to = req->ctx;
struct intel_engine_cs *engine = req->engine;
struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
struct i915_vma *vma;
struct i915_gem_context *from;
struct i915_gem_context *from = engine->legacy_active_context;
u32 hw_flags;
int ret, i;

GEM_BUG_ON(engine->id != RCS);

if (skip_rcs_switch(ppgtt, engine, to))
return 0;

/* Trying to pin first makes error handling easier. */
vma = i915_gem_context_pin_legacy(to, 0);
if (IS_ERR(vma))
return PTR_ERR(vma);

/*
 * Pin can switch back to the default context if we end up calling into
 * evict_everything - as a last ditch gtt defrag effort that also
 * switches to the default context. Hence we need to reload from here.
 *
 * XXX: Doing so is painfully broken!
 */
from = engine->last_context;

if (needs_pd_load_pre(ppgtt, engine, to)) {
/* Older GENs and non render rings still want the load first,
 * "PP_DCLV followed by PP_DIR_BASE register through Load
@ -824,7 +791,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
trace_switch_mm(engine, to);
ret = ppgtt->switch_mm(ppgtt, req);
if (ret)
goto err;
return ret;
}

if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
@ -841,29 +808,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
ret = mi_set_context(req, hw_flags);
if (ret)
goto err;
}
return ret;

/* The backing object for the context is done after switching to the
 * *next* context. Therefore we cannot retire the previous context until
 * the next context has already started running. In fact, the below code
 * is a bit suboptimal because the retiring can occur simply after the
 * MI_SET_CONTEXT instead of when the next seqno has completed.
 */
if (from != NULL) {
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 * whole damn pipeline, we don't need to explicitly mark the
 * object dirty. The only exception is that the context must be
 * correct in case the object gets swapped out. Ideally we'd be
 * able to defer doing this until we know the object would be
 * swapped, but there is no way to do that yet.
 */
i915_vma_move_to_active(from->engine[RCS].state, req, 0);
/* state is kept alive until the next request */
i915_vma_unpin(from->engine[RCS].state);
i915_gem_context_put(from);
engine->legacy_active_context = to;
}
engine->last_context = i915_gem_context_get(to);

/* GEN8 does *not* require an explicit reload if the PDPs have been
 * setup, and we do not wish to move them.
@ -904,10 +852,6 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
}

return 0;

err:
i915_vma_unpin(vma);
return ret;
}

/**
@ -947,12 +891,6 @@ int i915_switch_context(struct drm_i915_gem_request *req)
ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
}

if (to != engine->last_context) {
if (engine->last_context)
i915_gem_context_put(engine->last_context);
engine->last_context = i915_gem_context_get(to);
}

return 0;
}

@ -1003,6 +941,11 @@ static bool contexts_enabled(struct drm_device *dev)
return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
return file_priv->context_bans > I915_MAX_CLIENT_CONTEXT_BANS;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
@ -1017,17 +960,27 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (args->pad != 0)
return -EINVAL;

if (client_is_banned(file_priv)) {
DRM_DEBUG("client %s[%d] banned from creating ctx\n",
current->comm,
pid_nr(get_task_pid(current, PIDTYPE_PID)));

return -EIO;
}

ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;

ctx = i915_gem_create_context(dev, file_priv);
ctx = i915_gem_create_context(to_i915(dev), file_priv);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ctx))
return PTR_ERR(ctx);

GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

args->ctx_id = ctx->user_handle;
DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
DRM_DEBUG("HW context %d created\n", args->ctx_id);

return 0;
}
@ -1060,7 +1013,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
context_close(ctx);
mutex_unlock(&dev->struct_mutex);

DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
DRM_DEBUG("HW context %d destroyed\n", args->ctx_id);
return 0;
}

@ -1085,7 +1038,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->size = 0;
switch (args->param) {
case I915_CONTEXT_PARAM_BAN_PERIOD:
args->value = ctx->hang_stats.ban_period_seconds;
ret = -EINVAL;
break;
case I915_CONTEXT_PARAM_NO_ZEROMAP:
args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
@ -1099,7 +1052,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->value = to_i915(dev)->ggtt.base.total;
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE);
args->value = i915_gem_context_no_error_capture(ctx);
break;
case I915_CONTEXT_PARAM_BANNABLE:
args->value = i915_gem_context_is_bannable(ctx);
break;
default:
ret = -EINVAL;
@ -1130,13 +1086,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,

switch (args->param) {
case I915_CONTEXT_PARAM_BAN_PERIOD:
if (args->size)
ret = -EINVAL;
else if (args->value < ctx->hang_stats.ban_period_seconds &&
!capable(CAP_SYS_ADMIN))
ret = -EPERM;
else
ctx->hang_stats.ban_period_seconds = args->value;
ret = -EINVAL;
break;
case I915_CONTEXT_PARAM_NO_ZEROMAP:
if (args->size) {
@ -1147,14 +1097,22 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
}
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
if (args->size) {
if (args->size)
ret = -EINVAL;
} else {
if (args->value)
ctx->flags |= CONTEXT_NO_ERROR_CAPTURE;
else
ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE;
}
else if (args->value)
i915_gem_context_set_no_error_capture(ctx);
else
i915_gem_context_clear_no_error_capture(ctx);
break;
case I915_CONTEXT_PARAM_BANNABLE:
if (args->size)
ret = -EINVAL;
else if (!capable(CAP_SYS_ADMIN) && !args->value)
ret = -EPERM;
else if (args->value)
i915_gem_context_set_bannable(ctx);
else
i915_gem_context_clear_bannable(ctx);
break;
default:
ret = -EINVAL;
@ -1170,7 +1128,6 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_reset_stats *args = data;
struct i915_ctx_hang_stats *hs;
struct i915_gem_context *ctx;
int ret;

@ -1189,15 +1146,14 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(ctx);
}
hs = &ctx->hang_stats;

if (capable(CAP_SYS_ADMIN))
args->reset_count = i915_reset_count(&dev_priv->gpu_error);
else
args->reset_count = 0;

args->batch_active = hs->batch_active;
args->batch_pending = hs->batch_pending;
args->batch_active = ctx->guilty_count;
args->batch_pending = ctx->active_count;

mutex_unlock(&dev->struct_mutex);

drivers/gpu/drm/i915/i915_gem_context.h (new file, 277 lines)
@ -0,0 +1,277 @@
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/

#ifndef __I915_GEM_CONTEXT_H__
#define __I915_GEM_CONTEXT_H__

#include <linux/bitops.h>
#include <linux/list.h>

struct pid;

struct drm_device;
struct drm_file;

struct drm_i915_private;
struct drm_i915_file_private;
struct i915_hw_ppgtt;
struct i915_vma;
struct intel_ring;

#define DEFAULT_CONTEXT_HANDLE 0

/**
* struct i915_gem_context - client state
*
* The struct i915_gem_context represents the combined view of the driver and
* logical hardware state for a particular client.
*/
struct i915_gem_context {
/** i915: i915 device backpointer */
struct drm_i915_private *i915;

/** file_priv: owning file descriptor */
struct drm_i915_file_private *file_priv;

/**
* @ppgtt: unique address space (GTT)
*
* In full-ppgtt mode, each context has its own address space ensuring
* complete separation of one client from all others.
*
* In other modes, this is a NULL pointer with the expectation that
* the caller uses the shared global GTT.
*/
struct i915_hw_ppgtt *ppgtt;

/**
* @pid: process id of creator
*
* Note that who created the context may not be the principal user,
* as the context may be shared across a local socket. However,
* that should only affect the default context, all contexts created
* explicitly by the client are expected to be isolated.
*/
struct pid *pid;

/**
* @name: arbitrary name
*
* A name is constructed for the context from the creator's process
* name, pid and user handle in order to uniquely identify the
* context in messages.
*/
const char *name;

/** link: place with &drm_i915_private.context_list */
struct list_head link;

/**
* @ref: reference count
*
* A reference to a context is held by both the client who created it
* and on each request submitted to the hardware using the request
* (to ensure the hardware has access to the state until it has
* finished all pending writes). See i915_gem_context_get() and
* i915_gem_context_put() for access.
*/
struct kref ref;

/**
* @flags: small set of booleans
*/
unsigned long flags;
#define CONTEXT_NO_ZEROMAP BIT(0)
#define CONTEXT_NO_ERROR_CAPTURE 1
#define CONTEXT_CLOSED 2
#define CONTEXT_BANNABLE 3
#define CONTEXT_BANNED 4
#define CONTEXT_FORCE_SINGLE_SUBMISSION 5
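
Note the mixed encodings here: CONTEXT_NO_ZEROMAP is a ready-made mask (BIT(0)) tested with a plain bitwise AND, while the remaining defines are bit numbers fed to test_bit()/__set_bit(), as the inline helpers further down show. A small standalone sketch of the two conventions (names are illustrative):

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    #define CTX_NO_ZEROMAP BIT(0) /* a mask: combine with '&' and '|' */
    #define CTX_CLOSED     2      /* a bit number: wrap in BIT() first */

    int main(void)
    {
        unsigned long flags = 0;

        flags |= CTX_NO_ZEROMAP;  /* mask style */
        flags |= BIT(CTX_CLOSED); /* bit-number style */

        printf("no_zeromap=%d closed=%d\n",
               !!(flags & CTX_NO_ZEROMAP), !!(flags & BIT(CTX_CLOSED)));
        return 0;
    }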

/**
* @hw_id: - unique identifier for the context
*
* The hardware needs to uniquely identify the context for a few
* functions like fault reporting, PASID, scheduling. The
* &drm_i915_private.context_hw_ida is used to assign a unique
* id for the lifetime of the context.
*/
unsigned int hw_id;

/**
* @user_handle: userspace identifier
*
* A unique per-file identifier is generated from
* &drm_i915_file_private.contexts.
*/
u32 user_handle;

/**
* @priority: execution and service priority
*
* All clients are equal, but some are more equal than others!
*
* Requests from a context with a greater (more positive) value of
* @priority will be executed before those with a lower @priority
* value, forming a simple QoS.
*
* The &drm_i915_private.kernel_context is assigned the lowest priority.
*/
int priority;
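
A sketch of the scheduling rule the comment describes: sorting pending requests by descending @priority puts the kernel/idle context last. I915_PRIORITY_MIN's actual value is not shown in this hunk, so the constant below is a stand-in:

    #include <stdio.h>
    #include <stdlib.h>

    #define PRIORITY_MIN (-1023) /* stand-in for I915_PRIORITY_MIN */

    struct request { const char *ctx; int priority; };

    static int by_priority_desc(const void *a, const void *b)
    {
        /* greater (more positive) priority executes first */
        return ((const struct request *)b)->priority -
               ((const struct request *)a)->priority;
    }

    int main(void)
    {
        struct request queue[] = {
            { "kernel (idle task)", PRIORITY_MIN },
            { "client-a", 0 },
            { "client-b", 16 },
        };

        qsort(queue, 3, sizeof(queue[0]), by_priority_desc);
        for (int i = 0; i < 3; i++)
            printf("%d: %s (%d)\n", i, queue[i].ctx, queue[i].priority);
        return 0; /* client-b, client-a, kernel */
    }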

/** ggtt_alignment: alignment restriction for context objects */
u32 ggtt_alignment;
/** ggtt_offset_bias: placement restriction for context objects */
u32 ggtt_offset_bias;

/** engine: per-engine logical HW state */
struct intel_context {
struct i915_vma *state;
struct intel_ring *ring;
u32 *lrc_reg_state;
u64 lrc_desc;
int pin_count;
bool initialised;
} engine[I915_NUM_ENGINES];

/** ring_size: size for allocating the per-engine ring buffer */
u32 ring_size;
/** desc_template: invariant fields for the HW context descriptor */
u32 desc_template;

/** status_notifier: list of callbacks for context-switch changes */
struct atomic_notifier_head status_notifier;

/** guilty_count: How many times this context has caused a GPU hang. */
unsigned int guilty_count;
/**
* @active_count: How many times this context was active during a GPU
* hang, but did not cause it.
*/
unsigned int active_count;

#define CONTEXT_SCORE_GUILTY 10
#define CONTEXT_SCORE_BAN_THRESHOLD 40
/** ban_score: Accumulated score of all hangs caused by this context. */
int ban_score;

/** remap_slice: Bitmask of cache lines that need remapping */
u8 remap_slice;
};

static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
{
return test_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
{
GEM_BUG_ON(i915_gem_context_is_closed(ctx));
__set_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
{
return test_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
}

static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
{
__set_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
}

static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
{
__clear_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
}

static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
{
return test_bit(CONTEXT_BANNABLE, &ctx->flags);
}

static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
{
__set_bit(CONTEXT_BANNABLE, &ctx->flags);
}

static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
{
__clear_bit(CONTEXT_BANNABLE, &ctx->flags);
}

static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
{
return test_bit(CONTEXT_BANNED, &ctx->flags);
}

static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
{
__set_bit(CONTEXT_BANNED, &ctx->flags);
}

static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
{
return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}

static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
{
__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}

static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
{
return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}

static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
{
return !ctx->file_priv;
}
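
The helpers above follow one pattern per flag: an is_/set_/clear_ triple wrapping test_bit() and the non-atomic __set_bit()/__clear_bit() (the updates happen under struct_mutex), plus the NULL-file_priv test that identifies the kernel context. A compressed userspace mirror of that pattern, with simplified types:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define CTX_BANNABLE 3

    struct file_priv;
    struct ctx {
        struct file_priv *file_priv; /* NULL for the kernel context */
        unsigned long flags;
    };

    static inline bool ctx_is_bannable(const struct ctx *c)
    { return c->flags & (1UL << CTX_BANNABLE); }
    static inline void ctx_set_bannable(struct ctx *c)
    { c->flags |= 1UL << CTX_BANNABLE; }
    static inline void ctx_clear_bannable(struct ctx *c)
    { c->flags &= ~(1UL << CTX_BANNABLE); }
    static inline bool ctx_is_kernel(const struct ctx *c)
    { return !c->file_priv; }

    int main(void)
    {
        struct ctx kernel_ctx = { NULL, 0 };

        ctx_set_bannable(&kernel_ctx);
        ctx_clear_bannable(&kernel_ctx); /* kernel context is never banned */
        printf("kernel=%d bannable=%d\n",
               ctx_is_kernel(&kernel_ctx), ctx_is_bannable(&kernel_ctx));
        return 0;
    }

Keeping the accessors next to the bit definitions means call sites, such as the GVT and ioctl paths earlier in this diff, never touch the bit encoding directly.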

/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_i915_private *dev_priv);
void i915_gem_context_lost(struct drm_i915_private *dev_priv);
void i915_gem_context_fini(struct drm_i915_private *dev_priv);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct drm_i915_gem_request *req);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
void i915_gem_context_free(struct kref *ctx_ref);
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);

#endif /* !__I915_GEM_CONTEXT_H__ */

@ -278,7 +278,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,

get_dma_buf(dma_buf);

obj = i915_gem_object_alloc(dev);
obj = i915_gem_object_alloc(to_i915(dev));
if (obj == NULL) {
ret = -ENOMEM;
goto fail_detach;

@ -99,7 +99,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
u64 start, u64 end,
unsigned flags)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct drm_i915_private *dev_priv = vm->i915;
struct drm_mm_scan scan;
struct list_head eviction_list;
struct list_head *phases[] = {
@ -111,7 +111,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
struct drm_mm_node *node;
int ret;

lockdep_assert_held(&vm->dev->struct_mutex);
lockdep_assert_held(&vm->i915->drm.struct_mutex);
trace_i915_gem_evict(vm, min_size, alignment, flags);

/*
@ -132,7 +132,14 @@ i915_gem_evict_something(struct i915_address_space *vm,
start, end,
flags & PIN_HIGH ? DRM_MM_CREATE_TOP : 0);

if (flags & PIN_NONBLOCK)
/* Retire before we search the active list. Although we have
 * reasonable accuracy in our retirement lists, we may have
 * a stray pin (preventing eviction) that can only be resolved by
 * retiring.
 */
if (!(flags & PIN_NONBLOCK))
i915_gem_retire_requests(dev_priv);
else
phases[1] = NULL;
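
The phases[] idiom used here: the scan walks the inactive list, then the active list, via a NULL-terminated array of list heads, and PIN_NONBLOCK simply clears the second slot so active vmas are never considered. A generic sketch of the pattern:

    #include <stdio.h>

    struct list_head { const char *name; };

    int main(void)
    {
        struct list_head inactive = { "inactive" }, active = { "active" };
        struct list_head *phases[] = { &inactive, &active, NULL };
        struct list_head **phase;
        int nonblock = 1; /* pretend PIN_NONBLOCK was passed */

        if (nonblock)
            phases[1] = NULL; /* drop the active phase entirely */

        for (phase = phases; *phase; phase++)
            printf("scanning the %s list\n", (*phase)->name);
        return 0;
    }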

search_again:
@ -165,7 +172,7 @@ search_again:
 * back to userspace to give our workqueues time to
 * acquire our locks and unpin the old scanouts.
 */
return intel_has_pending_fb_unpin(vm->dev) ? -EAGAIN : -ENOSPC;
return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
}

/* Not everything in the GGTT is tracked via vma (otherwise we
@ -202,6 +209,7 @@ found:
}

/* Unbinding will emit any required flushes */
ret = 0;
while (!list_empty(&eviction_list)) {
vma = list_first_entry(&eviction_list,
struct i915_vma,
@ -221,45 +229,107 @@ found:
return ret;
}

int
i915_gem_evict_for_vma(struct i915_vma *target)
/**
 * i915_gem_evict_for_vma - Evict vmas to make room for binding a new one
 * @target: address space and range to evict for
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas that overlap the target node.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
{
struct drm_mm_node *node, *next;
LIST_HEAD(eviction_list);
struct drm_mm_node *node;
u64 start = target->node.start;
u64 end = start + target->node.size;
struct i915_vma *vma, *next;
bool check_color;
int ret = 0;

lockdep_assert_held(&target->vm->dev->struct_mutex);
lockdep_assert_held(&target->vm->i915->drm.struct_mutex);
trace_i915_gem_evict_vma(target, flags);

list_for_each_entry_safe(node, next,
&target->vm->mm.head_node.node_list,
node_list) {
struct i915_vma *vma;
int ret;
/* Retire before we search the active list. Although we have
 * reasonable accuracy in our retirement lists, we may have
 * a stray pin (preventing eviction) that can only be resolved by
 * retiring.
 */
if (!(flags & PIN_NONBLOCK))
i915_gem_retire_requests(target->vm->i915);

if (node->start + node->size <= target->node.start)
continue;
if (node->start >= target->node.start + target->node.size)
check_color = target->vm->mm.color_adjust;
if (check_color) {
/* Expand search to cover neighbouring guard pages (or lack!) */
if (start > target->vm->start)
start -= 4096;
if (end < target->vm->start + target->vm->total)
end += 4096;
}

drm_mm_for_each_node_in_range(node, &target->vm->mm, start, end) {
/* If we find any non-objects (!vma), we cannot evict them */
if (node->color == I915_COLOR_UNEVICTABLE) {
ret = -ENOSPC;
break;
}

vma = container_of(node, typeof(*vma), node);

if (i915_vma_is_pinned(vma)) {
if (!vma->exec_entry || i915_vma_pin_count(vma) > 1)
/* Object is pinned for some other use */
return -EBUSY;

/* We need to evict a buffer in the same batch */
if (vma->exec_entry->flags & EXEC_OBJECT_PINNED)
/* Overlapping fixed objects in the same batch */
return -EINVAL;

return -ENOSPC;
/* If we are using coloring to insert guard pages between
 * different cache domains within the address space, we have
 * to check whether the objects on either side of our range
 * abut and conflict. If they are in conflict, then we evict
 * those as well to make room for our guard pages.
 */
if (check_color) {
if (vma->node.start + vma->node.size == target->node.start) {
if (vma->node.color == target->node.color)
continue;
}
if (vma->node.start == target->node.start + target->node.size) {
if (vma->node.color == target->node.color)
continue;
}
}

ret = i915_vma_unbind(vma);
if (ret)
return ret;
if (flags & PIN_NONBLOCK &&
(i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
ret = -ENOSPC;
break;
}

/* Overlap of objects in the same batch? */
if (i915_vma_is_pinned(vma)) {
ret = -ENOSPC;
if (vma->exec_entry &&
vma->exec_entry->flags & EXEC_OBJECT_PINNED)
ret = -EINVAL;
break;
}

/* Never show fear in the face of dragons!
 *
 * We cannot directly remove this node from within this
 * iterator and as with i915_gem_evict_something() we employ
 * the vma pin_count in order to prevent the action of
 * unbinding one vma from freeing (by dropping its active
 * reference) another in our eviction list.
 */
__i915_vma_pin(vma);
list_add(&vma->exec_list, &eviction_list);
}

return 0;
list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
list_del_init(&vma->exec_list);
__i915_vma_unpin(vma);
if (ret == 0)
ret = i915_vma_unbind(vma);
}

return ret;
}
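
The shape of the rewritten loop: candidates are pinned and parked on a local list during the scan, then unpinned and unbound in a second pass, keeping the first error; the pin stops an unbind of one vma from dropping the last reference on another entry still queued. A simplified mirror of that two-phase structure:

    #include <stdio.h>

    struct vma { const char *name; int pin_count; int bound; };

    int main(void)
    {
        struct vma vmas[] = { { "a", 0, 1 }, { "b", 0, 1 } };
        struct vma *eviction_list[2];
        int n = 0, ret = 0, i;

        /* Phase 1: scan; pin each candidate so it stays valid on the list */
        for (i = 0; i < 2; i++) {
            vmas[i].pin_count++;           /* __i915_vma_pin() */
            eviction_list[n++] = &vmas[i];
        }

        /* Phase 2: unpin and unbind, remembering only the first error */
        for (i = 0; i < n; i++) {
            eviction_list[i]->pin_count--;   /* __i915_vma_unpin() */
            if (ret == 0)
                eviction_list[i]->bound = 0; /* i915_vma_unbind() */
        }

        for (i = 0; i < n; i++)
            printf("%s: bound=%d\n", eviction_list[i]->name,
                   eviction_list[i]->bound);
        return ret;
    }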

/**
@ -281,11 +351,11 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
struct i915_vma *vma, *next;
int ret;

lockdep_assert_held(&vm->dev->struct_mutex);
lockdep_assert_held(&vm->i915->drm.struct_mutex);
trace_i915_gem_evict_vm(vm);

if (do_idle) {
struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct drm_i915_private *dev_priv = vm->i915;

if (i915_is_ggtt(vm)) {
ret = i915_gem_switch_to_kernel_context(dev_priv);

@ -274,6 +274,7 @@ static void eb_destroy(struct eb_vmas *eb)
exec_list);
list_del_init(&vma->exec_list);
i915_gem_execbuffer_unreserve_vma(vma);
vma->exec_entry = NULL;
i915_vma_put(vma);
}
kfree(eb);
@ -437,7 +438,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
memset(&cache->node, 0, sizeof(cache->node));
ret = drm_mm_insert_node_in_range_generic
(&ggtt->base.mm, &cache->node,
4096, 0, 0,
4096, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
@ -1232,14 +1233,12 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *engine, const u32 ctx_id)
{
struct i915_gem_context *ctx;
struct i915_ctx_hang_stats *hs;

ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
if (IS_ERR(ctx))
return ctx;

hs = &ctx->hang_stats;
if (hs->banned) {
if (i915_gem_context_is_banned(ctx)) {
DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
return ERR_PTR(-EIO);
}
@ -1260,6 +1259,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
struct drm_i915_gem_object *obj = vma->obj;
const unsigned int idx = req->engine->id;

lockdep_assert_held(&req->i915->drm.struct_mutex);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

/* Add a reference if we're newly entering the active list.
@ -1715,7 +1715,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}

params->args_batch_start_offset = args->batch_start_offset;
if (intel_engine_needs_cmd_parser(engine) && args->batch_len) {
if (engine->needs_cmd_parser && args->batch_len) {
struct i915_vma *vma;

vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,

@ -290,7 +290,7 @@ i915_vma_put_fence(struct i915_vma *vma)
{
struct drm_i915_fence_reg *fence = vma->fence;

assert_rpm_wakelock_held(to_i915(vma->vm->dev));
assert_rpm_wakelock_held(vma->vm->i915);

if (!fence)
return 0;
@ -313,7 +313,7 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
}

/* Wait for completion of pending flips which consume fences */
if (intel_has_pending_fb_unpin(&dev_priv->drm))
if (intel_has_pending_fb_unpin(dev_priv))
return ERR_PTR(-EAGAIN);

return ERR_PTR(-EDEADLK);
@ -346,7 +346,7 @@ i915_vma_get_fence(struct i915_vma *vma)
/* Note that we revoke fences on runtime suspend. Therefore the user
 * must keep the device awake whilst using the fence.
 */
assert_rpm_wakelock_held(to_i915(vma->vm->dev));
assert_rpm_wakelock_held(vma->vm->i915);

/* Just update our place in the LRU if our fence is getting reused. */
if (vma->fence) {
@ -357,7 +357,7 @@ i915_vma_get_fence(struct i915_vma *vma)
return 0;
}
} else if (set) {
fence = fence_find(to_i915(vma->vm->dev));
fence = fence_find(vma->vm->i915);
if (IS_ERR(fence))
return PTR_ERR(fence);
} else

@ -366,6 +366,30 @@ i915_vma_get_fence(struct i915_vma *vma)
return fence_update(fence, set);
}

/**
 * i915_gem_revoke_fences - revoke fence state
 * @dev_priv: i915 device private
 *
 * Removes all GTT mmappings via the fence registers. This forces any user
 * of the fence to reacquire that fence before continuing with their access.
 * One use is during GPU reset where the fence register is lost and we need to
 * revoke concurrent userspace access via GTT mmaps until the hardware has been
 * reset and the fence registers have been restored.
 */
void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
{
int i;

lockdep_assert_held(&dev_priv->drm.struct_mutex);

for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];

if (fence->vma)
i915_gem_release_mmap(fence->vma->obj);
}
}
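
Per the kerneldoc above (and the "Revoke fenced GTT mmapings across GPU reset" entry in this pull), the expected ordering is revoke before the reset and restore after it, so userspace re-faults against the reprogrammed registers. A hedged userspace mirror of that revoke/restore shape, with placeholder types:

    #include <stdbool.h>
    #include <stdio.h>

    struct fence_reg { bool user_mapped; };

    /* Drop every cached mapping so the next user access must re-fault
     * and re-acquire the (by then restored) fence.
     */
    static void revoke_fences(struct fence_reg *regs, int n)
    {
        for (int i = 0; i < n; i++)
            regs[i].user_mapped = false;
    }

    static void restore_fences(struct fence_reg *regs, int n)
    {
        /* reprogram hardware state here; mappings refill on demand */
        (void)regs; (void)n;
    }

    int main(void)
    {
        struct fence_reg regs[4] = { { true }, { true }, { false }, { true } };

        revoke_fences(regs, 4);  /* before the GPU reset */
        restore_fences(regs, 4); /* after the GPU reset */
        printf("mapped[0]=%d\n", regs[0].user_mapped);
        return 0;
    }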

/**
 * i915_gem_restore_fences - restore fence state
 * @dev_priv: i915 device private
@ -512,8 +536,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
 */
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (IS_MOBILE(dev_priv) || (IS_GEN3(dev_priv) &&
!IS_G33(dev_priv))) {
} else if (IS_MOBILE(dev_priv) ||
IS_I915G(dev_priv) || IS_I945G(dev_priv)) {
uint32_t dcc;

/* On 9xx chipsets, channel interleave by the CPU is

@ -113,10 +113,9 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
bool has_full_ppgtt;
bool has_full_48bit_ppgtt;

has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6;
has_full_ppgtt = INTEL_GEN(dev_priv) >= 7;
has_full_48bit_ppgtt =
IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
has_full_ppgtt = dev_priv->info.has_full_ppgtt;
has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;

if (intel_vgpu_active(dev_priv)) {
/* emulation is too hard */
@ -372,7 +371,7 @@ static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
/* There are only a few exceptions for gen >= 6: chv and bxt.
 * And we are not sure about the latter so play safe for now.
 */
if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
drm_clflush_virt_range(vaddr, PAGE_SIZE);

kunmap_atomic(vaddr);
@ -380,7 +379,7 @@ static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)

#define kmap_px(px) kmap_page_dma(px_base(px))
#define kunmap_px(ppgtt, vaddr) \
kunmap_page_dma(to_i915((ppgtt)->base.dev), (vaddr))
kunmap_page_dma((ppgtt)->base.i915, (vaddr))

#define setup_px(dev_priv, px) setup_page_dma((dev_priv), px_base(px))
#define cleanup_px(dev_priv, px) cleanup_page_dma((dev_priv), px_base(px))
@ -470,7 +469,7 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC);

fill_px(to_i915(vm->dev), pt, scratch_pte);
fill_px(vm->i915, pt, scratch_pte);
}

static void gen6_initialize_pt(struct i915_address_space *vm,
@ -483,7 +482,7 @@ static void gen6_initialize_pt(struct i915_address_space *vm,
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, 0);

fill32_px(to_i915(vm->dev), pt, scratch_pte);
fill32_px(vm->i915, pt, scratch_pte);
}

static struct i915_page_directory *alloc_pd(struct drm_i915_private *dev_priv)
@ -531,7 +530,7 @@ static void gen8_initialize_pd(struct i915_address_space *vm,

scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);

fill_px(to_i915(vm->dev), pd, scratch_pde);
fill_px(vm->i915, pd, scratch_pde);
}

static int __pdp_init(struct drm_i915_private *dev_priv,
@ -612,7 +611,7 @@ static void gen8_initialize_pdp(struct i915_address_space *vm,

scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

fill_px(to_i915(vm->dev), pdp, scratch_pdpe);
fill_px(vm->i915, pdp, scratch_pdpe);
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
@ -623,7 +622,7 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
I915_CACHE_LLC);

fill_px(to_i915(vm->dev), pml4, scratch_pml4e);
fill_px(vm->i915, pml4, scratch_pml4e);
}

static void
@ -710,7 +709,7 @@ static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
ppgtt->pd_dirty_rings = INTEL_INFO(to_i915(ppgtt->base.dev))->ring_mask;
ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
}

/* Removes entries from a single page table, releasing it if it's empty.
@ -736,10 +735,8 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,

bitmap_clear(pt->used_ptes, pte, num_entries);

if (bitmap_empty(pt->used_ptes, GEN8_PTES)) {
free_pt(to_i915(vm->dev), pt);
if (bitmap_empty(pt->used_ptes, GEN8_PTES))
return true;
}

pt_vaddr = kmap_px(pt);

@ -775,13 +772,12 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
pde_vaddr = kmap_px(pd);
pde_vaddr[pde] = scratch_pde;
kunmap_px(ppgtt, pde_vaddr);
free_pt(vm->i915, pt);
}
}

if (bitmap_empty(pd->used_pdes, I915_PDES)) {
free_pd(to_i915(vm->dev), pd);
if (bitmap_empty(pd->used_pdes, I915_PDES))
return true;
}

return false;
}
@ -795,7 +791,6 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_directory *pd;
uint64_t pdpe;
gen8_ppgtt_pdpe_t *pdpe_vaddr;
@ -813,16 +808,14 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
pdpe_vaddr[pdpe] = scratch_pdpe;
kunmap_px(ppgtt, pdpe_vaddr);
}
free_pd(vm->i915, pd);
}
}

mark_tlbs_dirty(ppgtt);

if (USES_FULL_48BIT_PPGTT(dev_priv) &&
bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv))) {
free_pdp(dev_priv, pdp);
if (bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv)))
return true;
}

return false;
}
@ -843,7 +836,7 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
gen8_ppgtt_pml4e_t scratch_pml4e =
gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC);

GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(to_i915(vm->dev)));
GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(vm->i915));

gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
if (WARN_ON(!pml4->pdps[pml4e]))
@ -854,6 +847,7 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
pml4e_vaddr = kmap_px(pml4);
pml4e_vaddr[pml4e] = scratch_pml4e;
kunmap_px(ppgtt, pml4e_vaddr);
free_pdp(vm->i915, pdp);
}
}
}
@ -863,7 +857,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev)))
if (USES_FULL_48BIT_PPGTT(vm->i915))
gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length);
else
gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length);
@ -898,7 +892,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
if (++pde == I915_PDES) {
if (++pdpe == I915_PDPES_PER_PDP(to_i915(vm->dev)))
if (++pdpe == I915_PDPES_PER_PDP(vm->i915))
break;
pde = 0;
}
@ -921,7 +915,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,

__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);

if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) {
if (!USES_FULL_48BIT_PPGTT(vm->i915)) {
gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
cache_level);
} else {
@ -955,7 +949,7 @@ static void gen8_free_page_tables(struct drm_i915_private *dev_priv,

static int gen8_init_scratch(struct i915_address_space *vm)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct drm_i915_private *dev_priv = vm->i915;
int ret;

ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
@ -1002,7 +996,7 @@ free_scratch_page:
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
enum vgt_g2v_type msg;
struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
struct drm_i915_private *dev_priv = ppgtt->base.i915;
int i;

if (USES_FULL_48BIT_PPGTT(dev_priv)) {
@ -1032,7 +1026,7 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)

static void gen8_free_scratch(struct i915_address_space *vm)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct drm_i915_private *dev_priv = vm->i915;

if (USES_FULL_48BIT_PPGTT(dev_priv))
free_pdp(dev_priv, vm->scratch_pdp);
@ -1059,7 +1053,7 @@ static void gen8_ppgtt_cleanup_3lvl(struct drm_i915_private *dev_priv,

static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
struct drm_i915_private *dev_priv = ppgtt->base.i915;
int i;

for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
@ -1074,7 +1068,7 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct drm_i915_private *dev_priv = vm->i915;
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

if (intel_vgpu_active(dev_priv))
@ -1112,7 +1106,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pts)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct drm_i915_private *dev_priv = vm->i915;
struct i915_page_table *pt;
uint32_t pde;

@ -1173,7 +1167,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pds)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct drm_i915_private *dev_priv = vm->i915;
struct i915_page_directory *pd;
uint32_t pdpe;
uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
@ -1226,7 +1220,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pdps)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct drm_i915_private *dev_priv = vm->i915;
struct i915_page_directory_pointer *pdp;
uint32_t pml4e;

@ -1301,7 +1295,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
unsigned long *new_page_dirs, *new_page_tables;
struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct drm_i915_private *dev_priv = vm->i915;
struct i915_page_directory *pd;
const uint64_t orig_start = start;
const uint64_t orig_length = length;
@ -1309,15 +1303,6 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
int ret;

/* Wrap is never okay since we can only represent 48b, and we don't
 * actually use the other side of the canonical address space.
 */
if (WARN_ON(start + length < start))
return -ENODEV;

if (WARN_ON(start + length > vm->total))
return -ENODEV;

ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
if (ret)
return ret;
@ -1450,7 +1435,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,

err_out:
for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
gen8_ppgtt_cleanup_3lvl(to_i915(vm->dev), pml4->pdps[pml4e]);
gen8_ppgtt_cleanup_3lvl(vm->i915, pml4->pdps[pml4e]);

return ret;
}
@ -1460,7 +1445,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev)))
if (USES_FULL_48BIT_PPGTT(vm->i915))
return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
else
return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
@ -1531,7 +1516,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC);

if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) {
if (!USES_FULL_48BIT_PPGTT(vm->i915)) {
gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
} else {
uint64_t pml4e;
@ -1584,7 +1569,7 @@ static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
 */
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
|
||||
struct drm_i915_private *dev_priv = ppgtt->base.i915;
|
||||
int ret;
|
||||
|
||||
ret = gen8_init_scratch(&ppgtt->base);
|
||||
@ -1927,7 +1912,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
|
||||
uint64_t start_in, uint64_t length_in)
|
||||
{
|
||||
DECLARE_BITMAP(new_page_tables, I915_PDES);
|
||||
struct drm_i915_private *dev_priv = to_i915(vm->dev);
|
||||
struct drm_i915_private *dev_priv = vm->i915;
|
||||
struct i915_ggtt *ggtt = &dev_priv->ggtt;
|
||||
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
|
||||
struct i915_page_table *pt;
|
||||
@ -1935,9 +1920,6 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
|
||||
uint32_t pde;
|
||||
int ret;
|
||||
|
||||
if (WARN_ON(start_in + length_in > ppgtt->base.total))
|
||||
return -ENODEV;
|
||||
|
||||
start = start_save = start_in;
|
||||
length = length_save = length_in;
|
||||
|
||||
@ -2014,7 +1996,7 @@ unwind_out:
|
||||
|
||||
static int gen6_init_scratch(struct i915_address_space *vm)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(vm->dev);
|
||||
struct drm_i915_private *dev_priv = vm->i915;
|
||||
int ret;
|
||||
|
||||
ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
|
||||
@ -2034,7 +2016,7 @@ static int gen6_init_scratch(struct i915_address_space *vm)
|
||||
|
||||
static void gen6_free_scratch(struct i915_address_space *vm)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(vm->dev);
|
||||
struct drm_i915_private *dev_priv = vm->i915;
|
||||
|
||||
free_pt(dev_priv, vm->scratch_pt);
|
||||
cleanup_scratch_page(dev_priv, &vm->scratch_page);
|
||||
@ -2044,7 +2026,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
|
||||
{
|
||||
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
|
||||
struct i915_page_directory *pd = &ppgtt->pd;
|
||||
struct drm_i915_private *dev_priv = to_i915(vm->dev);
|
||||
struct drm_i915_private *dev_priv = vm->i915;
|
||||
struct i915_page_table *pt;
|
||||
uint32_t pde;
|
||||
|
||||
@ -2060,7 +2042,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
|
||||
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
|
||||
{
|
||||
struct i915_address_space *vm = &ppgtt->base;
|
||||
struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
|
||||
struct drm_i915_private *dev_priv = ppgtt->base.i915;
|
||||
struct i915_ggtt *ggtt = &dev_priv->ggtt;
|
||||
bool retried = false;
|
||||
int ret;
|
||||
@ -2076,15 +2058,15 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
|
||||
return ret;
|
||||
|
||||
alloc:
|
||||
ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
|
||||
&ppgtt->node, GEN6_PD_SIZE,
|
||||
GEN6_PD_ALIGN, 0,
|
||||
ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm, &ppgtt->node,
|
||||
GEN6_PD_SIZE, GEN6_PD_ALIGN,
|
||||
I915_COLOR_UNEVICTABLE,
|
||||
0, ggtt->base.total,
|
||||
DRM_MM_TOPDOWN);
|
||||
if (ret == -ENOSPC && !retried) {
|
||||
ret = i915_gem_evict_something(&ggtt->base,
|
||||
GEN6_PD_SIZE, GEN6_PD_ALIGN,
|
||||
I915_CACHE_NONE,
|
||||
I915_COLOR_UNEVICTABLE,
|
||||
0, ggtt->base.total,
|
||||
0);
|
||||
if (ret)
|
||||
@ -2125,7 +2107,7 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
|
||||
|
||||
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
|
||||
struct drm_i915_private *dev_priv = ppgtt->base.i915;
|
||||
struct i915_ggtt *ggtt = &dev_priv->ggtt;
|
||||
int ret;
|
||||
|
||||
@ -2176,7 +2158,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
|
||||
static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
|
||||
struct drm_i915_private *dev_priv)
|
||||
{
|
||||
ppgtt->base.dev = &dev_priv->drm;
|
||||
ppgtt->base.i915 = dev_priv;
|
||||
|
||||
if (INTEL_INFO(dev_priv)->gen < 8)
|
||||
return gen6_ppgtt_init(ppgtt);
|
||||
@ -2379,10 +2361,24 @@ void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
|
||||
int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
|
||||
struct sg_table *pages)
|
||||
{
|
||||
if (dma_map_sg(&obj->base.dev->pdev->dev,
|
||||
pages->sgl, pages->nents,
|
||||
PCI_DMA_BIDIRECTIONAL))
|
||||
return 0;
|
||||
do {
|
||||
if (dma_map_sg(&obj->base.dev->pdev->dev,
|
||||
pages->sgl, pages->nents,
|
||||
PCI_DMA_BIDIRECTIONAL))
|
||||
return 0;
|
||||
|
||||
/* If the DMA remap fails, one cause can be that we have
|
||||
* too many objects pinned in a small remapping table,
|
||||
* such as swiotlb. Incrementally purge all other objects and
|
||||
* try again - if there are no more pages to remove from
|
||||
* the DMA remapper, i915_gem_shrink will return 0.
|
||||
*/
|
||||
GEM_BUG_ON(obj->mm.pages == pages);
|
||||
} while (i915_gem_shrink(to_i915(obj->base.dev),
|
||||
obj->base.size >> PAGE_SHIFT,
|
||||
I915_SHRINK_BOUND |
|
||||
I915_SHRINK_UNBOUND |
|
||||
I915_SHRINK_ACTIVE));
|
||||
|
||||
return -ENOSPC;
|
||||
}
|
||||
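Note: the new i915_gem_gtt_prepare_pages() above loops — when dma_map_sg() fails, it shrinks other objects' DMA mappings and retries until the shrinker reports nothing left to free. A minimal userspace sketch of that retry-until-the-shrinker-is-exhausted shape, with hypothetical try_dma_map()/shrink_other_objects() stand-ins for the kernel calls:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins: a tiny "remap table" that can hold 4 pages,
 * and a pool of other objects a shrinker could evict from it. */
static size_t table_used = 6, table_cap = 4, evictable = 3;

static bool try_dma_map(void)
{
	return table_used <= table_cap; /* fails while the table is over-full */
}

static size_t shrink_other_objects(void)
{
	if (!evictable)
		return 0;	/* nothing left to purge */
	evictable--;
	table_used--;
	return 1;		/* freed one page's worth of mappings */
}

static int prepare_pages(void)
{
	do {
		if (try_dma_map())
			return 0;
		/* Mapping failed: purge someone else's mappings and retry.
		 * When the shrinker can free nothing more, give up
		 * (-ENOSPC in the kernel version). */
	} while (shrink_other_objects());

	return -1;
}

int main(void)
{
	printf("prepare_pages() = %d\n", prepare_pages()); /* 0 after two shrink passes */
	return 0;
}
```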
@@ -2398,7 +2394,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
 enum i915_cache_level level,
 u32 unused)
 {
-struct drm_i915_private *dev_priv = to_i915(vm->dev);
+struct drm_i915_private *dev_priv = vm->i915;
 gen8_pte_t __iomem *pte =
 (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
 (offset >> PAGE_SHIFT);
@@ -2414,7 +2410,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 uint64_t start,
 enum i915_cache_level level, u32 unused)
 {
-struct drm_i915_private *dev_priv = to_i915(vm->dev);
+struct drm_i915_private *dev_priv = vm->i915;
 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 struct sgt_iter sgt_iter;
 gen8_pte_t __iomem *gtt_entries;
@@ -2479,7 +2475,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
 enum i915_cache_level level,
 u32 flags)
 {
-struct drm_i915_private *dev_priv = to_i915(vm->dev);
+struct drm_i915_private *dev_priv = vm->i915;
 gen6_pte_t __iomem *pte =
 (gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
 (offset >> PAGE_SHIFT);
@@ -2501,7 +2497,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 uint64_t start,
 enum i915_cache_level level, u32 flags)
 {
-struct drm_i915_private *dev_priv = to_i915(vm->dev);
+struct drm_i915_private *dev_priv = vm->i915;
 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 struct sgt_iter sgt_iter;
 gen6_pte_t __iomem *gtt_entries;
@@ -2621,7 +2617,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 enum i915_cache_level cache_level,
 u32 flags)
 {
-struct drm_i915_private *i915 = to_i915(vma->vm->dev);
+struct drm_i915_private *i915 = vma->vm->i915;
 struct drm_i915_gem_object *obj = vma->obj;
 u32 pte_flags = 0;
 int ret;
@@ -2653,7 +2649,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 enum i915_cache_level cache_level,
 u32 flags)
 {
-struct drm_i915_private *i915 = to_i915(vma->vm->dev);
+struct drm_i915_private *i915 = vma->vm->i915;
 u32 pte_flags;
 int ret;

@@ -2687,7 +2683,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,

 static void ggtt_unbind_vma(struct i915_vma *vma)
 {
-struct drm_i915_private *i915 = to_i915(vma->vm->dev);
+struct drm_i915_private *i915 = vma->vm->i915;
 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
 const u64 size = min(vma->size, vma->node.size);

@@ -2758,7 +2754,8 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 /* Reserve a mappable slot for our lockless error capture */
 ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
 &ggtt->error_capture,
-4096, 0, -1,
+4096, 0,
+I915_COLOR_UNEVICTABLE,
 0, ggtt->mappable_end,
 0, 0);
 if (ret)
@@ -2927,8 +2924,8 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)

 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 {
-struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
-struct pci_dev *pdev = ggtt->base.dev->pdev;
+struct drm_i915_private *dev_priv = ggtt->base.i915;
+struct pci_dev *pdev = dev_priv->drm.pdev;
 phys_addr_t phys_addr;
 int ret;

@@ -2942,7 +2939,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 * resort to an uncached mapping. The WC issue is easily caught by the
 * readback check when writing GTT PTE entries.
 */
-if (IS_BROXTON(dev_priv))
+if (IS_GEN9_LP(dev_priv))
 ggtt->gsm = ioremap_nocache(phys_addr, size);
 else
 ggtt->gsm = ioremap_wc(phys_addr, size);
@@ -3040,12 +3037,12 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

 iounmap(ggtt->gsm);
-cleanup_scratch_page(to_i915(vm->dev), &vm->scratch_page);
+cleanup_scratch_page(vm->i915, &vm->scratch_page);
 }

 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 {
-struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
+struct drm_i915_private *dev_priv = ggtt->base.i915;
 struct pci_dev *pdev = dev_priv->drm.pdev;
 unsigned int size;
 u16 snb_gmch_ctl;
@@ -3072,7 +3069,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)

 ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;

-if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
+if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
 chv_setup_private_ppat(dev_priv);
 else
 bdw_setup_private_ppat(dev_priv);
@@ -3094,7 +3091,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)

 static int gen6_gmch_probe(struct i915_ggtt *ggtt)
 {
-struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
+struct drm_i915_private *dev_priv = ggtt->base.i915;
 struct pci_dev *pdev = dev_priv->drm.pdev;
 unsigned int size;
 u16 snb_gmch_ctl;
@@ -3147,7 +3144,7 @@ static void i915_gmch_remove(struct i915_address_space *vm)

 static int i915_gmch_probe(struct i915_ggtt *ggtt)
 {
-struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
+struct drm_i915_private *dev_priv = ggtt->base.i915;
 int ret;

 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
@@ -3156,8 +3153,10 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
 return -EIO;
 }

-intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
-&ggtt->mappable_base, &ggtt->mappable_end);
+intel_gtt_get(&ggtt->base.total,
+&ggtt->stolen_size,
+&ggtt->mappable_base,
+&ggtt->mappable_end);

 ggtt->do_idle_maps = needs_idle_maps(dev_priv);
 ggtt->base.insert_page = i915_ggtt_insert_page;
@@ -3182,7 +3181,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
 struct i915_ggtt *ggtt = &dev_priv->ggtt;
 int ret;

-ggtt->base.dev = &dev_priv->drm;
+ggtt->base.i915 = dev_priv;

 if (INTEL_GEN(dev_priv) <= 5)
 ret = i915_gmch_probe(ggtt);
@@ -3193,6 +3192,16 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
 if (ret)
 return ret;

+/* Trim the GGTT to fit the GuC mappable upper range (when enabled).
+ * This is easier than doing range restriction on the fly, as we
+ * currently don't have any bits spare to pass in this upper
+ * restriction!
+ */
+if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
+ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
+ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
+}
+
 if ((ggtt->base.total - 1) >> 32) {
 DRM_ERROR("We never expected a Global GTT with more than 32bits"
 " of address space! Found %lldM!\n",
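Note: the GuC trim added above just clamps both limits. A standalone C sketch of the same clamping (GUC_GGTT_TOP taken from the guc_reg.h hunk later in this diff; the helper name is hypothetical):

```c
#include <stdint.h>
#include <stdio.h>

#define GUC_GGTT_TOP 0xFEE00000u /* upper bound the GuC can map */

/* Mirrors the min_t()-style clamping: when the GuC is enabled, nothing
 * the driver allocates may live above GUC_GGTT_TOP, and the CPU-mappable
 * window must stay within the (possibly reduced) total. */
static void trim_ggtt_for_guc(uint64_t *total, uint64_t *mappable_end)
{
	if (*total > GUC_GGTT_TOP)
		*total = GUC_GGTT_TOP;
	if (*mappable_end > *total)
		*mappable_end = *total;
}

int main(void)
{
	uint64_t total = 1ull << 32, mappable = 1ull << 31;
	trim_ggtt_for_guc(&total, &mappable);
	printf("total=%#llx mappable_end=%#llx\n",
	       (unsigned long long)total, (unsigned long long)mappable);
	return 0;
}
```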
@@ -3212,7 +3221,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
 DRM_INFO("Memory usable by graphics device = %lluM\n",
 ggtt->base.total >> 20);
 DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
-DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", ggtt->stolen_size >> 20);
+DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
 #ifdef CONFIG_INTEL_IOMMU
 if (intel_iommu_gfx_mapped)
 DRM_INFO("VT-d active for gfx access\n");
@@ -3312,7 +3321,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 ggtt->base.closed = false;

 if (INTEL_GEN(dev_priv) >= 8) {
-if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
+if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
 chv_setup_private_ppat(dev_priv);
 else
 bdw_setup_private_ppat(dev_priv);
@@ -220,7 +220,7 @@ struct i915_pml4 {
 struct i915_address_space {
 struct drm_mm mm;
 struct i915_gem_timeline timeline;
-struct drm_device *dev;
+struct drm_i915_private *i915;
 /* Every address space belongs to a struct file - except for the global
 * GTT that is owned by the driver (and so @file is set to NULL). In
 * principle, no information should leak from one context to another
@@ -315,12 +315,21 @@ struct i915_ggtt {
 struct i915_address_space base;
 struct io_mapping mappable; /* Mapping to our CPU mappable region */

-size_t stolen_size; /* Total size of stolen memory */
-size_t stolen_usable_size; /* Total size minus BIOS reserved */
-size_t stolen_reserved_base;
-size_t stolen_reserved_size;
-u64 mappable_end; /* End offset that we can CPU map */
 phys_addr_t mappable_base; /* PA of our GMADR */
+u64 mappable_end; /* End offset that we can CPU map */
+
+/* Stolen memory is segmented in hardware with different portions
+ * offlimits to certain functions.
+ *
+ * The drm_mm is initialised to the total accessible range, as found
+ * from the PCI config. On Broadwell+, this is further restricted to
+ * avoid the first page! The upper end of stolen memory is reserved for
+ * hardware functions and similarly removed from the accessible range.
+ */
+u32 stolen_size; /* Total size of stolen memory */
+u32 stolen_usable_size; /* Total size minus reserved ranges */
+u32 stolen_reserved_base;
+u32 stolen_reserved_size;

 /** "Graphics Stolen Memory" holds the global PTEs */
 void __iomem *gsm;
@@ -71,7 +71,7 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 #endif

 gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
-if (IS_CRESTLINE(i915) || IS_BROADWATER(i915)) {
+if (IS_I965GM(i915) || IS_I965G(i915)) {
 /* 965gm cannot relocate objects above 4GiB. */
 gfp &= ~__GFP_HIGHMEM;
 gfp |= __GFP_DMA32;
@@ -155,7 +155,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
 {
 struct drm_i915_gem_object *obj;

-obj = i915_gem_object_alloc(&i915->drm);
+obj = i915_gem_object_alloc(i915);
 if (!obj)
 return ERR_PTR(-ENOMEM);

@@ -62,6 +62,15 @@ static void i915_fence_release(struct dma_fence *fence)
 {
 struct drm_i915_gem_request *req = to_request(fence);

+/* The request is put onto a RCU freelist (i.e. the address
+ * is immediately reused), mark the fences as being freed now.
+ * Otherwise the debugobjects for the fences are only marked as
+ * freed when the slab cache itself is freed, and so we would get
+ * caught trying to reuse dead objects.
+ */
+i915_sw_fence_fini(&req->submit);
+i915_sw_fence_fini(&req->execute);
+
 kmem_cache_free(req->i915->requests, req);
 }

@@ -197,6 +206,7 @@ void i915_gem_retire_noop(struct i915_gem_active *active,

 static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 {
+struct intel_engine_cs *engine = request->engine;
 struct i915_gem_active *active, *next;

 lockdep_assert_held(&request->i915->drm.struct_mutex);
@@ -207,9 +217,9 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)

 trace_i915_gem_request_retire(request);

-spin_lock_irq(&request->engine->timeline->lock);
+spin_lock_irq(&engine->timeline->lock);
 list_del_init(&request->link);
-spin_unlock_irq(&request->engine->timeline->lock);
+spin_unlock_irq(&engine->timeline->lock);

 /* We know the GPU must have read the request to have
 * sent us the seqno + interrupt, so use the position
@@ -257,13 +267,20 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)

 i915_gem_request_remove_from_client(request);

-if (request->previous_context) {
-if (i915.enable_execlists)
-intel_lr_context_unpin(request->previous_context,
-request->engine);
-}
+/* Retirement decays the ban score as it is a sign of ctx progress */
+if (request->ctx->ban_score > 0)
+request->ctx->ban_score--;

-i915_gem_context_put(request->ctx);
+/* The backing object for the context is done after switching to the
+ * *next* context. Therefore we cannot retire the previous context until
+ * the next context has already started running. However, since we
+ * cannot take the required locks at i915_gem_request_submit() we
+ * defer the unpinning of the active context to now, retirement of
+ * the subsequent request.
+ */
+if (engine->last_retired_context)
+engine->context_unpin(engine, engine->last_retired_context);
+engine->last_retired_context = request->ctx;

 dma_fence_signal(&request->fence);

@@ -277,6 +294,8 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
 struct drm_i915_gem_request *tmp;

 lockdep_assert_held(&req->i915->drm.struct_mutex);
+GEM_BUG_ON(!i915_gem_request_completed(req));
+
 if (list_empty(&req->link))
 return;

@@ -326,11 +345,11 @@ static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
 GEM_BUG_ON(i915->gt.active_requests > 1);

 /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
-if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) {
+if (!i915_seqno_passed(seqno, atomic_read(&timeline->seqno))) {
 while (intel_breadcrumbs_busy(i915))
 cond_resched(); /* spin until threads are complete */
 }
-atomic_set(&timeline->next_seqno, seqno);
+atomic_set(&timeline->seqno, seqno);

 /* Finally reset hw state */
 for_each_engine(engine, i915, id)
@@ -365,11 +384,11 @@ int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
 static int reserve_global_seqno(struct drm_i915_private *i915)
 {
 u32 active_requests = ++i915->gt.active_requests;
-u32 next_seqno = atomic_read(&i915->gt.global_timeline.next_seqno);
+u32 seqno = atomic_read(&i915->gt.global_timeline.seqno);
 int ret;

 /* Reservation is fine until we need to wrap around */
-if (likely(next_seqno + active_requests > next_seqno))
+if (likely(seqno + active_requests > seqno))
 return 0;

 ret = i915_gem_init_global_seqno(i915, 0);
@@ -383,13 +402,13 @@ static int reserve_global_seqno(struct drm_i915_private *i915)

 static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
 {
-/* next_seqno only incremented under a mutex */
-return ++tl->next_seqno.counter;
+/* seqno only incremented under a mutex */
+return ++tl->seqno.counter;
 }

 static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
 {
-return atomic_inc_return(&tl->next_seqno);
+return atomic_inc_return(&tl->seqno);
 }

 void __i915_gem_request_submit(struct drm_i915_gem_request *request)
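Note: the wrap check in reserve_global_seqno() relies on unsigned arithmetic: u32 addition wraps, so "seqno + n > seqno" is false exactly when handing out n more seqnos would overflow the 32-bit timeline. A self-contained C demonstration of that idiom (function name is illustrative; the kernel guarantees active_requests >= 1 at this point):

```c
#include <stdint.h>
#include <stdio.h>

/* False only when seqno + active_requests wraps past UINT32_MAX. */
static int seqno_space_left(uint32_t seqno, uint32_t active_requests)
{
	return seqno + active_requests > seqno;
}

int main(void)
{
	printf("%d\n", seqno_space_left(100, 5));          /* 1: plenty of space */
	printf("%d\n", seqno_space_left(0xFFFFFFFEu, 5));  /* 0: would wrap */
	return 0;
}
```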
@@ -509,10 +528,18 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 if (ret)
 return ERR_PTR(ret);

-ret = reserve_global_seqno(dev_priv);
+/* Pinning the contexts may generate requests in order to acquire
+ * GGTT space, so do this first before we reserve a seqno for
+ * ourselves.
+ */
+ret = engine->context_pin(engine, ctx);
 if (ret)
 return ERR_PTR(ret);

+ret = reserve_global_seqno(dev_priv);
+if (ret)
+goto err_unpin;
+
 /* Move the oldest request to the slab-cache (if not in use!) */
 req = list_first_entry_or_null(&engine->timeline->requests,
 typeof(*req), link);
@@ -578,11 +605,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 INIT_LIST_HEAD(&req->active_list);
 req->i915 = dev_priv;
 req->engine = engine;
-req->ctx = i915_gem_context_get(ctx);
+req->ctx = ctx;

 /* No zalloc, must clear what we need by hand */
 req->global_seqno = 0;
-req->previous_context = NULL;
 req->file_priv = NULL;
 req->batch = NULL;

@@ -596,10 +622,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
 GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);

-if (i915.enable_execlists)
-ret = intel_logical_ring_alloc_request_extras(req);
-else
-ret = intel_ring_alloc_request_extras(req);
+ret = engine->request_alloc(req);
 if (ret)
 goto err_ctx;

@@ -613,10 +636,16 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 return req;

 err_ctx:
-i915_gem_context_put(ctx);
+/* Make sure we didn't add ourselves to external state before freeing */
+GEM_BUG_ON(!list_empty(&req->active_list));
+GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
+GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));
+
 kmem_cache_free(dev_priv->requests, req);
 err_unreserve:
 dev_priv->gt.active_requests--;
+err_unpin:
+engine->context_unpin(engine, ctx);
 return ERR_PTR(ret);
 }

@@ -170,17 +170,6 @@ struct drm_i915_gem_request {
 /** Preallocate space in the ring for the emitting the request */
 u32 reserved_space;

-/**
- * Context related to the previous request.
- * As the contexts are accessed by the hardware until the switch is
- * completed to a new context, the hardware may still be writing
- * to the context object after the breadcrumb is visible. We must
- * not unpin/unbind/prune that object whilst still active and so
- * we keep the previous context pinned until the following (this)
- * request is retired.
- */
-struct i915_gem_context *previous_context;
-
 /** Batch buffer related to this request if any (used for
 * error state dump only).
 */
@@ -54,12 +54,6 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
 if (!drm_mm_initialized(&dev_priv->mm.stolen))
 return -ENODEV;

-/* See the comment at the drm_mm_init() call for more about this check.
- * WaSkipStolenMemoryFirstPage:bdw+ (incomplete)
- */
-if (start < 4096 && INTEL_GEN(dev_priv) >= 8)
-start = 4096;
-
 mutex_lock(&dev_priv->mm.stolen_lock);
 ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
 alignment, start, end,
@@ -73,11 +67,8 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
 struct drm_mm_node *node, u64 size,
 unsigned alignment)
 {
-struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
 return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
-alignment, 0,
-ggtt->stolen_usable_size);
+alignment, 0, U64_MAX);
 }

 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
@@ -152,7 +143,7 @@ static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
 tom = tmp * MB(32);

 base = tom - tseg_size - ggtt->stolen_size;
-} else if (IS_845G(dev_priv)) {
+} else if (IS_I845G(dev_priv)) {
 u32 tseg_size = 0;
 u32 tom;
 u8 tmp;
@@ -202,8 +193,8 @@ static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
 return 0;

 /* make sure we don't clobber the GTT if it's within stolen memory */
-if (INTEL_GEN(dev_priv) <= 4 && !IS_G33(dev_priv) &&
-!IS_G4X(dev_priv)) {
+if (INTEL_GEN(dev_priv) <= 4 &&
+!IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
 struct {
 u32 start, end;
 } stolen[2] = {
@@ -290,14 +281,13 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
 }

 static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
-unsigned long *base, unsigned long *size)
+phys_addr_t *base, u32 *size)
 {
 struct i915_ggtt *ggtt = &dev_priv->ggtt;
 uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
 CTG_STOLEN_RESERVED :
 ELK_STOLEN_RESERVED);
-unsigned long stolen_top = dev_priv->mm.stolen_base +
-ggtt->stolen_size;
+phys_addr_t stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

 *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

@@ -314,7 +304,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
 }

 static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
-unsigned long *base, unsigned long *size)
+phys_addr_t *base, u32 *size)
 {
 uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

@@ -340,7 +330,7 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
 }

 static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
-unsigned long *base, unsigned long *size)
+phys_addr_t *base, u32 *size)
 {
 uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

@@ -359,8 +349,8 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
 }
 }

-static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
-unsigned long *base, unsigned long *size)
+static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
+phys_addr_t *base, u32 *size)
 {
 uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

@@ -386,11 +376,11 @@ static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
 }

 static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
-unsigned long *base, unsigned long *size)
+phys_addr_t *base, u32 *size)
 {
 struct i915_ggtt *ggtt = &dev_priv->ggtt;
 uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
-unsigned long stolen_top;
+phys_addr_t stolen_top;

 stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

@@ -409,8 +399,9 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
 int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 {
 struct i915_ggtt *ggtt = &dev_priv->ggtt;
-unsigned long reserved_total, reserved_base = 0, reserved_size;
-unsigned long stolen_top;
+phys_addr_t reserved_base, stolen_top;
+u32 reserved_total, reserved_size;
+u32 stolen_usable_start;

 mutex_init(&dev_priv->mm.stolen_lock);

@@ -429,6 +420,8 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 return 0;

 stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
+reserved_base = 0;
+reserved_size = 0;

 switch (INTEL_INFO(dev_priv)->gen) {
 case 2:
@@ -436,8 +429,8 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 break;
 case 4:
 if (IS_G4X(dev_priv))
-g4x_get_stolen_reserved(dev_priv, &reserved_base,
-&reserved_size);
+g4x_get_stolen_reserved(dev_priv,
+&reserved_base, &reserved_size);
 break;
 case 5:
 /* Assume the gen6 maximum for the older platforms. */
@@ -445,21 +438,20 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 reserved_base = stolen_top - reserved_size;
 break;
 case 6:
-gen6_get_stolen_reserved(dev_priv, &reserved_base,
-&reserved_size);
+gen6_get_stolen_reserved(dev_priv,
+&reserved_base, &reserved_size);
 break;
 case 7:
-gen7_get_stolen_reserved(dev_priv, &reserved_base,
-&reserved_size);
+gen7_get_stolen_reserved(dev_priv,
+&reserved_base, &reserved_size);
 break;
 default:
-if (IS_BROADWELL(dev_priv) ||
-IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-bdw_get_stolen_reserved(dev_priv, &reserved_base,
-&reserved_size);
+if (IS_LP(dev_priv))
+chv_get_stolen_reserved(dev_priv,
+&reserved_base, &reserved_size);
 else
-gen8_get_stolen_reserved(dev_priv, &reserved_base,
-&reserved_size);
+bdw_get_stolen_reserved(dev_priv,
+&reserved_base, &reserved_size);
 break;
 }

@@ -472,9 +464,10 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)

 if (reserved_base < dev_priv->mm.stolen_base ||
 reserved_base + reserved_size > stolen_top) {
-DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
-reserved_base, reserved_base + reserved_size,
-dev_priv->mm.stolen_base, stolen_top);
+phys_addr_t reserved_top = reserved_base + reserved_size;
+DRM_DEBUG_KMS("Stolen reserved area [%pa - %pa] outside stolen memory [%pa - %pa]\n",
+&reserved_base, &reserved_top,
+&dev_priv->mm.stolen_base, &stolen_top);
 return 0;
 }

@@ -485,24 +478,21 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 * memory, so just consider the start. */
 reserved_total = stolen_top - reserved_base;

-DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
+DRM_DEBUG_KMS("Memory reserved for graphics device: %uK, usable: %uK\n",
 ggtt->stolen_size >> 10,
 (ggtt->stolen_size - reserved_total) >> 10);

-ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total;
+stolen_usable_start = 0;
+/* WaSkipStolenMemoryFirstPage:bdw+ */
+if (INTEL_GEN(dev_priv) >= 8)
+stolen_usable_start = 4096;

-/*
- * Basic memrange allocator for stolen space.
- *
- * TODO: Notice that some platforms require us to not use the first page
- * of the stolen memory but their BIOSes may still put the framebuffer
- * on the first page. So we don't reserve this page for now because of
- * that. Our current solution is to just prevent new nodes from being
- * inserted on the first page - see the check we have at
- * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
- * problem later.
- */
-drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size);
+ggtt->stolen_usable_size =
+ggtt->stolen_size - reserved_total - stolen_usable_start;
+
+/* Basic memrange allocator for stolen space. */
+drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
+ggtt->stolen_usable_size);

 return 0;
 }
@@ -515,7 +505,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 struct sg_table *st;
 struct scatterlist *sg;

-GEM_BUG_ON(offset > dev_priv->ggtt.stolen_size - size);
+GEM_BUG_ON(range_overflows(offset, size, dev_priv->ggtt.stolen_size));

 /* We hide that we have no struct page backing our stolen object
 * by wrapping the contiguous physical allocation with a fake
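Note: the range_overflows() check adopted above is safer than the old "offset > size_limit - size" form because it is written so the arithmetic itself cannot wrap. A sketch of the idea in plain C (this is the semantics, not the kernel macro verbatim, which is type-generic):

```c
#include <stdint.h>
#include <stdio.h>

/* Does [start, start + size) escape [0, max)? Rearranged as
 * "size > max - start" so no intermediate sum can overflow;
 * a naive start + size > max could wrap and wrongly pass. */
static int range_overflows_u64(uint64_t start, uint64_t size, uint64_t max)
{
	return start > max || size > max - start;
}

int main(void)
{
	printf("%d\n", range_overflows_u64(4096, 4096, 65536));    /* 0: fits */
	printf("%d\n", range_overflows_u64(UINT64_MAX, 2, 65536)); /* 1: caught */
	return 0;
}
```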
@@ -578,22 +568,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
 };

 static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct drm_device *dev,
+_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
 struct drm_mm_node *stolen)
 {
 struct drm_i915_gem_object *obj;

-obj = i915_gem_object_alloc(dev);
+obj = i915_gem_object_alloc(dev_priv);
 if (obj == NULL)
 return NULL;

-drm_gem_private_object_init(dev, &obj->base, stolen->size);
+drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
 i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

 obj->stolen = stolen;
 obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
-obj->cache_level = HAS_LLC(to_i915(dev)) ?
-I915_CACHE_LLC : I915_CACHE_NONE;
+obj->cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;

 if (i915_gem_object_pin_pages(obj))
 goto cleanup;
@@ -606,9 +595,8 @@ cleanup:
 }

 struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size)
 {
-struct drm_i915_private *dev_priv = to_i915(dev);
 struct drm_i915_gem_object *obj;
 struct drm_mm_node *stolen;
 int ret;
@@ -629,7 +617,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
 return NULL;
 }

-obj = _i915_gem_object_create_stolen(dev, stolen);
+obj = _i915_gem_object_create_stolen(dev_priv, stolen);
 if (obj)
 return obj;

@@ -639,12 +627,11 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
 }

 struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
 u32 stolen_offset,
 u32 gtt_offset,
 u32 size)
 {
-struct drm_i915_private *dev_priv = to_i915(dev);
 struct i915_ggtt *ggtt = &dev_priv->ggtt;
 struct drm_i915_gem_object *obj;
 struct drm_mm_node *stolen;
@@ -654,7 +641,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 if (!drm_mm_initialized(&dev_priv->mm.stolen))
 return NULL;

-lockdep_assert_held(&dev->struct_mutex);
+lockdep_assert_held(&dev_priv->drm.struct_mutex);

 DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
 stolen_offset, gtt_offset, size);
@@ -679,7 +666,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 return NULL;
 }

-obj = _i915_gem_object_create_stolen(dev, stolen);
+obj = _i915_gem_object_create_stolen(dev_priv, stolen);
 if (obj == NULL) {
 DRM_DEBUG_KMS("failed to allocate stolen object\n");
 i915_gem_stolen_remove_node(dev_priv, stolen);
@@ -119,7 +119,7 @@ i915_tiling_ok(struct drm_i915_private *dev_priv,

 static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode)
 {
-struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
+struct drm_i915_private *dev_priv = vma->vm->i915;
 u32 size;

 if (!i915_vma_is_map_and_fenceable(vma))
@@ -81,10 +81,18 @@ int i915_gem_timeline_init__global(struct drm_i915_private *i915)
 &class, "&global_timeline->lock");
 }

-void i915_gem_timeline_fini(struct i915_gem_timeline *tl)
+void i915_gem_timeline_fini(struct i915_gem_timeline *timeline)
 {
-lockdep_assert_held(&tl->i915->drm.struct_mutex);
+int i;

-list_del(&tl->link);
-kfree(tl->name);
+lockdep_assert_held(&timeline->i915->drm.struct_mutex);
+
+for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
+struct intel_timeline *tl = &timeline->engine[i];
+
+GEM_BUG_ON(!list_empty(&tl->requests));
+}
+
+list_del(&timeline->link);
+kfree(timeline->name);
 }
@@ -56,7 +56,7 @@ struct intel_timeline {

 struct i915_gem_timeline {
 struct list_head link;
-atomic_t next_seqno;
+atomic_t seqno;

 struct drm_i915_private *i915;
 const char *name;
@@ -784,7 +784,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 return -ENODEV;
 }

-obj = i915_gem_object_alloc(dev);
+obj = i915_gem_object_alloc(dev_priv);
 if (obj == NULL)
 return -ENOMEM;

@@ -176,9 +176,14 @@ static void i915_error_puts(struct drm_i915_error_state_buf *e,

 #ifdef CONFIG_DRM_I915_COMPRESS_ERROR

-static bool compress_init(struct z_stream_s *zstream)
+struct compress {
+struct z_stream_s zstream;
+void *tmp;
+};
+
+static bool compress_init(struct compress *c)
 {
-memset(zstream, 0, sizeof(*zstream));
+struct z_stream_s *zstream = memset(&c->zstream, 0, sizeof(c->zstream));

 zstream->workspace =
 kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
@@ -191,14 +196,22 @@ static bool compress_init(struct z_stream_s *zstream)
 return false;
 }

+c->tmp = NULL;
+if (i915_has_memcpy_from_wc())
+c->tmp = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+
 return true;
 }

-static int compress_page(struct z_stream_s *zstream,
+static int compress_page(struct compress *c,
 void *src,
 struct drm_i915_error_object *dst)
 {
+struct z_stream_s *zstream = &c->zstream;
+
 zstream->next_in = src;
+if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
+zstream->next_in = c->tmp;
 zstream->avail_in = PAGE_SIZE;

 do {
@@ -226,9 +239,11 @@ static int compress_page(struct z_stream_s *zstream,
 return 0;
 }

-static void compress_fini(struct z_stream_s *zstream,
+static void compress_fini(struct compress *c,
 struct drm_i915_error_object *dst)
 {
+struct z_stream_s *zstream = &c->zstream;
+
 if (dst) {
 zlib_deflate(zstream, Z_FINISH);
 dst->unused = zstream->avail_out;
@@ -236,6 +251,9 @@ static void compress_fini(struct z_stream_s *zstream,

 zlib_deflateEnd(zstream);
 kfree(zstream->workspace);
+
+if (c->tmp)
+free_page((unsigned long)c->tmp);
 }

 static void err_compression_marker(struct drm_i915_error_state_buf *m)
@@ -245,28 +263,34 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)

 #else

-static bool compress_init(struct z_stream_s *zstream)
+struct compress {
+};
+
+static bool compress_init(struct compress *c)
 {
 return true;
 }

-static int compress_page(struct z_stream_s *zstream,
+static int compress_page(struct compress *c,
 void *src,
 struct drm_i915_error_object *dst)
 {
 unsigned long page;
+void *ptr;

 page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
 if (!page)
 return -ENOMEM;

-dst->pages[dst->page_count++] =
-memcpy((void *)page, src, PAGE_SIZE);
+ptr = (void *)page;
+if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
+memcpy(ptr, src, PAGE_SIZE);
+dst->pages[dst->page_count++] = ptr;

 return 0;
 }

-static void compress_fini(struct z_stream_s *zstream,
+static void compress_fini(struct compress *c,
 struct drm_i915_error_object *dst)
 {
 }
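Note: the error-capture path above reads from write-combined (uncached) memory, which is slow with ordinary loads, so both branches now try i915_memcpy_from_wc() and fall back to memcpy() when the accelerated copy is unavailable. A self-contained C sketch of that probe-and-fallback shape (fast_copy() is a hypothetical stand-in):

```c
#include <stdio.h>
#include <string.h>

/* Hypothetical fast path: returns 0 when the accelerated copy is not
 * available (in the driver, when the CPU lacks SSE4.1 movntdqa). */
static int fast_copy(void *dst, const void *src, size_t len)
{
	(void)dst; (void)src; (void)len;
	return 0; /* stub: pretend the fast path is unsupported */
}

static void copy_page_out(void *dst, const void *src, size_t len)
{
	if (!fast_copy(dst, src, len))
		memcpy(dst, src, len); /* always-correct slow path */
}

int main(void)
{
	char src[16] = "error capture", dst[16] = {0};
	copy_page_out(dst, src, sizeof(src));
	printf("%s\n", dst);
	return 0;
}
```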
@@ -316,24 +340,6 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
 }
 }

-static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
-{
-switch (a) {
-case HANGCHECK_IDLE:
-return "idle";
-case HANGCHECK_WAIT:
-return "wait";
-case HANGCHECK_ACTIVE:
-return "active";
-case HANGCHECK_KICK:
-return "kick";
-case HANGCHECK_HUNG:
-return "hung";
-}
-
-return "unknown";
-}
-
 static void error_print_instdone(struct drm_i915_error_state_buf *m,
 struct drm_i915_error_engine *ee)
 {
@@ -370,8 +376,8 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
 if (!erq->seqno)
 return;

-err_printf(m, "%s pid %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
-prefix, erq->pid,
+err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
+prefix, erq->pid, erq->ban_score,
 erq->context, erq->seqno,
 jiffies_to_msecs(jiffies - erq->jiffies),
 erq->head, erq->tail);
@@ -441,9 +447,13 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
 err_printf(m, " waiting: %s\n", yesno(ee->waiting));
 err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
 err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
-err_printf(m, " hangcheck: %s [%d]\n",
-hangcheck_action_to_str(ee->hangcheck_action),
-ee->hangcheck_score);
+err_printf(m, " hangcheck stall: %s\n", yesno(ee->hangcheck_stalled));
+err_printf(m, " hangcheck action: %s\n",
+hangcheck_action_to_str(ee->hangcheck_action));
+err_printf(m, " hangcheck action timestamp: %lu, %u ms ago\n",
+ee->hangcheck_timestamp,
+jiffies_to_msecs(jiffies - ee->hangcheck_timestamp));

 error_print_request(m, " ELSP[0]: ", &ee->execlist[0]);
 error_print_request(m, " ELSP[1]: ", &ee->execlist[1]);
 }
@@ -528,11 +538,10 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m,
 int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 const struct i915_error_state_file_priv *error_priv)
 {
-struct drm_i915_private *dev_priv = to_i915(error_priv->dev);
+struct drm_i915_private *dev_priv = error_priv->i915;
 struct pci_dev *pdev = dev_priv->drm.pdev;
 struct drm_i915_error_state *error = error_priv->error;
 struct drm_i915_error_object *obj;
-int max_hangcheck_score;
 int i, j;

 if (!error) {
@@ -549,22 +558,20 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 err_printf(m, "Uptime: %ld s %ld us\n",
 error->uptime.tv_sec, error->uptime.tv_usec);
 err_print_capabilities(m, &error->device_info);
-max_hangcheck_score = 0;
-for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
-if (error->engine[i].hangcheck_score > max_hangcheck_score)
-max_hangcheck_score = error->engine[i].hangcheck_score;
-}
 for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
-if (error->engine[i].hangcheck_score == max_hangcheck_score &&
+if (error->engine[i].hangcheck_stalled &&
 error->engine[i].pid != -1) {
-err_printf(m, "Active process (on ring %s): %s [%d]\n",
+err_printf(m, "Active process (on ring %s): %s [%d], context bans %d\n",
 engine_str(i),
 error->engine[i].comm,
-error->engine[i].pid);
+error->engine[i].pid,
+error->engine[i].context_bans);
 }
 }
 err_printf(m, "Reset count: %u\n", error->reset_count);
 err_printf(m, "Suspend count: %u\n", error->suspend_count);
+err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
 err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
 err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
 err_printf(m, "PCI Subsystem: %04x:%04x\n",
@@ -651,9 +658,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 if (obj) {
 err_puts(m, dev_priv->engine[i]->name);
 if (ee->pid != -1)
-err_printf(m, " (submitted by %s [%d])",
+err_printf(m, " (submitted by %s [%d], bans %d)",
 ee->comm,
-ee->pid);
+ee->pid,
+ee->context_bans);
 err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
 upper_32_bits(obj->gtt_offset),
 lower_32_bits(obj->gtt_offset));
@@ -801,7 +809,7 @@ i915_error_object_create(struct drm_i915_private *i915,
 struct i915_ggtt *ggtt = &i915->ggtt;
 const u64 slot = ggtt->error_capture.start;
 struct drm_i915_error_object *dst;
-struct z_stream_s zstream;
+struct compress compress;
 unsigned long num_pages;
 struct sgt_iter iter;
 dma_addr_t dma;
@@ -821,7 +829,7 @@ i915_error_object_create(struct drm_i915_private *i915,
 dst->page_count = 0;
 dst->unused = 0;

-if (!compress_init(&zstream)) {
+if (!compress_init(&compress)) {
 kfree(dst);
 return NULL;
 }
@@ -834,7 +842,7 @@ i915_error_object_create(struct drm_i915_private *i915,
 I915_CACHE_NONE, 0);

 s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
-ret = compress_page(&zstream, (void __force *)s, dst);
+ret = compress_page(&compress, (void __force *)s, dst);
 io_mapping_unmap_atomic(s);

 if (ret)
@@ -849,7 +857,7 @@ unwind:
 dst = NULL;

 out:
-compress_fini(&zstream, dst);
+compress_fini(&compress, dst);
 ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
 return dst;
 }
@@ -941,7 +949,7 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
 * strictly a client bug. Use instdone to differentiate those some.
 */
 for (i = 0; i < I915_NUM_ENGINES; i++) {
-if (error->engine[i].hangcheck_action == HANGCHECK_HUNG) {
+if (error->engine[i].hangcheck_stalled) {
 if (engine_id)
 *engine_id = i;

@@ -1159,8 +1167,9 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
 ee->hws = I915_READ(mmio);
 }

-ee->hangcheck_score = engine->hangcheck.score;
+ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
 ee->hangcheck_action = engine->hangcheck.action;
+ee->hangcheck_stalled = engine->hangcheck.stalled;

 if (USES_PPGTT(dev_priv)) {
 int i;
@@ -1188,6 +1197,7 @@ static void record_request(struct drm_i915_gem_request *request,
 struct drm_i915_error_request *erq)
 {
 erq->context = request->ctx->hw_id;
+erq->ban_score = request->ctx->ban_score;
 erq->seqno = request->global_seqno;
 erq->jiffies = request->emitted_jiffies;
 erq->head = request->head;
@@ -1321,7 +1331,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 }

 error->simulated |=
-request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
+i915_gem_context_no_error_capture(request->ctx);

 ee->rq_head = request->head;
 ee->rq_post = request->postfix;
@@ -1659,9 +1669,8 @@ void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
 kref_put(&error_priv->error->ref, i915_error_state_free);
 }

-void i915_destroy_error_state(struct drm_device *dev)
+void i915_destroy_error_state(struct drm_i915_private *dev_priv)
 {
-struct drm_i915_private *dev_priv = to_i915(dev);
 struct drm_i915_error_state *error;

 spin_lock_irq(&dev_priv->gpu_error.lock);
@@ -73,6 +73,9 @@
 #define GUC_WOPCM_TOP (0x80 << 12) /* 512KB */
 #define BXT_GUC_WOPCM_RC6_RESERVED (0x10 << 12) /* 64KB */

+/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
+#define GUC_GGTT_TOP 0xFEE00000
+
 #define GEN8_GT_PM_CONFIG _MMIO(0x138140)
 #define GEN9LP_GT_PM_CONFIG _MMIO(0x138140)
 #define GEN9_GT_PM_CONFIG _MMIO(0x13816c)
@@ -100,8 +103,8 @@
 GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \
 GUC_ENABLE_MIA_CLOCK_GATING)

-#define HOST2GUC_INTERRUPT _MMIO(0xc4c8)
-#define HOST2GUC_TRIGGER (1<<0)
+#define GUC_SEND_INTERRUPT _MMIO(0xc4c8)
+#define GUC_SEND_TRIGGER (1<<0)

 #define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8)
 #define GEN8_DRB_VALID (1<<0)
@ -21,12 +21,11 @@
|
||||
* IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/circ_buf.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/relay.h>
|
||||
#include "i915_drv.h"
|
||||
#include "intel_guc.h"
|
||||
#include "intel_uc.h"
|
||||
|
||||
/**
|
||||
* DOC: GuC-based command submission
|
||||
@ -49,7 +48,7 @@
|
||||
* Firmware writes a success/fail code back to the action register after
|
||||
* processes the request. The kernel driver polls waiting for this update and
|
||||
* then proceeds.
|
||||
* See host2guc_action()
|
||||
* See intel_guc_send()
|
||||
*
|
||||
* Doorbells:
|
||||
* Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW)
|
||||
@ -65,142 +64,30 @@
|
||||
*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Read GuC command/status register (SOFT_SCRATCH_0)
|
||||
* Return true if it contains a response rather than a command
|
||||
*/
|
||||
static inline bool host2guc_action_response(struct drm_i915_private *dev_priv,
|
||||
u32 *status)
|
||||
{
|
||||
u32 val = I915_READ(SOFT_SCRATCH(0));
|
||||
*status = val;
|
||||
return GUC2HOST_IS_RESPONSE(val);
|
||||
}
|
||||
|
||||
static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = guc_to_i915(guc);
|
||||
u32 status;
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
if (WARN_ON(len < 1 || len > 15))
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&guc->action_lock);
|
||||
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
|
||||
|
||||
dev_priv->guc.action_count += 1;
|
||||
dev_priv->guc.action_cmd = data[0];
|
||||
|
||||
for (i = 0; i < len; i++)
|
||||
I915_WRITE(SOFT_SCRATCH(i), data[i]);
|
||||
|
||||
POSTING_READ(SOFT_SCRATCH(i - 1));
|
||||
|
||||
I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER);
|
||||
|
||||
/*
|
||||
* Fast commands should complete in less than 10us, so sample quickly
|
||||
* up to that length of time, then switch to a slower sleep-wait loop.
|
||||
* No HOST2GUC command should ever take longer than 10ms.
|
||||
*/
|
||||
ret = wait_for_us(host2guc_action_response(dev_priv, &status), 10);
|
||||
if (ret)
|
||||
ret = wait_for(host2guc_action_response(dev_priv, &status), 10);
|
||||
if (status != GUC2HOST_STATUS_SUCCESS) {
|
||||
/*
|
||||
* Either the GuC explicitly returned an error (which
|
||||
* we convert to -EIO here) or no response at all was
|
||||
* received within the timeout limit (-ETIMEDOUT)
|
||||
*/
|
||||
if (ret != -ETIMEDOUT)
|
||||
ret = -EIO;
|
||||
|
||||
DRM_WARN("Action 0x%X failed; ret=%d status=0x%08X response=0x%08X\n",
|
||||
data[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
|
||||
|
||||
dev_priv->guc.action_fail += 1;
|
||||
dev_priv->guc.action_err = ret;
|
||||
}
|
||||
dev_priv->guc.action_status = status;
|
||||
|
||||
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
|
||||
mutex_unlock(&guc->action_lock);
|
||||
|
||||
return ret;
|
||||
}

/*
 * Tell the GuC to allocate or deallocate a specific doorbell
 */

static int host2guc_allocate_doorbell(struct intel_guc *guc,
				      struct i915_guc_client *client)
static int guc_allocate_doorbell(struct intel_guc *guc,
				 struct i915_guc_client *client)
{
	u32 data[2];
	u32 action[] = {
		INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
		client->ctx_index
	};

	data[0] = HOST2GUC_ACTION_ALLOCATE_DOORBELL;
	data[1] = client->ctx_index;

	return host2guc_action(guc, data, 2);
	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int host2guc_release_doorbell(struct intel_guc *guc,
				     struct i915_guc_client *client)
static int guc_release_doorbell(struct intel_guc *guc,
				struct i915_guc_client *client)
{
	u32 data[2];
	u32 action[] = {
		INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
		client->ctx_index
	};

	data[0] = HOST2GUC_ACTION_DEALLOCATE_DOORBELL;
	data[1] = client->ctx_index;

	return host2guc_action(guc, data, 2);
}

static int host2guc_sample_forcewake(struct intel_guc *guc,
				     struct i915_guc_client *client)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 data[2];

	data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,bxt */
	if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		data[1] = 0;
	else
		/* bit 0 and 1 are for Render and Media domain separately */
		data[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return host2guc_action(guc, data, ARRAY_SIZE(data));
}

static int host2guc_logbuffer_flush_complete(struct intel_guc *guc)
{
	u32 data[1];

	data[0] = HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE;

	return host2guc_action(guc, data, 1);
}

static int host2guc_force_logbuffer_flush(struct intel_guc *guc)
{
	u32 data[2];

	data[0] = HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH;
	data[1] = 0;

	return host2guc_action(guc, data, 2);
}

static int host2guc_logging_control(struct intel_guc *guc, u32 control_val)
{
	u32 data[2];

	data[0] = HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING;
	data[1] = control_val;

	return host2guc_action(guc, data, 2);
	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/*
@@ -226,7 +113,7 @@ static int guc_update_doorbell_id(struct intel_guc *guc,
	    test_bit(client->doorbell_id, doorbell_bitmap)) {
		/* Deactivate the old doorbell */
		doorbell->db_status = GUC_DOORBELL_DISABLED;
		(void)host2guc_release_doorbell(guc, client);
		(void)guc_release_doorbell(guc, client);
		__clear_bit(client->doorbell_id, doorbell_bitmap);
	}

@@ -247,16 +134,9 @@ static int guc_update_doorbell_id(struct intel_guc *guc,

	/* Activate the new doorbell */
	__set_bit(new_id, doorbell_bitmap);
	doorbell->cookie = 0;
	doorbell->db_status = GUC_DOORBELL_ENABLED;
	return host2guc_allocate_doorbell(guc, client);
}

static int guc_init_doorbell(struct intel_guc *guc,
			     struct i915_guc_client *client,
			     uint16_t db_id)
{
	return guc_update_doorbell_id(guc, client, db_id);
	doorbell->cookie = client->doorbell_cookie;
	return guc_allocate_doorbell(guc, client);
}

static void guc_disable_doorbell(struct intel_guc *guc,
@@ -298,7 +178,7 @@ select_doorbell_register(struct intel_guc *guc, uint32_t priority)
 * Select, assign and release doorbell cachelines
 *
 * These functions track which doorbell cachelines are in use.
 * The data they manipulate is protected by the host2guc lock.
 * The data they manipulate is protected by the intel_guc_send lock.
 */

static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
@@ -390,11 +270,11 @@ static void guc_ctx_desc_init(struct intel_guc *guc,

		/* The state page is after PPHWSP */
		lrc->ring_lcra =
			i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
			guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
				(guc_engine_id << GUC_ELC_ENGINE_OFFSET);

		lrc->ring_begin = i915_ggtt_offset(ce->ring->vma);
		lrc->ring_begin = guc_ggtt_offset(ce->ring->vma);
		lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
		lrc->ring_next_free_location = lrc->ring_begin;
		lrc->ring_current_tail_pointer_value = 0;
@@ -410,7 +290,7 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
	 * The doorbell, process descriptor, and workqueue are all parts
	 * of the client object, which the GuC will reference via the GGTT
	 */
	gfx_addr = i915_ggtt_offset(client->vma);
	gfx_addr = guc_ggtt_offset(client->vma);
	desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
				client->doorbell_offset;
	desc.db_trigger_cpu =
@@ -464,22 +344,23 @@ static void guc_ctx_desc_fini(struct intel_guc *guc,
int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
{
	const size_t wqi_size = sizeof(struct guc_wq_item);
	struct i915_guc_client *gc = request->i915->guc.execbuf_client;
	struct guc_process_desc *desc = gc->vaddr + gc->proc_desc_offset;
	struct i915_guc_client *client = request->i915->guc.execbuf_client;
	struct guc_process_desc *desc = client->vaddr +
					client->proc_desc_offset;
	u32 freespace;
	int ret;

	spin_lock(&gc->wq_lock);
	freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
	freespace -= gc->wq_rsvd;
	spin_lock(&client->wq_lock);
	freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
	freespace -= client->wq_rsvd;
	if (likely(freespace >= wqi_size)) {
		gc->wq_rsvd += wqi_size;
		client->wq_rsvd += wqi_size;
		ret = 0;
	} else {
		gc->no_wq_space++;
		client->no_wq_space++;
		ret = -EAGAIN;
	}
	spin_unlock(&gc->wq_lock);
	spin_unlock(&client->wq_lock);

	return ret;
}
@@ -487,17 +368,17 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
void i915_guc_wq_unreserve(struct drm_i915_gem_request *request)
{
	const size_t wqi_size = sizeof(struct guc_wq_item);
	struct i915_guc_client *gc = request->i915->guc.execbuf_client;
	struct i915_guc_client *client = request->i915->guc.execbuf_client;

	GEM_BUG_ON(READ_ONCE(gc->wq_rsvd) < wqi_size);
	GEM_BUG_ON(READ_ONCE(client->wq_rsvd) < wqi_size);

	spin_lock(&gc->wq_lock);
	gc->wq_rsvd -= wqi_size;
	spin_unlock(&gc->wq_lock);
	spin_lock(&client->wq_lock);
	client->wq_rsvd -= wqi_size;
	spin_unlock(&client->wq_lock);
}
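The reserve/unreserve pair brackets request construction: space for one work item is reserved up front so that the later submit step cannot fail for lack of workqueue space. A minimal sketch of how a caller pairs the two on an error path; guc_try_build_request() and do_build() are hypothetical stand-ins for the real request-construction path:

/* Sketch only: pairing i915_guc_wq_reserve() with i915_guc_wq_unreserve(). */
static int guc_try_build_request(struct drm_i915_gem_request *rq)
{
	int err;

	err = i915_guc_wq_reserve(rq);	/* may return -EAGAIN */
	if (err)
		return err;

	err = do_build(rq);		/* hypothetical */
	if (err) {
		i915_guc_wq_unreserve(rq);	/* give the slot back */
		return err;
	}

	return 0;	/* reservation is consumed at submit time */
}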

/* Construct a Work Item and append it to the GuC's Work Queue */
static void guc_wq_item_append(struct i915_guc_client *gc,
static void guc_wq_item_append(struct i915_guc_client *client,
			       struct drm_i915_gem_request *rq)
{
	/* wqi_len is in DWords, and does not include the one-word header */
@@ -508,10 +389,10 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
	struct guc_wq_item *wqi;
	u32 freespace, tail, wq_off;

	desc = gc->vaddr + gc->proc_desc_offset;
	desc = client->vaddr + client->proc_desc_offset;

	/* Free space is guaranteed, see i915_guc_wq_reserve() above */
	freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
	freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
	GEM_BUG_ON(freespace < wqi_size);

	/* The GuC firmware wants the tail index in QWords, not bytes */
@@ -528,17 +409,17 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
	 * workqueue buffer dw by dw.
	 */
	BUILD_BUG_ON(wqi_size != 16);
	GEM_BUG_ON(gc->wq_rsvd < wqi_size);
	GEM_BUG_ON(client->wq_rsvd < wqi_size);

	/* postincrement WQ tail for next time */
	wq_off = gc->wq_tail;
	wq_off = client->wq_tail;
	GEM_BUG_ON(wq_off & (wqi_size - 1));
	gc->wq_tail += wqi_size;
	gc->wq_tail &= gc->wq_size - 1;
	gc->wq_rsvd -= wqi_size;
	client->wq_tail += wqi_size;
	client->wq_tail &= client->wq_size - 1;
	client->wq_rsvd -= wqi_size;

	/* WQ starts from the page after doorbell / process_desc */
	wqi = gc->vaddr + wq_off + GUC_DB_SIZE;
	wqi = client->vaddr + wq_off + GUC_DB_SIZE;

	/* Now fill in the 4-word work queue item */
	wqi->header = WQ_TYPE_INORDER |
@@ -553,30 +434,30 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
	wqi->fence_id = rq->global_seqno;
}

static int guc_ring_doorbell(struct i915_guc_client *gc)
static int guc_ring_doorbell(struct i915_guc_client *client)
{
	struct guc_process_desc *desc;
	union guc_doorbell_qw db_cmp, db_exc, db_ret;
	union guc_doorbell_qw *db;
	int attempt = 2, ret = -EAGAIN;

	desc = gc->vaddr + gc->proc_desc_offset;
	desc = client->vaddr + client->proc_desc_offset;

	/* Update the tail so it is visible to GuC */
	desc->tail = gc->wq_tail;
	desc->tail = client->wq_tail;

	/* current cookie */
	db_cmp.db_status = GUC_DOORBELL_ENABLED;
	db_cmp.cookie = gc->cookie;
	db_cmp.cookie = client->doorbell_cookie;

	/* cookie to be updated */
	db_exc.db_status = GUC_DOORBELL_ENABLED;
	db_exc.cookie = gc->cookie + 1;
	db_exc.cookie = client->doorbell_cookie + 1;
	if (db_exc.cookie == 0)
		db_exc.cookie = 1;

	/* pointer of current doorbell cacheline */
	db = gc->vaddr + gc->doorbell_offset;
	db = client->vaddr + client->doorbell_offset;

	while (attempt--) {
		/* lets ring the doorbell */
@@ -586,7 +467,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
		/* if the exchange was successfully executed */
		if (db_ret.value_qw == db_cmp.value_qw) {
			/* db was successfully rung */
			gc->cookie = db_exc.cookie;
			client->doorbell_cookie = db_exc.cookie;
			ret = 0;
			break;
		}
@@ -609,12 +490,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
}
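Ringing the doorbell is a 64-bit compare-exchange against a cacheline shared with the GuC: status plus a monotonically increasing cookie that skips zero. A standalone sketch of that idiom under stated assumptions — db_qw is a hypothetical stand-in for union guc_doorbell_qw, and the driver's exact atomic primitive may differ from cmpxchg64():

/* Sketch only: the cookie compare-exchange idiom used above. */
union db_qw {
	struct {
		u32 db_status;
		u32 cookie;
	};
	u64 value_qw;
};

static bool ring_once(union db_qw *db, u32 *cookie)
{
	union db_qw want, next;

	want.db_status = 1;		/* doorbell enabled */
	want.cookie = *cookie;
	next = want;
	if (++next.cookie == 0)		/* cookie 0 is skipped, as above */
		next.cookie = 1;

	if (cmpxchg64(&db->value_qw, want.value_qw, next.value_qw) !=
	    want.value_qw)
		return false;		/* raced or disabled; caller retries */

	*cookie = next.cookie;
	return true;
}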

/**
 * i915_guc_submit() - Submit commands through GuC
 * __i915_guc_submit() - Submit commands through GuC
 * @rq: request associated with the commands
 *
 * Return: 0 on success, otherwise an errno.
 *	   (Note: nonzero really shouldn't happen!)
 *
 * The caller must have already called i915_guc_wq_reserve() above with
 * a result of 0 (success), guaranteeing that there is space in the work
 * queue for the new request, so enqueuing the item cannot fail.
@@ -626,7 +504,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
 * The only error here arises if the doorbell hardware isn't functioning
 * as expected, which really shouldn't happen.
 */
static void i915_guc_submit(struct drm_i915_gem_request *rq)
static void __i915_guc_submit(struct drm_i915_gem_request *rq)
{
	struct drm_i915_private *dev_priv = rq->i915;
	struct intel_engine_cs *engine = rq->engine;
@@ -635,17 +513,6 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
	struct i915_guc_client *client = guc->execbuf_client;
	int b_ret;

	/* We keep the previous context alive until we retire the following
	 * request. This ensures that the context object is still pinned
	 * for any residual writes the HW makes into it on the context switch
	 * into the next object following the breadcrumb. Otherwise, we may
	 * retire the context too early.
	 */
	rq->previous_context = engine->last_context;
	engine->last_context = rq->ctx;

	i915_gem_request_submit(rq);

	spin_lock(&client->wq_lock);
	guc_wq_item_append(client, rq);

@@ -665,6 +532,12 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
	spin_unlock(&client->wq_lock);
}

static void i915_guc_submit(struct drm_i915_gem_request *rq)
{
	i915_gem_request_submit(rq);
	__i915_guc_submit(rq);
}

/*
 * Everything below here is concerned with setup & teardown, and is
 * therefore not part of the somewhat time-critical batch-submission
@@ -691,7 +564,7 @@ static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size)
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create(&dev_priv->drm, size);
	obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

@@ -779,8 +652,7 @@ static void guc_init_doorbell_hw(struct intel_guc *guc)
	uint16_t db_id;
	int i, err;

	/* Save client's original doorbell selection */
	db_id = client->doorbell_id;
	guc_disable_doorbell(guc, client);

	for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
		/* Skip if doorbell is OK */
@@ -793,7 +665,9 @@ static void guc_init_doorbell_hw(struct intel_guc *guc)
			  i, err);
	}

	/* Restore to original value */
	db_id = select_doorbell_register(guc, client->priority);
	WARN_ON(db_id == GUC_INVALID_DOORBELL_ID);

	err = guc_update_doorbell_id(guc, client, db_id);
	if (err)
		DRM_WARN("Failed to restore doorbell to %d, err %d\n",
@@ -883,8 +757,13 @@ guc_client_alloc(struct drm_i915_private *dev_priv,

	guc_proc_desc_init(guc, client);
	guc_ctx_desc_init(guc, client);
	if (guc_init_doorbell(guc, client, db_id))
		goto err;

	/* For runtime client allocation we need to enable the doorbell. Not
	 * required yet for the static execbuf_client as this special kernel
	 * client is enabled from i915_guc_submission_enable().
	 *
	 * guc_update_doorbell_id(guc, client, db_id);
	 */

	DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n",
			 priority, client, client->engines, client->ctx_index);
@@ -1318,7 +1197,7 @@ static void guc_log_create(struct intel_guc *guc)
	 * it should be present on the chipsets supporting GuC based
	 * submissions.
	 */
	if (WARN_ON(!i915_memcpy_from_wc(NULL, NULL, 0))) {
	if (WARN_ON(!i915_has_memcpy_from_wc())) {
		/* logging will not be enabled */
		i915.guc_log_level = -1;
		return;
@@ -1347,7 +1226,7 @@ static void guc_log_create(struct intel_guc *guc)
		(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
		(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);

	offset = i915_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
	offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
	guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
}

@@ -1450,7 +1329,7 @@ static void guc_addon_create(struct intel_guc *guc)
	guc_policies_init(policies);

	ads->scheduler_policies =
		i915_ggtt_offset(vma) + sizeof(struct guc_ads);
		guc_ggtt_offset(vma) + sizeof(struct guc_ads);

	/* MMIO reg state */
	reg_state = (void *)policies + sizeof(struct guc_policies);
@@ -1484,6 +1363,9 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_vma *vma;

	if (!HAS_GUC_SCHED(dev_priv))
		return 0;

	/* Wipe bitmap & delete client in case of reinitialisation */
	bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS);
	i915_guc_submission_disable(dev_priv);
@@ -1500,46 +1382,62 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)

	guc->ctx_pool_vma = vma;
	ida_init(&guc->ctx_ids);
	mutex_init(&guc->action_lock);
	guc_log_create(guc);
	guc_addon_create(guc);

	guc->execbuf_client = guc_client_alloc(dev_priv,
					       INTEL_INFO(dev_priv)->ring_mask,
					       GUC_CTX_PRIORITY_KMD_NORMAL,
					       dev_priv->kernel_context);
	if (!guc->execbuf_client) {
		DRM_ERROR("Failed to create GuC client for execbuf!\n");
		goto err;
	}

	return 0;

err:
	i915_guc_submission_fini(dev_priv);
	return -ENOMEM;
}

static void guc_reset_wq(struct i915_guc_client *client)
{
	struct guc_process_desc *desc = client->vaddr +
					client->proc_desc_offset;

	desc->head = 0;
	desc->tail = 0;

	client->wq_tail = 0;
}

int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	struct drm_i915_gem_request *request;
	struct i915_guc_client *client;
	struct i915_guc_client *client = guc->execbuf_client;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* client for execbuf submission */
	client = guc_client_alloc(dev_priv,
				  INTEL_INFO(dev_priv)->ring_mask,
				  GUC_CTX_PRIORITY_KMD_NORMAL,
				  dev_priv->kernel_context);
	if (!client) {
		DRM_ERROR("Failed to create normal GuC client!\n");
		return -ENOMEM;
	}
	if (!client)
		return -ENODEV;

	guc->execbuf_client = client;
	host2guc_sample_forcewake(guc, client);
	intel_guc_sample_forcewake(guc);

	guc_reset_wq(client);
	guc_init_doorbell_hw(guc);

	/* Take over from manual control of ELSP (execlists) */
	for_each_engine(engine, dev_priv, id) {
		struct drm_i915_gem_request *rq;

		engine->submit_request = i915_guc_submit;
		engine->schedule = NULL;

		/* Replay the current set of previously submitted requests */
		list_for_each_entry(request,
				    &engine->timeline->requests, link) {
		list_for_each_entry(rq, &engine->timeline->requests, link) {
			client->wq_rsvd += sizeof(struct guc_wq_item);
			if (i915_sw_fence_done(&request->submit))
				i915_guc_submit(request);
			__i915_guc_submit(rq);
		}
	}

@@ -1555,14 +1453,18 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv)

	/* Revert back to manual ELSP submission */
	intel_execlists_enable_submission(dev_priv);

	guc_client_free(dev_priv, guc->execbuf_client);
	guc->execbuf_client = NULL;
}

void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
{
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_guc_client *client;

	client = fetch_and_zero(&guc->execbuf_client);
	if (!client)
		return;

	guc_client_free(dev_priv, client);

	i915_vma_unpin_and_release(&guc->ads_vma);
	i915_vma_unpin_and_release(&guc->log.vma);
@@ -1574,11 +1476,10 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @dev: drm device
 * @dev_priv: i915 device private
 */
int intel_guc_suspend(struct drm_device *dev)
int intel_guc_suspend(struct drm_i915_private *dev_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_gem_context *ctx;
	u32 data[3];
@@ -1590,23 +1491,22 @@ int intel_guc_suspend(struct drm_device *dev)

	ctx = dev_priv->kernel_context;

	data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
	data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
	/* any value greater than GUC_POWER_D0 */
	data[1] = GUC_POWER_D1;
	/* first page is shared data with GuC */
	data[2] = i915_ggtt_offset(ctx->engine[RCS].state);
	data[2] = guc_ggtt_offset(ctx->engine[RCS].state);

	return host2guc_action(guc, data, ARRAY_SIZE(data));
	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @dev: drm device
 * @dev_priv: i915 device private
 */
int intel_guc_resume(struct drm_device *dev)
int intel_guc_resume(struct drm_i915_private *dev_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_guc *guc = &dev_priv->guc;
	struct i915_gem_context *ctx;
	u32 data[3];
@@ -1619,12 +1519,12 @@ int intel_guc_resume(struct drm_device *dev)

	ctx = dev_priv->kernel_context;

	data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
	data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
	data[1] = GUC_POWER_D0;
	/* first page is shared data with GuC */
	data[2] = i915_ggtt_offset(ctx->engine[RCS].state);
	data[2] = guc_ggtt_offset(ctx->engine[RCS].state);

	return host2guc_action(guc, data, ARRAY_SIZE(data));
	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}

void i915_guc_capture_logs(struct drm_i915_private *dev_priv)
@@ -1635,7 +1535,7 @@ void i915_guc_capture_logs(struct drm_i915_private *dev_priv)
	 * time, so get/put should be really quick.
	 */
	intel_runtime_pm_get(dev_priv);
	host2guc_logbuffer_flush_complete(&dev_priv->guc);
	intel_guc_log_flush_complete(&dev_priv->guc);
	intel_runtime_pm_put(dev_priv);
}

@@ -1653,7 +1553,7 @@ void i915_guc_flush_logs(struct drm_i915_private *dev_priv)
	flush_work(&dev_priv->guc.log.flush_work);

	/* Ask GuC to update the log buffer state */
	host2guc_force_logbuffer_flush(&dev_priv->guc);
	intel_guc_log_flush(&dev_priv->guc);

	/* GuC would have updated log buffer by now, so capture it */
	i915_guc_capture_logs(dev_priv);
@@ -1694,9 +1594,9 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
	if (!log_param.logging_enabled && (i915.guc_log_level < 0))
		return 0;

	ret = host2guc_logging_control(&dev_priv->guc, log_param.value);
	ret = intel_guc_log_control(&dev_priv->guc, log_param.value);
	if (ret < 0) {
		DRM_DEBUG_DRIVER("host2guc action failed %d\n", ret);
		DRM_DEBUG_DRIVER("guc_logging_control action failed %d\n", ret);
		return ret;
	}
@@ -1683,8 +1683,8 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
		u32 msg, flush;

		msg = I915_READ(SOFT_SCRATCH(15));
		flush = msg & (GUC2HOST_MSG_CRASH_DUMP_POSTED |
			       GUC2HOST_MSG_FLUSH_LOG_BUFFER);
		flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
			       INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
		if (flush) {
			/* Clear the message bits that are handled */
			I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
@@ -2435,7 +2435,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
			found = true;
		}

		if (IS_BROXTON(dev_priv)) {
		if (IS_GEN9_LP(dev_priv)) {
			tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
			if (tmp_mask) {
				bxt_hpd_irq_handler(dev_priv, tmp_mask,
@@ -2451,7 +2451,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
			}
		}

		if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
		if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
			gmbus_irq_handler(dev_priv);
			found = true;
		}
@@ -3375,7 +3375,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
			GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;
		if (IS_BROXTON(dev_priv))
		if (IS_GEN9_LP(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
@@ -3386,7 +3386,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
		GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_BROXTON(dev_priv))
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
@@ -4211,7 +4211,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_BROXTON(dev_priv))
		if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
752
drivers/gpu/drm/i915/i915_oa_hsw.c
Normal file
@@ -0,0 +1,752 @@
/*
 * Autogenerated file, DO NOT EDIT manually!
 *
 * Copyright (c) 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sysfs.h>

#include "i915_drv.h"
#include "i915_oa_hsw.h"

enum metric_set_id {
	METRIC_SET_ID_RENDER_BASIC = 1,
	METRIC_SET_ID_COMPUTE_BASIC,
	METRIC_SET_ID_COMPUTE_EXTENDED,
	METRIC_SET_ID_MEMORY_READS,
	METRIC_SET_ID_MEMORY_WRITES,
	METRIC_SET_ID_SAMPLER_BALANCE,
};

int i915_oa_n_builtin_metric_sets_hsw = 6;

static const struct i915_oa_reg b_counter_config_render_basic[] = {
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
};

static const struct i915_oa_reg mux_config_render_basic[] = {
	{ _MMIO(0x253a4), 0x01600000 },
	{ _MMIO(0x25440), 0x00100000 },
	{ _MMIO(0x25128), 0x00000000 },
	{ _MMIO(0x2691c), 0x00000800 },
	{ _MMIO(0x26aa0), 0x01500000 },
	{ _MMIO(0x26b9c), 0x00006000 },
	{ _MMIO(0x2791c), 0x00000800 },
	{ _MMIO(0x27aa0), 0x01500000 },
	{ _MMIO(0x27b9c), 0x00006000 },
	{ _MMIO(0x2641c), 0x00000400 },
	{ _MMIO(0x25380), 0x00000010 },
	{ _MMIO(0x2538c), 0x00000000 },
	{ _MMIO(0x25384), 0x0800aaaa },
	{ _MMIO(0x25400), 0x00000004 },
	{ _MMIO(0x2540c), 0x06029000 },
	{ _MMIO(0x25410), 0x00000002 },
	{ _MMIO(0x25404), 0x5c30ffff },
	{ _MMIO(0x25100), 0x00000016 },
	{ _MMIO(0x25110), 0x00000400 },
	{ _MMIO(0x25104), 0x00000000 },
	{ _MMIO(0x26804), 0x00001211 },
	{ _MMIO(0x26884), 0x00000100 },
	{ _MMIO(0x26900), 0x00000002 },
	{ _MMIO(0x26908), 0x00700000 },
	{ _MMIO(0x26904), 0x00000000 },
	{ _MMIO(0x26984), 0x00001022 },
	{ _MMIO(0x26a04), 0x00000011 },
	{ _MMIO(0x26a80), 0x00000006 },
	{ _MMIO(0x26a88), 0x00000c02 },
	{ _MMIO(0x26a84), 0x00000000 },
	{ _MMIO(0x26b04), 0x00001000 },
	{ _MMIO(0x26b80), 0x00000002 },
	{ _MMIO(0x26b8c), 0x00000007 },
	{ _MMIO(0x26b84), 0x00000000 },
	{ _MMIO(0x27804), 0x00004844 },
	{ _MMIO(0x27884), 0x00000400 },
	{ _MMIO(0x27900), 0x00000002 },
	{ _MMIO(0x27908), 0x0e000000 },
	{ _MMIO(0x27904), 0x00000000 },
	{ _MMIO(0x27984), 0x00004088 },
	{ _MMIO(0x27a04), 0x00000044 },
	{ _MMIO(0x27a80), 0x00000006 },
	{ _MMIO(0x27a88), 0x00018040 },
	{ _MMIO(0x27a84), 0x00000000 },
	{ _MMIO(0x27b04), 0x00004000 },
	{ _MMIO(0x27b80), 0x00000002 },
	{ _MMIO(0x27b8c), 0x000000e0 },
	{ _MMIO(0x27b84), 0x00000000 },
	{ _MMIO(0x26104), 0x00002222 },
	{ _MMIO(0x26184), 0x0c006666 },
	{ _MMIO(0x26284), 0x04000000 },
	{ _MMIO(0x26304), 0x04000000 },
	{ _MMIO(0x26400), 0x00000002 },
	{ _MMIO(0x26410), 0x000000a0 },
	{ _MMIO(0x26404), 0x00000000 },
	{ _MMIO(0x25420), 0x04108020 },
	{ _MMIO(0x25424), 0x1284a420 },
	{ _MMIO(0x2541c), 0x00000000 },
	{ _MMIO(0x25428), 0x00042049 },
};

static const struct i915_oa_reg *
get_render_basic_mux_config(struct drm_i915_private *dev_priv,
			    int *len)
{
	*len = ARRAY_SIZE(mux_config_render_basic);
	return mux_config_render_basic;
}

static const struct i915_oa_reg b_counter_config_compute_basic[] = {
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2718), 0xaaaaaaaa },
	{ _MMIO(0x271c), 0xaaaaaaaa },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
	{ _MMIO(0x2728), 0xaaaaaaaa },
	{ _MMIO(0x272c), 0xaaaaaaaa },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00000000 },
	{ _MMIO(0x2748), 0x00000000 },
	{ _MMIO(0x274c), 0x00000000 },
	{ _MMIO(0x2750), 0x00000000 },
	{ _MMIO(0x2754), 0x00000000 },
	{ _MMIO(0x2758), 0x00000000 },
	{ _MMIO(0x275c), 0x00000000 },
	{ _MMIO(0x236c), 0x00000000 },
};

static const struct i915_oa_reg mux_config_compute_basic[] = {
	{ _MMIO(0x253a4), 0x00000000 },
	{ _MMIO(0x2681c), 0x01f00800 },
	{ _MMIO(0x26820), 0x00001000 },
	{ _MMIO(0x2781c), 0x01f00800 },
	{ _MMIO(0x26520), 0x00000007 },
	{ _MMIO(0x265a0), 0x00000007 },
	{ _MMIO(0x25380), 0x00000010 },
	{ _MMIO(0x2538c), 0x00300000 },
	{ _MMIO(0x25384), 0xaa8aaaaa },
	{ _MMIO(0x25404), 0xffffffff },
	{ _MMIO(0x26800), 0x00004202 },
	{ _MMIO(0x26808), 0x00605817 },
	{ _MMIO(0x2680c), 0x10001005 },
	{ _MMIO(0x26804), 0x00000000 },
	{ _MMIO(0x27800), 0x00000102 },
	{ _MMIO(0x27808), 0x0c0701e0 },
	{ _MMIO(0x2780c), 0x000200a0 },
	{ _MMIO(0x27804), 0x00000000 },
	{ _MMIO(0x26484), 0x44000000 },
	{ _MMIO(0x26704), 0x44000000 },
	{ _MMIO(0x26500), 0x00000006 },
	{ _MMIO(0x26510), 0x00000001 },
	{ _MMIO(0x26504), 0x88000000 },
	{ _MMIO(0x26580), 0x00000006 },
	{ _MMIO(0x26590), 0x00000020 },
	{ _MMIO(0x26584), 0x00000000 },
	{ _MMIO(0x26104), 0x55822222 },
	{ _MMIO(0x26184), 0xaa866666 },
	{ _MMIO(0x25420), 0x08320c83 },
	{ _MMIO(0x25424), 0x06820c83 },
	{ _MMIO(0x2541c), 0x00000000 },
	{ _MMIO(0x25428), 0x00000c03 },
};

static const struct i915_oa_reg *
get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
			     int *len)
{
	*len = ARRAY_SIZE(mux_config_compute_basic);
	return mux_config_compute_basic;
}

static const struct i915_oa_reg b_counter_config_compute_extended[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2770), 0x0007fe2a },
	{ _MMIO(0x2774), 0x0000ff00 },
	{ _MMIO(0x2778), 0x0007fe6a },
	{ _MMIO(0x277c), 0x0000ff00 },
	{ _MMIO(0x2780), 0x0007fe92 },
	{ _MMIO(0x2784), 0x0000ff00 },
	{ _MMIO(0x2788), 0x0007fea2 },
	{ _MMIO(0x278c), 0x0000ff00 },
	{ _MMIO(0x2790), 0x0007fe32 },
	{ _MMIO(0x2794), 0x0000ff00 },
	{ _MMIO(0x2798), 0x0007fe9a },
	{ _MMIO(0x279c), 0x0000ff00 },
	{ _MMIO(0x27a0), 0x0007ff23 },
	{ _MMIO(0x27a4), 0x0000ff00 },
	{ _MMIO(0x27a8), 0x0007fff3 },
	{ _MMIO(0x27ac), 0x0000fffe },
};

static const struct i915_oa_reg mux_config_compute_extended[] = {
	{ _MMIO(0x2681c), 0x3eb00800 },
	{ _MMIO(0x26820), 0x00900000 },
	{ _MMIO(0x25384), 0x02aaaaaa },
	{ _MMIO(0x25404), 0x03ffffff },
	{ _MMIO(0x26800), 0x00142284 },
	{ _MMIO(0x26808), 0x0e629062 },
	{ _MMIO(0x2680c), 0x3f6f55cb },
	{ _MMIO(0x26810), 0x00000014 },
	{ _MMIO(0x26804), 0x00000000 },
	{ _MMIO(0x26104), 0x02aaaaaa },
	{ _MMIO(0x26184), 0x02aaaaaa },
	{ _MMIO(0x25420), 0x00000000 },
	{ _MMIO(0x25424), 0x00000000 },
	{ _MMIO(0x2541c), 0x00000000 },
	{ _MMIO(0x25428), 0x00000000 },
};

static const struct i915_oa_reg *
get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
				int *len)
{
	*len = ARRAY_SIZE(mux_config_compute_extended);
	return mux_config_compute_extended;
}

static const struct i915_oa_reg b_counter_config_memory_reads[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x274c), 0x76543298 },
	{ _MMIO(0x2748), 0x98989898 },
	{ _MMIO(0x2744), 0x000000e4 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x275c), 0x98a98a98 },
	{ _MMIO(0x2758), 0x88888888 },
	{ _MMIO(0x2754), 0x000c5500 },
	{ _MMIO(0x2750), 0x00000000 },
	{ _MMIO(0x2770), 0x0007f81a },
	{ _MMIO(0x2774), 0x0000fc00 },
	{ _MMIO(0x2778), 0x0007f82a },
	{ _MMIO(0x277c), 0x0000fc00 },
	{ _MMIO(0x2780), 0x0007f872 },
	{ _MMIO(0x2784), 0x0000fc00 },
	{ _MMIO(0x2788), 0x0007f8ba },
	{ _MMIO(0x278c), 0x0000fc00 },
	{ _MMIO(0x2790), 0x0007f87a },
	{ _MMIO(0x2794), 0x0000fc00 },
	{ _MMIO(0x2798), 0x0007f8ea },
	{ _MMIO(0x279c), 0x0000fc00 },
	{ _MMIO(0x27a0), 0x0007f8e2 },
	{ _MMIO(0x27a4), 0x0000fc00 },
	{ _MMIO(0x27a8), 0x0007f8f2 },
	{ _MMIO(0x27ac), 0x0000fc00 },
};

static const struct i915_oa_reg mux_config_memory_reads[] = {
	{ _MMIO(0x253a4), 0x34300000 },
	{ _MMIO(0x25440), 0x2d800000 },
	{ _MMIO(0x25444), 0x00000008 },
	{ _MMIO(0x25128), 0x0e600000 },
	{ _MMIO(0x25380), 0x00000450 },
	{ _MMIO(0x25390), 0x00052c43 },
	{ _MMIO(0x25384), 0x00000000 },
	{ _MMIO(0x25400), 0x00006144 },
	{ _MMIO(0x25408), 0x0a418820 },
	{ _MMIO(0x2540c), 0x000820e6 },
	{ _MMIO(0x25404), 0xff500000 },
	{ _MMIO(0x25100), 0x000005d6 },
	{ _MMIO(0x2510c), 0x0ef00000 },
	{ _MMIO(0x25104), 0x00000000 },
	{ _MMIO(0x25420), 0x02108421 },
	{ _MMIO(0x25424), 0x00008421 },
	{ _MMIO(0x2541c), 0x00000000 },
	{ _MMIO(0x25428), 0x00000000 },
};

static const struct i915_oa_reg *
get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
			    int *len)
{
	*len = ARRAY_SIZE(mux_config_memory_reads);
	return mux_config_memory_reads;
}

static const struct i915_oa_reg b_counter_config_memory_writes[] = {
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x274c), 0x76543298 },
	{ _MMIO(0x2748), 0x98989898 },
	{ _MMIO(0x2744), 0x000000e4 },
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x275c), 0xbabababa },
	{ _MMIO(0x2758), 0x88888888 },
	{ _MMIO(0x2754), 0x000c5500 },
	{ _MMIO(0x2750), 0x00000000 },
	{ _MMIO(0x2770), 0x0007f81a },
	{ _MMIO(0x2774), 0x0000fc00 },
	{ _MMIO(0x2778), 0x0007f82a },
	{ _MMIO(0x277c), 0x0000fc00 },
	{ _MMIO(0x2780), 0x0007f822 },
	{ _MMIO(0x2784), 0x0000fc00 },
	{ _MMIO(0x2788), 0x0007f8ba },
	{ _MMIO(0x278c), 0x0000fc00 },
	{ _MMIO(0x2790), 0x0007f87a },
	{ _MMIO(0x2794), 0x0000fc00 },
	{ _MMIO(0x2798), 0x0007f8ea },
	{ _MMIO(0x279c), 0x0000fc00 },
	{ _MMIO(0x27a0), 0x0007f8e2 },
	{ _MMIO(0x27a4), 0x0000fc00 },
	{ _MMIO(0x27a8), 0x0007f8f2 },
	{ _MMIO(0x27ac), 0x0000fc00 },
};

static const struct i915_oa_reg mux_config_memory_writes[] = {
	{ _MMIO(0x253a4), 0x34300000 },
	{ _MMIO(0x25440), 0x01500000 },
	{ _MMIO(0x25444), 0x00000120 },
	{ _MMIO(0x25128), 0x0c200000 },
	{ _MMIO(0x25380), 0x00000450 },
	{ _MMIO(0x25390), 0x00052c43 },
	{ _MMIO(0x25384), 0x00000000 },
	{ _MMIO(0x25400), 0x00007184 },
	{ _MMIO(0x25408), 0x0a418820 },
	{ _MMIO(0x2540c), 0x000820e6 },
	{ _MMIO(0x25404), 0xff500000 },
	{ _MMIO(0x25100), 0x000005d6 },
	{ _MMIO(0x2510c), 0x1e700000 },
	{ _MMIO(0x25104), 0x00000000 },
	{ _MMIO(0x25420), 0x02108421 },
	{ _MMIO(0x25424), 0x00008421 },
	{ _MMIO(0x2541c), 0x00000000 },
	{ _MMIO(0x25428), 0x00000000 },
};

static const struct i915_oa_reg *
get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
			     int *len)
{
	*len = ARRAY_SIZE(mux_config_memory_writes);
	return mux_config_memory_writes;
}

static const struct i915_oa_reg b_counter_config_sampler_balance[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2714), 0x00800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2724), 0x00800000 },
};

static const struct i915_oa_reg mux_config_sampler_balance[] = {
	{ _MMIO(0x2eb9c), 0x01906400 },
	{ _MMIO(0x2fb9c), 0x01906400 },
	{ _MMIO(0x253a4), 0x00000000 },
	{ _MMIO(0x26b9c), 0x01906400 },
	{ _MMIO(0x27b9c), 0x01906400 },
	{ _MMIO(0x27104), 0x00a00000 },
	{ _MMIO(0x27184), 0x00a50000 },
	{ _MMIO(0x2e804), 0x00500000 },
	{ _MMIO(0x2e984), 0x00500000 },
	{ _MMIO(0x2eb04), 0x00500000 },
	{ _MMIO(0x2eb80), 0x00000084 },
	{ _MMIO(0x2eb8c), 0x14200000 },
	{ _MMIO(0x2eb84), 0x00000000 },
	{ _MMIO(0x2f804), 0x00050000 },
	{ _MMIO(0x2f984), 0x00050000 },
	{ _MMIO(0x2fb04), 0x00050000 },
	{ _MMIO(0x2fb80), 0x00000084 },
	{ _MMIO(0x2fb8c), 0x00050800 },
	{ _MMIO(0x2fb84), 0x00000000 },
	{ _MMIO(0x25380), 0x00000010 },
	{ _MMIO(0x2538c), 0x000000c0 },
	{ _MMIO(0x25384), 0xaa550000 },
	{ _MMIO(0x25404), 0xffffc000 },
	{ _MMIO(0x26804), 0x50000000 },
	{ _MMIO(0x26984), 0x50000000 },
	{ _MMIO(0x26b04), 0x50000000 },
	{ _MMIO(0x26b80), 0x00000084 },
	{ _MMIO(0x26b90), 0x00050800 },
	{ _MMIO(0x26b84), 0x00000000 },
	{ _MMIO(0x27804), 0x05000000 },
	{ _MMIO(0x27984), 0x05000000 },
	{ _MMIO(0x27b04), 0x05000000 },
	{ _MMIO(0x27b80), 0x00000084 },
	{ _MMIO(0x27b90), 0x00000142 },
	{ _MMIO(0x27b84), 0x00000000 },
	{ _MMIO(0x26104), 0xa0000000 },
	{ _MMIO(0x26184), 0xa5000000 },
	{ _MMIO(0x25424), 0x00008620 },
	{ _MMIO(0x2541c), 0x00000000 },
	{ _MMIO(0x25428), 0x0004a54a },
};

static const struct i915_oa_reg *
get_sampler_balance_mux_config(struct drm_i915_private *dev_priv,
			       int *len)
{
	*len = ARRAY_SIZE(mux_config_sampler_balance);
	return mux_config_sampler_balance;
}

int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv)
{
	dev_priv->perf.oa.mux_regs = NULL;
	dev_priv->perf.oa.mux_regs_len = 0;
	dev_priv->perf.oa.b_counter_regs = NULL;
	dev_priv->perf.oa.b_counter_regs_len = 0;

	switch (dev_priv->perf.oa.metrics_set) {
	case METRIC_SET_ID_RENDER_BASIC:
		dev_priv->perf.oa.mux_regs =
			get_render_basic_mux_config(dev_priv,
						    &dev_priv->perf.oa.mux_regs_len);
		if (!dev_priv->perf.oa.mux_regs) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_render_basic;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_render_basic);

		return 0;
	case METRIC_SET_ID_COMPUTE_BASIC:
		dev_priv->perf.oa.mux_regs =
			get_compute_basic_mux_config(dev_priv,
						     &dev_priv->perf.oa.mux_regs_len);
		if (!dev_priv->perf.oa.mux_regs) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_compute_basic;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_compute_basic);

		return 0;
	case METRIC_SET_ID_COMPUTE_EXTENDED:
		dev_priv->perf.oa.mux_regs =
			get_compute_extended_mux_config(dev_priv,
							&dev_priv->perf.oa.mux_regs_len);
		if (!dev_priv->perf.oa.mux_regs) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_compute_extended;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_compute_extended);

		return 0;
	case METRIC_SET_ID_MEMORY_READS:
		dev_priv->perf.oa.mux_regs =
			get_memory_reads_mux_config(dev_priv,
						    &dev_priv->perf.oa.mux_regs_len);
		if (!dev_priv->perf.oa.mux_regs) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_memory_reads;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_memory_reads);

		return 0;
	case METRIC_SET_ID_MEMORY_WRITES:
		dev_priv->perf.oa.mux_regs =
			get_memory_writes_mux_config(dev_priv,
						     &dev_priv->perf.oa.mux_regs_len);
		if (!dev_priv->perf.oa.mux_regs) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_memory_writes;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_memory_writes);

		return 0;
	case METRIC_SET_ID_SAMPLER_BALANCE:
		dev_priv->perf.oa.mux_regs =
			get_sampler_balance_mux_config(dev_priv,
						       &dev_priv->perf.oa.mux_regs_len);
		if (!dev_priv->perf.oa.mux_regs) {
			DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_BALANCE\" metric set");

			/* EINVAL because *_register_sysfs already checked this
			 * and so it wouldn't have been advertised to userspace and
			 * so shouldn't have been requested
			 */
			return -EINVAL;
		}

		dev_priv->perf.oa.b_counter_regs =
			b_counter_config_sampler_balance;
		dev_priv->perf.oa.b_counter_regs_len =
			ARRAY_SIZE(b_counter_config_sampler_balance);

		return 0;
	default:
		return -ENODEV;
	}
}
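In outline, a caller picks a set by id and then asks this helper to latch the matching register lists. A minimal sketch of that usage (presumably exercised on the perf stream-open path; error handling elided):

/* Sketch only: selecting the Haswell "render basic" metric set. */
static int select_render_basic(struct drm_i915_private *dev_priv)
{
	dev_priv->perf.oa.metrics_set = METRIC_SET_ID_RENDER_BASIC;

	/* Fills dev_priv->perf.oa.mux_regs / b_counter_regs on success */
	return i915_oa_select_metric_set_hsw(dev_priv);
}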

static ssize_t
show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
}

static struct device_attribute dev_attr_render_basic_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_render_basic_id,
	.store = NULL,
};

static struct attribute *attrs_render_basic[] = {
	&dev_attr_render_basic_id.attr,
	NULL,
};

static struct attribute_group group_render_basic = {
	.name = "403d8832-1a27-4aa6-a64e-f5389ce7b212",
	.attrs = attrs_render_basic,
};
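Each metric set repeats this pattern: a sysfs attribute group named by the set's GUID, containing a single read-only id file that yields the numeric metric_set_id. Assuming the usual i915 perf layout, where metrics_kobj sits under the DRM card directory (the exact path below is an assumption), userspace could resolve a GUID to its id like so:

/* Userspace sketch (hypothetical sysfs path). */
#include <stdio.h>

static int read_metric_id(void)
{
	FILE *f = fopen("/sys/class/drm/card0/metrics/"
			"403d8832-1a27-4aa6-a64e-f5389ce7b212/id", "r");
	int id = -1;

	if (f) {
		if (fscanf(f, "%d", &id) != 1)
			id = -1;
		fclose(f);
	}
	return id;	/* METRIC_SET_ID_RENDER_BASIC (1) on Haswell */
}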

static ssize_t
show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
}

static struct device_attribute dev_attr_compute_basic_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_basic_id,
	.store = NULL,
};

static struct attribute *attrs_compute_basic[] = {
	&dev_attr_compute_basic_id.attr,
	NULL,
};

static struct attribute_group group_compute_basic = {
	.name = "39ad14bc-2380-45c4-91eb-fbcb3aa7ae7b",
	.attrs = attrs_compute_basic,
};

static ssize_t
show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
}

static struct device_attribute dev_attr_compute_extended_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_compute_extended_id,
	.store = NULL,
};

static struct attribute *attrs_compute_extended[] = {
	&dev_attr_compute_extended_id.attr,
	NULL,
};

static struct attribute_group group_compute_extended = {
	.name = "3865be28-6982-49fe-9494-e4d1b4795413",
	.attrs = attrs_compute_extended,
};

static ssize_t
show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
}

static struct device_attribute dev_attr_memory_reads_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_memory_reads_id,
	.store = NULL,
};

static struct attribute *attrs_memory_reads[] = {
	&dev_attr_memory_reads_id.attr,
	NULL,
};

static struct attribute_group group_memory_reads = {
	.name = "bb5ed49b-2497-4095-94f6-26ba294db88a",
	.attrs = attrs_memory_reads,
};

static ssize_t
show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
}

static struct device_attribute dev_attr_memory_writes_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_memory_writes_id,
	.store = NULL,
};

static struct attribute *attrs_memory_writes[] = {
	&dev_attr_memory_writes_id.attr,
	NULL,
};

static struct attribute_group group_memory_writes = {
	.name = "3358d639-9b5f-45ab-976d-9b08cbfc6240",
	.attrs = attrs_memory_writes,
};

static ssize_t
show_sampler_balance_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER_BALANCE);
}

static struct device_attribute dev_attr_sampler_balance_id = {
	.attr = { .name = "id", .mode = 0444 },
	.show = show_sampler_balance_id,
	.store = NULL,
};

static struct attribute *attrs_sampler_balance[] = {
	&dev_attr_sampler_balance_id.attr,
	NULL,
};

static struct attribute_group group_sampler_balance = {
	.name = "bc274488-b4b6-40c7-90da-b77d7ad16189",
	.attrs = attrs_sampler_balance,
};

int
i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv)
{
	int mux_len;
	int ret = 0;

	if (get_render_basic_mux_config(dev_priv, &mux_len)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
		if (ret)
			goto error_render_basic;
	}
	if (get_compute_basic_mux_config(dev_priv, &mux_len)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
		if (ret)
			goto error_compute_basic;
	}
	if (get_compute_extended_mux_config(dev_priv, &mux_len)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
		if (ret)
			goto error_compute_extended;
	}
	if (get_memory_reads_mux_config(dev_priv, &mux_len)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
		if (ret)
			goto error_memory_reads;
	}
	if (get_memory_writes_mux_config(dev_priv, &mux_len)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
		if (ret)
			goto error_memory_writes;
	}
	if (get_sampler_balance_mux_config(dev_priv, &mux_len)) {
		ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_balance);
		if (ret)
			goto error_sampler_balance;
	}

	return 0;

error_sampler_balance:
	if (get_memory_writes_mux_config(dev_priv, &mux_len))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
error_memory_writes:
	if (get_memory_reads_mux_config(dev_priv, &mux_len))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
error_memory_reads:
	if (get_compute_extended_mux_config(dev_priv, &mux_len))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
error_compute_extended:
	if (get_compute_basic_mux_config(dev_priv, &mux_len))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
error_compute_basic:
	if (get_render_basic_mux_config(dev_priv, &mux_len))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
error_render_basic:
	return ret;
}

void
i915_perf_unregister_sysfs_hsw(struct drm_i915_private *dev_priv)
{
	int mux_len;

	if (get_render_basic_mux_config(dev_priv, &mux_len))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
	if (get_compute_basic_mux_config(dev_priv, &mux_len))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
	if (get_compute_extended_mux_config(dev_priv, &mux_len))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
	if (get_memory_reads_mux_config(dev_priv, &mux_len))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
	if (get_memory_writes_mux_config(dev_priv, &mux_len))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
	if (get_sampler_balance_mux_config(dev_priv, &mux_len))
		sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_balance);
}
38
drivers/gpu/drm/i915/i915_oa_hsw.h
Normal file
@@ -0,0 +1,38 @@
/*
 * Autogenerated file, DO NOT EDIT manually!
 *
 * Copyright (c) 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_OA_HSW_H__
#define __I915_OA_HSW_H__

extern int i915_oa_n_builtin_metric_sets_hsw;

extern int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv);

extern int i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv);

extern void i915_perf_unregister_sysfs_hsw(struct drm_i915_private *dev_priv);

#endif
@@ -50,7 +50,7 @@ struct i915_params i915 __read_mostly = {
	.error_capture = true,
	.invert_brightness = 0,
	.disable_display = 0,
	.enable_cmd_parser = 1,
	.enable_cmd_parser = true,
	.use_mmio_flip = 0,
	.mmio_debug = 0,
	.verbose_state_checks = 1,
@@ -188,9 +188,9 @@ MODULE_PARM_DESC(invert_brightness,
module_param_named(disable_display, i915.disable_display, bool, 0400);
MODULE_PARM_DESC(disable_display, "Disable display (default: false)");

module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, bool, 0400);
MODULE_PARM_DESC(enable_cmd_parser,
		 "Enable command parsing (1=enabled [default], 0=disabled)");
		 "Enable command parsing (true=enabled [default], false=disabled)");

module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600);
MODULE_PARM_DESC(use_mmio_flip,

@@ -44,7 +44,6 @@ struct i915_params {
	int disable_power_well;
	int enable_ips;
	int invert_brightness;
	int enable_cmd_parser;
	int enable_guc_loading;
	int enable_guc_submission;
	int guc_log_level;
@@ -53,6 +52,7 @@ struct i915_params {
	int edp_vswing;
	unsigned int inject_load_failure;
	/* leave bools at the end to not create holes */
	bool enable_cmd_parser;
	bool enable_hangcheck;
	bool fastboot;
	bool prefault_disable;
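With enable_cmd_parser now declared as a kernel bool parameter (and its permissions tightened to 0400, read-only after load), the option follows the standard bool-param semantics: both numeric and y/n spellings are accepted. For example, on the kernel command line:

	i915.enable_cmd_parser=0
	i915.enable_cmd_parser=N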
|
||||
|
@@ -54,6 +54,7 @@
#define CHV_COLORS \
	.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }

/* Keep in gen based order, and chronological order within a gen */
#define GEN2_FEATURES \
	.gen = 2, .num_pipes = 1, \
	.has_overlay = 1, .overlay_needs_physical = 1, \
@@ -65,17 +66,19 @@

static const struct intel_device_info intel_i830_info = {
	GEN2_FEATURES,
	.platform = INTEL_I830,
	.is_mobile = 1, .cursor_needs_physical = 1,
	.num_pipes = 2, /* legal, last one wins */
};

static const struct intel_device_info intel_845g_info = {
static const struct intel_device_info intel_i845g_info = {
	GEN2_FEATURES,
	.platform = INTEL_I845G,
};

static const struct intel_device_info intel_i85x_info = {
	GEN2_FEATURES,
	.is_i85x = 1, .is_mobile = 1,
	.platform = INTEL_I85X, .is_mobile = 1,
	.num_pipes = 2, /* legal, last one wins */
	.cursor_needs_physical = 1,
	.has_fbc = 1,
@@ -83,6 +86,7 @@ static const struct intel_device_info intel_i85x_info = {

static const struct intel_device_info intel_i865g_info = {
	GEN2_FEATURES,
	.platform = INTEL_I865G,
};

#define GEN3_FEATURES \
@@ -94,12 +98,14 @@ static const struct intel_device_info intel_i865g_info = {

static const struct intel_device_info intel_i915g_info = {
	GEN3_FEATURES,
	.is_i915g = 1, .cursor_needs_physical = 1,
	.platform = INTEL_I915G, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.hws_needs_physical = 1,
};

static const struct intel_device_info intel_i915gm_info = {
	GEN3_FEATURES,
	.platform = INTEL_I915GM,
	.is_mobile = 1,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
@@ -107,15 +113,18 @@ static const struct intel_device_info intel_i915gm_info = {
	.has_fbc = 1,
	.hws_needs_physical = 1,
};

static const struct intel_device_info intel_i945g_info = {
	GEN3_FEATURES,
	.platform = INTEL_I945G,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.hws_needs_physical = 1,
};

static const struct intel_device_info intel_i945gm_info = {
	GEN3_FEATURES,
	.is_i945gm = 1, .is_mobile = 1,
	.platform = INTEL_I945GM, .is_mobile = 1,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
@@ -123,6 +132,20 @@ static const struct intel_device_info intel_i945gm_info = {
	.hws_needs_physical = 1,
};

static const struct intel_device_info intel_g33_info = {
	GEN3_FEATURES,
	.platform = INTEL_G33,
	.has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_pineview_info = {
	GEN3_FEATURES,
	.platform = INTEL_PINEVIEW, .is_mobile = 1,
	.has_hotplug = 1,
	.has_overlay = 1,
};

#define GEN4_FEATURES \
	.gen = 4, .num_pipes = 2, \
	.has_hotplug = 1, \
@@ -133,50 +156,36 @@ static const struct intel_device_info intel_i945gm_info = {

static const struct intel_device_info intel_i965g_info = {
	GEN4_FEATURES,
	.is_broadwater = 1,
	.platform = INTEL_I965G,
	.has_overlay = 1,
	.hws_needs_physical = 1,
};

static const struct intel_device_info intel_i965gm_info = {
	GEN4_FEATURES,
	.is_crestline = 1,
	.platform = INTEL_I965GM,
	.is_mobile = 1, .has_fbc = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.hws_needs_physical = 1,
};

static const struct intel_device_info intel_g33_info = {
	GEN3_FEATURES,
	.is_g33 = 1,
	.has_hotplug = 1,
	.has_overlay = 1,
};

static const struct intel_device_info intel_g45_info = {
	GEN4_FEATURES,
	.is_g4x = 1,
	.platform = INTEL_G45,
	.has_pipe_cxsr = 1,
	.ring_mask = RENDER_RING | BSD_RING,
};

static const struct intel_device_info intel_gm45_info = {
	GEN4_FEATURES,
	.is_g4x = 1,
	.platform = INTEL_GM45,
	.is_mobile = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
};

static const struct intel_device_info intel_pineview_info = {
	GEN3_FEATURES,
	.is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
	.has_hotplug = 1,
	.has_overlay = 1,
};

#define GEN5_FEATURES \
	.gen = 5, .num_pipes = 2, \
	.has_hotplug = 1, \
@@ -187,10 +196,12 @@ static const struct intel_device_info intel_pineview_info = {

static const struct intel_device_info intel_ironlake_d_info = {
	GEN5_FEATURES,
	.platform = INTEL_IRONLAKE,
};

static const struct intel_device_info intel_ironlake_m_info = {
	GEN5_FEATURES,
	.platform = INTEL_IRONLAKE,
	.is_mobile = 1,
};

@@ -204,15 +215,18 @@ static const struct intel_device_info intel_ironlake_m_info = {
	.has_rc6p = 1, \
	.has_gmbus_irq = 1, \
	.has_hw_contexts = 1, \
	.has_aliasing_ppgtt = 1, \
	GEN_DEFAULT_PIPEOFFSETS, \
	CURSOR_OFFSETS

static const struct intel_device_info intel_sandybridge_d_info = {
	GEN6_FEATURES,
	.platform = INTEL_SANDYBRIDGE,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	GEN6_FEATURES,
	.platform = INTEL_SANDYBRIDGE,
	.is_mobile = 1,
};

@@ -226,46 +240,49 @@ static const struct intel_device_info intel_sandybridge_m_info = {
	.has_rc6p = 1, \
	.has_gmbus_irq = 1, \
	.has_hw_contexts = 1, \
	.has_aliasing_ppgtt = 1, \
	.has_full_ppgtt = 1, \
	GEN_DEFAULT_PIPEOFFSETS, \
	IVB_CURSOR_OFFSETS

static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.platform = INTEL_IVYBRIDGE,
	.has_l3_dpf = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.platform = INTEL_IVYBRIDGE,
	.is_mobile = 1,
	.has_l3_dpf = 1,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.platform = INTEL_IVYBRIDGE,
	.num_pipes = 0, /* legal, last one wins */
	.has_l3_dpf = 1,
};

#define VLV_FEATURES \
	.gen = 7, .num_pipes = 2, \
	.has_psr = 1, \
	.has_runtime_pm = 1, \
	.has_rc6 = 1, \
	.has_gmbus_irq = 1, \
	.has_hw_contexts = 1, \
	.has_gmch_display = 1, \
	.has_hotplug = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.display_mmio_offset = VLV_DISPLAY_BASE, \
	GEN_DEFAULT_PIPEOFFSETS, \
	CURSOR_OFFSETS

static const struct intel_device_info intel_valleyview_info = {
	VLV_FEATURES,
	.is_valleyview = 1,
	.platform = INTEL_VALLEYVIEW,
	.gen = 7,
	.is_lp = 1,
	.num_pipes = 2,
	.has_psr = 1,
	.has_runtime_pm = 1,
	.has_rc6 = 1,
	.has_gmbus_irq = 1,
	.has_hw_contexts = 1,
	.has_gmch_display = 1,
	.has_hotplug = 1,
	.has_aliasing_ppgtt = 1,
	.has_full_ppgtt = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS
};

#define HSW_FEATURES \
@@ -281,7 +298,7 @@ static const struct intel_device_info intel_valleyview_info = {

static const struct intel_device_info intel_haswell_info = {
	HSW_FEATURES,
	.is_haswell = 1,
	.platform = INTEL_HASWELL,
	.has_l3_dpf = 1,
};

@@ -289,26 +306,28 @@ static const struct intel_device_info intel_haswell_info = {
	HSW_FEATURES, \
	BDW_COLORS, \
	.has_logical_ring_contexts = 1, \
	.has_full_48bit_ppgtt = 1, \
	.has_64bit_reloc = 1

static const struct intel_device_info intel_broadwell_info = {
	BDW_FEATURES,
	.gen = 8,
	.is_broadwell = 1,
	.platform = INTEL_BROADWELL,
};

static const struct intel_device_info intel_broadwell_gt3_info = {
	BDW_FEATURES,
	.gen = 8,
	.is_broadwell = 1,
	.platform = INTEL_BROADWELL,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_cherryview_info = {
	.gen = 8, .num_pipes = 3,
	.has_hotplug = 1,
	.is_lp = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.is_cherryview = 1,
	.platform = INTEL_CHERRYVIEW,
	.has_64bit_reloc = 1,
	.has_psr = 1,
	.has_runtime_pm = 1,
@@ -318,6 +337,8 @@ static const struct intel_device_info intel_cherryview_info = {
	.has_hw_contexts = 1,
	.has_logical_ring_contexts = 1,
	.has_gmch_display = 1,
	.has_aliasing_ppgtt = 1,
	.has_full_ppgtt = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	GEN_CHV_PIPEOFFSETS,
	CURSOR_OFFSETS,
@@ -326,7 +347,7 @@ static const struct intel_device_info intel_cherryview_info = {

static const struct intel_device_info intel_skylake_info = {
	BDW_FEATURES,
	.is_skylake = 1,
	.platform = INTEL_SKYLAKE,
	.gen = 9,
	.has_csr = 1,
	.has_guc = 1,
@@ -335,7 +356,7 @@ static const struct intel_device_info intel_skylake_info = {

static const struct intel_device_info intel_skylake_gt3_info = {
	BDW_FEATURES,
	.is_skylake = 1,
	.platform = INTEL_SKYLAKE,
	.gen = 9,
	.has_csr = 1,
	.has_guc = 1,
@@ -343,36 +364,50 @@ static const struct intel_device_info intel_skylake_gt3_info = {
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

#define GEN9_LP_FEATURES \
	.gen = 9, \
	.is_lp = 1, \
	.has_hotplug = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
	.num_pipes = 3, \
	.has_64bit_reloc = 1, \
	.has_ddi = 1, \
	.has_fpga_dbg = 1, \
	.has_fbc = 1, \
	.has_runtime_pm = 1, \
	.has_pooled_eu = 0, \
	.has_csr = 1, \
	.has_resource_streamer = 1, \
	.has_rc6 = 1, \
	.has_dp_mst = 1, \
	.has_gmbus_irq = 1, \
	.has_hw_contexts = 1, \
	.has_logical_ring_contexts = 1, \
	.has_guc = 1, \
	.has_decoupled_mmio = 1, \
	.has_aliasing_ppgtt = 1, \
	.has_full_ppgtt = 1, \
	.has_full_48bit_ppgtt = 1, \
	GEN_DEFAULT_PIPEOFFSETS, \
	IVB_CURSOR_OFFSETS, \
	BDW_COLORS

static const struct intel_device_info intel_broxton_info = {
	.is_broxton = 1,
	.gen = 9,
	.has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.num_pipes = 3,
	.has_64bit_reloc = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	.has_runtime_pm = 1,
	.has_pooled_eu = 0,
	.has_csr = 1,
	.has_resource_streamer = 1,
	.has_rc6 = 1,
	.has_dp_mst = 1,
	.has_gmbus_irq = 1,
	.has_hw_contexts = 1,
	.has_logical_ring_contexts = 1,
	.has_guc = 1,
	.has_decoupled_mmio = 1,
	GEN9_LP_FEATURES,
	.platform = INTEL_BROXTON,
	.ddb_size = 512,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
	BDW_COLORS,
};

static const struct intel_device_info intel_geminilake_info = {
	GEN9_LP_FEATURES,
	.platform = INTEL_GEMINILAKE,
	.is_alpha_support = 1,
	.ddb_size = 1024,
};

static const struct intel_device_info intel_kabylake_info = {
	BDW_FEATURES,
	.is_kabylake = 1,
	.platform = INTEL_KABYLAKE,
	.gen = 9,
	.has_csr = 1,
	.has_guc = 1,
@@ -381,7 +416,7 @@ static const struct intel_device_info intel_kabylake_info = {

static const struct intel_device_info intel_kabylake_gt3_info = {
	BDW_FEATURES,
	.is_kabylake = 1,
	.platform = INTEL_KABYLAKE,
	.gen = 9,
	.has_csr = 1,
	.has_guc = 1,
@@ -397,7 +432,7 @@ static const struct intel_device_info intel_kabylake_gt3_info = {
 */
static const struct pci_device_id pciidlist[] = {
	INTEL_I830_IDS(&intel_i830_info),
	INTEL_I845G_IDS(&intel_845g_info),
	INTEL_I845G_IDS(&intel_i845g_info),
	INTEL_I85X_IDS(&intel_i85x_info),
	INTEL_I865G_IDS(&intel_i865g_info),
	INTEL_I915G_IDS(&intel_i915g_info),
@@ -421,12 +456,14 @@ static const struct pci_device_id pciidlist[] = {
	INTEL_VLV_IDS(&intel_valleyview_info),
	INTEL_BDW_GT12_IDS(&intel_broadwell_info),
	INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info),
	INTEL_BDW_RSVD_IDS(&intel_broadwell_info),
	INTEL_CHV_IDS(&intel_cherryview_info),
	INTEL_SKL_GT1_IDS(&intel_skylake_info),
	INTEL_SKL_GT2_IDS(&intel_skylake_info),
	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
	INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
	INTEL_BXT_IDS(&intel_broxton_info),
	INTEL_GLK_IDS(&intel_geminilake_info),
	INTEL_KBL_GT1_IDS(&intel_kabylake_info),
	INTEL_KBL_GT2_IDS(&intel_kabylake_info),
	INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
drivers/gpu/drm/i915/i915_perf.c (new file, 2096 lines)
File diff suppressed because it is too large
@@ -62,6 +62,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \
			       (port) == PORT_B ? (b) : (c))
#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PORT3(pipe, a, b, c))
#define _PHY3(phy, a, b, c) ((phy) == DPIO_PHY0 ? (a) : \
			     (phy) == DPIO_PHY1 ? (b) : (c))
#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))

#define _MASKED_FIELD(mask, value) ({ \
	if (__builtin_constant_p(mask)) \
@@ -107,6 +110,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GRDOM_RESET_STATUS (1 << 1)
#define GRDOM_RESET_ENABLE (1 << 0)

/* BSpec only has register offset, PCI device and bit found empirically */
#define I830_CLOCK_GATE 0xc8 /* device 0 */
#define I830_L2_CACHE_CLOCK_GATE_DISABLE (1 << 2)

#define GCDGMBUS 0xcc

#define GCFGC2 0xda
@@ -294,7 +301,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 * Instruction field definitions used by the command parser
 */
#define INSTR_CLIENT_SHIFT 29
#define INSTR_CLIENT_MASK 0xE0000000
#define INSTR_MI_CLIENT 0x0
#define INSTR_BC_CLIENT 0x2
#define INSTR_RC_CLIENT 0x3
@@ -615,7 +621,344 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define HSW_CS_GPR(n) _MMIO(0x2600 + (n) * 8)
#define HSW_CS_GPR_UDW(n) _MMIO(0x2600 + (n) * 8 + 4)

#define OACONTROL _MMIO(0x2360)
#define GEN7_OACONTROL _MMIO(0x2360)
#define GEN7_OACONTROL_CTX_MASK 0xFFFFF000
#define GEN7_OACONTROL_TIMER_PERIOD_MASK 0x3F
#define GEN7_OACONTROL_TIMER_PERIOD_SHIFT 6
#define GEN7_OACONTROL_TIMER_ENABLE (1<<5)
#define GEN7_OACONTROL_FORMAT_A13 (0<<2)
#define GEN7_OACONTROL_FORMAT_A29 (1<<2)
#define GEN7_OACONTROL_FORMAT_A13_B8_C8 (2<<2)
#define GEN7_OACONTROL_FORMAT_A29_B8_C8 (3<<2)
#define GEN7_OACONTROL_FORMAT_B4_C8 (4<<2)
#define GEN7_OACONTROL_FORMAT_A45_B8_C8 (5<<2)
#define GEN7_OACONTROL_FORMAT_B4_C8_A16 (6<<2)
#define GEN7_OACONTROL_FORMAT_C4_B8 (7<<2)
#define GEN7_OACONTROL_FORMAT_SHIFT 2
#define GEN7_OACONTROL_PER_CTX_ENABLE (1<<1)
#define GEN7_OACONTROL_ENABLE (1<<0)
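/*
 * Editorial sketch (not part of the patch): the GEN7_OACONTROL fields above
 * compose into a single register write when enabling the OA unit. Assuming a
 * hypothetical timer exponent `period_exponent` and the A45/B8/C8 report
 * format, a plausible enable looks like:
 *
 *	u32 ctrl = (period_exponent << GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
 *		   GEN7_OACONTROL_TIMER_ENABLE |
 *		   GEN7_OACONTROL_FORMAT_A45_B8_C8 |
 *		   GEN7_OACONTROL_ENABLE;
 *	I915_WRITE(GEN7_OACONTROL, ctrl);
 *
 * The sequence actually used lives in the suppressed i915_perf.c diff; this
 * only illustrates how the shift and enable macros fit together.
 */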

#define GEN8_OACTXID _MMIO(0x2364)

#define GEN8_OACONTROL _MMIO(0x2B00)
#define GEN8_OA_REPORT_FORMAT_A12 (0<<2)
#define GEN8_OA_REPORT_FORMAT_A12_B8_C8 (2<<2)
#define GEN8_OA_REPORT_FORMAT_A36_B8_C8 (5<<2)
#define GEN8_OA_REPORT_FORMAT_C4_B8 (7<<2)
#define GEN8_OA_REPORT_FORMAT_SHIFT 2
#define GEN8_OA_SPECIFIC_CONTEXT_ENABLE (1<<1)
#define GEN8_OA_COUNTER_ENABLE (1<<0)

#define GEN8_OACTXCONTROL _MMIO(0x2360)
#define GEN8_OA_TIMER_PERIOD_MASK 0x3F
#define GEN8_OA_TIMER_PERIOD_SHIFT 2
#define GEN8_OA_TIMER_ENABLE (1<<1)
#define GEN8_OA_COUNTER_RESUME (1<<0)

#define GEN7_OABUFFER _MMIO(0x23B0) /* R/W */
#define GEN7_OABUFFER_OVERRUN_DISABLE (1<<3)
#define GEN7_OABUFFER_EDGE_TRIGGER (1<<2)
#define GEN7_OABUFFER_STOP_RESUME_ENABLE (1<<1)
#define GEN7_OABUFFER_RESUME (1<<0)

#define GEN8_OABUFFER _MMIO(0x2b14)

#define GEN7_OASTATUS1 _MMIO(0x2364)
#define GEN7_OASTATUS1_TAIL_MASK 0xffffffc0
#define GEN7_OASTATUS1_COUNTER_OVERFLOW (1<<2)
#define GEN7_OASTATUS1_OABUFFER_OVERFLOW (1<<1)
#define GEN7_OASTATUS1_REPORT_LOST (1<<0)

#define GEN7_OASTATUS2 _MMIO(0x2368)
#define GEN7_OASTATUS2_HEAD_MASK 0xffffffc0

#define GEN8_OASTATUS _MMIO(0x2b08)
#define GEN8_OASTATUS_OVERRUN_STATUS (1<<3)
#define GEN8_OASTATUS_COUNTER_OVERFLOW (1<<2)
#define GEN8_OASTATUS_OABUFFER_OVERFLOW (1<<1)
#define GEN8_OASTATUS_REPORT_LOST (1<<0)

#define GEN8_OAHEADPTR _MMIO(0x2B0C)
#define GEN8_OATAILPTR _MMIO(0x2B10)

#define OABUFFER_SIZE_128K (0<<3)
#define OABUFFER_SIZE_256K (1<<3)
#define OABUFFER_SIZE_512K (2<<3)
#define OABUFFER_SIZE_1M (3<<3)
#define OABUFFER_SIZE_2M (4<<3)
#define OABUFFER_SIZE_4M (5<<3)
#define OABUFFER_SIZE_8M (6<<3)
#define OABUFFER_SIZE_16M (7<<3)

#define OA_MEM_SELECT_GGTT (1<<0)
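/*
 * Editorial sketch (not part of the patch): on gen7 the OA buffer address,
 * size encoding and memory-space select share one register, so setup is
 * plausibly a single write, with `gtt_offset` a hypothetical suitably
 * aligned GGTT offset:
 *
 *	I915_WRITE(GEN7_OABUFFER,
 *		   gtt_offset | OABUFFER_SIZE_16M | OA_MEM_SELECT_GGTT);
 *
 * The real initialisation is in the suppressed i915_perf.c diff.
 */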

#define EU_PERF_CNTL0 _MMIO(0xe458)

#define GDT_CHICKEN_BITS _MMIO(0x9840)
#define GT_NOA_ENABLE 0x00000080

/*
 * OA Boolean state
 */

#define OAREPORTTRIG1 _MMIO(0x2740)
#define OAREPORTTRIG1_THRESHOLD_MASK 0xffff
#define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */

#define OAREPORTTRIG2 _MMIO(0x2744)
#define OAREPORTTRIG2_INVERT_A_0 (1<<0)
#define OAREPORTTRIG2_INVERT_A_1 (1<<1)
#define OAREPORTTRIG2_INVERT_A_2 (1<<2)
#define OAREPORTTRIG2_INVERT_A_3 (1<<3)
#define OAREPORTTRIG2_INVERT_A_4 (1<<4)
#define OAREPORTTRIG2_INVERT_A_5 (1<<5)
#define OAREPORTTRIG2_INVERT_A_6 (1<<6)
#define OAREPORTTRIG2_INVERT_A_7 (1<<7)
#define OAREPORTTRIG2_INVERT_A_8 (1<<8)
#define OAREPORTTRIG2_INVERT_A_9 (1<<9)
#define OAREPORTTRIG2_INVERT_A_10 (1<<10)
#define OAREPORTTRIG2_INVERT_A_11 (1<<11)
#define OAREPORTTRIG2_INVERT_A_12 (1<<12)
#define OAREPORTTRIG2_INVERT_A_13 (1<<13)
#define OAREPORTTRIG2_INVERT_A_14 (1<<14)
#define OAREPORTTRIG2_INVERT_A_15 (1<<15)
#define OAREPORTTRIG2_INVERT_B_0 (1<<16)
#define OAREPORTTRIG2_INVERT_B_1 (1<<17)
#define OAREPORTTRIG2_INVERT_B_2 (1<<18)
#define OAREPORTTRIG2_INVERT_B_3 (1<<19)
#define OAREPORTTRIG2_INVERT_C_0 (1<<20)
#define OAREPORTTRIG2_INVERT_C_1 (1<<21)
#define OAREPORTTRIG2_INVERT_D_0 (1<<22)
#define OAREPORTTRIG2_THRESHOLD_ENABLE (1<<23)
#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1<<31)

#define OAREPORTTRIG3 _MMIO(0x2748)
#define OAREPORTTRIG3_NOA_SELECT_MASK 0xf
#define OAREPORTTRIG3_NOA_SELECT_8_SHIFT 0
#define OAREPORTTRIG3_NOA_SELECT_9_SHIFT 4
#define OAREPORTTRIG3_NOA_SELECT_10_SHIFT 8
#define OAREPORTTRIG3_NOA_SELECT_11_SHIFT 12
#define OAREPORTTRIG3_NOA_SELECT_12_SHIFT 16
#define OAREPORTTRIG3_NOA_SELECT_13_SHIFT 20
#define OAREPORTTRIG3_NOA_SELECT_14_SHIFT 24
#define OAREPORTTRIG3_NOA_SELECT_15_SHIFT 28

#define OAREPORTTRIG4 _MMIO(0x274c)
#define OAREPORTTRIG4_NOA_SELECT_MASK 0xf
#define OAREPORTTRIG4_NOA_SELECT_0_SHIFT 0
#define OAREPORTTRIG4_NOA_SELECT_1_SHIFT 4
#define OAREPORTTRIG4_NOA_SELECT_2_SHIFT 8
#define OAREPORTTRIG4_NOA_SELECT_3_SHIFT 12
#define OAREPORTTRIG4_NOA_SELECT_4_SHIFT 16
#define OAREPORTTRIG4_NOA_SELECT_5_SHIFT 20
#define OAREPORTTRIG4_NOA_SELECT_6_SHIFT 24
#define OAREPORTTRIG4_NOA_SELECT_7_SHIFT 28

#define OAREPORTTRIG5 _MMIO(0x2750)
#define OAREPORTTRIG5_THRESHOLD_MASK 0xffff
#define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */

#define OAREPORTTRIG6 _MMIO(0x2754)
#define OAREPORTTRIG6_INVERT_A_0 (1<<0)
#define OAREPORTTRIG6_INVERT_A_1 (1<<1)
#define OAREPORTTRIG6_INVERT_A_2 (1<<2)
#define OAREPORTTRIG6_INVERT_A_3 (1<<3)
#define OAREPORTTRIG6_INVERT_A_4 (1<<4)
#define OAREPORTTRIG6_INVERT_A_5 (1<<5)
#define OAREPORTTRIG6_INVERT_A_6 (1<<6)
#define OAREPORTTRIG6_INVERT_A_7 (1<<7)
#define OAREPORTTRIG6_INVERT_A_8 (1<<8)
#define OAREPORTTRIG6_INVERT_A_9 (1<<9)
#define OAREPORTTRIG6_INVERT_A_10 (1<<10)
#define OAREPORTTRIG6_INVERT_A_11 (1<<11)
#define OAREPORTTRIG6_INVERT_A_12 (1<<12)
#define OAREPORTTRIG6_INVERT_A_13 (1<<13)
#define OAREPORTTRIG6_INVERT_A_14 (1<<14)
#define OAREPORTTRIG6_INVERT_A_15 (1<<15)
#define OAREPORTTRIG6_INVERT_B_0 (1<<16)
#define OAREPORTTRIG6_INVERT_B_1 (1<<17)
#define OAREPORTTRIG6_INVERT_B_2 (1<<18)
#define OAREPORTTRIG6_INVERT_B_3 (1<<19)
#define OAREPORTTRIG6_INVERT_C_0 (1<<20)
#define OAREPORTTRIG6_INVERT_C_1 (1<<21)
#define OAREPORTTRIG6_INVERT_D_0 (1<<22)
#define OAREPORTTRIG6_THRESHOLD_ENABLE (1<<23)
#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1<<31)

#define OAREPORTTRIG7 _MMIO(0x2758)
#define OAREPORTTRIG7_NOA_SELECT_MASK 0xf
#define OAREPORTTRIG7_NOA_SELECT_8_SHIFT 0
#define OAREPORTTRIG7_NOA_SELECT_9_SHIFT 4
#define OAREPORTTRIG7_NOA_SELECT_10_SHIFT 8
#define OAREPORTTRIG7_NOA_SELECT_11_SHIFT 12
#define OAREPORTTRIG7_NOA_SELECT_12_SHIFT 16
#define OAREPORTTRIG7_NOA_SELECT_13_SHIFT 20
#define OAREPORTTRIG7_NOA_SELECT_14_SHIFT 24
#define OAREPORTTRIG7_NOA_SELECT_15_SHIFT 28

#define OAREPORTTRIG8 _MMIO(0x275c)
#define OAREPORTTRIG8_NOA_SELECT_MASK 0xf
#define OAREPORTTRIG8_NOA_SELECT_0_SHIFT 0
#define OAREPORTTRIG8_NOA_SELECT_1_SHIFT 4
#define OAREPORTTRIG8_NOA_SELECT_2_SHIFT 8
#define OAREPORTTRIG8_NOA_SELECT_3_SHIFT 12
#define OAREPORTTRIG8_NOA_SELECT_4_SHIFT 16
#define OAREPORTTRIG8_NOA_SELECT_5_SHIFT 20
#define OAREPORTTRIG8_NOA_SELECT_6_SHIFT 24
#define OAREPORTTRIG8_NOA_SELECT_7_SHIFT 28

#define OASTARTTRIG1 _MMIO(0x2710)
#define OASTARTTRIG1_THRESHOLD_COUNT_MASK_MBZ 0xffff0000
#define OASTARTTRIG1_THRESHOLD_MASK 0xffff

#define OASTARTTRIG2 _MMIO(0x2714)
#define OASTARTTRIG2_INVERT_A_0 (1<<0)
#define OASTARTTRIG2_INVERT_A_1 (1<<1)
#define OASTARTTRIG2_INVERT_A_2 (1<<2)
#define OASTARTTRIG2_INVERT_A_3 (1<<3)
#define OASTARTTRIG2_INVERT_A_4 (1<<4)
#define OASTARTTRIG2_INVERT_A_5 (1<<5)
#define OASTARTTRIG2_INVERT_A_6 (1<<6)
#define OASTARTTRIG2_INVERT_A_7 (1<<7)
#define OASTARTTRIG2_INVERT_A_8 (1<<8)
#define OASTARTTRIG2_INVERT_A_9 (1<<9)
#define OASTARTTRIG2_INVERT_A_10 (1<<10)
#define OASTARTTRIG2_INVERT_A_11 (1<<11)
#define OASTARTTRIG2_INVERT_A_12 (1<<12)
#define OASTARTTRIG2_INVERT_A_13 (1<<13)
#define OASTARTTRIG2_INVERT_A_14 (1<<14)
#define OASTARTTRIG2_INVERT_A_15 (1<<15)
#define OASTARTTRIG2_INVERT_B_0 (1<<16)
#define OASTARTTRIG2_INVERT_B_1 (1<<17)
#define OASTARTTRIG2_INVERT_B_2 (1<<18)
#define OASTARTTRIG2_INVERT_B_3 (1<<19)
#define OASTARTTRIG2_INVERT_C_0 (1<<20)
#define OASTARTTRIG2_INVERT_C_1 (1<<21)
#define OASTARTTRIG2_INVERT_D_0 (1<<22)
#define OASTARTTRIG2_THRESHOLD_ENABLE (1<<23)
#define OASTARTTRIG2_START_TRIG_FLAG_MBZ (1<<24)
#define OASTARTTRIG2_EVENT_SELECT_0 (1<<28)
#define OASTARTTRIG2_EVENT_SELECT_1 (1<<29)
#define OASTARTTRIG2_EVENT_SELECT_2 (1<<30)
#define OASTARTTRIG2_EVENT_SELECT_3 (1<<31)

#define OASTARTTRIG3 _MMIO(0x2718)
#define OASTARTTRIG3_NOA_SELECT_MASK 0xf
#define OASTARTTRIG3_NOA_SELECT_8_SHIFT 0
#define OASTARTTRIG3_NOA_SELECT_9_SHIFT 4
#define OASTARTTRIG3_NOA_SELECT_10_SHIFT 8
#define OASTARTTRIG3_NOA_SELECT_11_SHIFT 12
#define OASTARTTRIG3_NOA_SELECT_12_SHIFT 16
#define OASTARTTRIG3_NOA_SELECT_13_SHIFT 20
#define OASTARTTRIG3_NOA_SELECT_14_SHIFT 24
#define OASTARTTRIG3_NOA_SELECT_15_SHIFT 28

#define OASTARTTRIG4 _MMIO(0x271c)
#define OASTARTTRIG4_NOA_SELECT_MASK 0xf
#define OASTARTTRIG4_NOA_SELECT_0_SHIFT 0
#define OASTARTTRIG4_NOA_SELECT_1_SHIFT 4
#define OASTARTTRIG4_NOA_SELECT_2_SHIFT 8
#define OASTARTTRIG4_NOA_SELECT_3_SHIFT 12
#define OASTARTTRIG4_NOA_SELECT_4_SHIFT 16
#define OASTARTTRIG4_NOA_SELECT_5_SHIFT 20
#define OASTARTTRIG4_NOA_SELECT_6_SHIFT 24
#define OASTARTTRIG4_NOA_SELECT_7_SHIFT 28

#define OASTARTTRIG5 _MMIO(0x2720)
#define OASTARTTRIG5_THRESHOLD_COUNT_MASK_MBZ 0xffff0000
#define OASTARTTRIG5_THRESHOLD_MASK 0xffff

#define OASTARTTRIG6 _MMIO(0x2724)
#define OASTARTTRIG6_INVERT_A_0 (1<<0)
#define OASTARTTRIG6_INVERT_A_1 (1<<1)
#define OASTARTTRIG6_INVERT_A_2 (1<<2)
#define OASTARTTRIG6_INVERT_A_3 (1<<3)
#define OASTARTTRIG6_INVERT_A_4 (1<<4)
#define OASTARTTRIG6_INVERT_A_5 (1<<5)
#define OASTARTTRIG6_INVERT_A_6 (1<<6)
#define OASTARTTRIG6_INVERT_A_7 (1<<7)
#define OASTARTTRIG6_INVERT_A_8 (1<<8)
#define OASTARTTRIG6_INVERT_A_9 (1<<9)
#define OASTARTTRIG6_INVERT_A_10 (1<<10)
#define OASTARTTRIG6_INVERT_A_11 (1<<11)
#define OASTARTTRIG6_INVERT_A_12 (1<<12)
#define OASTARTTRIG6_INVERT_A_13 (1<<13)
#define OASTARTTRIG6_INVERT_A_14 (1<<14)
#define OASTARTTRIG6_INVERT_A_15 (1<<15)
#define OASTARTTRIG6_INVERT_B_0 (1<<16)
#define OASTARTTRIG6_INVERT_B_1 (1<<17)
#define OASTARTTRIG6_INVERT_B_2 (1<<18)
#define OASTARTTRIG6_INVERT_B_3 (1<<19)
#define OASTARTTRIG6_INVERT_C_0 (1<<20)
#define OASTARTTRIG6_INVERT_C_1 (1<<21)
#define OASTARTTRIG6_INVERT_D_0 (1<<22)
#define OASTARTTRIG6_THRESHOLD_ENABLE (1<<23)
#define OASTARTTRIG6_START_TRIG_FLAG_MBZ (1<<24)
#define OASTARTTRIG6_EVENT_SELECT_4 (1<<28)
#define OASTARTTRIG6_EVENT_SELECT_5 (1<<29)
#define OASTARTTRIG6_EVENT_SELECT_6 (1<<30)
#define OASTARTTRIG6_EVENT_SELECT_7 (1<<31)

#define OASTARTTRIG7 _MMIO(0x2728)
#define OASTARTTRIG7_NOA_SELECT_MASK 0xf
#define OASTARTTRIG7_NOA_SELECT_8_SHIFT 0
#define OASTARTTRIG7_NOA_SELECT_9_SHIFT 4
#define OASTARTTRIG7_NOA_SELECT_10_SHIFT 8
#define OASTARTTRIG7_NOA_SELECT_11_SHIFT 12
#define OASTARTTRIG7_NOA_SELECT_12_SHIFT 16
#define OASTARTTRIG7_NOA_SELECT_13_SHIFT 20
#define OASTARTTRIG7_NOA_SELECT_14_SHIFT 24
#define OASTARTTRIG7_NOA_SELECT_15_SHIFT 28

#define OASTARTTRIG8 _MMIO(0x272c)
#define OASTARTTRIG8_NOA_SELECT_MASK 0xf
#define OASTARTTRIG8_NOA_SELECT_0_SHIFT 0
#define OASTARTTRIG8_NOA_SELECT_1_SHIFT 4
#define OASTARTTRIG8_NOA_SELECT_2_SHIFT 8
#define OASTARTTRIG8_NOA_SELECT_3_SHIFT 12
#define OASTARTTRIG8_NOA_SELECT_4_SHIFT 16
#define OASTARTTRIG8_NOA_SELECT_5_SHIFT 20
#define OASTARTTRIG8_NOA_SELECT_6_SHIFT 24
#define OASTARTTRIG8_NOA_SELECT_7_SHIFT 28

/* CECX_0 */
#define OACEC_COMPARE_LESS_OR_EQUAL 6
#define OACEC_COMPARE_NOT_EQUAL 5
#define OACEC_COMPARE_LESS_THAN 4
#define OACEC_COMPARE_GREATER_OR_EQUAL 3
#define OACEC_COMPARE_EQUAL 2
#define OACEC_COMPARE_GREATER_THAN 1
#define OACEC_COMPARE_ANY_EQUAL 0

#define OACEC_COMPARE_VALUE_MASK 0xffff
#define OACEC_COMPARE_VALUE_SHIFT 3

#define OACEC_SELECT_NOA (0<<19)
#define OACEC_SELECT_PREV (1<<19)
#define OACEC_SELECT_BOOLEAN (2<<19)

/* CECX_1 */
#define OACEC_MASK_MASK 0xffff
#define OACEC_CONSIDERATIONS_MASK 0xffff
#define OACEC_CONSIDERATIONS_SHIFT 16

#define OACEC0_0 _MMIO(0x2770)
#define OACEC0_1 _MMIO(0x2774)
#define OACEC1_0 _MMIO(0x2778)
#define OACEC1_1 _MMIO(0x277c)
#define OACEC2_0 _MMIO(0x2780)
#define OACEC2_1 _MMIO(0x2784)
#define OACEC3_0 _MMIO(0x2788)
#define OACEC3_1 _MMIO(0x278c)
#define OACEC4_0 _MMIO(0x2790)
#define OACEC4_1 _MMIO(0x2794)
#define OACEC5_0 _MMIO(0x2798)
#define OACEC5_1 _MMIO(0x279c)
#define OACEC6_0 _MMIO(0x27a0)
#define OACEC6_1 _MMIO(0x27a4)
#define OACEC7_0 _MMIO(0x27a8)
#define OACEC7_1 _MMIO(0x27ac)

#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
@@ -708,9 +1051,15 @@ enum skl_disp_power_wells {
	/* These numbers are fixed and must match the position of the pw bits */
	SKL_DISP_PW_MISC_IO,
	SKL_DISP_PW_DDI_A_E,
	GLK_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E,
	SKL_DISP_PW_DDI_B,
	SKL_DISP_PW_DDI_C,
	SKL_DISP_PW_DDI_D,

	GLK_DISP_PW_AUX_A = 8,
	GLK_DISP_PW_AUX_B,
	GLK_DISP_PW_AUX_C,

	SKL_DISP_PW_1 = 14,
	SKL_DISP_PW_2,

@@ -720,6 +1069,7 @@ enum skl_disp_power_wells {

	BXT_DPIO_CMN_A,
	BXT_DPIO_CMN_BC,
	GLK_DPIO_CMN_C,
};

#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2))
@@ -1188,8 +1538,10 @@ enum skl_disp_power_wells {
/* BXT PHY registers */
#define _BXT_PHY0_BASE 0x6C000
#define _BXT_PHY1_BASE 0x162000
#define BXT_PHY_BASE(phy) _PIPE((phy), _BXT_PHY0_BASE, \
				_BXT_PHY1_BASE)
#define _BXT_PHY2_BASE 0x163000
#define BXT_PHY_BASE(phy) _PHY3((phy), _BXT_PHY0_BASE, \
				_BXT_PHY1_BASE, \
				_BXT_PHY2_BASE)

#define _BXT_PHY(phy, reg) \
	_MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg))
@@ -1201,7 +1553,6 @@ enum skl_disp_power_wells {
	_MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1))

#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
#define GT_DISPLAY_POWER_ON(phy) (1 << (phy))

#define _BXT_PHY_CTL_DDI_A 0x64C00
#define _BXT_PHY_CTL_DDI_B 0x64C10
@@ -1214,9 +1565,11 @@ enum skl_disp_power_wells {

#define _PHY_CTL_FAMILY_EDP 0x64C80
#define _PHY_CTL_FAMILY_DDI 0x64C90
#define _PHY_CTL_FAMILY_DDI_C 0x64CA0
#define COMMON_RESET_DIS (1 << 31)
#define BXT_PHY_CTL_FAMILY(phy) _MMIO_PIPE((phy), _PHY_CTL_FAMILY_DDI, \
					   _PHY_CTL_FAMILY_EDP)
#define BXT_PHY_CTL_FAMILY(phy) _MMIO_PHY3((phy), _PHY_CTL_FAMILY_DDI, \
					   _PHY_CTL_FAMILY_EDP, \
					   _PHY_CTL_FAMILY_DDI_C)

/* BXT PHY PLL registers */
#define _PORT_PLL_A 0x46074
@@ -1225,6 +1578,8 @@ enum skl_disp_power_wells {
#define PORT_PLL_ENABLE (1 << 31)
#define PORT_PLL_LOCK (1 << 30)
#define PORT_PLL_REF_SEL (1 << 27)
#define PORT_PLL_POWER_ENABLE (1 << 26)
#define PORT_PLL_POWER_STATE (1 << 25)
#define BXT_PORT_PLL_ENABLE(port) _MMIO_PORT(port, _PORT_PLL_A, _PORT_PLL_B)

#define _PORT_PLL_EBB_0_A 0x162034
@@ -1435,6 +1790,21 @@ enum skl_disp_power_wells {
#define DEEMPH_SHIFT 24
#define DE_EMPHASIS (0xFF << DEEMPH_SHIFT)

#define _PORT_TX_DW5_LN0_A 0x162514
#define _PORT_TX_DW5_LN0_B 0x6C514
#define _PORT_TX_DW5_LN0_C 0x6C914
#define _PORT_TX_DW5_GRP_A 0x162D14
#define _PORT_TX_DW5_GRP_B 0x6CD14
#define _PORT_TX_DW5_GRP_C 0x6CF14
#define BXT_PORT_TX_DW5_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
						      _PORT_TX_DW5_LN0_B, \
						      _PORT_TX_DW5_LN0_C)
#define BXT_PORT_TX_DW5_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
						      _PORT_TX_DW5_GRP_B, \
						      _PORT_TX_DW5_GRP_C)
#define DCC_DELAY_RANGE_1 (1 << 9)
#define DCC_DELAY_RANGE_2 (1 << 8)

#define _PORT_TX_DW14_LN0_A 0x162538
#define _PORT_TX_DW14_LN0_B 0x6C538
#define _PORT_TX_DW14_LN0_C 0x6C938
@@ -2920,7 +3290,7 @@ enum skl_disp_power_wells {
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
#define INTERVAL_0_833_US(us) (((us) * 6) / 5)
#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
				(IS_BROXTON(dev_priv) ? \
				(IS_GEN9_LP(dev_priv) ? \
				INTERVAL_0_833_US(us) : \
				INTERVAL_1_33_US(us)) : \
				INTERVAL_1_28_US(us))
@@ -2929,7 +3299,7 @@ enum skl_disp_power_wells {
#define INTERVAL_1_33_TO_US(interval) (((interval) << 2) / 3)
#define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6)
#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (IS_GEN9(dev_priv) ? \
				(IS_BROXTON(dev_priv) ? \
				(IS_GEN9_LP(dev_priv) ? \
				INTERVAL_0_833_TO_US(interval) : \
				INTERVAL_1_33_TO_US(interval)) : \
				INTERVAL_1_28_TO_US(interval))
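/*
 * Editorial note (worked example, not from the patch): the interval macros
 * are integer approximations. At the 1.33us granularity used here,
 * INTERVAL_1_33_US(10) = (10 * 3) >> 2 = 7 hardware intervals, and the
 * reverse mapping INTERVAL_1_33_TO_US(7) = (7 << 2) / 3 = 9us, so a round
 * trip can lose up to a microsecond to truncation.
 */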
@@ -5374,18 +5744,21 @@ enum {
#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4)

#define SPCNTR(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPACNTR, _SPBCNTR)
#define SPLINOFF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPALINOFF, _SPBLINOFF)
#define SPSTRIDE(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASTRIDE, _SPBSTRIDE)
#define SPPOS(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAPOS, _SPBPOS)
#define SPSIZE(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASIZE, _SPBSIZE)
#define SPKEYMINVAL(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMINVAL, _SPBKEYMINVAL)
#define SPKEYMSK(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMSK, _SPBKEYMSK)
#define SPSURF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASURF, _SPBSURF)
#define SPKEYMAXVAL(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
#define SPTILEOFF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPATILEOFF, _SPBTILEOFF)
#define SPCONSTALPHA(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPACONSTALPHA, _SPBCONSTALPHA)
#define SPGAMC(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAGAMC, _SPBGAMC)
#define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \
	_MMIO_PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b))

#define SPCNTR(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACNTR, _SPBCNTR)
#define SPLINOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPALINOFF, _SPBLINOFF)
#define SPSTRIDE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASTRIDE, _SPBSTRIDE)
#define SPPOS(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAPOS, _SPBPOS)
#define SPSIZE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASIZE, _SPBSIZE)
#define SPKEYMINVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMINVAL, _SPBKEYMINVAL)
#define SPKEYMSK(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMSK, _SPBKEYMSK)
#define SPSURF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURF, _SPBSURF)
#define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
#define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF)
#define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
#define SPGAMC(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC)
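/*
 * Editorial note (worked example, not from the patch): _MMIO_VLV_SPR folds
 * pipe and plane_id into one index. For pipe B (1) and PLANE_SPRITE1 the
 * index is 1 * 2 + (PLANE_SPRITE1 - PLANE_SPRITE0) = 3, so SPCNTR resolves
 * via _MMIO_PIPE's usual a + index * (b - a) stride to the fourth per-sprite
 * register block.
 */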

/*
 * CHV pipe B sprite CSC
@@ -5394,29 +5767,32 @@ enum {
 * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff|
 * |cb|   |c6 c7 c8|   |cb + cr_ioff|   |cb_ooff|
 */
#define SPCSCYGOFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d900 + (sprite) * 0x1000)
#define SPCSCCBOFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d904 + (sprite) * 0x1000)
#define SPCSCCROFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d908 + (sprite) * 0x1000)
#define _MMIO_CHV_SPCSC(plane_id, reg) \
	_MMIO(VLV_DISPLAY_BASE + ((plane_id) - PLANE_SPRITE0) * 0x1000 + (reg))

#define SPCSCYGOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d900)
#define SPCSCCBOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d904)
#define SPCSCCROFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d908)
#define SPCSC_OOFF(x) (((x) & 0x7ff) << 16) /* s11 */
#define SPCSC_IOFF(x) (((x) & 0x7ff) << 0) /* s11 */

#define SPCSCC01(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d90c + (sprite) * 0x1000)
#define SPCSCC23(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d910 + (sprite) * 0x1000)
#define SPCSCC45(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d914 + (sprite) * 0x1000)
#define SPCSCC67(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d918 + (sprite) * 0x1000)
#define SPCSCC8(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d91c + (sprite) * 0x1000)
#define SPCSCC01(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d90c)
#define SPCSCC23(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d910)
#define SPCSCC45(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d914)
#define SPCSCC67(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d918)
#define SPCSCC8(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d91c)
#define SPCSC_C1(x) (((x) & 0x7fff) << 16) /* s3.12 */
#define SPCSC_C0(x) (((x) & 0x7fff) << 0) /* s3.12 */

#define SPCSCYGICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d920 + (sprite) * 0x1000)
#define SPCSCCBICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d924 + (sprite) * 0x1000)
#define SPCSCCRICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d928 + (sprite) * 0x1000)
#define SPCSCYGICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d920)
#define SPCSCCBICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d924)
#define SPCSCCRICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d928)
#define SPCSC_IMAX(x) (((x) & 0x7ff) << 16) /* s11 */
#define SPCSC_IMIN(x) (((x) & 0x7ff) << 0) /* s11 */

#define SPCSCYGOCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d92c + (sprite) * 0x1000)
#define SPCSCCBOCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d930 + (sprite) * 0x1000)
#define SPCSCCROCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d934 + (sprite) * 0x1000)
#define SPCSCYGOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d92c)
#define SPCSCCBOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d930)
#define SPCSCCROCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d934)
#define SPCSC_OMAX(x) ((x) << 16) /* u10 */
#define SPCSC_OMIN(x) ((x) << 0) /* u10 */

@@ -6914,6 +7290,7 @@ enum {
# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)

#define GEN6_UCGCTL3 _MMIO(0x9408)
# define GEN6_OACSUNIT_CLOCK_GATE_DISABLE (1 << 20)

#define GEN7_UCGCTL4 _MMIO(0x940c)
#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
@@ -8299,6 +8676,21 @@ enum {
#define BXT_PIPE_SELECT_SHIFT 7
#define BXT_PIPE_SELECT_MASK (7 << 7)
#define BXT_PIPE_SELECT(pipe) ((pipe) << 7)
#define GLK_PHY_STATUS_PORT_READY (1 << 31) /* RO */
#define GLK_ULPS_NOT_ACTIVE (1 << 30) /* RO */
#define GLK_MIPIIO_RESET_RELEASED (1 << 28)
#define GLK_CLOCK_LANE_STOP_STATE (1 << 27) /* RO */
#define GLK_DATA_LANE_STOP_STATE (1 << 26) /* RO */
#define GLK_LP_WAKE (1 << 22)
#define GLK_LP11_LOW_PWR_MODE (1 << 21)
#define GLK_LP00_LOW_PWR_MODE (1 << 20)
#define GLK_FIREWALL_ENABLE (1 << 16)
#define BXT_PIXEL_OVERLAP_CNT_MASK (0xf << 10)
#define BXT_PIXEL_OVERLAP_CNT_SHIFT 10
#define BXT_DSC_ENABLE (1 << 3)
#define BXT_RGB_FLIP (1 << 2)
#define GLK_MIPIIO_PORT_POWERED (1 << 1) /* RO */
#define GLK_MIPIIO_ENABLE (1 << 0)

#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
@@ -56,13 +56,12 @@ static void i915_restore_display(struct drm_i915_private *dev_priv)
	i915_redisable_vga(dev_priv);
}

int i915_save_state(struct drm_device *dev)
int i915_save_state(struct drm_i915_private *dev_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int i;

	mutex_lock(&dev->struct_mutex);
	mutex_lock(&dev_priv->drm.struct_mutex);

	i915_save_display(dev_priv);

@@ -97,18 +96,17 @@ int i915_save_state(struct drm_device *dev)
		dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
	}

	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;
}

int i915_restore_state(struct drm_device *dev)
int i915_restore_state(struct drm_i915_private *dev_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int i;

	mutex_lock(&dev->struct_mutex);
	mutex_lock(&dev_priv->drm.struct_mutex);

	i915_gem_restore_fences(dev_priv);

@@ -145,9 +143,9 @@ int i915_restore_state(struct drm_device *dev)
		I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
	}

	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_i2c_reset(dev);
	intel_i2c_reset(dev_priv);

	return 0;
}
@@ -17,6 +17,92 @@

static DEFINE_SPINLOCK(i915_sw_fence_lock);

enum {
	DEBUG_FENCE_IDLE = 0,
	DEBUG_FENCE_NOTIFY,
};

#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS

static void *i915_sw_fence_debug_hint(void *addr)
{
	return (void *)(((struct i915_sw_fence *)addr)->flags & I915_SW_FENCE_MASK);
}

static struct debug_obj_descr i915_sw_fence_debug_descr = {
	.name = "i915_sw_fence",
	.debug_hint = i915_sw_fence_debug_hint,
};

static inline void debug_fence_init(struct i915_sw_fence *fence)
{
	debug_object_init(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_activate(struct i915_sw_fence *fence)
{
	debug_object_activate(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_set_state(struct i915_sw_fence *fence,
					 int old, int new)
{
	debug_object_active_state(fence, &i915_sw_fence_debug_descr, old, new);
}

static inline void debug_fence_deactivate(struct i915_sw_fence *fence)
{
	debug_object_deactivate(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_destroy(struct i915_sw_fence *fence)
{
	debug_object_destroy(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_free(struct i915_sw_fence *fence)
{
	debug_object_free(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_assert(struct i915_sw_fence *fence)
{
	debug_object_assert_init(fence, &i915_sw_fence_debug_descr);
}

#else

static inline void debug_fence_init(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_activate(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_set_state(struct i915_sw_fence *fence,
					 int old, int new)
{
}

static inline void debug_fence_deactivate(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_destroy(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_free(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_assert(struct i915_sw_fence *fence)
{
}

#endif
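/*
 * Editorial sketch (not part of the patch): with debug objects enabled the
 * wrappers above track a fence's lifecycle, roughly:
 *
 *	i915_sw_fence_init(&fence, notify_fn);	// debug_fence_init()
 *	i915_sw_fence_commit(&fence);		// debug_fence_activate()
 *	// ...completion/wakeup...		// debug_fence_deactivate()
 *	i915_sw_fence_fini(&fence);		// debug_fence_free()
 *
 * Here notify_fn is a hypothetical i915_sw_fence_notify callback; the
 * mapping is inferred from the call sites added later in this file.
 */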

static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
				  enum i915_sw_fence_notify state)
{
@@ -26,25 +112,37 @@ static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
	return fn(fence, state);
}

static void i915_sw_fence_free(struct kref *kref)
#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
void i915_sw_fence_fini(struct i915_sw_fence *fence)
{
	debug_fence_free(fence);
}
#endif

static void i915_sw_fence_release(struct kref *kref)
{
	struct i915_sw_fence *fence = container_of(kref, typeof(*fence), kref);

	WARN_ON(atomic_read(&fence->pending) > 0);
	debug_fence_destroy(fence);

	if (fence->flags & I915_SW_FENCE_MASK)
	if (fence->flags & I915_SW_FENCE_MASK) {
		__i915_sw_fence_notify(fence, FENCE_FREE);
	else
	} else {
		i915_sw_fence_fini(fence);
		kfree(fence);
	}
}

static void i915_sw_fence_put(struct i915_sw_fence *fence)
{
	kref_put(&fence->kref, i915_sw_fence_free);
	debug_fence_assert(fence);
	kref_put(&fence->kref, i915_sw_fence_release);
}

static struct i915_sw_fence *i915_sw_fence_get(struct i915_sw_fence *fence)
{
	debug_fence_assert(fence);
	kref_get(&fence->kref);
	return fence;
}
@@ -56,6 +154,7 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
	wait_queue_t *pos, *next;
	unsigned long flags;

	debug_fence_deactivate(fence);
	atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */

	/*
@@ -88,23 +187,33 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
		} while (1);
	}
	spin_unlock_irqrestore(&x->lock, flags);

	debug_fence_assert(fence);
}

static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
				     struct list_head *continuation)
{
	debug_fence_assert(fence);

	if (!atomic_dec_and_test(&fence->pending))
		return;

	debug_fence_set_state(fence, DEBUG_FENCE_IDLE, DEBUG_FENCE_NOTIFY);

	if (fence->flags & I915_SW_FENCE_MASK &&
	    __i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE)
		return;

	debug_fence_set_state(fence, DEBUG_FENCE_NOTIFY, DEBUG_FENCE_IDLE);

	__i915_sw_fence_wake_up_all(fence, continuation);
}

static void i915_sw_fence_complete(struct i915_sw_fence *fence)
{
	debug_fence_assert(fence);

	if (WARN_ON(i915_sw_fence_done(fence)))
		return;

@@ -113,6 +222,7 @@ static void i915_sw_fence_complete(struct i915_sw_fence *fence)

static void i915_sw_fence_await(struct i915_sw_fence *fence)
{
	debug_fence_assert(fence);
	WARN_ON(atomic_inc_return(&fence->pending) <= 1);
}

@@ -123,18 +233,26 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence,
{
	BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK);

	debug_fence_init(fence);

	__init_waitqueue_head(&fence->wait, name, key);
	kref_init(&fence->kref);
	atomic_set(&fence->pending, 1);
	fence->flags = (unsigned long)fn;
}

void i915_sw_fence_commit(struct i915_sw_fence *fence)
static void __i915_sw_fence_commit(struct i915_sw_fence *fence)
{
	i915_sw_fence_complete(fence);
	i915_sw_fence_put(fence);
}

void i915_sw_fence_commit(struct i915_sw_fence *fence)
{
	debug_fence_activate(fence);
	__i915_sw_fence_commit(fence);
}

static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key)
{
	list_del(&wq->task_list);
@@ -206,9 +324,13 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
	unsigned long flags;
	int pending;

	debug_fence_assert(fence);

	if (i915_sw_fence_done(signaler))
		return 0;

	debug_fence_assert(signaler);

	/* The dependency graph must be acyclic. */
	if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
		return -EINVAL;
@@ -279,7 +401,7 @@ static void timer_i915_sw_fence_wake(unsigned long data)
	dma_fence_put(cb->dma);
	cb->dma = NULL;

	i915_sw_fence_commit(cb->fence);
	__i915_sw_fence_commit(cb->fence);
	cb->timer.function = NULL;
}

@@ -290,7 +412,7 @@ static void dma_i915_sw_fence_wake(struct dma_fence *dma,

	del_timer_sync(&cb->timer);
	if (cb->timer.function)
		i915_sw_fence_commit(cb->fence);
		__i915_sw_fence_commit(cb->fence);
	dma_fence_put(cb->dma);

	kfree(cb);
@@ -304,6 +426,8 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
	struct i915_sw_dma_fence_cb *cb;
	int ret;

	debug_fence_assert(fence);

	if (dma_fence_is_signaled(dma))
		return 0;

@@ -349,6 +473,8 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
	struct dma_fence *excl;
	int ret = 0, pending;

	debug_fence_assert(fence);

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;
@@ -56,6 +56,12 @@ do { \
	__i915_sw_fence_init((fence), (fn), NULL, NULL)
#endif

#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
void i915_sw_fence_fini(struct i915_sw_fence *fence);
#else
static inline void i915_sw_fence_fini(struct i915_sw_fence *fence) {}
#endif

void i915_sw_fence_commit(struct i915_sw_fence *fence);

int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
@@ -535,7 +535,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
	if (ret)
		return ret;

	error_priv.dev = dev;
	error_priv.i915 = dev_priv;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
@@ -560,7 +560,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_destroy_error_state(&dev_priv->drm);
	i915_destroy_error_state(dev_priv);

	return count;
}
@@ -406,7 +406,7 @@ TRACE_EVENT(i915_gem_evict,
	     ),

	    TP_fast_assign(
			   __entry->dev = vm->dev->primary->index;
			   __entry->dev = vm->i915->drm.primary->index;
			   __entry->vm = vm;
			   __entry->size = size;
			   __entry->align = align;
@@ -443,13 +443,41 @@ TRACE_EVENT(i915_gem_evict_vm,
	     ),

	    TP_fast_assign(
			   __entry->dev = vm->dev->primary->index;
			   __entry->dev = vm->i915->drm.primary->index;
			   __entry->vm = vm;
	    ),

	    TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);

TRACE_EVENT(i915_gem_evict_vma,
	    TP_PROTO(struct i915_vma *vma, unsigned int flags),
	    TP_ARGS(vma, flags),

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(struct i915_address_space *, vm)
			     __field(u64, start)
			     __field(u64, size)
			     __field(unsigned long, color)
			     __field(unsigned int, flags)
			    ),

	    TP_fast_assign(
			   __entry->dev = vma->vm->i915->drm.primary->index;
			   __entry->vm = vma->vm;
			   __entry->start = vma->node.start;
			   __entry->size = vma->node.size;
			   __entry->color = vma->node.color;
			   __entry->flags = flags;
	    ),

	    TP_printk("dev=%d, vm=%p, start=%llx size=%llx, color=%lx, flags=%x",
		      __entry->dev, __entry->vm,
		      __entry->start, __entry->size,
		      __entry->color, __entry->flags)
);

TRACE_EVENT(i915_gem_ring_sync_to,
	    TP_PROTO(struct drm_i915_gem_request *to,
		     struct drm_i915_gem_request *from),
@@ -711,7 +739,7 @@ DECLARE_EVENT_CLASS(i915_ppgtt,

	    TP_fast_assign(
			__entry->vm = vm;
			__entry->dev = vm->dev->primary->index;
			__entry->dev = vm->i915->drm.primary->index;
	    ),

	    TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
drivers/gpu/drm/i915/i915_utils.h (new file, 64 lines)
@@ -0,0 +1,64 @@
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_UTILS_H
#define __I915_UTILS_H

#define range_overflows(start, size, max) ({ \
	typeof(start) start__ = (start); \
	typeof(size) size__ = (size); \
	typeof(max) max__ = (max); \
	(void)(&start__ == &size__); \
	(void)(&start__ == &max__); \
	start__ > max__ || size__ > max__ - start__; \
})

#define range_overflows_t(type, start, size, max) \
	range_overflows((type)(start), (type)(size), (type)(max))

/* Note we don't consider signbits :| */
#define overflows_type(x, T) \
	(sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))

#define ptr_mask_bits(ptr) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	(typeof(ptr))(__v & PAGE_MASK); \
})

#define ptr_unpack_bits(ptr, bits) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	(bits) = __v & ~PAGE_MASK; \
	(typeof(ptr))(__v & PAGE_MASK); \
})

#define ptr_pack_bits(ptr, bits) \
	((typeof(ptr))((unsigned long)(ptr) | (bits)))

#define fetch_and_zero(ptr) ({ \
	typeof(*ptr) __T = *(ptr); \
	*(ptr) = (typeof(*ptr))0; \
	__T; \
})

#endif /* !__I915_UTILS_H */
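/*
 * Editorial sketch (not part of the patch): how these helpers compose.
 * range_overflows() rejects a [start, start + size) window that escapes
 * [0, max) without ever wrapping: range_overflows(4088, 16, 4096) is true
 * because 16 > 4096 - 4088. fetch_and_zero() is a read-and-clear, e.g.:
 *
 *	struct dma_fence *fence = fetch_and_zero(&cb->dma);
 *	// cb->dma is now NULL; the old pointer lives on in `fence`
 *
 * (`cb` stands in for any struct with a pointer member.)
 */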
@@ -95,8 +95,13 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
	if (view) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->params.partial.offset,
						     view->params.partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->params.partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size >= obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size =
				intel_rotation_info_size(&view->params.rotated);
@@ -176,6 +181,11 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
	if (bind_flags == 0)
		return 0;

	if (GEM_WARN_ON(range_overflows(vma->node.start,
					vma->node.size,
					vma->vm->total)))
		return -ENODEV;

	if (vma_flags == 0 && vma->vm->allocate_va_range) {
		trace_i915_va_alloc(vma);
		ret = vma->vm->allocate_va_range(vma->vm,
@@ -198,9 +208,9 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
	void __iomem *ptr;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(to_i915(vma->vm->dev));
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->dev->struct_mutex);
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
		return IO_ERR_PTR(-ENODEV);

@@ -297,10 +307,14 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
	vma->flags &= ~I915_VMA_CAN_FENCE;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma,
			      unsigned long cache_level)
static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	struct drm_mm_node *gtt_space = &vma->node;
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
@@ -313,18 +327,16 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma,
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	if (!drm_mm_node_allocated(gtt_space))
		return true;
	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	if (list_empty(&gtt_space->node_list))
		return true;

	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
	if (other->allocated && !drm_mm_hole_follows(other) && other->color != cache_level)
	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
	if (other->allocated && !drm_mm_hole_follows(gtt_space) && other->color != cache_level)
	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
|
||||
@ -347,7 +359,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma,
|
||||
static int
|
||||
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
|
||||
struct drm_i915_private *dev_priv = vma->vm->i915;
|
||||
struct drm_i915_gem_object *obj = vma->obj;
|
||||
u64 start, end;
|
||||
int ret;
|
||||
@ -391,7 +403,8 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
|
||||
|
||||
if (flags & PIN_OFFSET_FIXED) {
|
||||
u64 offset = flags & PIN_OFFSET_MASK;
|
||||
if (offset & (alignment - 1) || offset > end - size) {
|
||||
if (offset & (alignment - 1) ||
|
||||
range_overflows(offset, size, end)) {
|
||||
ret = -EINVAL;
|
||||
goto err_unpin;
|
||||
}
|
||||
@ -401,7 +414,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
|
||||
vma->node.color = obj->cache_level;
|
||||
ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
|
||||
if (ret) {
|
||||
ret = i915_gem_evict_for_vma(vma);
|
||||
ret = i915_gem_evict_for_vma(vma, flags);
|
||||
if (ret == 0)
|
||||
ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
|
||||
if (ret)
|
||||
@ -469,7 +482,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
|
||||
unsigned int bound = vma->flags;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&vma->vm->dev->struct_mutex);
|
||||
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
|
||||
GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
|
||||
GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
|
||||
|
||||
@ -567,7 +580,7 @@ int i915_vma_unbind(struct i915_vma *vma)
|
||||
|
||||
for_each_active(active, idx) {
|
||||
ret = i915_gem_active_retire(&vma->last_read[idx],
|
||||
&vma->vm->dev->struct_mutex);
|
||||
&vma->vm->i915->drm.struct_mutex);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
@ -628,6 +641,7 @@ int i915_vma_unbind(struct i915_vma *vma)
|
||||
* reaped by the shrinker.
|
||||
*/
|
||||
i915_gem_object_unpin_pages(obj);
|
||||
GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
|
||||
|
||||
destroy:
|
||||
if (unlikely(i915_vma_is_closed(vma)))
|
||||
|
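The new `range_overflows(offset, size, end)` check in `i915_vma_insert()` is not just style: with unsigned arithmetic the old form `offset > end - size` silently accepts impossible requests. A worked example with invented numbers:

    /* u64 math: end = 0x1000, size = 0x2000, offset = 0.
     * Old check: end - size wraps to 0xfffffffffffff000, so
     *            offset > end - size is false and the request is accepted.
     * New check: range_overflows(0, 0x2000, 0x1000) evaluates
     *            size__ > max__ - start__ (0x2000 > 0x1000) and rejects it.
     */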
drivers/gpu/drm/i915/i915_vma.h

@@ -178,15 +178,23 @@ static inline void i915_vma_put(struct i915_vma *vma)
     i915_gem_object_put(vma->obj);
 }

+static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
+{
+    return a - b;
+}
+
 static inline long
 i915_vma_compare(struct i915_vma *vma,
          struct i915_address_space *vm,
          const struct i915_ggtt_view *view)
 {
+    ptrdiff_t cmp;
+
     GEM_BUG_ON(view && !i915_is_ggtt(vm));

-    if (vma->vm != vm)
-        return vma->vm - vm;
+    cmp = ptrdiff(vma->vm, vm);
+    if (cmp)
+        return cmp;

     if (!view)
         return vma->ggtt_view.type;
@@ -282,7 +290,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
  */
 static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
 {
-    lockdep_assert_held(&vma->vm->dev->struct_mutex);
+    lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
     GEM_BUG_ON(vma->iomap == NULL);
     i915_vma_unpin(vma);
 }
@@ -311,7 +319,7 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
 static inline bool
 i915_vma_pin_fence(struct i915_vma *vma)
 {
-    lockdep_assert_held(&vma->vm->dev->struct_mutex);
+    lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
     if (vma->fence) {
         vma->fence->pin_count++;
         return true;
@@ -330,7 +338,7 @@ i915_vma_pin_fence(struct i915_vma *vma)
 static inline void
 i915_vma_unpin_fence(struct i915_vma *vma)
 {
-    lockdep_assert_held(&vma->vm->dev->struct_mutex);
+    lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
     if (vma->fence) {
         GEM_BUG_ON(vma->fence->pin_count <= 0);
         vma->fence->pin_count--;
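`i915_vma_compare()` only needs a consistent total order over `(vm, view)` to drive an rbtree walk, so the new `ptrdiff()` helper just returns the byte distance between the two pointers as a `ptrdiff_t`. A hedged illustration of how such a comparator is consumed — the lookup loop below is schematic, not the driver's exact code:

    struct rb_node *rb = root.rb_node;      /* schematic rbtree lookup */
    while (rb) {
        struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
        long cmp = i915_vma_compare(vma, vm, view);

        if (cmp == 0)
            return vma;                     /* match */
        rb = cmp < 0 ? rb->rb_right : rb->rb_left;
    }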
drivers/gpu/drm/i915/intel_atomic.c

@@ -265,37 +265,6 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
     return 0;
 }

-static void
-intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
-                  struct intel_shared_dpll_config *shared_dpll)
-{
-    enum intel_dpll_id i;
-
-    /* Copy shared dpll state */
-    for (i = 0; i < dev_priv->num_shared_dpll; i++) {
-        struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-
-        shared_dpll[i] = pll->config;
-    }
-}
-
-struct intel_shared_dpll_config *
-intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
-{
-    struct intel_atomic_state *state = to_intel_atomic_state(s);
-
-    WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
-
-    if (!state->dpll_set) {
-        state->dpll_set = true;
-
-        intel_atomic_duplicate_dpll_state(to_i915(s->dev),
-                          state->shared_dpll);
-    }
-
-    return state->shared_dpll;
-}
-
 struct drm_atomic_state *
 intel_atomic_state_alloc(struct drm_device *dev)
 {
drivers/gpu/drm/i915/intel_audio.c

@@ -737,25 +737,49 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
     return dev_priv->cdclk_freq;
 }

+/*
+ * get the intel_encoder according to the parameter port and pipe
+ * intel_encoder is saved by the index of pipe
+ * MST & (pipe >= 0): return the av_enc_map[pipe],
+ *   when port is matched
+ * MST & (pipe < 0): this is invalid
+ * Non-MST & (pipe >= 0): only pipe = 0 (the first device entry)
+ *   will get the right intel_encoder with port matched
+ * Non-MST & (pipe < 0): get the right intel_encoder with port matched
+ */
 static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
                        int port, int pipe)
 {
+    struct intel_encoder *encoder;
+
     if (WARN_ON(pipe >= I915_MAX_PIPES))
         return NULL;

     /* MST */
-    if (pipe >= 0)
-        return dev_priv->av_enc_map[pipe];
+    if (pipe >= 0) {
+        encoder = dev_priv->av_enc_map[pipe];
+        /*
+         * when bootup, audio driver may not know it is
+         * MST or not. So it will poll all the port & pipe
+         * combinations
+         */
+        if (encoder != NULL && encoder->port == port &&
+            encoder->type == INTEL_OUTPUT_DP_MST)
+            return encoder;
+    }

     /* Non-MST */
-    for_each_pipe(dev_priv, pipe) {
-        struct intel_encoder *encoder;
+    if (pipe > 0)
+        return NULL;
+
+    for_each_pipe(dev_priv, pipe) {
         encoder = dev_priv->av_enc_map[pipe];
         if (encoder == NULL)
             continue;

+        if (encoder->type == INTEL_OUTPUT_DP_MST)
+            continue;
+
         if (port == encoder->port)
             return encoder;
     }
@@ -781,9 +805,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,

     /* 1. get the pipe */
     intel_encoder = get_saved_enc(dev_priv, port, pipe);
-    if (!intel_encoder || !intel_encoder->base.crtc ||
-        (intel_encoder->type != INTEL_OUTPUT_HDMI &&
-         intel_encoder->type != INTEL_OUTPUT_DP)) {
+    if (!intel_encoder || !intel_encoder->base.crtc) {
         DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
         err = -ENODEV;
         goto unlock;
@@ -906,6 +928,9 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
 {
     int ret;

+    if (INTEL_INFO(dev_priv)->num_pipes == 0)
+        return;
+
     ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
     if (ret < 0) {
         DRM_ERROR("failed to add audio component (%d)\n", ret);
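The new comment above `get_saved_enc()` is the whole contract; a condensed illustration with made-up state:

    /* Suppose av_enc_map[0] holds a DP-MST encoder on port B and the
     * other slots are empty:
     *   get_saved_enc(i915, PORT_B, 0)  -> av_enc_map[0] (MST, pipe known)
     *   get_saved_enc(i915, PORT_B, -1) -> NULL (MST lookups need a pipe;
     *                                     the non-MST scan skips MST encoders)
     *   get_saved_enc(i915, PORT_C, -1) -> scan every pipe for a non-MST
     *                                     encoder whose port is PORT_C
     */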
drivers/gpu/drm/i915/intel_bios.c

@@ -114,16 +114,18 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
     panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
         ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
     panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
-        dvo_timing->hsync_pulse_width;
+        ((dvo_timing->hsync_pulse_width_hi << 8) |
+         dvo_timing->hsync_pulse_width_lo);
     panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
         ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);

     panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
         dvo_timing->vactive_lo;
     panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
-        dvo_timing->vsync_off;
+        ((dvo_timing->vsync_off_hi << 4) | dvo_timing->vsync_off_lo);
     panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
-        dvo_timing->vsync_pulse_width;
+        ((dvo_timing->vsync_pulse_width_hi << 4) |
+         dvo_timing->vsync_pulse_width_lo);
     panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
         ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
     panel_fixed_mode->clock = dvo_timing->clock * 10;
@@ -330,17 +332,19 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,

         method = &backlight_data->backlight_control[panel_type];
         dev_priv->vbt.backlight.type = method->type;
+        dev_priv->vbt.backlight.controller = method->controller;
     }

     dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
     dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
     dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
     DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
-              "active %s, min brightness %u, level %u\n",
+              "active %s, min brightness %u, level %u, controller %u\n",
               dev_priv->vbt.backlight.pwm_freq_hz,
               dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
               dev_priv->vbt.backlight.min_brightness,
-              backlight_data->level[panel_type]);
+              backlight_data->level[panel_type],
+              dev_priv->vbt.backlight.controller);
 }

 /* Try to find sdvo panel data */
@@ -1159,6 +1163,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
     info->supports_dvi = is_dvi;
     info->supports_hdmi = is_hdmi;
     info->supports_dp = is_dp;
+    info->supports_edp = is_edp;

     DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
               port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
@@ -1411,13 +1416,16 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
         return false;
     }

-    if (vbt->bdb_offset + sizeof(struct bdb_header) > size) {
+    if (range_overflows_t(size_t,
+                  vbt->bdb_offset,
+                  sizeof(struct bdb_header),
+                  size)) {
         DRM_DEBUG_DRIVER("BDB header incomplete\n");
         return false;
     }

     bdb = get_bdb_header(vbt);
-    if (vbt->bdb_offset + bdb->bdb_size > size) {
+    if (range_overflows_t(size_t, vbt->bdb_offset, bdb->bdb_size, size)) {
         DRM_DEBUG_DRIVER("BDB incomplete\n");
         return false;
     }
@@ -1662,6 +1670,9 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
     };
     int i;

+    if (HAS_DDI(dev_priv))
+        return dev_priv->vbt.ddi_port_info[port].supports_edp;
+
     if (!dev_priv->vbt.child_dev_num)
         return false;

@@ -1779,7 +1790,7 @@ intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
 {
     int i;

-    if (WARN_ON_ONCE(!IS_BROXTON(dev_priv)))
+    if (WARN_ON_ONCE(!IS_GEN9_LP(dev_priv)))
         return false;

     for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
|
||||
|
||||
static inline struct intel_wait *to_wait(struct rb_node *node)
|
||||
{
|
||||
return container_of(node, struct intel_wait, node);
|
||||
return rb_entry(node, struct intel_wait, node);
|
||||
}
|
||||
|
||||
static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
|
||||
@ -427,7 +427,7 @@ static bool signal_complete(struct drm_i915_gem_request *request)
|
||||
|
||||
static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
|
||||
{
|
||||
return container_of(rb, struct drm_i915_gem_request, signaling.node);
|
||||
return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
|
||||
}
|
||||
|
||||
static void signaler_set_rtpriority(void)
|
||||
@ -623,6 +623,12 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
|
||||
{
|
||||
struct intel_breadcrumbs *b = &engine->breadcrumbs;
|
||||
|
||||
/* The engines should be idle and all requests accounted for! */
|
||||
WARN_ON(READ_ONCE(b->first_wait));
|
||||
WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
|
||||
WARN_ON(READ_ONCE(b->first_signal));
|
||||
WARN_ON(!RB_EMPTY_ROOT(&b->signals));
|
||||
|
||||
if (!IS_ERR_OR_NULL(b->signaler))
|
||||
kthread_stop(b->signaler);
|
||||
|
||||
|
@ -836,12 +836,11 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
|
||||
.destroy = intel_encoder_destroy,
|
||||
};
|
||||
|
||||
void intel_crt_init(struct drm_device *dev)
|
||||
void intel_crt_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_connector *connector;
|
||||
struct intel_crt *crt;
|
||||
struct intel_connector *intel_connector;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
i915_reg_t adpa_reg;
|
||||
u32 adpa;
|
||||
|
||||
@ -881,10 +880,10 @@ void intel_crt_init(struct drm_device *dev)
|
||||
|
||||
connector = &intel_connector->base;
|
||||
crt->connector = intel_connector;
|
||||
drm_connector_init(dev, &intel_connector->base,
|
||||
drm_connector_init(&dev_priv->drm, &intel_connector->base,
|
||||
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
|
||||
|
||||
drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
|
||||
drm_encoder_init(&dev_priv->drm, &crt->base.base, &intel_crt_enc_funcs,
|
||||
DRM_MODE_ENCODER_DAC, "CRT");
|
||||
|
||||
intel_connector_attach_encoder(intel_connector, &crt->base);
|
||||
|
@ -389,7 +389,7 @@ static void csr_load_work_fn(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_private *dev_priv;
|
||||
struct intel_csr *csr;
|
||||
const struct firmware *fw;
|
||||
const struct firmware *fw = NULL;
|
||||
int ret;
|
||||
|
||||
dev_priv = container_of(work, typeof(*dev_priv), csr.work);
|
||||
@ -405,7 +405,7 @@ static void csr_load_work_fn(struct work_struct *work)
|
||||
|
||||
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
|
||||
|
||||
DRM_INFO("Finished loading %s (v%u.%u)\n",
|
||||
DRM_INFO("Finished loading DMC firmware %s (v%u.%u)\n",
|
||||
dev_priv->csr.fw_path,
|
||||
CSR_VERSION_MAJOR(csr->version),
|
||||
CSR_VERSION_MINOR(csr->version));
|
||||
|
@ -442,7 +442,7 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
|
||||
|
||||
hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
|
||||
|
||||
if (IS_BROXTON(dev_priv))
|
||||
if (IS_GEN9_LP(dev_priv))
|
||||
return hdmi_level;
|
||||
|
||||
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
|
||||
@ -484,7 +484,7 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
|
||||
const struct ddi_buf_trans *ddi_translations_edp;
|
||||
const struct ddi_buf_trans *ddi_translations;
|
||||
|
||||
if (IS_BROXTON(dev_priv))
|
||||
if (IS_GEN9_LP(dev_priv))
|
||||
return;
|
||||
|
||||
if (IS_KABYLAKE(dev_priv)) {
|
||||
@ -567,7 +567,7 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder)
|
||||
enum port port = intel_ddi_get_encoder_port(encoder);
|
||||
const struct ddi_buf_trans *ddi_translations_hdmi;
|
||||
|
||||
if (IS_BROXTON(dev_priv))
|
||||
if (IS_GEN9_LP(dev_priv))
|
||||
return;
|
||||
|
||||
hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
|
||||
@ -1057,7 +1057,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
|
||||
return 0;
|
||||
|
||||
pll = &dev_priv->shared_dplls[dpll];
|
||||
state = &pll->config.hw_state;
|
||||
state = &pll->state.hw_state;
|
||||
|
||||
clock.m1 = 2;
|
||||
clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22;
|
||||
@ -1091,7 +1091,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
|
||||
hsw_ddi_clock_get(encoder, pipe_config);
|
||||
else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
|
||||
skl_ddi_clock_get(encoder, pipe_config);
|
||||
else if (IS_BROXTON(dev_priv))
|
||||
else if (IS_GEN9_LP(dev_priv))
|
||||
bxt_ddi_clock_get(encoder, pipe_config);
|
||||
}
|
||||
|
||||
@ -1153,7 +1153,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
|
||||
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
|
||||
return skl_ddi_pll_select(intel_crtc, crtc_state,
|
||||
intel_encoder);
|
||||
else if (IS_BROXTON(dev_priv))
|
||||
else if (IS_GEN9_LP(dev_priv))
|
||||
return bxt_ddi_pll_select(intel_crtc, crtc_state,
|
||||
intel_encoder);
|
||||
else
|
||||
@ -1429,7 +1429,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
|
||||
DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
|
||||
|
||||
out:
|
||||
if (ret && IS_BROXTON(dev_priv)) {
|
||||
if (ret && IS_GEN9_LP(dev_priv)) {
|
||||
tmp = I915_READ(BXT_PHY_CTL(port));
|
||||
if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK |
|
||||
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
|
||||
@ -1643,7 +1643,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
|
||||
|
||||
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
|
||||
skl_ddi_set_iboost(encoder, level);
|
||||
else if (IS_BROXTON(dev_priv))
|
||||
else if (IS_GEN9_LP(dev_priv))
|
||||
bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
|
||||
|
||||
return DDI_BUF_TRANS_SELECT(level);
|
||||
@ -1701,7 +1701,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
|
||||
|
||||
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
|
||||
bool has_hdmi_sink,
|
||||
struct drm_display_mode *adjusted_mode,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
const struct drm_connector_state *conn_state,
|
||||
struct intel_shared_dpll *pll)
|
||||
{
|
||||
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
|
||||
@ -1715,13 +1716,13 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
|
||||
intel_prepare_hdmi_ddi_buffers(encoder);
|
||||
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
|
||||
skl_ddi_set_iboost(encoder, level);
|
||||
else if (IS_BROXTON(dev_priv))
|
||||
else if (IS_GEN9_LP(dev_priv))
|
||||
bxt_ddi_vswing_sequence(dev_priv, level, port,
|
||||
INTEL_OUTPUT_HDMI);
|
||||
|
||||
intel_hdmi->set_infoframes(drm_encoder,
|
||||
has_hdmi_sink,
|
||||
adjusted_mode);
|
||||
crtc_state, conn_state);
|
||||
}
|
||||
|
||||
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder,
|
||||
@ -1742,8 +1743,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder,
|
||||
}
|
||||
if (type == INTEL_OUTPUT_HDMI) {
|
||||
intel_ddi_pre_enable_hdmi(intel_encoder,
|
||||
crtc->config->has_hdmi_sink,
|
||||
&crtc->config->base.adjusted_mode,
|
||||
pipe_config->has_hdmi_sink,
|
||||
pipe_config, conn_state,
|
||||
crtc->config->shared_dpll);
|
||||
}
|
||||
}
|
||||
@ -1949,6 +1950,19 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
|
||||
udelay(600);
|
||||
}
|
||||
|
||||
bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
|
||||
struct intel_crtc *intel_crtc)
|
||||
{
|
||||
u32 temp;
|
||||
|
||||
if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
|
||||
temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
|
||||
if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void intel_ddi_get_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
@ -2014,11 +2028,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
|
||||
break;
|
||||
}
|
||||
|
||||
if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
|
||||
temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
|
||||
if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
|
||||
pipe_config->has_audio = true;
|
||||
}
|
||||
pipe_config->has_audio =
|
||||
intel_ddi_is_audio_enabled(dev_priv, intel_crtc);
|
||||
|
||||
if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp &&
|
||||
pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
|
||||
@ -2042,7 +2053,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
|
||||
|
||||
intel_ddi_clock_get(encoder, pipe_config);
|
||||
|
||||
if (IS_BROXTON(dev_priv))
|
||||
if (IS_GEN9_LP(dev_priv))
|
||||
pipe_config->lane_lat_optim_mask =
|
||||
bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
|
||||
}
|
||||
@ -2066,7 +2077,7 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
|
||||
else
|
||||
ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
|
||||
|
||||
if (IS_BROXTON(dev_priv) && ret)
|
||||
if (IS_GEN9_LP(dev_priv) && ret)
|
||||
pipe_config->lane_lat_optim_mask =
|
||||
bxt_ddi_phy_calc_lane_lat_optim_mask(encoder,
|
||||
pipe_config->lane_count);
|
||||
@ -2123,10 +2134,10 @@ intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock)
|
||||
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
struct intel_shared_dpll *pll = NULL;
|
||||
struct intel_shared_dpll_config tmp_pll_config;
|
||||
struct intel_shared_dpll_state tmp_pll_state;
|
||||
enum intel_dpll_id dpll_id;
|
||||
|
||||
if (IS_BROXTON(dev_priv)) {
|
||||
if (IS_GEN9_LP(dev_priv)) {
|
||||
dpll_id = (enum intel_dpll_id)dig_port->port;
|
||||
/*
|
||||
* Select the required PLL. This works for platforms where
|
||||
@ -2139,11 +2150,11 @@ intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock)
|
||||
pll->active_mask);
|
||||
return NULL;
|
||||
}
|
||||
tmp_pll_config = pll->config;
|
||||
tmp_pll_state = pll->state;
|
||||
if (!bxt_ddi_dp_set_dpll_hw_state(clock,
|
||||
&pll->config.hw_state)) {
|
||||
&pll->state.hw_state)) {
|
||||
DRM_ERROR("Could not setup DPLL\n");
|
||||
pll->config = tmp_pll_config;
|
||||
pll->state = tmp_pll_state;
|
||||
return NULL;
|
||||
}
|
||||
} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
|
||||
@ -2154,9 +2165,8 @@ intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock)
|
||||
return pll;
|
||||
}
|
||||
|
||||
void intel_ddi_init(struct drm_device *dev, enum port port)
|
||||
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_digital_port *intel_dig_port;
|
||||
struct intel_encoder *intel_encoder;
|
||||
struct drm_encoder *encoder;
|
||||
@ -2218,12 +2228,12 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
|
||||
intel_encoder = &intel_dig_port->base;
|
||||
encoder = &intel_encoder->base;
|
||||
|
||||
drm_encoder_init(dev, encoder, &intel_ddi_funcs,
|
||||
drm_encoder_init(&dev_priv->drm, encoder, &intel_ddi_funcs,
|
||||
DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
|
||||
|
||||
intel_encoder->compute_config = intel_ddi_compute_config;
|
||||
intel_encoder->enable = intel_enable_ddi;
|
||||
if (IS_BROXTON(dev_priv))
|
||||
if (IS_GEN9_LP(dev_priv))
|
||||
intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable;
|
||||
intel_encoder->pre_enable = intel_ddi_pre_enable;
|
||||
intel_encoder->disable = intel_disable_ddi;
|
||||
@ -2244,7 +2254,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
|
||||
* configuration so that we use the proper lane count for our
|
||||
* calculations.
|
||||
*/
|
||||
if (IS_BROXTON(dev_priv) && port == PORT_A) {
|
||||
if (IS_GEN9_LP(dev_priv) && port == PORT_A) {
|
||||
if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
|
||||
DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
|
||||
intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
|
||||
|
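Most of this file's churn is the `IS_BROXTON()` to `IS_GEN9_LP()` conversion. The predicate groups the low-power gen9 parts (Broxton and the newly added Geminilake) so code paths no longer need to grow `IS_BROXTON() || IS_GEMINILAKE()` chains. A sketch of the intent — the exact definition lives in i915_drv.h and may differ in detail:

    /* assumed shape, for illustration only */
    #define IS_GEN9_LP(dev_priv) \
        (IS_GEN9(dev_priv) && INTEL_INFO(dev_priv)->is_lp)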
drivers/gpu/drm/i915/intel_device_info.c

@@ -24,11 +24,51 @@

 #include "i915_drv.h"

+#define PLATFORM_NAME(x) [INTEL_##x] = #x
+static const char * const platform_names[] = {
+    PLATFORM_NAME(I830),
+    PLATFORM_NAME(I845G),
+    PLATFORM_NAME(I85X),
+    PLATFORM_NAME(I865G),
+    PLATFORM_NAME(I915G),
+    PLATFORM_NAME(I915GM),
+    PLATFORM_NAME(I945G),
+    PLATFORM_NAME(I945GM),
+    PLATFORM_NAME(G33),
+    PLATFORM_NAME(PINEVIEW),
+    PLATFORM_NAME(I965G),
+    PLATFORM_NAME(I965GM),
+    PLATFORM_NAME(G45),
+    PLATFORM_NAME(GM45),
+    PLATFORM_NAME(IRONLAKE),
+    PLATFORM_NAME(SANDYBRIDGE),
+    PLATFORM_NAME(IVYBRIDGE),
+    PLATFORM_NAME(VALLEYVIEW),
+    PLATFORM_NAME(HASWELL),
+    PLATFORM_NAME(BROADWELL),
+    PLATFORM_NAME(CHERRYVIEW),
+    PLATFORM_NAME(SKYLAKE),
+    PLATFORM_NAME(BROXTON),
+    PLATFORM_NAME(KABYLAKE),
+    PLATFORM_NAME(GEMINILAKE),
+};
+#undef PLATFORM_NAME
+
+const char *intel_platform_name(enum intel_platform platform)
+{
+    if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
+             platform_names[platform] == NULL))
+        return "<unknown>";
+
+    return platform_names[platform];
+}
+
 void intel_device_info_dump(struct drm_i915_private *dev_priv)
 {
     const struct intel_device_info *info = &dev_priv->info;

-    DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x",
+    DRM_DEBUG_DRIVER("i915 device info: platform=%s gen=%i pciid=0x%04x rev=0x%02x",
+             intel_platform_name(info->platform),
              info->gen,
              dev_priv->drm.pdev->device,
              dev_priv->drm.pdev->revision);
@@ -270,6 +310,12 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
     struct intel_device_info *info = mkwrite_device_info(dev_priv);
     enum pipe pipe;

+    if (INTEL_GEN(dev_priv) >= 9) {
+        info->num_scalers[PIPE_A] = 2;
+        info->num_scalers[PIPE_B] = 2;
+        info->num_scalers[PIPE_C] = 1;
+    }
+
     /*
      * Skylake and Broxton currently don't expose the topmost plane as its
      * use is exclusive with the legacy cursor and we only want to expose
@@ -278,7 +324,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
      * we don't expose the topmost plane at all to prevent ABI breakage
      * down the line.
      */
-    if (IS_BROXTON(dev_priv)) {
+    if (IS_GEMINILAKE(dev_priv))
+        for_each_pipe(dev_priv, pipe)
+            info->num_sprites[pipe] = 3;
+    else if (IS_BROXTON(dev_priv)) {
         info->num_sprites[PIPE_A] = 2;
         info->num_sprites[PIPE_B] = 2;
         info->num_sprites[PIPE_C] = 1;
[file diff suppressed because it is too large]
drivers/gpu/drm/i915/intel_dp.c

@@ -156,38 +156,28 @@ static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
     u8 source_max, sink_max;

     source_max = intel_dig_port->max_lanes;
-    sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
+    sink_max = intel_dp->max_sink_lane_count;

     return min(source_max, sink_max);
 }

-/*
- * The units on the numbers in the next two are... bizarre. Examples will
- * make it clearer; this one parallels an example in the eDP spec.
- *
- * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
- *
- *     270000 * 1 * 8 / 10 == 216000
- *
- * The actual data capacity of that configuration is 2.16Gbit/s, so the
- * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
- * or equivalently, kilopixels per second - so for 1680x1050R it'd be
- * 119000. At 18bpp that's 2142000 kilobits per second.
- *
- * Thus the strange-looking division by 10 in intel_dp_link_required, to
- * get the result in decakilobits instead of kilobits.
- */
-
-static int
+int
 intel_dp_link_required(int pixel_clock, int bpp)
 {
-    return (pixel_clock * bpp + 9) / 10;
+    /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
+    return DIV_ROUND_UP(pixel_clock * bpp, 8);
 }

-static int
+int
 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
 {
-    return (max_link_clock * max_lanes * 8) / 10;
+    /* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
+     * link rate that is generally expressed in Gbps. Since, 8 bits of data
+     * is transmitted every LS_Clk per lane, there is no need to account for
+     * the channel encoding that is done in the PHY layer here.
+     */
+
+    return max_link_clock * max_lanes;
 }

 static int
@@ -223,7 +213,7 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)

     *sink_rates = default_rates;

-    return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
+    return (intel_dp->max_sink_link_bw >> 3) + 1;
 }

 static int
@@ -233,7 +223,7 @@ intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
     struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
     int size;

-    if (IS_BROXTON(dev_priv)) {
+    if (IS_GEN9_LP(dev_priv)) {
         *source_rates = bxt_rates;
         size = ARRAY_SIZE(bxt_rates);
     } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
@@ -288,6 +278,44 @@ static int intel_dp_common_rates(struct intel_dp *intel_dp,
                    common_rates);
 }

+static int intel_dp_link_rate_index(struct intel_dp *intel_dp,
+                    int *common_rates, int link_rate)
+{
+    int common_len;
+    int index;
+
+    common_len = intel_dp_common_rates(intel_dp, common_rates);
+    for (index = 0; index < common_len; index++) {
+        if (link_rate == common_rates[common_len - index - 1])
+            return common_len - index - 1;
+    }
+
+    return -1;
+}
+
+int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
+                        int link_rate, uint8_t lane_count)
+{
+    int common_rates[DP_MAX_SUPPORTED_RATES];
+    int link_rate_index;
+
+    link_rate_index = intel_dp_link_rate_index(intel_dp,
+                           common_rates,
+                           link_rate);
+    if (link_rate_index > 0) {
+        intel_dp->max_sink_link_bw = drm_dp_link_rate_to_bw_code(common_rates[link_rate_index - 1]);
+        intel_dp->max_sink_lane_count = lane_count;
+    } else if (lane_count > 1) {
+        intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);
+        intel_dp->max_sink_lane_count = lane_count >> 1;
+    } else {
+        DRM_ERROR("Link Training Unsuccessful\n");
+        return -1;
+    }
+
+    return 0;
+}
+
 static enum drm_mode_status
 intel_dp_mode_valid(struct drm_connector *connector,
             struct drm_display_mode *mode)
@@ -465,14 +493,50 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
     }
 }

+static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
+{
+    struct intel_encoder *encoder;
+    unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
+
+    /*
+     * We don't have power sequencer currently.
+     * Pick one that's not used by other ports.
+     */
+    for_each_intel_encoder(&dev_priv->drm, encoder) {
+        struct intel_dp *intel_dp;
+
+        if (encoder->type != INTEL_OUTPUT_DP &&
+            encoder->type != INTEL_OUTPUT_EDP)
+            continue;
+
+        intel_dp = enc_to_intel_dp(&encoder->base);
+
+        if (encoder->type == INTEL_OUTPUT_EDP) {
+            WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
+                intel_dp->active_pipe != intel_dp->pps_pipe);
+
+            if (intel_dp->pps_pipe != INVALID_PIPE)
+                pipes &= ~(1 << intel_dp->pps_pipe);
+        } else {
+            WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);
+
+            if (intel_dp->active_pipe != INVALID_PIPE)
+                pipes &= ~(1 << intel_dp->active_pipe);
+        }
+    }
+
+    if (pipes == 0)
+        return INVALID_PIPE;
+
+    return ffs(pipes) - 1;
+}
+
 static enum pipe
 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 {
     struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
     struct drm_device *dev = intel_dig_port->base.base.dev;
     struct drm_i915_private *dev_priv = to_i915(dev);
-    struct intel_encoder *encoder;
-    unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
     enum pipe pipe;

     lockdep_assert_held(&dev_priv->pps_mutex);
@@ -480,33 +544,20 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
     /* We should never land here with regular DP ports */
     WARN_ON(!is_edp(intel_dp));

+    WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
+        intel_dp->active_pipe != intel_dp->pps_pipe);
+
     if (intel_dp->pps_pipe != INVALID_PIPE)
         return intel_dp->pps_pipe;

-    /*
-     * We don't have power sequencer currently.
-     * Pick one that's not used by other ports.
-     */
-    for_each_intel_encoder(dev, encoder) {
-        struct intel_dp *tmp;
-
-        if (encoder->type != INTEL_OUTPUT_EDP)
-            continue;
-
-        tmp = enc_to_intel_dp(&encoder->base);
-
-        if (tmp->pps_pipe != INVALID_PIPE)
-            pipes &= ~(1 << tmp->pps_pipe);
-    }
+    pipe = vlv_find_free_pps(dev_priv);

     /*
      * Didn't find one. This should not happen since there
      * are two power sequencers and up to two eDP ports.
      */
-    if (WARN_ON(pipes == 0))
+    if (WARN_ON(pipe == INVALID_PIPE))
         pipe = PIPE_A;
-    else
-        pipe = ffs(pipes) - 1;

     vlv_steal_power_sequencer(dev, pipe);
     intel_dp->pps_pipe = pipe;
@@ -646,7 +697,7 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
     struct intel_encoder *encoder;

     if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
-            !IS_BROXTON(dev_priv)))
+            !IS_GEN9_LP(dev_priv)))
         return;

     /*
@@ -662,11 +713,18 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
     for_each_intel_encoder(dev, encoder) {
         struct intel_dp *intel_dp;

-        if (encoder->type != INTEL_OUTPUT_EDP)
+        if (encoder->type != INTEL_OUTPUT_DP &&
+            encoder->type != INTEL_OUTPUT_EDP)
             continue;

         intel_dp = enc_to_intel_dp(&encoder->base);
-        if (IS_BROXTON(dev_priv))
+
+        WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+
+        if (encoder->type != INTEL_OUTPUT_EDP)
+            continue;
+
+        if (IS_GEN9_LP(dev_priv))
             intel_dp->pps_reset = true;
         else
             intel_dp->pps_pipe = INVALID_PIPE;
@@ -689,7 +747,7 @@ static void intel_pps_get_registers(struct drm_i915_private *dev_priv,

     memset(regs, 0, sizeof(*regs));

-    if (IS_BROXTON(dev_priv))
+    if (IS_GEN9_LP(dev_priv))
         pps_idx = bxt_power_sequencer_idx(intel_dp);
     else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
         pps_idx = vlv_power_sequencer_pipe(intel_dp);
@@ -698,7 +756,7 @@ static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
     regs->pp_stat = PP_STATUS(pps_idx);
     regs->pp_on = PP_ON_DELAYS(pps_idx);
     regs->pp_off = PP_OFF_DELAYS(pps_idx);
-    if (!IS_BROXTON(dev_priv))
+    if (!IS_GEN9_LP(dev_priv))
         regs->pp_div = PP_DIVISOR(pps_idx);
 }

@@ -2402,6 +2460,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                      DP_SET_POWER_D3);
     } else {
+        struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
+
         /*
          * When turning on, we need to retry for 1ms to give the sink
          * time to wake up.
@@ -2413,6 +2473,9 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
                 break;
             msleep(1);
         }
+
+        if (ret == 1 && lspcon->active)
+            lspcon_wait_pcon_mode(lspcon);
     }

     if (ret != 1)
@@ -2820,6 +2883,8 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
     enum pipe pipe = intel_dp->pps_pipe;
     i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

+    WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+
     edp_panel_vdd_off_sync(intel_dp);

     /*
@@ -2854,22 +2919,23 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
         struct intel_dp *intel_dp;
         enum port port;

-        if (encoder->type != INTEL_OUTPUT_EDP)
+        if (encoder->type != INTEL_OUTPUT_DP &&
+            encoder->type != INTEL_OUTPUT_EDP)
             continue;

         intel_dp = enc_to_intel_dp(&encoder->base);
         port = dp_to_dig_port(intel_dp)->port;

+        WARN(intel_dp->active_pipe == pipe,
+             "stealing pipe %c power sequencer from active (e)DP port %c\n",
+             pipe_name(pipe), port_name(port));
+
         if (intel_dp->pps_pipe != pipe)
             continue;

         DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
                   pipe_name(pipe), port_name(port));

-        WARN(encoder->base.crtc,
-             "stealing pipe %c power sequencer from active eDP port %c\n",
-             pipe_name(pipe), port_name(port));
-
         /* make sure vdd is off before we steal it */
         vlv_detach_power_sequencer(intel_dp);
     }
@@ -2885,19 +2951,17 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)

     lockdep_assert_held(&dev_priv->pps_mutex);

-    if (!is_edp(intel_dp))
-        return;
+    WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

-    if (intel_dp->pps_pipe == crtc->pipe)
-        return;
-
-    /*
-     * If another power sequencer was being used on this
-     * port previously make sure to turn off vdd there while
-     * we still have control of it.
-     */
-    if (intel_dp->pps_pipe != INVALID_PIPE)
+    if (intel_dp->pps_pipe != INVALID_PIPE &&
+        intel_dp->pps_pipe != crtc->pipe) {
+        /*
+         * If another power sequencer was being used on this
+         * port previously make sure to turn off vdd there while
+         * we still have control of it.
+         */
         vlv_detach_power_sequencer(intel_dp);
+    }

     /*
      * We may be stealing the power
@@ -2905,6 +2969,11 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
      */
     vlv_steal_power_sequencer(dev, crtc->pipe);

+    intel_dp->active_pipe = crtc->pipe;
+
+    if (!is_edp(intel_dp))
+        return;
+
     /* now it's all ours */
     intel_dp->pps_pipe = crtc->pipe;

@@ -2980,7 +3049,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
     struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
     enum port port = dp_to_dig_port(intel_dp)->port;

-    if (IS_BROXTON(dev_priv))
+    if (IS_GEN9_LP(dev_priv))
         return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
     else if (INTEL_GEN(dev_priv) >= 9) {
         if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
@@ -3491,6 +3560,12 @@ intel_dp_link_down(struct intel_dp *intel_dp)
     msleep(intel_dp->panel_power_down_delay);

     intel_dp->DP = DP;
+
+    if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+        pps_lock(intel_dp);
+        intel_dp->active_pipe = INVALID_PIPE;
+        pps_unlock(intel_dp);
+    }
 }

 bool
@@ -3569,7 +3644,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
             if (val == 0)
                 break;

-            /* Value read is in kHz while drm clock is saved in deca-kHz */
+            /* Value read multiplied by 200kHz gives the per-lane
+             * link rate in kHz. The source rates are, however,
+             * stored in terms of LS_Clk kHz. The full conversion
+             * back to symbols is
+             * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
+             */
             intel_dp->sink_rates[i] = (val * 200) / 10;
         }
         intel_dp->num_sink_rates = i;
@@ -3835,7 +3915,7 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
         DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
                   intel_dp->aux.i2c_nack_count,
                   intel_dp->aux.i2c_defer_count);
-        intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
+        intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
     } else {
         struct edid *block = intel_connector->detect_edid;

@@ -3851,11 +3931,11 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
             DRM_DEBUG_KMS("Failed to write EDID checksum\n");

         test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
-        intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
+        intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_STANDARD;
     }

     /* Set test active flag here so userspace doesn't interrupt things */
-    intel_dp->compliance_test_active = 1;
+    intel_dp->compliance.test_active = 1;

     return test_result;
 }
@@ -3881,22 +3961,22 @@ static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
     switch (rxdata) {
     case DP_TEST_LINK_TRAINING:
         DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
-        intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
+        intel_dp->compliance.test_type = DP_TEST_LINK_TRAINING;
         response = intel_dp_autotest_link_training(intel_dp);
         break;
     case DP_TEST_LINK_VIDEO_PATTERN:
         DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
-        intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
+        intel_dp->compliance.test_type = DP_TEST_LINK_VIDEO_PATTERN;
         response = intel_dp_autotest_video_pattern(intel_dp);
         break;
     case DP_TEST_LINK_EDID_READ:
         DRM_DEBUG_KMS("EDID test requested\n");
-        intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
+        intel_dp->compliance.test_type = DP_TEST_LINK_EDID_READ;
         response = intel_dp_autotest_edid(intel_dp);
         break;
     case DP_TEST_LINK_PHY_TEST_PATTERN:
         DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
-        intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
+        intel_dp->compliance.test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
         response = intel_dp_autotest_phy_pattern(intel_dp);
         break;
     default:
@@ -4020,7 +4100,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
         return;

     /* if link training is requested we should perform it always */
-    if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
+    if ((intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) ||
         (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
         DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
                   intel_encoder->base.name);
@@ -4054,9 +4134,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
      * Clearing compliance test variables to allow capturing
      * of values for next automated test request.
      */
-    intel_dp->compliance_test_active = 0;
-    intel_dp->compliance_test_type = 0;
-    intel_dp->compliance_test_data = 0;
+    memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

     /*
      * Now read the DPCD to see if it's actually running
@@ -4148,9 +4226,10 @@ static enum drm_connector_status
 edp_detect(struct intel_dp *intel_dp)
 {
     struct drm_device *dev = intel_dp_to_dev(intel_dp);
+    struct drm_i915_private *dev_priv = to_i915(dev);
     enum drm_connector_status status;

-    status = intel_panel_detect(dev);
+    status = intel_panel_detect(dev_priv);
     if (status == connector_status_unknown)
         status = connector_status_connected;

@@ -4296,7 +4375,7 @@ static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
         return ibx_digital_port_connected(dev_priv, port);
     else if (HAS_PCH_SPLIT(dev_priv))
         return cpt_digital_port_connected(dev_priv, port);
-    else if (IS_BROXTON(dev_priv))
+    else if (IS_GEN9_LP(dev_priv))
         return bxt_digital_port_connected(dev_priv, port);
     else if (IS_GM45(dev_priv))
         return gm45_digital_port_connected(dev_priv, port);
@@ -4373,9 +4452,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
         status = connector_status_disconnected;

     if (status == connector_status_disconnected) {
-        intel_dp->compliance_test_active = 0;
-        intel_dp->compliance_test_type = 0;
-        intel_dp->compliance_test_data = 0;
+        memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

         if (intel_dp->is_mst) {
             DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
@@ -4396,6 +4473,12 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
               yesno(intel_dp_source_supports_hbr2(intel_dp)),
               yesno(drm_dp_tps3_supported(intel_dp->dpcd)));

+    /* Set the max lane count for sink */
+    intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+
+    /* Set the max link BW for sink */
+    intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);
+
     intel_dp_print_rates(intel_dp);

     intel_dp_read_desc(intel_dp);
@@ -4751,27 +4834,41 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
     edp_panel_vdd_schedule_off(intel_dp);
 }

+static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
+{
+    struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+
+    if ((intel_dp->DP & DP_PORT_EN) == 0)
+        return INVALID_PIPE;
+
+    if (IS_CHERRYVIEW(dev_priv))
+        return DP_PORT_TO_PIPE_CHV(intel_dp->DP);
+    else
+        return PORT_TO_PIPE(intel_dp->DP);
+}
+
 void intel_dp_encoder_reset(struct drm_encoder *encoder)
 {
     struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-    struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
-    struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
-    struct intel_dp *intel_dp = &intel_dig_port->dp;
+    struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+    struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

     if (!HAS_DDI(dev_priv))
         intel_dp->DP = I915_READ(intel_dp->output_reg);

-    if (IS_GEN9(dev_priv) && lspcon->active)
+    if (lspcon->active)
         lspcon_resume(lspcon);

-    if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
-        return;
-
     pps_lock(intel_dp);

-    /* Reinit the power sequencer, in case BIOS did something with it. */
-    intel_dp_pps_init(encoder->dev, intel_dp);
-    intel_edp_panel_vdd_sanitize(intel_dp);
+    if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+        intel_dp->active_pipe = vlv_active_pipe(intel_dp);
+
+    if (is_edp(intel_dp)) {
+        /* Reinit the power sequencer, in case BIOS did something with it. */
+        intel_dp_pps_init(encoder->dev, intel_dp);
+        intel_edp_panel_vdd_sanitize(intel_dp);
+    }

     pps_unlock(intel_dp);
 }
@@ -4879,7 +4976,7 @@ bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
     if (INTEL_GEN(dev_priv) < 5)
         return false;

-    if (port == PORT_A)
+    if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
         return true;

     return intel_bios_is_port_edp(dev_priv, port);
@@ -4926,7 +5023,7 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,

     pp_on = I915_READ(regs.pp_on);
     pp_off = I915_READ(regs.pp_off);
-    if (!IS_BROXTON(dev_priv)) {
+    if (!IS_GEN9_LP(dev_priv)) {
         I915_WRITE(regs.pp_ctrl, pp_ctl);
         pp_div = I915_READ(regs.pp_div);
     }
@@ -4944,7 +5041,7 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
     seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
            PANEL_POWER_DOWN_DELAY_SHIFT;

-    if (IS_BROXTON(dev_priv)) {
+    if (IS_GEN9_LP(dev_priv)) {
         u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
             BXT_POWER_CYCLE_DELAY_SHIFT;
         if (tmp > 0)
@@ -5101,7 +5198,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
          (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
     /* Compute the divisor for the pp clock, simply match the Bspec
      * formula. */
-    if (IS_BROXTON(dev_priv)) {
+    if (IS_GEN9_LP(dev_priv)) {
         pp_div = I915_READ(regs.pp_ctrl);
         pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
         pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
@@ -5127,7 +5224,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,

     I915_WRITE(regs.pp_on, pp_on);
     I915_WRITE(regs.pp_off, pp_off);
-    if (IS_BROXTON(dev_priv))
+    if (IS_GEN9_LP(dev_priv))
         I915_WRITE(regs.pp_ctrl, pp_div);
     else
         I915_WRITE(regs.pp_div, pp_div);
@@ -5135,7 +5232,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
     DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
               I915_READ(regs.pp_on),
               I915_READ(regs.pp_off),
-              IS_BROXTON(dev_priv) ?
+              IS_GEN9_LP(dev_priv) ?
               (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
               I915_READ(regs.pp_div));
 }
@@ -5515,7 +5612,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
     }

     downclock_mode = intel_find_panel_downclock
-                    (dev, fixed_mode, connector);
+                    (dev_priv, fixed_mode, connector);

     if (!downclock_mode) {
         DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
@@ -5624,10 +5721,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
          * If the current pipe isn't valid, try the PPS pipe, and if that
          * fails just assume pipe A.
          */
-        if (IS_CHERRYVIEW(dev_priv))
-            pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
-        else
-            pipe = PORT_TO_PIPE(intel_dp->DP);
+        pipe = vlv_active_pipe(intel_dp);

         if (pipe != PIPE_A && pipe != PIPE_B)
             pipe = intel_dp->pps_pipe;
@@ -5676,6 +5770,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
         return false;

     intel_dp->pps_pipe = INVALID_PIPE;
+    intel_dp->active_pipe = INVALID_PIPE;

     /* intel_dp vfuncs */
     if (INTEL_GEN(dev_priv) >= 9)
@@ -5704,6 +5799,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
     else
         type = DRM_MODE_CONNECTOR_DisplayPort;

+    if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+        intel_dp->active_pipe = vlv_active_pipe(intel_dp);
+
     /*
      * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
      * for DP the encoder type can be set by the caller to
@@ -5793,11 +5891,10 @@ fail:
     return false;
 }

-bool intel_dp_init(struct drm_device *dev,
+bool intel_dp_init(struct drm_i915_private *dev_priv,
            i915_reg_t output_reg,
            enum port port)
 {
-    struct drm_i915_private *dev_priv = to_i915(dev);
     struct intel_digital_port *intel_dig_port;
     struct intel_encoder *intel_encoder;
     struct drm_encoder *encoder;
@@ -5814,8 +5911,9 @@ bool intel_dp_init(struct drm_device *dev,
     intel_encoder = &intel_dig_port->base;
     encoder = &intel_encoder->base;

-    if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
-                 DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port)))
+    if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+                 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
+                 "DP %c", port_name(port)))
         goto err_encoder_init;

     intel_encoder->compute_config = intel_dp_compute_config;
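The reworked comments in `intel_dp_link_required()` and `intel_dp_max_data_rate()` are easier to trust with numbers plugged in (the mode is invented; the link parameters are standard HBR2):

    /* HBR2: LS_Clk = 540000 kHz per lane, one data byte per LS_Clk per
     * lane, so with 4 lanes:
     *     intel_dp_max_data_rate(540000, 4) = 2160000   (kB/s)
     * A 594000 kHz pixel clock at 24 bpp needs
     *     intel_dp_link_required(594000, 24)
     *         = DIV_ROUND_UP(594000 * 24, 8) = 1782000  (kB/s)
     * 1782000 <= 2160000, so the mode_valid checks accept the mode.
     */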
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -37,6 +37,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct intel_connector *connector =
+		to_intel_connector(conn_state->connector);
 	struct drm_atomic_state *state;
 	int bpp;
 	int lane_count, slots;
@@ -58,6 +60,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 
 	state = pipe_config->base.state;
 
+	if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, connector->port))
+		pipe_config->has_audio = true;
 	mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
 
 	pipe_config->pbn = mst_pbn;
@@ -83,6 +87,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 	struct intel_connector *connector =
 		to_intel_connector(old_conn_state->connector);
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	int ret;
 
 	DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
@@ -93,6 +98,10 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
 	if (ret) {
 		DRM_ERROR("failed to update payload %d\n", ret);
 	}
+	if (old_crtc_state->has_audio) {
+		intel_audio_codec_disable(encoder);
+		intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+	}
 }
 
 static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
@@ -205,6 +214,10 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
 	ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
 
 	ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+	if (pipe_config->has_audio) {
+		intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+		intel_audio_codec_enable(encoder, pipe_config, conn_state);
+	}
 }
 
 static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
@@ -227,6 +240,9 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
 	u32 temp, flags = 0;
 
+	pipe_config->has_audio =
+		intel_ddi_is_audio_enabled(dev_priv, crtc);
+
 	temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
 	if (temp & TRANS_DDI_PHSYNC)
 		flags |= DRM_MODE_FLAG_PHSYNC;
@@ -334,7 +350,17 @@ static enum drm_mode_status
 intel_dp_mst_mode_valid(struct drm_connector *connector,
 			struct drm_display_mode *mode)
 {
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_dp *intel_dp = intel_connector->mst_port;
 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+	int bpp = 24; /* MST uses fixed bpp */
+	int max_rate, mode_rate, max_lanes, max_link_clock;
+
+	max_link_clock = intel_dp_max_link_rate(intel_dp);
+	max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
+
+	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+	mode_rate = intel_dp_link_required(mode->clock, bpp);
 
 	/* TODO - validate mode against available PBN for link */
 	if (mode->clock < 10000)
@@ -343,7 +369,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
 	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
 		return MODE_H_ILLEGAL;
 
-	if (mode->clock > max_dotclk)
+	if (mode_rate > max_rate || mode->clock > max_dotclk)
 		return MODE_CLOCK_HIGH;
 
 	return MODE_OK;
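The new rejection test in intel_dp_mst_mode_valid() is plain link-budget arithmetic: a mode is only valid if its payload demand fits in what the link can carry. A standalone sketch of the two quantities being compared, assuming the usual DP helper formulas (8b/10b channel coding leaves 80% of the raw symbol rate for payload):

	/* demand: data rate the mode needs at a fixed 24 bpp */
	static int link_required(int pixel_clock_khz, int bpp)
	{
		return (pixel_clock_khz * bpp) / 8;
	}

	/* capacity: data rate the link provides across all lanes */
	static int max_data_rate(int max_link_clock_khz, int max_lanes)
	{
		return (max_link_clock_khz * max_lanes * 8) / 10;
	}

With mode_rate = link_required(mode->clock, 24) and max_rate = max_data_rate(...), the mode_rate > max_rate check filters modes that would oversubscribe the link even before any PBN accounting, which the TODO above still defers.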
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -130,6 +130,18 @@ struct bxt_ddi_phy_info {
 	 */
 	enum dpio_phy rcomp_phy;
 
+	/**
+	 * @reset_delay: delay in us to wait before setting the common reset
+	 * bit in BXT_PHY_CTL_FAMILY, which effectively enables the phy.
+	 */
+	int reset_delay;
+
+	/**
+	 * @pwron_mask: Mask with the appropriate bit set that would cause the
+	 * punit to power this phy if written to BXT_P_CR_GT_DISP_PWRON.
+	 */
+	u32 pwron_mask;
+
 	/**
 	 * @channel: struct containing per channel information.
 	 */
@@ -145,6 +157,7 @@ static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
 	[DPIO_PHY0] = {
 		.dual_channel = true,
 		.rcomp_phy = DPIO_PHY1,
+		.pwron_mask = BIT(0),
 
 		.channel = {
 			[DPIO_CH0] = { .port = PORT_B },
@@ -154,6 +167,7 @@ static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
 	[DPIO_PHY1] = {
 		.dual_channel = false,
 		.rcomp_phy = -1,
+		.pwron_mask = BIT(1),
 
 		.channel = {
 			[DPIO_CH0] = { .port = PORT_A },
@@ -161,20 +175,77 @@ static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
 	},
 };
 
+static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
+	[DPIO_PHY0] = {
+		.dual_channel = false,
+		.rcomp_phy = DPIO_PHY1,
+		.pwron_mask = BIT(0),
+		.reset_delay = 20,
+
+		.channel = {
+			[DPIO_CH0] = { .port = PORT_B },
+		}
+	},
+	[DPIO_PHY1] = {
+		.dual_channel = false,
+		.rcomp_phy = -1,
+		.pwron_mask = BIT(3),
+		.reset_delay = 20,
+
+		.channel = {
+			[DPIO_CH0] = { .port = PORT_A },
+		}
+	},
+	[DPIO_PHY2] = {
+		.dual_channel = false,
+		.rcomp_phy = DPIO_PHY1,
+		.pwron_mask = BIT(1),
+		.reset_delay = 20,
+
+		.channel = {
+			[DPIO_CH0] = { .port = PORT_C },
+		}
+	},
+};
+
 static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info)
 {
 	return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) |
 	       BIT(phy_info->channel[DPIO_CH0].port);
 }
 
-void bxt_port_to_phy_channel(enum port port,
+static const struct bxt_ddi_phy_info *
+bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
+{
+	if (IS_GEMINILAKE(dev_priv)) {
+		*count = ARRAY_SIZE(glk_ddi_phy_info);
+		return glk_ddi_phy_info;
+	} else {
+		*count = ARRAY_SIZE(bxt_ddi_phy_info);
+		return bxt_ddi_phy_info;
+	}
+}
+
+static const struct bxt_ddi_phy_info *
+bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+	int count;
+	const struct bxt_ddi_phy_info *phy_list =
+		bxt_get_phy_list(dev_priv, &count);
+
+	return &phy_list[phy];
+}
+
+void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
 			     enum dpio_phy *phy, enum dpio_channel *ch)
 {
-	const struct bxt_ddi_phy_info *phy_info;
-	int i;
+	const struct bxt_ddi_phy_info *phy_info, *phys;
+	int i, count;
 
-	for (i = 0; i < ARRAY_SIZE(bxt_ddi_phy_info); i++) {
-		phy_info = &bxt_ddi_phy_info[i];
+	phys = bxt_get_phy_list(dev_priv, &count);
+
+	for (i = 0; i < count; i++) {
+		phy_info = &phys[i];
 
 		if (port == phy_info->channel[DPIO_CH0].port) {
 			*phy = i;
@@ -203,7 +274,7 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
 	enum dpio_phy phy;
 	enum dpio_channel ch;
 
-	bxt_port_to_phy_channel(port, &phy, &ch);
+	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
 
 	/*
 	 * While we write to the group register to program all lanes at once we
@@ -241,10 +312,12 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
 bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
 			    enum dpio_phy phy)
 {
-	const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+	const struct bxt_ddi_phy_info *phy_info;
 	enum port port;
 
-	if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
+	phy_info = bxt_get_phy_info(dev_priv, phy);
+
+	if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
 		return false;
 
 	if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
@@ -255,14 +328,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
 		return false;
 	}
 
-	if (phy_info->rcomp_phy == -1 &&
-	    !(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE)) {
-		DRM_DEBUG_DRIVER("DDI PHY %d powered, but GRC isn't done\n",
-				 phy);
-
-		return false;
-	}
-
 	if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
 		DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
 			 phy);
@@ -306,9 +371,11 @@ static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
 static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
 			      enum dpio_phy phy)
 {
-	const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+	const struct bxt_ddi_phy_info *phy_info;
 	u32 val;
 
+	phy_info = bxt_get_phy_info(dev_priv, phy);
+
 	if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
 		/* Still read out the GRC value for state verification */
 		if (phy_info->rcomp_phy != -1)
@@ -317,7 +384,6 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
 		if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
 			DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
 					 "won't reprogram it\n", phy);
-
 			return;
 		}
 
@@ -326,7 +392,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
 	}
 
 	val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
-	val |= GT_DISPLAY_POWER_ON(phy);
+	val |= phy_info->pwron_mask;
 	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
 
 	/*
@@ -367,6 +433,9 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
 
 	if (phy_info->rcomp_phy != -1) {
 		uint32_t grc_code;
+
+		bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy);
+
 		/*
 		 * PHY0 isn't connected to an RCOMP resistor so copy over
 		 * the corresponding calibrated value from PHY1, and disable
@@ -384,31 +453,34 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
 		I915_WRITE(BXT_PORT_REF_DW8(phy), val);
 	}
 
+	if (phy_info->reset_delay)
+		udelay(phy_info->reset_delay);
+
 	val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
 	val |= COMMON_RESET_DIS;
 	I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
 
-	if (phy_info->rcomp_phy == -1)
-		bxt_phy_wait_grc_done(dev_priv, phy);
-
 }
 
 void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
 {
+	const struct bxt_ddi_phy_info *phy_info;
 	uint32_t val;
 
+	phy_info = bxt_get_phy_info(dev_priv, phy);
+
 	val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
 	val &= ~COMMON_RESET_DIS;
 	I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
 
 	val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
-	val &= ~GT_DISPLAY_POWER_ON(phy);
+	val &= ~phy_info->pwron_mask;
 	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
 }
 
 void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
 {
-	const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+	const struct bxt_ddi_phy_info *phy_info =
+		bxt_get_phy_info(dev_priv, phy);
 	enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
 	bool was_enabled;
 
@@ -461,10 +533,12 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
 bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
 			      enum dpio_phy phy)
 {
-	const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+	const struct bxt_ddi_phy_info *phy_info;
 	uint32_t mask;
 	bool ok;
 
+	phy_info = bxt_get_phy_info(dev_priv, phy);
+
 #define _CHK(reg, mask, exp, fmt, ...) \
 	__phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
 			       ## __VA_ARGS__)
@@ -540,7 +614,7 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
 	enum dpio_channel ch;
 	int lane;
 
-	bxt_port_to_phy_channel(port, &phy, &ch);
+	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
 
 	for (lane = 0; lane < 4; lane++) {
 		u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
@@ -568,7 +642,7 @@ bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
 	int lane;
 	uint8_t mask;
 
-	bxt_port_to_phy_channel(port, &phy, &ch);
+	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
 
 	mask = 0;
 	for (lane = 0; lane < 4; lane++) {
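The GLK table above is what forces the extra dev_priv parameter: the port-to-PHY mapping is no longer a single static array but a per-platform choice (three single-channel PHYs on GLK versus dual- plus single-channel on BXT). A condensed, self-contained model of the lookup, with simplified stand-in types rather than the driver's own:

	struct chan_desc { int port; };
	struct phy_desc  { int dual_channel; struct chan_desc channel[2]; };

	/* Scan each PHY's channels for the port; channel 1 only exists on
	 * dual-channel PHYs.  Returns the PHY index, or -1 if unmapped. */
	static int find_phy_for_port(const struct phy_desc *phys,
				     int count, int port)
	{
		int i;

		for (i = 0; i < count; i++) {
			if (port == phys[i].channel[0].port)
				return i;
			if (phys[i].dual_channel &&
			    port == phys[i].channel[1].port)
				return i;
		}
		return -1;
	}

The real bxt_port_to_phy_channel() performs the same scan over whichever table bxt_get_phy_list() selects, and additionally reports which channel matched.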
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -23,6 +23,25 @@
 
 #include "intel_drv.h"
 
+/**
+ * DOC: Display PLLs
+ *
+ * Display PLLs used for driving outputs vary by platform. While some have
+ * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
+ * from a pool. In the latter scenario, it is possible that multiple pipes
+ * share a PLL if their configurations match.
+ *
+ * This file provides an abstraction over display PLLs. The function
+ * intel_shared_dpll_init() initializes the PLLs for the given platform. The
+ * users of a PLL are tracked and that tracking is integrated with the atomic
+ * modeset interface. During an atomic operation, a PLL can be requested for a
+ * given CRTC and encoder configuration by calling intel_get_shared_dpll() and
+ * a previously used PLL can be released with intel_release_shared_dpll().
+ * Changes to the users are first staged in the atomic state, and then made
+ * effective by calling intel_shared_dpll_swap_state() during the atomic
+ * commit phase.
+ */
+
 struct intel_shared_dpll *
 skl_find_link_pll(struct drm_i915_private *dev_priv, int clock)
 {
@@ -38,11 +57,11 @@ skl_find_link_pll(struct drm_i915_private *dev_priv, int clock)
 		pll = &dev_priv->shared_dplls[i];
 
 		/* Only want to check enabled timings first */
-		if (pll->config.crtc_mask == 0)
+		if (pll->state.crtc_mask == 0)
 			continue;
 
-		if (memcmp(&dpll_hw_state, &pll->config.hw_state,
-			   sizeof(pll->config.hw_state)) == 0) {
+		if (memcmp(&dpll_hw_state, &pll->state.hw_state,
+			   sizeof(pll->state.hw_state)) == 0) {
 			found = true;
 			break;
 		}
@@ -52,8 +71,8 @@ skl_find_link_pll(struct drm_i915_private *dev_priv, int clock)
 	for (i = DPLL_ID_SKL_DPLL1;
 	     ((found == false) && (i <= DPLL_ID_SKL_DPLL3)); i++) {
 		pll = &dev_priv->shared_dplls[i];
-		if (pll->config.crtc_mask == 0) {
-			pll->config.hw_state = dpll_hw_state;
+		if (pll->state.crtc_mask == 0) {
+			pll->state.hw_state = dpll_hw_state;
 			break;
 		}
 	}
@@ -61,6 +80,45 @@ skl_find_link_pll(struct drm_i915_private *dev_priv, int clock)
 	return pll;
 }
 
+static void
+intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
+				  struct intel_shared_dpll_state *shared_dpll)
+{
+	enum intel_dpll_id i;
+
+	/* Copy shared dpll state */
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+		shared_dpll[i] = pll->state;
+	}
+}
+
+static struct intel_shared_dpll_state *
+intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
+{
+	struct intel_atomic_state *state = to_intel_atomic_state(s);
+
+	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
+
+	if (!state->dpll_set) {
+		state->dpll_set = true;
+
+		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
+						  state->shared_dpll);
+	}
+
+	return state->shared_dpll;
+}
+
+/**
+ * intel_get_shared_dpll_by_id - get a DPLL given its id
+ * @dev_priv: i915 device instance
+ * @id: pll id
+ *
+ * Returns:
+ * A pointer to the DPLL with @id
+ */
 struct intel_shared_dpll *
 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
 			    enum intel_dpll_id id)
@@ -68,6 +126,14 @@ intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
 	return &dev_priv->shared_dplls[id];
 }
 
+/**
+ * intel_get_shared_dpll_id - get the id of a DPLL
+ * @dev_priv: i915 device instance
+ * @pll: the DPLL
+ *
+ * Returns:
+ * The id of @pll
+ */
 enum intel_dpll_id
 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
 			 struct intel_shared_dpll *pll)
@@ -79,28 +145,6 @@ intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
 	return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
 }
 
-void
-intel_shared_dpll_config_get(struct intel_shared_dpll_config *config,
-			     struct intel_shared_dpll *pll,
-			     struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	enum intel_dpll_id id = intel_get_shared_dpll_id(dev_priv, pll);
-
-	config[id].crtc_mask |= 1 << crtc->pipe;
-}
-
-void
-intel_shared_dpll_config_put(struct intel_shared_dpll_config *config,
-			     struct intel_shared_dpll *pll,
-			     struct intel_crtc *crtc)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	enum intel_dpll_id id = intel_get_shared_dpll_id(dev_priv, pll);
-
-	config[id].crtc_mask &= ~(1 << crtc->pipe);
-}
-
 /* For ILK+ */
 void assert_shared_dpll(struct drm_i915_private *dev_priv,
 			struct intel_shared_dpll *pll,
@@ -118,6 +162,13 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
 		     pll->name, onoff(state), onoff(cur_state));
 }
 
+/**
+ * intel_prepare_shared_dpll - call a dpll's prepare hook
+ * @crtc: CRTC which has a shared dpll
+ *
+ * This calls the PLL's prepare hook if it has one and if the PLL is not
+ * already enabled. The prepare hook is platform specific.
+ */
 void intel_prepare_shared_dpll(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
@@ -128,24 +179,22 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
 		return;
 
 	mutex_lock(&dev_priv->dpll_lock);
-	WARN_ON(!pll->config.crtc_mask);
+	WARN_ON(!pll->state.crtc_mask);
 	if (!pll->active_mask) {
 		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
 		WARN_ON(pll->on);
 		assert_shared_dpll_disabled(dev_priv, pll);
 
-		pll->funcs.mode_set(dev_priv, pll);
+		pll->funcs.prepare(dev_priv, pll);
 	}
 	mutex_unlock(&dev_priv->dpll_lock);
 }
 
 /**
- * intel_enable_shared_dpll - enable PCH PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to enable
+ * intel_enable_shared_dpll - enable a CRTC's shared DPLL
+ * @crtc: CRTC which has a shared DPLL
  *
- * The PCH PLL needs to be enabled before the PCH transcoder, since it
- * drives the transcoder clock.
+ * Enable the shared DPLL used by @crtc.
  */
 void intel_enable_shared_dpll(struct intel_crtc *crtc)
 {
@@ -161,7 +210,7 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
 	mutex_lock(&dev_priv->dpll_lock);
 	old_mask = pll->active_mask;
 
-	if (WARN_ON(!(pll->config.crtc_mask & crtc_mask)) ||
+	if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
 	    WARN_ON(pll->active_mask & crtc_mask))
 		goto out;
 
@@ -186,6 +235,12 @@ out:
 	mutex_unlock(&dev_priv->dpll_lock);
 }
 
+/**
+ * intel_disable_shared_dpll - disable a CRTC's shared DPLL
+ * @crtc: CRTC which has a shared DPLL
+ *
+ * Disable the shared DPLL used by @crtc.
+ */
 void intel_disable_shared_dpll(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -230,7 +285,7 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_shared_dpll *pll;
-	struct intel_shared_dpll_config *shared_dpll;
+	struct intel_shared_dpll_state *shared_dpll;
 	enum intel_dpll_id i;
 
 	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
@@ -270,7 +325,7 @@ static void
 intel_reference_shared_dpll(struct intel_shared_dpll *pll,
 			    struct intel_crtc_state *crtc_state)
 {
-	struct intel_shared_dpll_config *shared_dpll;
+	struct intel_shared_dpll_state *shared_dpll;
 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	enum intel_dpll_id i = pll->id;
 
@@ -284,13 +339,24 @@ intel_reference_shared_dpll(struct intel_shared_dpll *pll,
 	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
 			 pipe_name(crtc->pipe));
 
-	intel_shared_dpll_config_get(shared_dpll, pll, crtc);
+	shared_dpll[pll->id].crtc_mask |= 1 << crtc->pipe;
 }
 
-void intel_shared_dpll_commit(struct drm_atomic_state *state)
+/**
+ * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
+ * @state: atomic state
+ *
+ * This is the dpll version of drm_atomic_helper_swap_state() since the
+ * helper does not handle driver-specific global state.
+ *
+ * For consistency with atomic helpers this function does a complete swap,
+ * i.e. it also puts the current state into @state, even though there is no
+ * need for that at this moment.
+ */
+void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->dev);
-	struct intel_shared_dpll_config *shared_dpll;
+	struct intel_shared_dpll_state *shared_dpll;
 	struct intel_shared_dpll *pll;
 	enum intel_dpll_id i;
 
@@ -299,8 +365,13 @@ void intel_shared_dpll_commit(struct drm_atomic_state *state)
 
 	shared_dpll = to_intel_atomic_state(state)->shared_dpll;
 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		struct intel_shared_dpll_state tmp;
+
 		pll = &dev_priv->shared_dplls[i];
-		pll->config = shared_dpll[i];
+
+		tmp = pll->state;
+		pll->state = shared_dpll[i];
+		shared_dpll[i] = tmp;
 	}
 }
 
@@ -323,11 +394,11 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
 	return val & DPLL_VCO_ENABLE;
 }
 
-static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
-				  struct intel_shared_dpll *pll)
+static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
+				 struct intel_shared_dpll *pll)
 {
-	I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
-	I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
+	I915_WRITE(PCH_FP0(pll->id), pll->state.hw_state.fp0);
+	I915_WRITE(PCH_FP1(pll->id), pll->state.hw_state.fp1);
 }
 
 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
@@ -349,7 +420,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
 	/* PCH refclock must be enabled first */
 	ibx_assert_pch_refclk_enabled(dev_priv);
 
-	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
+	I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);
 
 	/* Wait for the clocks to stabilize. */
 	POSTING_READ(PCH_DPLL(pll->id));
@@ -360,7 +431,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
 	 *
 	 * So write it again.
 	 */
-	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
+	I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);
 	POSTING_READ(PCH_DPLL(pll->id));
 	udelay(200);
 }
@@ -412,8 +483,19 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 	return pll;
 }
 
+static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
+			      struct intel_dpll_hw_state *hw_state)
+{
+	DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+		      "fp0: 0x%x, fp1: 0x%x\n",
+		      hw_state->dpll,
+		      hw_state->dpll_md,
+		      hw_state->fp0,
+		      hw_state->fp1);
+}
+
 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
-	.mode_set = ibx_pch_dpll_mode_set,
+	.prepare = ibx_pch_dpll_prepare,
 	.enable = ibx_pch_dpll_enable,
 	.disable = ibx_pch_dpll_disable,
 	.get_hw_state = ibx_pch_dpll_get_hw_state,
@@ -422,7 +504,7 @@ static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
 				 struct intel_shared_dpll *pll)
 {
-	I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
+	I915_WRITE(WRPLL_CTL(pll->id), pll->state.hw_state.wrpll);
 	POSTING_READ(WRPLL_CTL(pll->id));
 	udelay(20);
 }
@@ -430,7 +512,7 @@ static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
 				 struct intel_shared_dpll *pll)
 {
-	I915_WRITE(SPLL_CTL, pll->config.hw_state.spll);
+	I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
 	POSTING_READ(SPLL_CTL);
 	udelay(20);
 }
@@ -798,6 +880,13 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 	return pll;
 }
 
+static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
+			      struct intel_dpll_hw_state *hw_state)
+{
+	DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
+		      hw_state->wrpll, hw_state->spll);
+}
+
 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
 	.enable = hsw_ddi_wrpll_enable,
 	.disable = hsw_ddi_wrpll_disable,
@@ -873,7 +962,7 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
 
 	val &= ~(DPLL_CTRL1_HDMI_MODE(pll->id) | DPLL_CTRL1_SSC(pll->id) |
 		 DPLL_CTRL1_LINK_RATE_MASK(pll->id));
-	val |= pll->config.hw_state.ctrl1 << (pll->id * 6);
+	val |= pll->state.hw_state.ctrl1 << (pll->id * 6);
 
 	I915_WRITE(DPLL_CTRL1, val);
 	POSTING_READ(DPLL_CTRL1);
@@ -886,8 +975,8 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
 
 	skl_ddi_pll_write_ctrl1(dev_priv, pll);
 
-	I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1);
-	I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2);
+	I915_WRITE(regs[pll->id].cfgcr1, pll->state.hw_state.cfgcr1);
+	I915_WRITE(regs[pll->id].cfgcr2, pll->state.hw_state.cfgcr2);
 	POSTING_READ(regs[pll->id].cfgcr1);
 	POSTING_READ(regs[pll->id].cfgcr2);
 
@@ -1353,6 +1442,16 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
 	return pll;
 }
 
+static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
+			      struct intel_dpll_hw_state *hw_state)
+{
+	DRM_DEBUG_KMS("dpll_hw_state: "
+		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
+		      hw_state->ctrl1,
+		      hw_state->cfgcr1,
+		      hw_state->cfgcr2);
+}
+
 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
 	.enable = skl_ddi_pll_enable,
 	.disable = skl_ddi_pll_disable,
@@ -1373,13 +1472,23 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
 	enum dpio_phy phy;
 	enum dpio_channel ch;
 
-	bxt_port_to_phy_channel(port, &phy, &ch);
+	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
 
 	/* Non-SSC reference */
 	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
 	temp |= PORT_PLL_REF_SEL;
 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
 
+	if (IS_GEMINILAKE(dev_priv)) {
+		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+		temp |= PORT_PLL_POWER_ENABLE;
+		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+
+		if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
+				 PORT_PLL_POWER_STATE), 200))
+			DRM_ERROR("Power state not set for PLL:%d\n", port);
+	}
+
 	/* Disable 10 bit clock */
 	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
@@ -1388,31 +1497,31 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
 	/* Write P1 & P2 */
 	temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
 	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
-	temp |= pll->config.hw_state.ebb0;
+	temp |= pll->state.hw_state.ebb0;
 	I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
 
 	/* Write M2 integer */
 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
 	temp &= ~PORT_PLL_M2_MASK;
-	temp |= pll->config.hw_state.pll0;
+	temp |= pll->state.hw_state.pll0;
 	I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
 
 	/* Write N */
 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
 	temp &= ~PORT_PLL_N_MASK;
-	temp |= pll->config.hw_state.pll1;
+	temp |= pll->state.hw_state.pll1;
 	I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
 
 	/* Write M2 fraction */
 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
 	temp &= ~PORT_PLL_M2_FRAC_MASK;
-	temp |= pll->config.hw_state.pll2;
+	temp |= pll->state.hw_state.pll2;
 	I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
 
 	/* Write M2 fraction enable */
 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
 	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
-	temp |= pll->config.hw_state.pll3;
+	temp |= pll->state.hw_state.pll3;
 	I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
 
 	/* Write coeff */
@@ -1420,24 +1529,24 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
 	temp &= ~PORT_PLL_PROP_COEFF_MASK;
 	temp &= ~PORT_PLL_INT_COEFF_MASK;
 	temp &= ~PORT_PLL_GAIN_CTL_MASK;
-	temp |= pll->config.hw_state.pll6;
+	temp |= pll->state.hw_state.pll6;
 	I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
 
 	/* Write calibration val */
 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
 	temp &= ~PORT_PLL_TARGET_CNT_MASK;
-	temp |= pll->config.hw_state.pll8;
+	temp |= pll->state.hw_state.pll8;
 	I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
 
 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
 	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
-	temp |= pll->config.hw_state.pll9;
+	temp |= pll->state.hw_state.pll9;
 	I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
 
 	temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
 	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
 	temp &= ~PORT_PLL_DCO_AMP_MASK;
-	temp |= pll->config.hw_state.pll10;
+	temp |= pll->state.hw_state.pll10;
 	I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
 
 	/* Recalibrate with new settings */
@@ -1445,7 +1554,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
 	temp |= PORT_PLL_RECALIBRATE;
 	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
 	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
-	temp |= pll->config.hw_state.ebb4;
+	temp |= pll->state.hw_state.ebb4;
 	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
 
 	/* Enable PLL */
@@ -1458,6 +1567,12 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
 			200))
 		DRM_ERROR("PLL %d not locked\n", port);
 
+	if (IS_GEMINILAKE(dev_priv)) {
+		temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
+		temp |= DCC_DELAY_RANGE_2;
+		I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
+	}
+
 	/*
 	 * While we write to the group register to program all lanes at once we
 	 * can read only lane registers and we pick lanes 0/1 for that.
@@ -1465,7 +1580,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
 	temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
 	temp &= ~LANE_STAGGER_MASK;
 	temp &= ~LANESTAGGER_STRAP_OVRD;
-	temp |= pll->config.hw_state.pcsdw12;
+	temp |= pll->state.hw_state.pcsdw12;
 	I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
 }
 
@@ -1479,6 +1594,16 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
 	temp &= ~PORT_PLL_ENABLE;
 	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
 	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+
+	if (IS_GEMINILAKE(dev_priv)) {
+		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+		temp &= ~PORT_PLL_POWER_ENABLE;
+		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+
+		if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
+				  PORT_PLL_POWER_STATE), 200))
+			DRM_ERROR("Power state not reset for PLL:%d\n", port);
+	}
 }
 
 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -1491,7 +1616,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 	enum dpio_phy phy;
 	enum dpio_channel ch;
 
-	bxt_port_to_phy_channel(port, &phy, &ch);
+	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
 
 	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
 		return false;
@@ -1758,6 +1883,25 @@ bxt_get_dpll(struct intel_crtc *crtc,
 	return pll;
 }
 
+static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
+			      struct intel_dpll_hw_state *hw_state)
+{
+	DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
+		      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
+		      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
+		      hw_state->ebb0,
+		      hw_state->ebb4,
+		      hw_state->pll0,
+		      hw_state->pll1,
+		      hw_state->pll2,
+		      hw_state->pll3,
+		      hw_state->pll6,
+		      hw_state->pll8,
+		      hw_state->pll9,
+		      hw_state->pll10,
+		      hw_state->pcsdw12);
+}
+
 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
 	.enable = bxt_ddi_pll_enable,
 	.disable = bxt_ddi_pll_disable,
@@ -1798,6 +1942,9 @@ struct intel_dpll_mgr {
 	struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc,
 					      struct intel_crtc_state *crtc_state,
 					      struct intel_encoder *encoder);
+
+	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
+			      struct intel_dpll_hw_state *hw_state);
 };
 
 static const struct dpll_info pch_plls[] = {
@@ -1809,6 +1956,7 @@ static const struct dpll_info pch_plls[] = {
 static const struct intel_dpll_mgr pch_pll_mgr = {
 	.dpll_info = pch_plls,
 	.get_dpll = ibx_get_dpll,
+	.dump_hw_state = ibx_dump_hw_state,
 };
 
 static const struct dpll_info hsw_plls[] = {
@@ -1824,6 +1972,7 @@ static const struct dpll_info hsw_plls[] = {
 static const struct intel_dpll_mgr hsw_pll_mgr = {
 	.dpll_info = hsw_plls,
 	.get_dpll = hsw_get_dpll,
+	.dump_hw_state = hsw_dump_hw_state,
 };
 
 static const struct dpll_info skl_plls[] = {
@@ -1837,6 +1986,7 @@ static const struct dpll_info skl_plls[] = {
 static const struct intel_dpll_mgr skl_pll_mgr = {
 	.dpll_info = skl_plls,
 	.get_dpll = skl_get_dpll,
+	.dump_hw_state = skl_dump_hw_state,
 };
 
 static const struct dpll_info bxt_plls[] = {
@@ -1849,8 +1999,15 @@ static const struct dpll_info bxt_plls[] = {
 static const struct intel_dpll_mgr bxt_pll_mgr = {
 	.dpll_info = bxt_plls,
 	.get_dpll = bxt_get_dpll,
+	.dump_hw_state = bxt_dump_hw_state,
 };
 
+/**
+ * intel_shared_dpll_init - Initialize shared DPLLs
+ * @dev: drm device
+ *
+ * Initialize shared DPLLs for @dev.
+ */
 void intel_shared_dpll_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1860,7 +2017,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
 
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		dpll_mgr = &skl_pll_mgr;
-	else if (IS_BROXTON(dev_priv))
+	else if (IS_GEN9_LP(dev_priv))
 		dpll_mgr = &bxt_pll_mgr;
 	else if (HAS_DDI(dev_priv))
 		dpll_mgr = &hsw_pll_mgr;
@@ -1894,6 +2051,21 @@ void intel_shared_dpll_init(struct drm_device *dev)
 	intel_ddi_pll_init(dev);
 }
 
+/**
+ * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
+ * @crtc: CRTC
+ * @crtc_state: atomic state for @crtc
+ * @encoder: encoder
+ *
+ * Find an appropriate DPLL for the given CRTC and encoder combination. A
+ * reference from the @crtc to the returned pll is registered in the atomic
+ * state. That configuration is made effective by calling
+ * intel_shared_dpll_swap_state(). The reference should be released by calling
+ * intel_release_shared_dpll().
+ *
+ * Returns:
+ * A shared DPLL to be used by @crtc and @encoder with the given @crtc_state.
+ */
 struct intel_shared_dpll *
 intel_get_shared_dpll(struct intel_crtc *crtc,
 		      struct intel_crtc_state *crtc_state,
@@ -1907,3 +2079,48 @@ intel_get_shared_dpll(struct intel_crtc *crtc,
 
 	return dpll_mgr->get_dpll(crtc, crtc_state, encoder);
 }
+
+/**
+ * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state
+ * @dpll: dpll in use by @crtc
+ * @crtc: crtc
+ * @state: atomic state
+ *
+ * This function releases the reference from @crtc to @dpll from the
+ * atomic @state. The new configuration is made effective by calling
+ * intel_shared_dpll_swap_state().
+ */
+void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
+			       struct intel_crtc *crtc,
+			       struct drm_atomic_state *state)
+{
+	struct intel_shared_dpll_state *shared_dpll_state;
+
+	shared_dpll_state = intel_atomic_get_shared_dpll_state(state);
+	shared_dpll_state[dpll->id].crtc_mask &= ~(1 << crtc->pipe);
+}
+
+/**
+ * intel_dpll_dump_hw_state - write hw_state to dmesg
+ * @dev_priv: i915 drm device
+ * @hw_state: hw state to be written to the log
+ *
+ * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
+ */
+void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
+			      struct intel_dpll_hw_state *hw_state)
+{
+	if (dev_priv->dpll_mgr) {
+		dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
+	} else {
+		/* fallback for platforms that don't use the shared dpll
+		 * infrastructure
+		 */
+		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+			      "fp0: 0x%x, fp1: 0x%x\n",
+			      hw_state->dpll,
+			      hw_state->dpll_md,
+			      hw_state->fp0,
+			      hw_state->fp1);
+	}
+}
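The renamed intel_shared_dpll_swap_state() is the key to how this file now plugs into atomic modesetting. From a caller's point of view the check/commit split looks roughly like this (locking and error paths omitted, function names as in the patch):

	/* atomic check phase: stage a reference in the atomic state */
	pll = intel_get_shared_dpll(crtc, crtc_state, encoder);
	if (!pll)
		return -EINVAL;	/* no PLL fits this CRTC/encoder config */

	/* ...later, atomic commit phase: make the staged state live.
	 * This is a full swap, so the old state ends up back in @state. */
	intel_shared_dpll_swap_state(state);

The swap is complete rather than one-way purely for consistency with drm_atomic_helper_swap_state(); as the kernel-doc notes, nothing currently reads the old state back out of @state.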
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -40,32 +40,72 @@ struct intel_encoder;
 struct intel_shared_dpll;
 struct intel_dpll_mgr;
 
+/**
+ * enum intel_dpll_id - possible DPLL ids
+ *
+ * Enumeration of possible IDs for a DPLL. Real shared dpll ids must be >= 0.
+ */
 enum intel_dpll_id {
-	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
-	/* real shared dpll ids must be >= 0 */
+	/**
+	 * @DPLL_ID_PRIVATE: non-shared dpll in use
+	 */
+	DPLL_ID_PRIVATE = -1,
+
+	/**
+	 * @DPLL_ID_PCH_PLL_A: DPLL A in ILK, SNB and IVB
+	 */
 	DPLL_ID_PCH_PLL_A = 0,
+	/**
+	 * @DPLL_ID_PCH_PLL_B: DPLL B in ILK, SNB and IVB
+	 */
 	DPLL_ID_PCH_PLL_B = 1,
-	/* hsw/bdw */
+
+
+	/**
+	 * @DPLL_ID_WRPLL1: HSW and BDW WRPLL1
+	 */
 	DPLL_ID_WRPLL1 = 0,
+	/**
+	 * @DPLL_ID_WRPLL2: HSW and BDW WRPLL2
+	 */
 	DPLL_ID_WRPLL2 = 1,
+	/**
+	 * @DPLL_ID_SPLL: HSW and BDW SPLL
+	 */
 	DPLL_ID_SPLL = 2,
+	/**
+	 * @DPLL_ID_LCPLL_810: HSW and BDW 0.81 GHz LCPLL
+	 */
 	DPLL_ID_LCPLL_810 = 3,
+	/**
+	 * @DPLL_ID_LCPLL_1350: HSW and BDW 1.35 GHz LCPLL
+	 */
 	DPLL_ID_LCPLL_1350 = 4,
+	/**
+	 * @DPLL_ID_LCPLL_2700: HSW and BDW 2.7 GHz LCPLL
+	 */
 	DPLL_ID_LCPLL_2700 = 5,
 
-	/* skl */
+
+	/**
+	 * @DPLL_ID_SKL_DPLL0: SKL and later DPLL0
+	 */
 	DPLL_ID_SKL_DPLL0 = 0,
+	/**
+	 * @DPLL_ID_SKL_DPLL1: SKL and later DPLL1
+	 */
 	DPLL_ID_SKL_DPLL1 = 1,
+	/**
+	 * @DPLL_ID_SKL_DPLL2: SKL and later DPLL2
+	 */
 	DPLL_ID_SKL_DPLL2 = 2,
+	/**
+	 * @DPLL_ID_SKL_DPLL3: SKL and later DPLL3
+	 */
 	DPLL_ID_SKL_DPLL3 = 3,
 };
 #define I915_NUM_PLLS 6
 
-/** Inform the state checker that the DPLL is kept enabled even if not
- * in use by any crtc.
- */
-#define INTEL_DPLL_ALWAYS_ON	(1 << 0)
-
 struct intel_dpll_hw_state {
 	/* i9xx, pch plls */
 	uint32_t dpll;
@@ -93,36 +133,120 @@ struct intel_dpll_hw_state {
 		 pcsdw12;
 };
 
-struct intel_shared_dpll_config {
-	unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
+/**
+ * struct intel_shared_dpll_state - hold the DPLL atomic state
+ *
+ * This structure holds an atomic state for the DPLL, that can represent
+ * either its current state (in struct &intel_shared_dpll) or a desired
+ * future state which would be applied by an atomic mode set (stored in
+ * a struct &intel_atomic_state).
+ *
+ * See also intel_get_shared_dpll() and intel_release_shared_dpll().
+ */
+struct intel_shared_dpll_state {
+	/**
+	 * @crtc_mask: mask of CRTC using this DPLL, active or not
+	 */
+	unsigned crtc_mask;
+
+	/**
+	 * @hw_state: hardware configuration for the DPLL stored in
+	 * struct &intel_dpll_hw_state.
+	 */
 	struct intel_dpll_hw_state hw_state;
 };
 
+/**
+ * struct intel_shared_dpll_funcs - platform specific hooks for managing DPLLs
+ */
 struct intel_shared_dpll_funcs {
-	/* The mode_set hook is optional and should be used together with the
-	 * intel_prepare_shared_dpll function. */
-	void (*mode_set)(struct drm_i915_private *dev_priv,
-			 struct intel_shared_dpll *pll);
+	/**
+	 * @prepare:
+	 *
+	 * Optional hook to perform operations prior to enabling the PLL.
+	 * Called from intel_prepare_shared_dpll() function unless the PLL
+	 * is already enabled.
+	 */
+	void (*prepare)(struct drm_i915_private *dev_priv,
+			struct intel_shared_dpll *pll);
+
+	/**
+	 * @enable:
+	 *
+	 * Hook for enabling the pll, called from intel_enable_shared_dpll()
+	 * if the pll is not already enabled.
+	 */
 	void (*enable)(struct drm_i915_private *dev_priv,
 		       struct intel_shared_dpll *pll);
+
+	/**
+	 * @disable:
+	 *
+	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
+	 * only when it is safe to disable the pll, i.e., there are no more
+	 * tracked users for it.
+	 */
 	void (*disable)(struct drm_i915_private *dev_priv,
 			struct intel_shared_dpll *pll);
+
+	/**
+	 * @get_hw_state:
+	 *
+	 * Hook for reading the values currently programmed to the DPLL
+	 * registers. This is used for initial hw state readout and state
+	 * verification after a mode set.
+	 */
 	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
 			     struct intel_shared_dpll *pll,
 			     struct intel_dpll_hw_state *hw_state);
 };
 
+/**
+ * struct intel_shared_dpll - display PLL with tracked state and users
+ */
 struct intel_shared_dpll {
-	struct intel_shared_dpll_config config;
+	/**
+	 * @state:
+	 *
+	 * Store the state for the pll, including its hw state
+	 * and CRTCs using it.
+	 */
+	struct intel_shared_dpll_state state;
 
-	unsigned active_mask; /* mask of active CRTCs (i.e. DPMS on) */
-	bool on; /* is the PLL actually active? Disabled during modeset */
+	/**
+	 * @active_mask: mask of active CRTCs (i.e. DPMS on) using this DPLL
+	 */
+	unsigned active_mask;
+
+	/**
+	 * @on: is the PLL actually active? Disabled during modeset
+	 */
+	bool on;
+
+	/**
+	 * @name: DPLL name; used for logging
+	 */
 	const char *name;
-	/* should match the index in the dev_priv->shared_dplls array */
+
+	/**
+	 * @id: unique identifier for this DPLL; should match the index in the
+	 * dev_priv->shared_dplls array
+	 */
 	enum intel_dpll_id id;
 
+	/**
+	 * @funcs: platform specific hooks
+	 */
 	struct intel_shared_dpll_funcs funcs;
 
+#define INTEL_DPLL_ALWAYS_ON	(1 << 0)
+	/**
+	 * @flags:
+	 *
+	 * INTEL_DPLL_ALWAYS_ON
+	 *	Inform the state checker that the DPLL is kept enabled even if
+	 *	not in use by any CRTC.
+	 */
 	uint32_t flags;
 };
 
@@ -138,14 +262,6 @@ intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
 enum intel_dpll_id
 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
 			 struct intel_shared_dpll *pll);
-void
-intel_shared_dpll_config_get(struct intel_shared_dpll_config *config,
-			     struct intel_shared_dpll *pll,
-			     struct intel_crtc *crtc);
-void
-intel_shared_dpll_config_put(struct intel_shared_dpll_config *config,
-			     struct intel_shared_dpll *pll,
-			     struct intel_crtc *crtc);
 void assert_shared_dpll(struct drm_i915_private *dev_priv,
 			struct intel_shared_dpll *pll,
 			bool state);
@@ -154,12 +270,18 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 						struct intel_crtc_state *state,
 						struct intel_encoder *encoder);
+void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
+			       struct intel_crtc *crtc,
+			       struct drm_atomic_state *state);
 void intel_prepare_shared_dpll(struct intel_crtc *crtc);
 void intel_enable_shared_dpll(struct intel_crtc *crtc);
 void intel_disable_shared_dpll(struct intel_crtc *crtc);
-void intel_shared_dpll_commit(struct drm_atomic_state *state);
+void intel_shared_dpll_swap_state(struct drm_atomic_state *state);
 void intel_shared_dpll_init(struct drm_device *dev);
 
+void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
+			      struct intel_dpll_hw_state *hw_state);
+
 /* BXT dpll related functions */
 bool bxt_ddi_dp_set_dpll_hw_state(int clock,
 				  struct intel_dpll_hw_state *dpll_hw_state);
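The split between @crtc_mask and @active_mask in the new struct docs is worth spelling out, since both are bitmasks of pipes but they answer different questions:

	/* illustrative only -- the two masks track different things */
	bool has_users  = pll->state.crtc_mask != 0; /* any CRTC configured to use this PLL */
	bool is_running = pll->active_mask != 0;     /* any of those CRTCs actually enabled */

intel_prepare_shared_dpll() WARNs when a PLL is prepared with no configured users, while per the @disable kernel-doc the hardware is only turned off once no tracked users remain.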
@ -359,7 +359,7 @@ struct intel_atomic_state {
|
||||
/* SKL/KBL Only */
|
||||
unsigned int cdclk_pll_vco;
|
||||
|
||||
struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
|
||||
struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
|
||||
|
||||
/*
|
||||
* Current watermarks can't be trusted during hardware readout, so
|
||||
@ -692,8 +692,9 @@ struct intel_crtc {
|
||||
* some outputs connected to this crtc.
|
||||
*/
|
||||
bool active;
|
||||
unsigned long enabled_power_domains;
|
||||
bool lowfreq_avail;
|
||||
u8 plane_ids_mask;
|
||||
unsigned long enabled_power_domains;
|
||||
struct intel_overlay *overlay;
|
||||
struct intel_flip_work *flip_work;
|
||||
|
||||
@ -767,7 +768,8 @@ struct intel_plane_wm_parameters {
|
||||
|
||||
struct intel_plane {
|
||||
struct drm_plane base;
|
||||
int plane;
|
||||
u8 plane;
|
||||
enum plane_id id;
|
||||
enum pipe pipe;
|
||||
bool can_scale;
|
||||
int max_downscale;
|
||||
@ -841,11 +843,13 @@ struct intel_hdmi {
|
||||
enum hdmi_picture_aspect aspect_ratio;
|
||||
struct intel_connector *attached_connector;
|
||||
void (*write_infoframe)(struct drm_encoder *encoder,
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
enum hdmi_infoframe_type type,
|
||||
const void *frame, ssize_t len);
|
||||
void (*set_infoframes)(struct drm_encoder *encoder,
|
||||
bool enable,
|
||||
const struct drm_display_mode *adjusted_mode);
|
||||
const struct intel_crtc_state *crtc_state,
|
||||
const struct drm_connector_state *conn_state);
|
||||
bool (*infoframe_enabled)(struct drm_encoder *encoder,
|
||||
const struct intel_crtc_state *pipe_config);
|
||||
};
|
||||
@ -881,6 +885,16 @@ struct intel_dp_desc {
|
||||
u8 sw_minor_rev;
|
||||
} __packed;
|
||||
|
||||
struct intel_dp_compliance_data {
|
||||
unsigned long edid;
|
||||
};
|
||||
|
||||
struct intel_dp_compliance {
|
||||
unsigned long test_type;
|
||||
struct intel_dp_compliance_data test_data;
|
||||
bool test_active;
|
||||
};
|
||||
|
||||
struct intel_dp {
|
||||
i915_reg_t output_reg;
|
||||
i915_reg_t aux_ch_ctl_reg;
|
||||
@ -903,6 +917,10 @@ struct intel_dp {
|
||||
/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
|
||||
uint8_t num_sink_rates;
|
||||
int sink_rates[DP_MAX_SUPPORTED_RATES];
|
||||
/* Max lane count for the sink as per DPCD registers */
|
||||
uint8_t max_sink_lane_count;
|
||||
/* Max link BW for the sink as per DPCD registers */
|
||||
int max_sink_link_bw;
|
||||
/* sink or branch descriptor */
|
||||
struct intel_dp_desc desc;
|
||||
struct drm_dp_aux aux;
|
||||
@ -925,6 +943,12 @@ struct intel_dp {
|
||||
* this port. Only relevant on VLV/CHV.
|
||||
*/
|
||||
enum pipe pps_pipe;
|
||||
/*
|
||||
* Pipe currently driving the port. Used for preventing
|
||||
* the use of the PPS for any pipe currentrly driving
|
||||
* external DP as that will mess things up on VLV.
|
||||
*/
|
||||
enum pipe active_pipe;
|
||||
/*
|
||||
* Set if the sequencer may be reset due to a power transition,
|
||||
* requiring a reinitialization. Only relevant on BXT.
|
||||
@ -956,9 +980,7 @@ struct intel_dp {
|
||||
void (*prepare_link_retrain)(struct intel_dp *intel_dp);
|
||||
|
||||
/* Displayport compliance testing */
|
||||
unsigned long compliance_test_type;
|
||||
unsigned long compliance_test_data;
|
||||
bool compliance_test_active;
|
||||
struct intel_dp_compliance compliance;
|
||||
};
|
||||
|
||||
struct intel_lspcon {
|
||||
@ -1090,6 +1112,12 @@ dp_to_dig_port(struct intel_dp *intel_dp)
|
||||
return container_of(intel_dp, struct intel_digital_port, dp);
|
||||
}
|
||||
|
||||
static inline struct intel_lspcon *
|
||||
dp_to_lspcon(struct intel_dp *intel_dp)
|
||||
{
|
||||
return &dp_to_dig_port(intel_dp)->lspcon;
|
||||
}
|
||||
|
||||
static inline struct intel_digital_port *
|
||||
hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
|
||||
{
|
||||
@ -1142,7 +1170,7 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
|
||||
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
|
||||
|
||||
/* intel_crt.c */
|
||||
void intel_crt_init(struct drm_device *dev);
|
||||
void intel_crt_init(struct drm_i915_private *dev_priv);
|
||||
void intel_crt_reset(struct drm_encoder *encoder);
|
||||
|
||||
/* intel_ddi.c */
|
||||
@ -1153,7 +1181,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
|
||||
struct drm_connector_state *old_conn_state);
|
||||
void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder);
|
||||
void hsw_fdi_link_train(struct drm_crtc *crtc);
|
||||
void intel_ddi_init(struct drm_device *dev, enum port port);
|
||||
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
|
||||
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
|
||||
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
|
||||
void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
|
||||
@ -1166,6 +1194,8 @@ bool intel_ddi_pll_select(struct intel_crtc *crtc,
|
||||
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
|
||||
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
|
||||
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
|
||||
bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
|
||||
struct intel_crtc *intel_crtc);
|
||||
void intel_ddi_get_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *pipe_config);
|
||||
struct intel_encoder *
|
||||
@ -1210,7 +1240,7 @@ unsigned int intel_fb_xy_to_linear(int x, int y,
|
||||
void intel_add_fb_offsets(int *x, int *y,
|
||||
const struct intel_plane_state *state, int plane);
|
||||
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
|
||||
bool intel_has_pending_fb_unpin(struct drm_device *dev);
|
||||
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
|
||||
void intel_mark_busy(struct drm_i915_private *dev_priv);
|
||||
void intel_mark_idle(struct drm_i915_private *dev_priv);
|
||||
void intel_crtc_restore_mode(struct drm_crtc *crtc);
|
||||
@ -1378,12 +1408,15 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
|
||||
void intel_csr_ucode_resume(struct drm_i915_private *);
|
||||
|
||||
/* intel_dp.c */
|
||||
bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
|
||||
bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
|
||||
enum port port);
|
||||
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
|
||||
struct intel_connector *intel_connector);
|
||||
void intel_dp_set_link_params(struct intel_dp *intel_dp,
|
||||
int link_rate, uint8_t lane_count,
|
||||
bool link_mst);
|
||||
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
|
||||
int link_rate, uint8_t lane_count);
|
||||
void intel_dp_start_link_train(struct intel_dp *intel_dp);
|
||||
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
|
||||
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
|
||||
@ -1445,6 +1478,8 @@ bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
|
||||
bool __intel_dp_read_desc(struct intel_dp *intel_dp,
|
||||
struct intel_dp_desc *desc);
|
||||
bool intel_dp_read_desc(struct intel_dp *intel_dp);
|
||||
int intel_dp_link_required(int pixel_clock, int bpp);
|
||||
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
|
||||
|
||||
/* intel_dp_aux_backlight.c */
|
||||
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
|
||||
@ -1453,13 +1488,13 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
|
||||
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
|
||||
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
|
||||
/* intel_dsi.c */
|
||||
void intel_dsi_init(struct drm_device *dev);
|
||||
void intel_dsi_init(struct drm_i915_private *dev_priv);
|
||||
|
||||
/* intel_dsi_dcs_backlight.c */
|
||||
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
|
||||
|
||||
/* intel_dvo.c */
|
||||
void intel_dvo_init(struct drm_device *dev);
|
||||
void intel_dvo_init(struct drm_i915_private *dev_priv);
|
||||
/* intel_hotplug.c */
|
||||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
 void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
@@ -1523,7 +1558,8 @@ void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
 void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
 
 /* intel_hdmi.c */
-void intel_hdmi_init(struct drm_device *dev, i915_reg_t hdmi_reg, enum port port);
+void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
+		     enum port port);
 void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 			       struct intel_connector *intel_connector);
 struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
@@ -1534,7 +1570,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
 
 
 /* intel_lvds.c */
-void intel_lvds_init(struct drm_device *dev);
+void intel_lvds_init(struct drm_i915_private *dev_priv);
 struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
 bool intel_is_dual_link_lvds(struct drm_device *dev);
 
@@ -1579,9 +1615,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector,
 void intel_panel_enable_backlight(struct intel_connector *connector);
 void intel_panel_disable_backlight(struct intel_connector *connector);
 void intel_panel_destroy_backlight(struct drm_connector *connector);
-enum drm_connector_status intel_panel_detect(struct drm_device *dev);
+enum drm_connector_status intel_panel_detect(struct drm_i915_private *dev_priv);
 extern struct drm_display_mode *intel_find_panel_downclock(
-				struct drm_device *dev,
+				struct drm_i915_private *dev_priv,
 				struct drm_display_mode *fixed_mode,
 				struct drm_connector *connector);
 
@@ -1607,7 +1643,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 void intel_psr_flush(struct drm_i915_private *dev_priv,
 		     unsigned frontbuffer_bits,
 		     enum fb_op_origin origin);
-void intel_psr_init(struct drm_device *dev);
+void intel_psr_init(struct drm_i915_private *dev_priv);
 void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
 				   unsigned frontbuffer_bits);
 
@@ -1711,7 +1747,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
 void intel_update_watermarks(struct intel_crtc *crtc);
 void intel_init_pm(struct drm_i915_private *dev_priv);
 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
-void intel_pm_setup(struct drm_device *dev);
+void intel_pm_setup(struct drm_i915_private *dev_priv);
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
 void intel_gpu_ips_teardown(void);
 void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
@@ -1752,7 +1788,7 @@ static inline int intel_enable_rc6(void)
 }
 
 /* intel_sdvo.c */
-bool intel_sdvo_init(struct drm_device *dev,
+bool intel_sdvo_init(struct drm_i915_private *dev_priv,
 		     i915_reg_t reg, enum port port);
 
 
@@ -1767,7 +1803,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc);
 void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work);
 
 /* intel_tv.c */
-void intel_tv_init(struct drm_device *dev);
+void intel_tv_init(struct drm_i915_private *dev_priv);
 
 /* intel_atomic.c */
 int intel_connector_atomic_get_property(struct drm_connector *connector,
@@ -1779,8 +1815,6 @@ void intel_crtc_destroy_state(struct drm_crtc *crtc,
 			      struct drm_crtc_state *state);
 struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
 void intel_atomic_state_clear(struct drm_atomic_state *);
-struct intel_shared_dpll_config *
-intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s);
 
 static inline struct intel_crtc_state *
 intel_atomic_get_crtc_state(struct drm_atomic_state *state,
@@ -1794,6 +1828,20 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
 	return to_intel_crtc_state(crtc_state);
 }
 
+static inline struct intel_crtc_state *
+intel_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
+				     struct intel_crtc *crtc)
+{
+	struct drm_crtc_state *crtc_state;
+
+	crtc_state = drm_atomic_get_existing_crtc_state(state, &crtc->base);
+
+	if (crtc_state)
+		return to_intel_crtc_state(crtc_state);
+	else
+		return NULL;
+}
+
 static inline struct intel_plane_state *
 intel_atomic_get_existing_plane_state(struct drm_atomic_state *state,
 				      struct intel_plane *plane)
@@ -1827,4 +1875,10 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state);
 /* intel_lspcon.c */
 bool lspcon_init(struct intel_digital_port *intel_dig_port);
 void lspcon_resume(struct intel_lspcon *lspcon);
+void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
+
+/* intel_pipe_crc.c */
+int intel_pipe_crc_create(struct drm_minor *minor);
+void intel_pipe_crc_cleanup(struct drm_minor *minor);
+extern const struct file_operations i915_display_crc_ctl_fops;
 #endif /* __INTEL_DRV_H__ */
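A note on the new intel_atomic_get_existing_crtc_state() helper above: unlike
intel_atomic_get_crtc_state(), it never allocates or takes locks; it only
returns a CRTC state that is already part of the transaction, or NULL. A
minimal sketch of the intended call pattern, with a hypothetical caller name
(not part of this series):

	/* Hypothetical example for illustration only. */
	static bool pipe_is_in_this_commit(struct drm_atomic_state *state,
					   struct intel_crtc *crtc)
	{
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_existing_crtc_state(state, crtc);

		/* NULL means the CRTC's state was never duplicated into
		 * this commit, i.e. the CRTC is untouched by it. */
		return crtc_state != NULL;
	}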
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
@@ -340,7 +340,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
 	/* DSI uses short packets for sync events, so clear mode flags for DSI */
 	adjusted_mode->flags = 0;
 
-	if (IS_BROXTON(dev_priv)) {
+	if (IS_GEN9_LP(dev_priv)) {
 		/* Dual link goes to DSI transcoder A. */
 		if (intel_dsi->ports == BIT(PORT_C))
 			pipe_config->cpu_transcoder = TRANSCODER_DSI_C;
@@ -379,7 +379,8 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
 		val &= ~ULPS_STATE_MASK;
 		val |= (ULPS_STATE_ENTER | DEVICE_READY);
 		I915_WRITE(MIPI_DEVICE_READY(port), val);
-		usleep_range(2, 3);
+		/* at least 2us - relaxed for hrtimer subsystem optimization */
+		usleep_range(10, 50);
 
 		/* 3. Exit ULPS */
 		val = I915_READ(MIPI_DEVICE_READY(port));
@@ -441,7 +442,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		vlv_dsi_device_ready(encoder);
-	else if (IS_BROXTON(dev_priv))
+	else if (IS_GEN9_LP(dev_priv))
 		bxt_dsi_device_ready(encoder);
 }
 
@@ -464,7 +465,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
 	}
 
 	for_each_dsi_port(port, intel_dsi->ports) {
-		i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
+		i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
 			BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
 		u32 temp;
 
@@ -476,7 +477,10 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
 		if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) {
 			temp |= (intel_dsi->dual_link - 1)
 						<< DUAL_LINK_MODE_SHIFT;
-			temp |= intel_crtc->pipe ?
+			if (IS_BROXTON(dev_priv))
+				temp |= LANE_CONFIGURATION_DUAL_LINK_A;
+			else
+				temp |= intel_crtc->pipe ?
 					LANE_CONFIGURATION_DUAL_LINK_B :
 					LANE_CONFIGURATION_DUAL_LINK_A;
 		}
@@ -494,7 +498,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
 	enum port port;
 
 	for_each_dsi_port(port, intel_dsi->ports) {
-		i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
+		i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
 			BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
 		u32 temp;
 
@@ -663,7 +667,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
 	DRM_DEBUG_KMS("\n");
 	for_each_dsi_port(port, intel_dsi->ports) {
 		/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
-		i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
+		i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
 			BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
 		u32 val;
 
@@ -695,8 +699,6 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
 		I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
 		usleep_range(2000, 2500);
 	}
-
-	intel_disable_dsi_pll(encoder);
 }
 
 static void intel_dsi_post_disable(struct intel_encoder *encoder,
@@ -712,6 +714,8 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
 
 	intel_dsi_clear_device_ready(encoder);
 
+	intel_disable_dsi_pll(encoder);
+
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		u32 val;
 
@@ -755,12 +759,12 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
 	 * configuration, otherwise accessing DSI registers will hang the
	 * machine. See BSpec North Display Engine registers/MIPI[BXT].
	 */
-	if (IS_BROXTON(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv))
+	if (IS_GEN9_LP(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv))
 		goto out_put_power;
 
 	/* XXX: this only works for one DSI output */
 	for_each_dsi_port(port, intel_dsi->ports) {
-		i915_reg_t ctrl_reg = IS_BROXTON(dev_priv) ?
+		i915_reg_t ctrl_reg = IS_GEN9_LP(dev_priv) ?
 			BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
 		bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
 
@@ -785,7 +789,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
 		if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY))
 			continue;
 
-		if (IS_BROXTON(dev_priv)) {
+		if (IS_GEN9_LP(dev_priv)) {
 			u32 tmp = I915_READ(MIPI_CTRL(port));
 			tmp &= BXT_PIPE_SELECT_MASK;
 			tmp >>= BXT_PIPE_SELECT_SHIFT;
@@ -973,7 +977,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
 	u32 pclk;
 	DRM_DEBUG_KMS("\n");
 
-	if (IS_BROXTON(dev_priv))
+	if (IS_GEN9_LP(dev_priv))
 		bxt_dsi_get_pipe_config(encoder, pipe_config);
 
 	pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
@@ -1065,7 +1069,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
 	hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
 
 	for_each_dsi_port(port, intel_dsi->ports) {
-		if (IS_BROXTON(dev_priv)) {
+		if (IS_GEN9_LP(dev_priv)) {
 			/*
			 * Program hdisplay and vdisplay on MIPI transcoder.
			 * This is different from calculated hactive and
@@ -1152,7 +1156,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
 			tmp &= ~READ_REQUEST_PRIORITY_MASK;
 			I915_WRITE(MIPI_CTRL(port), tmp |
 					READ_REQUEST_PRIORITY_HIGH);
-		} else if (IS_BROXTON(dev_priv)) {
+		} else if (IS_GEN9_LP(dev_priv)) {
 			enum pipe pipe = intel_crtc->pipe;
 
 			tmp = I915_READ(MIPI_CTRL(port));
@@ -1190,7 +1194,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
 		if (intel_dsi->clock_stop)
 			tmp |= CLOCKSTOP;
 
-		if (IS_BROXTON(dev_priv)) {
+		if (IS_GEN9_LP(dev_priv)) {
 			tmp |= BXT_DPHY_DEFEATURE_EN;
 			if (!is_cmd_mode(intel_dsi))
 				tmp |= BXT_DEFEATURE_DPI_FIFO_CTR;
@@ -1241,7 +1245,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
 		I915_WRITE(MIPI_INIT_COUNT(port),
 				txclkesc(intel_dsi->escape_clk_div, 100));
 
-		if (IS_BROXTON(dev_priv) && (!intel_dsi->dual_link)) {
+		if (IS_GEN9_LP(dev_priv) && (!intel_dsi->dual_link)) {
 			/*
			 * BXT spec says write MIPI_INIT_COUNT for
			 * both the ports, even if only one is
@@ -1424,15 +1428,15 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
 	}
 }
 
-void intel_dsi_init(struct drm_device *dev)
+void intel_dsi_init(struct drm_i915_private *dev_priv)
 {
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_dsi *intel_dsi;
 	struct intel_encoder *intel_encoder;
 	struct drm_encoder *encoder;
 	struct intel_connector *intel_connector;
 	struct drm_connector *connector;
 	struct drm_display_mode *scan, *fixed_mode = NULL;
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum port port;
 	unsigned int i;
 
@@ -1444,7 +1448,7 @@ void intel_dsi_init(struct drm_device *dev)
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
-	} else if (IS_BROXTON(dev_priv)) {
+	} else if (IS_GEN9_LP(dev_priv)) {
 		dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
 	} else {
 		DRM_ERROR("Unsupported Mipi device to reg base");
@@ -1485,7 +1489,7 @@ void intel_dsi_init(struct drm_device *dev)
 	 * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
	 * port C. BXT isn't limited like this.
	 */
-	if (IS_BROXTON(dev_priv))
+	if (IS_GEN9_LP(dev_priv))
 		intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
 	else if (port == PORT_A)
 		intel_encoder->crtc_mask = BIT(PIPE_A);
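The mechanical IS_BROXTON() -> IS_GEN9_LP() conversion above widens each check
to cover the whole gen9 LP family (Broxton and, later, Geminilake) instead of
naming a single platform. A sketch of the resulting dispatch idiom, using a
hypothetical helper name for illustration only:

	/* Hypothetical helper, not part of this series. */
	static i915_reg_t dsi_port_ctrl_reg(struct drm_i915_private *dev_priv,
					    enum port port)
	{
		/* gen9 LP parts have their own MIPI register block */
		return IS_GEN9_LP(dev_priv) ? BXT_MIPI_PORT_CTRL(port) :
					      MIPI_PORT_CTRL(port);
	}

Note that the dual-link hunk deliberately keeps a plain IS_BROXTON() check
inside the IS_GEN9_LP() path: that particular lane-configuration quirk does
not apply to every gen9 LP platform.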
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -29,6 +29,7 @@
 #include <drm/drm_edid.h>
 #include <drm/i915_drm.h>
 #include <drm/drm_panel.h>
+#include <linux/gpio/consumer.h>
 #include <linux/slab.h>
 #include <video/mipi_display.h>
 #include <asm/intel-mid.h>
@@ -305,19 +306,44 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
 	mutex_unlock(&dev_priv->sb_lock);
 }
 
+static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
+			  u8 gpio_source, u8 gpio_index, bool value)
+{
+	/* XXX: this table is a quick ugly hack. */
+	static struct gpio_desc *bxt_gpio_table[U8_MAX + 1];
+	struct gpio_desc *gpio_desc = bxt_gpio_table[gpio_index];
+
+	if (!gpio_desc) {
+		gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev,
+						 "panel", gpio_index,
+						 value ? GPIOD_OUT_LOW :
+						 GPIOD_OUT_HIGH);
+
+		if (IS_ERR_OR_NULL(gpio_desc)) {
+			DRM_ERROR("GPIO index %u request failed (%ld)\n",
+				  gpio_index, PTR_ERR(gpio_desc));
+			return;
+		}
+
+		bxt_gpio_table[gpio_index] = gpio_desc;
+	}
+
+	gpiod_set_value(gpio_desc, value);
+}
+
 static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 {
 	struct drm_device *dev = intel_dsi->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	u8 gpio_source, gpio_index;
+	u8 gpio_source, gpio_index = 0, gpio_number;
 	bool value;
 
 	DRM_DEBUG_KMS("\n");
 
 	if (dev_priv->vbt.dsi.seq_version >= 3)
-		data++;
+		gpio_index = *data++;
 
-	gpio_index = *data++;
+	gpio_number = *data++;
 
 	/* gpio source in sequence v2 only */
 	if (dev_priv->vbt.dsi.seq_version == 2)
@@ -329,11 +355,11 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 	value = *data++ & 1;
 
 	if (IS_VALLEYVIEW(dev_priv))
-		vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+		vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
 	else if (IS_CHERRYVIEW(dev_priv))
-		chv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+		chv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
 	else
-		DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
+		bxt_exec_gpio(dev_priv, gpio_source, gpio_index, value);
 
 	return data;
 }
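For readers unfamiliar with the VBT MIPI sequence block being parsed above: a
GPIO element is a handful of raw bytes whose layout depends on the sequence
version. A hedged sketch of the v3 layout as this parser consumes it (the
struct and field names are descriptive inventions, not from the spec):

	/* Illustration only: bytes consumed by mipi_exec_gpio() for v3. */
	struct vbt_gpio_element_v3 {
		u8 gpio_index;	/* which GPIO table, v3 sequences only */
		u8 gpio_number;	/* GPIO within that table */
		u8 flags;	/* bit 0: level to drive */
	};

On v2 sequences the index byte is absent and the flags byte also carries the
GPIO source in its low bits, which is why the parser reads gpio_index only
for version >= 3 and derives gpio_source only for version == 2.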
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -156,8 +156,10 @@ static void vlv_enable_dsi_pll(struct intel_encoder *encoder,
 	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL,
 		      config->dsi_pll.ctrl & ~DSI_PLL_VCO_EN);
 
-	/* wait at least 0.5 us after ungating before enabling VCO */
-	usleep_range(1, 10);
+	/* wait at least 0.5 us after ungating before enabling VCO,
+	 * allow hrtimer subsystem optimization by relaxing timing
+	 */
+	usleep_range(10, 50);
 
 	vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);
 
@@ -351,7 +353,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
 u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
 		       struct intel_crtc_state *config)
 {
-	if (IS_BROXTON(to_i915(encoder->base.dev)))
+	if (IS_GEN9_LP(to_i915(encoder->base.dev)))
 		return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
 	else
 		return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
@@ -504,7 +506,7 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
 
 bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
 {
-	if (IS_BROXTON(dev_priv))
+	if (IS_GEN9_LP(dev_priv))
 		return bxt_dsi_pll_is_enabled(dev_priv);
 
 	MISSING_CASE(INTEL_DEVID(dev_priv));
@@ -519,7 +521,7 @@ int intel_compute_dsi_pll(struct intel_encoder *encoder,
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		return vlv_compute_dsi_pll(encoder, config);
-	else if (IS_BROXTON(dev_priv))
+	else if (IS_GEN9_LP(dev_priv))
 		return bxt_compute_dsi_pll(encoder, config);
 
 	return -ENODEV;
@@ -532,7 +534,7 @@ void intel_enable_dsi_pll(struct intel_encoder *encoder,
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		vlv_enable_dsi_pll(encoder, config);
-	else if (IS_BROXTON(dev_priv))
+	else if (IS_GEN9_LP(dev_priv))
 		bxt_enable_dsi_pll(encoder, config);
 }
 
@@ -542,7 +544,7 @@ void intel_disable_dsi_pll(struct intel_encoder *encoder)
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		vlv_disable_dsi_pll(encoder);
-	else if (IS_BROXTON(dev_priv))
+	else if (IS_GEN9_LP(dev_priv))
 		bxt_disable_dsi_pll(encoder);
 }
 
@@ -566,7 +568,7 @@ void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	if (IS_BROXTON(dev_priv))
+	if (IS_GEN9_LP(dev_priv))
 		bxt_dsi_reset_clocks(encoder, port);
 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		vlv_dsi_reset_clocks(encoder, port);
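The usleep_range() widenings in the two DSI files deserve a word: the second
argument is an upper bound the hrtimer core may use to coalesce wakeups, so a
1 us window such as usleep_range(1, 10) effectively demands a precise (and
expensive) timer, while usleep_range(10, 50) still satisfies the 0.5 us
hardware minimum and gives the scheduler real slack. A minimal sketch of the
trade-off:

	#include <linux/delay.h>

	/* Both calls sleep at least `min` microseconds; the wider range
	 * lets the kernel batch this wakeup with other pending timers. */
	usleep_range(1, 10);	/* tight: precise but costly wakeup */
	usleep_range(10, 50);	/* relaxed: same correctness, cheaper */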
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
@@ -422,9 +422,8 @@ static enum port intel_dvo_port(i915_reg_t dvo_reg)
 		return PORT_C;
 }
 
-void intel_dvo_init(struct drm_device *dev)
+void intel_dvo_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_encoder *intel_encoder;
 	struct intel_dvo *intel_dvo;
 	struct intel_connector *intel_connector;
@@ -511,7 +510,7 @@ void intel_dvo_init(struct drm_device *dev)
 			continue;
 
 		port = intel_dvo_port(dvo->dvo_reg);
-		drm_encoder_init(dev, &intel_encoder->base,
+		drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
 				 &intel_dvo_enc_funcs, encoder_type,
 				 "DVO %c", port_name(port));
 
@@ -523,14 +522,14 @@ void intel_dvo_init(struct drm_device *dev)
 		case INTEL_DVO_CHIP_TMDS:
 			intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
 				(1 << INTEL_OUTPUT_DVO);
-			drm_connector_init(dev, connector,
+			drm_connector_init(&dev_priv->drm, connector,
 					   &intel_dvo_connector_funcs,
 					   DRM_MODE_CONNECTOR_DVII);
 			encoder_type = DRM_MODE_ENCODER_TMDS;
 			break;
 		case INTEL_DVO_CHIP_LVDS:
 			intel_encoder->cloneable = 0;
-			drm_connector_init(dev, connector,
+			drm_connector_init(&dev_priv->drm, connector,
 					   &intel_dvo_connector_funcs,
 					   DRM_MODE_CONNECTOR_LVDS);
 			encoder_type = DRM_MODE_ENCODER_LVDS;
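The dev -> dev_priv conversions running through this series rely on
struct drm_device being embedded in struct drm_i915_private, so the two
pointers are interconvertible without any allocation or lookup. A simplified
sketch of the relationship, assuming the embedded layout of this era:

	struct drm_i915_private {
		struct drm_device drm;	/* embedded, not a pointer */
		/* ... */
	};

	/* &dev_priv->drm recovers the drm_device, and to_i915() is just
	 * container_of() in the other direction: */
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_private *again = to_i915(dev);	/* == dev_priv */

Passing dev_priv directly saves a to_i915() hop in every callee and makes the
i915-private nature of these entry points explicit in their signatures.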
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -111,13 +111,12 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 
 /**
  * intel_engines_init() - allocate, populate and init the Engine Command Streamers
- * @dev: DRM device.
+ * @dev_priv: i915 device private
  *
  * Return: non-zero if the initialization failed.
  */
-int intel_engines_init(struct drm_device *dev)
+int intel_engines_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
 	unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
 	unsigned int mask = 0;
@@ -257,7 +256,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
 
 	WARN_ON(engine->scratch);
 
-	obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
+	obj = i915_gem_object_create_stolen(engine->i915, size);
 	if (!obj)
 		obj = i915_gem_object_create_internal(engine->i915, size);
 	if (IS_ERR(obj)) {
@@ -305,15 +304,30 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 {
 	int ret;
 
-	ret = intel_engine_init_breadcrumbs(engine);
+	/* We may need to do things with the shrinker which
+	 * require us to immediately switch back to the default
+	 * context. This can cause a problem as pinning the
+	 * default context also requires GTT space which may not
+	 * be available. To avoid this we always pin the default
+	 * context.
+	 */
+	ret = engine->context_pin(engine, engine->i915->kernel_context);
 	if (ret)
 		return ret;
 
+	ret = intel_engine_init_breadcrumbs(engine);
+	if (ret)
+		goto err_unpin;
+
 	ret = i915_gem_render_state_init(engine);
 	if (ret)
-		return ret;
+		goto err_unpin;
 
 	return 0;
+
+err_unpin:
+	engine->context_unpin(engine, engine->i915->kernel_context);
+	return ret;
 }
 
 /**
@@ -331,6 +345,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	intel_engine_fini_breadcrumbs(engine);
 	intel_engine_cleanup_cmd_parser(engine);
 	i915_gem_batch_pool_fini(&engine->batch_pool);
+
+	engine->context_unpin(engine, engine->i915->kernel_context);
 }
 
 u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
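The init_common/cleanup_common hunks follow the kernel's standard goto-unwind
idiom: resources are acquired in order, and each later failure jumps to a
label that releases everything acquired so far, in reverse order. A
stripped-down sketch of the shape, with hypothetical names:

	/* Illustration of the unwind pattern, not driver code. */
	int setup(struct thing *t)
	{
		int ret;

		ret = acquire_a(t);
		if (ret)
			return ret;	/* nothing to undo yet */

		ret = acquire_b(t);
		if (ret)
			goto err_release_a;

		return 0;

	err_release_a:
		release_a(t);
		return ret;
	}

Pinning the kernel context first also guarantees cleanup_common() can
unconditionally unpin it, keeping teardown symmetric with setup.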
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
@@ -538,7 +538,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
 	    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		end = ggtt->stolen_size - 8 * 1024 * 1024;
 	else
-		end = ggtt->stolen_usable_size;
+		end = U64_MAX;
 
 	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
@@ -1317,7 +1317,7 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
 	if (!HAS_FBC(dev_priv))
 		return 0;
 
-	if (IS_BROADWELL(dev_priv))
+	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
 		return 1;
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -145,9 +145,9 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
 	 * important and we should probably use that space with FBC or other
	 * features. */
 	if (size * 2 < ggtt->stolen_usable_size)
-		obj = i915_gem_object_create_stolen(dev, size);
+		obj = i915_gem_object_create_stolen(dev_priv, size);
 	if (obj == NULL)
-		obj = i915_gem_object_create(dev, size);
+		obj = i915_gem_object_create(dev_priv, size);
 	if (IS_ERR(obj)) {
 		DRM_ERROR("failed to allocate framebuffer\n");
 		ret = PTR_ERR(obj);
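One subtlety worth noting in intelfb_alloc(): the two allocators use different
failure conventions, so the fallback chain must check both. The stolen-memory
path yields NULL on failure while the regular path yields an ERR_PTR. A
sketch of the pattern under that assumption, with hypothetical helper names:

	/* Illustration of the mixed NULL/ERR_PTR fallback, simplified. */
	obj = try_stolen(dev_priv, size);		/* NULL on failure */
	if (obj == NULL)
		obj = try_shmem(dev_priv, size);	/* ERR_PTR on failure */
	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* only the final allocator errors out */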
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -23,15 +23,6 @@
 #ifndef _INTEL_GUC_FWIF_H
 #define _INTEL_GUC_FWIF_H
 
-/*
- * This file is partially autogenerated, although currently with some manual
- * fixups afterwards. In future, it should be entirely autogenerated, in order
- * to ensure that the definitions herein remain in sync with those used by the
- * GuC's own firmware.
- *
- * EDITING THIS FILE IS THEREFORE NOT RECOMMENDED - YOUR CHANGES MAY BE LOST.
- */
-
 #define GFXCORE_FAMILY_GEN9		12
 #define GFXCORE_FAMILY_UNKNOWN		0x7fffffff
 
@@ -489,18 +480,18 @@ union guc_log_control {
 } __packed;
 
 /* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
-enum host2guc_action {
-	HOST2GUC_ACTION_DEFAULT = 0x0,
-	HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
-	HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
-	HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
-	HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
-	HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
-	HOST2GUC_ACTION_ENTER_S_STATE = 0x501,
-	HOST2GUC_ACTION_EXIT_S_STATE = 0x502,
-	HOST2GUC_ACTION_SLPC_REQUEST = 0x3003,
-	HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000,
-	HOST2GUC_ACTION_LIMIT
+enum intel_guc_action {
+	INTEL_GUC_ACTION_DEFAULT = 0x0,
+	INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
+	INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
+	INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
+	INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
+	INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
+	INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
+	INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
+	INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003,
+	INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000,
+	INTEL_GUC_ACTION_LIMIT
 };
 
 /*
@@ -509,22 +500,22 @@ enum host2guc_action {
  * by the fact that all the MASK bits are set. The remaining bits
 * give more detail.
 */
-#define	GUC2HOST_RESPONSE_MASK		((u32)0xF0000000)
-#define	GUC2HOST_IS_RESPONSE(x)		((u32)(x) >= GUC2HOST_RESPONSE_MASK)
-#define	GUC2HOST_STATUS(x)		(GUC2HOST_RESPONSE_MASK | (x))
+#define	INTEL_GUC_RECV_MASK		((u32)0xF0000000)
+#define	INTEL_GUC_RECV_IS_RESPONSE(x)	((u32)(x) >= INTEL_GUC_RECV_MASK)
+#define	INTEL_GUC_RECV_STATUS(x)	(INTEL_GUC_RECV_MASK | (x))
 
 /* GUC will return status back to SOFT_SCRATCH_O_REG */
-enum guc2host_status {
-	GUC2HOST_STATUS_SUCCESS = GUC2HOST_STATUS(0x0),
-	GUC2HOST_STATUS_ALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x10),
-	GUC2HOST_STATUS_DEALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x20),
-	GUC2HOST_STATUS_GENERIC_FAIL = GUC2HOST_STATUS(0x0000F000)
+enum intel_guc_status {
+	INTEL_GUC_STATUS_SUCCESS = INTEL_GUC_RECV_STATUS(0x0),
+	INTEL_GUC_STATUS_ALLOCATE_DOORBELL_FAIL = INTEL_GUC_RECV_STATUS(0x10),
+	INTEL_GUC_STATUS_DEALLOCATE_DOORBELL_FAIL = INTEL_GUC_RECV_STATUS(0x20),
+	INTEL_GUC_STATUS_GENERIC_FAIL = INTEL_GUC_RECV_STATUS(0x0000F000)
 };
 
 /* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
-enum guc2host_message {
-	GUC2HOST_MSG_CRASH_DUMP_POSTED = (1 << 1),
-	GUC2HOST_MSG_FLUSH_LOG_BUFFER = (1 << 3)
+enum intel_guc_recv_message {
+	INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1),
+	INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER = BIT(3)
 };
 
 #endif
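A short worked example of the response encoding above, since the rename keeps
the semantics intact: any value whose top four bits are all set is a response,
and a status is formed by OR-ing a detail code into that mask.

	/* Illustration of the INTEL_GUC_RECV_* arithmetic. */
	u32 status = INTEL_GUC_RECV_STATUS(0x10);	/* 0xF0000010 */
	bool is_resp = INTEL_GUC_RECV_IS_RESPONSE(status);	/* true: >= 0xF0000000 */
	u32 code = status & ~INTEL_GUC_RECV_MASK;	/* 0x10: doorbell alloc fail */

The (1 << n) to BIT(n) change in the message enum is purely stylistic; BIT()
expands to the same single-bit constant.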
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -28,7 +28,7 @@
 */
 #include <linux/firmware.h>
 #include "i915_drv.h"
-#include "intel_guc.h"
+#include "intel_uc.h"
 
 /**
  * DOC: GuC-specific firmware loader
@@ -220,14 +220,14 @@ static void guc_params_init(struct drm_i915_private *dev_priv)
 	params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
 
 	if (guc->ads_vma) {
-		u32 ads = i915_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
+		u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
 		params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
 		params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
 	}
 
 	/* If GuC submission is enabled, set up additional parameters here */
 	if (i915.enable_guc_submission) {
-		u32 pgs = i915_ggtt_offset(dev_priv->guc.ctx_pool_vma);
+		u32 pgs = guc_ggtt_offset(dev_priv->guc.ctx_pool_vma);
 		u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
 
 		pgs >>= PAGE_SHIFT;
@@ -297,7 +297,7 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
 	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
 
 	/* Set the source address for the new blob */
-	offset = i915_ggtt_offset(vma) + guc_fw->header_offset;
+	offset = guc_ggtt_offset(vma) + guc_fw->header_offset;
 	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
 	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
 
@@ -437,7 +437,7 @@ static int guc_hw_reset(struct drm_i915_private *dev_priv)
 
 /**
  * intel_guc_setup() - finish preparing the GuC for activity
- * @dev: drm device
+ * @dev_priv: i915 device private
  *
  * Called from gem_init_hw() during driver loading and also after a GPU reset.
  *
@@ -448,9 +448,8 @@ static int guc_hw_reset(struct drm_i915_private *dev_priv)
  *
 * Return: non-zero code on error
 */
-int intel_guc_setup(struct drm_device *dev)
+int intel_guc_setup(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 	const char *fw_path = guc_fw->guc_fw_path;
 	int retries, ret, err;
@@ -588,11 +587,12 @@ fail:
 	return ret;
 }
 
-static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
+static void guc_fw_fetch(struct drm_i915_private *dev_priv,
+			 struct intel_guc_fw *guc_fw)
 {
-	struct pci_dev *pdev = dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct drm_i915_gem_object *obj;
-	const struct firmware *fw;
+	const struct firmware *fw = NULL;
 	struct guc_css_header *css;
 	size_t size;
 	int err;
@@ -648,7 +648,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 
 	/* Header and uCode will be loaded to WOPCM. Size of the two. */
 	size = guc_fw->header_size + guc_fw->ucode_size;
-	if (size > guc_wopcm_size(to_i915(dev))) {
+	if (size > guc_wopcm_size(dev_priv)) {
 		DRM_NOTE("Firmware is too large to fit in WOPCM\n");
 		goto fail;
 	}
@@ -675,9 +675,9 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 		guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
 		guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
 
-	mutex_lock(&dev->struct_mutex);
-	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_lock(&dev_priv->drm.struct_mutex);
+	obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 	if (IS_ERR_OR_NULL(obj)) {
 		err = obj ? PTR_ERR(obj) : -ENOMEM;
 		goto fail;
@@ -699,12 +699,12 @@ fail:
 	DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
 		err, fw, guc_fw->guc_fw_obj);
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&dev_priv->drm.struct_mutex);
 	obj = guc_fw->guc_fw_obj;
 	if (obj)
 		i915_gem_object_put(obj);
 	guc_fw->guc_fw_obj = NULL;
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	release_firmware(fw);	/* OK even if fw is NULL */
 	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
@@ -712,16 +712,15 @@ fail:
 
 /**
  * intel_guc_init() - define parameters and fetch firmware
- * @dev: drm device
+ * @dev_priv: i915 device private
  *
 * Called early during driver load, but after GEM is initialised.
 *
 * The firmware will be transferred to the GuC's memory later,
 * when intel_guc_setup() is called.
 */
-void intel_guc_init(struct drm_device *dev)
+void intel_guc_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 	const char *fw_path;
 
@@ -754,7 +753,6 @@ void intel_guc_init(struct drm_device *dev)
 		fw_path = "";	/* unknown device */
 	}
 
-	guc_fw->guc_dev = dev;
 	guc_fw->guc_fw_path = fw_path;
 	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
 	guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
@@ -769,20 +767,19 @@ void intel_guc_init(struct drm_device *dev)
 
 	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
 	DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
-	guc_fw_fetch(dev, guc_fw);
+	guc_fw_fetch(dev_priv, guc_fw);
 	/* status must now be FAIL or SUCCESS */
 }
 
 /**
  * intel_guc_fini() - clean up all allocated resources
- * @dev: drm device
+ * @dev_priv: i915 device private
 */
-void intel_guc_fini(struct drm_device *dev)
+void intel_guc_fini(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&dev_priv->drm.struct_mutex);
 	guc_interrupts_release(dev_priv);
 	i915_guc_submission_disable(dev_priv);
 	i915_guc_submission_fini(dev_priv);
@@ -790,7 +787,7 @@ void intel_guc_fini(struct drm_device *dev)
 	if (guc_fw->guc_fw_obj)
 		i915_gem_object_put(guc_fw->guc_fw_obj);
 	guc_fw->guc_fw_obj = NULL;
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
 }
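The fw = NULL initialisation in guc_fw_fetch() is what makes the shared fail:
path safe: request_firmware() can fail before ever writing to the pointer, and
release_firmware() accepts NULL. A minimal sketch of that contract, with a
hypothetical firmware path:

	#include <linux/firmware.h>

	const struct firmware *fw = NULL;
	int err = request_firmware(&fw, "i915/guc_example.bin", &pdev->dev);
	if (err)
		goto fail;	/* fw is still NULL here */
	/* ... consume fw->data / fw->size ... */
fail:
	release_firmware(fw);	/* no-op when fw == NULL */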
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -236,13 +236,13 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
 		memset(&engine->hangcheck.instdone, 0,
 		       sizeof(engine->hangcheck.instdone));
 
-		return HANGCHECK_ACTIVE;
+		return ENGINE_ACTIVE_HEAD;
 	}
 
 	if (!subunits_stuck(engine))
-		return HANGCHECK_ACTIVE;
+		return ENGINE_ACTIVE_SUBUNITS;
 
-	return HANGCHECK_HUNG;
+	return ENGINE_DEAD;
 }
 
 static enum intel_engine_hangcheck_action
@@ -253,11 +253,11 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
 	u32 tmp;
 
 	ha = head_stuck(engine, acthd);
-	if (ha != HANGCHECK_HUNG)
+	if (ha != ENGINE_DEAD)
 		return ha;
 
 	if (IS_GEN2(dev_priv))
-		return HANGCHECK_HUNG;
+		return ENGINE_DEAD;
 
 	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
@@ -270,25 +270,144 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
 				  "Kicking stuck wait on %s",
 				  engine->name);
 		I915_WRITE_CTL(engine, tmp);
-		return HANGCHECK_KICK;
+		return ENGINE_WAIT_KICK;
 	}
 
 	if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
 		switch (semaphore_passed(engine)) {
 		default:
-			return HANGCHECK_HUNG;
+			return ENGINE_DEAD;
 		case 1:
 			i915_handle_error(dev_priv, 0,
 					  "Kicking stuck semaphore on %s",
 					  engine->name);
 			I915_WRITE_CTL(engine, tmp);
-			return HANGCHECK_KICK;
+			return ENGINE_WAIT_KICK;
 		case 0:
-			return HANGCHECK_WAIT;
+			return ENGINE_WAIT;
 		}
 	}
 
-	return HANGCHECK_HUNG;
+	return ENGINE_DEAD;
+}
+
+static void hangcheck_load_sample(struct intel_engine_cs *engine,
+				  struct intel_engine_hangcheck *hc)
+{
+	/* We don't strictly need an irq-barrier here, as we are not
+	 * serving an interrupt request, be paranoid in case the
+	 * barrier has side-effects (such as preventing a broken
+	 * cacheline snoop) and so be sure that we can see the seqno
+	 * advance. If the seqno should stick, due to a stale
+	 * cacheline, we would erroneously declare the GPU hung.
+	 */
+	if (engine->irq_seqno_barrier)
+		engine->irq_seqno_barrier(engine);
+
+	hc->acthd = intel_engine_get_active_head(engine);
+	hc->seqno = intel_engine_get_seqno(engine);
+}
+
+static void hangcheck_store_sample(struct intel_engine_cs *engine,
+				   const struct intel_engine_hangcheck *hc)
+{
+	engine->hangcheck.acthd = hc->acthd;
+	engine->hangcheck.seqno = hc->seqno;
+	engine->hangcheck.action = hc->action;
+	engine->hangcheck.stalled = hc->stalled;
+}
+
+static enum intel_engine_hangcheck_action
+hangcheck_get_action(struct intel_engine_cs *engine,
+		     const struct intel_engine_hangcheck *hc)
+{
+	if (engine->hangcheck.seqno != hc->seqno)
+		return ENGINE_ACTIVE_SEQNO;
+
+	if (i915_seqno_passed(hc->seqno, intel_engine_last_submit(engine)))
+		return ENGINE_IDLE;
+
+	return engine_stuck(engine, hc->acthd);
+}
+
+static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
+					struct intel_engine_hangcheck *hc)
+{
+	unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT;
+
+	hc->action = hangcheck_get_action(engine, hc);
+
+	/* We always increment the progress
+	 * if the engine is busy and still processing
+	 * the same request, so that no single request
+	 * can run indefinitely (such as a chain of
+	 * batches). The only time we do not increment
+	 * the hangcheck score on this ring, if this
+	 * engine is in a legitimate wait for another
+	 * engine. In that case the waiting engine is a
+	 * victim and we want to be sure we catch the
+	 * right culprit. Then every time we do kick
+	 * the ring, make it as a progress as the seqno
+	 * advancement might ensure and if not, it
+	 * will catch the hanging engine.
+	 */
+
+	switch (hc->action) {
+	case ENGINE_IDLE:
+	case ENGINE_ACTIVE_SEQNO:
+		/* Clear head and subunit states on seqno movement */
+		hc->acthd = 0;
+
+		memset(&engine->hangcheck.instdone, 0,
+		       sizeof(engine->hangcheck.instdone));
+
+		/* Intentional fall through */
+	case ENGINE_WAIT_KICK:
+	case ENGINE_WAIT:
+		engine->hangcheck.action_timestamp = jiffies;
+		break;
+
+	case ENGINE_ACTIVE_HEAD:
+	case ENGINE_ACTIVE_SUBUNITS:
+		/* Seqno stuck with still active engine gets leeway,
+		 * in hopes that it is just a long shader.
+		 */
+		timeout = I915_SEQNO_DEAD_TIMEOUT;
+		break;
+
+	case ENGINE_DEAD:
+		break;
+
+	default:
+		MISSING_CASE(hc->action);
+	}
+
+	hc->stalled = time_after(jiffies,
+				 engine->hangcheck.action_timestamp + timeout);
+}
+
+static void hangcheck_declare_hang(struct drm_i915_private *i915,
+				   unsigned int hung,
+				   unsigned int stuck)
+{
+	struct intel_engine_cs *engine;
+	char msg[80];
+	unsigned int tmp;
+	int len;
+
+	/* If some rings hung but others were still busy, only
+	 * blame the hanging rings in the synopsis.
+	 */
+	if (stuck != hung)
+		hung &= ~stuck;
+	len = scnprintf(msg, sizeof(msg),
+			"%s on ", stuck == hung ? "No progress" : "Hang");
+	for_each_engine_masked(engine, i915, hung, tmp)
+		len += scnprintf(msg + len, sizeof(msg) - len,
+				 "%s, ", engine->name);
+	msg[len-2] = '\0';
+
+	return i915_handle_error(i915, hung, msg);
 }
 
 /*
@@ -308,10 +427,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	enum intel_engine_id id;
 	unsigned int hung = 0, stuck = 0;
 	int busy_count = 0;
-#define BUSY 1
-#define KICK 5
-#define HUNG 20
-#define ACTIVE_DECAY 15
 
 	if (!i915.enable_hangcheck)
 		return;
@@ -319,6 +434,9 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	if (!READ_ONCE(dev_priv->gt.awake))
 		return;
 
+	if (i915_terminally_wedged(&dev_priv->gpu_error))
+		return;
+
 	/* As enabling the GPU requires fairly extensive mmio access,
	 * periodically arm the mmio checker to see if we are triggering
	 * any invalid access.
@@ -326,112 +444,26 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
 
 	for_each_engine(engine, dev_priv, id) {
-		bool busy = intel_engine_has_waiter(engine);
-		u64 acthd;
-		u32 seqno;
-		u32 submit;
+		struct intel_engine_hangcheck cur_state, *hc = &cur_state;
+		const bool busy = intel_engine_has_waiter(engine);
 
 		semaphore_clear_deadlocks(dev_priv);
 
-		/* We don't strictly need an irq-barrier here, as we are not
-		 * serving an interrupt request, be paranoid in case the
-		 * barrier has side-effects (such as preventing a broken
-		 * cacheline snoop) and so be sure that we can see the seqno
-		 * advance. If the seqno should stick, due to a stale
-		 * cacheline, we would erroneously declare the GPU hung.
-		 */
-		if (engine->irq_seqno_barrier)
-			engine->irq_seqno_barrier(engine);
+		hangcheck_load_sample(engine, hc);
+		hangcheck_accumulate_sample(engine, hc);
+		hangcheck_store_sample(engine, hc);
 
-		acthd = intel_engine_get_active_head(engine);
-		seqno = intel_engine_get_seqno(engine);
-		submit = intel_engine_last_submit(engine);
-
-		if (engine->hangcheck.seqno == seqno) {
-			if (i915_seqno_passed(seqno, submit)) {
-				engine->hangcheck.action = HANGCHECK_IDLE;
-			} else {
-				/* We always increment the hangcheck score
-				 * if the engine is busy and still processing
-				 * the same request, so that no single request
-				 * can run indefinitely (such as a chain of
-				 * batches). The only time we do not increment
-				 * the hangcheck score on this ring, if this
-				 * engine is in a legitimate wait for another
-				 * engine. In that case the waiting engine is a
-				 * victim and we want to be sure we catch the
-				 * right culprit. Then every time we do kick
-				 * the ring, add a small increment to the
-				 * score so that we can catch a batch that is
-				 * being repeatedly kicked and so responsible
-				 * for stalling the machine.
-				 */
-				engine->hangcheck.action =
-					engine_stuck(engine, acthd);
-
-				switch (engine->hangcheck.action) {
-				case HANGCHECK_IDLE:
-				case HANGCHECK_WAIT:
-					break;
-				case HANGCHECK_ACTIVE:
-					engine->hangcheck.score += BUSY;
-					break;
-				case HANGCHECK_KICK:
-					engine->hangcheck.score += KICK;
-					break;
-				case HANGCHECK_HUNG:
-					engine->hangcheck.score += HUNG;
-					break;
-				}
-			}
-
-			if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
-				hung |= intel_engine_flag(engine);
-				if (engine->hangcheck.action != HANGCHECK_HUNG)
-					stuck |= intel_engine_flag(engine);
-			}
-		} else {
-			engine->hangcheck.action = HANGCHECK_ACTIVE;
-
-			/* Gradually reduce the count so that we catch DoS
-			 * attempts across multiple batches.
-			 */
-			if (engine->hangcheck.score > 0)
-				engine->hangcheck.score -= ACTIVE_DECAY;
-			if (engine->hangcheck.score < 0)
-				engine->hangcheck.score = 0;
-
-			/* Clear head and subunit states on seqno movement */
-			acthd = 0;
-
-			memset(&engine->hangcheck.instdone, 0,
-			       sizeof(engine->hangcheck.instdone));
+		if (engine->hangcheck.stalled) {
+			hung |= intel_engine_flag(engine);
+			if (hc->action != ENGINE_DEAD)
+				stuck |= intel_engine_flag(engine);
 		}
 
-		engine->hangcheck.seqno = seqno;
-		engine->hangcheck.acthd = acthd;
 		busy_count += busy;
 	}
 
-	if (hung) {
-		char msg[80];
-		unsigned int tmp;
-		int len;
-
-		/* If some rings hung but others were still busy, only
-		 * blame the hanging rings in the synopsis.
-		 */
-		if (stuck != hung)
-			hung &= ~stuck;
-		len = scnprintf(msg, sizeof(msg),
-				"%s on ", stuck == hung ? "No progress" : "Hang");
-		for_each_engine_masked(engine, dev_priv, hung, tmp)
-			len += scnprintf(msg + len, sizeof(msg) - len,
-					 "%s, ", engine->name);
-		msg[len-2] = '\0';
-
-		return i915_handle_error(dev_priv, hung, msg);
-	}
+	if (hung)
+		hangcheck_declare_hang(dev_priv, hung, stuck);
 
 	/* Reset timer in case GPU hangs without another request being added */
 	if (busy_count)
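The rewritten hangcheck is easiest to read as a three-phase pipeline run per
engine - load a sample, fold it into a decision, store it back - with stall
detection reduced to a single timestamp comparison. A condensed sketch of the
core test (the timeout constants are assumed to be jiffies values defined
elsewhere in the series):

	/* Illustration of the stall decision in the new scheme. */
	bool engine_active = (hc->action == ENGINE_ACTIVE_HEAD ||
			      hc->action == ENGINE_ACTIVE_SUBUNITS);
	unsigned long timeout = engine_active ? I915_SEQNO_DEAD_TIMEOUT :
						I915_ENGINE_DEAD_TIMEOUT;

	hc->stalled = time_after(jiffies,
				 engine->hangcheck.action_timestamp + timeout);

Compared with the old score arithmetic (BUSY/KICK/HUNG increments with decay),
the timestamp scheme has no tunable counters to mis-balance: progress simply
resets action_timestamp, and a stall is "no progress for longer than the
allowed window".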
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -133,6 +133,7 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
 }
 
 static void g4x_write_infoframe(struct drm_encoder *encoder,
+				const struct intel_crtc_state *crtc_state,
 				enum hdmi_infoframe_type type,
 				const void *frame, ssize_t len)
 {
@@ -187,13 +188,14 @@ static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
 }
 
 static void ibx_write_infoframe(struct drm_encoder *encoder,
+				const struct intel_crtc_state *crtc_state,
 				enum hdmi_infoframe_type type,
 				const void *frame, ssize_t len)
 {
 	const uint32_t *data = frame;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 	int i;
@@ -246,13 +248,14 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
 }
 
 static void cpt_write_infoframe(struct drm_encoder *encoder,
+				const struct intel_crtc_state *crtc_state,
 				enum hdmi_infoframe_type type,
 				const void *frame, ssize_t len)
 {
 	const uint32_t *data = frame;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 	int i;
@@ -303,13 +306,14 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
 }
 
 static void vlv_write_infoframe(struct drm_encoder *encoder,
+				const struct intel_crtc_state *crtc_state,
 				enum hdmi_infoframe_type type,
 				const void *frame, ssize_t len)
 {
 	const uint32_t *data = frame;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 	i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
 	int i;
@@ -361,14 +365,14 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
 }
 
 static void hsw_write_infoframe(struct drm_encoder *encoder,
+				const struct intel_crtc_state *crtc_state,
 				enum hdmi_infoframe_type type,
 				const void *frame, ssize_t len)
 {
 	const uint32_t *data = frame;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
 	i915_reg_t data_reg;
 	int i;
@@ -425,6 +429,7 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
 * bytes by one.
 */
 static void intel_write_infoframe(struct drm_encoder *encoder,
+				  const struct intel_crtc_state *crtc_state,
 				  union hdmi_infoframe *frame)
 {
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
@@ -443,26 +448,25 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
 	buffer[3] = 0;
 	len++;
 
-	intel_hdmi->write_infoframe(encoder, frame->any.type, buffer, len);
+	intel_hdmi->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len);
 }
 
 static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
-					 const struct drm_display_mode *adjusted_mode)
+					 const struct intel_crtc_state *crtc_state)
 {
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	union hdmi_infoframe frame;
 	int ret;
 
 	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
-						       adjusted_mode);
+						       &crtc_state->base.adjusted_mode);
 	if (ret < 0) {
 		DRM_ERROR("couldn't fill AVI infoframe\n");
 		return;
 	}
 
 	if (intel_hdmi->rgb_quant_range_selectable) {
-		if (intel_crtc->config->limited_color_range)
+		if (crtc_state->limited_color_range)
 			frame.avi.quantization_range =
 				HDMI_QUANTIZATION_RANGE_LIMITED;
 		else
@@ -470,10 +474,11 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
 				HDMI_QUANTIZATION_RANGE_FULL;
 	}
 
-	intel_write_infoframe(encoder, &frame);
+	intel_write_infoframe(encoder, crtc_state, &frame);
 }
 
-static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder,
+					 const struct intel_crtc_state *crtc_state)
 {
 	union hdmi_infoframe frame;
 	int ret;
@@ -486,27 +491,28 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
 
 	frame.spd.sdi = HDMI_SPD_SDI_PC;
 
-	intel_write_infoframe(encoder, &frame);
+	intel_write_infoframe(encoder, crtc_state, &frame);
 }
 
 static void
 intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
-			      const struct drm_display_mode *adjusted_mode)
+			      const struct intel_crtc_state *crtc_state)
 {
 	union hdmi_infoframe frame;
 	int ret;
 
 	ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
-							  adjusted_mode);
+							  &crtc_state->base.adjusted_mode);
 	if (ret < 0)
 		return;
 
-	intel_write_infoframe(encoder, &frame);
+	intel_write_infoframe(encoder, crtc_state, &frame);
 }
 
 static void g4x_set_infoframes(struct drm_encoder *encoder,
 			       bool enable,
-			       const struct drm_display_mode *adjusted_mode)
+			       const struct intel_crtc_state *crtc_state,
+			       const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
@@ -560,28 +566,22 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
 
-	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
-	intel_hdmi_set_spd_infoframe(encoder);
-	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+	intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+	intel_hdmi_set_spd_infoframe(encoder, crtc_state);
+	intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
 }
 
-static bool hdmi_sink_is_deep_color(struct drm_encoder *encoder)
+static bool hdmi_sink_is_deep_color(const struct drm_connector_state *conn_state)
 {
-	struct drm_device *dev = encoder->dev;
-	struct drm_connector *connector;
-
-	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+	struct drm_connector *connector = conn_state->connector;
 
 	/*
	 * HDMI cloning is only supported on g4x which doesn't
	 * support deep color or GCP infoframes anyway so no
	 * need to worry about multiple HDMI sinks here.
	 */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-		if (connector->encoder == encoder)
-			return connector->display_info.bpc > 8;
 
-	return false;
+	return connector->display_info.bpc > 8;
 }
 
 /*
@@ -627,15 +627,17 @@ static bool gcp_default_phase_possible(int pipe_bpp,
 		mode->crtc_htotal/2 % pixels_per_group == 0);
 }
 
-static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
+static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder,
+					 const struct intel_crtc_state *crtc_state,
+					 const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	i915_reg_t reg;
 	u32 val = 0;
 
 	if (HAS_DDI(dev_priv))
-		reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
+		reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder);
 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
 	else if (HAS_PCH_SPLIT(dev_priv))
@@ -644,12 +646,12 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
 		return false;
 
 	/* Indicate color depth whenever the sink supports deep color */
-	if (hdmi_sink_is_deep_color(encoder))
+	if (hdmi_sink_is_deep_color(conn_state))
 		val |= GCP_COLOR_INDICATION;
 
 	/* Enable default_phase whenever the display mode is suitably aligned */
-	if (gcp_default_phase_possible(crtc->config->pipe_bpp,
-				       &crtc->config->base.adjusted_mode))
+	if (gcp_default_phase_possible(crtc_state->pipe_bpp,
+				       &crtc_state->base.adjusted_mode))
 		val |= GCP_DEFAULT_PHASE_ENABLE;
 
 	I915_WRITE(reg, val);
@@ -659,10 +661,11 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
 
 static void ibx_set_infoframes(struct drm_encoder *encoder,
 			       bool enable,
-			       const struct drm_display_mode *adjusted_mode)
+			       const struct intel_crtc_state *crtc_state,
+			       const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
@@ -698,23 +701,24 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
 		 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
 		 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 
-	if (intel_hdmi_set_gcp_infoframe(encoder))
+	if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
 		val |= VIDEO_DIP_ENABLE_GCP;
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
 
-	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
-	intel_hdmi_set_spd_infoframe(encoder);
-	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+	intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+	intel_hdmi_set_spd_infoframe(encoder, crtc_state);
+	intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
 }
 
 static void cpt_set_infoframes(struct drm_encoder *encoder,
 			       bool enable,
-			       const struct drm_display_mode *adjusted_mode)
+			       const struct intel_crtc_state *crtc_state,
+			       const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
@@ -740,24 +744,25 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
 	val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
 		 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 
-	if (intel_hdmi_set_gcp_infoframe(encoder))
+	if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
 		val |= VIDEO_DIP_ENABLE_GCP;
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
 
-	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
-	intel_hdmi_set_spd_infoframe(encoder);
-	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+	intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+	intel_hdmi_set_spd_infoframe(encoder, crtc_state);
+	intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
 }
 
 static void vlv_set_infoframes(struct drm_encoder *encoder,
 			       bool enable,
-			       const struct drm_display_mode *adjusted_mode)
+			       const struct intel_crtc_state *crtc_state,
+			       const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 	i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
 	u32 val = I915_READ(reg);
@@ -792,25 +797,25 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
 		 VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
 		 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
 
-	if (intel_hdmi_set_gcp_infoframe(encoder))
+	if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
 		val |= VIDEO_DIP_ENABLE_GCP;
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
 
-	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
-	intel_hdmi_set_spd_infoframe(encoder);
-	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+	intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+	intel_hdmi_set_spd_infoframe(encoder, crtc_state);
+	intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
 }
 
 static void hsw_set_infoframes(struct drm_encoder *encoder,
 			       bool enable,
-			       const struct drm_display_mode *adjusted_mode)
+			       const struct intel_crtc_state *crtc_state,
+			       const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
+	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
 	u32 val = I915_READ(reg);
 
 	assert_hdmi_port_disabled(intel_hdmi);
@@ -825,15 +830,15 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
 		return;
 	}
 
-	if (intel_hdmi_set_gcp_infoframe(encoder))
+	if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
 		val |= VIDEO_DIP_ENABLE_GCP_HSW;
 
 	I915_WRITE(reg, val);
 	POSTING_READ(reg);
 
-	intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
-	intel_hdmi_set_spd_infoframe(encoder);
-	intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+	intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+	intel_hdmi_set_spd_infoframe(encoder, crtc_state);
+	intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
 }
 
 void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
@@ -852,31 +857,32 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
 					       adapter, enable);
 }
 
-static void intel_hdmi_prepare(struct intel_encoder *encoder)
+static void intel_hdmi_prepare(struct intel_encoder *encoder,
+			       const struct intel_crtc_state *crtc_state)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
 	u32 hdmi_val;
 
 	intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
 
 	hdmi_val = SDVO_ENCODING_HDMI;
-	if (!HAS_PCH_SPLIT(dev_priv) && crtc->config->limited_color_range)
+	if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
 		hdmi_val |= HDMI_COLOR_RANGE_16_235;
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
 		hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
 		hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
 
-	if (crtc->config->pipe_bpp > 24)
+	if (crtc_state->pipe_bpp > 24)
 		hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
 	else
 		hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
 
-	if (crtc->config->has_hdmi_sink)
+	if (crtc_state->has_hdmi_sink)
 		hdmi_val |= HDMI_MODE_SELECT_HDMI;
 
 	if (HAS_PCH_CPT(dev_priv))
@@ -979,9 +985,9 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
 				    struct intel_crtc_state *pipe_config,
 				    struct drm_connector_state *conn_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
 
-	WARN_ON(!crtc->config->has_hdmi_sink);
+	WARN_ON(!pipe_config->has_hdmi_sink);
 	DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
 			 pipe_name(crtc->pipe));
 	intel_audio_codec_enable(encoder, pipe_config, conn_state);
@@ -1015,14 +1021,13 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	u32 temp;
 
 	temp = I915_READ(intel_hdmi->hdmi_reg);
 
 	temp |= SDVO_ENABLE;
-	if (crtc->config->has_audio)
+	if (pipe_config->has_audio)
 		temp |= SDVO_AUDIO_ENABLE;
 
 	/*
@@ -1066,7 +1071,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 	enum pipe pipe = crtc->pipe;
 	u32 temp;
@@ -1128,7 +1133,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
 	struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
|
||||
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
|
||||
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
|
||||
u32 temp;
|
||||
|
||||
temp = I915_READ(intel_hdmi->hdmi_reg);
|
||||
@ -1170,7 +1175,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
|
||||
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
|
||||
}
|
||||
|
||||
intel_hdmi->set_infoframes(&encoder->base, false, NULL);
|
||||
intel_hdmi->set_infoframes(&encoder->base, false, old_crtc_state, old_conn_state);
|
||||
|
||||
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
|
||||
}
|
||||
@ -1246,7 +1251,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
|
||||
return MODE_CLOCK_HIGH;
|
||||
|
||||
/* BXT DPLL can't generate 223-240 MHz */
|
||||
if (IS_BROXTON(dev_priv) && clock > 223333 && clock < 240000)
|
||||
if (IS_GEN9_LP(dev_priv) && clock > 223333 && clock < 240000)
|
||||
return MODE_CLOCK_RANGE;
|
||||
|
||||
/* CHV DPLL can't generate 216-240 MHz */
|
||||
@ -1642,13 +1647,12 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
|
||||
struct drm_connector_state *conn_state)
|
||||
{
|
||||
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
|
||||
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
|
||||
|
||||
intel_hdmi_prepare(encoder);
|
||||
intel_hdmi_prepare(encoder, pipe_config);
|
||||
|
||||
intel_hdmi->set_infoframes(&encoder->base,
|
||||
pipe_config->has_hdmi_sink,
|
||||
adjusted_mode);
|
||||
pipe_config, conn_state);
|
||||
}
|
||||
|
||||
static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
|
||||
@ -1659,7 +1663,6 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
|
||||
struct intel_hdmi *intel_hdmi = &dport->hdmi;
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
|
||||
|
||||
vlv_phy_pre_encoder_enable(encoder);
|
||||
|
||||
@ -1669,7 +1672,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
|
||||
|
||||
intel_hdmi->set_infoframes(&encoder->base,
|
||||
pipe_config->has_hdmi_sink,
|
||||
adjusted_mode);
|
||||
pipe_config, conn_state);
|
||||
|
||||
g4x_enable_hdmi(encoder, pipe_config, conn_state);
|
||||
|
||||
@ -1680,7 +1683,7 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *pipe_config,
|
||||
struct drm_connector_state *conn_state)
|
||||
{
|
||||
intel_hdmi_prepare(encoder);
|
||||
intel_hdmi_prepare(encoder, pipe_config);
|
||||
|
||||
vlv_phy_pre_pll_enable(encoder);
|
||||
}
|
||||
@ -1689,7 +1692,7 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *pipe_config,
|
||||
struct drm_connector_state *conn_state)
|
||||
{
|
||||
intel_hdmi_prepare(encoder);
|
||||
intel_hdmi_prepare(encoder, pipe_config);
|
||||
|
||||
chv_phy_pre_pll_enable(encoder);
|
||||
}
|
||||
@ -1732,9 +1735,6 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
|
||||
struct intel_hdmi *intel_hdmi = &dport->hdmi;
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_crtc *intel_crtc =
|
||||
to_intel_crtc(encoder->base.crtc);
|
||||
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
|
||||
|
||||
chv_phy_pre_encoder_enable(encoder);
|
||||
|
||||
@ -1743,8 +1743,8 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
|
||||
chv_set_phy_signal_level(encoder, 128, 102, false);
|
||||
|
||||
intel_hdmi->set_infoframes(&encoder->base,
|
||||
intel_crtc->config->has_hdmi_sink,
|
||||
adjusted_mode);
|
||||
pipe_config->has_hdmi_sink,
|
||||
pipe_config, conn_state);
|
||||
|
||||
g4x_enable_hdmi(encoder, pipe_config, conn_state);
|
||||
|
||||
@ -1809,13 +1809,13 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
|
||||
|
||||
switch (port) {
|
||||
case PORT_B:
|
||||
if (IS_BROXTON(dev_priv))
|
||||
if (IS_GEN9_LP(dev_priv))
|
||||
ddc_pin = GMBUS_PIN_1_BXT;
|
||||
else
|
||||
ddc_pin = GMBUS_PIN_DPB;
|
||||
break;
|
||||
case PORT_C:
|
||||
if (IS_BROXTON(dev_priv))
|
||||
if (IS_GEN9_LP(dev_priv))
|
||||
ddc_pin = GMBUS_PIN_2_BXT;
|
||||
else
|
||||
ddc_pin = GMBUS_PIN_DPC;
|
||||
@ -1933,10 +1933,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
|
||||
}
|
||||
}
|
||||
|
||||
void intel_hdmi_init(struct drm_device *dev,
|
||||
void intel_hdmi_init(struct drm_i915_private *dev_priv,
|
||||
i915_reg_t hdmi_reg, enum port port)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_digital_port *intel_dig_port;
|
||||
struct intel_encoder *intel_encoder;
|
||||
struct intel_connector *intel_connector;
|
||||
@ -1953,8 +1952,9 @@ void intel_hdmi_init(struct drm_device *dev,
|
||||
|
||||
intel_encoder = &intel_dig_port->base;
|
||||
|
||||
drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
|
||||
DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port));
|
||||
drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
|
||||
&intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
|
||||
"HDMI %c", port_name(port));
|
||||
|
||||
intel_encoder->compute_config = intel_hdmi_compute_config;
|
||||
if (HAS_PCH_SPLIT(dev_priv)) {
|
||||
|
@@ -72,7 +72,7 @@ static const struct gmbus_pin gmbus_pins_bxt[] = {
 static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
 					     unsigned int pin)
 {
-	if (IS_BROXTON(dev_priv))
+	if (IS_GEN9_LP(dev_priv))
 		return &gmbus_pins_bxt[pin];
 	else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		return &gmbus_pins_skl[pin];
@@ -87,7 +87,7 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
 {
 	unsigned int size;
 
-	if (IS_BROXTON(dev_priv))
+	if (IS_GEN9_LP(dev_priv))
 		size = ARRAY_SIZE(gmbus_pins_bxt);
 	else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		size = ARRAY_SIZE(gmbus_pins_skl);
@@ -111,10 +111,8 @@ to_intel_gmbus(struct i2c_adapter *i2c)
 }
 
 void
-intel_i2c_reset(struct drm_device *dev)
+intel_i2c_reset(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	I915_WRITE(GMBUS0, 0);
 	I915_WRITE(GMBUS4, 0);
 }
@@ -141,7 +139,7 @@ static u32 get_reserved(struct intel_gmbus *bus)
 	u32 reserved = 0;
 
 	/* On most chips, these bits must be preserved in software. */
-	if (!IS_I830(dev_priv) && !IS_845G(dev_priv))
+	if (!IS_I830(dev_priv) && !IS_I845G(dev_priv))
 		reserved = I915_READ_NOTRACE(bus->gpio_reg) &
 			   (GPIO_DATA_PULLUP_DISABLE |
 			    GPIO_CLOCK_PULLUP_DISABLE);
@@ -211,7 +209,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
 					       adapter);
 	struct drm_i915_private *dev_priv = bus->dev_priv;
 
-	intel_i2c_reset(&dev_priv->drm);
+	intel_i2c_reset(dev_priv);
 	intel_i2c_quirk_set(dev_priv, true);
 	set_data(bus, 1);
 	set_clock(bus, 1);
@@ -617,11 +615,10 @@ static const struct i2c_algorithm gmbus_algorithm = {
 
 /**
  * intel_gmbus_setup - instantiate all Intel i2c GMBuses
- * @dev: DRM device
+ * @dev_priv: i915 device private
 */
-int intel_setup_gmbus(struct drm_device *dev)
+int intel_setup_gmbus(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct intel_gmbus *bus;
 	unsigned int pin;
@@ -678,7 +675,7 @@ int intel_setup_gmbus(struct drm_device *dev)
 		goto err;
 	}
 
-	intel_i2c_reset(&dev_priv->drm);
+	intel_i2c_reset(dev_priv);
 
 	return 0;
 
@@ -724,9 +721,8 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
 	mutex_unlock(&dev_priv->gmbus_mutex);
 }
 
-void intel_teardown_gmbus(struct drm_device *dev)
+void intel_teardown_gmbus(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_gmbus *bus;
 	unsigned int pin;
 
@@ -230,8 +230,6 @@ enum {
 
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 					    struct intel_engine_cs *engine);
-static int intel_lr_context_pin(struct i915_gem_context *ctx,
-				struct intel_engine_cs *engine);
 static void execlists_init_reg_state(u32 *reg_state,
 				     struct i915_gem_context *ctx,
 				     struct intel_engine_cs *engine,
@@ -415,7 +413,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
 {
 	return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
-		ctx->execlists_force_single_submission);
+		i915_gem_context_force_single_submission(ctx));
 }
 
 static bool can_merge_ctx(const struct i915_gem_context *prev,
@@ -514,15 +512,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		RB_CLEAR_NODE(&cursor->priotree.node);
 		cursor->priotree.priority = INT_MAX;
 
-		/* We keep the previous context alive until we retire the
-		 * following request. This ensures that any the context object
-		 * is still pinned for any residual writes the HW makes into it
-		 * on the context switch into the next object following the
-		 * breadcrumb. Otherwise, we may retire the context too early.
-		 */
-		cursor->previous_context = engine->last_context;
-		engine->last_context = cursor->ctx;
-
 		__i915_gem_request_submit(cursor);
 		last = cursor;
 		submit = true;
@@ -695,7 +684,6 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 
 static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 {
-	static DEFINE_MUTEX(lock);
 	struct intel_engine_cs *engine = NULL;
 	struct i915_dependency *dep, *p;
 	struct i915_dependency stack;
@@ -704,8 +692,8 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 	if (prio <= READ_ONCE(request->priotree.priority))
 		return;
 
-	/* Need global lock to use the temporary link inside i915_dependency */
-	mutex_lock(&lock);
+	/* Need BKL in order to use the temporary link inside i915_dependency */
+	lockdep_assert_held(&request->i915->drm.struct_mutex);
 
 	stack.signaler = &request->priotree;
 	list_add(&stack.dfs_link, &dfs);
@@ -734,7 +722,7 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 			if (prio > READ_ONCE(p->signaler->priority))
 				list_move_tail(&p->dfs_link, &dfs);
 
-		p = list_next_entry(dep, dfs_link);
+		list_safe_reset_next(dep, p, dfs_link);
 		if (!RB_EMPTY_NODE(&pt->node))
 			continue;
 
@@ -772,80 +760,14 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 	if (engine)
 		spin_unlock_irq(&engine->timeline->lock);
 
-	mutex_unlock(&lock);
-
 	/* XXX Do we need to preempt to make room for us and our deps? */
 }
 
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
-{
-	struct intel_engine_cs *engine = request->engine;
-	struct intel_context *ce = &request->ctx->engine[engine->id];
-	int ret;
-
-	/* Flush enough space to reduce the likelihood of waiting after
-	 * we start building the request - in which case we will just
-	 * have to repeat work.
-	 */
-	request->reserved_space += EXECLISTS_REQUEST_SIZE;
-
-	if (!ce->state) {
-		ret = execlists_context_deferred_alloc(request->ctx, engine);
-		if (ret)
-			return ret;
-	}
-
-	request->ring = ce->ring;
-
-	ret = intel_lr_context_pin(request->ctx, engine);
-	if (ret)
-		return ret;
-
-	if (i915.enable_guc_submission) {
-		/*
-		 * Check that the GuC has space for the request before
-		 * going any further, as the i915_add_request() call
-		 * later on mustn't fail ...
-		 */
-		ret = i915_guc_wq_reserve(request);
-		if (ret)
-			goto err_unpin;
-	}
-
-	ret = intel_ring_begin(request, 0);
-	if (ret)
-		goto err_unreserve;
-
-	if (!ce->initialised) {
-		ret = engine->init_context(request);
-		if (ret)
-			goto err_unreserve;
-
-		ce->initialised = true;
-	}
-
-	/* Note that after this point, we have committed to using
-	 * this request as it is being used to both track the
-	 * state of engine initialisation and liveness of the
-	 * golden renderstate above. Think twice before you try
-	 * to cancel/unwind this request now.
-	 */
-
-	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
-	return 0;
-
-err_unreserve:
-	if (i915.enable_guc_submission)
-		i915_guc_wq_unreserve(request);
-err_unpin:
-	intel_lr_context_unpin(request->ctx, engine);
-	return ret;
-}
-
-static int intel_lr_context_pin(struct i915_gem_context *ctx,
-				struct intel_engine_cs *engine)
+static int execlists_context_pin(struct intel_engine_cs *engine,
+				 struct i915_gem_context *ctx)
 {
 	struct intel_context *ce = &ctx->engine[engine->id];
+	unsigned int flags;
 	void *vaddr;
 	int ret;
 
@@ -854,8 +776,20 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 	if (ce->pin_count++)
 		return 0;
 
-	ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN,
-			   PIN_OFFSET_BIAS | GUC_WOPCM_TOP | PIN_GLOBAL);
+	if (!ce->state) {
+		ret = execlists_context_deferred_alloc(ctx, engine);
+		if (ret)
+			goto err;
+	}
+	GEM_BUG_ON(!ce->state);
+
+	flags = PIN_GLOBAL;
+	if (ctx->ggtt_offset_bias)
+		flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias;
+	if (i915_gem_context_is_kernel(ctx))
+		flags |= PIN_HIGH;
+
+	ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN, flags);
 	if (ret)
 		goto err;
 
@@ -865,7 +799,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 		goto unpin_vma;
 	}
 
-	ret = intel_ring_pin(ce->ring);
+	ret = intel_ring_pin(ce->ring, ctx->ggtt_offset_bias);
 	if (ret)
 		goto unpin_map;
 
@@ -895,8 +829,8 @@ err:
 	return ret;
 }
 
-void intel_lr_context_unpin(struct i915_gem_context *ctx,
-			    struct intel_engine_cs *engine)
+static void execlists_context_unpin(struct intel_engine_cs *engine,
+				    struct i915_gem_context *ctx)
 {
 	struct intel_context *ce = &ctx->engine[engine->id];
 
@@ -914,6 +848,63 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 	i915_gem_context_put(ctx);
 }
 
+static int execlists_request_alloc(struct drm_i915_gem_request *request)
+{
+	struct intel_engine_cs *engine = request->engine;
+	struct intel_context *ce = &request->ctx->engine[engine->id];
+	int ret;
+
+	GEM_BUG_ON(!ce->pin_count);
+
+	/* Flush enough space to reduce the likelihood of waiting after
+	 * we start building the request - in which case we will just
+	 * have to repeat work.
+	 */
+	request->reserved_space += EXECLISTS_REQUEST_SIZE;
+
+	GEM_BUG_ON(!ce->ring);
+	request->ring = ce->ring;
+
+	if (i915.enable_guc_submission) {
+		/*
+		 * Check that the GuC has space for the request before
+		 * going any further, as the i915_add_request() call
+		 * later on mustn't fail ...
+		 */
+		ret = i915_guc_wq_reserve(request);
+		if (ret)
+			goto err;
+	}
+
+	ret = intel_ring_begin(request, 0);
+	if (ret)
+		goto err_unreserve;
+
+	if (!ce->initialised) {
+		ret = engine->init_context(request);
+		if (ret)
+			goto err_unreserve;
+
+		ce->initialised = true;
+	}
+
+	/* Note that after this point, we have committed to using
+	 * this request as it is being used to both track the
+	 * state of engine initialisation and liveness of the
+	 * golden renderstate above. Think twice before you try
+	 * to cancel/unwind this request now.
+	 */
+
+	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
+	return 0;
+
+err_unreserve:
+	if (i915.enable_guc_submission)
+		i915_guc_wq_unreserve(request);
+err:
+	return ret;
+}
+
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
@@ -1246,7 +1237,7 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
 	struct i915_vma *vma;
 	int err;
 
-	obj = i915_gem_object_create(&engine->i915->drm, PAGE_ALIGN(size));
+	obj = i915_gem_object_create(engine->i915, PAGE_ALIGN(size));
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
@@ -1344,15 +1335,6 @@ out:
 	return ret;
 }
 
-static void lrc_init_hws(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-
-	I915_WRITE(RING_HWS_PGA(engine->mmio_base),
-		   engine->status_page.ggtt_offset);
-	POSTING_READ(RING_HWS_PGA(engine->mmio_base));
-}
-
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
@@ -1362,20 +1344,19 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
-	lrc_init_hws(engine);
-
 	intel_engine_reset_breadcrumbs(engine);
+	intel_engine_init_hangcheck(engine);
 
 	I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
-
 	I915_WRITE(RING_MODE_GEN7(engine),
 		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
 		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
+	I915_WRITE(RING_HWS_PGA(engine->mmio_base),
+		   engine->status_page.ggtt_offset);
+	POSTING_READ(RING_HWS_PGA(engine->mmio_base));
 
 	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
 
-	intel_engine_init_hangcheck(engine);
-
 	/* After a GPU reset, we may have requests to replay */
 	if (!execlists_elsp_idle(engine)) {
 		engine->execlist_port[0].count = 0;
@@ -1794,13 +1775,12 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 	if (engine->cleanup)
 		engine->cleanup(engine);
 
-	intel_engine_cleanup_common(engine);
-
 	if (engine->status_page.vma) {
 		i915_gem_object_unpin_map(engine->status_page.vma->obj);
 		engine->status_page.vma = NULL;
 	}
-	intel_lr_context_unpin(dev_priv->kernel_context, engine);
+
+	intel_engine_cleanup_common(engine);
 
 	lrc_destroy_wa_ctx_obj(engine);
 	engine->i915 = NULL;
@@ -1825,6 +1805,12 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 	/* Default vfuncs which can be overriden by each engine. */
 	engine->init_hw = gen8_init_common_ring;
 	engine->reset_hw = reset_common_ring;
+
+	engine->context_pin = execlists_context_pin;
+	engine->context_unpin = execlists_context_unpin;
+
+	engine->request_alloc = execlists_request_alloc;
+
 	engine->emit_flush = gen8_emit_flush;
 	engine->emit_breadcrumb = gen8_emit_breadcrumb;
 	engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
@@ -1907,18 +1893,6 @@ logical_ring_init(struct intel_engine_cs *engine)
 	if (ret)
 		goto error;
 
-	ret = execlists_context_deferred_alloc(dctx, engine);
-	if (ret)
-		goto error;
-
-	/* As this is the default context, always pin it */
-	ret = intel_lr_context_pin(dctx, engine);
-	if (ret) {
-		DRM_ERROR("Failed to pin context for %s: %d\n",
-			  engine->name, ret);
-		goto error;
-	}
-
 	/* And setup the hardware status page. */
 	ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
 	if (ret) {
@@ -2240,7 +2214,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	/* One extra page as the sharing data between driver and GuC */
 	context_size += PAGE_SIZE * LRC_PPHWSP_PN;
 
-	ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size);
+	ctx_obj = i915_gem_object_create(ctx->i915, context_size);
 	if (IS_ERR(ctx_obj)) {
 		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
 		return PTR_ERR(ctx_obj);
@@ -63,14 +63,12 @@ enum {
 };
 
 /* Logical Rings */
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
-int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
 void intel_logical_ring_stop(struct intel_engine_cs *engine);
 void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
 int logical_render_ring_init(struct intel_engine_cs *engine);
 int logical_xcs_ring_init(struct intel_engine_cs *engine);
 
-int intel_engines_init(struct drm_device *dev);
+int intel_engines_init(struct drm_i915_private *dev_priv);
 
 /* Logical Ring Contexts */
 
@@ -79,13 +77,10 @@ int intel_engines_init(struct drm_device *dev);
 #define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1)
 #define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
 
+struct drm_i915_private;
 struct i915_gem_context;
 
 uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
-void intel_lr_context_unpin(struct i915_gem_context *ctx,
-			    struct intel_engine_cs *engine);
-
-struct drm_i915_private;
 
 void intel_lr_context_resume(struct drm_i915_private *dev_priv);
 uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
@@ -35,21 +35,59 @@ static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon)
 	return &dig_port->dp;
 }
 
+static const char *lspcon_mode_name(enum drm_lspcon_mode mode)
+{
+	switch (mode) {
+	case DRM_LSPCON_MODE_PCON:
+		return "PCON";
+	case DRM_LSPCON_MODE_LS:
+		return "LS";
+	case DRM_LSPCON_MODE_INVALID:
+		return "INVALID";
+	default:
+		MISSING_CASE(mode);
+		return "INVALID";
+	}
+}
+
 static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
 {
-	enum drm_lspcon_mode current_mode = DRM_LSPCON_MODE_INVALID;
+	enum drm_lspcon_mode current_mode;
 	struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
 
-	if (drm_lspcon_get_mode(adapter, &current_mode))
+	if (drm_lspcon_get_mode(adapter, &current_mode)) {
 		DRM_ERROR("Error reading LSPCON mode\n");
-	else
-		DRM_DEBUG_KMS("Current LSPCON mode %s\n",
-			current_mode == DRM_LSPCON_MODE_PCON ? "PCON" : "LS");
+		return DRM_LSPCON_MODE_INVALID;
+	}
 	return current_mode;
 }
 
+static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
+					     enum drm_lspcon_mode mode)
+{
+	enum drm_lspcon_mode current_mode;
+
+	current_mode = lspcon_get_current_mode(lspcon);
+	if (current_mode == mode || current_mode == DRM_LSPCON_MODE_INVALID)
+		goto out;
+
+	DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n",
+		      lspcon_mode_name(mode));
+
+	wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode ||
+		 current_mode == DRM_LSPCON_MODE_INVALID, 100);
+	if (current_mode != mode)
+		DRM_DEBUG_KMS("LSPCON mode hasn't settled\n");
+
+out:
+	DRM_DEBUG_KMS("Current LSPCON mode %s\n",
+		      lspcon_mode_name(current_mode));
+
+	return current_mode;
+}
+
 static int lspcon_change_mode(struct intel_lspcon *lspcon,
-			      enum drm_lspcon_mode mode, bool force)
+			      enum drm_lspcon_mode mode)
 {
 	int err;
 	enum drm_lspcon_mode current_mode;
@@ -77,10 +115,30 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon,
 	return 0;
 }
 
+static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
+{
+	uint8_t rev;
+
+	if (drm_dp_dpcd_readb(&lspcon_to_intel_dp(lspcon)->aux, DP_DPCD_REV,
+			      &rev) != 1) {
+		DRM_DEBUG_KMS("Native AUX CH down\n");
+		return false;
+	}
+
+	DRM_DEBUG_KMS("Native AUX CH up, DPCD version: %d.%d\n",
+		      rev >> 4, rev & 0xf);
+
+	return true;
+}
+
 static bool lspcon_probe(struct intel_lspcon *lspcon)
 {
 	enum drm_dp_dual_mode_type adaptor_type;
 	struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+	enum drm_lspcon_mode expected_mode;
+
+	expected_mode = lspcon_wake_native_aux_ch(lspcon) ?
+			DRM_LSPCON_MODE_PCON : DRM_LSPCON_MODE_LS;
 
 	/* Lets probe the adaptor and check its type */
 	adaptor_type = drm_dp_dual_mode_detect(adapter);
@@ -92,7 +150,7 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
 
 	/* Yay ... got a LSPCON device */
 	DRM_DEBUG_KMS("LSPCON detected\n");
-	lspcon->mode = lspcon_get_current_mode(lspcon);
+	lspcon->mode = lspcon_wait_mode(lspcon, expected_mode);
 	lspcon->active = true;
 	return true;
 }
@@ -132,14 +190,29 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
 
 void lspcon_resume(struct intel_lspcon *lspcon)
 {
-	lspcon_resume_in_pcon_wa(lspcon);
+	enum drm_lspcon_mode expected_mode;
 
-	if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON, true))
+	if (lspcon_wake_native_aux_ch(lspcon)) {
+		expected_mode = DRM_LSPCON_MODE_PCON;
+		lspcon_resume_in_pcon_wa(lspcon);
+	} else {
+		expected_mode = DRM_LSPCON_MODE_LS;
+	}
+
+	if (lspcon_wait_mode(lspcon, expected_mode) == DRM_LSPCON_MODE_PCON)
+		return;
+
+	if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON))
 		DRM_ERROR("LSPCON resume failed\n");
 	else
 		DRM_DEBUG_KMS("LSPCON resume success\n");
 }
 
+void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
+{
+	lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON);
+}
+
 bool lspcon_init(struct intel_digital_port *intel_dig_port)
 {
 	struct intel_dp *dp = &intel_dig_port->dp;
@@ -166,8 +239,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
 	 * 2.0 sinks.
 	 */
 	if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) {
-		if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON,
-				       true) < 0) {
+		if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
 			DRM_ERROR("LSPCON mode change to PCON failed\n");
 			return false;
 		}
@@ -460,13 +460,13 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
 static enum drm_connector_status
 intel_lvds_detect(struct drm_connector *connector, bool force)
 {
-	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	enum drm_connector_status status;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
 		      connector->base.id, connector->name);
 
-	status = intel_panel_detect(dev);
+	status = intel_panel_detect(dev_priv);
 	if (status != connector_status_unknown)
 		return status;
 
@@ -971,9 +971,9 @@ static bool intel_lvds_supported(struct drm_i915_private *dev_priv)
 * Create the connector, register the LVDS DDC bus, and try to figure out what
 * modes we can display on the LVDS panel (if present).
 */
-void intel_lvds_init(struct drm_device *dev)
+void intel_lvds_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_lvds_encoder *lvds_encoder;
 	struct intel_encoder *intel_encoder;
 	struct intel_lvds_connector *lvds_connector;
@@ -182,7 +182,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
 		table->size = ARRAY_SIZE(skylake_mocs_table);
 		table->table = skylake_mocs_table;
 		result = true;
-	} else if (IS_BROXTON(dev_priv)) {
+	} else if (IS_GEN9_LP(dev_priv)) {
 		table->size = ARRAY_SIZE(broxton_mocs_table);
 		table->table = broxton_mocs_table;
 		result = true;
@@ -380,7 +380,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 
 /**
 * intel_mocs_init_l3cc_table() - program the mocs control table
- * @dev:      The the device to be programmed.
+ * @dev_priv: i915 device private
 *
 * This function simply programs the mocs registers for the given table
 * starting at the given address. This register set is programmed in pairs.
@@ -392,9 +392,8 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 *
 * Return: Nothing.
 */
-void intel_mocs_init_l3cc_table(struct drm_device *dev)
+void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_mocs_table table;
 	unsigned int i;
 
@@ -53,7 +53,7 @@
 #include "i915_drv.h"
 
 int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
-void intel_mocs_init_l3cc_table(struct drm_device *dev);
+void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv);
 int intel_mocs_init_engine(struct intel_engine_cs *engine);
 
 #endif
@@ -187,6 +187,29 @@ struct intel_overlay {
 	struct i915_gem_active last_flip;
 };
 
+static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
+				      bool enable)
+{
+	struct pci_dev *pdev = dev_priv->drm.pdev;
+	u8 val;
+
+	/* WA_OVERLAY_CLKGATE:alm */
+	if (enable)
+		I915_WRITE(DSPCLK_GATE_D, 0);
+	else
+		I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+
+	/* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
+	pci_bus_read_config_byte(pdev->bus,
+				 PCI_DEVFN(0, 0), I830_CLOCK_GATE, &val);
+	if (enable)
+		val &= ~I830_L2_CACHE_CLOCK_GATE_DISABLE;
+	else
+		val |= I830_L2_CACHE_CLOCK_GATE_DISABLE;
+	pci_bus_write_config_byte(pdev->bus,
+				  PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
+}
+
 static struct overlay_registers __iomem *
 intel_overlay_map_regs(struct intel_overlay *overlay)
 {
@@ -262,6 +285,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
 	overlay->active = true;
 
+	if (IS_I830(dev_priv))
+		i830_overlay_clock_gating(dev_priv, false);
+
 	ring = req->ring;
 	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
 	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
@@ -272,8 +298,30 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	return intel_overlay_do_wait_request(overlay, req, NULL);
 }
 
+static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
+				       struct i915_vma *vma)
+{
+	enum pipe pipe = overlay->crtc->pipe;
+
+	WARN_ON(overlay->old_vma);
+
+	i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
+			  vma ? vma->obj : NULL,
+			  INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+	intel_frontbuffer_flip_prepare(overlay->i915,
+				       INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+	overlay->old_vma = overlay->vma;
+	if (vma)
+		overlay->vma = i915_vma_get(vma);
+	else
+		overlay->vma = NULL;
+}
+
 /* overlay needs to be enabled in OCMD reg */
 static int intel_overlay_continue(struct intel_overlay *overlay,
+				  struct i915_vma *vma,
 				  bool load_polyphase_filter)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
@@ -308,27 +356,35 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	intel_ring_emit(ring, flip_addr);
 	intel_ring_advance(ring);
 
+	intel_overlay_flip_prepare(overlay, vma);
+
 	intel_overlay_submit_request(overlay, req, NULL);
 
 	return 0;
 }
 
+static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
+{
+	struct i915_vma *vma;
+
+	vma = fetch_and_zero(&overlay->old_vma);
+	if (WARN_ON(!vma))
+		return;
+
+	intel_frontbuffer_flip_complete(overlay->i915,
+					INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+
+	i915_gem_object_unpin_from_display_plane(vma);
+	i915_vma_put(vma);
+}
+
 static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
 					       struct drm_i915_gem_request *req)
 {
 	struct intel_overlay *overlay =
 		container_of(active, typeof(*overlay), last_flip);
-	struct i915_vma *vma;
 
-	vma = fetch_and_zero(&overlay->old_vma);
-	if (WARN_ON(!vma))
-		return;
-
-	i915_gem_track_fb(vma->obj, NULL,
-			  INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
-
-	i915_gem_object_unpin_from_display_plane(vma);
-	i915_vma_put(vma);
+	intel_overlay_release_old_vma(overlay);
 }
 
 static void intel_overlay_off_tail(struct i915_gem_active *active,
@@ -336,25 +392,21 @@ static void intel_overlay_off_tail(struct i915_gem_active *active,
 {
 	struct intel_overlay *overlay =
 		container_of(active, typeof(*overlay), last_flip);
-	struct i915_vma *vma;
+	struct drm_i915_private *dev_priv = overlay->i915;
 
-	/* never have the overlay hw on without showing a frame */
-	vma = fetch_and_zero(&overlay->vma);
-	if (WARN_ON(!vma))
-		return;
-
-	i915_gem_object_unpin_from_display_plane(vma);
-	i915_vma_put(vma);
+	intel_overlay_release_old_vma(overlay);
 
 	overlay->crtc->overlay = NULL;
 	overlay->crtc = NULL;
 	overlay->active = false;
+
+	if (IS_I830(dev_priv))
+		i830_overlay_clock_gating(dev_priv, true);
 }
 
 /* overlay needs to be disabled in OCMD reg */
 static int intel_overlay_off(struct intel_overlay *overlay)
 {
-	struct drm_i915_private *dev_priv = overlay->i915;
 	struct drm_i915_gem_request *req;
 	struct intel_ring *ring;
 	u32 flip_addr = overlay->flip_addr;
@@ -379,25 +431,21 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	}
 
 	ring = req->ring;
 
 	/* wait for overlay to go idle */
 	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	intel_ring_emit(ring, flip_addr);
 	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 
 	/* turn overlay off */
-	if (IS_I830(dev_priv)) {
-		/* Workaround: Don't disable the overlay fully, since otherwise
-		 * it dies on the next OVERLAY_ON cmd. */
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_NOOP);
-	} else {
-		intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-		intel_ring_emit(ring, flip_addr);
-		intel_ring_emit(ring,
-				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	}
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+	intel_ring_emit(ring, flip_addr);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+
 	intel_ring_advance(ring);
 
+	intel_overlay_flip_prepare(overlay, NULL);
+
 	return intel_overlay_do_wait_request(overlay, req,
 					     intel_overlay_off_tail);
 }
@@ -542,51 +590,57 @@ static int uv_vsubsampling(u32 format)
 
 static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
 {
-	u32 mask, shift, ret;
-	if (IS_GEN2(dev_priv)) {
-		mask = 0x1f;
-		shift = 5;
-	} else {
-		mask = 0x3f;
-		shift = 6;
-	}
-	ret = ((offset + width + mask) >> shift) - (offset >> shift);
-	if (!IS_GEN2(dev_priv))
-		ret <<= 1;
-	ret -= 1;
-	return ret << 2;
+	u32 sw;
+
+	if (IS_GEN2(dev_priv))
+		sw = ALIGN((offset & 31) + width, 32);
+	else
+		sw = ALIGN((offset & 63) + width, 64);
+
+	if (sw == 0)
+		return 0;
+
+	return (sw - 32) >> 3;
 }
 
-static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
-	0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0,
-	0x3000, 0xb500, 0x19d0, 0x1880, 0xb440,
-	0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0,
-	0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380,
-	0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320,
-	0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0,
-	0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260,
-	0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200,
-	0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0,
-	0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160,
-	0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120,
-	0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0,
-	0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0,
-	0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
-	0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
-	0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
-	0xb000, 0x3000, 0x0800, 0x3000, 0xb000
+static const u16 y_static_hcoeffs[N_PHASES][N_HORIZ_Y_TAPS] = {
+	[ 0] = { 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0, },
+	[ 1] = { 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440, },
+	[ 2] = { 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0, },
+	[ 3] = { 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380, },
+	[ 4] = { 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320, },
+	[ 5] = { 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0, },
+	[ 6] = { 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260, },
+	[ 7] = { 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200, },
+	[ 8] = { 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0, },
+	[ 9] = { 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160, },
+	[10] = { 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120, },
+	[11] = { 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0, },
+	[12] = { 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0, },
+	[13] = { 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060, },
+	[14] = { 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040, },
+	[15] = { 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020, },
+	[16] = { 0xb000, 0x3000, 0x0800, 0x3000, 0xb000, },
 };
 
-static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
-	0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
-	0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
-	0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880,
-	0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00,
-	0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0,
-	0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
-	0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
-	0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
-	0x3000, 0x0800, 0x3000
+static const u16 uv_static_hcoeffs[N_PHASES][N_HORIZ_UV_TAPS] = {
+	[ 0] = { 0x3000, 0x1800, 0x1800, },
+	[ 1] = { 0xb000, 0x18d0, 0x2e60, },
+	[ 2] = { 0xb000, 0x1990, 0x2ce0, },
+	[ 3] = { 0xb020, 0x1a68, 0x2b40, },
+	[ 4] = { 0xb040, 0x1b20, 0x29e0, },
+	[ 5] = { 0xb060, 0x1bd8, 0x2880, },
+	[ 6] = { 0xb080, 0x1c88, 0x3e60, },
+	[ 7] = { 0xb0a0, 0x1d28, 0x3c00, },
+	[ 8] = { 0xb0c0, 0x1db8, 0x39e0, },
+	[ 9] = { 0xb0e0, 0x1e40, 0x37e0, },
+	[10] = { 0xb100, 0x1eb8, 0x3620, },
+	[11] = { 0xb100, 0x1f18, 0x34a0, },
+	[12] = { 0xb100, 0x1f68, 0x3360, },
+	[13] = { 0xb0e0, 0x1fa8, 0x3240, },
+	[14] = { 0xb0c0, 0x1fe0, 0x3140, },
+	[15] = { 0xb060, 0x1ff0, 0x30a0, },
+	[16] = { 0x3000, 0x0800, 0x3000, },
 };
 
 static void update_polyphase_filter(struct overlay_registers __iomem *regs)
@@ -659,16 +713,19 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
 static void update_colorkey(struct intel_overlay *overlay,
 			    struct overlay_registers __iomem *regs)
 {
-	const struct drm_framebuffer *fb =
-		overlay->crtc->base.primary->fb;
+	const struct intel_plane_state *state =
+		to_intel_plane_state(overlay->crtc->base.primary->state);
 	u32 key = overlay->color_key;
-	u32 flags;
+	u32 format = 0;
+	u32 flags = 0;
 
-	flags = 0;
 	if (overlay->color_key_enabled)
 		flags |= DST_KEY_ENABLE;
 
-	switch (fb->format->format) {
+	if (state->base.visible)
+		format = state->base.fb->format->format;
+
+	switch (format) {
 	case DRM_FORMAT_C8:
 		key = 0;
 		flags |= CLK_RGB8I_MASK;
@@ -834,18 +891,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 
 	intel_overlay_unmap_regs(overlay, regs);
 
-	ret = intel_overlay_continue(overlay, scale_changed);
+	ret = intel_overlay_continue(overlay, vma, scale_changed);
 	if (ret)
 		goto out_unpin;
 
-	i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
-			  vma->obj, INTEL_FRONTBUFFER_OVERLAY(pipe));
-
-	overlay->old_vma = overlay->vma;
-	overlay->vma = vma;
-
-	intel_frontbuffer_flip(dev_priv, INTEL_FRONTBUFFER_OVERLAY(pipe));
-
 	return 0;
 
 out_unpin:
@@ -919,12 +968,13 @@ static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
 static int check_overlay_dst(struct intel_overlay *overlay,
 			     struct drm_intel_overlay_put_image *rec)
 {
-	struct drm_display_mode *mode = &overlay->crtc->base.mode;
+	const struct intel_crtc_state *pipe_config =
+		overlay->crtc->config;
 
-	if (rec->dst_x < mode->hdisplay &&
-	    rec->dst_x + rec->dst_width <= mode->hdisplay &&
-	    rec->dst_y < mode->vdisplay &&
-	    rec->dst_y + rec->dst_height <= mode->vdisplay)
+	if (rec->dst_x < pipe_config->pipe_src_w &&
+	    rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w &&
+	    rec->dst_y < pipe_config->pipe_src_h &&
+	    rec->dst_y + rec->dst_height <= pipe_config->pipe_src_h)
 		return 0;
 	else
 		return -EINVAL;
@@ -956,7 +1006,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
 	u32 tmp;
 
 	/* check src dimensions */
-	if (IS_845G(dev_priv) || IS_I830(dev_priv)) {
+	if (IS_I845G(dev_priv) || IS_I830(dev_priv)) {
 		if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
 		    rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
 			return -EINVAL;
@@ -1008,7 +1058,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
 		return -EINVAL;
 
 	/* stride checking */
-	if (IS_I830(dev_priv) || IS_845G(dev_priv))
+	if (IS_I830(dev_priv) || IS_I845G(dev_priv))
 		stride_mask = 255;
 	else
 		stride_mask = 63;
@@ -1056,33 +1106,6 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
 	return 0;
 }
 
-/**
- * Return the pipe currently connected to the panel fitter,
- * or -1 if the panel fitter is not present or not in use
- */
-static int intel_panel_fitter_pipe(struct drm_i915_private *dev_priv)
-{
-	u32 pfit_control;
-
-	/* i830 doesn't have a panel fitter */
-	if (INTEL_GEN(dev_priv) <= 3 &&
-	    (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
-		return -1;
-
-	pfit_control = I915_READ(PFIT_CONTROL);
-
-	/* See if the panel fitter is in use */
-	if ((pfit_control & PFIT_ENABLE) == 0)
-		return -1;
-
-	/* 965 can place panel fitter on either pipe */
-	if (IS_GEN4(dev_priv))
-		return (pfit_control >> 29) & 0x3;
-
-	/* older chips can only use pipe 1 */
-	return 1;
-}
-
 int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv)
 {
@@ -1144,7 +1167,6 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
 			goto out_unlock;
 
 		if (overlay->crtc != crtc) {
-			struct drm_display_mode *mode = &crtc->base.mode;
 			ret = intel_overlay_switch_off(overlay);
 			if (ret != 0)
 				goto out_unlock;
@@ -1157,8 +1179,8 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
 			crtc->overlay = overlay;
 
 			/* line too wide, i.e. one-line-mode */
-			if (mode->hdisplay > 1024 &&
-			    intel_panel_fitter_pipe(dev_priv) == crtc->pipe) {
+			if (crtc->config->pipe_src_w > 1024 &&
+			    crtc->config->gmch_pfit.control & PFIT_ENABLE) {
 				overlay->pfit_active = true;
 				update_pfit_vscale_ratio(overlay);
 			} else
@@ -1213,6 +1235,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
 
 	mutex_unlock(&dev->struct_mutex);
 	drm_modeset_unlock_all(dev);
+	i915_gem_object_put(new_bo);
 
 	kfree(params);
 
@@ -1390,10 +1413,9 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 
 	reg_bo = NULL;
 	if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
-		reg_bo = i915_gem_object_create_stolen(&dev_priv->drm,
-						       PAGE_SIZE);
+		reg_bo = i915_gem_object_create_stolen(dev_priv, PAGE_SIZE);
 	if (reg_bo == NULL)
-		reg_bo = i915_gem_object_create(&dev_priv->drm, PAGE_SIZE);
+		reg_bo = i915_gem_object_create(dev_priv, PAGE_SIZE);
 	if (IS_ERR(reg_bo))
 		goto out_free;
 	overlay->reg_bo = reg_bo;
@@ -48,7 +48,7 @@ intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
 
 /**
 * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
- * @dev: drm device
+ * @dev_priv: i915 device instance
 * @fixed_mode : panel native mode
 * @connector: LVDS/eDP connector
 *
@@ -56,7 +56,7 @@ intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
 * Find the reduced downclock for LVDS/eDP in EDID.
 */
 struct drm_display_mode *
-intel_find_panel_downclock(struct drm_device *dev,
+intel_find_panel_downclock(struct drm_i915_private *dev_priv,
 			   struct drm_display_mode *fixed_mode,
 			   struct drm_connector *connector)
 {
@@ -94,7 +94,7 @@ intel_find_panel_downclock(struct drm_device *dev,
 	}
 
 	if (temp_downclock < fixed_mode->clock)
-		return drm_mode_duplicate(dev, tmp_mode);
+		return drm_mode_duplicate(&dev_priv->drm, tmp_mode);
 	else
 		return NULL;
 }
@@ -375,10 +375,8 @@ out:
 }
 
 enum drm_connector_status
-intel_panel_detect(struct drm_device *dev)
+intel_panel_detect(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	/* Assume that the BIOS does not lie through the OpRegion... */
 	if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
 		return *dev_priv->opregion.lid_state & 0x1 ?
@@ -1039,10 +1037,7 @@ static void bxt_enable_backlight(struct intel_connector *connector)
 	enum pipe pipe = intel_get_pipe_from_connector(connector);
 	u32 pwm_ctl, val;
 
-	/* To use 2nd set of backlight registers, utility pin has to be
-	 * enabled with PWM mode.
-	 * The field should only be changed when the utility pin is disabled
-	 */
+	/* Controller 1 uses the utility pin. */
 	if (panel->backlight.controller == 1) {
 		val = I915_READ(UTIL_PIN_CTL);
 		if (val & UTIL_PIN_ENABLE) {
@@ -1332,8 +1327,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 */
 static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
-	struct drm_device *dev = connector->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	int clock;
 
 	if (IS_G4X(dev_priv))
@@ -1608,19 +1602,11 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
 	struct intel_panel *panel = &connector->panel;
 	u32 pwm_ctl, val;
 
-	/*
-	 * For BXT hard coding the Backlight controller to 0.
-	 * TODO : Read the controller value from VBT and generalize
-	 */
-	panel->backlight.controller = 0;
+	panel->backlight.controller = dev_priv->vbt.backlight.controller;
 
 	pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
 
-	/* Keeping the check if controller 1 is to be programmed.
-	 * This will come into affect once the VBT parsing
-	 * is fixed for controller selection, and controller 1 is used
-	 * for a prticular display configuration.
-	 */
+	/* Controller 1 uses the utility pin. */
 	if (panel->backlight.controller == 1) {
 		val = I915_READ(UTIL_PIN_CTL);
 		panel->backlight.util_pin_active_low =
@@ -1756,7 +1742,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
 	    intel_dsi_dcs_init_backlight_funcs(connector) == 0)
 		return;
 
-	if (IS_BROXTON(dev_priv)) {
+	if (IS_GEN9_LP(dev_priv)) {
 		panel->backlight.setup = bxt_setup_backlight;
 		panel->backlight.enable = bxt_enable_backlight;
 		panel->backlight.disable = bxt_disable_backlight;
939
drivers/gpu/drm/i915/intel_pipe_crc.c
Normal file
939
drivers/gpu/drm/i915/intel_pipe_crc.c
Normal file
@ -0,0 +1,939 @@
|
||||
/*
|
||||
* Copyright © 2013 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*
|
||||
* Author: Damien Lespiau <damien.lespiau@intel.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/circ_buf.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include "intel_drv.h"
|
||||
|
||||
struct pipe_crc_info {
|
||||
const char *name;
|
||||
struct drm_i915_private *dev_priv;
|
||||
enum pipe pipe;
|
||||
};
|
||||
|
||||
/* As the drm_debugfs_init() routines are called before dev->dev_private is
|
||||
* allocated we need to hook into the minor for release.
|
||||
*/
|
||||
static int drm_add_fake_info_node(struct drm_minor *minor,
|
||||
struct dentry *ent, const void *key)
|
||||
{
|
||||
struct drm_info_node *node;
|
||||
|
||||
node = kmalloc(sizeof(*node), GFP_KERNEL);
|
||||
if (node == NULL) {
|
||||
debugfs_remove(ent);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
node->minor = minor;
|
||||
node->dent = ent;
|
||||
node->info_ent = (void *) key;
|
||||
|
||||
mutex_lock(&minor->debugfs_lock);
|
||||
list_add(&node->list, &minor->debugfs_list);
|
||||
mutex_unlock(&minor->debugfs_lock);
|
||||
|
||||
return 0;
|
||||
}

static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
    struct pipe_crc_info *info = inode->i_private;
    struct drm_i915_private *dev_priv = info->dev_priv;
    struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

    if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes)
        return -ENODEV;

    spin_lock_irq(&pipe_crc->lock);

    if (pipe_crc->opened) {
        spin_unlock_irq(&pipe_crc->lock);
        return -EBUSY; /* already open */
    }

    pipe_crc->opened = true;
    filep->private_data = inode->i_private;

    spin_unlock_irq(&pipe_crc->lock);

    return 0;
}

static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
{
    struct pipe_crc_info *info = inode->i_private;
    struct drm_i915_private *dev_priv = info->dev_priv;
    struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

    spin_lock_irq(&pipe_crc->lock);
    pipe_crc->opened = false;
    spin_unlock_irq(&pipe_crc->lock);

    return 0;
}

/* (6 fields, 8 chars each, space separated (5) + '\n') */
#define PIPE_CRC_LINE_LEN   (6 * 8 + 5 + 1)
/* account for '\0' */
#define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)

static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
{
    assert_spin_locked(&pipe_crc->lock);
    return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
                    INTEL_PIPE_CRC_ENTRIES_NR);
}
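
CIRC_CNT() from <linux/circ_buf.h> reports how many entries sit between tail and head in a power-of-two ring. A minimal standalone sketch of the same arithmetic; the ring size of 128 is an assumption standing in for INTEL_PIPE_CRC_ENTRIES_NR, whose real value lives in a header not shown here:

    #include <stdio.h>

    /* Same arithmetic as the kernel's CIRC_CNT(): indices grow monotonically
     * and are reduced modulo the ring size, which must be a power of two. */
    #define RING_SIZE 128 /* assumed stand-in for INTEL_PIPE_CRC_ENTRIES_NR */
    #define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

    int main(void)
    {
        unsigned int head = 5, tail = 130 % RING_SIZE; /* tail wrapped to 2 */

        /* 5 - 2 = 3 entries are pending between tail and head */
        printf("%u entries pending\n", CIRC_CNT(head, tail, RING_SIZE));
        return 0;
    }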

static ssize_t
i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
                   loff_t *pos)
{
    struct pipe_crc_info *info = filep->private_data;
    struct drm_i915_private *dev_priv = info->dev_priv;
    struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
    char buf[PIPE_CRC_BUFFER_LEN];
    int n_entries;
    ssize_t bytes_read;

    /*
     * Don't allow user space to provide buffers not big enough to hold
     * a line of data.
     */
    if (count < PIPE_CRC_LINE_LEN)
        return -EINVAL;

    if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
        return 0;

    /* nothing to read */
    spin_lock_irq(&pipe_crc->lock);
    while (pipe_crc_data_count(pipe_crc) == 0) {
        int ret;

        if (filep->f_flags & O_NONBLOCK) {
            spin_unlock_irq(&pipe_crc->lock);
            return -EAGAIN;
        }

        ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
                pipe_crc_data_count(pipe_crc), pipe_crc->lock);
        if (ret) {
            spin_unlock_irq(&pipe_crc->lock);
            return ret;
        }
    }

    /* We now have one or more entries to read */
    n_entries = count / PIPE_CRC_LINE_LEN;

    bytes_read = 0;
    while (n_entries > 0) {
        struct intel_pipe_crc_entry *entry =
            &pipe_crc->entries[pipe_crc->tail];

        if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
                     INTEL_PIPE_CRC_ENTRIES_NR) < 1)
            break;

        BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
        pipe_crc->tail = (pipe_crc->tail + 1) &
                         (INTEL_PIPE_CRC_ENTRIES_NR - 1);

        bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
                               "%8u %8x %8x %8x %8x %8x\n",
                               entry->frame, entry->crc[0],
                               entry->crc[1], entry->crc[2],
                               entry->crc[3], entry->crc[4]);

        spin_unlock_irq(&pipe_crc->lock);

        if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN))
            return -EFAULT;

        user_buf += PIPE_CRC_LINE_LEN;
        n_entries--;

        spin_lock_irq(&pipe_crc->lock);
    }

    spin_unlock_irq(&pipe_crc->lock);

    return bytes_read;
}
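
Each entry comes out as one fixed-width text line: a frame counter followed by five CRC words. A userspace reader could be as small as the sketch below; the debugfs path is an assumption and depends on the debugfs mount point and DRM minor number:

    #include <stdio.h>

    int main(void)
    {
        /* Assumed path; matches the "i915_pipe_A_crc" name registered above */
        FILE *f = fopen("/sys/kernel/debug/dri/0/i915_pipe_A_crc", "r");
        unsigned int frame, crc[5];

        if (!f)
            return 1;

        /* Blocks until an entry is available, since O_NONBLOCK is not set */
        while (fscanf(f, "%u %x %x %x %x %x", &frame,
                      &crc[0], &crc[1], &crc[2], &crc[3], &crc[4]) == 6)
            printf("frame %u: crc0=0x%08x\n", frame, crc[0]);

        fclose(f);
        return 0;
    }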

static const struct file_operations i915_pipe_crc_fops = {
    .owner = THIS_MODULE,
    .open = i915_pipe_crc_open,
    .read = i915_pipe_crc_read,
    .release = i915_pipe_crc_release,
};

static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
    {
        .name = "i915_pipe_A_crc",
        .pipe = PIPE_A,
    },
    {
        .name = "i915_pipe_B_crc",
        .pipe = PIPE_B,
    },
    {
        .name = "i915_pipe_C_crc",
        .pipe = PIPE_C,
    },
};

static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
                                enum pipe pipe)
{
    struct drm_i915_private *dev_priv = to_i915(minor->dev);
    struct dentry *ent;
    struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];

    info->dev_priv = dev_priv;
    ent = debugfs_create_file(info->name, S_IRUGO, root, info,
                              &i915_pipe_crc_fops);
    if (!ent)
        return -ENOMEM;

    return drm_add_fake_info_node(minor, ent, info);
}

static const char * const pipe_crc_sources[] = {
    "none",
    "plane1",
    "plane2",
    "pf",
    "pipe",
    "TV",
    "DP-B",
    "DP-C",
    "DP-D",
    "auto",
};

static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
{
    BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
    return pipe_crc_sources[source];
}
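
The BUILD_BUG_ON() above keeps the string table and the enum from drifting apart at compile time. A standalone sketch of the same pattern using C11's _Static_assert; the enum here is illustrative, not the driver's:

    #include <stdio.h>

    enum color { COLOR_RED, COLOR_GREEN, COLOR_BLUE, COLOR_MAX };

    static const char * const color_names[] = { "red", "green", "blue" };

    /* Compile-time check, same intent as the kernel's BUILD_BUG_ON():
     * the build fails if the table and the enum fall out of sync. */
    _Static_assert(sizeof(color_names) / sizeof(color_names[0]) == COLOR_MAX,
                   "color_names[] out of sync with enum color");

    int main(void)
    {
        printf("%s\n", color_names[COLOR_GREEN]);
        return 0;
    }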

static int display_crc_ctl_show(struct seq_file *m, void *data)
{
    struct drm_i915_private *dev_priv = m->private;
    int i;

    for (i = 0; i < I915_MAX_PIPES; i++)
        seq_printf(m, "%c %s\n", pipe_name(i),
                   pipe_crc_source_name(dev_priv->pipe_crc[i].source));

    return 0;
}

static int display_crc_ctl_open(struct inode *inode, struct file *file)
{
    return single_open(file, display_crc_ctl_show, inode->i_private);
}

static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
                                 uint32_t *val)
{
    if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
        *source = INTEL_PIPE_CRC_SOURCE_PIPE;

    switch (*source) {
    case INTEL_PIPE_CRC_SOURCE_PIPE:
        *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
        break;
    case INTEL_PIPE_CRC_SOURCE_NONE:
        *val = 0;
        break;
    default:
        return -EINVAL;
    }

    return 0;
}

static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
                                     enum pipe pipe,
                                     enum intel_pipe_crc_source *source)
{
    struct drm_device *dev = &dev_priv->drm;
    struct intel_encoder *encoder;
    struct intel_crtc *crtc;
    struct intel_digital_port *dig_port;
    int ret = 0;

    *source = INTEL_PIPE_CRC_SOURCE_PIPE;

    drm_modeset_lock_all(dev);
    for_each_intel_encoder(dev, encoder) {
        if (!encoder->base.crtc)
            continue;

        crtc = to_intel_crtc(encoder->base.crtc);

        if (crtc->pipe != pipe)
            continue;

        switch (encoder->type) {
        case INTEL_OUTPUT_TVOUT:
            *source = INTEL_PIPE_CRC_SOURCE_TV;
            break;
        case INTEL_OUTPUT_DP:
        case INTEL_OUTPUT_EDP:
            dig_port = enc_to_dig_port(&encoder->base);
            switch (dig_port->port) {
            case PORT_B:
                *source = INTEL_PIPE_CRC_SOURCE_DP_B;
                break;
            case PORT_C:
                *source = INTEL_PIPE_CRC_SOURCE_DP_C;
                break;
            case PORT_D:
                *source = INTEL_PIPE_CRC_SOURCE_DP_D;
                break;
            default:
                WARN(1, "nonexisting DP port %c\n",
                     port_name(dig_port->port));
                break;
            }
            break;
        default:
            break;
        }
    }
    drm_modeset_unlock_all(dev);

    return ret;
}

static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
                                enum pipe pipe,
                                enum intel_pipe_crc_source *source,
                                uint32_t *val)
{
    bool need_stable_symbols = false;

    if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
        int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
        if (ret)
            return ret;
    }

    switch (*source) {
    case INTEL_PIPE_CRC_SOURCE_PIPE:
        *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
        break;
    case INTEL_PIPE_CRC_SOURCE_DP_B:
        *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
        need_stable_symbols = true;
        break;
    case INTEL_PIPE_CRC_SOURCE_DP_C:
        *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
        need_stable_symbols = true;
        break;
    case INTEL_PIPE_CRC_SOURCE_DP_D:
        if (!IS_CHERRYVIEW(dev_priv))
            return -EINVAL;
        *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
        need_stable_symbols = true;
        break;
    case INTEL_PIPE_CRC_SOURCE_NONE:
        *val = 0;
        break;
    default:
        return -EINVAL;
    }

    /*
     * When the pipe CRC tap point is after the transcoders we need
     * to tweak symbol-level features to produce a deterministic series of
     * symbols for a given frame. We need to reset those features only once
     * a frame (instead of every nth symbol):
     *   - DC-balance: used to ensure a better clock recovery from the data
     *     link (SDVO)
     *   - DisplayPort scrambling: used for EMI reduction
     */
    if (need_stable_symbols) {
        uint32_t tmp = I915_READ(PORT_DFT2_G4X);

        tmp |= DC_BALANCE_RESET_VLV;
        switch (pipe) {
        case PIPE_A:
            tmp |= PIPE_A_SCRAMBLE_RESET;
            break;
        case PIPE_B:
            tmp |= PIPE_B_SCRAMBLE_RESET;
            break;
        case PIPE_C:
            tmp |= PIPE_C_SCRAMBLE_RESET;
            break;
        default:
            return -EINVAL;
        }
        I915_WRITE(PORT_DFT2_G4X, tmp);
    }

    return 0;
}

static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
                                 enum pipe pipe,
                                 enum intel_pipe_crc_source *source,
                                 uint32_t *val)
{
    bool need_stable_symbols = false;

    if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
        int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
        if (ret)
            return ret;
    }

    switch (*source) {
    case INTEL_PIPE_CRC_SOURCE_PIPE:
        *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
        break;
    case INTEL_PIPE_CRC_SOURCE_TV:
        if (!SUPPORTS_TV(dev_priv))
            return -EINVAL;
        *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
        break;
    case INTEL_PIPE_CRC_SOURCE_DP_B:
        if (!IS_G4X(dev_priv))
            return -EINVAL;
        *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
        need_stable_symbols = true;
        break;
    case INTEL_PIPE_CRC_SOURCE_DP_C:
        if (!IS_G4X(dev_priv))
            return -EINVAL;
        *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
        need_stable_symbols = true;
        break;
    case INTEL_PIPE_CRC_SOURCE_DP_D:
        if (!IS_G4X(dev_priv))
            return -EINVAL;
        *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
        need_stable_symbols = true;
        break;
    case INTEL_PIPE_CRC_SOURCE_NONE:
        *val = 0;
        break;
    default:
        return -EINVAL;
    }

    /*
     * When the pipe CRC tap point is after the transcoders we need
     * to tweak symbol-level features to produce a deterministic series of
     * symbols for a given frame. We need to reset those features only once
     * a frame (instead of every nth symbol):
     *   - DC-balance: used to ensure a better clock recovery from the data
     *     link (SDVO)
     *   - DisplayPort scrambling: used for EMI reduction
     */
    if (need_stable_symbols) {
        uint32_t tmp = I915_READ(PORT_DFT2_G4X);

        WARN_ON(!IS_G4X(dev_priv));

        I915_WRITE(PORT_DFT_I9XX,
                   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);

        if (pipe == PIPE_A)
            tmp |= PIPE_A_SCRAMBLE_RESET;
        else
            tmp |= PIPE_B_SCRAMBLE_RESET;

        I915_WRITE(PORT_DFT2_G4X, tmp);
    }

    return 0;
}

static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
                                         enum pipe pipe)
{
    uint32_t tmp = I915_READ(PORT_DFT2_G4X);

    switch (pipe) {
    case PIPE_A:
        tmp &= ~PIPE_A_SCRAMBLE_RESET;
        break;
    case PIPE_B:
        tmp &= ~PIPE_B_SCRAMBLE_RESET;
        break;
    case PIPE_C:
        tmp &= ~PIPE_C_SCRAMBLE_RESET;
        break;
    default:
        return;
    }
    if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
        tmp &= ~DC_BALANCE_RESET_VLV;
    I915_WRITE(PORT_DFT2_G4X, tmp);
}

static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
                                         enum pipe pipe)
{
    uint32_t tmp = I915_READ(PORT_DFT2_G4X);

    if (pipe == PIPE_A)
        tmp &= ~PIPE_A_SCRAMBLE_RESET;
    else
        tmp &= ~PIPE_B_SCRAMBLE_RESET;
    I915_WRITE(PORT_DFT2_G4X, tmp);

    if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
        I915_WRITE(PORT_DFT_I9XX,
                   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
    }
}

static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
                                uint32_t *val)
{
    if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
        *source = INTEL_PIPE_CRC_SOURCE_PIPE;

    switch (*source) {
    case INTEL_PIPE_CRC_SOURCE_PLANE1:
        *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
        break;
    case INTEL_PIPE_CRC_SOURCE_PLANE2:
        *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
        break;
    case INTEL_PIPE_CRC_SOURCE_PIPE:
        *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
        break;
    case INTEL_PIPE_CRC_SOURCE_NONE:
        *val = 0;
        break;
    default:
        return -EINVAL;
    }

    return 0;
}

static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
                                        bool enable)
{
    struct drm_device *dev = &dev_priv->drm;
    struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
    struct intel_crtc_state *pipe_config;
    struct drm_atomic_state *state;
    int ret = 0;

    drm_modeset_lock_all(dev);
    state = drm_atomic_state_alloc(dev);
    if (!state) {
        ret = -ENOMEM;
        goto out;
    }

    state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
    pipe_config = intel_atomic_get_crtc_state(state, crtc);
    if (IS_ERR(pipe_config)) {
        ret = PTR_ERR(pipe_config);
        goto out;
    }

    pipe_config->pch_pfit.force_thru = enable;
    if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
        pipe_config->pch_pfit.enabled != enable)
        pipe_config->base.connectors_changed = true;

    ret = drm_atomic_commit(state);
out:
    WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
    drm_modeset_unlock_all(dev);
    drm_atomic_state_put(state);
}

static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
                                enum pipe pipe,
                                enum intel_pipe_crc_source *source,
                                uint32_t *val)
{
    if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
        *source = INTEL_PIPE_CRC_SOURCE_PF;

    switch (*source) {
    case INTEL_PIPE_CRC_SOURCE_PLANE1:
        *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
        break;
    case INTEL_PIPE_CRC_SOURCE_PLANE2:
        *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
        break;
    case INTEL_PIPE_CRC_SOURCE_PF:
        if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
            hsw_trans_edp_pipe_A_crc_wa(dev_priv, true);

        *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
        break;
    case INTEL_PIPE_CRC_SOURCE_NONE:
        *val = 0;
        break;
    default:
        return -EINVAL;
    }

    return 0;
}

static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
                               enum pipe pipe,
                               enum intel_pipe_crc_source source)
{
    struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
    struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
    enum intel_display_power_domain power_domain;
    u32 val = 0; /* shut up gcc */
    int ret;

    if (pipe_crc->source == source)
        return 0;

    /* forbid changing the source without going back to 'none' */
    if (pipe_crc->source && source)
        return -EINVAL;

    power_domain = POWER_DOMAIN_PIPE(pipe);
    if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
        DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
        return -EIO;
    }

    if (IS_GEN2(dev_priv))
        ret = i8xx_pipe_crc_ctl_reg(&source, &val);
    else if (INTEL_GEN(dev_priv) < 5)
        ret = i9xx_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
    else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
        ret = vlv_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
    else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
        ret = ilk_pipe_crc_ctl_reg(&source, &val);
    else
        ret = ivb_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);

    if (ret != 0)
        goto out;

    /* none -> real source transition */
    if (source) {
        struct intel_pipe_crc_entry *entries;

        DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
                         pipe_name(pipe), pipe_crc_source_name(source));

        entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
                          sizeof(pipe_crc->entries[0]),
                          GFP_KERNEL);
        if (!entries) {
            ret = -ENOMEM;
            goto out;
        }

        /*
         * When IPS gets enabled, the pipe CRC changes. Since IPS gets
         * enabled and disabled dynamically based on package C states,
         * user space can't make reliable use of the CRCs, so let's just
         * completely disable it.
         */
        hsw_disable_ips(crtc);

        spin_lock_irq(&pipe_crc->lock);
        kfree(pipe_crc->entries);
        pipe_crc->entries = entries;
        pipe_crc->head = 0;
        pipe_crc->tail = 0;
        spin_unlock_irq(&pipe_crc->lock);
    }

    pipe_crc->source = source;

    I915_WRITE(PIPE_CRC_CTL(pipe), val);
    POSTING_READ(PIPE_CRC_CTL(pipe));

    /* real source -> none transition */
    if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
        struct intel_pipe_crc_entry *entries;
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
                                                          pipe);

        DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
                         pipe_name(pipe));

        drm_modeset_lock(&crtc->base.mutex, NULL);
        if (crtc->base.state->active)
            intel_wait_for_vblank(dev_priv, pipe);
        drm_modeset_unlock(&crtc->base.mutex);

        spin_lock_irq(&pipe_crc->lock);
        entries = pipe_crc->entries;
        pipe_crc->entries = NULL;
        pipe_crc->head = 0;
        pipe_crc->tail = 0;
        spin_unlock_irq(&pipe_crc->lock);

        kfree(entries);

        if (IS_G4X(dev_priv))
            g4x_undo_pipe_scramble_reset(dev_priv, pipe);
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
            vlv_undo_pipe_scramble_reset(dev_priv, pipe);
        else if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
            hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);

        hsw_enable_ips(crtc);
    }

    ret = 0;

out:
    intel_display_power_put(dev_priv, power_domain);

    return ret;
}

/*
 * Parse pipe CRC command strings:
 *   command: wsp* object wsp+ name wsp+ source wsp*
 *   object: 'pipe'
 *   name: (A | B | C)
 *   source: (none | plane1 | plane2 | pf)
 *   wsp: (#0x20 | #0x9 | #0xA)+
 *
 * eg.:
 *  "pipe A plane1"  -> Start CRC computations on plane1 of pipe A
 *  "pipe A none"    -> Stop CRC
 */
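
A minimal sketch of a userspace client driving this grammar by writing one command to the control file; the path is an assumption, since it depends on the debugfs mount point and the DRM minor:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        /* Assumed path: debugfs mount point and minor number may differ */
        const char *ctl = "/sys/kernel/debug/dri/0/i915_display_crc_ctl";
        const char *cmd = "pipe A plane1"; /* object, name, source */
        int fd = open(ctl, O_WRONLY);

        if (fd < 0)
            return 1;

        /* One write() per command; the kernel side parses it whole */
        if (write(fd, cmd, strlen(cmd)) < 0)
            perror("write");

        close(fd);
        return 0;
    }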

static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
{
    int n_words = 0;

    while (*buf) {
        char *end;

        /* skip leading white space */
        buf = skip_spaces(buf);
        if (!*buf)
            break; /* end of buffer */

        /* find end of word */
        for (end = buf; *end && !isspace(*end); end++)
            ;

        if (n_words == max_words) {
            DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
                             max_words);
            return -EINVAL; /* ran out of words[] before bytes */
        }

        if (*end)
            *end++ = '\0';
        words[n_words++] = buf;
        buf = end;
    }

    return n_words;
}
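
The tokenizer splits the buffer in place, overwriting each separator with a NUL so the words[] pointers index into the original storage. A standalone sketch of the same idea, with isspace() standing in for the kernel's skip_spaces() and -1 standing in for -EINVAL:

    #include <ctype.h>
    #include <stdio.h>

    /* In-place whitespace tokenizer, same shape as display_crc_ctl_tokenize() */
    static int tokenize(char *buf, char *words[], int max_words)
    {
        int n_words = 0;

        while (*buf) {
            char *end;

            while (*buf && isspace((unsigned char)*buf)) /* skip_spaces() */
                buf++;
            if (!*buf)
                break;

            for (end = buf; *end && !isspace((unsigned char)*end); end++)
                ;

            if (n_words == max_words)
                return -1;

            if (*end)
                *end++ = '\0'; /* terminate the word in place */
            words[n_words++] = buf;
            buf = end;
        }

        return n_words;
    }

    int main(void)
    {
        char cmd[] = "  pipe A none ";
        char *words[3];
        int i, n = tokenize(cmd, words, 3);

        for (i = 0; i < n; i++)
            printf("word %d: '%s'\n", i, words[i]);
        return 0;
    }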

enum intel_pipe_crc_object {
    PIPE_CRC_OBJECT_PIPE,
};

static const char * const pipe_crc_objects[] = {
    "pipe",
};

static int
display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
        if (!strcmp(buf, pipe_crc_objects[i])) {
            *o = i;
            return 0;
        }

    return -EINVAL;
}

static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
{
    const char name = buf[0];

    if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
        return -EINVAL;

    *pipe = name - 'A';

    return 0;
}

static int
display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
        if (!strcmp(buf, pipe_crc_sources[i])) {
            *s = i;
            return 0;
        }

    return -EINVAL;
}

static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
                                 char *buf, size_t len)
{
#define N_WORDS 3
    int n_words;
    char *words[N_WORDS];
    enum pipe pipe;
    enum intel_pipe_crc_object object;
    enum intel_pipe_crc_source source;

    n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
    if (n_words != N_WORDS) {
        DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
                         N_WORDS);
        return -EINVAL;
    }

    if (display_crc_ctl_parse_object(words[0], &object) < 0) {
        DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
        return -EINVAL;
    }

    if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
        DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
        return -EINVAL;
    }

    if (display_crc_ctl_parse_source(words[2], &source) < 0) {
        DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
        return -EINVAL;
    }

    return pipe_crc_set_source(dev_priv, pipe, source);
}

static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
                                     size_t len, loff_t *offp)
{
    struct seq_file *m = file->private_data;
    struct drm_i915_private *dev_priv = m->private;
    char *tmpbuf;
    int ret;

    if (len == 0)
        return 0;

    if (len > PAGE_SIZE - 1) {
        DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
                         PAGE_SIZE);
        return -E2BIG;
    }

    tmpbuf = kmalloc(len + 1, GFP_KERNEL);
    if (!tmpbuf)
        return -ENOMEM;

    if (copy_from_user(tmpbuf, ubuf, len)) {
        ret = -EFAULT;
        goto out;
    }
    tmpbuf[len] = '\0';

    ret = display_crc_ctl_parse(dev_priv, tmpbuf, len);

out:
    kfree(tmpbuf);
    if (ret < 0)
        return ret;

    *offp += len;
    return len;
}

const struct file_operations i915_display_crc_ctl_fops = {
    .owner = THIS_MODULE,
    .open = display_crc_ctl_open,
    .read = seq_read,
    .llseek = seq_lseek,
    .release = single_release,
    .write = display_crc_ctl_write
};
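
For reference, registering such a debugfs interface follows a common pattern; a minimal kernel-module sketch (the names are illustrative, and the return conventions of the debugfs helpers vary slightly across kernel versions):

    #include <linux/debugfs.h>
    #include <linux/module.h>

    static struct dentry *dir;
    static u32 value;

    static int __init crc_demo_init(void)
    {
        /* Same registration pattern as i915_display_crc_ctl: a debugfs
         * entry backed by kernel state (here a simple u32 helper). */
        dir = debugfs_create_dir("crc_demo", NULL);
        if (!dir)
            return -ENOMEM;
        debugfs_create_u32("value", 0600, dir, &value);
        return 0;
    }

    static void __exit crc_demo_exit(void)
    {
        debugfs_remove_recursive(dir);
    }

    module_init(crc_demo_init);
    module_exit(crc_demo_exit);
    MODULE_LICENSE("GPL");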

void intel_display_crc_init(struct drm_i915_private *dev_priv)
{
    enum pipe pipe;

    for_each_pipe(dev_priv, pipe) {
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];

        pipe_crc->opened = false;
        spin_lock_init(&pipe_crc->lock);
        init_waitqueue_head(&pipe_crc->wq);
    }
}

int intel_pipe_crc_create(struct drm_minor *minor)
{
    int ret, i;

    for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
        ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
        if (ret)
            return ret;
    }

    return 0;
}

void intel_pipe_crc_cleanup(struct drm_minor *minor)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
        struct drm_info_list *info_list =
            (struct drm_info_list *)&i915_pipe_crc_data[i];

        drm_debugfs_remove_files(info_list, 1, minor);
    }
}
File diff suppressed because it is too large
@@ -813,15 +813,13 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev: DRM device
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_device *dev)
void intel_psr_init(struct drm_i915_private *dev_priv)
{
    struct drm_i915_private *dev_priv = to_i915(dev);

    dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
            HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

@@ -1805,10 +1805,9 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
    return 0;
}

int intel_ring_pin(struct intel_ring *ring)
int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias)
{
    /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
    unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096;
    unsigned int flags;
    enum i915_map_type map;
    struct i915_vma *vma = ring->vma;
    void *addr;
@@ -1818,6 +1817,9 @@ int intel_ring_pin(struct intel_ring *ring)

    map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;

    flags = PIN_GLOBAL;
    if (offset_bias)
        flags |= PIN_OFFSET_BIAS | offset_bias;
    if (vma->obj->stolen)
        flags |= PIN_MAPPABLE;

@@ -1869,9 +1871,9 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
    struct drm_i915_gem_object *obj;
    struct i915_vma *vma;

    obj = i915_gem_object_create_stolen(&dev_priv->drm, size);
    obj = i915_gem_object_create_stolen(dev_priv, size);
    if (!obj)
        obj = i915_gem_object_create(&dev_priv->drm, size);
        obj = i915_gem_object_create(dev_priv, size);
    if (IS_ERR(obj))
        return ERR_CAST(obj);

@@ -1912,7 +1914,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
     * of the buffer.
     */
    ring->effective_size = size;
    if (IS_I830(engine->i915) || IS_845G(engine->i915))
    if (IS_I830(engine->i915) || IS_I845G(engine->i915))
        ring->effective_size -= 2 * CACHELINE_BYTES;

    ring->last_retired_head = -1;
@@ -1939,8 +1941,26 @@ intel_ring_free(struct intel_ring *ring)
    kfree(ring);
}

static int intel_ring_context_pin(struct i915_gem_context *ctx,
                                  struct intel_engine_cs *engine)
static int context_pin(struct i915_gem_context *ctx, unsigned int flags)
{
    struct i915_vma *vma = ctx->engine[RCS].state;
    int ret;

    /* Clear this page out of any CPU caches for coherent swap-in/out.
     * We only want to do this on the first bind so that we do not stall
     * on an active context (which by nature is already on the GPU).
     */
    if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
        ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
        if (ret)
            return ret;
    }

    return i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | flags);
}

static int intel_ring_context_pin(struct intel_engine_cs *engine,
                                  struct i915_gem_context *ctx)
{
    struct intel_context *ce = &ctx->engine[engine->id];
    int ret;
@@ -1951,13 +1971,15 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
        return 0;

    if (ce->state) {
        struct i915_vma *vma;
        unsigned int flags;

        vma = i915_gem_context_pin_legacy(ctx, PIN_HIGH);
        if (IS_ERR(vma)) {
            ret = PTR_ERR(vma);
        flags = 0;
        if (i915_gem_context_is_kernel(ctx))
            flags = PIN_HIGH;

        ret = context_pin(ctx, flags);
        if (ret)
            goto error;
        }
    }

    /* The kernel context is only used as a placeholder for flushing the
@@ -1967,7 +1989,7 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
     * as during eviction we cannot allocate and pin the renderstate in
     * order to initialise the context.
     */
    if (ctx == ctx->i915->kernel_context)
    if (i915_gem_context_is_kernel(ctx))
        ce->initialised = true;

    i915_gem_context_get(ctx);
@@ -1978,12 +2000,13 @@ error:
    return ret;
}

static void intel_ring_context_unpin(struct i915_gem_context *ctx,
                                     struct intel_engine_cs *engine)
static void intel_ring_context_unpin(struct intel_engine_cs *engine,
                                     struct i915_gem_context *ctx)
{
    struct intel_context *ce = &ctx->engine[engine->id];

    lockdep_assert_held(&ctx->i915->drm.struct_mutex);
    GEM_BUG_ON(ce->pin_count == 0);

    if (--ce->pin_count)
        return;
@@ -2008,17 +2031,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
    if (ret)
        goto error;

    /* We may need to do things with the shrinker which
     * require us to immediately switch back to the default
     * context. This can cause a problem as pinning the
     * default context also requires GTT space which may not
     * be available. To avoid this we always pin the default
     * context.
     */
    ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
    if (ret)
        goto error;

    ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
    if (IS_ERR(ring)) {
        ret = PTR_ERR(ring);
@@ -2036,7 +2048,8 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
        goto error;
    }

    ret = intel_ring_pin(ring);
    /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
    ret = intel_ring_pin(ring, 4096);
    if (ret) {
        intel_ring_free(ring);
        goto error;
@@ -2077,8 +2090,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)

    intel_engine_cleanup_common(engine);

    intel_ring_context_unpin(dev_priv->kernel_context, engine);

    engine->i915 = NULL;
    dev_priv->engine[engine->id] = NULL;
    kfree(engine);
@@ -2095,16 +2106,19 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
    }
}

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
static int ring_request_alloc(struct drm_i915_gem_request *request)
{
    int ret;

    GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);

    /* Flush enough space to reduce the likelihood of waiting after
     * we start building the request - in which case we will just
     * have to repeat work.
     */
    request->reserved_space += LEGACY_REQUEST_SIZE;

    GEM_BUG_ON(!request->engine->buffer);
    request->ring = request->engine->buffer;

    ret = intel_ring_begin(request, 0);
@@ -2452,7 +2466,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
    if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
        struct i915_vma *vma;

        obj = i915_gem_object_create(&dev_priv->drm, 4096);
        obj = i915_gem_object_create(dev_priv, 4096);
        if (IS_ERR(obj))
            goto err;

@@ -2584,6 +2598,11 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
    engine->init_hw = init_ring_common;
    engine->reset_hw = reset_ring_common;

    engine->context_pin = intel_ring_context_pin;
    engine->context_unpin = intel_ring_context_unpin;

    engine->request_alloc = ring_request_alloc;

    engine->emit_breadcrumb = i9xx_emit_breadcrumb;
    engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
    if (i915.semaphores) {
@@ -2608,7 +2627,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
        engine->emit_bb_start = gen6_emit_bb_start;
    else if (INTEL_GEN(dev_priv) >= 4)
        engine->emit_bb_start = i965_emit_bb_start;
    else if (IS_I830(dev_priv) || IS_845G(dev_priv))
    else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
        engine->emit_bb_start = i830_emit_bb_start;
    else
        engine->emit_bb_start = i915_emit_bb_start;

@@ -65,14 +65,37 @@ struct intel_hw_status_page {
                          GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))

enum intel_engine_hangcheck_action {
    HANGCHECK_IDLE = 0,
    HANGCHECK_WAIT,
    HANGCHECK_ACTIVE,
    HANGCHECK_KICK,
    HANGCHECK_HUNG,
    ENGINE_IDLE = 0,
    ENGINE_WAIT,
    ENGINE_ACTIVE_SEQNO,
    ENGINE_ACTIVE_HEAD,
    ENGINE_ACTIVE_SUBUNITS,
    ENGINE_WAIT_KICK,
    ENGINE_DEAD,
};

#define HANGCHECK_SCORE_RING_HUNG 31
static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
    switch (a) {
    case ENGINE_IDLE:
        return "idle";
    case ENGINE_WAIT:
        return "wait";
    case ENGINE_ACTIVE_SEQNO:
        return "active seqno";
    case ENGINE_ACTIVE_HEAD:
        return "active head";
    case ENGINE_ACTIVE_SUBUNITS:
        return "active subunits";
    case ENGINE_WAIT_KICK:
        return "wait kick";
    case ENGINE_DEAD:
        return "dead";
    }

    return "unknown";
}

#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 3
@@ -104,10 +127,11 @@ struct intel_instdone {
struct intel_engine_hangcheck {
    u64 acthd;
    u32 seqno;
    int score;
    enum intel_engine_hangcheck_action action;
    unsigned long action_timestamp;
    int deadlock;
    struct intel_instdone instdone;
    bool stalled;
};

struct intel_ring {
@@ -242,6 +266,11 @@ struct intel_engine_cs {
    void (*reset_hw)(struct intel_engine_cs *engine,
                     struct drm_i915_gem_request *req);

    int (*context_pin)(struct intel_engine_cs *engine,
                       struct i915_gem_context *ctx);
    void (*context_unpin)(struct intel_engine_cs *engine,
                          struct i915_gem_context *ctx);
    int (*request_alloc)(struct drm_i915_gem_request *req);
    int (*init_context)(struct drm_i915_gem_request *req);

    int (*emit_flush)(struct drm_i915_gem_request *request,
@@ -355,7 +384,24 @@ struct intel_engine_cs {
    bool preempt_wa;
    u32 ctx_desc_template;

    struct i915_gem_context *last_context;
    /* Contexts are pinned whilst they are active on the GPU. The last
     * context executed remains active whilst the GPU is idle - the
     * switch away and write to the context object only occurs on the
     * next execution. Contexts are only unpinned on retirement of the
     * following request ensuring that we can always write to the object
     * on the context switch even after idling. Across suspend, we switch
     * to the kernel context and trash it as the save may not happen
     * before the hardware is powered down.
     */
    struct i915_gem_context *last_retired_context;

    /* We track the current MI_SET_CONTEXT in order to eliminate
     * redundant context switches. This presumes that requests are not
     * reordered! Or when they are, the tracking is updated along with
     * the emission of individual requests into the legacy command
     * stream (ring).
     */
    struct i915_gem_context *legacy_active_context;

    struct intel_engine_hangcheck hangcheck;

@@ -437,7 +483,7 @@ intel_write_status_page(struct intel_engine_cs *engine,

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring);
int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

@@ -446,8 +492,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

@@ -453,6 +453,57 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
    BIT(POWER_DOMAIN_AUX_C) |           \
    BIT(POWER_DOMAIN_INIT))

#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
    BIT(POWER_DOMAIN_TRANSCODER_A) |        \
    BIT(POWER_DOMAIN_PIPE_B) |          \
    BIT(POWER_DOMAIN_TRANSCODER_B) |        \
    BIT(POWER_DOMAIN_PIPE_C) |          \
    BIT(POWER_DOMAIN_TRANSCODER_C) |        \
    BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
    BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |     \
    BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
    BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
    BIT(POWER_DOMAIN_AUX_B) |           \
    BIT(POWER_DOMAIN_AUX_C) |           \
    BIT(POWER_DOMAIN_AUDIO) |           \
    BIT(POWER_DOMAIN_VGA) |             \
    BIT(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_A_POWER_DOMAINS (       \
    BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |        \
    BIT(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_B_POWER_DOMAINS (       \
    BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
    BIT(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_C_POWER_DOMAINS (       \
    BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
    BIT(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_A_POWER_DOMAINS (          \
    BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |        \
    BIT(POWER_DOMAIN_AUX_A) |           \
    BIT(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (          \
    BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
    BIT(POWER_DOMAIN_AUX_B) |           \
    BIT(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (          \
    BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
    BIT(POWER_DOMAIN_AUX_C) |           \
    BIT(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (       \
    BIT(POWER_DOMAIN_AUX_A) |           \
    BIT(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (       \
    BIT(POWER_DOMAIN_AUX_B) |           \
    BIT(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (       \
    BIT(POWER_DOMAIN_AUX_C) |           \
    BIT(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (      \
    GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |     \
    BIT(POWER_DOMAIN_MODESET) |         \
    BIT(POWER_DOMAIN_AUX_A) |           \
    BIT(POWER_DOMAIN_INIT))

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
    WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
@@ -530,7 +581,7 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
    u32 mask;

    mask = DC_STATE_EN_UPTO_DC5;
    if (IS_BROXTON(dev_priv))
    if (IS_GEN9_LP(dev_priv))
        mask |= DC_STATE_EN_DC9;
    else
        mask |= DC_STATE_EN_UPTO_DC6;
@@ -694,7 +745,7 @@ gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
}

static void skl_set_power_well(struct drm_i915_private *dev_priv,
                   struct i915_power_well *power_well, bool enable)
                   struct i915_power_well *power_well, bool enable)
{
    uint32_t tmp, fuse_status;
    uint32_t req_mask, state_mask;
@@ -720,11 +771,14 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
            return;
        }
        break;
    case SKL_DISP_PW_DDI_A_E:
    case SKL_DISP_PW_MISC_IO:
    case SKL_DISP_PW_DDI_A_E: /* GLK_DISP_PW_DDI_A */
    case SKL_DISP_PW_DDI_B:
    case SKL_DISP_PW_DDI_C:
    case SKL_DISP_PW_DDI_D:
    case SKL_DISP_PW_MISC_IO:
    case GLK_DISP_PW_AUX_A:
    case GLK_DISP_PW_AUX_B:
    case GLK_DISP_PW_AUX_C:
        break;
    default:
        WARN(1, "Unknown power well %lu\n", power_well->id);
@@ -884,6 +938,12 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
    power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
    if (power_well->count > 0)
        bxt_ddi_phy_verify_state(dev_priv, power_well->data);

    if (IS_GEMINILAKE(dev_priv)) {
        power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
        if (power_well->count > 0)
            bxt_ddi_phy_verify_state(dev_priv, power_well->data);
    }
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
@@ -911,7 +971,7 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,

    gen9_assert_dbuf_enabled(dev_priv);

    if (IS_BROXTON(dev_priv))
    if (IS_GEN9_LP(dev_priv))
        bxt_verify_ddi_phy_power_wells(dev_priv);
}

@@ -2161,6 +2221,91 @@ static struct i915_power_well bxt_power_wells[] = {
    },
};

static struct i915_power_well glk_power_wells[] = {
    {
        .name = "always-on",
        .always_on = 1,
        .domains = POWER_DOMAIN_MASK,
        .ops = &i9xx_always_on_power_well_ops,
    },
    {
        .name = "power well 1",
        /* Handled by the DMC firmware */
        .domains = 0,
        .ops = &skl_power_well_ops,
        .id = SKL_DISP_PW_1,
    },
    {
        .name = "DC off",
        .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
        .ops = &gen9_dc_off_power_well_ops,
        .id = SKL_DISP_PW_DC_OFF,
    },
    {
        .name = "power well 2",
        .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
        .ops = &skl_power_well_ops,
        .id = SKL_DISP_PW_2,
    },
    {
        .name = "dpio-common-a",
        .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
        .ops = &bxt_dpio_cmn_power_well_ops,
        .id = BXT_DPIO_CMN_A,
        .data = DPIO_PHY1,
    },
    {
        .name = "dpio-common-b",
        .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
        .ops = &bxt_dpio_cmn_power_well_ops,
        .id = BXT_DPIO_CMN_BC,
        .data = DPIO_PHY0,
    },
    {
        .name = "dpio-common-c",
        .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
        .ops = &bxt_dpio_cmn_power_well_ops,
        .id = GLK_DPIO_CMN_C,
        .data = DPIO_PHY2,
    },
    {
        .name = "AUX A",
        .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
        .ops = &skl_power_well_ops,
        .id = GLK_DISP_PW_AUX_A,
    },
    {
        .name = "AUX B",
        .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
        .ops = &skl_power_well_ops,
        .id = GLK_DISP_PW_AUX_B,
    },
    {
        .name = "AUX C",
        .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
        .ops = &skl_power_well_ops,
        .id = GLK_DISP_PW_AUX_C,
    },
    {
        .name = "DDI A power well",
        .domains = GLK_DISPLAY_DDI_A_POWER_DOMAINS,
        .ops = &skl_power_well_ops,
        .id = GLK_DISP_PW_DDI_A,
    },
    {
        .name = "DDI B power well",
        .domains = GLK_DISPLAY_DDI_B_POWER_DOMAINS,
        .ops = &skl_power_well_ops,
        .id = SKL_DISP_PW_DDI_B,
    },
    {
        .name = "DDI C power well",
        .domains = GLK_DISPLAY_DDI_C_POWER_DOMAINS,
        .ops = &skl_power_well_ops,
        .id = SKL_DISP_PW_DDI_C,
    },
};
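
Each power well advertises the display domains it feeds as a BIT() mask, and a domain reference keeps every well whose mask contains it powered. A standalone sketch of the composition and membership test; the enum values are illustrative only, not the driver's:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    /* Illustrative domain IDs only; the driver's enum is much larger */
    enum { DOMAIN_PIPE_B, DOMAIN_TRANSCODER_B, DOMAIN_AUX_B, DOMAIN_INIT };

    #define WELL_DDI_B_DOMAINS (BIT(DOMAIN_PIPE_B) |        \
                                BIT(DOMAIN_TRANSCODER_B) |  \
                                BIT(DOMAIN_AUX_B) |         \
                                BIT(DOMAIN_INIT))

    int main(void)
    {
        unsigned long domains = WELL_DDI_B_DOMAINS;

        /* Membership test mirrors how a domain get keeps a well powered */
        if (domains & BIT(DOMAIN_AUX_B))
            printf("well powers AUX B\n");
        return 0;
    }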

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
                                   int disable_power_well)
@@ -2181,7 +2326,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
    if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
        max_dc = 2;
        mask = 0;
    } else if (IS_BROXTON(dev_priv)) {
    } else if (IS_GEN9_LP(dev_priv)) {
        max_dc = 1;
        /*
         * DC9 has a separate HW flow from the rest of the DC states,
@@ -2257,6 +2402,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
        set_power_wells(power_domains, skl_power_wells);
    } else if (IS_BROXTON(dev_priv)) {
        set_power_wells(power_domains, bxt_power_wells);
    } else if (IS_GEMINILAKE(dev_priv)) {
        set_power_wells(power_domains, glk_power_wells);
    } else if (IS_CHERRYVIEW(dev_priv)) {
        set_power_wells(power_domains, chv_power_wells);
    } else if (IS_VALLEYVIEW(dev_priv)) {
@@ -2585,7 +2732,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)

    if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
        skl_display_core_init(dev_priv, resume);
    } else if (IS_BROXTON(dev_priv)) {
    } else if (IS_GEN9_LP(dev_priv)) {
        bxt_display_core_init(dev_priv, resume);
    } else if (IS_CHERRYVIEW(dev_priv)) {
        mutex_lock(&power_domains->lock);
@@ -2624,7 +2771,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)

    if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
        skl_display_core_uninit(dev_priv);
    else if (IS_BROXTON(dev_priv))
    else if (IS_GEN9_LP(dev_priv))
        bxt_display_core_uninit(dev_priv);
}

@@ -1296,7 +1296,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
    if (INTEL_GEN(dev_priv) >= 4) {
        /* done in crtc_mode_set as the dpll_md reg must be written early */
    } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
               IS_G33(dev_priv)) {
               IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
        /* done in crtc_mode_set as it lives inside the dpll register */
    } else {
        sdvox |= (crtc_state->pixel_multiplier - 1)
@@ -2342,9 +2342,9 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
}

static u8
intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
intel_sdvo_get_slave_addr(struct drm_i915_private *dev_priv,
                          struct intel_sdvo *sdvo)
{
    struct drm_i915_private *dev_priv = to_i915(dev);
    struct sdvo_device_mapping *my_mapping, *other_mapping;

    if (sdvo->port == PORT_B) {
@@ -2934,9 +2934,9 @@ static const struct i2c_algorithm intel_sdvo_ddc_proxy = {

static bool
intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
                          struct drm_device *dev)
                          struct drm_i915_private *dev_priv)
{
    struct pci_dev *pdev = dev->pdev;
    struct pci_dev *pdev = dev_priv->drm.pdev;

    sdvo->ddc.owner = THIS_MODULE;
    sdvo->ddc.class = I2C_CLASS_DDC;
@@ -2957,10 +2957,9 @@ static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
    WARN_ON(port != PORT_B && port != PORT_C);
}

bool intel_sdvo_init(struct drm_device *dev,
bool intel_sdvo_init(struct drm_i915_private *dev_priv,
                     i915_reg_t sdvo_reg, enum port port)
{
    struct drm_i915_private *dev_priv = to_i915(dev);
    struct intel_encoder *intel_encoder;
    struct intel_sdvo *intel_sdvo;
    int i;
@@ -2973,16 +2972,18 @@ bool intel_sdvo_init(struct drm_device *dev,

    intel_sdvo->sdvo_reg = sdvo_reg;
    intel_sdvo->port = port;
    intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
    intel_sdvo->slave_addr =
        intel_sdvo_get_slave_addr(dev_priv, intel_sdvo) >> 1;
    intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo);
    if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
    if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev_priv))
        goto err_i2c_bus;

    /* encoder type will be decided later */
    intel_encoder = &intel_sdvo->base;
    intel_encoder->type = INTEL_OUTPUT_SDVO;
    intel_encoder->port = port;
    drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
    drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
                     &intel_sdvo_enc_funcs, 0,
                     "SDVO %c", port_name(port));

    /* Read the regs to test if we can talk to the device */

|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
|
||||
struct drm_framebuffer *fb = plane_state->base.fb;
|
||||
const int pipe = intel_plane->pipe;
|
||||
const int plane = intel_plane->plane + 1;
|
||||
enum plane_id plane_id = intel_plane->id;
|
||||
enum pipe pipe = intel_plane->pipe;
|
||||
u32 plane_ctl;
|
||||
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
|
||||
u32 surf_addr = plane_state->main.offset;
|
||||
@ -229,9 +229,9 @@ skl_update_plane(struct drm_plane *drm_plane,
|
||||
plane_ctl |= skl_plane_ctl_rotation(rotation);
|
||||
|
||||
if (key->flags) {
|
||||
I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
|
||||
I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
|
||||
I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
|
||||
I915_WRITE(PLANE_KEYVAL(pipe, plane_id), key->min_value);
|
||||
I915_WRITE(PLANE_KEYMAX(pipe, plane_id), key->max_value);
|
||||
I915_WRITE(PLANE_KEYMSK(pipe, plane_id), key->channel_mask);
|
||||
}
|
||||
|
||||
if (key->flags & I915_SET_COLORKEY_DESTINATION)
|
||||
@ -245,36 +245,36 @@ skl_update_plane(struct drm_plane *drm_plane,
|
||||
crtc_w--;
|
||||
crtc_h--;
|
||||
|
||||
I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
|
||||
I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
|
||||
I915_WRITE(PLANE_SIZE(pipe, plane), (src_h << 16) | src_w);
|
||||
I915_WRITE(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
|
||||
I915_WRITE(PLANE_STRIDE(pipe, plane_id), stride);
|
||||
I915_WRITE(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
|
||||
|
||||
/* program plane scaler */
|
||||
if (plane_state->scaler_id >= 0) {
|
||||
int scaler_id = plane_state->scaler_id;
|
||||
const struct intel_scaler *scaler;
|
||||
|
||||
DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
|
||||
PS_PLANE_SEL(plane));
|
||||
DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n",
|
||||
plane_id, PS_PLANE_SEL(plane_id));
|
||||
|
||||
scaler = &crtc_state->scaler_state.scalers[scaler_id];
|
||||
|
||||
I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
|
||||
PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode);
|
||||
PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
|
||||
I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
|
||||
I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
|
||||
I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id),
|
||||
((crtc_w + 1) << 16)|(crtc_h + 1));
|
||||
|
||||
I915_WRITE(PLANE_POS(pipe, plane), 0);
|
||||
I915_WRITE(PLANE_POS(pipe, plane_id), 0);
|
||||
} else {
|
||||
I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
|
||||
I915_WRITE(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
|
||||
}
|
||||
|
||||
I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
|
||||
I915_WRITE(PLANE_SURF(pipe, plane),
|
||||
I915_WRITE(PLANE_CTL(pipe, plane_id), plane_ctl);
|
||||
I915_WRITE(PLANE_SURF(pipe, plane_id),
|
||||
intel_fb_gtt_offset(fb, rotation) + surf_addr);
|
||||
POSTING_READ(PLANE_SURF(pipe, plane));
|
||||
POSTING_READ(PLANE_SURF(pipe, plane_id));
|
||||
}
|
||||
|
||||
static void
|
||||
@ -283,20 +283,20 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
|
||||
struct drm_device *dev = dplane->dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct intel_plane *intel_plane = to_intel_plane(dplane);
|
||||
const int pipe = intel_plane->pipe;
|
||||
const int plane = intel_plane->plane + 1;
|
||||
enum plane_id plane_id = intel_plane->id;
|
||||
enum pipe pipe = intel_plane->pipe;
|
||||
|
||||
I915_WRITE(PLANE_CTL(pipe, plane), 0);
|
||||
I915_WRITE(PLANE_CTL(pipe, plane_id), 0);
|
||||
|
||||
I915_WRITE(PLANE_SURF(pipe, plane), 0);
|
||||
POSTING_READ(PLANE_SURF(pipe, plane));
|
||||
I915_WRITE(PLANE_SURF(pipe, plane_id), 0);
|
||||
POSTING_READ(PLANE_SURF(pipe, plane_id));
|
||||
}
|
||||
|
||||
static void
|
||||
chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
|
||||
int plane = intel_plane->plane;
|
||||
enum plane_id plane_id = intel_plane->id;
|
||||
|
||||
/* Seems RGB data bypasses the CSC always */
|
||||
if (!format_is_yuv(format))
|
||||
@ -312,23 +312,23 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
|
||||
* Cb and Cr apparently come in as signed already, so no
|
||||
* need for any offset. For Y we need to remove the offset.
|
||||
*/
|
||||
I915_WRITE(SPCSCYGOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
|
||||
I915_WRITE(SPCSCCBOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
|
||||
I915_WRITE(SPCSCCROFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
|
||||
I915_WRITE(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
|
||||
I915_WRITE(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
|
||||
I915_WRITE(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
|
||||
|
||||
I915_WRITE(SPCSCC01(plane), SPCSC_C1(4769) | SPCSC_C0(6537));
|
||||
I915_WRITE(SPCSCC23(plane), SPCSC_C1(-3330) | SPCSC_C0(0));
|
||||
I915_WRITE(SPCSCC45(plane), SPCSC_C1(-1605) | SPCSC_C0(4769));
|
||||
I915_WRITE(SPCSCC67(plane), SPCSC_C1(4769) | SPCSC_C0(0));
|
||||
I915_WRITE(SPCSCC8(plane), SPCSC_C0(8263));
|
||||
I915_WRITE(SPCSCC01(plane_id), SPCSC_C1(4769) | SPCSC_C0(6537));
|
||||
I915_WRITE(SPCSCC23(plane_id), SPCSC_C1(-3330) | SPCSC_C0(0));
|
||||
I915_WRITE(SPCSCC45(plane_id), SPCSC_C1(-1605) | SPCSC_C0(4769));
|
||||
I915_WRITE(SPCSCC67(plane_id), SPCSC_C1(4769) | SPCSC_C0(0));
|
||||
I915_WRITE(SPCSCC8(plane_id), SPCSC_C0(8263));
|
||||
|
||||
I915_WRITE(SPCSCYGICLAMP(plane), SPCSC_IMAX(940) | SPCSC_IMIN(64));
|
||||
I915_WRITE(SPCSCCBICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
|
||||
I915_WRITE(SPCSCCRICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
|
||||
I915_WRITE(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(940) | SPCSC_IMIN(64));
|
||||
I915_WRITE(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
|
||||
I915_WRITE(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
|
||||
|
||||
I915_WRITE(SPCSCYGOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
|
||||
I915_WRITE(SPCSCCBOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
|
||||
I915_WRITE(SPCSCCROCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
|
||||
I915_WRITE(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
|
||||
I915_WRITE(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
|
||||
I915_WRITE(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
|
||||
}
|
||||

 static void
@@ -340,8 +340,8 @@ vlv_update_plane(struct drm_plane *dplane,
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *intel_plane = to_intel_plane(dplane);
	struct drm_framebuffer *fb = plane_state->base.fb;
-	int pipe = intel_plane->pipe;
-	int plane = intel_plane->plane;
+	enum pipe pipe = intel_plane->pipe;
+	enum plane_id plane_id = intel_plane->id;
	u32 sprctl;
	u32 sprsurf_offset, linear_offset;
	unsigned int rotation = plane_state->base.rotation;
@@ -434,9 +434,9 @@ vlv_update_plane(struct drm_plane *dplane,
	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	if (key->flags) {
-		I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
-		I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
-		I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
+		I915_WRITE(SPKEYMINVAL(pipe, plane_id), key->min_value);
+		I915_WRITE(SPKEYMAXVAL(pipe, plane_id), key->max_value);
+		I915_WRITE(SPKEYMSK(pipe, plane_id), key->channel_mask);
	}

	if (key->flags & I915_SET_COLORKEY_SOURCE)
@@ -445,21 +445,21 @@ vlv_update_plane(struct drm_plane *dplane,
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
		chv_update_csc(intel_plane, fb->format->format);

-	I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
-	I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
+	I915_WRITE(SPSTRIDE(pipe, plane_id), fb->pitches[0]);
+	I915_WRITE(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);

	if (fb->modifier == I915_FORMAT_MOD_X_TILED)
-		I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
+		I915_WRITE(SPTILEOFF(pipe, plane_id), (y << 16) | x);
	else
-		I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
+		I915_WRITE(SPLINOFF(pipe, plane_id), linear_offset);

-	I915_WRITE(SPCONSTALPHA(pipe, plane), 0);
+	I915_WRITE(SPCONSTALPHA(pipe, plane_id), 0);

-	I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
-	I915_WRITE(SPCNTR(pipe, plane), sprctl);
-	I915_WRITE(SPSURF(pipe, plane),
+	I915_WRITE(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
+	I915_WRITE(SPCNTR(pipe, plane_id), sprctl);
+	I915_WRITE(SPSURF(pipe, plane_id),
		   intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
-	POSTING_READ(SPSURF(pipe, plane));
+	POSTING_READ(SPSURF(pipe, plane_id));
 }
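Note the write ordering this hunk carefully preserves: the sprite plane registers here are double buffered, and the SPSURF write is what arms the whole update, so it must come last; the POSTING_READ then flushes the posted MMIO write so the update is actually in flight before the function returns. A generic sketch of that arm-then-trigger pattern (offsets and register names are illustrative, not i915's):

#include <linux/io.h>

#define PLANE_POS	0x00	/* illustrative offsets, not real hardware */
#define PLANE_SIZE	0x04
#define PLANE_SURF	0x08

static void plane_flip(void __iomem *regs, u32 pos, u32 size, u32 surf)
{
	writel(pos, regs + PLANE_POS);		/* staged, not yet live */
	writel(size, regs + PLANE_SIZE);	/* staged, not yet live */
	writel(surf, regs + PLANE_SURF);	/* latches everything staged */
	readl(regs + PLANE_SURF);		/* posting read: flush the write */
}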

 static void
@@ -468,13 +468,13 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
	struct drm_device *dev = dplane->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *intel_plane = to_intel_plane(dplane);
-	int pipe = intel_plane->pipe;
-	int plane = intel_plane->plane;
+	enum pipe pipe = intel_plane->pipe;
+	enum plane_id plane_id = intel_plane->id;

-	I915_WRITE(SPCNTR(pipe, plane), 0);
+	I915_WRITE(SPCNTR(pipe, plane_id), 0);

-	I915_WRITE(SPSURF(pipe, plane), 0);
-	POSTING_READ(SPSURF(pipe, plane));
+	I915_WRITE(SPSURF(pipe, plane_id), 0);
+	POSTING_READ(SPSURF(pipe, plane_id));
 }

 static void
@@ -1112,6 +1112,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,

	intel_plane->pipe = pipe;
	intel_plane->plane = plane;
+	intel_plane->id = PLANE_SPRITE0 + plane;
	intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
	intel_plane->check_plane = intel_check_sprite_plane;

@@ -1537,9 +1537,9 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
 };

 void
-intel_tv_init(struct drm_device *dev)
+intel_tv_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct intel_tv *intel_tv;
	struct intel_encoder *intel_encoder;
drivers/gpu/drm/i915/intel_uc.c (new file, 142 lines)
@@ -0,0 +1,142 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_uc.h"
+
+void intel_uc_init_early(struct drm_i915_private *dev_priv)
+{
+	mutex_init(&dev_priv->guc.send_mutex);
+}
+
+/*
+ * Read the GuC command/status register (SOFT_SCRATCH_0).
+ * Return true if it contains a response rather than a command.
+ */
+static bool intel_guc_recv(struct intel_guc *guc, u32 *status)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+	u32 val = I915_READ(SOFT_SCRATCH(0));
+	*status = val;
+	return INTEL_GUC_RECV_IS_RESPONSE(val);
+}
+
+int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	u32 status;
+	int i;
+	int ret;
+
+	if (WARN_ON(len < 1 || len > 15))
+		return -EINVAL;
+
+	mutex_lock(&guc->send_mutex);
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+	dev_priv->guc.action_count += 1;
+	dev_priv->guc.action_cmd = action[0];
+
+	for (i = 0; i < len; i++)
+		I915_WRITE(SOFT_SCRATCH(i), action[i]);
+
+	POSTING_READ(SOFT_SCRATCH(i - 1));
+
+	I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
+
+	/*
+	 * Fast commands should complete in less than 10us, so sample quickly
+	 * up to that length of time, then switch to a slower sleep-wait loop.
+	 * No intel_guc_send command should ever take longer than 10ms.
+	 */
+	ret = wait_for_us(intel_guc_recv(guc, &status), 10);
+	if (ret)
+		ret = wait_for(intel_guc_recv(guc, &status), 10);
+	if (status != INTEL_GUC_STATUS_SUCCESS) {
+		/*
+		 * Either the GuC explicitly returned an error (which
+		 * we convert to -EIO here) or no response at all was
+		 * received within the timeout limit (-ETIMEDOUT).
+		 */
+		if (ret != -ETIMEDOUT)
+			ret = -EIO;
+
+		DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;"
+			 " ret=%d status=0x%08X response=0x%08X\n",
+			 action[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
+
+		dev_priv->guc.action_fail += 1;
+		dev_priv->guc.action_err = ret;
+	}
+	dev_priv->guc.action_status = status;
+
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	mutex_unlock(&guc->send_mutex);
+
+	return ret;
+}
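The two-stage wait above is worth calling out: wait_for_us() polls quickly (cheap for the sub-10us common case) while wait_for() sleeps between polls (kind to the scheduler for the rare slow command). A loose userspace model of the idea, assuming a done() predicate; the real i915 macros bound the wait with ktime and behave differently under preemption, so treat this purely as a sketch:

#include <stdbool.h>
#include <time.h>

static bool two_stage_wait(bool (*done)(void))
{
	struct timespec us = { .tv_nsec = 1000 };		/* ~1 us */
	struct timespec ms = { .tv_nsec = 1000 * 1000 };	/* ~1 ms */
	int i;

	for (i = 0; i < 10; i++) {	/* stage 1: ~10us of quick polling */
		if (done())
			return true;
		nanosleep(&us, NULL);
	}
	for (i = 0; i < 10; i++) {	/* stage 2: up to ~10ms, sleeping */
		if (done())
			return true;
		nanosleep(&ms, NULL);
	}
	return false;
}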
+
+int intel_guc_sample_forcewake(struct intel_guc *guc)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	u32 action[2];
+
+	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
+	/* WaRsDisableCoarsePowerGating:skl,bxt */
+	if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
+		action[1] = 0;
+	else
+		/* bit 0 and 1 are for Render and Media domain separately */
+		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
+
+	return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+int intel_guc_log_flush_complete(struct intel_guc *guc)
+{
+	u32 action[] = { INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE };
+
+	return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+int intel_guc_log_flush(struct intel_guc *guc)
+{
+	u32 action[] = {
+		INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
+		0
+	};
+
+	return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+int intel_guc_log_control(struct intel_guc *guc, u32 control_val)
+{
+	u32 action[] = {
+		INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
+		control_val
+	};
+
+	return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
@@ -21,13 +21,15 @@
 * IN THE SOFTWARE.
 *
 */
-#ifndef _INTEL_GUC_H_
-#define _INTEL_GUC_H_
+#ifndef _INTEL_UC_H_
+#define _INTEL_UC_H_

 #include "intel_guc_fwif.h"
 #include "i915_guc_reg.h"
 #include "intel_ringbuffer.h"
+
+#include "i915_vma.h"

 struct drm_i915_gem_request;

 /*
@@ -74,7 +76,7 @@ struct i915_guc_client {
	uint32_t proc_desc_offset;

	uint32_t doorbell_offset;
-	uint32_t cookie;
+	uint32_t doorbell_cookie;
	uint16_t doorbell_id;
	uint16_t padding[3];		/* Maintain alignment */

@@ -103,7 +105,6 @@ enum intel_guc_fw_status {
 * of fetching, caching, and loading the firmware image into the GuC.
 */
 struct intel_guc_fw {
-	struct drm_device *guc_dev;
	const char *guc_fw_path;
	size_t guc_fw_size;
	struct drm_i915_gem_object *guc_fw_obj;
@@ -143,7 +144,7 @@ struct intel_guc {
	struct intel_guc_fw guc_fw;
	struct intel_guc_log log;

-	/* GuC2Host interrupt related state */
+	/* intel_guc_recv interrupt related state */
	bool interrupts_enabled;

	struct i915_vma *ads_vma;
@@ -165,17 +166,25 @@ struct intel_guc {
	uint64_t submissions[I915_NUM_ENGINES];
	uint32_t last_seqno[I915_NUM_ENGINES];

-	/* To serialize the Host2GuC actions */
-	struct mutex action_lock;
+	/* To serialize the intel_guc_send actions */
+	struct mutex send_mutex;
 };

+/* intel_uc.c */
+void intel_uc_init_early(struct drm_i915_private *dev_priv);
+int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len);
+int intel_guc_sample_forcewake(struct intel_guc *guc);
+int intel_guc_log_flush_complete(struct intel_guc *guc);
+int intel_guc_log_flush(struct intel_guc *guc);
+int intel_guc_log_control(struct intel_guc *guc, u32 control_val);
+
 /* intel_guc_loader.c */
-extern void intel_guc_init(struct drm_device *dev);
-extern int intel_guc_setup(struct drm_device *dev);
-extern void intel_guc_fini(struct drm_device *dev);
+extern void intel_guc_init(struct drm_i915_private *dev_priv);
+extern int intel_guc_setup(struct drm_i915_private *dev_priv);
+extern void intel_guc_fini(struct drm_i915_private *dev_priv);
 extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status);
-extern int intel_guc_suspend(struct drm_device *dev);
-extern int intel_guc_resume(struct drm_device *dev);
+extern int intel_guc_suspend(struct drm_i915_private *dev_priv);
+extern int intel_guc_resume(struct drm_i915_private *dev_priv);

 /* i915_guc_submission.c */
 int i915_guc_submission_init(struct drm_i915_private *dev_priv);
@@ -190,4 +199,12 @@ void i915_guc_register(struct drm_i915_private *dev_priv);
 void i915_guc_unregister(struct drm_i915_private *dev_priv);
 int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);

+static inline u32 guc_ggtt_offset(struct i915_vma *vma)
+{
+	u32 offset = i915_ggtt_offset(vma);
+	GEM_BUG_ON(offset < GUC_WOPCM_TOP);
+	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
+	return offset;
+}
+
 #endif
@@ -421,8 +421,7 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
			   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

-	/* Enable Decoupled MMIO only on BXT C stepping onwards */
-	if (!IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST))
		info->has_decoupled_mmio = false;

	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
@@ -626,7 +625,14 @@ find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
			dev_priv->uncore.fw_domains_table_entries,
			fw_range_cmp);

-	return entry ? entry->domains : 0;
+	if (!entry)
+		return 0;
+
+	WARN(entry->domains & ~dev_priv->uncore.fw_domains,
+	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
+	     entry->domains & ~dev_priv->uncore.fw_domains, offset);
+
+	return entry->domains;
 }

 static void
@@ -1813,7 +1819,7 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
		return ironlake_do_reset;
	else if (IS_G4X(dev_priv))
		return g4x_do_reset;
-	else if (IS_G33(dev_priv))
+	else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
		return g33_do_reset;
	else if (INTEL_INFO(dev_priv)->gen >= 3)
		return i915_do_reset;
@@ -399,10 +399,12 @@ struct lvds_dvo_timing {
	u8 vblank_hi:4;
	u8 vactive_hi:4;
	u8 hsync_off_lo;
-	u8 hsync_pulse_width;
-	u8 vsync_pulse_width:4;
-	u8 vsync_off:4;
-	u8 rsvd0:6;
+	u8 hsync_pulse_width_lo;
+	u8 vsync_pulse_width_lo:4;
+	u8 vsync_off_lo:4;
+	u8 vsync_pulse_width_hi:2;
+	u8 vsync_off_hi:2;
+	u8 hsync_pulse_width_hi:2;
	u8 hsync_off_hi:2;
	u8 himage_lo;
	u8 vimage_lo;
@@ -414,7 +416,7 @@ struct lvds_dvo_timing {
	u8 digital:2;
	u8 vsync_positive:1;
	u8 hsync_positive:1;
-	u8 rsvd2:1;
+	u8 non_interlaced:1;
 } __packed;

 struct lvds_pnp_id {
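The point of the split is that the VBT packs a 10-bit hsync pulse width and a 6-bit vsync pulse width across byte boundaries; the old single-byte hsync_pulse_width field silently dropped the high bits. Consumers stitch the halves back together along these lines (helper names illustrative; the real readers live in intel_bios.c):

static u16 dvo_hsync_pulse_width(const struct lvds_dvo_timing *t)
{
	/* 8 low bits + 2 high bits = 10-bit value */
	return ((u16)t->hsync_pulse_width_hi << 8) | t->hsync_pulse_width_lo;
}

static u8 dvo_vsync_pulse_width(const struct lvds_dvo_timing *t)
{
	/* 4 low bits + 2 high bits = 6-bit value */
	return (t->vsync_pulse_width_hi << 4) | t->vsync_pulse_width_lo;
}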
@@ -226,23 +226,18 @@
	INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
	INTEL_VGA_DEVICE(0x162D, info)  /* Workstation */

-#define INTEL_BDW_RSVDM_IDS(info) \
+#define INTEL_BDW_RSVD_IDS(info) \
	INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \
	INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \
	INTEL_VGA_DEVICE(0x163B, info), /* Iris */ \
-	INTEL_VGA_DEVICE(0x163E, info)  /* ULX */
-
-#define INTEL_BDW_RSVDD_IDS(info) \
+	INTEL_VGA_DEVICE(0x163E, info), /* ULX */ \
	INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
	INTEL_VGA_DEVICE(0x163D, info)  /* Workstation */

 #define INTEL_BDW_IDS(info) \
-	INTEL_BDW_GT12_IDS(info), \
-	INTEL_BDW_GT3_IDS(info), \
-	INTEL_BDW_RSVDM_IDS(info), \
-	INTEL_BDW_GT12_IDS(info), \
-	INTEL_BDW_GT3_IDS(info), \
-	INTEL_BDW_RSVDD_IDS(info)
+	INTEL_BDW_GT12_IDS(info), \
+	INTEL_BDW_GT3_IDS(info), \
+	INTEL_BDW_RSVD_IDS(info)

 #define INTEL_CHV_IDS(info) \
	INTEL_VGA_DEVICE(0x22b0, info), \
@@ -270,14 +265,14 @@
	INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
	INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
	INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \
-	INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
-	INTEL_VGA_DEVICE(0x192A, info)  /* SRV GT3 */
+	INTEL_VGA_DEVICE(0x192B, info)  /* Halo GT3 */

 #define INTEL_SKL_GT4_IDS(info) \
	INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \
	INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4 */ \
	INTEL_VGA_DEVICE(0x193D, info), /* WKS GT4 */ \
-	INTEL_VGA_DEVICE(0x193A, info)  /* SRV GT4 */
+	INTEL_VGA_DEVICE(0x192A, info), /* SRV GT4 */ \
+	INTEL_VGA_DEVICE(0x193A, info)  /* SRV GT4e */

 #define INTEL_SKL_IDS(info) \
	INTEL_SKL_GT1_IDS(info), \
@@ -292,6 +287,10 @@
	INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \
	INTEL_VGA_DEVICE(0x5A85, info)  /* APL HD Graphics 500 */

+#define INTEL_GLK_IDS(info) \
+	INTEL_VGA_DEVICE(0x3184, info), \
+	INTEL_VGA_DEVICE(0x3185, info)
+
 #define INTEL_KBL_GT1_IDS(info) \
	INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \
	INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \
@@ -3,8 +3,10 @@
 #ifndef _DRM_INTEL_GTT_H
 #define _DRM_INTEL_GTT_H

-void intel_gtt_get(u64 *gtt_total, size_t *stolen_size,
-		   phys_addr_t *mappable_base, u64 *mappable_end);
+void intel_gtt_get(u64 *gtt_total,
+		   u32 *stolen_size,
+		   phys_addr_t *mappable_base,
+		   u64 *mappable_end);

 int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
		     struct agp_bridge_data *bridge);
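The stolen_size out-parameter narrows from size_t to u32 here: stolen memory sits well below 4 GiB, so a fixed-width type keeps the prototype identical across 32- and 64-bit builds. A minimal sketch of a caller updated for the new signature:

	u64 gtt_total, mappable_end;
	u32 stolen_size;
	phys_addr_t mappable_base;

	/* Query the GMCH GTT geometry with the new fixed-width out-params. */
	intel_gtt_get(&gtt_total, &stolen_size, &mappable_base, &mappable_end);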
@@ -258,6 +258,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_GEM_USERPTR		0x33
 #define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
 #define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
+#define DRM_I915_PERF_OPEN		0x36

 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -311,6 +312,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_USERPTR		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
 #define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
 #define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
+#define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)

 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -1224,9 +1226,142 @@ struct drm_i915_gem_context_param {
 #define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
 #define I915_CONTEXT_PARAM_GTT_SIZE	0x3
 #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
+#define I915_CONTEXT_PARAM_BANNABLE	0x5
	__u64 value;
 };

+enum drm_i915_oa_format {
+	I915_OA_FORMAT_A13 = 1,
+	I915_OA_FORMAT_A29,
+	I915_OA_FORMAT_A13_B8_C8,
+	I915_OA_FORMAT_B4_C8,
+	I915_OA_FORMAT_A45_B8_C8,
+	I915_OA_FORMAT_B4_C8_A16,
+	I915_OA_FORMAT_C4_B8,
+
+	I915_OA_FORMAT_MAX	/* non-ABI */
+};
+
+enum drm_i915_perf_property_id {
+	/**
+	 * Open the stream for a specific context handle (as used with
+	 * execbuffer2). A stream opened for a specific context this way
+	 * won't typically require root privileges.
+	 */
+	DRM_I915_PERF_PROP_CTX_HANDLE = 1,
+
+	/**
+	 * A value of 1 requests the inclusion of raw OA unit reports as
+	 * part of stream samples.
+	 */
+	DRM_I915_PERF_PROP_SAMPLE_OA,
+
+	/**
+	 * The value specifies which set of OA unit metrics should be
+	 * configured, defining the contents of any OA unit reports.
+	 */
+	DRM_I915_PERF_PROP_OA_METRICS_SET,
+
+	/**
+	 * The value specifies the size and layout of OA unit reports.
+	 */
+	DRM_I915_PERF_PROP_OA_FORMAT,
+
+	/**
+	 * Specifying this property implicitly requests periodic OA unit
+	 * sampling and (at least on Haswell) the sampling frequency is derived
+	 * from this exponent as follows:
+	 *
+	 *   80ns * 2^(period_exponent + 1)
+	 */
+	DRM_I915_PERF_PROP_OA_EXPONENT,
+
+	DRM_I915_PERF_PROP_MAX /* non-ABI */
+};
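To make the exponent mapping concrete, here is the arithmetic as a helper (a sketch matching the comment above; exponent 0 gives 160ns between reports, exponent 15 roughly 5.2ms):

static inline unsigned long long oa_exponent_to_ns(unsigned int exponent)
{
	return 80ull << (exponent + 1);	/* 80ns * 2^(exponent + 1) */
}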
+
+struct drm_i915_perf_open_param {
+	__u32 flags;
+#define I915_PERF_FLAG_FD_CLOEXEC	(1<<0)
+#define I915_PERF_FLAG_FD_NONBLOCK	(1<<1)
+#define I915_PERF_FLAG_DISABLED		(1<<2)
+
+	/** The number of u64 (id, value) pairs */
+	__u32 num_properties;
+
+	/**
+	 * Pointer to array of u64 (id, value) pairs configuring the stream
+	 * to open.
+	 */
+	__u64 properties_ptr;
+};
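Putting the open parameters together, a minimal userspace sketch of opening an OA stream might look like the following; error handling is elided and the metrics-set ID is a placeholder that would normally be discovered at runtime:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int open_oa_stream(int drm_fd)
{
	uint64_t properties[] = {
		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
		DRM_I915_PERF_PROP_OA_METRICS_SET, 1,	/* placeholder ID */
		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A45_B8_C8,
		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
	};
	struct drm_i915_perf_open_param param = {
		.flags = I915_PERF_FLAG_FD_CLOEXEC | I915_PERF_FLAG_FD_NONBLOCK,
		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
		.properties_ptr = (uintptr_t)properties,
	};

	return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);	/* stream fd */
}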
+
+/**
+ * Enable data capture for a stream that was either opened in a disabled state
+ * via I915_PERF_FLAG_DISABLED or was later disabled via
+ * I915_PERF_IOCTL_DISABLE.
+ *
+ * It is intended to be cheaper to disable and enable a stream than it may be
+ * to close and re-open a stream with the same configuration.
+ *
+ * It's undefined whether any pending data for the stream will be lost.
+ */
+#define I915_PERF_IOCTL_ENABLE	_IO('i', 0x0)
+
+/**
+ * Disable data capture for a stream.
+ *
+ * It is an error to try and read a stream that is disabled.
+ */
+#define I915_PERF_IOCTL_DISABLE	_IO('i', 0x1)
+
+/**
+ * Common to all i915 perf records
+ */
+struct drm_i915_perf_record_header {
+	__u32 type;
+	__u16 pad;
+	__u16 size;
+};
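Because every record begins with this header, a reader can walk a read() buffer generically. A sketch (real code should also check that each record's size covers its expected payload):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <drm/i915_drm.h>

static void drain_records(int stream_fd)
{
	uint8_t buf[4096];
	ssize_t len = read(stream_fd, buf, sizeof(buf));
	size_t off = 0;

	while (len > 0 &&
	       off + sizeof(struct drm_i915_perf_record_header) <= (size_t)len) {
		const struct drm_i915_perf_record_header *hdr =
			(const void *)(buf + off);

		if (hdr->size < sizeof(*hdr))
			break;	/* malformed record; stop */
		if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE)
			printf("sample record, %u bytes\n", hdr->size);
		off += hdr->size;	/* size includes header + payload */
	}
}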
+
+enum drm_i915_perf_record_type {
+
+	/**
+	 * Samples are the workhorse record type whose contents are extensible
+	 * and defined when opening an i915 perf stream based on the given
+	 * properties.
+	 *
+	 * Boolean properties following the naming convention
+	 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
+	 * every sample.
+	 *
+	 * The order of these sample properties given by userspace has no
+	 * effect on the ordering of data within a sample. The order is
+	 * documented here.
+	 *
+	 * struct {
+	 *     struct drm_i915_perf_record_header header;
+	 *
+	 *     { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
+	 * };
+	 */
+	DRM_I915_PERF_RECORD_SAMPLE = 1,
+
+	/*
+	 * Indicates that one or more OA reports were not written by the
+	 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
+	 * command collides with periodic sampling - which would be more likely
+	 * at higher sampling frequencies.
+	 */
+	DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
+
+	/**
+	 * An error occurred that resulted in all pending OA reports being lost.
+	 */
+	DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
+
+	DRM_I915_PERF_RECORD_MAX /* non-ABI */
+};

 #if defined(__cplusplus)
 }
 #endif