
Merge tag 'drm-intel-next-2017-11-17-1' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

More change sets for 4.16:

- Many improvements for selftests and other igt tests (Chris)
- Forcewake with PUNIT->PMIC bus fixes and robustness (Hans)
- Define an engine class for uABI (Tvrtko)
- Context switch fixes and improvements (Chris)
- GT powersavings and power gating simplification and fixes (Chris)
- Other general driver clean-ups (Chris, Lucas, Ville)
- Removing old, useless and/or bad workarounds (Chris, Oscar, Radhakrishna)
- IPS, pipe config, etc in preparation for another Fast Boot attempt (Maarten)
- OA perf fixes and support for Coffee Lake and Cannonlake (Lionel)
- Fixes around GPU fault registers (Michel)
- GEM Proxy (Tina)
- Refactor of Geminilake and Cannonlake plane color handling (James)
- Generalize transcoder loop (Mika Kahola)
- New HW Workaround for Cannonlake and Geminilake (Rodrigo)
- Resume GuC before using GEM (Chris)
- Stolen Memory handling improvements (Ville)
- Initialize entry in PPAT for older compilers (Chris)
- Other fixes and robustness improvements on execbuf (Chris)
- Improve logs of GEM_BUG_ON (Mika Kuoppala)
- Rework with massive rename of GuC functions and files (Sagar)
- Don't sanitize frame start delay if pipe is off (Ville)
- Cannonlake clock fixes (Rodrigo)
- Cannonlake HDMI 2.0 support (Rodrigo)
- Add a GuC doorbells selftest (Michel)
- Add might_sleep() check to our wait_for() (Chris)

Many GVT changes for 4.16:

- CSB HWSP update support (Weinan)
- GVT debug helpers, dyndbg and debugfs (Chuanxiao, Shuo)
- fully virtualized opregion (Xiaolin)
- VM health check for sane fallback (Fred)
- workload submission code refactor for future enabling (Zhi)
- Updated repo URL in MAINTAINERS (Zhenyu)
- many other misc fixes

* tag 'drm-intel-next-2017-11-17-1' of git://anongit.freedesktop.org/drm/drm-intel: (260 commits)
  drm/i915: Update DRIVER_DATE to 20171117
  drm/i915: Add a policy note for removing workarounds
  drm/i915/selftests: Report ENOMEM clearly for an allocation failure
  Revert "drm/i915: Display WA #1133 WaFbcSkipSegments:cnl, glk"
  drm/i915: Calculate g4x intermediate watermarks correctly
  drm/i915: Calculate vlv/chv intermediate watermarks correctly, v3.
  drm/i915: Pass crtc_state to ips toggle functions, v2
  drm/i915: Pass idle crtc_state to intel_dp_sink_crc
  drm/i915: Enable FIFO underrun reporting after initial fastset, v4.
  drm/i915: Mark the userptr invalidate workqueue as WQ_MEM_RECLAIM
  drm/i915: Add might_sleep() check to wait_for()
  drm/i915/selftests: Add a GuC doorbells selftest
  drm/i915/cnl: Extend HDMI 2.0 support to CNL.
  drm/i915/cnl: Simplify dco_fraction calculation.
  drm/i915/cnl: Don't blindly replace qdiv.
  drm/i915/cnl: Fix wrpll math for higher freqs.
  drm/i915/cnl: Fix, simplify and unify wrpll variable sizes.
  drm/i915/cnl: Remove useless conversion.
  drm/i915/cnl: Remove spurious central_freq.
  drm/i915/selftests: exercise_ggtt may have nothing to do
  ...
Committed by Dave Airlie on 2017-12-04 09:40:35 +10:00 as commit ca797d29cd.
129 changed files with 6554 additions and 3182 deletions.


@ -350,10 +350,10 @@ GuC-specific firmware loader
GuC-based command submission
----------------------------
.. kernel-doc:: drivers/gpu/drm/i915/i915_guc_submission.c
.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_submission.c
:doc: GuC-based command submission
.. kernel-doc:: drivers/gpu/drm/i915/i915_guc_submission.c
.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_submission.c
:internal:
GuC Firmware Layout


@ -7030,7 +7030,7 @@ M: Zhi Wang <zhi.a.wang@intel.com>
L: intel-gvt-dev@lists.freedesktop.org
L: intel-gfx@lists.freedesktop.org
W: https://01.org/igvt-g
T: git https://github.com/01org/gvt-linux.git
T: git https://github.com/intel/gvt-linux.git
S: Supported
F: drivers/gpu/drm/i915/gvt/


@ -146,6 +146,18 @@ int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb);
*/
int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb);
/**
* iosf_mbi_unregister_pmic_bus_access_notifier_unlocked - Unregister PMIC bus
* notifier, unlocked
*
* Like iosf_mbi_unregister_pmic_bus_access_notifier(), but for use when the
* caller has already called iosf_mbi_punit_acquire() itself.
*
* @nb: notifier_block to unregister
*/
int iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
struct notifier_block *nb);
/**
* iosf_mbi_call_pmic_bus_access_notifier_chain - Call PMIC bus notifier chain
*
@ -154,6 +166,11 @@ int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb);
*/
int iosf_mbi_call_pmic_bus_access_notifier_chain(unsigned long val, void *v);
/**
* iosf_mbi_assert_punit_acquired - Assert that the P-Unit has been acquired.
*/
void iosf_mbi_assert_punit_acquired(void);
#else /* CONFIG_IOSF_MBI is not enabled */
static inline
bool iosf_mbi_available(void)
@ -197,12 +214,20 @@ int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
return 0;
}
static inline int
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(struct notifier_block *nb)
{
return 0;
}
static inline
int iosf_mbi_call_pmic_bus_access_notifier_chain(unsigned long val, void *v)
{
return 0;
}
static inline void iosf_mbi_assert_punit_acquired(void) {}
#endif /* CONFIG_IOSF_MBI */
#endif /* IOSF_MBI_SYMS_H */
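A minimal usage sketch of the unlocked unregister path documented above; the notifier and the teardown function here are hypothetical, not part of this series, and assume the caller already owns the P-Unit semaphore via iosf_mbi_punit_acquire():

#include <asm/iosf_mbi.h>

static struct notifier_block example_nb;	/* hypothetical notifier */

static void example_teardown(void)
{
	iosf_mbi_punit_acquire();
	/* We already hold the P-Unit, so the unlocked variant must be used
	 * to avoid taking the P-Unit mutex twice. */
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(&example_nb);
	iosf_mbi_assert_punit_acquired();	/* new sanity helper from above */
	iosf_mbi_punit_release();
}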


@ -218,14 +218,23 @@ int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(iosf_mbi_register_pmic_bus_access_notifier);
int iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
struct notifier_block *nb)
{
iosf_mbi_assert_punit_acquired();
return blocking_notifier_chain_unregister(
&iosf_mbi_pmic_bus_access_notifier, nb);
}
EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier_unlocked);
int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
{
int ret;
/* Wait for the bus to go inactive before unregistering */
mutex_lock(&iosf_mbi_punit_mutex);
ret = blocking_notifier_chain_unregister(
&iosf_mbi_pmic_bus_access_notifier, nb);
ret = iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(nb);
mutex_unlock(&iosf_mbi_punit_mutex);
return ret;
@ -239,6 +248,12 @@ int iosf_mbi_call_pmic_bus_access_notifier_chain(unsigned long val, void *v)
}
EXPORT_SYMBOL(iosf_mbi_call_pmic_bus_access_notifier_chain);
void iosf_mbi_assert_punit_acquired(void)
{
WARN_ON(!mutex_is_locked(&iosf_mbi_punit_mutex));
}
EXPORT_SYMBOL(iosf_mbi_assert_punit_acquired);
#ifdef CONFIG_IOSF_MBI_DEBUG
static u32 dbg_mdr;
static u32 dbg_mcr;


@ -28,6 +28,7 @@ config DRM_I915_DEBUG
select SW_SYNC # signaling validation framework (igt/syncobj*)
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
select DRM_I915_TRACE_GEM
default n
help
Choose this option to turn on extra driver debugging that may affect
@ -49,6 +50,19 @@ config DRM_I915_DEBUG_GEM
If in doubt, say "N".
config DRM_I915_TRACE_GEM
bool "Insert extra ftrace output from the GEM internals"
select TRACING
default n
help
Enable additional and verbose debugging output that will spam
ordinary tests, but may be vital for post-mortem debugging when
used with /proc/sys/kernel/ftrace_dump_on_oops
Recommended for driver developers only.
If in doubt, say "N".
config DRM_I915_SW_FENCE_DEBUG_OBJECTS
bool "Enable additional driver debugging for fence objects"
depends on DRM_I915
@ -90,6 +104,20 @@ config DRM_I915_SELFTEST
If in doubt, say "N".
config DRM_I915_SELFTEST_BROKEN
bool "Enable broken and dangerous selftests"
depends on DRM_I915_SELFTEST
depends on BROKEN
default n
help
This option enables the execution of selftests that are "dangerous"
and may trigger unintended HW side-effects as they break strict
rules given in the HW specification. For science.
Recommended for masochistic driver developers only.
If in doubt, say "N".
config DRM_I915_LOW_LEVEL_TRACEPOINTS
bool "Enable low level request tracing events"
depends on DRM_I915


@ -3,7 +3,26 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
# Add a set of useful warning flags and enable -Werror for CI to prevent
# trivial mistakes from creeping in. We have to do this piecemeal as we reject
# any patch that isn't warning clean, so turning on -Wall -Wextra (or W=1) we
# need to filter out dubious warnings. Still it is our interest
# to keep running locally with W=1 C=1 until we are completely clean.
#
# Note the danger in using -Wall -Wextra is that when CI updates gcc we
# will most likely get a sudden build breakage... Hopefully we will fix
# new warnings before CI updates!
subdir-ccflags-y := -Wall -Wextra
subdir-ccflags-y += $(call cc-disable-warning, unused-parameter)
subdir-ccflags-y += $(call cc-disable-warning, type-limits)
subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough)
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
# Fine grained warnings disable
CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init)
CFLAGS_intel_fbdev.o = $(call cc-disable-warning, override-init)
subdir-ccflags-y += \
$(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
@ -64,10 +83,10 @@ i915-y += intel_uc.o \
intel_uc_fw.o \
intel_guc.o \
intel_guc_ct.o \
intel_guc_log.o \
intel_guc_fw.o \
intel_huc.o \
i915_guc_submission.o
intel_guc_log.o \
intel_guc_submission.o \
intel_huc.o
# autogenerated null render state
i915-y += intel_renderstate_gen6.o \
@ -144,7 +163,9 @@ i915-y += i915_perf.o \
i915_oa_kblgt2.o \
i915_oa_kblgt3.o \
i915_oa_glk.o \
i915_oa_cflgt2.o
i915_oa_cflgt2.o \
i915_oa_cflgt3.o \
i915_oa_cnl.o
ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o


@ -2,7 +2,7 @@
GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
execlist.o scheduler.o sched_policy.o render.o cmd_parser.o
execlist.o scheduler.o sched_policy.o render.o cmd_parser.o debugfs.o
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))


@ -208,6 +208,20 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
return 0;
}
static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
u32 *pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);
u32 new = *(u32 *)(p_data);
if ((new & PCI_ROM_ADDRESS_MASK) == PCI_ROM_ADDRESS_MASK)
/* We don't have rom, return size of 0. */
*pval = 0;
else
vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
return 0;
}
static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
@ -300,6 +314,11 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
}
switch (rounddown(offset, 4)) {
case PCI_ROM_ADDRESS:
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes);
case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
@ -375,6 +394,8 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
pci_resource_len(gvt->dev_priv->drm.pdev, 0);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
pci_resource_len(gvt->dev_priv->drm.pdev, 2);
memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
}
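For context, a sketch of the standard PCI ROM BAR sizing handshake that emulate_pci_rom_bar_write() above services: the guest writes all address bits as 1s and reads the register back, and a read-back of 0 (what the vGPU returns) means "no expansion ROM". The helper below is an illustration only, not code from this series.

#include <linux/pci.h>

static u32 example_probe_rom_size(struct pci_dev *pdev)
{
	u32 orig, sized;

	pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &orig);
	pci_write_config_dword(pdev, PCI_ROM_ADDRESS, (u32)PCI_ROM_ADDRESS_MASK);
	pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &sized);
	pci_write_config_dword(pdev, PCI_ROM_ADDRESS, orig);	/* restore */

	sized &= (u32)PCI_ROM_ADDRESS_MASK;
	return sized ? ~sized + 1 : 0;	/* 0: no ROM, as the vGPU reports */
}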
/**


@ -709,18 +709,13 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
print_opcode(cmd_val(s, 0), s->ring_id);
/* print the whole page to trace */
pr_err(" ip_va=%p: %08x %08x %08x %08x\n",
s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
cmd_val(s, 2), cmd_val(s, 3));
s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
while (cnt < 1024) {
pr_err("ip_va=%p: ", s->ip_va);
gvt_dbg_cmd("ip_va=%p: ", s->ip_va);
for (i = 0; i < 8; i++)
pr_err("%08x ", cmd_val(s, i));
pr_err("\n");
gvt_dbg_cmd("%08x ", cmd_val(s, i));
gvt_dbg_cmd("\n");
s->ip_va += 8 * sizeof(u32);
cnt += 8;
@ -825,7 +820,7 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
offset, data);
return -EINVAL;
return -EPERM;
}
return 0;
}
@ -839,7 +834,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
if (offset + 4 > gvt->device_info.mmio_size) {
gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
cmd, offset);
return -EINVAL;
return -EFAULT;
}
if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
@ -854,8 +849,8 @@ static int cmd_reg_handler(struct parser_exec_state *s,
}
if (is_force_nonpriv_mmio(offset) &&
force_nonpriv_reg_handler(s, offset, index))
return -EINVAL;
force_nonpriv_reg_handler(s, offset, index))
return -EPERM;
if (offset == i915_mmio_reg_offset(DERRMR) ||
offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
@ -894,11 +889,14 @@ static int cmd_handler_lri(struct parser_exec_state *s)
i915_mmio_reg_offset(DERRMR))
ret |= 0;
else
ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
ret |= (cmd_reg_inhibit(s, i)) ?
-EBADRQC : 0;
}
if (ret)
break;
ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
if (ret)
break;
}
return ret;
}
@ -912,11 +910,15 @@ static int cmd_handler_lrr(struct parser_exec_state *s)
if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
ret |= ((cmd_reg_inhibit(s, i) ||
(cmd_reg_inhibit(s, i + 1)))) ?
-EINVAL : 0;
-EBADRQC : 0;
if (ret)
break;
ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
if (ret)
break;
ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
if (ret)
break;
}
return ret;
}
@ -934,15 +936,19 @@ static int cmd_handler_lrm(struct parser_exec_state *s)
for (i = 1; i < cmd_len;) {
if (IS_BROADWELL(gvt->dev_priv))
ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
if (ret)
break;
ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
if (ret)
break;
if (cmd_val(s, 0) & (1 << 22)) {
gma = cmd_gma(s, i + 1);
if (gmadr_bytes == 8)
gma |= (cmd_gma_hi(s, i + 2)) << 32;
ret |= cmd_address_audit(s, gma, sizeof(u32), false);
if (ret)
break;
}
i += gmadr_dw_number(s) + 1;
}
@ -958,11 +964,15 @@ static int cmd_handler_srm(struct parser_exec_state *s)
for (i = 1; i < cmd_len;) {
ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
if (ret)
break;
if (cmd_val(s, 0) & (1 << 22)) {
gma = cmd_gma(s, i + 1);
if (gmadr_bytes == 8)
gma |= (cmd_gma_hi(s, i + 2)) << 32;
ret |= cmd_address_audit(s, gma, sizeof(u32), false);
if (ret)
break;
}
i += gmadr_dw_number(s) + 1;
}
@ -1116,7 +1126,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
v = (dword0 & GENMASK(21, 19)) >> 19;
if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
return -EINVAL;
return -EBADRQC;
info->pipe = gen8_plane_code[v].pipe;
info->plane = gen8_plane_code[v].plane;
@ -1136,7 +1146,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
info->surf_reg = SPRSURF(info->pipe);
} else {
WARN_ON(1);
return -EINVAL;
return -EBADRQC;
}
return 0;
}
@ -1185,7 +1195,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
default:
gvt_vgpu_err("unknown plane code %d\n", plane);
return -EINVAL;
return -EBADRQC;
}
info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
@ -1348,10 +1358,13 @@ static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
{
unsigned long addr;
unsigned long gma_high, gma_low;
int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
struct intel_vgpu *vgpu = s->vgpu;
int gmadr_bytes = vgpu->gvt->device_info.gmadr_bytes_in_cmd;
if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) {
gvt_vgpu_err("invalid gma bytes %d\n", gmadr_bytes);
return INTEL_GVT_INVALID_ADDR;
}
gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
if (gmadr_bytes == 4) {
@ -1374,16 +1387,16 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
if (op_size > max_surface_size) {
gvt_vgpu_err("command address audit fail name %s\n",
s->info->name);
return -EINVAL;
return -EFAULT;
}
if (index_mode) {
if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) {
ret = -EINVAL;
if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
ret = -EFAULT;
goto err;
}
} else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
ret = -EINVAL;
ret = -EFAULT;
goto err;
}
@ -1439,7 +1452,7 @@ static inline int unexpected_cmd(struct parser_exec_state *s)
gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
return -EINVAL;
return -EBADRQC;
}
static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
@ -1545,10 +1558,10 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
return -EFAULT;
}
offset = gma & (GTT_PAGE_SIZE - 1);
offset = gma & (I915_GTT_PAGE_SIZE - 1);
copy_len = (end_gma - gma) >= (GTT_PAGE_SIZE - offset) ?
GTT_PAGE_SIZE - offset : end_gma - gma;
copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
I915_GTT_PAGE_SIZE - offset : end_gma - gma;
intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
@ -1576,110 +1589,113 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
return 1;
}
static int find_bb_size(struct parser_exec_state *s)
static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
{
unsigned long gma = 0;
struct cmd_info *info;
int bb_size = 0;
uint32_t cmd_len = 0;
bool met_bb_end = false;
bool bb_end = false;
struct intel_vgpu *vgpu = s->vgpu;
u32 cmd;
*bb_size = 0;
/* get the start gm address of the batch buffer */
gma = get_gma_bb_from_cmd(s, 1);
cmd = cmd_val(s, 0);
if (gma == INTEL_GVT_INVALID_ADDR)
return -EFAULT;
cmd = cmd_val(s, 0);
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
return -EINVAL;
return -EBADRQC;
}
do {
copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
gma, gma + 4, &cmd);
if (copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
gma, gma + 4, &cmd) < 0)
return -EFAULT;
info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
if (info == NULL) {
gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
return -EINVAL;
return -EBADRQC;
}
if (info->opcode == OP_MI_BATCH_BUFFER_END) {
met_bb_end = true;
bb_end = true;
} else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) {
if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)
/* chained batch buffer */
met_bb_end = true;
}
bb_end = true;
}
cmd_len = get_cmd_length(info, cmd) << 2;
bb_size += cmd_len;
*bb_size += cmd_len;
gma += cmd_len;
} while (!bb_end);
} while (!met_bb_end);
return bb_size;
return 0;
}
static int perform_bb_shadow(struct parser_exec_state *s)
{
struct intel_shadow_bb_entry *entry_obj;
struct intel_vgpu *vgpu = s->vgpu;
struct intel_vgpu_shadow_bb *bb;
unsigned long gma = 0;
int bb_size;
void *dst = NULL;
unsigned long bb_size;
int ret = 0;
/* get the start gm address of the batch buffer */
gma = get_gma_bb_from_cmd(s, 1);
if (gma == INTEL_GVT_INVALID_ADDR)
return -EFAULT;
/* get the size of the batch buffer */
bb_size = find_bb_size(s);
if (bb_size < 0)
return -EINVAL;
ret = find_bb_size(s, &bb_size);
if (ret)
return ret;
/* allocate shadow batch buffer */
entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
if (entry_obj == NULL)
bb = kzalloc(sizeof(*bb), GFP_KERNEL);
if (!bb)
return -ENOMEM;
entry_obj->obj =
i915_gem_object_create(s->vgpu->gvt->dev_priv,
roundup(bb_size, PAGE_SIZE));
if (IS_ERR(entry_obj->obj)) {
ret = PTR_ERR(entry_obj->obj);
goto free_entry;
}
entry_obj->len = bb_size;
INIT_LIST_HEAD(&entry_obj->list);
dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB);
if (IS_ERR(dst)) {
ret = PTR_ERR(dst);
goto put_obj;
bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv,
roundup(bb_size, PAGE_SIZE));
if (IS_ERR(bb->obj)) {
ret = PTR_ERR(bb->obj);
goto err_free_bb;
}
ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
if (ret) {
gvt_vgpu_err("failed to set shadow batch to CPU\n");
goto unmap_src;
ret = i915_gem_obj_prepare_shmem_write(bb->obj, &bb->clflush);
if (ret)
goto err_free_obj;
bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
if (IS_ERR(bb->va)) {
ret = PTR_ERR(bb->va);
goto err_finish_shmem_access;
}
entry_obj->va = dst;
entry_obj->bb_start_cmd_va = s->ip_va;
if (bb->clflush & CLFLUSH_BEFORE) {
drm_clflush_virt_range(bb->va, bb->obj->base.size);
bb->clflush &= ~CLFLUSH_BEFORE;
}
/* copy batch buffer to shadow batch buffer*/
ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
gma, gma + bb_size,
dst);
bb->va);
if (ret < 0) {
gvt_vgpu_err("fail to copy guest ring buffer\n");
goto unmap_src;
ret = -EFAULT;
goto err_unmap;
}
list_add(&entry_obj->list, &s->workload->shadow_bb);
INIT_LIST_HEAD(&bb->list);
list_add(&bb->list, &s->workload->shadow_bb);
bb->accessing = true;
bb->bb_start_cmd_va = s->ip_va;
/*
* ip_va saves the virtual address of the shadow batch buffer, while
* ip_gma saves the graphics address of the original batch buffer.
@ -1688,17 +1704,17 @@ static int perform_bb_shadow(struct parser_exec_state *s)
* buffer's gma in pair. After all, we don't want to pin the shadow
* buffer here (too early).
*/
s->ip_va = dst;
s->ip_va = bb->va;
s->ip_gma = gma;
return 0;
unmap_src:
i915_gem_object_unpin_map(entry_obj->obj);
put_obj:
i915_gem_object_put(entry_obj->obj);
free_entry:
kfree(entry_obj);
err_unmap:
i915_gem_object_unpin_map(bb->obj);
err_finish_shmem_access:
i915_gem_obj_finish_shmem_access(bb->obj);
err_free_obj:
i915_gem_object_put(bb->obj);
err_free_bb:
kfree(bb);
return ret;
}
@ -1710,13 +1726,13 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
return -EINVAL;
return -EFAULT;
}
second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
return -EINVAL;
return -EFAULT;
}
s->saved_buf_addr_type = s->buf_addr_type;
@ -1740,7 +1756,6 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
if (ret < 0)
return ret;
}
return ret;
}
@ -2430,7 +2445,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
if (info == NULL) {
gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
cmd, get_opcode(cmd, s->ring_id));
return -EINVAL;
return -EBADRQC;
}
s->info = info;
@ -2465,6 +2480,10 @@ static inline bool gma_out_of_range(unsigned long gma,
return (gma > gma_tail) && (gma < gma_head);
}
/* Keep the consistent return type, e.g EBADRQC for unknown
* cmd, EFAULT for invalid address, EPERM for nonpriv. later
* works as the input of VM healthy status.
*/
static int command_scan(struct parser_exec_state *s,
unsigned long rb_head, unsigned long rb_tail,
unsigned long rb_start, unsigned long rb_len)
@ -2487,7 +2506,7 @@ static int command_scan(struct parser_exec_state *s,
s->ip_gma, rb_start,
gma_bottom);
parser_exec_state_dump(s);
return -EINVAL;
return -EFAULT;
}
if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
gvt_vgpu_err("ip_gma %lx out of range."
@ -2516,7 +2535,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
int ret = 0;
/* ring base is page aligned */
if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE)))
if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE)))
return -EINVAL;
gma_head = workload->rb_start + workload->rb_head;
@ -2565,7 +2584,8 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
wa_ctx);
/* ring base is page aligned */
if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma,
I915_GTT_PAGE_SIZE)))
return -EINVAL;
ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
@ -2604,6 +2624,7 @@ out:
static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
void *shadow_ring_buffer_va;
int ring_id = workload->ring_id;
@ -2619,19 +2640,21 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_tail = workload->rb_start + workload->rb_tail;
gma_top = workload->rb_start + guest_rb_size;
if (workload->rb_len > vgpu->reserve_ring_buffer_size[ring_id]) {
void *va = vgpu->reserve_ring_buffer_va[ring_id];
if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
void *p;
/* realloc the new ring buffer if needed */
vgpu->reserve_ring_buffer_va[ring_id] =
krealloc(va, workload->rb_len, GFP_KERNEL);
if (!vgpu->reserve_ring_buffer_va[ring_id]) {
gvt_vgpu_err("fail to alloc reserve ring buffer\n");
p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
GFP_KERNEL);
if (!p) {
gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
return -ENOMEM;
}
vgpu->reserve_ring_buffer_size[ring_id] = workload->rb_len;
s->ring_scan_buffer[ring_id] = p;
s->ring_scan_buffer_size[ring_id] = workload->rb_len;
}
shadow_ring_buffer_va = vgpu->reserve_ring_buffer_va[ring_id];
shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];
/* get shadow ring buffer va */
workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
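The command scanner above now returns distinct error classes, as noted in the comment before command_scan(): -EBADRQC for unknown or inhibited commands, -EFAULT for addresses that fail the audit, and -EPERM for force-nonpriv violations. A hedged sketch of how a caller might fold these into the VM health check mentioned in the changelog (the helper name is hypothetical):

#include <linux/errno.h>
#include <linux/types.h>

static bool scan_error_is_guest_fault(int err)
{
	switch (err) {
	case -EBADRQC:	/* unknown or register-inhibited command */
	case -EFAULT:	/* address outside the audited range */
	case -EPERM:	/* force-nonpriv whitelist violation */
		return true;	/* the guest misbehaved */
	default:
		return false;	/* host-side failure, e.g. -ENOMEM */
	}
}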


@ -25,41 +25,41 @@
#define __GVT_DEBUG_H__
#define gvt_err(fmt, args...) \
DRM_ERROR("gvt: "fmt, ##args)
pr_err("gvt: "fmt, ##args)
#define gvt_vgpu_err(fmt, args...) \
do { \
if (IS_ERR_OR_NULL(vgpu)) \
DRM_DEBUG_DRIVER("gvt: "fmt, ##args); \
pr_err("gvt: "fmt, ##args); \
else \
DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
pr_err("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
} while (0)
#define gvt_dbg_core(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
pr_debug("gvt: core: "fmt, ##args)
#define gvt_dbg_irq(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: irq: "fmt, ##args)
pr_debug("gvt: irq: "fmt, ##args)
#define gvt_dbg_mm(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: mm: "fmt, ##args)
pr_debug("gvt: mm: "fmt, ##args)
#define gvt_dbg_mmio(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: mmio: "fmt, ##args)
pr_debug("gvt: mmio: "fmt, ##args)
#define gvt_dbg_dpy(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: dpy: "fmt, ##args)
pr_debug("gvt: dpy: "fmt, ##args)
#define gvt_dbg_el(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: el: "fmt, ##args)
pr_debug("gvt: el: "fmt, ##args)
#define gvt_dbg_sched(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: sched: "fmt, ##args)
pr_debug("gvt: sched: "fmt, ##args)
#define gvt_dbg_render(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: render: "fmt, ##args)
pr_debug("gvt: render: "fmt, ##args)
#define gvt_dbg_cmd(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: cmd: "fmt, ##args)
pr_debug("gvt: cmd: "fmt, ##args)
#endif
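Since the helpers above now route through pr_debug(), CONFIG_DYNAMIC_DEBUG can toggle each call site at runtime, which is the dyndbg support mentioned in the changelog. A small hypothetical usage sketch (the function and its arguments are illustrative only):

static void example_debug_usage(struct intel_vgpu *vgpu, u32 offset)
{
	gvt_dbg_core("initializing example state\n");
	gvt_dbg_mmio("vgpu%d: trapped MMIO access at %x\n", vgpu->id, offset);
	gvt_vgpu_err("example error path\n");	/* still unconditional via pr_err() */
}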


@ -0,0 +1,212 @@
/*
* Copyright(c) 2011-2017 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/debugfs.h>
#include <linux/list_sort.h>
#include "i915_drv.h"
#include "gvt.h"
struct mmio_diff_param {
struct intel_vgpu *vgpu;
int total;
int diff;
struct list_head diff_mmio_list;
};
struct diff_mmio {
struct list_head node;
u32 offset;
u32 preg;
u32 vreg;
};
/* Compare two diff_mmio items. */
static int mmio_offset_compare(void *priv,
struct list_head *a, struct list_head *b)
{
struct diff_mmio *ma;
struct diff_mmio *mb;
ma = container_of(a, struct diff_mmio, node);
mb = container_of(b, struct diff_mmio, node);
if (ma->offset < mb->offset)
return -1;
else if (ma->offset > mb->offset)
return 1;
return 0;
}
static inline int mmio_diff_handler(struct intel_gvt *gvt,
u32 offset, void *data)
{
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct mmio_diff_param *param = data;
struct diff_mmio *node;
u32 preg, vreg;
preg = I915_READ_NOTRACE(_MMIO(offset));
vreg = vgpu_vreg(param->vgpu, offset);
if (preg != vreg) {
node = kmalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
node->offset = offset;
node->preg = preg;
node->vreg = vreg;
list_add(&node->node, &param->diff_mmio_list);
param->diff++;
}
param->total++;
return 0;
}
/* Show the all the different values of tracked mmio. */
static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
{
struct intel_vgpu *vgpu = s->private;
struct intel_gvt *gvt = vgpu->gvt;
struct mmio_diff_param param = {
.vgpu = vgpu,
.total = 0,
.diff = 0,
};
struct diff_mmio *node, *next;
INIT_LIST_HEAD(&param.diff_mmio_list);
mutex_lock(&gvt->lock);
spin_lock_bh(&gvt->scheduler.mmio_context_lock);
mmio_hw_access_pre(gvt->dev_priv);
/* Recognize all the diff mmios to list. */
intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
mmio_hw_access_post(gvt->dev_priv);
spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
mutex_unlock(&gvt->lock);
/* In an ascending order by mmio offset. */
list_sort(NULL, &param.diff_mmio_list, mmio_offset_compare);
seq_printf(s, "%-8s %-8s %-8s %-8s\n", "Offset", "HW", "vGPU", "Diff");
list_for_each_entry_safe(node, next, &param.diff_mmio_list, node) {
u32 diff = node->preg ^ node->vreg;
seq_printf(s, "%08x %08x %08x %*pbl\n",
node->offset, node->preg, node->vreg,
32, &diff);
list_del(&node->node);
kfree(node);
}
seq_printf(s, "Total: %d, Diff: %d\n", param.total, param.diff);
return 0;
}
static int vgpu_mmio_diff_open(struct inode *inode, struct file *file)
{
return single_open(file, vgpu_mmio_diff_show, inode->i_private);
}
static const struct file_operations vgpu_mmio_diff_fops = {
.open = vgpu_mmio_diff_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/**
* intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
* @vgpu: a vGPU
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
{
struct dentry *ent;
char name[10] = "";
sprintf(name, "vgpu%d", vgpu->id);
vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
if (!vgpu->debugfs)
return -ENOMEM;
ent = debugfs_create_bool("active", 0444, vgpu->debugfs,
&vgpu->active);
if (!ent)
return -ENOMEM;
ent = debugfs_create_file("mmio_diff", 0444, vgpu->debugfs,
vgpu, &vgpu_mmio_diff_fops);
if (!ent)
return -ENOMEM;
return 0;
}
/**
* intel_gvt_debugfs_remove_vgpu - remove debugfs entries of a vGPU
* @vgpu: a vGPU
*/
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
{
debugfs_remove_recursive(vgpu->debugfs);
vgpu->debugfs = NULL;
}
/**
* intel_gvt_debugfs_init - register gvt debugfs root entry
* @gvt: GVT device
*
* Returns:
* zero on success, negative if failed.
*/
int intel_gvt_debugfs_init(struct intel_gvt *gvt)
{
struct drm_minor *minor = gvt->dev_priv->drm.primary;
struct dentry *ent;
gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root);
if (!gvt->debugfs_root) {
gvt_err("Cannot create debugfs dir\n");
return -ENOMEM;
}
ent = debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root,
&gvt->mmio.num_tracked_mmio);
if (!ent)
return -ENOMEM;
return 0;
}
/**
* intel_gvt_debugfs_clean - remove debugfs entries
* @gvt: GVT device
*/
void intel_gvt_debugfs_clean(struct intel_gvt *gvt)
{
debugfs_remove_recursive(gvt->debugfs_root);
gvt->debugfs_root = NULL;
}


@ -46,8 +46,6 @@
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
((a)->lrca == (b)->lrca))
static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
static int context_switch_events[] = {
[RCS] = RCS_AS_CONTEXT_SWITCH,
[BCS] = BCS_AS_CONTEXT_SWITCH,
@ -135,6 +133,8 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 write_pointer;
u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
unsigned long hwsp_gpa;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
_EL_OFFSET_STATUS_PTR);
@ -160,6 +160,20 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
ctx_status_ptr.write_ptr = write_pointer;
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
/* Update the CSB and CSB write pointer in HWSP */
hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
vgpu->hws_pga[ring_id]);
if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
intel_gvt_hypervisor_write_gpa(vgpu,
hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 +
write_pointer * 8,
status, 8);
intel_gvt_hypervisor_write_gpa(vgpu,
hwsp_gpa +
intel_hws_csb_write_index(dev_priv) * 4,
&write_pointer, 4);
}
gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
vgpu->id, write_pointer, offset, status->ldw, status->udw);
@ -358,218 +372,47 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
return 0;
}
static void free_workload(struct intel_vgpu_workload *workload)
{
intel_vgpu_unpin_mm(workload->shadow_mm);
intel_gvt_mm_unreference(workload->shadow_mm);
kmem_cache_free(workload->vgpu->workloads, workload);
}
#define get_desc_from_elsp_dwords(ed, i) \
((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
struct intel_shadow_bb_entry *entry_obj;
/* pin the gem object to ggtt */
list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
struct i915_vma *vma;
vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
if (IS_ERR(vma)) {
return PTR_ERR(vma);
}
/* FIXME: we are not tracking our pinned VMA leaving it
* up to the core to fix up the stray pin_count upon
* free.
*/
/* update the relocate gma with shadow batch buffer*/
entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
if (gmadr_bytes == 8)
entry_obj->bb_start_cmd_va[2] = 0;
}
return 0;
}
static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
struct intel_vgpu_workload *workload = container_of(wa_ctx,
struct intel_vgpu_workload,
wa_ctx);
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
shadow_ring_context = kmap_atomic(page);
shadow_ring_context->bb_per_ctx_ptr.val =
(shadow_ring_context->bb_per_ctx_ptr.val &
(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
shadow_ring_context->rcs_indirect_ctx.val =
(shadow_ring_context->rcs_indirect_ctx.val &
(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
kunmap_atomic(shadow_ring_context);
return 0;
}
static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
struct i915_vma *vma;
unsigned char *per_ctx_va =
(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
wa_ctx->indirect_ctx.size;
if (wa_ctx->indirect_ctx.size == 0)
return 0;
vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
0, CACHELINE_BYTES, 0);
if (IS_ERR(vma)) {
return PTR_ERR(vma);
}
/* FIXME: we are not tracking our pinned VMA leaving it
* up to the core to fix up the stray pin_count upon
* free.
*/
wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
memset(per_ctx_va, 0, CACHELINE_BYTES);
update_wa_ctx_2_shadow_ctx(wa_ctx);
return 0;
}
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
/* release all the shadow batch buffer */
if (!list_empty(&workload->shadow_bb)) {
struct intel_shadow_bb_entry *entry_obj =
list_first_entry(&workload->shadow_bb,
struct intel_shadow_bb_entry,
list);
struct intel_shadow_bb_entry *temp;
list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
list) {
i915_gem_object_unpin_map(entry_obj->obj);
i915_gem_object_put(entry_obj->obj);
list_del(&entry_obj->list);
kfree(entry_obj);
}
}
}
static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
struct execlist_ctx_descriptor_format ctx[2];
int ring_id = workload->ring_id;
int ret;
ret = intel_vgpu_pin_mm(workload->shadow_mm);
if (ret) {
gvt_vgpu_err("fail to vgpu pin mm\n");
goto out;
}
ret = intel_vgpu_sync_oos_pages(workload->vgpu);
if (ret) {
gvt_vgpu_err("fail to vgpu sync oos pages\n");
goto err_unpin_mm;
}
ret = intel_vgpu_flush_post_shadow(workload->vgpu);
if (ret) {
gvt_vgpu_err("fail to flush post shadow\n");
goto err_unpin_mm;
}
ret = intel_gvt_generate_request(workload);
if (ret) {
gvt_vgpu_err("fail to generate request\n");
goto err_unpin_mm;
}
ret = prepare_shadow_batch_buffer(workload);
if (ret) {
gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
goto err_unpin_mm;
}
ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
if (ret) {
gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
goto err_shadow_batch;
}
if (!workload->emulate_schedule_in)
return 0;
ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
if (!ret)
goto out;
else
ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx);
if (ret) {
gvt_vgpu_err("fail to emulate execlist schedule in\n");
release_shadow_wa_ctx(&workload->wa_ctx);
err_shadow_batch:
release_shadow_batch_buffer(workload);
err_unpin_mm:
intel_vgpu_unpin_mm(workload->shadow_mm);
out:
return ret;
return ret;
}
return 0;
}
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
int ring_id = workload->ring_id;
struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
struct intel_vgpu_workload *next_workload;
struct list_head *next = workload_q_head(vgpu, ring_id)->next;
bool lite_restore = false;
int ret;
int ret = 0;
gvt_dbg_el("complete workload %p status %d\n", workload,
workload->status);
if (!workload->status) {
release_shadow_batch_buffer(workload);
release_shadow_wa_ctx(&workload->wa_ctx);
}
if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
/* if workload->status is not successful means HW GPU
* has occurred GPU hang or something wrong with i915/GVT,
* and GVT won't inject context switch interrupt to guest.
* So this error is a vGPU hang actually to the guest.
* According to this we should emunlate a vGPU hang. If
* there are pending workloads which are already submitted
* from guest, we should clean them up like HW GPU does.
*
* if it is in middle of engine resetting, the pending
* workloads won't be submitted to HW GPU and will be
* cleaned up during the resetting process later, so doing
* the workload clean up here doesn't have any impact.
**/
clean_workloads(vgpu, ENGINE_MASK(ring_id));
if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id)))
goto out;
}
if (!list_empty(workload_q_head(vgpu, ring_id))) {
struct execlist_ctx_descriptor_format *this_desc, *next_desc;
@ -584,213 +427,60 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
if (lite_restore) {
gvt_dbg_el("next context == current - no schedule-out\n");
free_workload(workload);
return 0;
goto out;
}
ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
if (ret)
goto err;
out:
free_workload(workload);
return 0;
err:
free_workload(workload);
intel_vgpu_unpin_mm(workload->shadow_mm);
intel_vgpu_destroy_workload(workload);
return ret;
}
#define RING_CTX_OFF(x) \
offsetof(struct execlist_ring_context, x)
static void read_guest_pdps(struct intel_vgpu *vgpu,
u64 ring_context_gpa, u32 pdp[8])
{
u64 gpa;
int i;
gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
for (i = 0; i < 8; i++)
intel_gvt_hypervisor_read_gpa(vgpu,
gpa + i * 8, &pdp[7 - i], 4);
}
static int prepare_mm(struct intel_vgpu_workload *workload)
{
struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
struct intel_vgpu_mm *mm;
struct intel_vgpu *vgpu = workload->vgpu;
int page_table_level;
u32 pdp[8];
if (desc->addressing_mode == 1) { /* legacy 32-bit */
page_table_level = 3;
} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
page_table_level = 4;
} else {
gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
return -EINVAL;
}
read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
if (mm) {
intel_gvt_mm_reference(mm);
} else {
mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
pdp, page_table_level, 0);
if (IS_ERR(mm)) {
gvt_vgpu_err("fail to create mm object.\n");
return PTR_ERR(mm);
}
}
workload->shadow_mm = mm;
return 0;
}
#define get_last_workload(q) \
(list_empty(q) ? NULL : container_of(q->prev, \
struct intel_vgpu_workload, list))
static int submit_context(struct intel_vgpu *vgpu, int ring_id,
struct execlist_ctx_descriptor_format *desc,
bool emulate_schedule_in)
{
struct list_head *q = workload_q_head(vgpu, ring_id);
struct intel_vgpu_workload *last_workload = get_last_workload(q);
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_vgpu_workload *workload = NULL;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u64 ring_context_gpa;
u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
int ret;
ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
return -EINVAL;
}
workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
if (IS_ERR(workload))
return PTR_ERR(workload);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ring_header.val), &head, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ring_tail.val), &tail, 4);
head &= RB_HEAD_OFF_MASK;
tail &= RB_TAIL_OFF_MASK;
if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
gvt_dbg_el("ctx head %x real head %lx\n", head,
last_workload->rb_tail);
/*
* cannot use guest context head pointer here,
* as it might not be updated at this time
*/
head = last_workload->rb_tail;
}
gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
if (!workload)
return -ENOMEM;
/* record some ring buffer register values for scan and shadow */
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rb_start.val), &start, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
INIT_LIST_HEAD(&workload->list);
INIT_LIST_HEAD(&workload->shadow_bb);
init_waitqueue_head(&workload->shadow_ctx_status_wq);
atomic_set(&workload->shadow_ctx_active, 0);
workload->vgpu = vgpu;
workload->ring_id = ring_id;
workload->ctx_desc = *desc;
workload->ring_context_gpa = ring_context_gpa;
workload->rb_head = head;
workload->rb_tail = tail;
workload->rb_start = start;
workload->rb_ctl = ctl;
workload->prepare = prepare_execlist_workload;
workload->complete = complete_execlist_workload;
workload->status = -EINPROGRESS;
workload->emulate_schedule_in = emulate_schedule_in;
workload->shadowed = false;
if (ring_id == RCS) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
workload->wa_ctx.indirect_ctx.guest_gma =
indirect_ctx & INDIRECT_CTX_ADDR_MASK;
workload->wa_ctx.indirect_ctx.size =
(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
CACHELINE_BYTES;
workload->wa_ctx.per_ctx.guest_gma =
per_ctx & PER_CTX_ADDR_MASK;
workload->wa_ctx.per_ctx.valid = per_ctx & 1;
}
if (emulate_schedule_in)
workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords;
gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
workload, ring_id, head, tail, start, ctl);
workload->elsp_dwords = s->execlist[ring_id].elsp_dwords;
gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
emulate_schedule_in);
ret = prepare_mm(workload);
if (ret) {
kmem_cache_free(vgpu->workloads, workload);
return ret;
}
/* Only scan and shadow the first workload in the queue
* as there is only one pre-allocated buf-obj for shadow.
*/
if (list_empty(workload_q_head(vgpu, ring_id))) {
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
intel_gvt_scan_and_shadow_workload(workload);
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
}
queue_workload(workload);
return 0;
}
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
{
struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
struct execlist_ctx_descriptor_format desc[2];
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
struct execlist_ctx_descriptor_format *desc[2];
int i, ret;
desc[0] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
desc[1] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
if (!desc[0].valid) {
if (!desc[0]->valid) {
gvt_vgpu_err("invalid elsp submission, desc0 is invalid\n");
goto inv_desc;
}
for (i = 0; i < ARRAY_SIZE(desc); i++) {
if (!desc[i].valid)
if (!desc[i]->valid)
continue;
if (!desc[i].privilege_access) {
if (!desc[i]->privilege_access) {
gvt_vgpu_err("unexpected GGTT elsp submission\n");
goto inv_desc;
}
@ -798,9 +488,9 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
/* submit workload */
for (i = 0; i < ARRAY_SIZE(desc); i++) {
if (!desc[i].valid)
if (!desc[i]->valid)
continue;
ret = submit_context(vgpu, ring_id, &desc[i], i == 0);
ret = submit_context(vgpu, ring_id, desc[i], i == 0);
if (ret) {
gvt_vgpu_err("failed to submit desc %d\n", i);
return ret;
@ -811,13 +501,14 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
inv_desc:
gvt_vgpu_err("descriptors content: desc0 %08x %08x desc1 %08x %08x\n",
desc[0].udw, desc[0].ldw, desc[1].udw, desc[1].ldw);
desc[0]->udw, desc[0]->ldw, desc[1]->udw, desc[1]->ldw);
return -EINVAL;
}
static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
{
struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 ctx_status_ptr_reg;
@ -837,91 +528,40 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}
static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
struct intel_vgpu_workload *pos, *n;
unsigned int tmp;
/* free the unsubmited workloads in the queues. */
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
list_for_each_entry_safe(pos, n,
&vgpu->workload_q_head[engine->id], list) {
list_del_init(&pos->list);
free_workload(pos);
}
clear_bit(engine->id, vgpu->shadow_ctx_desc_updated);
}
}
void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
void clean_execlist(struct intel_vgpu *vgpu)
{
enum intel_engine_id i;
struct intel_engine_cs *engine;
clean_workloads(vgpu, ALL_ENGINES);
kmem_cache_destroy(vgpu->workloads);
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
kfree(vgpu->reserve_ring_buffer_va[i]);
vgpu->reserve_ring_buffer_va[i] = NULL;
vgpu->reserve_ring_buffer_size[i] = 0;
}
struct intel_vgpu_submission *s = &vgpu->submission;
kfree(s->ring_scan_buffer[i]);
s->ring_scan_buffer[i] = NULL;
s->ring_scan_buffer_size[i] = 0;
}
}
#define RESERVE_RING_BUFFER_SIZE ((1 * PAGE_SIZE)/8)
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
{
enum intel_engine_id i;
struct intel_engine_cs *engine;
/* each ring has a virtual execlist engine */
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
init_vgpu_execlist(vgpu, i);
INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
}
vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
sizeof(struct intel_vgpu_workload), 0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!vgpu->workloads)
return -ENOMEM;
/* each ring has a shadow ring buffer until vgpu destroyed */
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
vgpu->reserve_ring_buffer_va[i] =
kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL);
if (!vgpu->reserve_ring_buffer_va[i]) {
gvt_vgpu_err("fail to alloc reserve ring buffer\n");
goto out;
}
vgpu->reserve_ring_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
}
return 0;
out:
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
if (vgpu->reserve_ring_buffer_size[i]) {
kfree(vgpu->reserve_ring_buffer_va[i]);
vgpu->reserve_ring_buffer_va[i] = NULL;
vgpu->reserve_ring_buffer_size[i] = 0;
}
}
return -ENOMEM;
}
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
void reset_execlist(struct intel_vgpu *vgpu,
unsigned long engine_mask)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
unsigned int tmp;
clean_workloads(vgpu, engine_mask);
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
init_vgpu_execlist(vgpu, engine->id);
}
int init_execlist(struct intel_vgpu *vgpu)
{
reset_execlist(vgpu, ALL_ENGINES);
return 0;
}
const struct intel_vgpu_submission_ops intel_vgpu_execlist_submission_ops = {
.name = "execlist",
.init = init_execlist,
.reset = reset_execlist,
.clean = clean_execlist,
};
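The refactor above exposes the execlist backend through an intel_vgpu_submission_ops table rather than directly exported functions. A hedged sketch of how such a table might be wired up by a caller, based only on the signatures shown in this diff (the setup and teardown helpers are hypothetical):

static const struct intel_vgpu_submission_ops *backend =
		&intel_vgpu_execlist_submission_ops;

static int example_setup_submission(struct intel_vgpu *vgpu)
{
	int ret;

	ret = backend->init(vgpu);		/* init_execlist() above */
	if (ret)
		return ret;

	backend->reset(vgpu, ALL_ENGINES);	/* resets go through the same table */
	return 0;
}

static void example_teardown_submission(struct intel_vgpu *vgpu)
{
	backend->clean(vgpu);			/* clean_execlist() above */
}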


@ -36,10 +36,6 @@
#define _GVT_EXECLIST_H_
struct execlist_ctx_descriptor_format {
union {
u32 udw;
u32 context_id;
};
union {
u32 ldw;
struct {
@ -54,6 +50,10 @@ struct execlist_ctx_descriptor_format {
u32 lrca : 20;
};
};
union {
u32 udw;
u32 context_id;
};
};
struct execlist_status_format {


@ -66,20 +66,23 @@ static struct bin_attribute firmware_attr = {
.mmap = NULL,
};
static int expose_firmware_sysfs(struct intel_gvt *gvt)
static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
struct drm_i915_private *dev_priv = gvt->dev_priv;
*(u32 *)(data + offset) = I915_READ_NOTRACE(_MMIO(offset));
return 0;
}
static int expose_firmware_sysfs(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
struct intel_gvt_mmio_info *e;
struct gvt_mmio_block *block = gvt->mmio.mmio_block;
int num = gvt->mmio.num_mmio_block;
struct gvt_firmware_header *h;
void *firmware;
void *p;
unsigned long size, crc32_start;
int i, j;
int ret;
int i, ret;
size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
firmware = vzalloc(size);
@ -104,15 +107,8 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
p = firmware + h->mmio_offset;
hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
*(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
for (i = 0; i < num; i++, block++) {
for (j = 0; j < block->size; j += 4)
*(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
block->offset) + j));
}
/* Take a snapshot of hw mmio registers. */
intel_gvt_for_each_tracked_mmio(gvt, mmio_snapshot_handler, p);
memcpy(gvt->firmware.mmio, p, info->mmio_size);
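The snapshot above now goes through intel_gvt_for_each_tracked_mmio(), the same walker the new debugfs code uses. As a hedged illustration of the callback contract, here is a hypothetical handler that merely counts the tracked offsets:

static int count_tracked_mmio(struct intel_gvt *gvt, u32 offset, void *data)
{
	(*(unsigned long *)data)++;	/* tally every tracked offset */
	return 0;			/* 0 on success, as mmio_snapshot_handler() does */
}

/* usage: unsigned long n = 0; intel_gvt_for_each_tracked_mmio(gvt, count_tracked_mmio, &n); */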


@ -94,12 +94,12 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
u64 h_addr;
int ret;
ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
&h_addr);
if (ret)
return ret;
*h_index = h_addr >> GTT_PAGE_SHIFT;
*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
return 0;
}
@ -109,12 +109,12 @@ int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
u64 g_addr;
int ret;
ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
&g_addr);
if (ret)
return ret;
*g_index = g_addr >> GTT_PAGE_SHIFT;
*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
return 0;
}
@ -156,13 +156,15 @@ int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
struct gtt_type_table_entry {
int entry_type;
int pt_type;
int next_pt_type;
int pse_entry_type;
};
#define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \
#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
[type] = { \
.entry_type = e_type, \
.pt_type = cpt_type, \
.next_pt_type = npt_type, \
.pse_entry_type = pse_type, \
}
@ -170,55 +172,68 @@ struct gtt_type_table_entry {
static struct gtt_type_table_entry gtt_type_table[] = {
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
GTT_TYPE_INVALID,
GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_PPGTT_PML4_ENTRY,
GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
GTT_TYPE_PPGTT_PML4_ENTRY,
GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_PPGTT_PDP_ENTRY,
GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_1G_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
GTT_TYPE_INVALID,
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_1G_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
GTT_TYPE_PPGTT_PDP_ENTRY,
GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_1G_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PDE_ENTRY,
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_2M_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
GTT_TYPE_PPGTT_PDE_ENTRY,
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_2M_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_INVALID,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_PPGTT_PTE_4K_ENTRY,
GTT_TYPE_PPGTT_PTE_PT,
GTT_TYPE_INVALID,
GTT_TYPE_INVALID),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
GTT_TYPE_PPGTT_PDE_ENTRY,
GTT_TYPE_PPGTT_PDE_PT,
GTT_TYPE_INVALID,
GTT_TYPE_PPGTT_PTE_2M_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
GTT_TYPE_PPGTT_PDP_ENTRY,
GTT_TYPE_PPGTT_PDP_PT,
GTT_TYPE_INVALID,
GTT_TYPE_PPGTT_PTE_1G_ENTRY),
GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
GTT_TYPE_GGTT_PTE,
GTT_TYPE_INVALID,
GTT_TYPE_INVALID,
GTT_TYPE_INVALID),
};
@ -227,6 +242,11 @@ static inline int get_next_pt_type(int type)
return gtt_type_table[type].next_pt_type;
}
static inline int get_pt_type(int type)
{
return gtt_type_table[type].pt_type;
}
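The new pt_type column rounds out the table above: for every entry type it records which page-table type holds that entry, while next_pt_type still names the table one level further down. A minimal sketch of how these helpers chain together when descending the gen8 hierarchy (illustrative only; the types and helpers are the ones defined in this file):

    /* Illustrative walk from the L4 root down to the leaf entry type. */
    int type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;

    while (get_next_pt_type(type) != GTT_TYPE_INVALID) {
        type = get_next_pt_type(type); /* PML4_PT -> PDP_PT -> PDE_PT -> PTE_PT */
        type = get_entry_type(type);   /* the entry type stored in that table */
    }
    /* type ends up as GTT_TYPE_PPGTT_PTE_4K_ENTRY. get_pt_type() goes the other
     * way, mapping an entry type back to the page-table type that contains it,
     * and get_pse_type() short-circuits a PDP/PDE entry to the 1G/2M page type.
     */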
static inline int get_entry_type(int type)
{
return gtt_type_table[type].entry_type;
@ -351,7 +371,7 @@ static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
return false;
e->type = get_entry_type(e->type);
if (!(e->val64 & (1 << 7)))
if (!(e->val64 & BIT(7)))
return false;
e->type = get_pse_type(e->type);
@ -369,12 +389,17 @@ static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
return (e->val64 != 0);
else
return (e->val64 & (1 << 0));
return (e->val64 & BIT(0));
}
static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
e->val64 &= ~(1 << 0);
e->val64 &= ~BIT(0);
}
static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{
e->val64 |= BIT(0);
}
/*
@ -382,7 +407,7 @@ static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
*/
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
unsigned long x = (gma >> GTT_PAGE_SHIFT);
unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);
trace_gma_index(__func__, gma, x);
return x;
@ -406,6 +431,7 @@ static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
.get_entry = gtt_get_entry64,
.set_entry = gtt_set_entry64,
.clear_present = gtt_entry_clear_present,
.set_present = gtt_entry_set_present,
.test_present = gen8_gtt_test_present,
.test_pse = gen8_gtt_test_pse,
.get_pfn = gen8_gtt_get_pfn,
@ -494,7 +520,7 @@ static inline int ppgtt_spt_get_entry(
return -EINVAL;
ret = ops->get_entry(page_table, e, index, guest,
spt->guest_page.gfn << GTT_PAGE_SHIFT,
spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
spt->vgpu);
if (ret)
return ret;
@ -516,7 +542,7 @@ static inline int ppgtt_spt_set_entry(
return -EINVAL;
return ops->set_entry(page_table, e, index, guest,
spt->guest_page.gfn << GTT_PAGE_SHIFT,
spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
spt->vgpu);
}
@ -537,88 +563,103 @@ static inline int ppgtt_spt_set_entry(
spt->shadow_page.type, e, index, false)
/**
* intel_vgpu_init_guest_page - init a guest page data structure
* intel_vgpu_init_page_track - init a page track data structure
* @vgpu: a vGPU
* @p: a guest page data structure
* @t: a page track data structure
* @gfn: guest memory page frame number
* @handler: function will be called when target guest memory page has
* @handler: the function that will be called when the target guest memory page has
* been modified.
*
* This function is called when user wants to track a guest memory page.
* This function is called when a user wants to prepare a page track data
* structure to track a guest memory page.
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
int intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
struct intel_vgpu_page_track *t,
unsigned long gfn,
int (*handler)(void *, u64, void *, int),
void *data)
{
INIT_HLIST_NODE(&t->node);
t->tracked = false;
t->gfn = gfn;
t->handler = handler;
t->data = data;
hash_add(vgpu->gtt.tracked_guest_page_hash_table, &t->node, t->gfn);
return 0;
}
/**
* intel_vgpu_clean_page_track - release a page track data structure
* @vgpu: a vGPU
* @t: a page track data structure
*
* This function is called before a user frees a page track data structure.
*/
void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
struct intel_vgpu_page_track *t)
{
if (!hlist_unhashed(&t->node))
hash_del(&t->node);
if (t->tracked)
intel_gvt_hypervisor_disable_page_track(vgpu, t);
}
/**
* intel_vgpu_find_tracked_page - find a tracked guest page
* @vgpu: a vGPU
* @gfn: guest memory page frame number
*
* This function is called when the emulation layer wants to figure out if a
* trapped GFN is a tracked guest page.
*
* Returns:
* Pointer to page track data structure, NULL if not found.
*/
struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
struct intel_vgpu *vgpu, unsigned long gfn)
{
struct intel_vgpu_page_track *t;
hash_for_each_possible(vgpu->gtt.tracked_guest_page_hash_table,
t, node, gfn) {
if (t->gfn == gfn)
return t;
}
return NULL;
}
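Together, the three functions above form the page-track API that replaces the old guest-page write-protection helpers. A minimal sketch of the intended lifecycle, assuming a caller that owns a struct intel_vgpu_page_track and supplies its own handler (my_track_handler and track_one_gfn are hypothetical; the calls are the ones defined above plus the renamed hypervisor wrapper from mpt.h later in this series):

    /* Hypothetical caller: write-protect one guest page and route writes
     * through its own handler.
     */
    static int my_track_handler(void *data, u64 pa, void *p_data, int bytes)
    {
        struct intel_vgpu_page_track *t = data;

        /* emulate the trapped write against t->data here */
        return 0;
    }

    static int track_one_gfn(struct intel_vgpu *vgpu,
            struct intel_vgpu_page_track *t, unsigned long gfn)
    {
        int ret;

        intel_vgpu_init_page_track(vgpu, t, gfn, my_track_handler, NULL);

        /* ask the hypervisor to write-protect the page; sets t->tracked */
        ret = intel_gvt_hypervisor_enable_page_track(vgpu, t);
        if (ret)
            intel_vgpu_clean_page_track(vgpu, t);
        return ret;
    }

On a trapped write, the emulation path looks the page up with intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT) and calls t->handler(t, pa, p_data, bytes), exactly as the mmio.c hunk further down does.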
static int init_guest_page(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *p,
unsigned long gfn,
int (*handler)(void *, u64, void *, int),
void *data)
{
INIT_HLIST_NODE(&p->node);
p->writeprotection = false;
p->gfn = gfn;
p->handler = handler;
p->data = data;
p->oos_page = NULL;
p->write_cnt = 0;
hash_add(vgpu->gtt.guest_page_hash_table, &p->node, p->gfn);
return 0;
return intel_vgpu_init_page_track(vgpu, &p->track, gfn, handler, data);
}
static int detach_oos_page(struct intel_vgpu *vgpu,
struct intel_vgpu_oos_page *oos_page);
/**
* intel_vgpu_clean_guest_page - release the resource owned by guest page data
* structure
* @vgpu: a vGPU
* @p: a tracked guest page
*
* This function is called when user tries to stop tracking a guest memory
* page.
*/
void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
static void clean_guest_page(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *p)
{
if (!hlist_unhashed(&p->node))
hash_del(&p->node);
if (p->oos_page)
detach_oos_page(vgpu, p->oos_page);
if (p->writeprotection)
intel_gvt_hypervisor_unset_wp_page(vgpu, p);
}
/**
* intel_vgpu_find_guest_page - find a guest page data structure by GFN.
* @vgpu: a vGPU
* @gfn: guest memory page frame number
*
* This function is called when emulation logic wants to know if a trapped GFN
* is a tracked guest page.
*
* Returns:
* Pointer to guest page data structure, NULL if failed.
*/
struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
struct intel_vgpu *vgpu, unsigned long gfn)
{
struct intel_vgpu_guest_page *p;
hash_for_each_possible(vgpu->gtt.guest_page_hash_table,
p, node, gfn) {
if (p->gfn == gfn)
return p;
}
return NULL;
intel_vgpu_clean_page_track(vgpu, &p->track);
}
static inline int init_shadow_page(struct intel_vgpu *vgpu,
struct intel_vgpu_shadow_page *p, int type)
struct intel_vgpu_shadow_page *p, int type, bool hash)
{
struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
dma_addr_t daddr;
@ -634,8 +675,9 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu,
INIT_HLIST_NODE(&p->node);
p->mfn = daddr >> GTT_PAGE_SHIFT;
hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
p->mfn = daddr >> I915_GTT_PAGE_SHIFT;
if (hash)
hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
return 0;
}
@ -644,7 +686,7 @@ static inline void clean_shadow_page(struct intel_vgpu *vgpu,
{
struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096,
dma_unmap_page(kdev, p->mfn << I915_GTT_PAGE_SHIFT, 4096,
PCI_DMA_BIDIRECTIONAL);
if (!hlist_unhashed(&p->node))
@ -664,6 +706,9 @@ static inline struct intel_vgpu_shadow_page *find_shadow_page(
return NULL;
}
#define page_track_to_guest_page(ptr) \
container_of(ptr, struct intel_vgpu_guest_page, track)
#define guest_page_to_ppgtt_spt(ptr) \
container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)
@ -697,7 +742,7 @@ static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
clean_shadow_page(spt->vgpu, &spt->shadow_page);
intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
clean_guest_page(spt->vgpu, &spt->guest_page);
list_del_init(&spt->post_shadow_list);
free_spt(spt);
@ -713,22 +758,24 @@ static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
}
static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
static int ppgtt_handle_guest_write_page_table_bytes(
struct intel_vgpu_guest_page *gpt,
u64 pa, void *p_data, int bytes);
static int ppgtt_write_protection_handler(void *gp, u64 pa,
static int ppgtt_write_protection_handler(void *data, u64 pa,
void *p_data, int bytes)
{
struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
struct intel_vgpu_page_track *t = data;
struct intel_vgpu_guest_page *p = page_track_to_guest_page(t);
int ret;
if (bytes != 4 && bytes != 8)
return -EINVAL;
if (!gpt->writeprotection)
if (!t->tracked)
return -EINVAL;
ret = ppgtt_handle_guest_write_page_table_bytes(gp,
ret = ppgtt_handle_guest_write_page_table_bytes(p,
pa, p_data, bytes);
if (ret)
return ret;
@ -762,13 +809,13 @@ retry:
* TODO: guest page type may be different with shadow page type,
* when we support PSE page in future.
*/
ret = init_shadow_page(vgpu, &spt->shadow_page, type);
ret = init_shadow_page(vgpu, &spt->shadow_page, type, true);
if (ret) {
gvt_vgpu_err("fail to initialize shadow page for spt\n");
goto err;
}
ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
ret = init_guest_page(vgpu, &spt->guest_page,
gfn, ppgtt_write_protection_handler, NULL);
if (ret) {
gvt_vgpu_err("fail to initialize guest page for spt\n");
@ -798,7 +845,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
#define pt_entries(spt) \
(GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
#define for_each_present_guest_entry(spt, e, i) \
for (i = 0; i < pt_entries(spt); i++) \
@ -856,7 +903,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
int v = atomic_read(&spt->refcount);
trace_spt_change(spt->vgpu->id, "die", spt,
spt->guest_page.gfn, spt->shadow_page.type);
spt->guest_page.track.gfn, spt->shadow_page.type);
trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
@ -878,7 +925,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
}
release:
trace_spt_change(spt->vgpu->id, "release", spt,
spt->guest_page.gfn, spt->shadow_page.type);
spt->guest_page.track.gfn, spt->shadow_page.type);
ppgtt_free_shadow_page(spt);
return 0;
fail:
@ -895,6 +942,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s = NULL;
struct intel_vgpu_guest_page *g;
struct intel_vgpu_page_track *t;
int ret;
if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
@ -902,8 +950,9 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
goto fail;
}
g = intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
if (g) {
t = intel_vgpu_find_tracked_page(vgpu, ops->get_pfn(we));
if (t) {
g = page_track_to_guest_page(t);
s = guest_page_to_ppgtt_spt(g);
ppgtt_get_shadow_page(s);
} else {
@ -915,7 +964,8 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
goto fail;
}
ret = intel_gvt_hypervisor_set_wp_page(vgpu, &s->guest_page);
ret = intel_gvt_hypervisor_enable_page_track(vgpu,
&s->guest_page.track);
if (ret)
goto fail;
@ -923,7 +973,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
if (ret)
goto fail;
trace_spt_change(vgpu->id, "new", s, s->guest_page.gfn,
trace_spt_change(vgpu->id, "new", s, s->guest_page.track.gfn,
s->shadow_page.type);
}
return s;
@ -953,7 +1003,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
int ret;
trace_spt_change(spt->vgpu->id, "born", spt,
spt->guest_page.gfn, spt->shadow_page.type);
spt->guest_page.track.gfn, spt->shadow_page.type);
if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
for_each_present_guest_entry(spt, &ge, i) {
@ -1078,11 +1128,11 @@ static int sync_oos_page(struct intel_vgpu *vgpu,
old.type = new.type = get_entry_type(spt->guest_page_type);
old.val64 = new.val64 = 0;
for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
index++) {
for (index = 0; index < (I915_GTT_PAGE_SIZE >>
info->gtt_entry_size_shift); index++) {
ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
ops->get_entry(NULL, &new, index, true,
oos_page->guest_page->gfn << PAGE_SHIFT, vgpu);
oos_page->guest_page->track.gfn << PAGE_SHIFT, vgpu);
if (old.val64 == new.val64
&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
@ -1132,8 +1182,9 @@ static int attach_oos_page(struct intel_vgpu *vgpu,
struct intel_gvt *gvt = vgpu->gvt;
int ret;
ret = intel_gvt_hypervisor_read_gpa(vgpu, gpt->gfn << GTT_PAGE_SHIFT,
oos_page->mem, GTT_PAGE_SIZE);
ret = intel_gvt_hypervisor_read_gpa(vgpu,
gpt->track.gfn << I915_GTT_PAGE_SHIFT,
oos_page->mem, I915_GTT_PAGE_SIZE);
if (ret)
return ret;
@ -1152,7 +1203,7 @@ static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
{
int ret;
ret = intel_gvt_hypervisor_set_wp_page(vgpu, gpt);
ret = intel_gvt_hypervisor_enable_page_track(vgpu, &gpt->track);
if (ret)
return ret;
@ -1200,7 +1251,7 @@ static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
return intel_gvt_hypervisor_unset_wp_page(vgpu, gpt);
return intel_gvt_hypervisor_disable_page_track(vgpu, &gpt->track);
}
/**
@ -1335,10 +1386,10 @@ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
return 0;
}
static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
static int ppgtt_handle_guest_write_page_table_bytes(
struct intel_vgpu_guest_page *gpt,
u64 pa, void *p_data, int bytes)
{
struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
@ -1415,7 +1466,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
mm->shadow_page_table = mem + mm->page_table_entry_size;
} else if (mm->type == INTEL_GVT_MM_GGTT) {
mm->page_table_entry_cnt =
(gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
(gvt_ggtt_gm_sz(gvt) >> I915_GTT_PAGE_SHIFT);
mm->page_table_entry_size = mm->page_table_entry_cnt *
info->gtt_entry_size;
mem = vzalloc(mm->page_table_entry_size);
@ -1737,8 +1788,8 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
gma_ops->gma_to_ggtt_pte_index(gma));
if (ret)
goto err;
gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
+ (gma & ~GTT_PAGE_MASK);
gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
+ (gma & ~I915_GTT_PAGE_MASK);
trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
return gpa;
@ -1790,8 +1841,8 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
}
}
gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
+ (gma & ~GTT_PAGE_MASK);
gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
+ (gma & ~I915_GTT_PAGE_MASK);
trace_gma_translate(vgpu->id, "ppgtt", 0,
mm->page_table_level, gma, gpa);
@ -1859,7 +1910,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
if (bytes != 4 && bytes != 8)
return -EINVAL;
gma = g_gtt_index << GTT_PAGE_SHIFT;
gma = g_gtt_index << I915_GTT_PAGE_SHIFT;
/* the VM may configure the whole GM space when ballooning is used */
if (!vgpu_gmadr_is_valid(vgpu, gma))
@ -1878,11 +1929,11 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
* update the entry in this situation p2m will fail
* setting the shadow entry to point to a scratch page
*/
ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
}
} else {
m = e;
ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
}
ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
@ -1922,7 +1973,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
{
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
int page_entry_num = GTT_PAGE_SIZE >>
int page_entry_num = I915_GTT_PAGE_SIZE >>
vgpu->gvt->device_info.gtt_entry_size_shift;
void *scratch_pt;
int i;
@ -1946,7 +1997,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
return -ENOMEM;
}
gtt->scratch_pt[type].page_mfn =
(unsigned long)(daddr >> GTT_PAGE_SHIFT);
(unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
vgpu->id, type, gtt->scratch_pt[type].page_mfn);
@ -1989,7 +2040,7 @@ static int release_scratch_page_tree(struct intel_vgpu *vgpu)
for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
if (vgpu->gtt.scratch_pt[i].page != NULL) {
daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
GTT_PAGE_SHIFT);
I915_GTT_PAGE_SHIFT);
dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
__free_page(vgpu->gtt.scratch_pt[i].page);
vgpu->gtt.scratch_pt[i].page = NULL;
@ -2032,7 +2083,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
struct intel_vgpu_mm *ggtt_mm;
hash_init(gtt->guest_page_hash_table);
hash_init(gtt->tracked_guest_page_hash_table);
hash_init(gtt->shadow_page_hash_table);
INIT_LIST_HEAD(&gtt->mm_list_head);
@ -2285,15 +2336,16 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
__free_page(virt_to_page(page));
return -ENOMEM;
}
gvt->gtt.scratch_ggtt_page = virt_to_page(page);
gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT);
gvt->gtt.scratch_page = virt_to_page(page);
gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
if (enable_out_of_sync) {
ret = setup_spt_oos(gvt);
if (ret) {
gvt_err("fail to initialize SPT oos\n");
dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
__free_page(gvt->gtt.scratch_ggtt_page);
__free_page(gvt->gtt.scratch_page);
return ret;
}
}
@ -2312,12 +2364,12 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
struct device *dev = &gvt->dev_priv->drm.pdev->dev;
dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn <<
GTT_PAGE_SHIFT);
dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
I915_GTT_PAGE_SHIFT);
dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
__free_page(gvt->gtt.scratch_ggtt_page);
__free_page(gvt->gtt.scratch_page);
if (enable_out_of_sync)
clean_spt_oos(gvt);
@ -2343,7 +2395,7 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
e.type = GTT_TYPE_GGTT_PTE;
ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
ops->set_pfn(&e, gvt->gtt.scratch_mfn);
e.val64 |= _PAGE_PRESENT;
index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
@ -2369,8 +2421,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
*/
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
int i;
ppgtt_free_all_shadow_page(vgpu);
/* Shadow pages are only created when there is no page
@ -2380,11 +2430,4 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
intel_vgpu_reset_ggtt(vgpu);
/* clear scratch page for security */
for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
if (vgpu->gtt.scratch_pt[i].page != NULL)
memset(page_address(vgpu->gtt.scratch_pt[i].page),
0, PAGE_SIZE);
}
}


@ -34,9 +34,8 @@
#ifndef _GVT_GTT_H_
#define _GVT_GTT_H_
#define GTT_PAGE_SHIFT 12
#define GTT_PAGE_SIZE (1UL << GTT_PAGE_SHIFT)
#define GTT_PAGE_MASK (~(GTT_PAGE_SIZE-1))
#define I915_GTT_PAGE_SHIFT 12
#define I915_GTT_PAGE_MASK (~(I915_GTT_PAGE_SIZE - 1))
struct intel_vgpu_mm;
@ -63,6 +62,7 @@ struct intel_gvt_gtt_pte_ops {
struct intel_vgpu *vgpu);
bool (*test_present)(struct intel_gvt_gtt_entry *e);
void (*clear_present)(struct intel_gvt_gtt_entry *e);
void (*set_present)(struct intel_gvt_gtt_entry *e);
bool (*test_pse)(struct intel_gvt_gtt_entry *e);
void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
@ -86,8 +86,8 @@ struct intel_gvt_gtt {
struct list_head oos_page_free_list_head;
struct list_head mm_lru_list_head;
struct page *scratch_ggtt_page;
unsigned long scratch_ggtt_mfn;
struct page *scratch_page;
unsigned long scratch_mfn;
};
enum {
@ -193,18 +193,16 @@ struct intel_vgpu_scratch_pt {
unsigned long page_mfn;
};
struct intel_vgpu_gtt {
struct intel_vgpu_mm *ggtt_mm;
unsigned long active_ppgtt_mm_bitmap;
struct list_head mm_list_head;
DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
DECLARE_HASHTABLE(guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
atomic_t n_write_protected_guest_page;
DECLARE_HASHTABLE(tracked_guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
atomic_t n_tracked_guest_page;
struct list_head oos_page_list_head;
struct list_head post_shadow_list_head;
struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
};
extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
@ -228,12 +226,16 @@ struct intel_vgpu_shadow_page {
unsigned long mfn;
};
struct intel_vgpu_guest_page {
struct intel_vgpu_page_track {
struct hlist_node node;
bool writeprotection;
bool tracked;
unsigned long gfn;
int (*handler)(void *, u64, void *, int);
void *data;
};
struct intel_vgpu_guest_page {
struct intel_vgpu_page_track track;
unsigned long write_cnt;
struct intel_vgpu_oos_page *oos_page;
};
@ -243,7 +245,7 @@ struct intel_vgpu_oos_page {
struct list_head list;
struct list_head vm_list;
int id;
unsigned char mem[GTT_PAGE_SIZE];
unsigned char mem[I915_GTT_PAGE_SIZE];
};
#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
@ -258,22 +260,16 @@ struct intel_vgpu_ppgtt_spt {
struct list_head post_shadow_list;
};
int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *guest_page,
int intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
struct intel_vgpu_page_track *t,
unsigned long gfn,
int (*handler)(void *gp, u64, void *, int),
void *data);
void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *guest_page);
void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
struct intel_vgpu_page_track *t);
int intel_vgpu_set_guest_page_writeprotection(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *guest_page);
void intel_vgpu_clear_guest_page_writeprotection(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *guest_page);
struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
struct intel_vgpu *vgpu, unsigned long gfn);
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);


@ -36,6 +36,8 @@
#include "i915_drv.h"
#include "gvt.h"
#include <linux/vfio.h>
#include <linux/mdev.h>
struct intel_gvt_host intel_gvt_host;
@ -44,6 +46,129 @@ static const char * const supported_hypervisors[] = {
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
const char *name)
{
int i;
struct intel_vgpu_type *t;
const char *driver_name = dev_driver_string(
&gvt->dev_priv->drm.pdev->dev);
for (i = 0; i < gvt->num_types; i++) {
t = &gvt->types[i];
if (!strncmp(t->name, name + strlen(driver_name) + 1,
sizeof(t->name)))
return t;
}
return NULL;
}
static ssize_t available_instances_show(struct kobject *kobj,
struct device *dev, char *buf)
{
struct intel_vgpu_type *type;
unsigned int num = 0;
void *gvt = kdev_to_i915(dev)->gvt;
type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
if (!type)
num = 0;
else
num = type->avail_instance;
return sprintf(buf, "%u\n", num);
}
static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
char *buf)
{
return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}
static ssize_t description_show(struct kobject *kobj, struct device *dev,
char *buf)
{
struct intel_vgpu_type *type;
void *gvt = kdev_to_i915(dev)->gvt;
type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
if (!type)
return 0;
return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
"fence: %d\nresolution: %s\n"
"weight: %d\n",
BYTES_TO_MB(type->low_gm_size),
BYTES_TO_MB(type->high_gm_size),
type->fence, vgpu_edid_str(type->resolution),
type->weight);
}
static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);
static struct attribute *gvt_type_attrs[] = {
&mdev_type_attr_available_instances.attr,
&mdev_type_attr_device_api.attr,
&mdev_type_attr_description.attr,
NULL,
};
static struct attribute_group *gvt_vgpu_type_groups[] = {
[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
};
static bool intel_get_gvt_attrs(struct attribute ***type_attrs,
struct attribute_group ***intel_vgpu_type_groups)
{
*type_attrs = gvt_type_attrs;
*intel_vgpu_type_groups = gvt_vgpu_type_groups;
return true;
}
static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
{
int i, j;
struct intel_vgpu_type *type;
struct attribute_group *group;
for (i = 0; i < gvt->num_types; i++) {
type = &gvt->types[i];
group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
if (WARN_ON(!group))
goto unwind;
group->name = type->name;
group->attrs = gvt_type_attrs;
gvt_vgpu_type_groups[i] = group;
}
return true;
unwind:
for (j = 0; j < i; j++) {
group = gvt_vgpu_type_groups[j];
kfree(group);
}
return false;
}
static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
{
int i;
struct attribute_group *group;
for (i = 0; i < gvt->num_types; i++) {
group = gvt_vgpu_type_groups[i];
gvt_vgpu_type_groups[i] = NULL;
kfree(group);
}
}
static const struct intel_gvt_ops intel_gvt_ops = {
.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
@ -54,6 +179,8 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.vgpu_reset = intel_gvt_reset_vgpu,
.vgpu_activate = intel_gvt_activate_vgpu,
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
.gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
.get_gvt_attrs = intel_get_gvt_attrs,
};
/**
@ -191,17 +318,18 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
if (WARN_ON(!gvt))
return;
intel_gvt_debugfs_clean(gvt);
clean_service_thread(gvt);
intel_gvt_clean_cmd_parser(gvt);
intel_gvt_clean_sched_policy(gvt);
intel_gvt_clean_workload_scheduler(gvt);
intel_gvt_clean_opregion(gvt);
intel_gvt_clean_gtt(gvt);
intel_gvt_clean_irq(gvt);
intel_gvt_clean_mmio_info(gvt);
intel_gvt_free_firmware(gvt);
intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
intel_gvt_cleanup_vgpu_type_groups(gvt);
intel_gvt_clean_vgpu_types(gvt);
idr_destroy(&gvt->vgpu_idr);
@ -268,13 +396,9 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (ret)
goto out_clean_irq;
ret = intel_gvt_init_opregion(gvt);
if (ret)
goto out_clean_gtt;
ret = intel_gvt_init_workload_scheduler(gvt);
if (ret)
goto out_clean_opregion;
goto out_clean_gtt;
ret = intel_gvt_init_sched_policy(gvt);
if (ret)
@ -292,6 +416,12 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (ret)
goto out_clean_thread;
ret = intel_gvt_init_vgpu_type_groups(gvt);
if (ret == false) {
gvt_err("failed to init vgpu type groups: %d\n", ret);
goto out_clean_types;
}
ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt,
&intel_gvt_ops);
if (ret) {
@ -307,6 +437,10 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
}
gvt->idle_vgpu = vgpu;
ret = intel_gvt_debugfs_init(gvt);
if (ret)
gvt_err("debugfs registration failed, go on.\n");
gvt_dbg_core("gvt device initialization is done\n");
dev_priv->gvt = gvt;
return 0;
@ -321,8 +455,6 @@ out_clean_sched_policy:
intel_gvt_clean_sched_policy(gvt);
out_clean_workload_scheduler:
intel_gvt_clean_workload_scheduler(gvt);
out_clean_opregion:
intel_gvt_clean_opregion(gvt);
out_clean_gtt:
intel_gvt_clean_gtt(gvt);
out_clean_irq:


@ -125,7 +125,6 @@ struct intel_vgpu_irq {
struct intel_vgpu_opregion {
void *va;
u32 gfn[INTEL_GVT_OPREGION_PAGES];
struct page *pages[INTEL_GVT_OPREGION_PAGES];
};
#define vgpu_opregion(vgpu) (&(vgpu->opregion))
@ -142,6 +141,33 @@ struct vgpu_sched_ctl {
int weight;
};
enum {
INTEL_VGPU_EXECLIST_SUBMISSION = 1,
INTEL_VGPU_GUC_SUBMISSION,
};
struct intel_vgpu_submission_ops {
const char *name;
int (*init)(struct intel_vgpu *vgpu);
void (*clean)(struct intel_vgpu *vgpu);
void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
};
struct intel_vgpu_submission {
struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
struct list_head workload_q_head[I915_NUM_ENGINES];
struct kmem_cache *workloads;
atomic_t running_workload_num;
struct i915_gem_context *shadow_ctx;
DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
void *ring_scan_buffer[I915_NUM_ENGINES];
int ring_scan_buffer_size[I915_NUM_ENGINES];
const struct intel_vgpu_submission_ops *ops;
int virtual_submission_interface;
bool active;
};
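All per-vGPU submission state now lives in one struct, and the backend behind it is reached through an ops table. A sketch of the skeleton a backend is expected to provide (the demo_* names are illustrative; only the signatures come from the structs above, and ring_mode_mmio_write() later in this diff selects the execlist backend through intel_vgpu_select_submission_ops()):

    /* Illustrative backend skeleton wired through intel_vgpu_submission_ops. */
    static int demo_submission_init(struct intel_vgpu *vgpu)
    {
        return 0; /* set up per-engine execlists, shadow contexts, ... */
    }

    static void demo_submission_clean(struct intel_vgpu *vgpu)
    {
    }

    static void demo_submission_reset(struct intel_vgpu *vgpu,
            unsigned long engine_mask)
    {
    }

    static const struct intel_vgpu_submission_ops demo_submission_ops = {
        .name  = "demo",
        .init  = demo_submission_init,
        .clean = demo_submission_clean,
        .reset = demo_submission_reset,
    };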
struct intel_vgpu {
struct intel_gvt *gvt;
int id;
@ -161,16 +187,10 @@ struct intel_vgpu {
struct intel_vgpu_gtt gtt;
struct intel_vgpu_opregion opregion;
struct intel_vgpu_display display;
struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
struct list_head workload_q_head[I915_NUM_ENGINES];
struct kmem_cache *workloads;
atomic_t running_workload_num;
/* 1/2K for each reserve ring buffer */
void *reserve_ring_buffer_va[I915_NUM_ENGINES];
int reserve_ring_buffer_size[I915_NUM_ENGINES];
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
struct intel_vgpu_submission submission;
u32 hws_pga[I915_NUM_ENGINES];
struct dentry *debugfs;
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
struct {
@ -190,6 +210,10 @@ struct intel_vgpu {
#endif
};
/* validating GM healthy status*/
#define vgpu_is_vm_unhealthy(ret_val) \
(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
struct intel_gvt_gm {
unsigned long vgpu_allocated_low_gm_size;
unsigned long vgpu_allocated_high_gm_size;
@ -231,7 +255,7 @@ struct intel_gvt_mmio {
unsigned int num_mmio_block;
DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
unsigned int num_tracked_mmio;
unsigned long num_tracked_mmio;
};
struct intel_gvt_firmware {
@ -240,11 +264,6 @@ struct intel_gvt_firmware {
bool firmware_loaded;
};
struct intel_gvt_opregion {
void *opregion_va;
u32 opregion_pa;
};
#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
char name[16];
@ -268,7 +287,6 @@ struct intel_gvt {
struct intel_gvt_firmware firmware;
struct intel_gvt_irq irq;
struct intel_gvt_gtt gtt;
struct intel_gvt_opregion opregion;
struct intel_gvt_workload_scheduler scheduler;
struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
@ -279,6 +297,8 @@ struct intel_gvt {
struct task_struct *service_thread;
wait_queue_head_t service_thread_wq;
unsigned long service_request;
struct dentry *debugfs_root;
};
static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
@ -484,9 +504,6 @@ static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
PCI_BASE_ADDRESS_MEM_MASK;
}
void intel_gvt_clean_opregion(struct intel_gvt *gvt);
int intel_gvt_init_opregion(struct intel_gvt *gvt);
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
@ -494,6 +511,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);
struct intel_gvt_ops {
int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
@ -510,12 +528,17 @@ struct intel_gvt_ops {
void (*vgpu_reset)(struct intel_vgpu *);
void (*vgpu_activate)(struct intel_vgpu *);
void (*vgpu_deactivate)(struct intel_vgpu *);
struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
const char *name);
bool (*get_gvt_attrs)(struct attribute ***type_attrs,
struct attribute_group ***intel_vgpu_type_groups);
};
enum {
GVT_FAILSAFE_UNSUPPORTED_GUEST,
GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
GVT_FAILSAFE_GUEST_ERR,
};
static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
@ -591,6 +614,12 @@ static inline bool intel_gvt_mmio_has_mode_mask(
return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}
int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
int intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
#include "trace.h"
#include "mpt.h"


@ -137,17 +137,26 @@ static int new_mmio_info(struct intel_gvt *gvt,
return 0;
}
static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
/**
* intel_gvt_render_mmio_to_ring_id - convert a mmio offset into a ring id
* @gvt: a GVT device
* @offset: register offset
*
* Returns:
* Ring ID on success, negative error code if failed.
*/
int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
unsigned int offset)
{
enum intel_engine_id id;
struct intel_engine_cs *engine;
reg &= ~GENMASK(11, 0);
offset &= ~GENMASK(11, 0);
for_each_engine(engine, gvt->dev_priv, id) {
if (engine->mmio_base == reg)
if (engine->mmio_base == offset)
return id;
}
return -1;
return -ENODEV;
}
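The helper masks away the low 12 bits of the offset and compares the result against each engine's mmio_base, so for instance a write to the render ring's RING_HWS_PGA (0x2000 + 0x80 on the generations handled here) resolves to the render engine. Callers are expected to treat a negative return as "not a per-engine register", roughly as in this sketch (and as the new hws_pga_write() below does):

    /* Sketch: resolve which engine owns a ring register before emulating it. */
    int ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);

    if (ring_id < 0)
        return ring_id; /* -ENODEV: offset is not in any engine's mmio range */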
#define offset_to_fence_num(offset) \
@ -157,7 +166,7 @@ static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
(num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
{
switch (reason) {
case GVT_FAILSAFE_UNSUPPORTED_GUEST:
@ -165,6 +174,8 @@ static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
break;
case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
pr_err("Graphics resource is not enough for the guest\n");
case GVT_FAILSAFE_GUEST_ERR:
pr_err("GVT Internal error for the guest\n");
default:
break;
}
@ -1369,6 +1380,34 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
}
static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 value = *(u32 *)p_data;
int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
gvt_vgpu_err("VM(%d) write invalid HWSP address, reg:0x%x, value:0x%x\n",
vgpu->id, offset, value);
return -EINVAL;
}
/*
* Need to emulate all the HWSP register writes to ensure that the host can
* update the VM CSB status correctly. The registers listed here cover BDW,
* SKL and other platforms with the same HWSP registers.
*/
if (unlikely(ring_id < 0 || ring_id > I915_NUM_ENGINES)) {
gvt_vgpu_err("VM(%d) access unknown hardware status page register:0x%x\n",
vgpu->id, offset);
return -EINVAL;
}
vgpu->hws_pga[ring_id] = value;
gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
vgpu->id, value, offset);
return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
}
static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
@ -1398,18 +1437,36 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ring_id;
u32 ring_base;
ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
/*
* Read the HW reg in the following cases:
* a. the offset isn't a ring mmio
* b. the offset's ring is running on hw
* c. the offset is a ring timestamp mmio
*/
if (ring_id >= 0)
ring_base = dev_priv->engine[ring_id]->mmio_base;
if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] ||
offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
mmio_hw_access_pre(dev_priv);
vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
mmio_hw_access_post(dev_priv);
}
mmio_hw_access_pre(dev_priv);
vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
mmio_hw_access_post(dev_priv);
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
}
static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
struct intel_vgpu_execlist *execlist;
u32 data = *(u32 *)p_data;
int ret = 0;
@ -1417,9 +1474,9 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
return -EINVAL;
execlist = &vgpu->execlist[ring_id];
execlist = &vgpu->submission.execlist[ring_id];
execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
if (execlist->elsp_dwords.index == 3) {
ret = intel_vgpu_submit_execlist(vgpu, ring_id);
if(ret)
@ -1435,9 +1492,11 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct intel_vgpu_submission *s = &vgpu->submission;
u32 data = *(u32 *)p_data;
int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
bool enable_execlist;
int ret;
write_vreg(vgpu, offset, p_data, bytes);
@ -1459,8 +1518,18 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
(enable_execlist ? "enabling" : "disabling"),
ring_id);
if (enable_execlist)
intel_vgpu_start_schedule(vgpu);
if (!enable_execlist)
return 0;
if (s->active)
return 0;
ret = intel_vgpu_select_submission_ops(vgpu,
INTEL_VGPU_EXECLIST_SUBMISSION);
if (ret)
return ret;
intel_vgpu_start_schedule(vgpu);
}
return 0;
}
@ -1492,7 +1561,7 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
default:
return -EINVAL;
}
set_bit(id, (void *)vgpu->tlb_handle_pending);
set_bit(id, (void *)vgpu->submission.tlb_handle_pending);
return 0;
}
@ -2478,7 +2547,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, hws_pga_write);
MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@ -2879,14 +2948,46 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
gvt->mmio.mmio_block = mmio_blocks;
gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
gvt_dbg_mmio("traced %u virtual mmio registers\n",
gvt->mmio.num_tracked_mmio);
return 0;
err:
intel_gvt_clean_mmio_info(gvt);
return ret;
}
/**
* intel_gvt_for_each_tracked_mmio - iterate each tracked mmio
* @gvt: a GVT device
* @handler: the handler
* @data: private data given to handler
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
void *data)
{
struct gvt_mmio_block *block = gvt->mmio.mmio_block;
struct intel_gvt_mmio_info *e;
int i, j, ret;
hash_for_each(gvt->mmio.mmio_info_table, i, e, node) {
ret = handler(gvt, e->offset, data);
if (ret)
return ret;
}
for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
for (j = 0; j < block->size; j += 4) {
ret = handler(gvt,
INTEL_GVT_MMIO_OFFSET(block->offset) + j,
data);
if (ret)
return ret;
}
}
return 0;
}
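The firmware snapshot earlier in this diff is the first user of this iterator (mmio_snapshot_handler); any other walker only needs a callback with the same shape. A hypothetical example that just counts the tracked offsets:

    /* Hypothetical handler: count every tracked offset instead of reading it. */
    static int count_tracked_mmio(struct intel_gvt *gvt, u32 offset, void *data)
    {
        (*(unsigned long *)data)++;
        return 0; /* a non-zero return stops the iteration early */
    }

    /* usage, given a struct intel_gvt *gvt: */
    unsigned long n = 0;

    intel_gvt_for_each_tracked_mmio(gvt, count_tracked_mmio, &n);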
/**
* intel_vgpu_default_mmio_read - default MMIO read handler


@ -248,120 +248,6 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
}
}
static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
const char *name)
{
int i;
struct intel_vgpu_type *t;
const char *driver_name = dev_driver_string(
&gvt->dev_priv->drm.pdev->dev);
for (i = 0; i < gvt->num_types; i++) {
t = &gvt->types[i];
if (!strncmp(t->name, name + strlen(driver_name) + 1,
sizeof(t->name)))
return t;
}
return NULL;
}
static ssize_t available_instances_show(struct kobject *kobj,
struct device *dev, char *buf)
{
struct intel_vgpu_type *type;
unsigned int num = 0;
void *gvt = kdev_to_i915(dev)->gvt;
type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
if (!type)
num = 0;
else
num = type->avail_instance;
return sprintf(buf, "%u\n", num);
}
static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
char *buf)
{
return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}
static ssize_t description_show(struct kobject *kobj, struct device *dev,
char *buf)
{
struct intel_vgpu_type *type;
void *gvt = kdev_to_i915(dev)->gvt;
type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
if (!type)
return 0;
return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
"fence: %d\nresolution: %s\n"
"weight: %d\n",
BYTES_TO_MB(type->low_gm_size),
BYTES_TO_MB(type->high_gm_size),
type->fence, vgpu_edid_str(type->resolution),
type->weight);
}
static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);
static struct attribute *type_attrs[] = {
&mdev_type_attr_available_instances.attr,
&mdev_type_attr_device_api.attr,
&mdev_type_attr_description.attr,
NULL,
};
static struct attribute_group *intel_vgpu_type_groups[] = {
[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
};
static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
{
int i, j;
struct intel_vgpu_type *type;
struct attribute_group *group;
for (i = 0; i < gvt->num_types; i++) {
type = &gvt->types[i];
group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
if (WARN_ON(!group))
goto unwind;
group->name = type->name;
group->attrs = type_attrs;
intel_vgpu_type_groups[i] = group;
}
return true;
unwind:
for (j = 0; j < i; j++) {
group = intel_vgpu_type_groups[j];
kfree(group);
}
return false;
}
static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
{
int i;
struct attribute_group *group;
for (i = 0; i < gvt->num_types; i++) {
group = intel_vgpu_type_groups[i];
kfree(group);
}
}
static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
{
hash_init(info->ptable);
@ -441,7 +327,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
pdev = mdev_parent_dev(mdev);
gvt = kdev_to_i915(pdev)->gvt;
type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj));
if (!type) {
gvt_vgpu_err("failed to find type %s to create\n",
kobject_name(kobj));
@ -1188,7 +1074,7 @@ hw_id_show(struct device *dev, struct device_attribute *attr,
struct intel_vgpu *vgpu = (struct intel_vgpu *)
mdev_get_drvdata(mdev);
return sprintf(buf, "%u\n",
vgpu->shadow_ctx->hw_id);
vgpu->submission.shadow_ctx->hw_id);
}
return sprintf(buf, "\n");
}
@ -1212,8 +1098,7 @@ static const struct attribute_group *intel_vgpu_groups[] = {
NULL,
};
static const struct mdev_parent_ops intel_vgpu_ops = {
.supported_type_groups = intel_vgpu_type_groups,
static struct mdev_parent_ops intel_vgpu_ops = {
.mdev_attr_groups = intel_vgpu_groups,
.create = intel_vgpu_create,
.remove = intel_vgpu_remove,
@ -1229,17 +1114,20 @@ static const struct mdev_parent_ops intel_vgpu_ops = {
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
if (!intel_gvt_init_vgpu_type_groups(gvt))
return -EFAULT;
struct attribute **kvm_type_attrs;
struct attribute_group **kvm_vgpu_type_groups;
intel_gvt_ops = ops;
if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs,
&kvm_vgpu_type_groups))
return -EFAULT;
intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
return mdev_register_device(dev, &intel_vgpu_ops);
}
static void kvmgt_host_exit(struct device *dev, void *gvt)
{
intel_gvt_cleanup_vgpu_type_groups(gvt);
mdev_unregister_device(dev);
}


@ -117,18 +117,18 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
else
memcpy(pt, p_data, bytes);
} else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
struct intel_vgpu_guest_page *gp;
} else if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
struct intel_vgpu_page_track *t;
/* Since we enter the failsafe mode early during guest boot,
* guest may not have chance to set up its ppgtt table, so
* there should not be any wp pages for guest. Keep the wp
* related code here in case we need to handle it in the future.
*/
gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
if (gp) {
t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
if (t) {
/* remove write protection to prevent future traps */
intel_vgpu_clean_guest_page(vgpu, gp);
intel_vgpu_clean_page_track(vgpu, t);
if (read)
intel_gvt_hypervisor_read_gpa(vgpu, pa,
p_data, bytes);
@ -170,17 +170,17 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
return ret;
}
if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
struct intel_vgpu_guest_page *gp;
if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
struct intel_vgpu_page_track *t;
gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
if (gp) {
t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
if (t) {
ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
p_data, bytes);
if (ret) {
gvt_vgpu_err("guest page read error %d, "
"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
ret, gp->gfn, pa, *(u32 *)p_data,
ret, t->gfn, pa, *(u32 *)p_data,
bytes);
}
mutex_unlock(&gvt->lock);
@ -267,17 +267,17 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
return ret;
}
if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
struct intel_vgpu_guest_page *gp;
if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
struct intel_vgpu_page_track *t;
gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
if (gp) {
ret = gp->handler(gp, pa, p_data, bytes);
t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
if (t) {
ret = t->handler(t, pa, p_data, bytes);
if (ret) {
gvt_err("guest page write error %d, "
"gfn 0x%lx, pa 0x%llx, "
"var 0x%x, len %d\n",
ret, gp->gfn, pa,
ret, t->gfn, pa,
*(u32 *)p_data, bytes);
}
mutex_unlock(&gvt->lock);


@ -65,11 +65,17 @@ struct intel_gvt_mmio_info {
struct hlist_node node;
};
int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
unsigned int reg);
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
void *data);
#define INTEL_GVT_MMIO_OFFSET(reg) ({ \
typeof(reg) __reg = reg; \


@ -154,51 +154,53 @@ static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
}
/**
* intel_gvt_hypervisor_set_wp_page - set a guest page to write-protected
* intel_gvt_hypervisor_enable_page_track - set a guest page to write-protected
* @vgpu: a vGPU
* @p: intel_vgpu_guest_page
* @t: page track data structure
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_set_wp_page(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *p)
static inline int intel_gvt_hypervisor_enable_page_track(
struct intel_vgpu *vgpu,
struct intel_vgpu_page_track *t)
{
int ret;
if (p->writeprotection)
if (t->tracked)
return 0;
ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, p->gfn);
ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, t->gfn);
if (ret)
return ret;
p->writeprotection = true;
atomic_inc(&vgpu->gtt.n_write_protected_guest_page);
t->tracked = true;
atomic_inc(&vgpu->gtt.n_tracked_guest_page);
return 0;
}
/**
* intel_gvt_hypervisor_unset_wp_page - remove the write-protection of a
* intel_gvt_hypervisor_disable_page_track - remove the write-protection of a
* guest page
* @vgpu: a vGPU
* @p: intel_vgpu_guest_page
* @t: page track data structure
*
* Returns:
* Zero on success, negative error code if failed.
*/
static inline int intel_gvt_hypervisor_unset_wp_page(struct intel_vgpu *vgpu,
struct intel_vgpu_guest_page *p)
static inline int intel_gvt_hypervisor_disable_page_track(
struct intel_vgpu *vgpu,
struct intel_vgpu_page_track *t)
{
int ret;
if (!p->writeprotection)
if (!t->tracked)
return 0;
ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, p->gfn);
ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, t->gfn);
if (ret)
return ret;
p->writeprotection = false;
atomic_dec(&vgpu->gtt.n_write_protected_guest_page);
t->tracked = false;
atomic_dec(&vgpu->gtt.n_tracked_guest_page);
return 0;
}


@ -25,36 +25,247 @@
#include "i915_drv.h"
#include "gvt.h"
static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
/*
* Note: this is only for GVT-g virtual VBT generation; other code must
* not include the private VBT definitions like this.
*/
#define _INTEL_BIOS_PRIVATE
#include "intel_vbt_defs.h"
#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define MBOX_VBT (1<<3)
/* device handle */
#define DEVICE_TYPE_CRT 0x01
#define DEVICE_TYPE_EFP1 0x04
#define DEVICE_TYPE_EFP2 0x40
#define DEVICE_TYPE_EFP3 0x20
#define DEVICE_TYPE_EFP4 0x10
#define DEV_SIZE 38
struct opregion_header {
u8 signature[16];
u32 size;
u32 opregion_ver;
u8 bios_ver[32];
u8 vbios_ver[16];
u8 driver_ver[16];
u32 mboxes;
u32 driver_model;
u32 pcon;
u8 dver[32];
u8 rsvd[124];
} __packed;
struct bdb_data_header {
u8 id;
u16 size; /* data size */
} __packed;
struct efp_child_device_config {
u16 handle;
u16 device_type;
u16 device_class;
u8 i2c_speed;
u8 dp_onboard_redriver; /* 158 */
u8 dp_ondock_redriver; /* 158 */
u8 hdmi_level_shifter_value:4; /* 169 */
u8 hdmi_max_data_rate:4; /* 204 */
u16 dtd_buf_ptr; /* 161 */
u8 edidless_efp:1; /* 161 */
u8 compression_enable:1; /* 198 */
u8 compression_method:1; /* 198 */
u8 ganged_edp:1; /* 202 */
u8 skip0:4;
u8 compression_structure_index:4; /* 198 */
u8 skip1:4;
u8 slave_port; /* 202 */
u8 skip2;
u8 dvo_port;
u8 i2c_pin; /* for add-in card */
u8 slave_addr; /* for add-in card */
u8 ddc_pin;
u16 edid_ptr;
u8 dvo_config;
u8 efp_docked_port:1; /* 158 */
u8 lane_reversal:1; /* 184 */
u8 onboard_lspcon:1; /* 192 */
u8 iboost_enable:1; /* 196 */
u8 hpd_invert:1; /* BXT 196 */
u8 slip3:3;
u8 hdmi_compat:1;
u8 dp_compat:1;
u8 tmds_compat:1;
u8 skip4:5;
u8 aux_channel;
u8 dongle_detect;
u8 pipe_cap:2;
u8 sdvo_stall:1; /* 158 */
u8 hpd_status:2;
u8 integrated_encoder:1;
u8 skip5:2;
u8 dvo_wiring;
u8 mipi_bridge_type; /* 171 */
u16 device_class_ext;
u8 dvo_function;
u8 dp_usb_type_c:1; /* 195 */
u8 skip6:7;
u8 dp_usb_type_c_2x_gpio_index; /* 195 */
u16 dp_usb_type_c_2x_gpio_pin; /* 195 */
u8 iboost_dp:4; /* 196 */
u8 iboost_hdmi:4; /* 196 */
} __packed;
struct vbt {
/* header->bdb_offset point to bdb_header offset */
struct vbt_header header;
struct bdb_header bdb_header;
struct bdb_data_header general_features_header;
struct bdb_general_features general_features;
struct bdb_data_header general_definitions_header;
struct bdb_general_definitions general_definitions;
struct efp_child_device_config child0;
struct efp_child_device_config child1;
struct efp_child_device_config child2;
struct efp_child_device_config child3;
struct bdb_data_header driver_features_header;
struct bdb_driver_features driver_features;
};
static void virt_vbt_generation(struct vbt *v)
{
int num_child;
memset(v, 0, sizeof(struct vbt));
v->header.signature[0] = '$';
v->header.signature[1] = 'V';
v->header.signature[2] = 'B';
v->header.signature[3] = 'T';
/* there's features depending on version! */
v->header.version = 155;
v->header.header_size = sizeof(v->header);
v->header.vbt_size = sizeof(struct vbt) - sizeof(v->header);
v->header.bdb_offset = offsetof(struct vbt, bdb_header);
strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
v->bdb_header.version = 186; /* child_dev_size = 38 */
v->bdb_header.header_size = sizeof(v->bdb_header);
v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
- sizeof(struct bdb_header);
/* general features */
v->general_features_header.id = BDB_GENERAL_FEATURES;
v->general_features_header.size = sizeof(struct bdb_general_features);
v->general_features.int_crt_support = 0;
v->general_features.int_tv_support = 0;
/* child device */
num_child = 4; /* each port has one child */
v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS;
/* size will include child devices */
v->general_definitions_header.size =
sizeof(struct bdb_general_definitions) + num_child * DEV_SIZE;
v->general_definitions.child_dev_size = DEV_SIZE;
/* portA */
v->child0.handle = DEVICE_TYPE_EFP1;
v->child0.device_type = DEVICE_TYPE_DP;
v->child0.dvo_port = DVO_PORT_DPA;
v->child0.aux_channel = DP_AUX_A;
v->child0.dp_compat = true;
v->child0.integrated_encoder = true;
/* portB */
v->child1.handle = DEVICE_TYPE_EFP2;
v->child1.device_type = DEVICE_TYPE_DP;
v->child1.dvo_port = DVO_PORT_DPB;
v->child1.aux_channel = DP_AUX_B;
v->child1.dp_compat = true;
v->child1.integrated_encoder = true;
/* portC */
v->child2.handle = DEVICE_TYPE_EFP3;
v->child2.device_type = DEVICE_TYPE_DP;
v->child2.dvo_port = DVO_PORT_DPC;
v->child2.aux_channel = DP_AUX_C;
v->child2.dp_compat = true;
v->child2.integrated_encoder = true;
/* portD */
v->child3.handle = DEVICE_TYPE_EFP4;
v->child3.device_type = DEVICE_TYPE_DP;
v->child3.dvo_port = DVO_PORT_DPD;
v->child3.aux_channel = DP_AUX_D;
v->child3.dp_compat = true;
v->child3.integrated_encoder = true;
/* driver features */
v->driver_features_header.id = BDB_DRIVER_FEATURES;
v->driver_features_header.size = sizeof(struct bdb_driver_features);
v->driver_features.lvds_config = BDB_DRIVER_FEATURE_NO_LVDS;
}
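Because the guest only ever sees this VBT through the opregion's VBT mailbox, the generated image has to be self-describing: header.bdb_offset points at the BDB header, and each data block carries its own size. A small sketch of how a consumer would locate the BDB inside the generated image (illustrative only, using the struct vbt defined above):

    /* Sketch: find the BDB block inside the generated VBT image. */
    struct vbt v;
    const struct vbt_header *vh = &v.header;
    const struct bdb_header *bdb;

    virt_vbt_generation(&v);
    bdb = (const void *)vh + vh->bdb_offset; /* lands on &v.bdb_header */
    /* bdb->bdb_size then covers the feature/definition blocks and the four
     * EFP child devices laid out after the two headers.
     */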
static int alloc_and_init_virt_opregion(struct intel_vgpu *vgpu)
{
u8 *buf;
int i;
struct opregion_header *header;
struct vbt v;
gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
__GFP_ZERO,
get_order(INTEL_GVT_OPREGION_SIZE));
if (!vgpu_opregion(vgpu)->va) {
gvt_err("fail to get memory for vgpu virt opregion\n");
return -ENOMEM;
}
/* emulated opregion with VBT mailbox only */
buf = (u8 *)vgpu_opregion(vgpu)->va;
header = (struct opregion_header *)buf;
memcpy(header->signature, OPREGION_SIGNATURE,
sizeof(OPREGION_SIGNATURE));
header->size = 0x8;
header->opregion_ver = 0x02000000;
header->mboxes = MBOX_VBT;
/* for unknown reason, the value in LID field is incorrect
* which block the windows guest, so workaround it by force
* setting it to "OPEN"
*/
buf[INTEL_GVT_OPREGION_CLID] = 0x3;
/* emulated vbt from virt vbt generation */
virt_vbt_generation(&v);
memcpy(buf + INTEL_GVT_OPREGION_VBT_OFFSET, &v, sizeof(struct vbt));
return 0;
}
static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
{
int i, ret;
if (WARN((vgpu_opregion(vgpu)->va),
"vgpu%d: opregion has been initialized already.\n",
vgpu->id))
return -EINVAL;
vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
__GFP_ZERO,
get_order(INTEL_GVT_OPREGION_SIZE));
if (!vgpu_opregion(vgpu)->va)
return -ENOMEM;
memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va,
INTEL_GVT_OPREGION_SIZE);
ret = alloc_and_init_virt_opregion(vgpu);
if (ret < 0)
return ret;
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
/* for unknown reason, the value in LID field is incorrect
* which block the windows guest, so workaround it by force
* setting it to "OPEN"
*/
buf = (u8 *)vgpu_opregion(vgpu)->va;
buf[INTEL_GVT_OPREGION_CLID] = 0x3;
return 0;
}
@ -132,40 +343,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
return 0;
}
/**
* intel_gvt_clean_opregion - clean host opergion related stuffs
* @gvt: a GVT device
*
*/
void intel_gvt_clean_opregion(struct intel_gvt *gvt)
{
memunmap(gvt->opregion.opregion_va);
gvt->opregion.opregion_va = NULL;
}
/**
* intel_gvt_init_opregion - initialize host opergion related stuffs
* @gvt: a GVT device
*
* Returns:
* Zero on success, negative error code if failed.
*/
int intel_gvt_init_opregion(struct intel_gvt *gvt)
{
gvt_dbg_core("init host opregion\n");
pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
&gvt->opregion.opregion_pa);
gvt->opregion.opregion_va = memremap(gvt->opregion.opregion_pa,
INTEL_GVT_OPREGION_SIZE, MEMREMAP_WB);
if (!gvt->opregion.opregion_va) {
gvt_err("fail to map host opregion\n");
return -EFAULT;
}
return 0;
}
#define GVT_OPREGION_FUNC(scic) \
({ \
u32 __ret; \


@ -51,6 +51,9 @@
#define INTEL_GVT_OPREGION_PAGES 2
#define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)
#define INTEL_GVT_OPREGION_VBT_OFFSET 0x400
#define INTEL_GVT_OPREGION_VBT_SIZE \
(INTEL_GVT_OPREGION_SIZE - INTEL_GVT_OPREGION_VBT_OFFSET)
#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
@ -71,6 +74,7 @@
#define RB_HEAD_OFF_MASK ((1U << 21) - (1U << 2))
#define RB_TAIL_OFF_MASK ((1U << 21) - (1U << 3))
#define RB_TAIL_SIZE_MASK ((1U << 21) - (1U << 12))
#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + GTT_PAGE_SIZE)
#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + \
I915_GTT_PAGE_SIZE)
#endif
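/* Sanity-check sketch (not part of the patch), assuming the usual RING_CTL
* layout where bits 20:12 hold the ring length in pages minus one: a 16 KiB
* ring has that field set to 3, so (ctl & RB_TAIL_SIZE_MASK) is 0x3000 and
* _RING_CTL_BUF_SIZE(ctl) is 0x3000 + I915_GTT_PAGE_SIZE = 0x4000.
*/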


@ -147,6 +147,7 @@ static u32 gen9_render_mocs_L3[32];
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_vgpu_submission *s = &vgpu->submission;
enum forcewake_domains fw;
i915_reg_t reg;
u32 regs[] = {
@ -160,7 +161,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
if (!test_and_clear_bit(ring_id, (void *)vgpu->tlb_handle_pending))
if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
return;
reg = _MMIO(regs[ring_id]);
@ -208,7 +209,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
gen9_render_mocs[ring_id][i] = I915_READ_FW(offset);
I915_WRITE(offset, vgpu_vreg(vgpu, offset));
I915_WRITE_FW(offset, vgpu_vreg(vgpu, offset));
offset.reg += 4;
}
@ -261,14 +262,15 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct render_mmio *mmio;
u32 v;
int i, array_size;
u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state;
struct intel_vgpu_submission *s = &vgpu->submission;
u32 *reg_state = s->shadow_ctx->engine[ring_id].lrc_reg_state;
u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
u32 inhibit_mask =
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
i915_reg_t last_reg = _MMIO(0);
struct render_mmio *mmio;
u32 v;
int i, array_size;
if (IS_SKYLAKE(vgpu->gvt->dev_priv)
|| IS_KABYLAKE(vgpu->gvt->dev_priv)) {


@ -57,7 +57,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
@ -81,16 +81,16 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
while (i < context_page_num) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((workload->ctx_desc.lrca + i) <<
GTT_PAGE_SHIFT));
I915_GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_vgpu_err("Invalid guest context descriptor\n");
return -EINVAL;
return -EFAULT;
}
page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
dst = kmap(page);
intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
GTT_PAGE_SIZE);
I915_GTT_PAGE_SIZE);
kunmap(page);
i++;
}
@ -120,7 +120,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
sizeof(*shadow_ring_context),
(void *)shadow_ring_context +
sizeof(*shadow_ring_context),
GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
kunmap(page);
return 0;
@ -131,6 +131,20 @@ static inline bool is_gvt_request(struct drm_i915_gem_request *req)
return i915_gem_context_force_single_submission(req->ctx);
}
static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
{
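/* Latch the ring's INSTDONE/ACTHD registers into the vGPU's virtual
* register file so that subsequent guest MMIO reads return the values
* captured at context switch-out.
*/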
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
i915_reg_t reg;
reg = RING_INSTDONE(ring_base);
vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
reg = RING_ACTHD(ring_base);
vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
reg = RING_ACTHD_UDW(ring_base);
vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
}
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
@ -176,6 +190,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
save_ring_hw_state(workload->vgpu, ring_id);
atomic_set(&workload->shadow_ctx_active, 0);
break;
default:
@ -250,11 +265,12 @@ void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
*/
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ring_id = workload->ring_id;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct intel_ring *ring;
int ret;
@ -267,7 +283,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
if (!test_and_set_bit(ring_id, vgpu->shadow_ctx_desc_updated))
if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
shadow_context_descriptor_update(shadow_ctx,
dev_priv->engine[ring_id]);
@ -310,14 +326,15 @@ err_scan:
return ret;
}
int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
{
int ring_id = workload->ring_id;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
int ret;
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
@ -341,11 +358,203 @@ err_unpin:
return ret;
}
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
struct intel_gvt *gvt = workload->vgpu->gvt;
const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
struct intel_vgpu_shadow_bb *bb;
int ret;
list_for_each_entry(bb, &workload->shadow_bb, list) {
bb->vma = i915_gem_object_ggtt_pin(bb->obj, NULL, 0, 0, 0);
if (IS_ERR(bb->vma)) {
ret = PTR_ERR(bb->vma);
goto err;
}
/* relocate shadow batch buffer */
bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
if (gmadr_bytes == 8)
bb->bb_start_cmd_va[2] = 0;
/* No one is going to touch shadow bb from now on. */
if (bb->clflush & CLFLUSH_AFTER) {
drm_clflush_virt_range(bb->va, bb->obj->base.size);
bb->clflush &= ~CLFLUSH_AFTER;
}
ret = i915_gem_object_set_to_gtt_domain(bb->obj, false);
if (ret)
goto err;
i915_gem_obj_finish_shmem_access(bb->obj);
bb->accessing = false;
i915_vma_move_to_active(bb->vma, workload->req, 0);
}
return 0;
err:
release_shadow_batch_buffer(workload);
return ret;
}
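/* For reference (assumed command layout, not part of the patch): the
* bb_start_cmd_va pointer patched above addresses the MI_BATCH_BUFFER_START
* command that was copied into the shadow ring buffer:
*   bb_start_cmd_va[0]  command header
*   bb_start_cmd_va[1]  batch buffer start address, low 32 bits
*   bb_start_cmd_va[2]  upper address dword when the command carries 8-byte
*                       graphics addresses; cleared here because the shadow
*                       batch is pinned in the low range of the GGTT.
*/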
static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
struct intel_vgpu_workload *workload = container_of(wa_ctx,
struct intel_vgpu_workload,
wa_ctx);
int ring_id = workload->ring_id;
struct intel_vgpu_submission *s = &workload->vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
shadow_ring_context = kmap_atomic(page);
shadow_ring_context->bb_per_ctx_ptr.val =
(shadow_ring_context->bb_per_ctx_ptr.val &
(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
shadow_ring_context->rcs_indirect_ctx.val =
(shadow_ring_context->rcs_indirect_ctx.val &
(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
kunmap_atomic(shadow_ring_context);
return 0;
}
static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
struct i915_vma *vma;
unsigned char *per_ctx_va =
(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
wa_ctx->indirect_ctx.size;
if (wa_ctx->indirect_ctx.size == 0)
return 0;
vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
0, CACHELINE_BYTES, 0);
if (IS_ERR(vma))
return PTR_ERR(vma);
/* FIXME: we are not tracking our pinned VMA leaving it
* up to the core to fix up the stray pin_count upon
* free.
*/
wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
memset(per_ctx_va, 0, CACHELINE_BYTES);
update_wa_ctx_2_shadow_ctx(wa_ctx);
return 0;
}
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_vgpu_shadow_bb *bb, *pos;
if (list_empty(&workload->shadow_bb))
return;
bb = list_first_entry(&workload->shadow_bb,
struct intel_vgpu_shadow_bb, list);
mutex_lock(&dev_priv->drm.struct_mutex);
list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
if (bb->obj) {
if (bb->accessing)
i915_gem_obj_finish_shmem_access(bb->obj);
if (bb->va && !IS_ERR(bb->va))
i915_gem_object_unpin_map(bb->obj);
if (bb->vma && !IS_ERR(bb->vma)) {
i915_vma_unpin(bb->vma);
i915_vma_close(bb->vma);
}
__i915_gem_object_release_unless_active(bb->obj);
}
list_del(&bb->list);
kfree(bb);
}
mutex_unlock(&dev_priv->drm.struct_mutex);
}
static int prepare_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
int ret = 0;
ret = intel_vgpu_pin_mm(workload->shadow_mm);
if (ret) {
gvt_vgpu_err("fail to vgpu pin mm\n");
return ret;
}
ret = intel_vgpu_sync_oos_pages(workload->vgpu);
if (ret) {
gvt_vgpu_err("fail to vgpu sync oos pages\n");
goto err_unpin_mm;
}
ret = intel_vgpu_flush_post_shadow(workload->vgpu);
if (ret) {
gvt_vgpu_err("fail to flush post shadow\n");
goto err_unpin_mm;
}
ret = intel_gvt_generate_request(workload);
if (ret) {
gvt_vgpu_err("fail to generate request\n");
goto err_unpin_mm;
}
ret = prepare_shadow_batch_buffer(workload);
if (ret) {
gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
goto err_unpin_mm;
}
ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
if (ret) {
gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
goto err_shadow_batch;
}
if (workload->prepare) {
ret = workload->prepare(workload);
if (ret)
goto err_shadow_wa_ctx;
}
return 0;
err_shadow_wa_ctx:
release_shadow_wa_ctx(&workload->wa_ctx);
err_shadow_batch:
release_shadow_batch_buffer(workload);
err_unpin_mm:
intel_vgpu_unpin_mm(workload->shadow_mm);
return ret;
}
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
int ret = 0;
@ -358,12 +567,10 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
if (ret)
goto out;
if (workload->prepare) {
ret = workload->prepare(workload);
if (ret) {
engine->context_unpin(engine, shadow_ctx);
goto out;
}
ret = prepare_workload(workload);
if (ret) {
engine->context_unpin(engine, shadow_ctx);
goto out;
}
out:
@ -431,7 +638,7 @@ static struct intel_vgpu_workload *pick_next_workload(
gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
atomic_inc(&workload->vgpu->running_workload_num);
atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
mutex_unlock(&gvt->lock);
return workload;
@ -441,8 +648,9 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_gem_context *shadow_ctx = s->shadow_ctx;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
@ -466,7 +674,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
while (i < context_page_num) {
context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((workload->ctx_desc.lrca + i) <<
GTT_PAGE_SHIFT));
I915_GTT_PAGE_SHIFT));
if (context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_vgpu_err("invalid guest context descriptor\n");
return;
@ -475,7 +683,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
src = kmap(page);
intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
GTT_PAGE_SIZE);
I915_GTT_PAGE_SIZE);
kunmap(page);
i++;
}
@ -500,23 +708,41 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
sizeof(*shadow_ring_context),
(void *)shadow_ring_context +
sizeof(*shadow_ring_context),
GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
kunmap(page);
}
static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
struct intel_vgpu_workload *pos, *n;
unsigned int tmp;
/* free the unsubmitted workloads in the queues. */
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
list_for_each_entry_safe(pos, n,
&s->workload_q_head[engine->id], list) {
list_del_init(&pos->list);
intel_vgpu_destroy_workload(pos);
}
clear_bit(engine->id, s->shadow_ctx_desc_updated);
}
}
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload;
struct intel_vgpu *vgpu;
struct intel_vgpu_workload *workload =
scheduler->current_workload[ring_id];
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
int event;
mutex_lock(&gvt->lock);
workload = scheduler->current_workload[ring_id];
vgpu = workload->vgpu;
/* For a workload with a request, we need to wait for the context
* switch to make sure the request has completed.
* For a workload without a request, complete the workload directly.
@ -553,7 +779,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
}
mutex_lock(&dev_priv->drm.struct_mutex);
/* unpin shadow ctx as the shadow_ctx update is done */
engine->context_unpin(engine, workload->vgpu->shadow_ctx);
engine->context_unpin(engine, s->shadow_ctx);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
@ -563,9 +789,32 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
scheduler->current_workload[ring_id] = NULL;
list_del_init(&workload->list);
if (!workload->status) {
release_shadow_batch_buffer(workload);
release_shadow_wa_ctx(&workload->wa_ctx);
}
if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
/* If workload->status is not successful, the HW GPU hit a hang or
* something went wrong in i915/GVT, and GVT will not inject a
* context switch interrupt to the guest, so to the guest this error
* is effectively a vGPU hang. Emulate that vGPU hang: if there are
* pending workloads already submitted by the guest, clean them up
* just like the HW GPU would.
*
* If we are in the middle of an engine reset, the pending workloads
* will not be submitted to the HW GPU and will be cleaned up later
* during the reset, so doing the workload clean-up here has no
* adverse impact.
*/
clean_workloads(vgpu, ENGINE_MASK(ring_id));
}
workload->complete(workload);
atomic_dec(&vgpu->running_workload_num);
atomic_dec(&s->running_workload_num);
wake_up(&scheduler->workload_complete_wq);
if (gvt->scheduler.need_reschedule)
@ -648,20 +897,23 @@ complete:
FORCEWAKE_ALL);
intel_runtime_pm_put(gvt->dev_priv);
if (ret && (vgpu_is_vm_unhealthy(ret)))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
return 0;
}
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
if (atomic_read(&vgpu->running_workload_num)) {
if (atomic_read(&s->running_workload_num)) {
gvt_dbg_sched("wait vgpu idle\n");
wait_event(scheduler->workload_complete_wq,
!atomic_read(&vgpu->running_workload_num));
!atomic_read(&s->running_workload_num));
}
}
@ -726,23 +978,354 @@ err:
return ret;
}
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
/**
* intel_vgpu_clean_submission - free submission-related resources for a vGPU
* @vgpu: a vGPU
*
* This function is called when a vGPU is being destroyed.
*
*/
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
i915_gem_context_put(vgpu->shadow_ctx);
struct intel_vgpu_submission *s = &vgpu->submission;
intel_vgpu_select_submission_ops(vgpu, 0);
i915_gem_context_put(s->shadow_ctx);
kmem_cache_destroy(s->workloads);
}
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
/**
* intel_vgpu_reset_submission - reset submission-related resources for a vGPU
* @vgpu: a vGPU
* @engine_mask: engines expected to be reset
*
* This function is called when a vGPU is being reset.
*
*/
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
unsigned long engine_mask)
{
atomic_set(&vgpu->running_workload_num, 0);
struct intel_vgpu_submission *s = &vgpu->submission;
vgpu->shadow_ctx = i915_gem_context_create_gvt(
if (!s->active)
return;
clean_workloads(vgpu, engine_mask);
s->ops->reset(vgpu, engine_mask);
}
/**
* intel_vgpu_setup_submission - set up submission-related resources for a vGPU
* @vgpu: a vGPU
*
* This function is called when a vGPU is being created.
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
struct intel_vgpu_submission *s = &vgpu->submission;
enum intel_engine_id i;
struct intel_engine_cs *engine;
int ret;
s->shadow_ctx = i915_gem_context_create_gvt(
&vgpu->gvt->dev_priv->drm);
if (IS_ERR(vgpu->shadow_ctx))
return PTR_ERR(vgpu->shadow_ctx);
if (IS_ERR(s->shadow_ctx))
return PTR_ERR(s->shadow_ctx);
vgpu->shadow_ctx->engine[RCS].initialised = true;
bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
sizeof(struct intel_vgpu_workload), 0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!s->workloads) {
ret = -ENOMEM;
goto out_shadow_ctx;
}
for_each_engine(engine, vgpu->gvt->dev_priv, i)
INIT_LIST_HEAD(&s->workload_q_head[i]);
atomic_set(&s->running_workload_num, 0);
bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
return 0;
out_shadow_ctx:
i915_gem_context_put(s->shadow_ctx);
return ret;
}
/**
* intel_vgpu_select_submission_ops - select virtual submission interface
* @vgpu: a vGPU
* @interface: expected vGPU virtual submission interface
*
* This function is called when the guest configures the submission interface.
*
* Returns:
* Zero on success, negative error code if failed.
*
*/
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
unsigned int interface)
{
struct intel_vgpu_submission *s = &vgpu->submission;
const struct intel_vgpu_submission_ops *ops[] = {
[INTEL_VGPU_EXECLIST_SUBMISSION] =
&intel_vgpu_execlist_submission_ops,
};
int ret;
if (WARN_ON(interface >= ARRAY_SIZE(ops)))
return -EINVAL;
if (s->active) {
s->ops->clean(vgpu);
s->active = false;
gvt_dbg_core("vgpu%d: de-select ops [ %s ] \n",
vgpu->id, s->ops->name);
}
if (interface == 0) {
s->ops = NULL;
s->virtual_submission_interface = 0;
gvt_dbg_core("vgpu%d: no submission ops\n", vgpu->id);
return 0;
}
ret = ops[interface]->init(vgpu);
if (ret)
return ret;
s->ops = ops[interface];
s->virtual_submission_interface = interface;
s->active = true;
gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
vgpu->id, s->ops->name);
return 0;
}
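/* Usage sketch (the execlist call below is an assumption, not part of this
* patch): the vGPU reset path de-selects the backend by passing interface 0,
* while enabling guest execlist submission would select the execlist ops:
*
*	intel_vgpu_select_submission_ops(vgpu, 0);
*	ret = intel_vgpu_select_submission_ops(vgpu,
*					       INTEL_VGPU_EXECLIST_SUBMISSION);
*/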
/**
* intel_vgpu_destroy_workload - destroy a vGPU workload
* @workload: the workload to destroy
*
* This function is called when destroying a vGPU workload.
*
*/
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu_submission *s = &workload->vgpu->submission;
if (workload->shadow_mm)
intel_gvt_mm_unreference(workload->shadow_mm);
kmem_cache_free(s->workloads, workload);
}
static struct intel_vgpu_workload *
alloc_workload(struct intel_vgpu *vgpu)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_vgpu_workload *workload;
workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
if (!workload)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&workload->list);
INIT_LIST_HEAD(&workload->shadow_bb);
init_waitqueue_head(&workload->shadow_ctx_status_wq);
atomic_set(&workload->shadow_ctx_active, 0);
workload->status = -EINPROGRESS;
workload->shadowed = false;
workload->vgpu = vgpu;
return workload;
}
#define RING_CTX_OFF(x) \
offsetof(struct execlist_ring_context, x)
static void read_guest_pdps(struct intel_vgpu *vgpu,
u64 ring_context_gpa, u32 pdp[8])
{
u64 gpa;
int i;
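/* The guest ring context lays the PDPs out from PDP3_UDW down to PDP0_LDW;
* walk them in that order and fill pdp[] back to front so that indices 0..7
* end up holding PDP0 low/high .. PDP3 low/high.
*/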
gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
for (i = 0; i < 8; i++)
intel_gvt_hypervisor_read_gpa(vgpu,
gpa + i * 8, &pdp[7 - i], 4);
}
static int prepare_mm(struct intel_vgpu_workload *workload)
{
struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
struct intel_vgpu_mm *mm;
struct intel_vgpu *vgpu = workload->vgpu;
int page_table_level;
u32 pdp[8];
if (desc->addressing_mode == 1) { /* legacy 32-bit */
page_table_level = 3;
} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
page_table_level = 4;
} else {
gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
return -EINVAL;
}
read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
if (mm) {
intel_gvt_mm_reference(mm);
} else {
mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
pdp, page_table_level, 0);
if (IS_ERR(mm)) {
gvt_vgpu_err("fail to create mm object.\n");
return PTR_ERR(mm);
}
}
workload->shadow_mm = mm;
return 0;
}
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
((a)->lrca == (b)->lrca))
#define get_last_workload(q) \
(list_empty(q) ? NULL : container_of(q->prev, \
struct intel_vgpu_workload, list))
/**
* intel_vgpu_create_workload - create a vGPU workload
* @vgpu: a vGPU
* @ring_id: ring index the workload is submitted on
* @desc: a guest context descriptor
*
* This function is called when creating a vGPU workload.
*
* Returns:
* struct intel_vgpu_workload * on success, or an ERR_PTR()-encoded
* negative error code on failure.
*
*/
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
struct execlist_ctx_descriptor_format *desc)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct list_head *q = workload_q_head(vgpu, ring_id);
struct intel_vgpu_workload *last_workload = get_last_workload(q);
struct intel_vgpu_workload *workload = NULL;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u64 ring_context_gpa;
u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
int ret;
ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
(u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
return ERR_PTR(-EINVAL);
}
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ring_header.val), &head, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ring_tail.val), &tail, 4);
head &= RB_HEAD_OFF_MASK;
tail &= RB_TAIL_OFF_MASK;
if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
gvt_dbg_el("ctx head %x real head %lx\n", head,
last_workload->rb_tail);
/*
* cannot use guest context head pointer here,
* as it might not be updated at this time
*/
head = last_workload->rb_tail;
}
gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
/* record some ring buffer register values for scan and shadow */
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rb_start.val), &start, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
workload = alloc_workload(vgpu);
if (IS_ERR(workload))
return workload;
workload->ring_id = ring_id;
workload->ctx_desc = *desc;
workload->ring_context_gpa = ring_context_gpa;
workload->rb_head = head;
workload->rb_tail = tail;
workload->rb_start = start;
workload->rb_ctl = ctl;
if (ring_id == RCS) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
workload->wa_ctx.indirect_ctx.guest_gma =
indirect_ctx & INDIRECT_CTX_ADDR_MASK;
workload->wa_ctx.indirect_ctx.size =
(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
CACHELINE_BYTES;
workload->wa_ctx.per_ctx.guest_gma =
per_ctx & PER_CTX_ADDR_MASK;
workload->wa_ctx.per_ctx.valid = per_ctx & 1;
}
gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
workload, ring_id, head, tail, start, ctl);
ret = prepare_mm(workload);
if (ret) {
kmem_cache_free(s->workloads, workload);
return ERR_PTR(ret);
}
/* Only scan and shadow the first workload in the queue
* as there is only one pre-allocated buf-obj for shadow.
*/
if (list_empty(workload_q_head(vgpu, ring_id))) {
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_scan_and_shadow_workload(workload);
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
}
if (ret && (vgpu_is_vm_unhealthy(ret))) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
intel_vgpu_destroy_workload(workload);
return ERR_PTR(ret);
}
return workload;
}


@ -112,17 +112,18 @@ struct intel_vgpu_workload {
struct intel_shadow_wa_ctx wa_ctx;
};
/* Intel shadow batch buffer is a i915 gem object */
struct intel_shadow_bb_entry {
struct intel_vgpu_shadow_bb {
struct list_head list;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
void *va;
unsigned long len;
u32 *bb_start_cmd_va;
unsigned int clflush;
bool accessing;
};
#define workload_q_head(vgpu, ring_id) \
(&(vgpu->workload_q_head[ring_id]))
(&(vgpu->submission.workload_q_head[ring_id]))
#define queue_workload(workload) do { \
list_add_tail(&workload->list, \
@ -137,12 +138,23 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);
void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
unsigned long engine_mask);
void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
int intel_gvt_generate_request(struct intel_vgpu_workload *workload);
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
unsigned int interface);
extern const struct intel_vgpu_submission_ops
intel_vgpu_execlist_submission_ops;
struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
struct execlist_ctx_descriptor_format *desc);
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
#endif


@ -43,7 +43,10 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
vgpu_aperture_gmadr_base(vgpu);
vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
@ -226,7 +229,7 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
vgpu->active = false;
if (atomic_read(&vgpu->running_workload_num)) {
if (atomic_read(&vgpu->submission.running_workload_num)) {
mutex_unlock(&gvt->lock);
intel_gvt_wait_vgpu_idle(vgpu);
mutex_lock(&gvt->lock);
@ -252,10 +255,10 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
WARN(vgpu->active, "vGPU is still active!\n");
intel_gvt_debugfs_remove_vgpu(vgpu);
idr_remove(&gvt->vgpu_idr, vgpu->id);
intel_vgpu_clean_sched_policy(vgpu);
intel_vgpu_clean_gvt_context(vgpu);
intel_vgpu_clean_execlist(vgpu);
intel_vgpu_clean_submission(vgpu);
intel_vgpu_clean_display(vgpu);
intel_vgpu_clean_opregion(vgpu);
intel_vgpu_clean_gtt(vgpu);
@ -293,7 +296,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
vgpu->gvt = gvt;
for (i = 0; i < I915_NUM_ENGINES; i++)
INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);
ret = intel_vgpu_init_sched_policy(vgpu);
if (ret)
@ -346,7 +349,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
vgpu->handle = param->handle;
vgpu->gvt = gvt;
vgpu->sched_ctl.weight = param->weight;
bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
intel_vgpu_init_cfg_space(vgpu, param->primary);
@ -372,26 +374,26 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_gtt;
ret = intel_vgpu_init_execlist(vgpu);
ret = intel_vgpu_setup_submission(vgpu);
if (ret)
goto out_clean_display;
ret = intel_vgpu_init_gvt_context(vgpu);
if (ret)
goto out_clean_execlist;
ret = intel_vgpu_init_sched_policy(vgpu);
if (ret)
goto out_clean_shadow_ctx;
goto out_clean_submission;
ret = intel_gvt_debugfs_add_vgpu(vgpu);
if (ret)
goto out_clean_sched_policy;
mutex_unlock(&gvt->lock);
return vgpu;
out_clean_shadow_ctx:
intel_vgpu_clean_gvt_context(vgpu);
out_clean_execlist:
intel_vgpu_clean_execlist(vgpu);
out_clean_sched_policy:
intel_vgpu_clean_sched_policy(vgpu);
out_clean_submission:
intel_vgpu_clean_submission(vgpu);
out_clean_display:
intel_vgpu_clean_display(vgpu);
out_clean_gtt:
@ -500,10 +502,10 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
mutex_lock(&gvt->lock);
}
intel_vgpu_reset_execlist(vgpu, resetting_eng);
intel_vgpu_reset_submission(vgpu, resetting_eng);
/* full GPU reset or device model level reset */
if (engine_mask == ALL_ENGINES || dmlr) {
intel_vgpu_select_submission_ops(vgpu, 0);
/*fence will not be reset during virtual reset */
if (dmlr) {


@ -798,22 +798,15 @@ struct cmd_node {
*/
static inline u32 cmd_header_key(u32 x)
{
u32 shift;
switch (x >> INSTR_CLIENT_SHIFT) {
default:
case INSTR_MI_CLIENT:
shift = STD_MI_OPCODE_SHIFT;
break;
return x >> STD_MI_OPCODE_SHIFT;
case INSTR_RC_CLIENT:
shift = STD_3D_OPCODE_SHIFT;
break;
return x >> STD_3D_OPCODE_SHIFT;
case INSTR_BC_CLIENT:
shift = STD_2D_OPCODE_SHIFT;
break;
return x >> STD_2D_OPCODE_SHIFT;
}
return x >> shift;
}
static int init_hash_table(struct intel_engine_cs *engine,


@ -30,7 +30,7 @@
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include "intel_drv.h"
#include "i915_guc_submission.h"
#include "intel_guc_submission.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@ -1974,7 +1974,6 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct intel_context *ce = &ctx->engine[engine->id];
seq_printf(m, "%s: ", engine->name);
seq_putc(m, ce->initialised ? 'I' : 'i');
if (ce->state)
describe_obj(m, ce->state->obj);
if (ce->ring)
@ -2434,7 +2433,7 @@ static void i915_guc_log_info(struct seq_file *m,
static void i915_guc_client_info(struct seq_file *m,
struct drm_i915_private *dev_priv,
struct i915_guc_client *client)
struct intel_guc_client *client)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@ -2484,6 +2483,8 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
i915_guc_client_info(m, dev_priv, guc->execbuf_client);
seq_printf(m, "\nGuC preempt client @ %p:\n", guc->preempt_client);
i915_guc_client_info(m, dev_priv, guc->preempt_client);
i915_guc_log_info(m, dev_priv);
@ -2497,7 +2498,7 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
const struct intel_guc *guc = &dev_priv->guc;
struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
struct i915_guc_client *client = guc->execbuf_client;
struct intel_guc_client *client = guc->execbuf_client;
unsigned int tmp;
int index;
@ -2734,39 +2735,76 @@ static int i915_sink_crc(struct seq_file *m, void *data)
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
struct intel_dp *intel_dp = NULL;
struct drm_modeset_acquire_ctx ctx;
int ret;
u8 crc[6];
drm_modeset_lock_all(dev);
drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
struct drm_crtc *crtc;
if (!connector->base.state->best_encoder)
continue;
crtc = connector->base.state->crtc;
if (!crtc->state->active)
continue;
struct drm_connector_state *state;
struct intel_crtc_state *crtc_state;
if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
continue;
intel_dp = enc_to_intel_dp(connector->base.state->best_encoder);
ret = intel_dp_sink_crc(intel_dp, crc);
retry:
ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
if (ret)
goto out;
goto err;
state = connector->base.state;
if (!state->best_encoder)
continue;
crtc = state->crtc;
ret = drm_modeset_lock(&crtc->mutex, &ctx);
if (ret)
goto err;
crtc_state = to_intel_crtc_state(crtc->state);
if (!crtc_state->base.active)
continue;
/*
* We need to wait for all crtc updates to complete, to make
* sure any pending modesets and plane updates are completed.
*/
if (crtc_state->base.commit) {
ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);
if (ret)
goto err;
}
intel_dp = enc_to_intel_dp(state->best_encoder);
ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
if (ret)
goto err;
seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
crc[0], crc[1], crc[2],
crc[3], crc[4], crc[5]);
goto out;
err:
if (ret == -EDEADLK) {
ret = drm_modeset_backoff(&ctx);
if (!ret)
goto retry;
}
goto out;
}
ret = -ENODEV;
out:
drm_connector_list_iter_end(&conn_iter);
drm_modeset_unlock_all(dev);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
}
@ -3049,7 +3087,7 @@ static void intel_connector_info(struct seq_file *m,
break;
case DRM_MODE_CONNECTOR_HDMIA:
if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
intel_encoder->type == INTEL_OUTPUT_UNKNOWN)
intel_encoder->type == INTEL_OUTPUT_DDI)
intel_hdmi_info(m, intel_connector);
break;
default:
@ -3244,6 +3282,8 @@ static int i915_engine_info(struct seq_file *m, void *unused)
yesno(dev_priv->gt.awake));
seq_printf(m, "Global active requests: %d\n",
dev_priv->gt.active_requests);
seq_printf(m, "CS timestamp frequency: %u kHz\n",
dev_priv->info.cs_timestamp_frequency_khz);
p = drm_seq_file_printer(m);
for_each_engine(engine, dev_priv, id)
@ -3601,7 +3641,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
continue;
seq_printf(m, "MST Source Port %c\n",
port_name(intel_dig_port->port));
port_name(intel_dig_port->base.port));
drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
}
drm_connector_list_iter_end(&conn_iter);
@ -4448,6 +4488,61 @@ static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
}
}
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
struct sseu_dev_info *sseu)
{
const struct intel_device_info *info = INTEL_INFO(dev_priv);
int s_max = 6, ss_max = 4;
int s, ss;
u32 s_reg[s_max], eu_reg[2 * s_max], eu_mask[2];
for (s = 0; s < s_max; s++) {
/*
* FIXME: Valid SS Mask respects the spec and reads only valid bits
* for those registers, excluding reserved ones, although this seems
* wrong because it would leave many subslices without ACK.
*/
s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
GEN10_PGCTL_VALID_SS_MASK(s);
eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
}
eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
GEN9_PGCTL_SSA_EU19_ACK |
GEN9_PGCTL_SSA_EU210_ACK |
GEN9_PGCTL_SSA_EU311_ACK;
eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
GEN9_PGCTL_SSB_EU19_ACK |
GEN9_PGCTL_SSB_EU210_ACK |
GEN9_PGCTL_SSB_EU311_ACK;
for (s = 0; s < s_max; s++) {
if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
/* skip disabled slice */
continue;
sseu->slice_mask |= BIT(s);
sseu->subslice_mask = info->sseu.subslice_mask;
for (ss = 0; ss < ss_max; ss++) {
unsigned int eu_cnt;
if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
/* skip disabled subslice */
continue;
eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
eu_mask[ss % 2]);
sseu->eu_total += eu_cnt;
sseu->eu_per_subslice = max_t(unsigned int,
sseu->eu_per_subslice,
eu_cnt);
}
}
}
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
struct sseu_dev_info *sseu)
{
@ -4483,7 +4578,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
sseu->slice_mask |= BIT(s);
if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
if (IS_GEN9_BC(dev_priv))
sseu->subslice_mask =
INTEL_INFO(dev_priv)->sseu.subslice_mask;
@ -4589,8 +4684,10 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
cherryview_sseu_device_status(dev_priv, &sseu);
} else if (IS_BROADWELL(dev_priv)) {
broadwell_sseu_device_status(dev_priv, &sseu);
} else if (INTEL_GEN(dev_priv) >= 9) {
} else if (IS_GEN9(dev_priv)) {
gen9_sseu_device_status(dev_priv, &sseu);
} else if (INTEL_GEN(dev_priv) >= 10) {
gen10_sseu_device_status(dev_priv, &sseu);
}
intel_runtime_pm_put(dev_priv);


@ -372,9 +372,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
value |= I915_SCHEDULER_CAP_ENABLED;
value |= I915_SCHEDULER_CAP_PRIORITY;
if (INTEL_INFO(dev_priv)->has_logical_ring_preemption &&
i915_modparams.enable_execlists &&
!i915_modparams.enable_guc_submission)
if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
i915_modparams.enable_execlists)
value |= I915_SCHEDULER_CAP_PREEMPTION;
}
break;
@ -407,6 +406,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
*/
value = 1;
break;
case I915_PARAM_HAS_CONTEXT_ISOLATION:
value = intel_engines_has_context_isolation(dev_priv);
break;
case I915_PARAM_SLICE_MASK:
value = INTEL_INFO(dev_priv)->sseu.slice_mask;
if (!value)
@ -417,6 +419,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
if (!value)
return -ENODEV;
break;
case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz;
break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
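/* Userspace sketch for the new CS timestamp parameter (illustrative only,
* not part of this patch; headers and error handling are simplified). The
* kernel returns the frequency already scaled from kHz to Hz.
*/
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static uint64_t query_cs_timestamp_freq(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_CS_TIMESTAMP_FREQUENCY,
		.value = &value,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;

	return (uint64_t)value; /* Hz */
}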
@ -677,7 +682,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_uc;
intel_modeset_gem_init(dev);
intel_setup_overlay(dev_priv);
if (INTEL_INFO(dev_priv)->num_pipes == 0)
return 0;
@ -838,6 +843,11 @@ static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
* We don't keep the workarounds for pre-production hardware, so we expect our
* driver to fail on these machines in one way or another. A little warning on
* dmesg may help both the user and the bug triagers.
*
* Our policy for removing pre-production workarounds is to keep the
* current gen workarounds as a guide to the bring-up of the next gen
* (workarounds have a habit of persisting!). Anything older than that
* should be removed along with the complications they introduce.
*/
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
@ -892,7 +902,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
mutex_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->av_mutex);
@ -1682,8 +1691,6 @@ static int i915_drm_resume(struct drm_device *dev)
intel_csr_ucode_resume(dev_priv);
i915_gem_resume(dev_priv);
i915_restore_state(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
intel_opregion_setup(dev_priv);
@ -1704,14 +1711,7 @@ static int i915_drm_resume(struct drm_device *dev)
drm_mode_config_reset(dev);
mutex_lock(&dev->struct_mutex);
if (i915_gem_init_hw(dev_priv)) {
DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
i915_gem_set_wedged(dev_priv);
}
mutex_unlock(&dev->struct_mutex);
intel_guc_resume(dev_priv);
i915_gem_resume(dev_priv);
intel_modeset_init_hw(dev);
intel_init_clock_gating(dev_priv);
@ -1745,8 +1745,6 @@ static int i915_drm_resume(struct drm_device *dev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
intel_autoenable_gt_powersave(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
return 0;
@ -1952,6 +1950,12 @@ error:
goto finish;
}
static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
return intel_gpu_reset(dev_priv, intel_engine_flag(engine));
}
/**
* i915_reset_engine - reset GPU engine to recover from a hang
* @engine: engine to reset
@ -1986,10 +1990,14 @@ int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
goto out;
}
ret = intel_gpu_reset(engine->i915, intel_engine_flag(engine));
if (!engine->i915->guc.execbuf_client)
ret = intel_gt_reset_engine(engine->i915, engine);
else
ret = intel_guc_reset_engine(&engine->i915->guc, engine);
if (ret) {
/* If we fail here, we expect to fallback to a global reset */
DRM_DEBUG_DRIVER("Failed to reset %s, ret=%d\n",
DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
engine->i915->guc.execbuf_client ? "GuC " : "",
engine->name, ret);
goto out;
}
@ -2524,6 +2532,8 @@ static int intel_runtime_suspend(struct device *kdev)
intel_runtime_pm_disable_interrupts(dev_priv);
intel_uncore_suspend(dev_priv);
ret = 0;
if (IS_GEN9_LP(dev_priv)) {
bxt_display_core_uninit(dev_priv);
@ -2536,6 +2546,8 @@ static int intel_runtime_suspend(struct device *kdev)
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
intel_uncore_runtime_resume(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
@ -2543,8 +2555,6 @@ static int intel_runtime_suspend(struct device *kdev)
return ret;
}
intel_uncore_suspend(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));


@ -67,7 +67,6 @@
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
@ -80,8 +79,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20171023"
#define DRIVER_TIMESTAMP 1508748913
#define DRIVER_DATE "20171117"
#define DRIVER_TIMESTAMP 1510958822
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@ -726,10 +725,12 @@ struct drm_i915_display_funcs {
void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
struct drm_atomic_state *old_state);
void (*update_crtcs)(struct drm_atomic_state *state);
void (*audio_codec_enable)(struct drm_connector *connector,
struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode);
void (*audio_codec_disable)(struct intel_encoder *encoder);
void (*audio_codec_enable)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void (*audio_codec_disable)(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
void (*fdi_link_train)(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
void (*init_clock_gating)(struct drm_i915_private *dev_priv);
@ -884,6 +885,8 @@ struct intel_device_info {
/* Slice/subslice/EU info */
struct sseu_dev_info sseu;
u32 cs_timestamp_frequency_khz;
struct color_luts {
u16 degamma_lut_size;
u16 gamma_lut_size;
@ -911,6 +914,12 @@ struct i915_gpu_state {
struct intel_device_info device_info;
struct i915_params params;
struct i915_error_uc {
struct intel_uc_fw guc_fw;
struct intel_uc_fw huc_fw;
struct drm_i915_error_object *guc_log;
} uc;
/* Generic register state */
u32 eir;
u32 pgtbl_er;
@ -934,7 +943,6 @@ struct i915_gpu_state {
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
struct drm_i915_error_object *semaphore;
struct drm_i915_error_object *guc_log;
struct drm_i915_error_engine {
int engine_id;
@ -1386,7 +1394,6 @@ struct intel_gen6_power_mgmt {
struct intel_rps rps;
struct intel_rc6 rc6;
struct intel_llc_pstate llc_pstate;
struct delayed_work autoenable_work;
};
/* defined intel_pm.c */
@ -1698,6 +1705,8 @@ enum modeset_restore {
#define DDC_PIN_D 0x06
struct ddi_vbt_port_info {
int max_tmds_clock;
/*
* This is an index in the HDMI/DVI DDI buffer translation table.
* The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
@ -2228,6 +2237,7 @@ struct i915_oa_ops {
struct intel_cdclk_state {
unsigned int cdclk, vco, ref;
u8 voltage_level;
};
struct drm_i915_private {
@ -2339,6 +2349,7 @@ struct drm_i915_private {
unsigned int max_dotclk_freq;
unsigned int rawclk_freq;
unsigned int hpll_freq;
unsigned int fdi_pll_freq;
unsigned int czclk_freq;
struct {
@ -2415,6 +2426,8 @@ struct drm_i915_private {
unsigned int active_crtcs;
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
/* minimum acceptable voltage level for each pipe */
u8 min_voltage_level[I915_MAX_PIPES];
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
@ -3046,6 +3059,8 @@ intel_info(const struct drm_i915_private *dev_priv)
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
(dev_priv)->info.gt == 2)
#define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \
(dev_priv)->info.gt == 3)
#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
@ -3137,6 +3152,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
((dev_priv)->info.has_logical_ring_contexts)
#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
((dev_priv)->info.has_logical_ring_preemption)
#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt)
#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2)
#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3)
@ -3315,7 +3332,9 @@ extern int i915_reset_engine(struct intel_engine_cs *engine,
unsigned int flags);
extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
extern int intel_guc_reset(struct drm_i915_private *dev_priv);
extern int intel_reset_guc(struct drm_i915_private *dev_priv);
extern int intel_guc_reset_engine(struct intel_guc *guc,
struct intel_engine_cs *engine);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@ -4107,7 +4126,6 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv);
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern int intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_connector_register(struct drm_connector *);
extern void intel_connector_unregister(struct drm_connector *);
@ -4174,8 +4192,7 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
enum dpio_phy phy);
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy);
uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
uint8_t lane_count);
uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count);
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
uint8_t lane_lat_optim_mask);
uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
@ -4184,18 +4201,25 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,
bool uniq_trans_scale);
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
bool reset);
void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void chv_phy_release_cl2_override(struct intel_encoder *encoder);
void chv_phy_post_pll_disable(struct intel_encoder *encoder);
void chv_phy_post_pll_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state);
void vlv_set_phy_signal_level(struct intel_encoder *encoder,
u32 demph_reg_value, u32 preemph_reg_value,
u32 uniqtranscale_reg_value, u32 tx3_demph);
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
void vlv_phy_reset_lanes(struct intel_encoder *encoder);
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state);
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);


@ -538,7 +538,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
* @obj: i915 gem object
* @flags: how to wait (under a lock, for all rendering or just for writes etc)
* @timeout: how long to wait
* @rps: client (user process) to charge for any waitboosting
* @rps_client: client (user process) to charge for any waitboosting
*/
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
@ -1619,7 +1619,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (err)
goto out;
/* Flush and acquire obj->pages so that we are coherent through
/*
* Proxy objects do not control access to the backing storage, ergo
* they cannot be used as a means to manipulate the cache domain
* tracking for that backing storage. The proxy object is always
* considered to be outside of any cache domain.
*/
if (i915_gem_object_is_proxy(obj)) {
err = -ENXIO;
goto out;
}
/*
* Flush and acquire obj->pages so that we are coherent through
* direct access in memory with previous cached writes through
* shmemfs and that our cache domain tracking remains valid.
* For example, if the obj->filp was moved to swap without us
@ -1675,6 +1687,11 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
/*
* Proxy objects are barred from CPU access, so there is no
* need to ban sw_finish as it is a nop.
*/
/* Pinned buffers may be scanout, so flush the cache */
i915_gem_object_flush_if_display(obj);
i915_gem_object_put(obj);
@ -1725,7 +1742,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
*/
if (!obj->base.filp) {
i915_gem_object_put(obj);
return -EINVAL;
return -ENXIO;
}
addr = vm_mmap(obj->base.filp, 0, args->size,
@ -2669,7 +2686,8 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
void *ptr;
int ret;
GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
if (unlikely(!i915_gem_object_has_struct_page(obj)))
return ERR_PTR(-ENXIO);
ret = mutex_lock_interruptible(&obj->mm.lock);
if (ret)
@ -2915,13 +2933,23 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
* Prevent request submission to the hardware until we have
* completed the reset in i915_gem_reset_finish(). If a request
* is completed by one engine, it may then queue a request
* to a second via its engine->irq_tasklet *just* as we are
* to a second via its execlists->tasklet *just* as we are
* calling engine->init_hw() and also writing the ELSP.
* Turning off the engine->irq_tasklet until the reset is over
* Turning off the execlists->tasklet until the reset is over
* prevents the race.
*/
tasklet_kill(&engine->execlists.irq_tasklet);
tasklet_disable(&engine->execlists.irq_tasklet);
tasklet_kill(&engine->execlists.tasklet);
tasklet_disable(&engine->execlists.tasklet);
/*
* We're using a worker to queue preemption requests from the tasklet
* in GuC submission mode.
* Even though the tasklet was disabled, we may still have a worker
* queued. Make sure that all workers scheduled before the tasklet was
* disabled have completed before continuing with the reset.
*/
if (engine->i915->guc.preempt_wq)
flush_workqueue(engine->i915->guc.preempt_wq);
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
@ -3100,7 +3128,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
{
tasklet_enable(&engine->execlists.irq_tasklet);
tasklet_enable(&engine->execlists.tasklet);
kthread_unpark(engine->breadcrumbs.signaler);
intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
@ -3278,13 +3306,20 @@ i915_gem_retire_work_handler(struct work_struct *work)
}
}
static inline bool
new_requests_since_last_retire(const struct drm_i915_private *i915)
{
return (READ_ONCE(i915->gt.active_requests) ||
work_pending(&i915->gt.idle_work.work));
}
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), gt.idle_work.work);
struct drm_device *dev = &dev_priv->drm;
bool rearm_hangcheck;
ktime_t end;
if (!READ_ONCE(dev_priv->gt.awake))
return;
@ -3293,14 +3328,21 @@ i915_gem_idle_work_handler(struct work_struct *work)
* Wait for the last execlists context to complete, but bail out in case a
* new request is submitted.
*/
wait_for(intel_engines_are_idle(dev_priv), 10);
if (READ_ONCE(dev_priv->gt.active_requests))
return;
end = ktime_add_ms(ktime_get(), 200);
do {
if (new_requests_since_last_retire(dev_priv))
return;
if (intel_engines_are_idle(dev_priv))
break;
usleep_range(100, 500);
} while (ktime_before(ktime_get(), end));
rearm_hangcheck =
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
/* Currently busy, come back later */
mod_delayed_work(dev_priv->wq,
&dev_priv->gt.idle_work,
@ -3312,16 +3354,23 @@ i915_gem_idle_work_handler(struct work_struct *work)
* New request retired after this work handler started, extend active
* period until next instance of the work.
*/
if (work_pending(work))
if (new_requests_since_last_retire(dev_priv))
goto out_unlock;
if (dev_priv->gt.active_requests)
goto out_unlock;
/*
* Be paranoid and flush a concurrent interrupt to make sure
* we don't reactivate any irq tasklets after parking.
*
* FIXME: Note that even though we have waited for execlists to be idle,
* there may still be an in-flight interrupt even though the CSB
* is now empty. synchronize_irq() makes sure that a residual interrupt
* is completed before we continue, but it doesn't prevent the HW from
* raising a spurious interrupt later. To complete the shield we should
* coordinate disabling the CS irq with flushing the interrupts.
*/
synchronize_irq(dev_priv->drm.irq);
if (wait_for(intel_engines_are_idle(dev_priv), 10))
DRM_ERROR("Timeout waiting for engines to idle\n");
intel_engines_mark_idle(dev_priv);
intel_engines_park(dev_priv);
i915_gem_timelines_mark_idle(dev_priv);
GEM_BUG_ON(!dev_priv->gt.awake);
@ -3332,7 +3381,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
gen6_rps_idle(dev_priv);
intel_runtime_pm_put(dev_priv);
out_unlock:
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev_priv->drm.struct_mutex);
out_rearm:
if (rearm_hangcheck) {
@ -3857,6 +3906,15 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
/*
* The caching mode of proxy object is handled by its generator, and
* not allowed to be changed by userspace.
*/
if (i915_gem_object_is_proxy(obj)) {
ret = -ENXIO;
goto out;
}
if (obj->cache_level == level)
goto out;
@ -4662,14 +4720,16 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
i915_gem_object_put(obj);
}
static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
static void assert_kernel_context_is_current(struct drm_i915_private *i915)
{
struct i915_gem_context *kernel_context = i915->kernel_context;
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id)
GEM_BUG_ON(engine->last_retired_context &&
!i915_gem_context_is_kernel(engine->last_retired_context));
for_each_engine(engine, i915, id) {
GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline->last_request));
GEM_BUG_ON(engine->last_retired_context != kernel_context);
}
}
void i915_gem_sanitize(struct drm_i915_private *i915)
@ -4773,23 +4833,40 @@ err_unlock:
return ret;
}
void i915_gem_resume(struct drm_i915_private *dev_priv)
void i915_gem_resume(struct drm_i915_private *i915)
{
struct drm_device *dev = &dev_priv->drm;
WARN_ON(i915->gt.awake);
WARN_ON(dev_priv->gt.awake);
mutex_lock(&i915->drm.struct_mutex);
intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev_priv);
i915_gem_restore_fences(dev_priv);
i915_gem_restore_gtt_mappings(i915);
i915_gem_restore_fences(i915);
/* As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
dev_priv->gt.resume(dev_priv);
i915->gt.resume(i915);
mutex_unlock(&dev->struct_mutex);
if (i915_gem_init_hw(i915))
goto err_wedged;
intel_guc_resume(i915);
/* Always reload a context for powersaving. */
if (i915_gem_switch_to_kernel_context(i915))
goto err_wedged;
out_unlock:
intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
mutex_unlock(&i915->drm.struct_mutex);
return;
err_wedged:
DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
i915_gem_set_wedged(i915);
goto out_unlock;
}
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
@ -4906,18 +4983,15 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
goto out;
}
/* Need to do basic initialisation of all rings first: */
ret = __i915_gem_restart_engines(dev_priv);
if (ret)
goto out;
intel_mocs_init_l3cc_table(dev_priv);
/* We can't enable contexts until all firmware is loaded */
ret = intel_uc_init_hw(dev_priv);
if (ret)
goto out;
intel_mocs_init_l3cc_table(dev_priv);
/* Only when the HW is re-initialised, can we replay the requests */
ret = __i915_gem_restart_engines(dev_priv);
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
@ -4942,6 +5016,120 @@ bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
return true;
}
static int __intel_engines_record_defaults(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
/*
* As we reset the gpu during very early sanitisation, the current
* register state on the GPU should reflect its default values.
* We load a context onto the hw (with restore-inhibit), then switch
* over to a second context to save that default register state. We
* can then prime every new context with that state so they all start
* from the same default HW values.
*/
ctx = i915_gem_context_create_kernel(i915, 0);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
for_each_engine(engine, i915, id) {
struct drm_i915_gem_request *rq;
rq = i915_gem_request_alloc(engine, ctx);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_ctx;
}
err = i915_switch_context(rq);
if (engine->init_context)
err = engine->init_context(rq);
__i915_add_request(rq, true);
if (err)
goto err_active;
}
err = i915_gem_switch_to_kernel_context(i915);
if (err)
goto err_active;
err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
if (err)
goto err_active;
assert_kernel_context_is_current(i915);
for_each_engine(engine, i915, id) {
struct i915_vma *state;
state = ctx->engine[id].state;
if (!state)
continue;
/*
* As we will hold a reference to the logical state, it will
* not be torn down with the context, and importantly the
* object will hold onto its vma (making it possible for a
* stray GTT write to corrupt our defaults). Unmap the vma
* from the GTT to prevent such accidents and reclaim the
* space.
*/
err = i915_vma_unbind(state);
if (err)
goto err_active;
err = i915_gem_object_set_to_cpu_domain(state->obj, false);
if (err)
goto err_active;
engine->default_state = i915_gem_object_get(state->obj);
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
unsigned int found = intel_engines_has_context_isolation(i915);
/*
* Make sure that classes with multiple engine instances all
* share the same basic configuration.
*/
for_each_engine(engine, i915, id) {
unsigned int bit = BIT(engine->uabi_class);
unsigned int expected = engine->default_state ? bit : 0;
if ((found & bit) != expected) {
DRM_ERROR("mismatching default context state for class %d on engine %s\n",
engine->uabi_class, engine->name);
}
}
}
out_ctx:
i915_gem_context_set_closed(ctx);
i915_gem_context_put(ctx);
return err;
err_active:
/*
* If we have to abandon now, we expect the engines to be idle
* and ready to be torn down. First try to flush any remaining
* request, ensure we are pointing at the kernel context and
* then remove it.
*/
if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
goto out_ctx;
if (WARN_ON(i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED)))
goto out_ctx;
i915_gem_contexts_lost(i915);
goto out_ctx;
}
int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
@ -4991,7 +5179,25 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (ret)
goto out_unlock;
intel_init_gt_powersave(dev_priv);
ret = i915_gem_init_hw(dev_priv);
if (ret)
goto out_unlock;
/*
* Despite its name, intel_init_clock_gating applies display clock
* gating workarounds, GT mmio workarounds and the occasional GT power
* context workaround. Worse, sometimes it includes a context
* register workaround which we need to apply before we record the
* default HW state for all contexts.
*
* FIXME: break up the workarounds and apply them at the right time!
*/
intel_init_clock_gating(dev_priv);
ret = __intel_engines_record_defaults(dev_priv);
out_unlock:
if (ret == -EIO) {
/* Allow engine initialisation to fail by marking the GPU as
* wedged. But we only want to do this where the GPU is angry,
@ -5003,8 +5209,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
}
ret = 0;
}
out_unlock:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
@ -5058,6 +5262,22 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
i915_gem_detect_bit_6_swizzle(dev_priv);
}
static void i915_gem_init__mm(struct drm_i915_private *i915)
{
spin_lock_init(&i915->mm.object_stat_lock);
spin_lock_init(&i915->mm.obj_lock);
spin_lock_init(&i915->mm.free_lock);
init_llist_head(&i915->mm.free_list);
INIT_LIST_HEAD(&i915->mm.unbound_list);
INIT_LIST_HEAD(&i915->mm.bound_list);
INIT_LIST_HEAD(&i915->mm.fence_list);
INIT_LIST_HEAD(&i915->mm.userfault_list);
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}
int
i915_gem_load_init(struct drm_i915_private *dev_priv)
{
@ -5099,15 +5319,7 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
if (err)
goto err_priorities;
INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
spin_lock_init(&dev_priv->mm.obj_lock);
spin_lock_init(&dev_priv->mm.free_lock);
init_llist_head(&dev_priv->mm.free_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
i915_gem_init__mm(dev_priv);
INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
i915_gem_retire_work_handler);

View File

@ -28,7 +28,11 @@
#include <linux/bug.h>
#ifdef CONFIG_DRM_I915_DEBUG_GEM
#define GEM_BUG_ON(expr) BUG_ON(expr)
#define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \
printk(KERN_ERR "GEM_BUG_ON(%s)\n", __stringify(condition)); \
BUG(); \
} \
} while(0)
#define GEM_WARN_ON(expr) WARN_ON(expr)
#define GEM_DEBUG_DECL(var) var
@ -44,6 +48,12 @@
#define GEM_DEBUG_BUG_ON(expr)
#endif
#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
#define GEM_TRACE(...) trace_printk(__VA_ARGS__)
#else
#define GEM_TRACE(...) do { } while (0)
#endif
#define I915_NUM_ENGINES 5
#endif /* __I915_GEM_H__ */
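The reworked GEM_BUG_ON() above prints the failing condition before calling BUG(), and GEM_TRACE() compiles down to a no-op unless CONFIG_DRM_I915_TRACE_GEM is set. A small userspace sketch of the same "loud assert plus compile-out trace" shape follows; MY_DEBUG, MY_BUG_ON and MY_TRACE are illustrative names, not part of i915.

#include <stdio.h>
#include <stdlib.h>

#define MY_DEBUG 1

#if MY_DEBUG
#define MY_BUG_ON(cond) do { \
	if (cond) { \
		fprintf(stderr, "BUG_ON(%s) at %s:%d\n", #cond, __FILE__, __LINE__); \
		abort(); \
	} \
} while (0)
#define MY_TRACE(...) fprintf(stderr, __VA_ARGS__)
#else
#define MY_BUG_ON(cond) do { (void)(cond); } while (0)
#define MY_TRACE(...) do { } while (0)
#endif

int main(void)
{
	int refcount = 1;

	MY_TRACE("refcount=%d\n", refcount);
	MY_BUG_ON(refcount < 0);	/* passes: the condition is false */
	return 0;
}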

View File

@ -418,8 +418,8 @@ out:
return ctx;
}
static struct i915_gem_context *
create_kernel_context(struct drm_i915_private *i915, int prio)
struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
{
struct i915_gem_context *ctx;
@ -473,7 +473,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
ida_init(&dev_priv->contexts.hw_ida);
/* lowest priority; idle task */
ctx = create_kernel_context(dev_priv, I915_PRIORITY_MIN);
ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default global context\n");
err = PTR_ERR(ctx);
@ -487,7 +487,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
dev_priv->kernel_context = ctx;
/* highest priority; preempting task */
ctx = create_kernel_context(dev_priv, INT_MAX);
ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default preempt context\n");
err = PTR_ERR(ctx);
@ -522,28 +522,6 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
engine->context_unpin(engine, engine->last_retired_context);
engine->last_retired_context = NULL;
}
/* Force the GPU state to be restored on enabling */
if (!i915_modparams.enable_execlists) {
struct i915_gem_context *ctx;
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
if (!i915_gem_context_is_default(ctx))
continue;
for_each_engine(engine, dev_priv, id)
ctx->engine[engine->id].initialised = false;
ctx->remap_slice = ALL_L3_SLICES(dev_priv);
}
for_each_engine(engine, dev_priv, id) {
struct intel_context *kce =
&dev_priv->kernel_context->engine[engine->id];
kce->initialised = true;
}
}
}
void i915_gem_contexts_fini(struct drm_i915_private *i915)
@ -718,9 +696,6 @@ static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
if (to->remap_slice)
return false;
if (!to->engine[RCS].initialised)
return false;
if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
return false;
@ -795,11 +770,14 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
return ret;
}
if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
/* NB: If we inhibit the restore, the context is not allowed to
* die because future work may end up depending on valid address
* space. This means we must enforce that a page table load
* occur when this occurs. */
if (i915_gem_context_is_kernel(to))
/*
* The kernel context(s) is treated as pure scratch and is not
* expected to retain any state (as we sacrifice it during
* suspend and on resume it may be corrupted). This is ok,
* as nothing actually executes using the kernel context; it
* is purely used for flushing user contexts.
*/
hw_flags = MI_RESTORE_INHIBIT;
else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
hw_flags = MI_FORCE_RESTORE;
@ -843,15 +821,6 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
to->remap_slice &= ~(1<<i);
}
if (!to->engine[RCS].initialised) {
if (engine->init_context) {
ret = engine->init_context(req);
if (ret)
return ret;
}
to->engine[RCS].initialised = true;
}
return 0;
}
@ -899,7 +868,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
return do_rcs_switch(req);
}
static bool engine_has_kernel_context(struct intel_engine_cs *engine)
static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
{
struct i915_gem_timeline *timeline;
@ -915,8 +884,7 @@ static bool engine_has_kernel_context(struct intel_engine_cs *engine)
return false;
}
return (!engine->last_retired_context ||
i915_gem_context_is_kernel(engine->last_retired_context));
return intel_engine_has_kernel_context(engine);
}
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
@ -933,7 +901,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
struct drm_i915_gem_request *req;
int ret;
if (engine_has_kernel_context(engine))
if (engine_has_idle_kernel_context(engine))
continue;
req = i915_gem_request_alloc(engine, dev_priv->kernel_context);

View File

@ -157,7 +157,6 @@ struct i915_gem_context {
u32 *lrc_reg_state;
u64 lrc_desc;
int pin_count;
bool initialised;
} engine[I915_NUM_ENGINES];
/** ring_size: size for allocating the per-engine ring buffer */
@ -292,6 +291,9 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);
static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{

View File

@ -46,7 +46,7 @@ static bool ggtt_is_idle(struct drm_i915_private *i915)
return false;
for_each_engine(engine, i915, id) {
if (engine->last_retired_context != i915->kernel_context)
if (!intel_engine_has_kernel_context(engine))
return false;
}
@ -73,6 +73,7 @@ static int ggtt_flush(struct drm_i915_private *i915)
if (err)
return err;
GEM_BUG_ON(!ggtt_is_idle(i915));
return 0;
}
@ -216,6 +217,7 @@ search_again:
if (ret)
return ret;
cond_resched();
goto search_again;
}

View File

@ -2074,23 +2074,27 @@ static struct drm_syncobj **
get_fence_array(struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file)
{
const unsigned int nfences = args->num_cliprects;
const unsigned long nfences = args->num_cliprects;
struct drm_i915_gem_exec_fence __user *user;
struct drm_syncobj **fences;
unsigned int n;
unsigned long n;
int err;
if (!(args->flags & I915_EXEC_FENCE_ARRAY))
return NULL;
if (nfences > SIZE_MAX / sizeof(*fences))
/* Check multiplication overflow for access_ok() and kvmalloc_array() */
BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
if (nfences > min_t(unsigned long,
ULONG_MAX / sizeof(*user),
SIZE_MAX / sizeof(*fences)))
return ERR_PTR(-EINVAL);
user = u64_to_user_ptr(args->cliprects_ptr);
if (!access_ok(VERIFY_READ, user, nfences * 2 * sizeof(u32)))
if (!access_ok(VERIFY_READ, user, nfences * sizeof(*user)))
return ERR_PTR(-EFAULT);
fences = kvmalloc_array(args->num_cliprects, sizeof(*fences),
fences = kvmalloc_array(nfences, sizeof(*fences),
__GFP_NOWARN | GFP_KERNEL);
if (!fences)
return ERR_PTR(-ENOMEM);
@ -2447,6 +2451,26 @@ err_in_fence:
return err;
}
static size_t eb_element_size(void)
{
return (sizeof(struct drm_i915_gem_exec_object2) +
sizeof(struct i915_vma *) +
sizeof(unsigned int));
}
static bool check_buffer_count(size_t count)
{
const size_t sz = eb_element_size();
/*
* When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
* array size (see eb_create()). Otherwise, we can accept an array as
* large as can be addressed (though use large arrays at your peril)!
*/
return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
}
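check_buffer_count() centralises the execbuf sanity check on the user-supplied buffer count: reject zero, anything above INT_MAX (the LUT limit mentioned in the comment) and any count whose per-element size would overflow the later allocation of count + 1 elements. A standalone sketch of that overflow-safe validation, with an invented element struct:

#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct exec_entry { unsigned long handle, flags; };	/* illustrative */

static bool count_is_sane(size_t count)
{
	const size_t sz = sizeof(struct exec_entry);

	/* reject 0, anything above INT_MAX, and any count for which
	 * (count + 1) * sz would overflow size_t */
	return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
}

int main(void)
{
	printf("%d\n", count_is_sane(16));		/* 1 */
	printf("%d\n", count_is_sane(0));		/* 0 */
	printf("%d\n", count_is_sane(SIZE_MAX / 8));	/* 0 */
	return 0;
}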
/*
* Legacy execbuffer just creates an exec2 list from the original exec object
* list array and passes it to the real function.
@ -2455,18 +2479,16 @@ int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file)
{
const size_t sz = (sizeof(struct drm_i915_gem_exec_object2) +
sizeof(struct i915_vma *) +
sizeof(unsigned int));
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
const size_t count = args->buffer_count;
unsigned int i;
int err;
if (args->buffer_count < 1 || args->buffer_count > SIZE_MAX / sz - 1) {
DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
if (!check_buffer_count(count)) {
DRM_DEBUG("execbuf2 with %zd buffers\n", count);
return -EINVAL;
}
@ -2485,9 +2507,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
/* Copy in the exec list from userland */
exec_list = kvmalloc_array(args->buffer_count, sizeof(*exec_list),
exec_list = kvmalloc_array(count, sizeof(*exec_list),
__GFP_NOWARN | GFP_KERNEL);
exec2_list = kvmalloc_array(args->buffer_count + 1, sz,
exec2_list = kvmalloc_array(count + 1, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec_list == NULL || exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
@ -2498,7 +2520,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
}
err = copy_from_user(exec_list,
u64_to_user_ptr(args->buffers_ptr),
sizeof(*exec_list) * args->buffer_count);
sizeof(*exec_list) * count);
if (err) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, err);
@ -2548,16 +2570,14 @@ int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
{
const size_t sz = (sizeof(struct drm_i915_gem_exec_object2) +
sizeof(struct i915_vma *) +
sizeof(unsigned int));
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list;
struct drm_syncobj **fences = NULL;
const size_t count = args->buffer_count;
int err;
if (args->buffer_count < 1 || args->buffer_count > SIZE_MAX / sz - 1) {
DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
if (!check_buffer_count(count)) {
DRM_DEBUG("execbuf2 with %zd buffers\n", count);
return -EINVAL;
}
@ -2565,17 +2585,17 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EINVAL;
/* Allocate an extra slot for use by the command parser */
exec2_list = kvmalloc_array(args->buffer_count + 1, sz,
exec2_list = kvmalloc_array(count + 1, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
DRM_DEBUG("Failed to allocate exec list for %zd buffers\n",
count);
return -ENOMEM;
}
if (copy_from_user(exec2_list,
u64_to_user_ptr(args->buffers_ptr),
sizeof(*exec2_list) * args->buffer_count)) {
DRM_DEBUG("copy %d exec entries failed\n", args->buffer_count);
sizeof(*exec2_list) * count)) {
DRM_DEBUG("copy %zd exec entries failed\n", count);
kvfree(exec2_list);
return -EFAULT;
}

View File

@ -454,6 +454,14 @@ static void vm_free_pages_release(struct i915_address_space *vm,
static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
/*
* On !llc, we need to change the pages back to WB. We only do so
* in bulk, so we rarely need to change the page attributes here,
* but doing so requires a stop_machine() from deep inside arch/x86/mm.
* To make detection of the possible sleep more likely, use an
* unconditional might_sleep() for everybody.
*/
might_sleep();
if (!pagevec_add(&vm->free_pages, page))
vm_free_pages_release(vm, false);
}
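vm_free_page() batches pages in a pagevec and only occasionally takes the slow path that can sleep, so the hunk adds an unconditional might_sleep() to catch atomic-context callers even when the slow path is not taken. A rough userspace sketch of the batch-then-flush shape, where assert_may_block() is only an illustrative stand-in for might_sleep():

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define BATCH 15

struct free_batch {
	void *pages[BATCH];
	int nr;
	bool in_atomic;		/* illustrative stand-in for preemption state */
};

static void assert_may_block(const struct free_batch *b)
{
	/* Unconditional, like might_sleep(): complain even if this call
	 * happens not to take the slow, sleeping path this time. */
	assert(!b->in_atomic);
}

static void release_batch(struct free_batch *b)
{
	printf("releasing %d pages in bulk\n", b->nr);	/* the rare, slow path */
	b->nr = 0;
}

static void free_page_deferred(struct free_batch *b, void *page)
{
	assert_may_block(b);
	b->pages[b->nr++] = page;
	if (b->nr == BATCH)
		release_batch(b);
}

int main(void)
{
	struct free_batch b = { .nr = 0, .in_atomic = false };
	int dummy[20];

	for (int i = 0; i < 20; i++)
		free_page_deferred(&b, &dummy[i]);
	return 0;
}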
@ -2248,35 +2256,62 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
}
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
static void gen6_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
if (INTEL_INFO(dev_priv)->gen < 6)
return;
u32 fault;
for_each_engine(engine, dev_priv, id) {
u32 fault_reg;
fault_reg = I915_READ(RING_FAULT_REG(engine));
if (fault_reg & RING_FAULT_VALID) {
fault = I915_READ(RING_FAULT_REG(engine));
if (fault & RING_FAULT_VALID) {
DRM_DEBUG_DRIVER("Unexpected fault\n"
"\tAddr: 0x%08lx\n"
"\tAddress space: %s\n"
"\tSource ID: %d\n"
"\tType: %d\n",
fault_reg & PAGE_MASK,
fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
RING_FAULT_SRCID(fault_reg),
RING_FAULT_FAULT_TYPE(fault_reg));
fault & PAGE_MASK,
fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
RING_FAULT_SRCID(fault),
RING_FAULT_FAULT_TYPE(fault));
I915_WRITE(RING_FAULT_REG(engine),
fault_reg & ~RING_FAULT_VALID);
fault & ~RING_FAULT_VALID);
}
}
/* Engine specific init may not have been done till this point. */
if (dev_priv->engine[RCS])
POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}
static void gen8_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
u32 fault = I915_READ(GEN8_RING_FAULT_REG);
if (fault & RING_FAULT_VALID) {
DRM_DEBUG_DRIVER("Unexpected fault\n"
"\tAddr: 0x%08lx\n"
"\tEngine ID: %d\n"
"\tSource ID: %d\n"
"\tType: %d\n",
fault & PAGE_MASK,
GEN8_RING_FAULT_ENGINE_ID(fault),
RING_FAULT_SRCID(fault),
RING_FAULT_FAULT_TYPE(fault));
I915_WRITE(GEN8_RING_FAULT_REG,
fault & ~RING_FAULT_VALID);
}
POSTING_READ(GEN8_RING_FAULT_REG);
}
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
/* From GEN8 onwards we only have one 'All Engine Fault Register' */
if (INTEL_GEN(dev_priv) >= 8)
gen8_check_and_clear_faults(dev_priv);
else if (INTEL_GEN(dev_priv) >= 6)
gen6_check_and_clear_faults(dev_priv);
else
return;
}
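i915_check_and_clear_faults() now dispatches by generation: gen8+ reads the single GEN8_RING_FAULT_REG, while gen6-7 keep the per-engine RING_FAULT_REG walk; both decode the same source/type/address fields and write the register back with the VALID bit cleared. The sketch below shows only that mask-and-clear decode with an invented bit layout, not the real register definition:

#include <stdint.h>
#include <stdio.h>

#define FAULT_VALID      (1u << 0)
#define FAULT_TYPE(x)    (((x) >> 1) & 0x3)
#define FAULT_SRCID(x)   (((x) >> 3) & 0xff)
#define FAULT_ADDR_MASK  (~0xfffu)	/* page-aligned address bits */

static void report_and_clear(uint32_t *reg)
{
	uint32_t fault = *reg;

	if (!(fault & FAULT_VALID))
		return;

	printf("fault at 0x%08x, source %u, type %u\n",
	       fault & FAULT_ADDR_MASK, FAULT_SRCID(fault), FAULT_TYPE(fault));

	*reg = fault & ~FAULT_VALID;	/* write back with VALID cleared */
}

int main(void)
{
	uint32_t fake_reg = 0xdeadb000 | (0x42 << 3) | (1 << 1) | FAULT_VALID;

	report_and_clear(&fake_reg);
	report_and_clear(&fake_reg);	/* already cleared: no output */
	return 0;
}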
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
@ -3041,7 +3076,7 @@ const struct intel_ppat_entry *
intel_ppat_get(struct drm_i915_private *i915, u8 value)
{
struct intel_ppat *ppat = &i915->ppat;
struct intel_ppat_entry *entry;
struct intel_ppat_entry *entry = NULL;
unsigned int scanned, best_score;
int i;
@ -3064,7 +3099,7 @@ intel_ppat_get(struct drm_i915_private *i915, u8 value)
}
if (scanned == ppat->max_entries) {
if (!best_score)
if (!entry)
return ERR_PTR(-ENOSPC);
kref_get(&entry->ref);
@ -3171,12 +3206,6 @@ static void cnl_setup_private_ppat(struct intel_ppat *ppat)
ppat->match = bdw_private_pat_match;
ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
/* XXX: spec is unclear if this is still needed for CNL+ */
if (!USES_PPGTT(ppat->i915)) {
__alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
return;
}
__alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
__alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
__alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);

View File

@ -53,8 +53,9 @@ struct i915_lut_handle {
struct drm_i915_gem_object_ops {
unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
#define I915_GEM_OBJECT_IS_PROXY BIT(2)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
@ -361,6 +362,12 @@ i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}
static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
}
static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{

View File

@ -26,10 +26,12 @@
*/
#include "i915_drv.h"
#include "i915_gem_render_state.h"
#include "intel_renderstate.h"
struct intel_render_state {
const struct intel_renderstate_rodata *rodata;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 batch_offset;
u32 batch_size;
@ -40,6 +42,9 @@ struct intel_render_state {
static const struct intel_renderstate_rodata *
render_state_get_rodata(const struct intel_engine_cs *engine)
{
if (engine->id != RCS)
return NULL;
switch (INTEL_GEN(engine->i915)) {
case 6:
return &gen6_null_state;
@ -74,17 +79,16 @@ static int render_state_setup(struct intel_render_state *so,
struct drm_i915_private *i915)
{
const struct intel_renderstate_rodata *rodata = so->rodata;
struct drm_i915_gem_object *obj = so->vma->obj;
unsigned int i = 0, reloc_index = 0;
unsigned int needs_clflush;
u32 *d;
int ret;
ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
ret = i915_gem_obj_prepare_shmem_write(so->obj, &needs_clflush);
if (ret)
return ret;
d = kmap_atomic(i915_gem_object_get_dirty_page(obj, 0));
d = kmap_atomic(i915_gem_object_get_dirty_page(so->obj, 0));
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
@ -112,7 +116,7 @@ static int render_state_setup(struct intel_render_state *so,
goto err;
}
so->batch_offset = so->vma->node.start;
so->batch_offset = i915_ggtt_offset(so->vma);
so->batch_size = rodata->batch_items * sizeof(u32);
while (i % CACHELINE_DWORDS)
@ -160,9 +164,9 @@ static int render_state_setup(struct intel_render_state *so,
drm_clflush_virt_range(d, i * sizeof(u32));
kunmap_atomic(d);
ret = i915_gem_object_set_to_gtt_domain(obj, false);
ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
out:
i915_gem_obj_finish_shmem_access(obj);
i915_gem_obj_finish_shmem_access(so->obj);
return ret;
err:
@ -173,112 +177,61 @@ err:
#undef OUT_BATCH
int i915_gem_render_state_init(struct intel_engine_cs *engine)
int i915_gem_render_state_emit(struct drm_i915_gem_request *rq)
{
struct intel_render_state *so;
const struct intel_renderstate_rodata *rodata;
struct drm_i915_gem_object *obj;
int ret;
struct intel_engine_cs *engine = rq->engine;
struct intel_render_state so = {}; /* keep the compiler happy */
int err;
if (engine->id != RCS)
so.rodata = render_state_get_rodata(engine);
if (!so.rodata)
return 0;
rodata = render_state_get_rodata(engine);
if (!rodata)
return 0;
if (rodata->batch_items * 4 > PAGE_SIZE)
if (so.rodata->batch_items * 4 > PAGE_SIZE)
return -EINVAL;
so = kmalloc(sizeof(*so), GFP_KERNEL);
if (!so)
return -ENOMEM;
so.obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
if (IS_ERR(so.obj))
return PTR_ERR(so.obj);
obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto err_free;
}
so->vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
if (IS_ERR(so->vma)) {
ret = PTR_ERR(so->vma);
so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.base, NULL);
if (IS_ERR(so.vma)) {
err = PTR_ERR(so.vma);
goto err_obj;
}
so->rodata = rodata;
engine->render_state = so;
return 0;
err = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
goto err_vma;
err_obj:
i915_gem_object_put(obj);
err_free:
kfree(so);
return ret;
}
err = render_state_setup(&so, rq->i915);
if (err)
goto err_unpin;
int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
{
struct intel_render_state *so;
int ret;
err = engine->emit_flush(rq, EMIT_INVALIDATE);
if (err)
goto err_unpin;
lockdep_assert_held(&req->i915->drm.struct_mutex);
err = engine->emit_bb_start(rq,
so.batch_offset, so.batch_size,
I915_DISPATCH_SECURE);
if (err)
goto err_unpin;
so = req->engine->render_state;
if (!so)
return 0;
/* Recreate the page after shrinking */
if (!i915_gem_object_has_pages(so->vma->obj))
so->batch_offset = -1;
ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (ret)
return ret;
if (so->vma->node.start != so->batch_offset) {
ret = render_state_setup(so, req->i915);
if (ret)
if (so.aux_size > 8) {
err = engine->emit_bb_start(rq,
so.aux_offset, so.aux_size,
I915_DISPATCH_SECURE);
if (err)
goto err_unpin;
}
ret = req->engine->emit_flush(req, EMIT_INVALIDATE);
if (ret)
goto err_unpin;
ret = req->engine->emit_bb_start(req,
so->batch_offset, so->batch_size,
I915_DISPATCH_SECURE);
if (ret)
goto err_unpin;
if (so->aux_size > 8) {
ret = req->engine->emit_bb_start(req,
so->aux_offset, so->aux_size,
I915_DISPATCH_SECURE);
if (ret)
goto err_unpin;
}
i915_vma_move_to_active(so->vma, req, 0);
i915_vma_move_to_active(so.vma, rq, 0);
err_unpin:
i915_vma_unpin(so->vma);
return ret;
}
void i915_gem_render_state_fini(struct intel_engine_cs *engine)
{
struct intel_render_state *so;
struct drm_i915_gem_object *obj;
so = fetch_and_zero(&engine->render_state);
if (!so)
return;
obj = so->vma->obj;
i915_vma_close(so->vma);
__i915_gem_object_release_unless_active(obj);
kfree(so);
i915_vma_unpin(so.vma);
err_vma:
i915_vma_close(so.vma);
err_obj:
__i915_gem_object_release_unless_active(so.obj);
return err;
}

View File

@ -26,8 +26,6 @@
struct drm_i915_gem_request;
int i915_gem_render_state_init(struct intel_engine_cs *engine);
int i915_gem_render_state_emit(struct drm_i915_gem_request *req);
void i915_gem_render_state_fini(struct intel_engine_cs *engine);
int i915_gem_render_state_emit(struct drm_i915_gem_request *rq);
#endif /* _I915_GEM_RENDER_STATE_H_ */

View File

@ -259,6 +259,8 @@ static void mark_busy(struct drm_i915_private *i915)
if (INTEL_GEN(i915) >= 6)
gen6_rps_busy(i915);
intel_engines_unpark(i915);
queue_delayed_work(i915->wq,
&i915->gt.retire_work,
round_jiffies_up_relative(HZ));

View File

@ -294,6 +294,18 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
ELK_STOLEN_RESERVED);
dma_addr_t stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) {
*base = 0;
*size = 0;
return;
}
/*
* Whether ILK really reuses the ELK register for this is unclear.
* Let's see if we catch anyone with this supposedly enabled on ILK.
*/
WARN(IS_GEN5(dev_priv), "ILK stolen reserved found? 0x%08x\n", reg_val);
*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);
@ -313,6 +325,12 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
*base = 0;
*size = 0;
return;
}
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
@ -339,6 +357,12 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
*base = 0;
*size = 0;
return;
}
*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;
switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
@ -359,6 +383,12 @@ static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
*base = 0;
*size = 0;
return;
}
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
@ -387,6 +417,12 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
dma_addr_t stolen_top;
if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
*base = 0;
*size = 0;
return;
}
stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
@ -436,14 +472,12 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
case 3:
break;
case 4:
if (IS_G4X(dev_priv))
g4x_get_stolen_reserved(dev_priv,
&reserved_base, &reserved_size);
break;
if (!IS_G4X(dev_priv))
break;
/* fall through */
case 5:
/* Assume the gen6 maximum for the older platforms. */
reserved_size = 1024 * 1024;
reserved_base = stolen_top - reserved_size;
g4x_get_stolen_reserved(dev_priv,
&reserved_base, &reserved_size);
break;
case 6:
gen6_get_stolen_reserved(dev_priv,
@ -473,9 +507,9 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
if (reserved_base < dev_priv->mm.stolen_base ||
reserved_base + reserved_size > stolen_top) {
dma_addr_t reserved_top = reserved_base + reserved_size;
DRM_DEBUG_KMS("Stolen reserved area [%pad - %pad] outside stolen memory [%pad - %pad]\n",
&reserved_base, &reserved_top,
&dev_priv->mm.stolen_base, &stolen_top);
DRM_ERROR("Stolen reserved area [%pad - %pad] outside stolen memory [%pad - %pad]\n",
&reserved_base, &reserved_top,
&dev_priv->mm.stolen_base, &stolen_top);
return 0;
}

View File

@ -345,6 +345,15 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
/*
* The tiling mode of a proxy object is handled by its generator and is
* not allowed to be changed by userspace.
*/
if (i915_gem_object_is_proxy(obj)) {
err = -ENXIO;
goto err;
}
if (!i915_tiling_ok(obj, args->tiling_mode, args->stride)) {
err = -EINVAL;
goto err;

View File

@ -30,6 +30,8 @@
#include <generated/utsrelease.h>
#include <linux/stop_machine.h>
#include <linux/zlib.h>
#include <drm/drm_print.h>
#include "i915_drv.h"
static const char *engine_str(int engine)
@ -175,6 +177,21 @@ static void i915_error_puts(struct drm_i915_error_state_buf *e,
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
static void __i915_printfn_error(struct drm_printer *p, struct va_format *vaf)
{
i915_error_vprintf(p->arg, vaf->fmt, *vaf->va);
}
static inline struct drm_printer
i915_error_printer(struct drm_i915_error_state_buf *e)
{
struct drm_printer p = {
.printfn = __i915_printfn_error,
.arg = e,
};
return p;
}
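i915_error_printer() wraps the error-state string buffer in a drm_printer, so shared helpers such as intel_uc_fw_dump() can print into the capture without knowing about the buffer. A plain-C sketch of that callback-printer adapter, using illustrative names rather than the drm_printer API:

#include <stdarg.h>
#include <stdio.h>

struct printer {
	void (*printfn)(struct printer *p, const char *fmt, va_list args);
	void *arg;
};

static void pr_printf(struct printer *p, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	p->printfn(p, fmt, args);
	va_end(args);
}

/* One concrete backend: print to a stdio stream. */
static void stream_printfn(struct printer *p, const char *fmt, va_list args)
{
	vfprintf(p->arg, fmt, args);
}

/* A dump helper only ever sees the generic printer. */
static void dump_firmware(struct printer *p, const char *path, int major, int minor)
{
	pr_printf(p, "firmware: %s, version %d.%d\n", path, major, minor);
}

int main(void)
{
	struct printer p = { .printfn = stream_printfn, .arg = stdout };

	dump_firmware(&p, "example/firmware.bin", 9, 39);
	return 0;
}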
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
struct compress {
@ -589,6 +606,21 @@ static void err_print_pciid(struct drm_i915_error_state_buf *m,
pdev->subsystem_device);
}
static void err_print_uc(struct drm_i915_error_state_buf *m,
const struct i915_error_uc *error_uc)
{
struct drm_printer p = i915_error_printer(m);
const struct i915_gpu_state *error =
container_of(error_uc, typeof(*error), uc);
if (!error->device_info.has_guc)
return;
intel_uc_fw_dump(&error_uc->guc_fw, &p);
intel_uc_fw_dump(&error_uc->huc_fw, &p);
print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log);
}
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
const struct i915_gpu_state *error)
{
@ -763,8 +795,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
print_error_obj(m, NULL, "Semaphores", error->semaphore);
print_error_obj(m, NULL, "GuC log buffer", error->guc_log);
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
@ -773,6 +803,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_print_capabilities(m, &error->device_info);
err_print_params(m, &error->params);
err_print_uc(m, &error->uc);
if (m->bytes == 0 && m->err)
return m->err;
@ -831,6 +862,22 @@ static __always_inline void free_param(const char *type, void *x)
kfree(*(void **)x);
}
static void cleanup_params(struct i915_gpu_state *error)
{
#define FREE(T, x, ...) free_param(#T, &error->params.x);
I915_PARAMS_FOR_EACH(FREE);
#undef FREE
}
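capture_params()/cleanup_params() lean on the I915_PARAMS_FOR_EACH X-macro together with the typed dup_param()/free_param() helpers, so the copy and cleanup logic cannot drift out of sync with the parameter list. A compact sketch of the X-macro pattern, with two invented parameters and a simplified type check:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PARAMS_FOR_EACH(X) \
	X(int, enable_guc) \
	X(char *, firmware_path)

struct params {
#define MEMBER(T, name) T name;
	PARAMS_FOR_EACH(MEMBER)
#undef MEMBER
};

static void dup_param(const char *type, void *x)
{
	if (!strcmp(type, "char *"))
		*(char **)x = *(char **)x ? strdup(*(char **)x) : NULL;
}

static void free_param(const char *type, void *x)
{
	if (!strcmp(type, "char *")) {
		free(*(char **)x);
		*(char **)x = NULL;
	}
}

static void capture_params(struct params *dst, const struct params *src)
{
	*dst = *src;			/* shallow copy of every member */
#define DUP(T, name) dup_param(#T, &dst->name);
	PARAMS_FOR_EACH(DUP)
#undef DUP
}

static void cleanup_params(struct params *p)
{
#define FREE(T, name) free_param(#T, &p->name);
	PARAMS_FOR_EACH(FREE)
#undef FREE
}

int main(void)
{
	struct params live = { .enable_guc = 1, .firmware_path = "example/fw.bin" };
	struct params snap;

	capture_params(&snap, &live);
	printf("enable_guc=%d path=%s\n", snap.enable_guc, snap.firmware_path);
	cleanup_params(&snap);
	return 0;
}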
static void cleanup_uc_state(struct i915_gpu_state *error)
{
struct i915_error_uc *error_uc = &error->uc;
kfree(error_uc->guc_fw.path);
kfree(error_uc->huc_fw.path);
i915_error_object_free(error_uc->guc_log);
}
void __i915_gpu_state_free(struct kref *error_ref)
{
struct i915_gpu_state *error =
@ -857,7 +904,6 @@ void __i915_gpu_state_free(struct kref *error_ref)
}
i915_error_object_free(error->semaphore);
i915_error_object_free(error->guc_log);
for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
kfree(error->active_bo[i]);
@ -866,9 +912,8 @@ void __i915_gpu_state_free(struct kref *error_ref)
kfree(error->overlay);
kfree(error->display);
#define FREE(T, x, ...) free_param(#T, &error->params.x);
I915_PARAMS_FOR_EACH(FREE);
#undef FREE
cleanup_params(error);
cleanup_uc_state(error);
kfree(error);
}
@ -1172,11 +1217,13 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
if (INTEL_GEN(dev_priv) >= 6) {
ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
if (INTEL_GEN(dev_priv) >= 8)
if (INTEL_GEN(dev_priv) >= 8) {
gen8_record_semaphore_state(error, engine, ee);
else
ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
} else {
gen6_record_semaphore_state(engine, ee);
ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
}
}
if (INTEL_GEN(dev_priv) >= 4) {
@ -1559,15 +1606,25 @@ static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
error->pinned_bo = bo;
}
static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
struct i915_gpu_state *error)
static void capture_uc_state(struct i915_gpu_state *error)
{
/* Capturing log buf contents won't be useful if logging was disabled */
if (!dev_priv->guc.log.vma || (i915_modparams.guc_log_level < 0))
struct drm_i915_private *i915 = error->i915;
struct i915_error_uc *error_uc = &error->uc;
/* Capturing uC state won't be useful if there is no GuC */
if (!error->device_info.has_guc)
return;
error->guc_log = i915_error_object_create(dev_priv,
dev_priv->guc.log.vma);
error_uc->guc_fw = i915->guc.fw;
error_uc->huc_fw = i915->huc.fw;
/* Non-default firmware paths will be specified by the modparam.
* As modparams are generally accessible from userspace, make
* explicit copies of the firmware paths.
*/
error_uc->guc_fw.path = kstrdup(i915->guc.fw.path, GFP_ATOMIC);
error_uc->huc_fw.path = kstrdup(i915->huc.fw.path, GFP_ATOMIC);
error_uc->guc_log = i915_error_object_create(i915, i915->guc.log.vma);
}
/* Capture all registers which don't fit into another category. */
@ -1695,6 +1752,14 @@ static __always_inline void dup_param(const char *type, void *x)
*(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
}
static void capture_params(struct i915_gpu_state *error)
{
error->params = i915_modparams;
#define DUP(T, x, ...) dup_param(#T, &error->params.x);
I915_PARAMS_FOR_EACH(DUP);
#undef DUP
}
static int capture(void *data)
{
struct i915_gpu_state *error = data;
@ -1705,10 +1770,8 @@ static int capture(void *data)
ktime_to_timeval(ktime_sub(ktime_get(),
error->i915->gt.last_init_time));
error->params = i915_modparams;
#define DUP(T, x, ...) dup_param(#T, &error->params.x);
I915_PARAMS_FOR_EACH(DUP);
#undef DUP
capture_params(error);
capture_uc_state(error);
i915_capture_gen_state(error->i915, error);
i915_capture_reg_state(error->i915, error);
@ -1716,7 +1779,6 @@ static int capture(void *data)
i915_gem_record_rings(error->i915, error);
i915_capture_active_buffers(error->i915, error);
i915_capture_pinned_buffers(error->i915, error);
i915_gem_capture_guc_log_buffer(error->i915, error);
error->overlay = intel_overlay_capture_error_state(error->i915);
error->display = intel_display_capture_error_state(error->i915);

View File

@ -102,13 +102,6 @@
#define GUC_ENABLE_MIA_CLOCK_GATING (1<<15)
#define GUC_GEN10_SHIM_WC_ENABLE (1<<21)
#define GUC_SHIM_CONTROL_VALUE (GUC_DISABLE_SRAM_INIT_TO_ZEROES | \
GUC_ENABLE_READ_CACHE_LOGIC | \
GUC_ENABLE_MIA_CACHING | \
GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA | \
GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \
GUC_ENABLE_MIA_CLOCK_GATING)
#define GUC_SEND_INTERRUPT _MMIO(0xc4c8)
#define GUC_SEND_TRIGGER (1<<0)

View File

@ -1068,6 +1068,9 @@ static void notify_ring(struct intel_engine_cs *engine)
struct drm_i915_gem_request *rq = NULL;
struct intel_wait *wait;
if (!engine->breadcrumbs.irq_armed)
return;
atomic_inc(&engine->irq_count);
set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
@ -1101,7 +1104,8 @@ static void notify_ring(struct intel_engine_cs *engine)
if (wakeup)
wake_up_process(wait->tsk);
} else {
__intel_engine_disarm_breadcrumbs(engine);
if (engine->breadcrumbs.irq_armed)
__intel_engine_disarm_breadcrumbs(engine);
}
spin_unlock(&engine->breadcrumbs.irq_lock);
@ -1400,7 +1404,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
}
if (tasklet)
tasklet_hi_schedule(&execlists->irq_tasklet);
tasklet_hi_schedule(&execlists->tasklet);
}
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,

View File

@ -85,9 +85,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv)
{
strncpy(dev_priv->perf.oa.test_config.uuid,
strlcpy(dev_priv->perf.oa.test_config.uuid,
"d6de6f55-e526-4f79-a6a6-d7315c09044e",
UUID_STRING_LEN);
sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
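The strncpy() to strlcpy() conversions in these OA test-config loaders guarantee that the UUID strings stay NUL-terminated: the UUIDs are exactly UUID_STRING_LEN characters, so strncpy() with that length never writes the terminator. A small sketch of the difference, where my_strlcpy() is only a minimal stand-in for the kernel helper:

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';		/* always terminate */
	}
	return len;
}

int main(void)
{
	const char uuid[] = "d6de6f55-e526-4f79-a6a6-d7315c09044e";	/* 36 chars */
	char a[36 + 1], b[36 + 1];

	strncpy(a, uuid, 36);		/* copies 36 bytes, never writes a[36] */
	my_strlcpy(b, uuid, sizeof(b));	/* terminates within sizeof(b) */

	a[36] = '\0';			/* strncpy callers must terminate by hand */
	printf("%s\n%s\n", a, b);
	return 0;
}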

View File

@ -83,9 +83,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv)
{
strncpy(dev_priv->perf.oa.test_config.uuid,
strlcpy(dev_priv->perf.oa.test_config.uuid,
"5ee72f5c-092f-421e-8b70-225f7c3e9612",
UUID_STRING_LEN);
sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;

View File

@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv)
{
strncpy(dev_priv->perf.oa.test_config.uuid,
strlcpy(dev_priv->perf.oa.test_config.uuid,
"74fb4902-d3d3-4237-9e90-cbdc68d0a446",
UUID_STRING_LEN);
sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;

View File

@ -0,0 +1,109 @@
/*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/sysfs.h>
#include "i915_drv.h"
#include "i915_oa_cflgt3.h"
static const struct i915_oa_reg b_counter_config_test_oa[] = {
{ _MMIO(0x2740), 0x00000000 },
{ _MMIO(0x2744), 0x00800000 },
{ _MMIO(0x2714), 0xf0800000 },
{ _MMIO(0x2710), 0x00000000 },
{ _MMIO(0x2724), 0xf0800000 },
{ _MMIO(0x2720), 0x00000000 },
{ _MMIO(0x2770), 0x00000004 },
{ _MMIO(0x2774), 0x00000000 },
{ _MMIO(0x2778), 0x00000003 },
{ _MMIO(0x277c), 0x00000000 },
{ _MMIO(0x2780), 0x00000007 },
{ _MMIO(0x2784), 0x00000000 },
{ _MMIO(0x2788), 0x00100002 },
{ _MMIO(0x278c), 0x0000fff7 },
{ _MMIO(0x2790), 0x00100002 },
{ _MMIO(0x2794), 0x0000ffcf },
{ _MMIO(0x2798), 0x00100082 },
{ _MMIO(0x279c), 0x0000ffef },
{ _MMIO(0x27a0), 0x001000c2 },
{ _MMIO(0x27a4), 0x0000ffe7 },
{ _MMIO(0x27a8), 0x00100001 },
{ _MMIO(0x27ac), 0x0000ffe7 },
};
static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};
static const struct i915_oa_reg mux_config_test_oa[] = {
{ _MMIO(0x9840), 0x00000080 },
{ _MMIO(0x9888), 0x11810000 },
{ _MMIO(0x9888), 0x07810013 },
{ _MMIO(0x9888), 0x1f810000 },
{ _MMIO(0x9888), 0x1d810000 },
{ _MMIO(0x9888), 0x1b930040 },
{ _MMIO(0x9888), 0x07e54000 },
{ _MMIO(0x9888), 0x1f908000 },
{ _MMIO(0x9888), 0x11900000 },
{ _MMIO(0x9888), 0x37900000 },
{ _MMIO(0x9888), 0x53900000 },
{ _MMIO(0x9888), 0x45900000 },
{ _MMIO(0x9888), 0x33900000 },
};
static ssize_t
show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "1\n");
}
void
i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)
{
strncpy(dev_priv->perf.oa.test_config.uuid,
"577e8e2c-3fa0-4875-8743-3538d585e3b0",
UUID_STRING_LEN);
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
dev_priv->perf.oa.test_config.sysfs_metric.name = "577e8e2c-3fa0-4875-8743-3538d585e3b0";
dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
}

View File

@ -0,0 +1,34 @@
/*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_CFLGT3_H__
#define __I915_OA_CFLGT3_H__
extern void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv);
#endif

View File

@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv)
{
strncpy(dev_priv->perf.oa.test_config.uuid,
strlcpy(dev_priv->perf.oa.test_config.uuid,
"4a534b07-cba3-414d-8d60-874830e883aa",
UUID_STRING_LEN);
sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;

View File

@ -0,0 +1,121 @@
/*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/sysfs.h>
#include "i915_drv.h"
#include "i915_oa_cnl.h"
static const struct i915_oa_reg b_counter_config_test_oa[] = {
{ _MMIO(0x2740), 0x00000000 },
{ _MMIO(0x2710), 0x00000000 },
{ _MMIO(0x2714), 0xf0800000 },
{ _MMIO(0x2720), 0x00000000 },
{ _MMIO(0x2724), 0xf0800000 },
{ _MMIO(0x2770), 0x00000004 },
{ _MMIO(0x2774), 0x0000ffff },
{ _MMIO(0x2778), 0x00000003 },
{ _MMIO(0x277c), 0x0000ffff },
{ _MMIO(0x2780), 0x00000007 },
{ _MMIO(0x2784), 0x0000ffff },
{ _MMIO(0x2788), 0x00100002 },
{ _MMIO(0x278c), 0x0000fff7 },
{ _MMIO(0x2790), 0x00100002 },
{ _MMIO(0x2794), 0x0000ffcf },
{ _MMIO(0x2798), 0x00100082 },
{ _MMIO(0x279c), 0x0000ffef },
{ _MMIO(0x27a0), 0x001000c2 },
{ _MMIO(0x27a4), 0x0000ffe7 },
{ _MMIO(0x27a8), 0x00100001 },
{ _MMIO(0x27ac), 0x0000ffe7 },
};
static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};
static const struct i915_oa_reg mux_config_test_oa[] = {
{ _MMIO(0xd04), 0x00000200 },
{ _MMIO(0x9884), 0x00000007 },
{ _MMIO(0x9888), 0x17060000 },
{ _MMIO(0x9840), 0x00000000 },
{ _MMIO(0x9884), 0x00000007 },
{ _MMIO(0x9888), 0x13034000 },
{ _MMIO(0x9884), 0x00000007 },
{ _MMIO(0x9888), 0x07060066 },
{ _MMIO(0x9884), 0x00000007 },
{ _MMIO(0x9888), 0x05060000 },
{ _MMIO(0x9884), 0x00000007 },
{ _MMIO(0x9888), 0x0f080040 },
{ _MMIO(0x9884), 0x00000007 },
{ _MMIO(0x9888), 0x07091000 },
{ _MMIO(0x9884), 0x00000007 },
{ _MMIO(0x9888), 0x0f041000 },
{ _MMIO(0x9884), 0x00000007 },
{ _MMIO(0x9888), 0x1d004000 },
{ _MMIO(0x9884), 0x00000007 },
{ _MMIO(0x9888), 0x35000000 },
{ _MMIO(0x9884), 0x00000007 },
{ _MMIO(0x9888), 0x49000000 },
{ _MMIO(0x9884), 0x00000007 },
{ _MMIO(0x9888), 0x3d000000 },
{ _MMIO(0x9884), 0x00000007 },
{ _MMIO(0x9888), 0x31000000 },
};
static ssize_t
show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "1\n");
}
void
i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)
{
strncpy(dev_priv->perf.oa.test_config.uuid,
"db41edd4-d8e7-4730-ad11-b9a2d6833503",
UUID_STRING_LEN);
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
dev_priv->perf.oa.test_config.sysfs_metric.name = "db41edd4-d8e7-4730-ad11-b9a2d6833503";
dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
}

View File

@ -0,0 +1,34 @@
/*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_CNL_H__
#define __I915_OA_CNL_H__
extern void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv);
#endif

View File

@ -83,9 +83,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv)
{
strncpy(dev_priv->perf.oa.test_config.uuid,
strlcpy(dev_priv->perf.oa.test_config.uuid,
"dd3fd789-e783-4204-8cd0-b671bbccb0cf",
UUID_STRING_LEN);
sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;

View File

@ -113,9 +113,9 @@ show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *b
void
i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv)
{
strncpy(dev_priv->perf.oa.test_config.uuid,
strlcpy(dev_priv->perf.oa.test_config.uuid,
"403d8832-1a27-4aa6-a64e-f5389ce7b212",
UUID_STRING_LEN);
sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_render_basic;

View File

@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv)
{
strncpy(dev_priv->perf.oa.test_config.uuid,
strlcpy(dev_priv->perf.oa.test_config.uuid,
"baa3c7e4-52b6-4b85-801e-465a94b746dd",
UUID_STRING_LEN);
sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;

View File

@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv)
{
strncpy(dev_priv->perf.oa.test_config.uuid,
strlcpy(dev_priv->perf.oa.test_config.uuid,
"f1792f32-6db2-4b50-b4b2-557128f1688d",
UUID_STRING_LEN);
sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;

View File

@ -83,9 +83,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv)
{
strncpy(dev_priv->perf.oa.test_config.uuid,
strlcpy(dev_priv->perf.oa.test_config.uuid,
"1651949f-0ac0-4cb1-a06f-dafd74a407d1",
UUID_STRING_LEN);
sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;

View File

@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv)
{
strncpy(dev_priv->perf.oa.test_config.uuid,
strlcpy(dev_priv->perf.oa.test_config.uuid,
"2b985803-d3c9-4629-8a4f-634bfecba0e8",
UUID_STRING_LEN);
sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;

View File

@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv)
{
strncpy(dev_priv->perf.oa.test_config.uuid,
strlcpy(dev_priv->perf.oa.test_config.uuid,
"882fa433-1f4a-4a67-a962-c741888fe5f5",
UUID_STRING_LEN);
sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;

View File

@ -207,6 +207,8 @@
#include "i915_oa_kblgt3.h"
#include "i915_oa_glk.h"
#include "i915_oa_cflgt2.h"
#include "i915_oa_cflgt3.h"
#include "i915_oa_cnl.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
* is currently generally designed assuming the largest 16M size is used such
@ -1851,7 +1853,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
* be read back from automatically triggered reports, as part of the
* RPT_ID field.
*/
if (IS_GEN9(dev_priv)) {
if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) {
I915_WRITE(GEN8_OA_DEBUG,
_MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
@ -1884,6 +1886,16 @@ static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
}
static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
{
/* Reset all contexts' slices/subslices configurations. */
gen8_configure_all_contexts(dev_priv, NULL, false);
/* Make sure we disable noa to save power. */
I915_WRITE(RPM_CONFIG1,
I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
}
static void gen7_oa_enable(struct drm_i915_private *dev_priv)
{
/*
@ -2934,6 +2946,10 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
} else if (IS_COFFEELAKE(dev_priv)) {
if (IS_CFL_GT2(dev_priv))
i915_perf_load_test_config_cflgt2(dev_priv);
if (IS_CFL_GT3(dev_priv))
i915_perf_load_test_config_cflgt3(dev_priv);
} else if (IS_CANNONLAKE(dev_priv)) {
i915_perf_load_test_config_cnl(dev_priv);
}
if (dev_priv->perf.oa.test_config.id == 0)
@ -3019,11 +3035,18 @@ static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
(addr >= RPM_CONFIG0.reg && addr <= NOA_CONFIG(8).reg);
}
static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
return gen8_is_valid_mux_addr(dev_priv, addr) ||
(addr >= OA_PERFCNT3_LO.reg && addr <= OA_PERFCNT4_HI.reg);
}
static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
return gen7_is_valid_mux_addr(dev_priv, addr) ||
(addr >= 0x25100 && addr <= 0x2FF90) ||
addr == 0x9ec0;
(addr >= HSW_MBVID2_NOA0.reg && addr <= HSW_MBVID2_NOA9.reg) ||
addr == HSW_MBVID2_MISR0.reg;
}
static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
@ -3419,41 +3442,46 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
* worth the complexity to maintain now that BDW+ enable
* execlist mode by default.
*/
dev_priv->perf.oa.ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
dev_priv->perf.oa.ops.is_valid_mux_reg =
gen8_is_valid_mux_addr;
dev_priv->perf.oa.ops.is_valid_flex_reg =
gen8_is_valid_flex_addr;
dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
dev_priv->perf.oa.ops.read = gen8_oa_read;
dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) {
dev_priv->perf.oa.ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
dev_priv->perf.oa.ops.is_valid_mux_reg =
gen8_is_valid_mux_addr;
dev_priv->perf.oa.ops.is_valid_flex_reg =
gen8_is_valid_flex_addr;
if (IS_GEN8(dev_priv)) {
dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;
dev_priv->perf.oa.timestamp_frequency = 12500000;
dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);
if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->perf.oa.ops.is_valid_mux_reg =
chv_is_valid_mux_addr;
}
} else if (IS_GEN9(dev_priv)) {
dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
if (IS_GEN8(dev_priv)) {
dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;
dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);
} else {
dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
}
switch (dev_priv->info.platform) {
case INTEL_BROADWELL:
dev_priv->perf.oa.timestamp_frequency = 12500000;
break;
case INTEL_BROXTON:
case INTEL_GEMINILAKE:
dev_priv->perf.oa.timestamp_frequency = 19200000;
@ -3464,11 +3492,28 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.timestamp_frequency = 12000000;
break;
default:
/* Leave timestamp_frequency to 0 so we can
* detect unsupported platforms.
*/
break;
}
} else if (IS_GEN10(dev_priv)) {
dev_priv->perf.oa.ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
dev_priv->perf.oa.ops.is_valid_mux_reg =
gen10_is_valid_mux_addr;
dev_priv->perf.oa.ops.is_valid_flex_reg =
gen8_is_valid_flex_addr;
dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;
dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
/* Default frequency, although we need to read it from
* the register as it might vary between parts.
*/
dev_priv->perf.oa.timestamp_frequency = 12000000;
}
}

View File

@ -355,9 +355,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ECOCHK_PPGTT_WT_HSW (0x2<<3)
#define ECOCHK_PPGTT_WB_HSW (0x3<<3)
#define GEN8_CONFIG0 _MMIO(0xD00)
#define GEN9_DEFAULT_FIXES (1 << 3 | 1 << 2 | 1 << 1)
#define GAC_ECO_BITS _MMIO(0x14090)
#define ECOBITS_SNB_BIT (1<<13)
#define ECOBITS_PPGTT_CACHE64B (3<<8)
@ -382,6 +379,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN8_STOLEN_RESERVED_2M (1 << 7)
#define GEN8_STOLEN_RESERVED_4M (2 << 7)
#define GEN8_STOLEN_RESERVED_8M (3 << 7)
#define GEN6_STOLEN_RESERVED_ENABLE (1 << 0)
/* VGA stuff */
@ -1109,16 +1107,50 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OA_PERFCNT1_HI _MMIO(0x91BC)
#define OA_PERFCNT2_LO _MMIO(0x91C0)
#define OA_PERFCNT2_HI _MMIO(0x91C4)
#define OA_PERFCNT3_LO _MMIO(0x91C8)
#define OA_PERFCNT3_HI _MMIO(0x91CC)
#define OA_PERFCNT4_LO _MMIO(0x91D8)
#define OA_PERFCNT4_HI _MMIO(0x91DC)
#define OA_PERFMATRIX_LO _MMIO(0x91C8)
#define OA_PERFMATRIX_HI _MMIO(0x91CC)
/* RPM unit config (Gen8+) */
#define RPM_CONFIG0 _MMIO(0x0D00)
#define RPM_CONFIG1 _MMIO(0x0D04)
#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3
#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (1 << GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT)
#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 0
#define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 1
#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT 1
#define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK (0x3 << GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT)
/* RPC unit config (Gen8+) */
#define RPM_CONFIG _MMIO(0x0D08)
#define RPM_CONFIG1 _MMIO(0x0D04)
#define GEN10_GT_NOA_ENABLE (1 << 9)
/* GPM unit config (Gen9+) */
#define CTC_MODE _MMIO(0xA26C)
#define CTC_SOURCE_PARAMETER_MASK 1
#define CTC_SOURCE_CRYSTAL_CLOCK 0
#define CTC_SOURCE_DIVIDE_LOGIC 1
#define CTC_SHIFT_PARAMETER_SHIFT 1
#define CTC_SHIFT_PARAMETER_MASK (0x3 << CTC_SHIFT_PARAMETER_SHIFT)
/* RCP unit config (Gen8+) */
#define RCP_CONFIG _MMIO(0x0D08)
/* NOA (HSW) */
#define HSW_MBVID2_NOA0 _MMIO(0x9E80)
#define HSW_MBVID2_NOA1 _MMIO(0x9E84)
#define HSW_MBVID2_NOA2 _MMIO(0x9E88)
#define HSW_MBVID2_NOA3 _MMIO(0x9E8C)
#define HSW_MBVID2_NOA4 _MMIO(0x9E90)
#define HSW_MBVID2_NOA5 _MMIO(0x9E94)
#define HSW_MBVID2_NOA6 _MMIO(0x9E98)
#define HSW_MBVID2_NOA7 _MMIO(0x9E9C)
#define HSW_MBVID2_NOA8 _MMIO(0x9EA0)
#define HSW_MBVID2_NOA9 _MMIO(0x9EA4)
#define HSW_MBVID2_MISR0 _MMIO(0x9EC0)
/* NOA (Gen8+) */
#define NOA_CONFIG(i) _MMIO(0x0D0C + (i) * 4)
@ -2329,6 +2361,8 @@ enum i915_power_well_id {
#define ARB_MODE_SWIZZLE_BDW (1<<1)
#define RENDER_HWS_PGA_GEN7 _MMIO(0x04080)
#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100*(engine)->hw_id)
#define GEN8_RING_FAULT_REG _MMIO(0x4094)
#define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
#define RING_FAULT_GTTSEL_MASK (1<<11)
#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
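The new GEN8_RING_FAULT_ENGINE_ID() field joins the existing source-ID and fault-type fields; a small standalone decoder using the same shifts and masks shows how a raw fault value breaks apart (the sample value is arbitrary, and bit 0 is treated here as the conventional valid flag):

#include <stdint.h>
#include <stdio.h>

/* Field extraction mirroring the definitions above. */
#define FAULT_ENGINE_ID(x)  (((x) >> 12) & 0x7)
#define FAULT_GTTSEL(x)     (((x) >> 11) & 0x1)
#define FAULT_SRCID(x)      (((x) >> 3) & 0xff)
#define FAULT_TYPE(x)       (((x) >> 1) & 0x3)
#define FAULT_VALID(x)      ((x) & 0x1)

int main(void)
{
    uint32_t fault = 0x00005a43;    /* made-up example value */

    printf("engine=%u gttsel=%u srcid=0x%02x type=%u valid=%u\n",
           FAULT_ENGINE_ID(fault), FAULT_GTTSEL(fault),
           FAULT_SRCID(fault), FAULT_TYPE(fault), FAULT_VALID(fault));
    return 0;
}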
@ -2951,9 +2985,6 @@ enum i915_power_well_id {
#define ILK_DPFC_CHICKEN _MMIO(0x43224)
#define ILK_DPFC_DISABLE_DUMMY0 (1<<8)
#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23)
#define GLK_SKIP_SEG_EN (1<<12)
#define GLK_SKIP_SEG_COUNT_MASK (3<<10)
#define GLK_SKIP_SEG_COUNT(x) ((x)<<10)
#define ILK_FBC_RT_BASE _MMIO(0x2128)
#define ILK_FBC_RT_VALID (1<<0)
#define SNB_FBC_FRONT_BUFFER (1<<1)
@ -3398,6 +3429,7 @@ enum i915_power_well_id {
#define ELK_STOLEN_RESERVED _MMIO(MCHBAR_MIRROR_BASE + 0x48)
#define G4X_STOLEN_RESERVED_ADDR1_MASK (0xFFFF << 16)
#define G4X_STOLEN_RESERVED_ADDR2_MASK (0xFFF << 4)
#define G4X_STOLEN_RESERVED_ENABLE (1 << 0)
/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
#define DCLK _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5e04)
@ -3819,6 +3851,7 @@ enum {
* GEN9 clock gating regs
*/
#define GEN9_CLKGATE_DIS_0 _MMIO(0x46530)
#define DARBF_GATING_DIS (1 << 27)
#define PWM2_GATING_DIS (1 << 14)
#define PWM1_GATING_DIS (1 << 13)
@ -3837,6 +3870,7 @@ enum {
*/
#define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4)
#define SARBUNIT_CLKGATE_DIS (1 << 5)
#define RCCUNIT_CLKGATE_DIS (1 << 7)
/*
* Display engine regs
@ -6263,7 +6297,7 @@ enum {
#define _PLANE_CTL_2_A 0x70280
#define _PLANE_CTL_3_A 0x70380
#define PLANE_CTL_ENABLE (1 << 31)
#define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30)
#define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-GLK */
#define PLANE_CTL_FORMAT_MASK (0xf << 24)
#define PLANE_CTL_FORMAT_YUV422 ( 0 << 24)
#define PLANE_CTL_FORMAT_NV12 ( 1 << 24)
@ -6273,7 +6307,7 @@ enum {
#define PLANE_CTL_FORMAT_AYUV ( 8 << 24)
#define PLANE_CTL_FORMAT_INDEXED ( 12 << 24)
#define PLANE_CTL_FORMAT_RGB_565 ( 14 << 24)
#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23)
#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23) /* Pre-GLK */
#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21)
#define PLANE_CTL_KEY_ENABLE_SOURCE ( 1 << 21)
#define PLANE_CTL_KEY_ENABLE_DESTINATION ( 2 << 21)
@ -6286,13 +6320,13 @@ enum {
#define PLANE_CTL_YUV422_VYUY ( 3 << 16)
#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15)
#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13)
#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) /* Pre-GLK */
#define PLANE_CTL_TILED_MASK (0x7 << 10)
#define PLANE_CTL_TILED_LINEAR ( 0 << 10)
#define PLANE_CTL_TILED_X ( 1 << 10)
#define PLANE_CTL_TILED_Y ( 4 << 10)
#define PLANE_CTL_TILED_YF ( 5 << 10)
#define PLANE_CTL_ALPHA_MASK (0x3 << 4)
#define PLANE_CTL_ALPHA_MASK (0x3 << 4) /* Pre-GLK */
#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4)
#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4)
#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4)
@ -6332,6 +6366,10 @@ enum {
#define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30)
#define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23)
#define PLANE_COLOR_PLANE_GAMMA_DISABLE (1 << 13)
#define PLANE_COLOR_ALPHA_MASK (0x3 << 4)
#define PLANE_COLOR_ALPHA_DISABLE (0 << 4)
#define PLANE_COLOR_ALPHA_SW_PREMULTIPLY (2 << 4)
#define PLANE_COLOR_ALPHA_HW_PREMULTIPLY (3 << 4)
#define _PLANE_BUF_CFG_1_A 0x7027c
#define _PLANE_BUF_CFG_2_A 0x7037c
#define _PLANE_NV12_BUF_CFG_1_A 0x70278
@ -7774,8 +7812,9 @@ enum {
#define FORCEWAKE_ACK_MEDIA_GEN9 _MMIO(0x0D88)
#define FORCEWAKE_ACK_RENDER_GEN9 _MMIO(0x0D84)
#define FORCEWAKE_ACK_BLITTER_GEN9 _MMIO(0x130044)
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2
#define FORCEWAKE_KERNEL BIT(0)
#define FORCEWAKE_USER BIT(1)
#define FORCEWAKE_KERNEL_FALLBACK BIT(15)
#define FORCEWAKE_MT_ACK _MMIO(0x130040)
#define ECOBUS _MMIO(0xa180)
#define FORCEWAKE_MT_ENABLE (1<<5)
@ -7905,6 +7944,7 @@ enum {
#define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098)
#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C)
#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0)
#define GEN10_MEDIA_WAKE_RATE_LIMIT _MMIO(0xA0A0)
#define GEN6_RC_EVALUATION_INTERVAL _MMIO(0xA0A8)
#define GEN6_RC_IDLE_HYSTERSIS _MMIO(0xA0AC)
#define GEN6_RC_SLEEP _MMIO(0xA0B0)
@ -8036,11 +8076,18 @@ enum {
#define CHV_EU311_PG_ENABLE (1<<1)
#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice)*0x4)
#define GEN10_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + ((slice) / 3) * 0x34 + \
((slice) % 3) * 0x4)
#define GEN9_PGCTL_SLICE_ACK (1 << 0)
#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice)*2))
#define GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? 0x7F : 0x1F)
#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice)*0x8)
#define GEN10_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + ((slice) / 3) * 0x30 + \
((slice) % 3) * 0x8)
#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice)*0x8)
#define GEN10_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + ((slice) / 3) * 0x30 + \
((slice) % 3) * 0x8)
#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2)
#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4)
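The GEN10_*_PGCTL_ACK() macros above fold the slice index into two strides, 0x34 per group of three slices and 0x4 within a group. A quick standalone check of the resulting slice-ACK offsets, with the base address copied from the definition:

#include <stdio.h>

/* Mirror of the GEN10_SLICE_PGCTL_ACK() address math, for inspection only. */
static unsigned int gen10_slice_pgctl_ack(unsigned int slice)
{
    return 0x804c + (slice / 3) * 0x34 + (slice % 3) * 0x4;
}

int main(void)
{
    unsigned int slice;

    /* slices 0..2 -> 0x804c, 0x8050, 0x8054; slice 3 starts a new group at 0x8080 */
    for (slice = 0; slice < 6; slice++)
        printf("slice %u: 0x%04x\n", slice, gen10_slice_pgctl_ack(slice));
    return 0;
}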
@ -8837,6 +8884,12 @@ enum skl_power_gate {
#define ILK_TIMESTAMP_HI _MMIO(0x70070)
#define IVB_TIMESTAMP_CTR _MMIO(0x44070)
#define GEN9_TIMESTAMP_OVERRIDE _MMIO(0x44074)
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT 0
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK 0x3ff
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT 12
#define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK (0xf << 12)
#define _PIPE_FRMTMSTMP_A 0x70048
#define PIPE_FRMTMSTMP(pipe) \
_MMIO_PIPE2(pipe, _PIPE_FRMTMSTMP_A)

View File

@ -99,6 +99,6 @@ __printf(2, 3)
bool __igt_timeout(unsigned long timeout, const char *fmt, ...);
#define igt_timeout(t, fmt, ...) \
__igt_timeout((t), KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
__igt_timeout((t), KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
#endif /* !__I915_SELFTEST_H__ */

View File

@ -83,8 +83,11 @@
(typeof(ptr))(__v & -BIT(n)); \
})
#define ptr_pack_bits(ptr, bits, n) \
((typeof(ptr))((unsigned long)(ptr) | (bits)))
#define ptr_pack_bits(ptr, bits, n) ({ \
unsigned long __bits = (bits); \
GEM_BUG_ON(__bits & -BIT(n)); \
((typeof(ptr))((unsigned long)(ptr) | __bits)); \
})
#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
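The reworked ptr_pack_bits() now asserts that the packed bits fit below the pointer's 1 << n alignment (GEM_BUG_ON(__bits & -BIT(n))). The underlying pack/unpack idea can be shown with a small userspace sketch; the helper names are invented for illustration and assume the pointer really is aligned to at least 1 << n bytes:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define LOW_BITS(n) ((1ul << (n)) - 1)

/* Pack small flags into the low (alignment) bits of a pointer. */
static void *pack_bits(void *ptr, unsigned long bits, unsigned int n)
{
    uintptr_t v = (uintptr_t)ptr;

    assert((bits & ~LOW_BITS(n)) == 0);   /* bits must fit below 1 << n */
    assert((v & LOW_BITS(n)) == 0);       /* pointer must be aligned */
    return (void *)(v | bits);
}

static void *unpack_bits(void *packed, unsigned long *bits, unsigned int n)
{
    uintptr_t v = (uintptr_t)packed;

    *bits = v & LOW_BITS(n);
    return (void *)(v & ~(uintptr_t)LOW_BITS(n));
}

int main(void)
{
    static int object;      /* int is assumed to be at least 4-byte aligned */
    unsigned long flags;
    void *packed = pack_bits(&object, 0x3, 2);
    void *orig = unpack_bits(packed, &flags, 2);

    printf("flags=%lx same=%d\n", flags, orig == (void *)&object);
    return 0;
}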

View File

@ -640,15 +640,17 @@ int __i915_vma_do_pin(struct i915_vma *vma,
if (ret)
goto err_unpin;
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
if (ret)
goto err_remove;
GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);
if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
__i915_vma_set_map_and_fenceable(vma);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
return 0;
@ -656,6 +658,7 @@ err_remove:
if ((bound & I915_VMA_BIND_MASK) == 0) {
i915_vma_remove(vma);
GEM_BUG_ON(vma->pages);
GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
}
err_unpin:
__i915_vma_unpin(vma);
@ -740,6 +743,7 @@ int i915_vma_unbind(struct i915_vma *vma)
/* First wait upon any activity as retiring the request may
* have side-effects such as unpinning or even unbinding this vma.
*/
might_sleep();
active = i915_vma_get_active(vma);
if (active) {
int idx;

View File

@ -102,13 +102,13 @@ static const struct dp_aud_n_m dp_aud_n_m[] = {
};
static const struct dp_aud_n_m *
audio_config_dp_get_n_m(struct intel_crtc *intel_crtc, int rate)
audio_config_dp_get_n_m(const struct intel_crtc_state *crtc_state, int rate)
{
int i;
for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) {
if (rate == dp_aud_n_m[i].sample_rate &&
intel_crtc->config->port_clock == dp_aud_n_m[i].clock)
crtc_state->port_clock == dp_aud_n_m[i].clock)
return &dp_aud_n_m[i];
}
@ -157,8 +157,10 @@ static const struct {
};
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(const struct drm_display_mode *adjusted_mode)
static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
int i;
for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
@ -179,9 +181,11 @@ static u32 audio_config_hdmi_pixel_clock(const struct drm_display_mode *adjusted
return hdmi_audio_clock[i].config;
}
static int audio_config_hdmi_get_n(const struct drm_display_mode *adjusted_mode,
static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state,
int rate)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
int i;
for (i = 0; i < ARRAY_SIZE(hdmi_aud_ncts); i++) {
@ -220,7 +224,9 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
return true;
}
static void g4x_audio_codec_disable(struct intel_encoder *encoder)
static void g4x_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
uint32_t eldv, tmp;
@ -239,11 +245,12 @@ static void g4x_audio_codec_disable(struct intel_encoder *encoder)
I915_WRITE(G4X_AUD_CNTL_ST, tmp);
}
static void g4x_audio_codec_enable(struct drm_connector *connector,
struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
static void g4x_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_connector *connector = conn_state->connector;
uint8_t *eld = connector->eld;
uint32_t eldv;
uint32_t tmp;
@ -279,16 +286,20 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
}
static void
hsw_dp_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
const struct drm_display_mode *adjusted_mode)
hsw_dp_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
int rate = acomp ? acomp->aud_sample_rate[port] : 0;
const struct dp_aud_n_m *nm = audio_config_dp_get_n_m(intel_crtc, rate);
enum pipe pipe = intel_crtc->pipe;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
const struct dp_aud_n_m *nm;
int rate;
u32 tmp;
rate = acomp ? acomp->aud_sample_rate[port] : 0;
nm = audio_config_dp_get_n_m(crtc_state, rate);
if (nm)
DRM_DEBUG_KMS("using Maud %u, Naud %u\n", nm->m, nm->n);
else
@ -323,23 +334,26 @@ hsw_dp_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
}
static void
hsw_hdmi_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
const struct drm_display_mode *adjusted_mode)
hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
int rate = acomp ? acomp->aud_sample_rate[port] : 0;
enum pipe pipe = intel_crtc->pipe;
int n;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
int n, rate;
u32 tmp;
rate = acomp ? acomp->aud_sample_rate[port] : 0;
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
tmp |= audio_config_hdmi_pixel_clock(crtc_state);
n = audio_config_hdmi_get_n(adjusted_mode, rate);
n = audio_config_hdmi_get_n(crtc_state, rate);
if (n != 0) {
DRM_DEBUG_KMS("using N %d\n", n);
@ -363,20 +377,22 @@ hsw_hdmi_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
}
static void
hsw_audio_config_update(struct intel_crtc *intel_crtc, enum port port,
const struct drm_display_mode *adjusted_mode)
hsw_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
if (intel_crtc_has_dp_encoder(intel_crtc->config))
hsw_dp_audio_config_update(intel_crtc, port, adjusted_mode);
if (intel_crtc_has_dp_encoder(crtc_state))
hsw_dp_audio_config_update(encoder, crtc_state);
else
hsw_hdmi_audio_config_update(intel_crtc, port, adjusted_mode);
hsw_hdmi_audio_config_update(encoder, crtc_state);
}
static void hsw_audio_codec_disable(struct intel_encoder *encoder)
static void hsw_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
enum pipe pipe = crtc->pipe;
uint32_t tmp;
DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));
@ -389,7 +405,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
if (intel_crtc_has_dp_encoder(intel_crtc->config))
if (intel_crtc_has_dp_encoder(old_crtc_state))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
@ -402,14 +418,14 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->av_mutex);
}
static void hsw_audio_codec_enable(struct drm_connector *connector,
struct intel_encoder *intel_encoder,
const struct drm_display_mode *adjusted_mode)
static void hsw_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
enum port port = intel_encoder->port;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_connector *connector = conn_state->connector;
enum pipe pipe = crtc->pipe;
const uint8_t *eld = connector->eld;
uint32_t tmp;
int len, i;
@ -448,17 +464,19 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
/* Enable timestamps */
hsw_audio_config_update(intel_crtc, port, adjusted_mode);
hsw_audio_config_update(encoder, crtc_state);
mutex_unlock(&dev_priv->av_mutex);
}
static void ilk_audio_codec_disable(struct intel_encoder *intel_encoder)
static void ilk_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
enum port port = intel_encoder->port;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
enum pipe pipe = crtc->pipe;
enum port port = encoder->port;
uint32_t tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
@ -485,7 +503,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *intel_encoder)
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
if (intel_crtc_has_dp_encoder(intel_crtc->config))
if (intel_crtc_has_dp_encoder(old_crtc_state))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
I915_WRITE(aud_config, tmp);
@ -497,14 +515,15 @@ static void ilk_audio_codec_disable(struct intel_encoder *intel_encoder)
I915_WRITE(aud_cntrl_st2, tmp);
}
static void ilk_audio_codec_enable(struct drm_connector *connector,
struct intel_encoder *intel_encoder,
const struct drm_display_mode *adjusted_mode)
static void ilk_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
enum port port = intel_encoder->port;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_connector *connector = conn_state->connector;
enum pipe pipe = crtc->pipe;
enum port port = encoder->port;
uint8_t *eld = connector->eld;
uint32_t tmp, eldv;
int len, i;
@ -568,36 +587,36 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
if (intel_crtc_has_dp_encoder(intel_crtc->config))
if (intel_crtc_has_dp_encoder(crtc_state))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
else
tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
tmp |= audio_config_hdmi_pixel_clock(crtc_state);
I915_WRITE(aud_config, tmp);
}
/**
* intel_audio_codec_enable - Enable the audio codec for HD audio
* @intel_encoder: encoder on which to enable audio
* @encoder: encoder on which to enable audio
* @crtc_state: pointer to the current crtc state.
* @conn_state: pointer to the current connector state.
*
* The enable sequences may only be performed after enabling the transcoder and
* port, and after completed link training.
*/
void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
enum port port = intel_encoder->port;
enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_connector *connector = conn_state->connector;
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
connector = conn_state->connector;
if (!connector || !connector->eld[0])
if (!connector->eld[0])
return;
DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@ -609,19 +628,20 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
if (dev_priv->display.audio_codec_enable)
dev_priv->display.audio_codec_enable(connector, intel_encoder,
adjusted_mode);
dev_priv->display.audio_codec_enable(encoder,
crtc_state,
conn_state);
mutex_lock(&dev_priv->av_mutex);
intel_encoder->audio_connector = connector;
encoder->audio_connector = connector;
/* referred in audio callbacks */
dev_priv->av_enc_map[pipe] = intel_encoder;
dev_priv->av_enc_map[pipe] = encoder;
mutex_unlock(&dev_priv->av_mutex);
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
/* audio drivers expect pipe = -1 to indicate Non-MST cases */
if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
pipe = -1;
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
(int) port, (int) pipe);
@ -629,36 +649,41 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
intel_lpe_audio_notify(dev_priv, pipe, port, connector->eld,
crtc_state->port_clock,
intel_encoder->type == INTEL_OUTPUT_DP);
intel_crtc_has_dp_encoder(crtc_state));
}
/**
* intel_audio_codec_disable - Disable the audio codec for HD audio
* @intel_encoder: encoder on which to disable audio
* @encoder: encoder on which to disable audio
* @old_crtc_state: pointer to the old crtc state.
* @old_conn_state: pointer to the old connector state.
*
* The disable sequences must be performed before disabling the transcoder or
* port.
*/
void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
void intel_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
enum port port = intel_encoder->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
if (dev_priv->display.audio_codec_disable)
dev_priv->display.audio_codec_disable(intel_encoder);
dev_priv->display.audio_codec_disable(encoder,
old_crtc_state,
old_conn_state);
mutex_lock(&dev_priv->av_mutex);
intel_encoder->audio_connector = NULL;
encoder->audio_connector = NULL;
dev_priv->av_enc_map[pipe] = NULL;
mutex_unlock(&dev_priv->av_mutex);
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
/* audio drivers expect pipe = -1 to indicate Non-MST cases */
if (intel_encoder->type != INTEL_OUTPUT_DP_MST)
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
pipe = -1;
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
(int) port, (int) pipe);
@ -793,10 +818,9 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
int pipe, int rate)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_encoder *intel_encoder;
struct intel_crtc *crtc;
struct drm_display_mode *adjusted_mode;
struct i915_audio_component *acomp = dev_priv->audio_component;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
int err = 0;
if (!HAS_DDI(dev_priv))
@ -806,23 +830,19 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
mutex_lock(&dev_priv->av_mutex);
/* 1. get the pipe */
intel_encoder = get_saved_enc(dev_priv, port, pipe);
if (!intel_encoder || !intel_encoder->base.crtc) {
encoder = get_saved_enc(dev_priv, port, pipe);
if (!encoder || !encoder->base.crtc) {
DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
err = -ENODEV;
goto unlock;
}
/* pipe passed from the audio driver will be -1 for Non-MST case */
crtc = to_intel_crtc(intel_encoder->base.crtc);
pipe = crtc->pipe;
adjusted_mode = &crtc->config->base.adjusted_mode;
crtc = to_intel_crtc(encoder->base.crtc);
/* port must be valid now, otherwise the pipe will be invalid */
acomp->aud_sample_rate[port] = rate;
hsw_audio_config_update(crtc, port, adjusted_mode);
hsw_audio_config_update(encoder, crtc->config);
unlock:
mutex_unlock(&dev_priv->av_mutex);

View File

@ -1234,6 +1234,30 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
info->hdmi_level_shift = hdmi_level_shift;
}
if (bdb_version >= 204) {
int max_tmds_clock;
switch (child->hdmi_max_data_rate) {
default:
MISSING_CASE(child->hdmi_max_data_rate);
/* fall through */
case HDMI_MAX_DATA_RATE_PLATFORM:
max_tmds_clock = 0;
break;
case HDMI_MAX_DATA_RATE_297:
max_tmds_clock = 297000;
break;
case HDMI_MAX_DATA_RATE_165:
max_tmds_clock = 165000;
break;
}
if (max_tmds_clock)
DRM_DEBUG_KMS("VBT HDMI max TMDS clock for port %c: %d kHz\n",
port_name(port), max_tmds_clock);
info->max_tmds_clock = max_tmds_clock;
}
/* Parse the I_boost config for SKL and above */
if (bdb_version >= 196 && child->iboost) {
info->dp_boost_level = translate_iboost(child->dp_iboost_level);

View File

@ -123,7 +123,7 @@ static void intel_breadcrumbs_fake_irq(struct timer_list *t)
*/
spin_lock_irq(&b->irq_lock);
if (!__intel_breadcrumbs_wakeup(b))
if (b->irq_armed && !__intel_breadcrumbs_wakeup(b))
__intel_engine_disarm_breadcrumbs(engine);
spin_unlock_irq(&b->irq_lock);
if (!b->irq_armed)
@ -145,6 +145,14 @@ static void intel_breadcrumbs_fake_irq(struct timer_list *t)
static void irq_enable(struct intel_engine_cs *engine)
{
/*
* FIXME: Ideally we want this on the API boundary, but for the
* sake of testing with mock breadcrumbs (no HW so unable to
* enable irqs) we place it deep within the bowels, at the point
* of no return.
*/
GEM_BUG_ON(!intel_irqs_enabled(engine->i915));
/* Enabling the IRQ may miss the generation of the interrupt, but
* we still need to force the barrier before reading the seqno,
* just in case.
@ -171,15 +179,37 @@ void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
lockdep_assert_held(&b->irq_lock);
GEM_BUG_ON(b->irq_wait);
GEM_BUG_ON(!b->irq_armed);
if (b->irq_enabled) {
GEM_BUG_ON(!b->irq_enabled);
if (!--b->irq_enabled)
irq_disable(engine);
b->irq_enabled = false;
}
b->irq_armed = false;
}
void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
spin_lock_irq(&b->irq_lock);
if (!b->irq_enabled++)
irq_enable(engine);
GEM_BUG_ON(!b->irq_enabled); /* no overflow! */
spin_unlock_irq(&b->irq_lock);
}
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
spin_lock_irq(&b->irq_lock);
GEM_BUG_ON(!b->irq_enabled); /* no underflow! */
if (!--b->irq_enabled)
irq_disable(engine);
spin_unlock_irq(&b->irq_lock);
}
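The new pin/unpin helpers above turn breadcrumb interrupt ownership into a plain reference count shared with the waiter path: hardware is only touched on the 0 to 1 and 1 to 0 transitions. A generic standalone sketch of that pattern (the hook names are stand-ins; the real code additionally holds b->irq_lock and checks for over/underflow with GEM_BUG_ON):

#include <stdio.h>

static void hw_irq_enable(void)  { puts("irq enabled"); }   /* stand-in hook */
static void hw_irq_disable(void) { puts("irq disabled"); }  /* stand-in hook */

static unsigned int irq_refcount;

static void pin_irq(void)
{
    if (!irq_refcount++)
        hw_irq_enable();    /* only the 0 -> 1 transition touches HW */
}

static void unpin_irq(void)
{
    if (!--irq_refcount)
        hw_irq_disable();   /* only the 1 -> 0 transition touches HW */
}

int main(void)
{
    pin_irq();      /* enables */
    pin_irq();      /* no HW access */
    unpin_irq();    /* no HW access */
    unpin_irq();    /* disables */
    return 0;
}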
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
@ -197,7 +227,8 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
spin_lock(&b->irq_lock);
first = fetch_and_zero(&b->irq_wait);
__intel_engine_disarm_breadcrumbs(engine);
if (b->irq_armed)
__intel_engine_disarm_breadcrumbs(engine);
spin_unlock(&b->irq_lock);
rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
@ -241,6 +272,7 @@ static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
struct intel_engine_cs *engine =
container_of(b, struct intel_engine_cs, breadcrumbs);
struct drm_i915_private *i915 = engine->i915;
bool enabled;
lockdep_assert_held(&b->irq_lock);
if (b->irq_armed)
@ -252,7 +284,6 @@ static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
* the irq.
*/
b->irq_armed = true;
GEM_BUG_ON(b->irq_enabled);
if (I915_SELFTEST_ONLY(b->mock)) {
/* For our mock objects we want to avoid interaction
@ -273,14 +304,15 @@ static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
*/
/* No interrupts? Kick the waiter every jiffie! */
if (intel_irqs_enabled(i915)) {
if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
irq_enable(engine);
b->irq_enabled = true;
enabled = false;
if (!b->irq_enabled++ &&
!test_bit(engine->id, &i915->gpu_error.test_irq_rings)) {
irq_enable(engine);
enabled = true;
}
enable_fake_irq(b);
return true;
return enabled;
}
static inline struct intel_wait *to_wait(struct rb_node *node)

View File

@ -437,13 +437,45 @@ static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
return 200000;
}
static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
{
if (IS_VALLEYVIEW(dev_priv)) {
if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
return 2;
else if (cdclk >= 266667)
return 1;
else
return 0;
} else {
/*
* Specs are full of misinformation, but testing on actual
* hardware has shown that we just need to write the desired
* CCK divider into the Punit register.
*/
return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
}
}
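For the Cherryview branch the written value is just the CCK divider. Purely as an illustration of the arithmetic, with an assumed hpll_freq of 1600000 kHz rather than a claim about any particular part: a target cdclk of 320000 kHz gives DIV_ROUND_CLOSEST(1600000 << 1, 320000) - 1 = 10 - 1 = 9.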
static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
u32 val;
cdclk_state->vco = vlv_get_hpll_vco(dev_priv);
cdclk_state->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
CCK_DISPLAY_CLOCK_CONTROL,
cdclk_state->vco);
mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
mutex_unlock(&dev_priv->pcu_lock);
if (IS_VALLEYVIEW(dev_priv))
cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK) >>
DSPFREQGUAR_SHIFT;
else
cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK_CHV) >>
DSPFREQGUAR_SHIFT_CHV;
}
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
@ -486,7 +518,19 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
{
int cdclk = cdclk_state->cdclk;
u32 val, cmd;
u32 val, cmd = cdclk_state->voltage_level;
switch (cdclk) {
case 400000:
case 333333:
case 320000:
case 266667:
case 200000:
break;
default:
MISSING_CASE(cdclk);
return;
}
/* There are cases where we can end up here with power domains
* off and a CDCLK frequency other than the minimum, like when
@ -496,13 +540,6 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
cmd = 2;
else if (cdclk == 266667)
cmd = 1;
else
cmd = 0;
mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val &= ~DSPFREQGUAR_MASK;
@ -562,7 +599,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
{
int cdclk = cdclk_state->cdclk;
u32 val, cmd;
u32 val, cmd = cdclk_state->voltage_level;
switch (cdclk) {
case 333333:
@ -583,13 +620,6 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
/*
* Specs are full of misinformation, but testing on actual
* hardware has shown that we just need to write the desired
* CCK divider into the Punit register.
*/
cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val &= ~DSPFREQGUAR_MASK_CHV;
@ -621,6 +651,21 @@ static int bdw_calc_cdclk(int min_cdclk)
return 337500;
}
static u8 bdw_calc_voltage_level(int cdclk)
{
switch (cdclk) {
default:
case 337500:
return 2;
case 450000:
return 0;
case 540000:
return 1;
case 675000:
return 3;
}
}
static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
@ -639,13 +684,20 @@ static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_state->cdclk = 337500;
else
cdclk_state->cdclk = 675000;
/*
* Can't read this out :( Let's assume it's
* at least what the CDCLK frequency requires.
*/
cdclk_state->voltage_level =
bdw_calc_voltage_level(cdclk_state->cdclk);
}
static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
{
int cdclk = cdclk_state->cdclk;
uint32_t val, data;
uint32_t val;
int ret;
if (WARN((I915_READ(LCPLL_CTL) &
@ -681,25 +733,21 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
val &= ~LCPLL_CLK_FREQ_MASK;
switch (cdclk) {
default:
MISSING_CASE(cdclk);
/* fall through */
case 337500:
val |= LCPLL_CLK_FREQ_337_5_BDW;
break;
case 450000:
val |= LCPLL_CLK_FREQ_450;
data = 0;
break;
case 540000:
val |= LCPLL_CLK_FREQ_54O_BDW;
data = 1;
break;
case 337500:
val |= LCPLL_CLK_FREQ_337_5_BDW;
data = 2;
break;
case 675000:
val |= LCPLL_CLK_FREQ_675_BDW;
data = 3;
break;
default:
WARN(1, "invalid cdclk frequency\n");
return;
}
I915_WRITE(LCPLL_CTL, val);
@ -713,16 +761,13 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
DRM_ERROR("Switching back to LCPLL failed\n");
mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
cdclk_state->voltage_level);
mutex_unlock(&dev_priv->pcu_lock);
I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
intel_update_cdclk(dev_priv);
WARN(cdclk != dev_priv->cdclk.hw.cdclk,
"cdclk requested %d kHz but got %d kHz\n",
cdclk, dev_priv->cdclk.hw.cdclk);
}
static int skl_calc_cdclk(int min_cdclk, int vco)
@ -748,6 +793,24 @@ static int skl_calc_cdclk(int min_cdclk, int vco)
}
}
static u8 skl_calc_voltage_level(int cdclk)
{
switch (cdclk) {
default:
case 308571:
case 337500:
return 0;
case 450000:
case 432000:
return 1;
case 540000:
return 2;
case 617143:
case 675000:
return 3;
}
}
static void skl_dpll0_update(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
@ -798,7 +861,7 @@ static void skl_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_state->cdclk = cdclk_state->ref;
if (cdclk_state->vco == 0)
return;
goto out;
cdctl = I915_READ(CDCLK_CTL);
@ -839,6 +902,14 @@ static void skl_get_cdclk(struct drm_i915_private *dev_priv,
break;
}
}
out:
/*
* Can't read this out :( Let's assume it's
* at least what the CDCLK frequency requires.
*/
cdclk_state->voltage_level =
skl_calc_voltage_level(cdclk_state->cdclk);
}
/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
@ -923,11 +994,9 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
u32 freq_select, pcu_ack;
u32 freq_select;
int ret;
WARN_ON((cdclk == 24000) != (vco == 0));
mutex_lock(&dev_priv->pcu_lock);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
@ -942,25 +1011,24 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
/* set CDCLK_CTL */
switch (cdclk) {
default:
WARN_ON(cdclk != dev_priv->cdclk.hw.ref);
WARN_ON(vco != 0);
/* fall through */
case 308571:
case 337500:
freq_select = CDCLK_FREQ_337_308;
break;
case 450000:
case 432000:
freq_select = CDCLK_FREQ_450_432;
pcu_ack = 1;
break;
case 540000:
freq_select = CDCLK_FREQ_540;
pcu_ack = 2;
break;
case 308571:
case 337500:
default:
freq_select = CDCLK_FREQ_337_308;
pcu_ack = 0;
break;
case 617143:
case 675000:
freq_select = CDCLK_FREQ_675_617;
pcu_ack = 3;
break;
}
@ -976,7 +1044,8 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
/* inform PCU of the change */
mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
cdclk_state->voltage_level);
mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
@ -995,6 +1064,8 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
goto sanitize;
intel_update_cdclk(dev_priv);
intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
/* Is PLL enabled and locked ? */
if (dev_priv->cdclk.hw.vco == 0 ||
dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.ref)
@ -1055,6 +1126,7 @@ void skl_init_cdclk(struct drm_i915_private *dev_priv)
if (cdclk_state.vco == 0)
cdclk_state.vco = 8100000;
cdclk_state.cdclk = skl_calc_cdclk(0, cdclk_state.vco);
cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
skl_set_cdclk(dev_priv, &cdclk_state);
}
@ -1072,6 +1144,7 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = cdclk_state.ref;
cdclk_state.vco = 0;
cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
skl_set_cdclk(dev_priv, &cdclk_state);
}
@ -1100,6 +1173,11 @@ static int glk_calc_cdclk(int min_cdclk)
return 79200;
}
static u8 bxt_calc_voltage_level(int cdclk)
{
return DIV_ROUND_UP(cdclk, 25000);
}
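As a quick sanity check of that mapping, assuming the usual round-up semantics of DIV_ROUND_UP: 79200 kHz maps to level 4, 316800 kHz to level 13 and 384000 kHz to level 16.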
static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
int ratio;
@ -1110,6 +1188,7 @@ static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
switch (cdclk) {
default:
MISSING_CASE(cdclk);
/* fall through */
case 144000:
case 288000:
case 384000:
@ -1134,6 +1213,7 @@ static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
switch (cdclk) {
default:
MISSING_CASE(cdclk);
/* fall through */
case 79200:
case 158400:
case 316800:
@ -1174,7 +1254,7 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_state->cdclk = cdclk_state->ref;
if (cdclk_state->vco == 0)
return;
goto out;
divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
@ -1198,6 +1278,14 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
}
cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
out:
/*
* Can't read this out :( Let's assume it's
* at least what the CDCLK frequency requires.
*/
cdclk_state->voltage_level =
bxt_calc_voltage_level(cdclk_state->cdclk);
}
static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
@ -1246,24 +1334,22 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
/* cdclk = vco / 2 / div{1,1.5,2,4} */
switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
case 8:
divider = BXT_CDCLK_CD2X_DIV_SEL_4;
break;
case 4:
divider = BXT_CDCLK_CD2X_DIV_SEL_2;
default:
WARN_ON(cdclk != dev_priv->cdclk.hw.ref);
WARN_ON(vco != 0);
/* fall through */
case 2:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
break;
case 3:
WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
break;
case 2:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
case 4:
divider = BXT_CDCLK_CD2X_DIV_SEL_2;
break;
default:
WARN_ON(cdclk != dev_priv->cdclk.hw.ref);
WARN_ON(vco != 0);
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
case 8:
divider = BXT_CDCLK_CD2X_DIV_SEL_4;
break;
}
@ -1302,7 +1388,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
DIV_ROUND_UP(cdclk, 25000));
cdclk_state->voltage_level);
mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
@ -1319,6 +1405,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
u32 cdctl, expected;
intel_update_cdclk(dev_priv);
intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
if (dev_priv->cdclk.hw.vco == 0 ||
dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.ref)
@ -1394,6 +1481,7 @@ void bxt_init_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = bxt_calc_cdclk(0);
cdclk_state.vco = bxt_de_pll_vco(dev_priv, cdclk_state.cdclk);
}
cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
bxt_set_cdclk(dev_priv, &cdclk_state);
}
@ -1411,6 +1499,7 @@ void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = cdclk_state.ref;
cdclk_state.vco = 0;
cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
bxt_set_cdclk(dev_priv, &cdclk_state);
}
@ -1425,6 +1514,19 @@ static int cnl_calc_cdclk(int min_cdclk)
return 168000;
}
static u8 cnl_calc_voltage_level(int cdclk)
{
switch (cdclk) {
default:
case 168000:
return 0;
case 336000:
return 1;
case 528000:
return 2;
}
}
static void cnl_cdclk_pll_update(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
@ -1458,7 +1560,7 @@ static void cnl_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_state->cdclk = cdclk_state->ref;
if (cdclk_state->vco == 0)
return;
goto out;
divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
@ -1475,6 +1577,14 @@ static void cnl_get_cdclk(struct drm_i915_private *dev_priv,
}
cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
out:
/*
* Can't read this out :( Let's assume it's
* at least what the CDCLK frequency requires.
*/
cdclk_state->voltage_level =
cnl_calc_voltage_level(cdclk_state->cdclk);
}
static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
@ -1515,7 +1625,7 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
u32 val, divider, pcu_ack;
u32 val, divider;
int ret;
mutex_lock(&dev_priv->pcu_lock);
@ -1532,30 +1642,15 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
/* cdclk = vco / 2 / div{1,2} */
switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
case 4:
divider = BXT_CDCLK_CD2X_DIV_SEL_2;
break;
case 2:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
break;
default:
WARN_ON(cdclk != dev_priv->cdclk.hw.ref);
WARN_ON(vco != 0);
/* fall through */
case 2:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
break;
}
switch (cdclk) {
case 528000:
pcu_ack = 2;
break;
case 336000:
pcu_ack = 1;
break;
case 168000:
default:
pcu_ack = 0;
case 4:
divider = BXT_CDCLK_CD2X_DIV_SEL_2;
break;
}
@ -1576,10 +1671,17 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
/* inform PCU of the change */
mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
cdclk_state->voltage_level);
mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
/*
* Can't read out the voltage level :(
* Let's just assume everything is as expected.
*/
dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
}
static int cnl_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
@ -1592,6 +1694,7 @@ static int cnl_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
switch (cdclk) {
default:
MISSING_CASE(cdclk);
/* fall through */
case 168000:
case 336000:
ratio = dev_priv->cdclk.hw.ref == 19200 ? 35 : 28;
@ -1609,6 +1712,7 @@ static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv)
u32 cdctl, expected;
intel_update_cdclk(dev_priv);
intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
if (dev_priv->cdclk.hw.vco == 0 ||
dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.ref)
@ -1668,6 +1772,7 @@ void cnl_init_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = cnl_calc_cdclk(0);
cdclk_state.vco = cnl_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
cnl_set_cdclk(dev_priv, &cdclk_state);
}
@ -1685,22 +1790,48 @@ void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = cdclk_state.ref;
cdclk_state.vco = 0;
cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
cnl_set_cdclk(dev_priv, &cdclk_state);
}
/**
* intel_cdclk_state_compare - Determine if two CDCLK states differ
* intel_cdclk_needs_modeset - Determine if two CDCLK states require a modeset on all pipes
* @a: first CDCLK state
* @b: second CDCLK state
*
* Returns:
* True if the CDCLK states are identical, false if they differ.
* True if the CDCLK states require pipes to be off during reprogramming, false if not.
*/
bool intel_cdclk_state_compare(const struct intel_cdclk_state *a,
bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b)
{
return memcmp(a, b, sizeof(*a)) == 0;
return a->cdclk != b->cdclk ||
a->vco != b->vco ||
a->ref != b->ref;
}
/**
* intel_cdclk_changed - Determine if two CDCLK states are different
* @a: first CDCLK state
* @b: second CDCLK state
*
* Returns:
* True if the CDCLK states don't match, false if they do.
*/
bool intel_cdclk_changed(const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b)
{
return intel_cdclk_needs_modeset(a, b) ||
a->voltage_level != b->voltage_level;
}
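The split matters for callers: a state where only voltage_level differs still counts as changed (so intel_set_cdclk() below reprograms), but not as needing a modeset, so the pipes can stay running. A trimmed-down standalone mirror of the two checks, using a simplified stand-in for intel_cdclk_state:

#include <stdbool.h>
#include <stdio.h>

struct cdclk_state { int cdclk, vco, ref, voltage_level; };

static bool needs_modeset(const struct cdclk_state *a, const struct cdclk_state *b)
{
    return a->cdclk != b->cdclk || a->vco != b->vco || a->ref != b->ref;
}

static bool changed(const struct cdclk_state *a, const struct cdclk_state *b)
{
    return needs_modeset(a, b) || a->voltage_level != b->voltage_level;
}

int main(void)
{
    struct cdclk_state before = { 337500, 8100000, 24000, 0 };
    struct cdclk_state after = before;

    after.voltage_level = 2;    /* only the voltage requirement moved */
    printf("needs_modeset=%d changed=%d\n",
           needs_modeset(&before, &after), changed(&before, &after));
    return 0;
}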
void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
const char *context)
{
DRM_DEBUG_DRIVER("%s %d kHz, VCO %d kHz, ref %d kHz, voltage level %d\n",
context, cdclk_state->cdclk, cdclk_state->vco,
cdclk_state->ref, cdclk_state->voltage_level);
}
/**
@ -1714,29 +1845,28 @@ bool intel_cdclk_state_compare(const struct intel_cdclk_state *a,
void intel_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
{
if (intel_cdclk_state_compare(&dev_priv->cdclk.hw, cdclk_state))
if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state))
return;
if (WARN_ON_ONCE(!dev_priv->display.set_cdclk))
return;
DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz, VCO %d kHz, ref %d kHz\n",
cdclk_state->cdclk, cdclk_state->vco,
cdclk_state->ref);
intel_dump_cdclk_state(cdclk_state, "Changing CDCLK to");
dev_priv->display.set_cdclk(dev_priv, cdclk_state);
if (WARN(intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state),
"cdclk state doesn't match!\n")) {
intel_dump_cdclk_state(&dev_priv->cdclk.hw, "[hw state]");
intel_dump_cdclk_state(cdclk_state, "[sw state]");
}
}
static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
int pixel_rate)
{
if (INTEL_GEN(dev_priv) >= 10)
/*
* FIXME: Switch to DIV_ROUND_UP(pixel_rate, 2)
* once DDI clock voltage requirements are
* handled correctly.
*/
return pixel_rate;
return DIV_ROUND_UP(pixel_rate, 2);
else if (IS_GEMINILAKE(dev_priv))
/*
* FIXME: Avoid using a pixel clock that is more than 99% of the cdclk
@ -1829,6 +1959,43 @@ static int intel_compute_min_cdclk(struct drm_atomic_state *state)
return min_cdclk;
}
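With voltage levels tracked explicitly, the gen10+ branch of intel_pixel_rate_to_cdclk() above returns to the usual 2:1 ratio; as illustrative arithmetic only, a 594000 kHz pixel rate now requires a CDCLK of at least DIV_ROUND_UP(594000, 2) = 297000 kHz, where previously the full 594000 kHz was demanded.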
/*
* Note that this function assumes that 0 is
* the lowest voltage value, and higher values
* correspond to increasingly higher voltages.
*
* Should that relationship no longer hold on
* future platforms this code will need to be
* adjusted.
*/
static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
u8 min_voltage_level;
int i;
enum pipe pipe;
memcpy(state->min_voltage_level, dev_priv->min_voltage_level,
sizeof(state->min_voltage_level));
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
if (crtc_state->base.enable)
state->min_voltage_level[i] =
crtc_state->min_voltage_level;
else
state->min_voltage_level[i] = 0;
}
min_voltage_level = 0;
for_each_pipe(dev_priv, pipe)
min_voltage_level = max(state->min_voltage_level[pipe],
min_voltage_level);
return min_voltage_level;
}
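Put differently, the result is just the maximum of the per-pipe requirements, with disabled pipes contributing level 0; for example, pipes requiring levels 0, 2 and 1 yield a device-wide minimum voltage level of 2.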
static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
@ -1842,11 +2009,15 @@ static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
cdclk = vlv_calc_cdclk(dev_priv, min_cdclk);
intel_state->cdclk.logical.cdclk = cdclk;
intel_state->cdclk.logical.voltage_level =
vlv_calc_voltage_level(dev_priv, cdclk);
if (!intel_state->active_crtcs) {
cdclk = vlv_calc_cdclk(dev_priv, 0);
intel_state->cdclk.actual.cdclk = cdclk;
intel_state->cdclk.actual.voltage_level =
vlv_calc_voltage_level(dev_priv, cdclk);
} else {
intel_state->cdclk.actual =
intel_state->cdclk.logical;
@ -1871,11 +2042,15 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
cdclk = bdw_calc_cdclk(min_cdclk);
intel_state->cdclk.logical.cdclk = cdclk;
intel_state->cdclk.logical.voltage_level =
bdw_calc_voltage_level(cdclk);
if (!intel_state->active_crtcs) {
cdclk = bdw_calc_cdclk(0);
intel_state->cdclk.actual.cdclk = cdclk;
intel_state->cdclk.actual.voltage_level =
bdw_calc_voltage_level(cdclk);
} else {
intel_state->cdclk.actual =
intel_state->cdclk.logical;
@ -1906,12 +2081,16 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
intel_state->cdclk.logical.voltage_level =
skl_calc_voltage_level(cdclk);
if (!intel_state->active_crtcs) {
cdclk = skl_calc_cdclk(0, vco);
intel_state->cdclk.actual.vco = vco;
intel_state->cdclk.actual.cdclk = cdclk;
intel_state->cdclk.actual.voltage_level =
skl_calc_voltage_level(cdclk);
} else {
intel_state->cdclk.actual =
intel_state->cdclk.logical;
@ -1940,6 +2119,8 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
intel_state->cdclk.logical.voltage_level =
bxt_calc_voltage_level(cdclk);
if (!intel_state->active_crtcs) {
if (IS_GEMINILAKE(dev_priv)) {
@ -1952,6 +2133,8 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.actual.vco = vco;
intel_state->cdclk.actual.cdclk = cdclk;
intel_state->cdclk.actual.voltage_level =
bxt_calc_voltage_level(cdclk);
} else {
intel_state->cdclk.actual =
intel_state->cdclk.logical;
@ -1975,6 +2158,9 @@ static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
intel_state->cdclk.logical.voltage_level =
max(cnl_calc_voltage_level(cdclk),
cnl_compute_min_voltage_level(intel_state));
if (!intel_state->active_crtcs) {
cdclk = cnl_calc_cdclk(0);
@ -1982,6 +2168,8 @@ static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk.actual.vco = vco;
intel_state->cdclk.actual.cdclk = cdclk;
intel_state->cdclk.actual.voltage_level =
cnl_calc_voltage_level(cdclk);
} else {
intel_state->cdclk.actual =
intel_state->cdclk.logical;
@ -1995,12 +2183,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
int max_cdclk_freq = dev_priv->max_cdclk_freq;
if (INTEL_GEN(dev_priv) >= 10)
/*
* FIXME: Allow '2 * max_cdclk_freq'
* once DDI clock voltage requirements are
* handled correctly.
*/
return max_cdclk_freq;
return 2 * max_cdclk_freq;
else if (IS_GEMINILAKE(dev_priv))
/*
* FIXME: Limiting to 99% as a temporary workaround. See
@ -2099,10 +2282,6 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv)
{
dev_priv->display.get_cdclk(dev_priv, &dev_priv->cdclk.hw);
DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
dev_priv->cdclk.hw.cdclk, dev_priv->cdclk.hw.vco,
dev_priv->cdclk.hw.ref);
/*
* 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
* Programmng [sic] note: bit[9:2] should be programmed to the number

View File

@ -370,7 +370,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
*/
if (IS_HASWELL(dev_priv) && intel_crtc_state->ips_enabled &&
(intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
hsw_disable_ips(intel_crtc);
hsw_disable_ips(intel_crtc_state);
reenable_ips = true;
}
@ -380,7 +380,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
i9xx_load_luts(crtc_state);
if (reenable_ips)
hsw_enable_ips(intel_crtc);
hsw_enable_ips(intel_crtc_state);
}
static void bdw_load_degamma_lut(struct drm_crtc_state *state)

View File

@ -119,6 +119,8 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
static void intel_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
@ -217,11 +219,9 @@ static void hsw_disable_crt(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct drm_crtc *crtc = old_crtc_state->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
WARN_ON(!intel_crtc->config->has_pch_encoder);
WARN_ON(!old_crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}
@ -245,46 +245,42 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
}
static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
WARN_ON(!intel_crtc->config->has_pch_encoder);
WARN_ON(!crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}
static void hsw_pre_enable_crt(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum pipe pipe = crtc->pipe;
WARN_ON(!intel_crtc->config->has_pch_encoder);
WARN_ON(!crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
dev_priv->display.fdi_link_train(intel_crtc, pipe_config);
dev_priv->display.fdi_link_train(crtc, crtc_state);
}
static void hsw_enable_crt(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum pipe pipe = crtc->pipe;
WARN_ON(!intel_crtc->config->has_pch_encoder);
WARN_ON(!crtc_state->has_pch_encoder);
intel_crt_set_dpms(encoder, pipe_config, DRM_MODE_DPMS_ON);
intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON);
intel_wait_for_vblank(dev_priv, pipe);
intel_wait_for_vblank(dev_priv, pipe);
@ -293,10 +289,10 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
}
static void intel_enable_crt(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
intel_crt_set_dpms(encoder, pipe_config, DRM_MODE_DPMS_ON);
intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON);
}
static enum drm_mode_status


@ -37,8 +37,8 @@
#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
#define I915_CSR_CNL "i915/cnl_dmc_ver1_04.bin"
#define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
#define I915_CSR_CNL "i915/cnl_dmc_ver1_06.bin"
#define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 6)
#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
MODULE_FIRMWARE(I915_CSR_KBL);
@ -198,6 +198,7 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
si = bxt_stepping_info;
} else {
size = 0;
si = NULL;
}
if (INTEL_REVID(dev_priv) < size)


@ -492,24 +492,6 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
{ 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
};
enum port intel_ddi_get_encoder_port(struct intel_encoder *encoder)
{
switch (encoder->type) {
case INTEL_OUTPUT_DP_MST:
return enc_to_mst(&encoder->base)->primary->port;
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_UNKNOWN:
return enc_to_dig_port(&encoder->base)->port;
case INTEL_OUTPUT_ANALOG:
return PORT_E;
default:
MISSING_CASE(encoder->type);
return PORT_A;
}
}
static const struct ddi_buf_trans *
bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
@ -811,31 +793,24 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
* values in advance. This function programs the correct values for
* DP/eDP/FDI use cases.
*/
static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
int i, n_entries;
enum port port = intel_ddi_get_encoder_port(encoder);
enum port port = encoder->port;
const struct ddi_buf_trans *ddi_translations;
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port,
&n_entries);
break;
case INTEL_OUTPUT_DP:
ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port,
&n_entries);
break;
case INTEL_OUTPUT_ANALOG:
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
ddi_translations = intel_ddi_get_buf_trans_fdi(dev_priv,
&n_entries);
break;
default:
MISSING_CASE(encoder->type);
return;
}
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port,
&n_entries);
else
ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port,
&n_entries);
/* If we're boosting the current, set bit 31 of trans1 */
if (IS_GEN9_BC(dev_priv) &&
@ -861,7 +836,7 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
int n_entries;
enum port port = intel_ddi_get_encoder_port(encoder);
enum port port = encoder->port;
const struct ddi_buf_trans *ddi_translations;
ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
@ -937,7 +912,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
intel_prepare_dp_ddi_buffers(encoder);
intel_prepare_dp_ddi_buffers(encoder, crtc_state);
}
/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
@ -1448,19 +1423,16 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
ddi_dotclock_get(pipe_config);
}
static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
enum intel_dpll_id pll_id)
static int bxt_calc_pll_link(struct intel_crtc_state *crtc_state)
{
struct intel_shared_dpll *pll;
struct intel_dpll_hw_state *state;
struct dpll clock;
/* For DDI ports we always use a shared PLL. */
if (WARN_ON(pll_id == DPLL_ID_PRIVATE))
if (WARN_ON(!crtc_state->shared_dpll))
return 0;
pll = &dev_priv->shared_dplls[pll_id];
state = &pll->state.hw_state;
state = &crtc_state->dpll_hw_state;
clock.m1 = 2;
clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22;
@ -1474,19 +1446,15 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
}
static void bxt_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
enum intel_dpll_id pll_id = port;
pipe_config->port_clock = bxt_calc_pll_link(dev_priv, pll_id);
pipe_config->port_clock = bxt_calc_pll_link(pipe_config);
ddi_dotclock_get(pipe_config);
}
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
static void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@ -1504,33 +1472,34 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
int type = encoder->type;
uint32_t temp;
u32 temp;
if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
WARN_ON(transcoder_is_dsi(cpu_transcoder));
if (!intel_crtc_has_dp_encoder(crtc_state))
return;
temp = TRANS_MSA_SYNC_CLK;
switch (crtc_state->pipe_bpp) {
case 18:
temp |= TRANS_MSA_6_BPC;
break;
case 24:
temp |= TRANS_MSA_8_BPC;
break;
case 30:
temp |= TRANS_MSA_10_BPC;
break;
case 36:
temp |= TRANS_MSA_12_BPC;
break;
default:
BUG();
}
I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
WARN_ON(transcoder_is_dsi(cpu_transcoder));
temp = TRANS_MSA_SYNC_CLK;
switch (crtc_state->pipe_bpp) {
case 18:
temp |= TRANS_MSA_6_BPC;
break;
case 24:
temp |= TRANS_MSA_8_BPC;
break;
case 30:
temp |= TRANS_MSA_10_BPC;
break;
case 36:
temp |= TRANS_MSA_12_BPC;
break;
default:
MISSING_CASE(crtc_state->pipe_bpp);
break;
}
I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
@ -1540,6 +1509,7 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
uint32_t temp;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (state == true)
temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
@ -1555,8 +1525,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = intel_ddi_get_encoder_port(encoder);
int type = encoder->type;
enum port port = encoder->port;
uint32_t temp;
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
@ -1611,7 +1580,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
}
}
if (type == INTEL_OUTPUT_HDMI) {
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
if (crtc_state->has_hdmi_sink)
temp |= TRANS_DDI_MODE_SELECT_HDMI;
else
@ -1621,19 +1590,15 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
temp |= TRANS_DDI_HDMI_SCRAMBLING_MASK;
if (crtc_state->hdmi_high_tmds_clock_ratio)
temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
} else if (type == INTEL_OUTPUT_ANALOG) {
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
temp |= TRANS_DDI_MODE_SELECT_FDI;
temp |= (crtc_state->fdi_lanes - 1) << 1;
} else if (type == INTEL_OUTPUT_DP ||
type == INTEL_OUTPUT_EDP) {
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
} else if (type == INTEL_OUTPUT_DP_MST) {
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
} else {
WARN(1, "Invalid encoder type %d for pipe %c\n",
encoder->type, pipe_name(pipe));
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
}
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
@ -1656,7 +1621,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder = intel_connector->encoder;
int type = intel_connector->base.connector_type;
enum port port = intel_ddi_get_encoder_port(encoder);
enum port port = encoder->port;
enum pipe pipe = 0;
enum transcoder cpu_transcoder;
uint32_t tmp;
@ -1715,9 +1680,9 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_ddi_get_encoder_port(encoder);
enum port port = encoder->port;
enum pipe p;
u32 tmp;
int i;
bool ret;
if (!intel_display_power_get_if_enabled(dev_priv,
@ -1752,15 +1717,17 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
goto out;
}
for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
for_each_pipe(dev_priv, p) {
enum transcoder cpu_transcoder = (enum transcoder) p;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
TRANS_DDI_MODE_SELECT_DP_MST)
goto out;
*pipe = i;
*pipe = p;
ret = true;
goto out;
@ -1800,7 +1767,7 @@ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = intel_ddi_get_encoder_port(encoder);
enum port port = encoder->port;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
@ -1836,8 +1803,8 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
int level, enum intel_output_type type)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
enum port port = intel_dig_port->port;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
uint8_t iboost;
if (type == INTEL_OUTPUT_HDMI)
@ -1939,8 +1906,8 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
int level, enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
const struct cnl_ddi_buf_trans *ddi_translations;
enum port port = encoder->port;
int n_entries, ln;
u32 val;
@ -2003,7 +1970,7 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
int level, enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
enum port port = encoder->port;
int width, rate, ln;
u32 val;
@ -2122,7 +2089,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
const struct intel_shared_dpll *pll)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
enum port port = encoder->port;
uint32_t val;
if (WARN_ON(!pll))
@ -2161,7 +2128,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
static void intel_ddi_clk_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
enum port port = encoder->port;
if (IS_CANNONLAKE(dev_priv))
I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
@ -2179,7 +2146,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
enum port port = encoder->port;
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int level = intel_ddi_dp_level(intel_dp);
@ -2200,7 +2167,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_vswing_sequence(encoder, level, encoder->type);
else
intel_prepare_dp_ddi_buffers(encoder);
intel_prepare_dp_ddi_buffers(encoder, crtc_state);
intel_ddi_init_dp_buf_reg(encoder);
if (!is_mst)
@ -2217,7 +2184,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
enum port port = encoder->port;
int level = intel_ddi_hdmi_level(dev_priv, port);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
@ -2249,6 +2216,19 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
/*
* When called from DP MST code:
* - conn_state will be NULL
* - encoder will be the main encoder (ie. mst->primary)
* - the main connector associated with this port
* won't be active or linked to a crtc
* - crtc_state will be the state of the first stream to
* be activated on this port, and it may not be the same
* stream that will be deactivated last, but each stream
* should have a state that is identical when it comes to
* the DP link parameters
*/
WARN_ON(crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
@ -2262,7 +2242,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
static void intel_disable_ddi_buf(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
enum port port = encoder->port;
bool wait = false;
u32 val;
@ -2289,12 +2269,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct intel_dp *intel_dp = &dig_port->dp;
/*
* old_crtc_state and old_conn_state are NULL when called from
* DP_MST. The main connector associated with this port is never
* bound to a crtc for MST.
*/
bool is_mst = !old_crtc_state;
bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST);
/*
* Power down sink before disabling the port, otherwise we end
@ -2338,12 +2313,19 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
/*
* old_crtc_state and old_conn_state are NULL when called from
* DP_MST. The main connector associated with this port is never
* bound to a crtc for MST.
* When called from DP MST code:
* - old_conn_state will be NULL
* - encoder will be the main encoder (ie. mst->primary)
* - the main connector associated with this port
* won't be active or linked to a crtc
* - old_crtc_state will be the state of the last stream to
* be deactivated on this port, and it may not be the same
* stream that was activated last, but each stream
* should have a state that is identical when it comes to
* the DP link parameters
*/
if (old_crtc_state &&
intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
intel_ddi_post_disable_hdmi(encoder,
old_crtc_state, old_conn_state);
else
@ -2391,7 +2373,7 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = intel_ddi_get_encoder_port(encoder);
enum port port = encoder->port;
if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
intel_dp_stop_link_train(intel_dp);
@ -2410,7 +2392,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
enum port port = intel_ddi_get_encoder_port(encoder);
enum port port = encoder->port;
intel_hdmi_handle_sink_scrambling(encoder,
conn_state->connector,
@ -2445,7 +2427,8 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
intel_audio_codec_disable(encoder,
old_crtc_state, old_conn_state);
intel_edp_drrs_disable(intel_dp, old_crtc_state);
intel_psr_disable(intel_dp, old_crtc_state);
@ -2457,7 +2440,8 @@ static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
intel_audio_codec_disable(encoder,
old_crtc_state, old_conn_state);
intel_hdmi_handle_sink_scrambling(encoder,
old_conn_state->connector,
@ -2488,7 +2472,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
enum port port = intel_dig_port->port;
enum port port = intel_dig_port->base.port;
uint32_t val;
bool wait = false;
@ -2542,11 +2526,18 @@ bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
return false;
}
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state)
{
if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 2;
}
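/*
 * Hedged sketch, not driver code: this per-CRTC minimum feeds the cdclk
 * code in the cnl_modeset_calc_cdclk() hunk near the top of this diff,
 * where cnl_compute_min_voltage_level() (assumed to return the maximum
 * of the tracked min_voltage_level values) is combined with the level
 * derived from the cdclk itself.
 */
static u8 example_cnl_pick_voltage_level(u8 cdclk_level, u8 ddi_min_level)
{
	/* mirrors max(cnl_calc_voltage_level(cdclk), cnl_compute_min_voltage_level(state)) */
	return cdclk_level > ddi_min_level ? cdclk_level : ddi_min_level;
}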
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_digital_port *intel_dig_port;
u32 temp, flags = 0;
@ -2599,12 +2590,23 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->hdmi_high_tmds_clock_ratio = true;
/* fall through */
case TRANS_DDI_MODE_SELECT_DVI:
pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
pipe_config->lane_count = 4;
break;
case TRANS_DDI_MODE_SELECT_FDI:
pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
break;
case TRANS_DDI_MODE_SELECT_DP_SST:
if (encoder->type == INTEL_OUTPUT_EDP)
pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
else
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
intel_dp_get_m_n(intel_crtc, pipe_config);
break;
case TRANS_DDI_MODE_SELECT_DP_MST:
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
intel_dp_get_m_n(intel_crtc, pipe_config);
@ -2641,6 +2643,26 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
if (IS_GEN9_LP(dev_priv))
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
}
static enum intel_output_type
intel_ddi_compute_output_type(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
switch (conn_state->connector->connector_type) {
case DRM_MODE_CONNECTOR_HDMIA:
return INTEL_OUTPUT_HDMI;
case DRM_MODE_CONNECTOR_eDP:
return INTEL_OUTPUT_EDP;
case DRM_MODE_CONNECTOR_DisplayPort:
return INTEL_OUTPUT_DP;
default:
MISSING_CASE(conn_state->connector->connector_type);
return INTEL_OUTPUT_UNUSED;
}
}
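/*
 * Hedged sketch, not driver code: the connector-type mapping above fills
 * pipe_config->output_types as a bitmask (see the intel_modeset_pipe_config()
 * hunk later in this diff), and the rest of the series replaces
 * encoder->type checks with queries of that mask. The real helper lives
 * in intel_drv.h; this only shows the idea.
 */
static inline bool example_crtc_has_type(const struct intel_crtc_state *crtc_state,
					 enum intel_output_type type)
{
	return crtc_state->output_types & BIT(type);
}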
static bool intel_ddi_compute_config(struct intel_encoder *encoder,
@ -2648,24 +2670,22 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int type = encoder->type;
int port = intel_ddi_get_encoder_port(encoder);
enum port port = encoder->port;
int ret;
WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n");
if (port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
if (type == INTEL_OUTPUT_HDMI)
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
else
ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
if (IS_GEN9_LP(dev_priv) && ret)
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_calc_lane_lat_optim_mask(encoder,
pipe_config->lane_count);
bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
return ret;
@ -2680,7 +2700,7 @@ static struct intel_connector *
intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
{
struct intel_connector *connector;
enum port port = intel_dig_port->port;
enum port port = intel_dig_port->base.port;
connector = intel_connector_alloc();
if (!connector)
@ -2699,7 +2719,7 @@ static struct intel_connector *
intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
{
struct intel_connector *connector;
enum port port = intel_dig_port->port;
enum port port = intel_dig_port->base.port;
connector = intel_connector_alloc();
if (!connector)
@ -2711,6 +2731,34 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
return connector;
}
static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dport)
{
struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
if (dport->base.port != PORT_A)
return false;
if (dport->saved_port_bits & DDI_A_4_LANES)
return false;
/* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only
* supported configuration
*/
if (IS_GEN9_LP(dev_priv))
return true;
/* Cannonlake: Most SKUs don't support DDI_E, and the only
* one that does also has a full A/E split called
* DDI_F, which makes DDI_E useless. However, for this
* case let's trust the VBT info.
*/
if (IS_CANNONLAKE(dev_priv) &&
!intel_bios_is_port_present(dev_priv, PORT_E))
return true;
return false;
}
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
{
struct intel_digital_port *intel_dig_port;
@ -2777,6 +2825,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
drm_encoder_init(&dev_priv->drm, encoder, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
intel_encoder->compute_output_type = intel_ddi_compute_output_type;
intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->enable = intel_enable_ddi;
if (IS_GEN9_LP(dev_priv))
@ -2789,7 +2838,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->suspend = intel_dp_encoder_suspend;
intel_encoder->get_power_domains = intel_ddi_get_power_domains;
intel_dig_port->port = port;
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL |
DDI_A_4_LANES);
@ -2820,23 +2868,20 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
}
/*
* Bspec says that DDI_A_4_LANES is the only supported configuration
* for Broxton. Yet some BIOS fail to set this bit on port A if eDP
* wasn't lit up at boot. Force this bit on in our internal
* configuration so that we use the proper lane count for our
* calculations.
* Some BIOS might fail to set this bit on port A if eDP
* wasn't lit up at boot. Force this bit set when needed
* so we use the proper lane count for our calculations.
*/
if (IS_GEN9_LP(dev_priv) && port == PORT_A) {
if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
max_lanes = 4;
}
if (intel_ddi_a_force_4_lanes(intel_dig_port)) {
DRM_DEBUG_KMS("Forcing DDI_A_4_LANES for port A\n");
intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
max_lanes = 4;
}
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = max_lanes;
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->type = INTEL_OUTPUT_DDI;
intel_encoder->power_domain = intel_port_to_power_domain(port);
intel_encoder->port = port;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);


@ -235,16 +235,6 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask & BIT(ss)))
info->has_pooled_eu = hweight8(sseu->subslice_mask) == 3;
/*
* There is a HW issue in 2x6 fused down parts that requires
* Pooled EU to be enabled as a WA. The pool configuration
* changes depending upon which subslice is fused down. This
* doesn't affect if the device has all 3 subslices enabled.
*/
/* WaEnablePooledEuFor2x6:bxt */
info->has_pooled_eu |= (hweight8(sseu->subslice_mask) == 2 &&
IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST));
sseu->min_eu_in_pool = 0;
if (info->has_pooled_eu) {
if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
@ -329,6 +319,107 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->has_eu_pg = 0;
}
static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
{
u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
u32 base_freq, frac_freq;
base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
base_freq *= 1000;
frac_freq = ((ts_override &
GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
frac_freq = 1000 / (frac_freq + 1);
return base_freq + frac_freq;
}
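/*
 * Hedged standalone sketch of the arithmetic above, with the register
 * fields already extracted; the field layout is illustrative only, the
 * real masks and shifts live in i915_reg.h.
 */
static u32 example_ref_ts_freq_khz(u32 divider, u32 denominator)
{
	u32 base_freq = (divider + 1) * 1000;		/* kHz */
	u32 frac_freq = 1000 / (denominator + 1);	/* kHz */

	return base_freq + frac_freq;			/* e.g. 23, 7 -> 24125 kHz */
}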
static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
{
u32 f12_5_mhz = 12500;
u32 f19_2_mhz = 19200;
u32 f24_mhz = 24000;
if (INTEL_GEN(dev_priv) <= 4) {
/* PRMs say:
*
* "The value in this register increments once every 16
* hclks." (through the “Clocking Configuration”
* (CLKCFG) MCHBAR register)
*/
return dev_priv->rawclk_freq / 16;
} else if (INTEL_GEN(dev_priv) <= 8) {
/* PRMs say:
*
* "The PCU TSC counts 10ns increments; this timestamp
* reflects bits 38:3 of the TSC (i.e. 80ns granularity,
* rolling over every 1.5 hours)."
*/
return f12_5_mhz;
} else if (INTEL_GEN(dev_priv) <= 9) {
u32 ctc_reg = I915_READ(CTC_MODE);
u32 freq = 0;
if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
freq = read_reference_ts_freq(dev_priv);
} else {
freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;
/* Now figure out how the command stream's timestamp
* register increments from this frequency (it might
* increment only every few clock cycles).
*/
freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
CTC_SHIFT_PARAMETER_SHIFT);
}
return freq;
} else if (INTEL_GEN(dev_priv) <= 10) {
u32 ctc_reg = I915_READ(CTC_MODE);
u32 freq = 0;
u32 rpm_config_reg = 0;
/* First figure out the reference frequency. There are 2 ways
* we can compute the frequency, either through the
* TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
* tells us which one we should use.
*/
if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
freq = read_reference_ts_freq(dev_priv);
} else {
u32 crystal_clock;
rpm_config_reg = I915_READ(RPM_CONFIG0);
crystal_clock = (rpm_config_reg &
GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
switch (crystal_clock) {
case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
freq = f19_2_mhz;
break;
case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
freq = f24_mhz;
break;
}
}
/* Now figure out how the command stream's timestamp register
* increments from this frequency (it might increment only
* every few clock cycles).
*/
freq >>= 3 - ((rpm_config_reg &
GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
return freq;
}
DRM_ERROR("Unknown gen, unable to compute command stream timestamp frequency\n");
return 0;
}
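/*
 * Hedged usage sketch: with the frequency cached in kHz, a command
 * streamer timestamp delta converts to nanoseconds as below. This is
 * not a function from this series, only the arithmetic the new
 * cs_timestamp_frequency_khz field enables.
 */
static inline u64 example_cs_ticks_to_ns(u64 ticks, u32 cs_freq_khz)
{
	/* cs_freq_khz ticks per millisecond -> 1000000 / cs_freq_khz ns per tick */
	return ticks * 1000000ull / cs_freq_khz;
}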
/*
* Determine various intel_device_info fields at runtime.
*
@ -347,7 +438,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
struct intel_device_info *info = mkwrite_device_info(dev_priv);
enum pipe pipe;
if (INTEL_GEN(dev_priv) >= 9) {
if (INTEL_GEN(dev_priv) >= 10) {
for_each_pipe(dev_priv, pipe)
info->num_scalers[pipe] = 2;
} else if (INTEL_GEN(dev_priv) == 9) {
info->num_scalers[PIPE_A] = 2;
info->num_scalers[PIPE_B] = 2;
info->num_scalers[PIPE_C] = 1;
@ -447,6 +541,9 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
else if (INTEL_GEN(dev_priv) >= 10)
gen10_sseu_info_init(dev_priv);
/* Initialize command stream timestamp frequency */
info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));
DRM_DEBUG_DRIVER("subslice total: %u\n",
@ -462,4 +559,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->sseu.has_subslice_pg ? "y" : "n");
DRM_DEBUG_DRIVER("has EU power gating: %s\n",
info->sseu.has_eu_pg ? "y" : "n");
DRM_DEBUG_DRIVER("CS timestamp frequency: %u kHz\n",
info->cs_timestamp_frequency_khz);
}


@ -219,10 +219,8 @@ intel_fdi_link_freq(struct drm_i915_private *dev_priv,
{
if (HAS_DDI(dev_priv))
return pipe_config->port_clock; /* SPLL */
else if (IS_GEN5(dev_priv))
return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
else
return 270000;
return dev_priv->fdi_pll_freq;
}
static const struct intel_limit intel_limits_i8xx_dac = {
@ -1703,7 +1701,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
u32 port_mask;
i915_reg_t dpll_reg;
switch (dport->port) {
switch (dport->base.port) {
case PORT_B:
port_mask = DPLL_PORTB_READY_MASK;
dpll_reg = DPLL(0);
@ -1725,7 +1723,8 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
dpll_reg, port_mask, expected_mask,
1000))
WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
port_name(dport->base.port),
I915_READ(dpll_reg) & port_mask, expected_mask);
}
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
@ -1873,8 +1872,6 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
WARN_ON(!crtc->config->has_pch_encoder);
if (HAS_PCH_LPT(dev_priv))
return PIPE_A;
else
@ -3433,20 +3430,11 @@ static u32 skl_plane_ctl_format(uint32_t pixel_format)
case DRM_FORMAT_RGB565:
return PLANE_CTL_FORMAT_RGB_565;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
case DRM_FORMAT_XRGB8888:
return PLANE_CTL_FORMAT_XRGB_8888;
/*
* XXX: For ARGB/ABGR formats we default to expecting scanout buffers
* to be already pre-multiplied. We need to add a knob (or a different
* DRM_FORMAT) for user-space to configure that.
*/
case DRM_FORMAT_ABGR8888:
return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
PLANE_CTL_ALPHA_SW_PREMULTIPLY;
case DRM_FORMAT_ARGB8888:
return PLANE_CTL_FORMAT_XRGB_8888 |
PLANE_CTL_ALPHA_SW_PREMULTIPLY;
return PLANE_CTL_FORMAT_XRGB_8888;
case DRM_FORMAT_XRGB2101010:
return PLANE_CTL_FORMAT_XRGB_2101010;
case DRM_FORMAT_XBGR2101010:
@ -3466,6 +3454,33 @@ static u32 skl_plane_ctl_format(uint32_t pixel_format)
return 0;
}
/*
* XXX: For ARGB/ABGR formats we default to expecting scanout buffers
* to be already pre-multiplied. We need to add a knob (or a different
* DRM_FORMAT) for user-space to configure that.
*/
static u32 skl_plane_ctl_alpha(uint32_t pixel_format)
{
switch (pixel_format) {
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_ARGB8888:
return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
default:
return PLANE_CTL_ALPHA_DISABLE;
}
}
static u32 glk_plane_color_ctl_alpha(uint32_t pixel_format)
{
switch (pixel_format) {
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_ARGB8888:
return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
default:
return PLANE_COLOR_ALPHA_DISABLE;
}
}
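/*
 * Hedged sketch of where each alpha helper above ends up, per the
 * skl_plane_ctl()/glk_plane_color_ctl() hunks below: pre-GLK parts keep
 * the alpha mode in PLANE_CTL, while GLK and GEN10+ move it into the
 * new PLANE_COLOR_CTL value. Not driver code.
 */
static u32 example_plane_alpha_bits(struct drm_i915_private *dev_priv,
				    u32 pixel_format)
{
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		return glk_plane_color_ctl_alpha(pixel_format);	/* -> PLANE_COLOR_CTL */

	return skl_plane_ctl_alpha(pixel_format);		/* -> PLANE_CTL */
}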
static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
switch (fb_modifier) {
@ -3522,7 +3537,8 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl = PLANE_CTL_ENABLE;
if (!IS_GEMINILAKE(dev_priv) && !IS_CANNONLAKE(dev_priv)) {
if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
plane_ctl |= skl_plane_ctl_alpha(fb->format->format);
plane_ctl |=
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE |
@ -3541,6 +3557,20 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
return plane_ctl;
}
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
u32 plane_color_ctl = 0;
plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
return plane_color_ctl;
}
static int
__intel_display_resume(struct drm_device *dev,
struct drm_atomic_state *state,
@ -4483,7 +4513,7 @@ intel_trans_dp_port_sel(struct intel_crtc *crtc)
for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
if (encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_EDP)
return enc_to_dig_port(&encoder->base)->port;
return encoder->port;
}
return -1;
@ -4834,8 +4864,9 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
}
}
void hsw_enable_ips(struct intel_crtc *crtc)
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@ -4873,12 +4904,13 @@ void hsw_enable_ips(struct intel_crtc *crtc)
}
}
void hsw_disable_ips(struct intel_crtc *crtc)
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
if (!crtc->config->ips_enabled)
if (!crtc_state->ips_enabled)
return;
assert_plane_enabled(dev_priv, crtc->plane);
@ -4926,7 +4958,8 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
* completely hide the primary plane.
*/
static void
intel_post_enable_primary(struct drm_crtc *crtc)
intel_post_enable_primary(struct drm_crtc *crtc,
const struct intel_crtc_state *new_crtc_state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@ -4939,7 +4972,7 @@ intel_post_enable_primary(struct drm_crtc *crtc)
* when going from primary only to sprite only and vice
* versa.
*/
hsw_enable_ips(intel_crtc);
hsw_enable_ips(new_crtc_state);
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
@ -4958,7 +4991,8 @@ intel_post_enable_primary(struct drm_crtc *crtc)
/* FIXME move all this to pre_plane_update() with proper state tracking */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
intel_pre_disable_primary(struct drm_crtc *crtc,
const struct intel_crtc_state *old_crtc_state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@ -4980,7 +5014,7 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
* when going from primary only to sprite only and vice
* versa.
*/
hsw_disable_ips(intel_crtc);
hsw_disable_ips(old_crtc_state);
}
/* FIXME get rid of this and use pre_plane_update */
@ -4992,7 +5026,7 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
intel_pre_disable_primary(crtc);
intel_pre_disable_primary(crtc, to_intel_crtc_state(crtc->state));
/*
* Vblank time updates from the shadow to live plane control register
@ -5036,7 +5070,7 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
if (primary_state->base.visible &&
(needs_modeset(&pipe_config->base) ||
!old_primary_state->base.visible))
intel_post_enable_primary(&crtc->base);
intel_post_enable_primary(&crtc->base, pipe_config);
}
}
@ -5065,7 +5099,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
if (old_primary_state->base.visible &&
(modeset || !primary_state->base.visible))
intel_pre_disable_primary(&crtc->base);
intel_pre_disable_primary(&crtc->base, old_crtc_state);
}
/*
@ -5939,6 +5973,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
dev_priv->min_cdclk[intel_crtc->pipe] = 0;
dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
}
/*
@ -7633,7 +7668,7 @@ static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
break;
case INTEL_OUTPUT_EDP:
has_panel = true;
if (enc_to_dig_port(&encoder->base)->port == PORT_A)
if (encoder->port == PORT_A)
has_cpu_edp = true;
break;
default:
@ -8426,7 +8461,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
u32 val, base, offset, stride_mult, tiling;
u32 val, base, offset, stride_mult, tiling, alpha;
int pipe = crtc->pipe;
int fourcc, pixel_format;
unsigned int aligned_height;
@ -8448,9 +8483,16 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
goto error;
pixel_format = val & PLANE_CTL_FORMAT_MASK;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
alpha = I915_READ(PLANE_COLOR_CTL(pipe, 0));
alpha &= PLANE_COLOR_ALPHA_MASK;
} else {
alpha = val & PLANE_CTL_ALPHA_MASK;
}
fourcc = skl_format_to_fourcc(pixel_format,
val & PLANE_CTL_ORDER_RGBX,
val & PLANE_CTL_ALPHA_MASK);
val & PLANE_CTL_ORDER_RGBX, alpha);
fb->format = drm_format_info(fourcc);
tiling = val & PLANE_CTL_TILED_MASK;
@ -8857,7 +8899,9 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
}
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_update_cdclk(dev_priv);
intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
}
/*
@ -9231,10 +9275,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
ironlake_get_pfit_config(crtc, pipe_config);
}
if (IS_HASWELL(dev_priv))
pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
(I915_READ(IPS_CTL) & IPS_ENABLE);
if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
pipe_config->pixel_multiplier =
@ -10578,7 +10618,7 @@ static const char * const output_type_str[] = {
OUTPUT_TYPE(DP),
OUTPUT_TYPE(EDP),
OUTPUT_TYPE(DSI),
OUTPUT_TYPE(UNKNOWN),
OUTPUT_TYPE(DDI),
OUTPUT_TYPE(DP_MST),
};
@ -10749,13 +10789,13 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
switch (encoder->type) {
unsigned int port_mask;
case INTEL_OUTPUT_UNKNOWN:
case INTEL_OUTPUT_DDI:
if (WARN_ON(!HAS_DDI(to_i915(dev))))
break;
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
port_mask = 1 << encoder->port;
/* the same port mustn't appear more than once */
if (used_ports & port_mask)
@ -10765,7 +10805,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
break;
case INTEL_OUTPUT_DP_MST:
used_mst_ports |=
1 << enc_to_mst(&encoder->base)->primary->port;
1 << encoder->port;
break;
default:
break;
@ -10882,7 +10922,12 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
* Determine output_types before calling the .compute_config()
* hooks so that the hooks can use this information safely.
*/
pipe_config->output_types |= 1 << encoder->type;
if (encoder->compute_output_type)
pipe_config->output_types |=
BIT(encoder->compute_output_type(encoder, pipe_config,
connector_state));
else
pipe_config->output_types |= BIT(encoder->type);
}
encoder_retry:
@ -11071,6 +11116,9 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
bool adjust)
{
bool ret = true;
bool fixup_inherited = adjust &&
(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
!(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
#define PIPE_CONF_CHECK_X(name) \
if (current_config->name != pipe_config->name) { \
@ -11090,6 +11138,31 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
ret = false; \
}
#define PIPE_CONF_CHECK_BOOL(name) \
if (current_config->name != pipe_config->name) { \
pipe_config_err(adjust, __stringify(name), \
"(expected %s, found %s)\n", \
yesno(current_config->name), \
yesno(pipe_config->name)); \
ret = false; \
}
/*
* Checks state where we only read out the enabling, but not the entire
* state itself (like full infoframes or ELD for audio). These states
* require a full modeset on bootup to fix up.
*/
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) \
if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
PIPE_CONF_CHECK_BOOL(name); \
} else { \
pipe_config_err(adjust, __stringify(name), \
"unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
yesno(current_config->name), \
yesno(pipe_config->name)); \
ret = false; \
}
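/*
 * Hedged restatement of the _INCOMPLETE rule above as plain logic, not
 * driver code: the comparison is only trusted when we are not fixing up
 * a BIOS-inherited state, or when both sides already agree the feature
 * is off; otherwise a modeset is forced instead of reporting a bogus
 * mismatch.
 */
static bool example_can_check_incomplete(bool fixup_inherited,
					 bool cur_enabled, bool new_enabled)
{
	return !fixup_inherited || (!cur_enabled && !new_enabled);
}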
#define PIPE_CONF_CHECK_P(name) \
if (current_config->name != pipe_config->name) { \
pipe_config_err(adjust, __stringify(name), \
@ -11175,7 +11248,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(cpu_transcoder);
PIPE_CONF_CHECK_I(has_pch_encoder);
PIPE_CONF_CHECK_BOOL(has_pch_encoder);
PIPE_CONF_CHECK_I(fdi_lanes);
PIPE_CONF_CHECK_M_N(fdi_m_n);
@ -11207,17 +11280,17 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(has_hdmi_sink);
PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
PIPE_CONF_CHECK_I(limited_color_range);
PIPE_CONF_CHECK_BOOL(limited_color_range);
PIPE_CONF_CHECK_I(hdmi_scrambling);
PIPE_CONF_CHECK_I(hdmi_high_tmds_clock_ratio);
PIPE_CONF_CHECK_I(has_infoframe);
PIPE_CONF_CHECK_I(ycbcr420);
PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
PIPE_CONF_CHECK_BOOL(ycbcr420);
PIPE_CONF_CHECK_I(has_audio);
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
DRM_MODE_FLAG_INTERLACE);
@ -11243,7 +11316,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);
PIPE_CONF_CHECK_I(pch_pfit.enabled);
PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
if (current_config->pch_pfit.enabled) {
PIPE_CONF_CHECK_X(pch_pfit.pos);
PIPE_CONF_CHECK_X(pch_pfit.size);
@ -11253,11 +11326,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
}
/* BDW+ don't expose a synchronous way to read the state */
if (IS_HASWELL(dev_priv))
PIPE_CONF_CHECK_I(ips_enabled);
PIPE_CONF_CHECK_I(double_wide);
PIPE_CONF_CHECK_BOOL(double_wide);
PIPE_CONF_CHECK_P(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
@ -11291,8 +11360,12 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
PIPE_CONF_CHECK_I(min_voltage_level);
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
@ -11559,10 +11632,8 @@ verify_crtc_state(struct drm_crtc *crtc,
"Encoder connected to wrong pipe %c\n",
pipe_name(pipe));
if (active) {
pipe_config->output_types |= 1 << encoder->type;
if (active)
encoder->get_config(encoder, pipe_config);
}
}
intel_crtc_compute_pixel_rate(pipe_config);
@ -11933,16 +12004,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
* holding all the crtc locks, even if we don't end up
* touching the hardware
*/
if (!intel_cdclk_state_compare(&dev_priv->cdclk.logical,
&intel_state->cdclk.logical)) {
if (intel_cdclk_changed(&dev_priv->cdclk.logical,
&intel_state->cdclk.logical)) {
ret = intel_lock_all_pipes(state);
if (ret < 0)
return ret;
}
/* All pipes must be switched off while we change the cdclk. */
if (!intel_cdclk_state_compare(&dev_priv->cdclk.actual,
&intel_state->cdclk.actual)) {
if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
&intel_state->cdclk.actual)) {
ret = intel_modeset_all_pipes(state);
if (ret < 0)
return ret;
@ -11951,6 +12022,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
intel_state->cdclk.logical.cdclk,
intel_state->cdclk.actual.cdclk);
DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
intel_state->cdclk.logical.voltage_level,
intel_state->cdclk.actual.voltage_level);
} else {
to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
}
@ -12519,6 +12593,9 @@ static int intel_atomic_commit(struct drm_device *dev,
if (intel_state->modeset) {
memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
sizeof(intel_state->min_cdclk));
memcpy(dev_priv->min_voltage_level,
intel_state->min_voltage_level,
sizeof(intel_state->min_voltage_level));
dev_priv->active_crtcs = intel_state->active_crtcs;
dev_priv->cdclk.logical = intel_state->cdclk.logical;
dev_priv->cdclk.actual = intel_state->cdclk.actual;
@ -12756,7 +12833,7 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state
crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
if (IS_GEMINILAKE(dev_priv))
if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
max_dotclk *= 2;
if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
@ -12820,6 +12897,9 @@ intel_check_primary_plane(struct intel_plane *plane,
state->ctl = i9xx_plane_ctl(crtc_state, state);
}
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
state->color_ctl = glk_plane_color_ctl(crtc_state, state);
return 0;
}
@ -12864,6 +12944,7 @@ out:
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_crtc_state->state);
@ -12871,6 +12952,20 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
intel_pipe_update_end(new_crtc_state);
if (new_crtc_state->update_pipe &&
!needs_modeset(&new_crtc_state->base) &&
old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED) {
if (!IS_GEN2(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, intel_crtc->pipe, true);
if (new_crtc_state->has_pch_encoder) {
enum pipe pch_transcoder =
intel_crtc_pch_transcoder(intel_crtc);
intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
}
}
}
/**
@ -14352,6 +14447,7 @@ void intel_modeset_init_hw(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
intel_update_cdclk(dev_priv);
intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
}
@ -14431,6 +14527,8 @@ retry:
cs->wm.need_postvbl_update = true;
dev_priv->display.optimize_watermarks(intel_state, cs);
to_intel_crtc_state(crtc->state)->wm = cs->wm;
}
put_state:
@ -14440,6 +14538,22 @@ fail:
drm_modeset_acquire_fini(&ctx);
}
static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
{
if (IS_GEN5(dev_priv)) {
u32 fdi_pll_clk =
I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
} else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
dev_priv->fdi_pll_freq = 270000;
} else {
return;
}
DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
}
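/*
 * Hedged worked example of the GEN5 formula above, not driver code: a
 * FDI_PLL_BIOS_0 feedback-clock field of 25 gives (25 + 2) * 10000 =
 * 270000 kHz, i.e. the same 270 MHz that GEN6/IVB use as a fixed value;
 * intel_fdi_link_freq() earlier in this diff now simply returns the
 * cached dev_priv->fdi_pll_freq.
 */
static int example_gen5_fdi_pll_freq_khz(u32 fdi_pll_clk)
{
	return (fdi_pll_clk + 2) * 10000;
}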
int intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@ -14527,6 +14641,7 @@ int intel_modeset_init(struct drm_device *dev)
}
intel_shared_dpll_init(dev);
intel_update_fdi_pll_freq(dev_priv);
intel_update_czclk(dev_priv);
intel_modeset_init_hw(dev);
@ -14716,7 +14831,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
/* Clear any frame start delays used for debugging left by the BIOS */
if (!transcoder_is_dsi(cpu_transcoder)) {
if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
i915_reg_t reg = PIPECONF(cpu_transcoder);
I915_WRITE(reg,
@ -14949,7 +15064,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc_state = to_intel_crtc_state(crtc->base.state);
encoder->base.crtc = &crtc->base;
crtc_state->output_types |= 1 << encoder->type;
encoder->get_config(encoder, crtc_state);
} else {
encoder->base.crtc = NULL;
@ -15028,6 +15142,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
dev_priv->min_voltage_level[crtc->pipe] =
crtc_state->min_voltage_level;
intel_pipe_config_sanity_check(dev_priv, crtc_state);
}
@ -15051,6 +15167,23 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
}
}
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
/* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
DARBF_GATING_DIS);
if (IS_HASWELL(dev_priv)) {
/*
* WaRsPkgCStateDisplayPMReq:hsw
* System hang if this isn't done before disabling all planes!
*/
I915_WRITE(CHICKEN_PAR1_1,
I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
}
}
/* Scan out the current hw modeset state,
* and sanitizes it to the current state
*/
@ -15064,15 +15197,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
struct intel_encoder *encoder;
int i;
if (IS_HASWELL(dev_priv)) {
/*
* WaRsPkgCStateDisplayPMReq:hsw
* System hang if this isn't done before disabling all planes!
*/
I915_WRITE(CHICKEN_PAR1_1,
I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
}
intel_early_display_was(dev_priv);
intel_modeset_readout_hw_state(dev);
/* HW state is read out, now we need to sanitize this mess. */
@ -15164,17 +15289,6 @@ void intel_display_resume(struct drm_device *dev)
drm_atomic_state_put(state);
}
void intel_modeset_gem_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
intel_init_gt_powersave(dev_priv);
intel_init_clock_gating(dev_priv);
intel_setup_overlay(dev_priv);
}
int intel_connector_register(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);

(diff for this file suppressed: too large to display)


@ -34,6 +34,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
@ -87,6 +88,12 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->dp_m_n.tu = slots;
if (IS_GEN9_LP(dev_priv))
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
return true;
}
@ -142,7 +149,8 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
DRM_ERROR("failed to update payload %d\n", ret);
}
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
intel_audio_codec_disable(encoder,
old_crtc_state, old_conn_state);
}
static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
@ -172,13 +180,27 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
intel_dp->active_mst_links--;
intel_mst->connector = NULL;
if (intel_dp->active_mst_links == 0) {
if (intel_dp->active_mst_links == 0)
intel_dig_port->base.post_disable(&intel_dig_port->base,
NULL, NULL);
}
old_crtc_state, NULL);
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
}
static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
if (intel_dp->active_mst_links == 0 &&
intel_dig_port->base.pre_pll_enable)
intel_dig_port->base.pre_pll_enable(&intel_dig_port->base,
pipe_config, NULL);
}
static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
@ -187,7 +209,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_dig_port->port;
enum port port = intel_dig_port->base.port;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
int ret;
@ -231,7 +253,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_dig_port->port;
enum port port = intel_dig_port->base.port;
int ret;
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
@ -265,48 +287,8 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 temp, flags = 0;
pipe_config->has_audio =
intel_ddi_is_audio_enabled(dev_priv, crtc);
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
if (temp & TRANS_DDI_PVSYNC)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
switch (temp & TRANS_DDI_BPC_MASK) {
case TRANS_DDI_BPC_6:
pipe_config->pipe_bpp = 18;
break;
case TRANS_DDI_BPC_8:
pipe_config->pipe_bpp = 24;
break;
case TRANS_DDI_BPC_10:
pipe_config->pipe_bpp = 30;
break;
case TRANS_DDI_BPC_12:
pipe_config->pipe_bpp = 36;
break;
default:
break;
}
pipe_config->base.adjusted_mode.flags |= flags;
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
intel_dp_get_m_n(crtc, pipe_config);
intel_ddi_clock_get(&intel_dig_port->base, pipe_config);
intel_ddi_get_config(&intel_dig_port->base, pipe_config);
}
static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
@ -570,13 +552,14 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->power_domain = intel_dig_port->base.power_domain;
intel_encoder->port = intel_dig_port->port;
intel_encoder->port = intel_dig_port->base.port;
intel_encoder->crtc_mask = 0x7;
intel_encoder->cloneable = 0;
intel_encoder->compute_config = intel_dp_mst_compute_config;
intel_encoder->disable = intel_mst_disable_dp;
intel_encoder->post_disable = intel_mst_post_disable_dp;
intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
intel_encoder->pre_enable = intel_mst_pre_enable_dp;
intel_encoder->enable = intel_mst_enable_dp;
intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;


@ -466,21 +466,21 @@ void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
lockdep_assert_held(&dev_priv->power_domains.lock);
if (rcomp_phy != -1) {
was_enabled = true;
if (rcomp_phy != -1)
was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);
/*
* We need to copy the GRC calibration value from rcomp_phy,
* so make sure it's powered up.
*/
if (!was_enabled)
_bxt_ddi_phy_init(dev_priv, rcomp_phy);
}
/*
* We need to copy the GRC calibration value from rcomp_phy,
* so make sure it's powered up.
*/
if (!was_enabled)
_bxt_ddi_phy_init(dev_priv, rcomp_phy);
_bxt_ddi_phy_init(dev_priv, phy);
if (rcomp_phy != -1 && !was_enabled)
bxt_ddi_phy_uninit(dev_priv, phy_info->rcomp_phy);
if (!was_enabled)
bxt_ddi_phy_uninit(dev_priv, rcomp_phy);
}
static bool __printf(6, 7)
@ -567,8 +567,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
}
uint8_t
bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
uint8_t lane_count)
bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count)
{
switch (lane_count) {
case 1:
@ -587,9 +586,8 @@ bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
uint8_t lane_lat_optim_mask)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
enum port port = dport->port;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
enum dpio_phy phy;
enum dpio_channel ch;
int lane;
@ -614,9 +612,8 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
uint8_t
bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
enum port port = dport->port;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
enum dpio_phy phy;
enum dpio_channel ch;
int lane;
@ -642,7 +639,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
u32 val;
@ -734,11 +731,12 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
}
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum pipe pipe = crtc->pipe;
uint32_t val;
@ -777,17 +775,16 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
}
}
void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
enum pipe pipe = crtc->pipe;
unsigned int lane_mask =
intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
intel_dp_unused_lane_mask(crtc_state->lane_count);
u32 val;
/*
@ -803,7 +800,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
mutex_lock(&dev_priv->sb_lock);
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, true);
chv_data_lane_soft_reset(encoder, crtc_state, true);
/* program left/right clock distribution */
if (pipe != PIPE_B) {
@ -833,7 +830,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
val |= CHV_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
if (intel_crtc->config->lane_count > 2) {
if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
@ -858,16 +855,15 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->sb_lock);
}
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
enum pipe pipe = crtc->pipe;
int data, i, stagger;
u32 val;
@ -878,16 +874,16 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
if (intel_crtc->config->lane_count > 2) {
if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
}
/* Program Tx lane latency optimal setting */
for (i = 0; i < intel_crtc->config->lane_count; i++) {
for (i = 0; i < crtc_state->lane_count; i++) {
/* Set the upar bit */
if (intel_crtc->config->lane_count == 1)
if (crtc_state->lane_count == 1)
data = 0x0;
else
data = (i == 1) ? 0x0 : 0x1;
@ -896,13 +892,13 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
}
/* Data lane stagger programming */
if (intel_crtc->config->port_clock > 270000)
if (crtc_state->port_clock > 270000)
stagger = 0x18;
else if (intel_crtc->config->port_clock > 135000)
else if (crtc_state->port_clock > 135000)
stagger = 0xd;
else if (intel_crtc->config->port_clock > 67500)
else if (crtc_state->port_clock > 67500)
stagger = 0x7;
else if (intel_crtc->config->port_clock > 33750)
else if (crtc_state->port_clock > 33750)
stagger = 0x4;
else
stagger = 0x2;
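
A standalone user-space sketch of the data-lane stagger selection above, with the thresholds copied from the chain; stagger_for_port_clock and the sample port clocks (540000 and 162000 kHz, i.e. common DP link rates) are illustrative additions, not part of this patch:

#include <stdio.h>

/* Data-lane stagger value as a function of port clock in kHz, mirroring
 * the threshold chain above. */
static int stagger_for_port_clock(int port_clock)
{
	if (port_clock > 270000)
		return 0x18;
	else if (port_clock > 135000)
		return 0xd;
	else if (port_clock > 67500)
		return 0x7;
	else if (port_clock > 33750)
		return 0x4;
	else
		return 0x2;
}

int main(void)
{
	/* e.g. HBR2 (540000 kHz) and RBR (162000 kHz) link clocks */
	printf("stagger(540000) = 0x%x\n", stagger_for_port_clock(540000));
	printf("stagger(162000) = 0x%x\n", stagger_for_port_clock(162000));
	return 0;
}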
@ -911,7 +907,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
val |= DPIO_TX2_STAGGER_MASK(0x1f);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
if (intel_crtc->config->lane_count > 2) {
if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
@ -924,7 +920,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
DPIO_TX1_STAGGER_MULT(6) |
DPIO_TX2_STAGGER_MULT(0));
if (intel_crtc->config->lane_count > 2) {
if (crtc_state->lane_count > 2) {
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
@ -934,7 +930,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
}
/* Deassert data lane reset */
chv_data_lane_soft_reset(encoder, false);
chv_data_lane_soft_reset(encoder, crtc_state, false);
mutex_unlock(&dev_priv->sb_lock);
}
@ -950,10 +946,11 @@ void chv_phy_release_cl2_override(struct intel_encoder *encoder)
}
}
void chv_phy_post_pll_disable(struct intel_encoder *encoder)
void chv_phy_post_pll_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
enum pipe pipe = to_intel_crtc(old_crtc_state->base.crtc)->pipe;
u32 val;
mutex_lock(&dev_priv->sb_lock);
@ -991,7 +988,7 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
enum pipe pipe = intel_crtc->pipe;
mutex_lock(&dev_priv->sb_lock);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
@ -1009,15 +1006,14 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
mutex_unlock(&dev_priv->sb_lock);
}
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
enum pipe pipe = crtc->pipe;
/* Program Tx lane resets to default */
mutex_lock(&dev_priv->sb_lock);
@ -1037,15 +1033,15 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->sb_lock);
}
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
enum pipe pipe = crtc->pipe;
u32 val;
mutex_lock(&dev_priv->sb_lock);
@ -1067,14 +1063,14 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->sb_lock);
}
void vlv_phy_reset_lanes(struct intel_encoder *encoder)
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
enum pipe pipe = crtc->pipe;
mutex_lock(&dev_priv->sb_lock);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);


@ -813,15 +813,11 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
if (encoder->type == INTEL_OUTPUT_HDMI) {
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
pll = hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state);
} else if (encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_DP_MST ||
encoder->type == INTEL_OUTPUT_EDP) {
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
pll = hsw_ddi_dp_get_dpll(encoder, clock);
} else if (encoder->type == INTEL_OUTPUT_ANALOG) {
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
if (WARN_ON(crtc_state->port_clock / 2 != 135000))
return NULL;
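
The hunks in this file swap direct encoder->type comparisons for checks against the crtc_state->output_types bitmask. A minimal user-space model of that pattern, under the assumption that intel_crtc_has_type() and intel_crtc_has_dp_encoder() simply test bits in output_types; the OUT_* names, crtc_has_type and crtc_has_dp_encoder are shortened stand-ins for the sketch, not the real i915 identifiers:

#include <stdbool.h>
#include <stdio.h>

enum output_type { OUT_HDMI, OUT_DP, OUT_EDP, OUT_DP_MST, OUT_ANALOG };

struct crtc_state { unsigned int output_types; };

static bool crtc_has_type(const struct crtc_state *s, enum output_type t)
{
	return s->output_types & (1u << t);
}

static bool crtc_has_dp_encoder(const struct crtc_state *s)
{
	return s->output_types &
	       ((1u << OUT_DP) | (1u << OUT_EDP) | (1u << OUT_DP_MST));
}

int main(void)
{
	struct crtc_state s = { .output_types = 1u << OUT_DP_MST };

	printf("dp encoder? %d, hdmi? %d\n",
	       crtc_has_dp_encoder(&s), crtc_has_type(&s, OUT_HDMI));
	return 0;
}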
@ -1369,15 +1365,13 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
if (encoder->type == INTEL_OUTPUT_HDMI) {
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
if (!bret) {
DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
return NULL;
}
} else if (encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_DP_MST ||
encoder->type == INTEL_OUTPUT_EDP) {
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
if (!bret) {
DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
@ -1388,7 +1382,7 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
return NULL;
}
if (encoder->type == INTEL_OUTPUT_EDP)
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
pll = intel_find_shared_dpll(crtc, crtc_state,
DPLL_ID_SKL_DPLL0,
DPLL_ID_SKL_DPLL0);
@ -1808,18 +1802,15 @@ bxt_get_dpll(struct intel_crtc *crtc,
{
struct intel_dpll_hw_state dpll_hw_state = { };
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_digital_port *intel_dig_port;
struct intel_shared_dpll *pll;
int i, clock = crtc_state->port_clock;
if (encoder->type == INTEL_OUTPUT_HDMI &&
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
!bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock,
&dpll_hw_state))
return NULL;
if ((encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_EDP ||
encoder->type == INTEL_OUTPUT_DP_MST) &&
if (intel_crtc_has_dp_encoder(crtc_state) &&
!bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
return NULL;
@ -1828,15 +1819,8 @@ bxt_get_dpll(struct intel_crtc *crtc,
crtc_state->dpll_hw_state = dpll_hw_state;
if (encoder->type == INTEL_OUTPUT_DP_MST) {
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
intel_dig_port = intel_mst->primary;
} else
intel_dig_port = enc_to_dig_port(&encoder->base);
/* 1:1 mapping between ports and PLLs */
i = (enum intel_dpll_id) intel_dig_port->port;
i = (enum intel_dpll_id) encoder->port;
pll = intel_get_shared_dpll_by_id(dev_priv, i);
DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
@ -2008,8 +1992,8 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
* requirement, follow the Display Voltage Frequency Switching
* Sequence Before Frequency Change
*
* FIXME: (DVFS) is used to adjust the display voltage to match the
* display clock frequencies
* Note: DVFS is actually handled via the cdclk code paths,
* hence we do nothing here.
*/
/* 6. Enable DPLL in DPLL_ENABLE. */
@ -2030,8 +2014,8 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
* requirement, follow the Display Voltage Frequency Switching
* Sequence After Frequency Change
*
* FIXME: (DVFS) is used to adjust the display voltage to match the
* display clock frequencies
* Note: DVFS is actually handled via the cdclk code paths,
* hence we do nothing here.
*/
/*
@ -2055,8 +2039,8 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
* requirement, follow the Display Voltage Frequency Switching
* Sequence Before Frequency Change
*
* FIXME: (DVFS) is used to adjust the display voltage to match the
* display clock frequencies
* Note: DVFS is actually handled via the cdclk code paths,
* hence we do nothing here.
*/
/* 3. Disable DPLL through DPLL_ENABLE. */
@ -2077,8 +2061,8 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
* requirement, follow the Display Voltage Frequency Switching
* Sequence After Frequency Change
*
* FIXME: (DVFS) is used to adjust the display voltage to match the
* display clock frequencies
* Note: DVFS is actually handled via the cdclk code paths,
* hence we do nothing here.
*/
/* 6. Disable DPLL power in DPLL_ENABLE. */
@ -2126,10 +2110,8 @@ out:
return ret;
}
static void cnl_wrpll_get_multipliers(unsigned int bestdiv,
unsigned int *pdiv,
unsigned int *qdiv,
unsigned int *kdiv)
static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
int *qdiv, int *kdiv)
{
/* even dividers */
if (bestdiv % 2 == 0) {
@ -2167,10 +2149,12 @@ static void cnl_wrpll_get_multipliers(unsigned int bestdiv,
}
}
static void cnl_wrpll_params_populate(struct skl_wrpll_params *params, uint32_t dco_freq,
uint32_t ref_freq, uint32_t pdiv, uint32_t qdiv,
uint32_t kdiv)
static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
u32 dco_freq, u32 ref_freq,
int pdiv, int qdiv, int kdiv)
{
u32 dco;
switch (kdiv) {
case 1:
params->kdiv = 1;
@ -2202,39 +2186,35 @@ static void cnl_wrpll_params_populate(struct skl_wrpll_params *params, uint32_t
WARN(1, "Incorrect PDiv\n");
}
if (kdiv != 2)
qdiv = 1;
WARN_ON(kdiv != 2 && qdiv != 1);
params->qdiv_ratio = qdiv;
params->qdiv_mode = (qdiv == 1) ? 0 : 1;
params->dco_integer = div_u64(dco_freq, ref_freq);
params->dco_fraction = div_u64((div_u64((uint64_t)dco_freq<<15, (uint64_t)ref_freq) -
((uint64_t)params->dco_integer<<15)) * 0x8000, 0x8000);
dco = div_u64((u64)dco_freq << 15, ref_freq);
params->dco_integer = dco >> 15;
params->dco_fraction = dco & 0x7fff;
}
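
The new three-line form above folds the DCO ratio into a single 15-bit fixed-point division, which is equivalent to what the old dco_fraction expression computed the long way round. A standalone sketch; dco_split and the 8100000 kHz DCO / 24000 kHz reference values are illustrative, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* 15-bit fixed-point split of dco_freq / ref_freq, as done above. */
static void dco_split(uint32_t dco_freq, uint32_t ref_freq,
		      uint32_t *integer, uint32_t *fraction)
{
	uint32_t dco = (uint32_t)(((uint64_t)dco_freq << 15) / ref_freq);

	*integer = dco >> 15;		/* whole multiples of ref_freq */
	*fraction = dco & 0x7fff;	/* remainder in 1/32768 steps */
}

int main(void)
{
	uint32_t integer, fraction;

	dco_split(8100000, 24000, &integer, &fraction);
	/* 8100000 / 24000 = 337.5 -> integer 337, fraction 0x4000 */
	printf("dco_integer=%u dco_fraction=0x%04x\n",
	       (unsigned int)integer, (unsigned int)fraction);
	return 0;
}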
static bool
cnl_ddi_calculate_wrpll(int clock /* in Hz */,
cnl_ddi_calculate_wrpll(int clock,
struct drm_i915_private *dev_priv,
struct skl_wrpll_params *wrpll_params)
{
uint64_t afe_clock = clock * 5 / KHz(1); /* clocks in kHz */
unsigned int dco_min = 7998 * KHz(1);
unsigned int dco_max = 10000 * KHz(1);
unsigned int dco_mid = (dco_min + dco_max) / 2;
u32 afe_clock = clock * 5;
u32 dco_min = 7998000;
u32 dco_max = 10000000;
u32 dco_mid = (dco_min + dco_max) / 2;
static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
18, 20, 24, 28, 30, 32, 36, 40,
42, 44, 48, 50, 52, 54, 56, 60,
64, 66, 68, 70, 72, 76, 78, 80,
84, 88, 90, 92, 96, 98, 100, 102,
3, 5, 7, 9, 15, 21 };
unsigned int d, dco;
unsigned int dco_centrality = 0;
unsigned int best_dco_centrality = 999999;
unsigned int best_div = 0;
unsigned int best_dco = 0;
unsigned int pdiv = 0, qdiv = 0, kdiv = 0;
u32 dco, best_dco = 0, dco_centrality = 0;
u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
for (d = 0; d < ARRAY_SIZE(dividers); d++) {
dco = afe_clock * dividers[d];
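
The remainder of this loop is not shown in the hunk; judging from the dco_centrality and best_dco_centrality variables, the search presumably keeps the divider whose DCO frequency stays inside [dco_min, dco_max] and lands closest to dco_mid. A standalone user-space sketch under that assumption; pick_divider, the trimmed divider list and the 594000 kHz sample port clock are illustrative only:

#include <stdint.h>
#include <stdio.h>

static int pick_divider(uint32_t afe_clock, const int *dividers, int n,
			uint32_t dco_min, uint32_t dco_max)
{
	uint32_t dco_mid = (dco_min + dco_max) / 2;
	uint32_t best_centrality = UINT32_MAX;
	int best_div = 0;

	for (int d = 0; d < n; d++) {
		uint32_t dco = afe_clock * dividers[d];

		if (dco < dco_min || dco > dco_max)
			continue;

		uint32_t centrality = dco > dco_mid ? dco - dco_mid
						    : dco_mid - dco;

		if (centrality < best_centrality) {
			best_centrality = centrality;
			best_div = dividers[d];
		}
	}
	return best_div;	/* 0: no divider keeps the DCO in range */
}

int main(void)
{
	static const int dividers[] = { 2, 4, 6, 8, 10, 12, 3, 5, 7, 9 };
	/* e.g. a 594000 kHz port clock: afe_clock = 594000 * 5 kHz */
	int div = pick_divider(2970000, dividers, 10, 7998000, 10000000);

	printf("best divider: %d (DCO %u kHz)\n", div, 2970000u * div);
	return 0;
}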
@ -2271,7 +2251,7 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
if (!cnl_ddi_calculate_wrpll(clock * 1000, dev_priv, &wrpll_params))
if (!cnl_ddi_calculate_wrpll(clock, dev_priv, &wrpll_params))
return false;
cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
@ -2281,7 +2261,6 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
wrpll_params.central_freq |
DPLL_CFGCR1_CENTRAL_FREQ;
memset(&crtc_state->dpll_hw_state, 0,
@ -2345,15 +2324,13 @@ cnl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
if (encoder->type == INTEL_OUTPUT_HDMI) {
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
bret = cnl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
if (!bret) {
DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
return NULL;
}
} else if (encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_DP_MST ||
encoder->type == INTEL_OUTPUT_EDP) {
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
bret = cnl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
if (!bret) {
DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
@ -2361,8 +2338,8 @@ cnl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
}
crtc_state->dpll_hw_state = dpll_hw_state;
} else {
DRM_DEBUG_KMS("Skip DPLL setup for encoder %d\n",
encoder->type);
DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
crtc_state->output_types);
return NULL;
}


@ -47,14 +47,11 @@
* contexts. Note that it's important that we check the condition again after
* having timed out, since the timeout could be due to preemption or similar and
* we've never had a chance to check the condition before the timeout.
*
* TODO: When modesetting has fully transitioned to atomic, the below
* drm_can_sleep() can be removed and in_atomic()/!in_atomic() asserts
* added.
*/
#define _wait_for(COND, US, W) ({ \
unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \
int ret__; \
might_sleep(); \
for (;;) { \
bool expired__ = time_after(jiffies, timeout__); \
if (COND) { \
@ -65,11 +62,7 @@
ret__ = -ETIMEDOUT; \
break; \
} \
if ((W) && drm_can_sleep()) { \
usleep_range((W), (W)*2); \
} else { \
cpu_relax(); \
} \
usleep_range((W), (W) * 2); \
} \
ret__; \
})
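
A user-space sketch of the same polling pattern (not the kernel macro itself, and assuming a POSIX environment): the expiry flag is latched before the condition is evaluated, so the condition always gets one last check even if we were preempted or slept past the deadline. wait_for_cond, now_us, sleep_us_range and always_false are hypothetical names for the sketch:

#include <stdbool.h>
#include <time.h>

static long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000L + ts.tv_nsec / 1000L;
}

static void sleep_us_range(long us)
{
	struct timespec ts = { .tv_sec = us / 1000000L,
			       .tv_nsec = (us % 1000000L) * 1000L };

	nanosleep(&ts, NULL);
}

/*
 * Latch the expiry flag before evaluating the condition, so a timeout
 * alone never masks a condition that became true while we were asleep
 * or preempted.
 */
static int wait_for_cond(bool (*cond)(void), long timeout_us, long wait_us)
{
	long deadline = now_us() + timeout_us;

	for (;;) {
		bool expired = now_us() > deadline;

		if (cond())
			return 0;
		if (expired)
			return -1;	/* -ETIMEDOUT in the macro above */

		sleep_us_range(wait_us);
	}
}

static bool always_false(void) { return false; }

int main(void)
{
	/* Expect -1 after roughly 10ms of polling every 1ms. */
	return wait_for_cond(always_false, 10000, 1000) == -1 ? 0 : 1;
}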
@ -173,7 +166,7 @@ enum intel_output_type {
INTEL_OUTPUT_DP = 7,
INTEL_OUTPUT_EDP = 8,
INTEL_OUTPUT_DSI = 9,
INTEL_OUTPUT_UNKNOWN = 10,
INTEL_OUTPUT_DDI = 10,
INTEL_OUTPUT_DP_MST = 11,
};
@ -216,6 +209,9 @@ struct intel_encoder {
enum port port;
unsigned int cloneable;
void (*hot_plug)(struct intel_encoder *);
enum intel_output_type (*compute_output_type)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
bool (*compute_config)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
@ -386,6 +382,8 @@ struct intel_atomic_state {
unsigned int active_crtcs;
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
/* minimum acceptable voltage level for each pipe */
u8 min_voltage_level[I915_MAX_PIPES];
struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
@ -420,6 +418,9 @@ struct intel_plane_state {
/* plane control register */
u32 ctl;
/* plane color control register */
u32 color_ctl;
/*
* scaler_id
* = -1 : not using a scaler
@ -738,6 +739,9 @@ struct intel_crtc_state {
*/
uint8_t lane_lat_optim_mask;
/* minimum acceptable voltage level */
u8 min_voltage_level;
/* Panel fitter controls for gen2-gen4 + VLV */
struct {
u32 control;
@ -1048,7 +1052,6 @@ struct intel_lspcon {
struct intel_digital_port {
struct intel_encoder base;
enum port port;
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
@ -1080,7 +1083,7 @@ struct intel_dp_mst_encoder {
static inline enum dpio_channel
vlv_dport_to_channel(struct intel_digital_port *dport)
{
switch (dport->port) {
switch (dport->base.port) {
case PORT_B:
case PORT_D:
return DPIO_CH0;
@ -1094,7 +1097,7 @@ vlv_dport_to_channel(struct intel_digital_port *dport)
static inline enum dpio_phy
vlv_dport_to_phy(struct intel_digital_port *dport)
{
switch (dport->port) {
switch (dport->base.port) {
case PORT_B:
case PORT_C:
return DPIO_PHY0;
@ -1147,7 +1150,7 @@ enc_to_dig_port(struct drm_encoder *encoder)
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
switch (intel_encoder->type) {
case INTEL_OUTPUT_UNKNOWN:
case INTEL_OUTPUT_DDI:
WARN_ON(!HAS_DDI(to_i915(encoder->dev)));
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
@ -1271,7 +1274,6 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
void hsw_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
@ -1288,10 +1290,10 @@ bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
bool state);
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state);
u32 bxt_signal_levels(struct intel_dp *intel_dp);
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
@ -1304,7 +1306,9 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_audio_codec_disable(struct intel_encoder *encoder);
void intel_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
void intel_audio_init(struct drm_i915_private *dev_priv);
@ -1322,10 +1326,14 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
void intel_update_cdclk(struct drm_i915_private *dev_priv);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
bool intel_cdclk_state_compare(const struct intel_cdclk_state *a,
bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b);
bool intel_cdclk_changed(const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b);
void intel_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state);
void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
const char *context);
/* intel_display.c */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
@ -1477,8 +1485,8 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
bool intel_crtc_active(struct intel_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
enum intel_display_power_domain intel_port_to_power_domain(enum port port);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
@ -1491,6 +1499,8 @@ static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
return i915_ggtt_offset(state->vma);
}
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
@ -1521,7 +1531,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
int intel_dp_sink_crc(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
@ -1868,7 +1879,6 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
void gen6_rps_busy(struct drm_i915_private *dev_priv);


@ -662,11 +662,11 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
}
}
static void intel_dsi_port_enable(struct intel_encoder *encoder)
static void intel_dsi_port_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@ -705,7 +705,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
if (IS_BROXTON(dev_priv))
temp |= LANE_CONFIGURATION_DUAL_LINK_A;
else
temp |= intel_crtc->pipe ?
temp |= crtc->pipe ?
LANE_CONFIGURATION_DUAL_LINK_B :
LANE_CONFIGURATION_DUAL_LINK_A;
}
@ -875,7 +875,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
intel_dsi_port_enable(encoder);
intel_dsi_port_enable(encoder, pipe_config);
}
intel_panel_enable_backlight(pipe_config, conn_state);
@ -1082,7 +1082,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
struct drm_display_mode *adjusted_mode_sw;
struct intel_crtc *intel_crtc;
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
unsigned int lane_count = intel_dsi->lane_count;
unsigned int bpp, fmt;
@ -1093,8 +1093,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
crtc_hblank_start_sw, crtc_hblank_end_sw;
/* FIXME: hw readout should not depend on SW state */
intel_crtc = to_intel_crtc(encoder->base.crtc);
adjusted_mode_sw = &intel_crtc->config->base.adjusted_mode;
adjusted_mode_sw = &crtc->config->base.adjusted_mode;
/*
* At least one port is active, as encoder->get_config is called only if
@ -1243,6 +1242,8 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
u32 pclk;
DRM_DEBUG_KMS("\n");
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
if (IS_GEN9_LP(dev_priv))
bxt_dsi_get_pipe_config(encoder, pipe_config);


@ -159,6 +159,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp, flags = 0;
pipe_config->output_types |= BIT(INTEL_OUTPUT_DVO);
tmp = I915_READ(intel_dvo->dev.dvo_reg);
if (tmp & DVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;


@ -50,6 +50,8 @@ struct engine_class_info {
const char *name;
int (*init_legacy)(struct intel_engine_cs *engine);
int (*init_execlists)(struct intel_engine_cs *engine);
u8 uabi_class;
};
static const struct engine_class_info intel_engine_classes[] = {
@ -57,21 +59,25 @@ static const struct engine_class_info intel_engine_classes[] = {
.name = "rcs",
.init_execlists = logical_render_ring_init,
.init_legacy = intel_init_render_ring_buffer,
.uabi_class = I915_ENGINE_CLASS_RENDER,
},
[COPY_ENGINE_CLASS] = {
.name = "bcs",
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_blt_ring_buffer,
.uabi_class = I915_ENGINE_CLASS_COPY,
},
[VIDEO_DECODE_CLASS] = {
.name = "vcs",
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_bsd_ring_buffer,
.uabi_class = I915_ENGINE_CLASS_VIDEO,
},
[VIDEO_ENHANCEMENT_CLASS] = {
.name = "vecs",
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_vebox_ring_buffer,
.uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
},
};
@ -213,13 +219,15 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s%u",
class_info->name, info->instance) >=
sizeof(engine->name));
engine->uabi_id = info->uabi_id;
engine->hw_id = engine->guc_id = info->hw_id;
engine->mmio_base = info->mmio_base;
engine->irq_shift = info->irq_shift;
engine->class = info->class;
engine->instance = info->instance;
engine->uabi_id = info->uabi_id;
engine->uabi_class = class_info->uabi_class;
engine->context_size = __intel_engine_context_size(dev_priv,
engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
@ -281,6 +289,8 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
device_info->num_rings = hweight32(mask);
i915_check_and_clear_faults(dev_priv);
return 0;
cleanup:
@ -620,7 +630,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
* Similarly the preempt context must always be available so that
* we can interrupt the engine at any time.
*/
if (INTEL_INFO(engine->i915)->has_logical_ring_preemption) {
if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
ring = engine->context_pin(engine,
engine->i915->preempt_context);
if (IS_ERR(ring)) {
@ -633,25 +643,19 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
if (ret)
goto err_unpin_preempt;
ret = i915_gem_render_state_init(engine);
if (ret)
goto err_breadcrumbs;
if (HWS_NEEDS_PHYSICAL(engine->i915))
ret = init_phys_status_page(engine);
else
ret = init_status_page(engine);
if (ret)
goto err_rs_fini;
goto err_breadcrumbs;
return 0;
err_rs_fini:
i915_gem_render_state_fini(engine);
err_breadcrumbs:
intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
engine->context_unpin(engine, engine->i915->preempt_context);
err_unpin_kernel:
engine->context_unpin(engine, engine->i915->kernel_context);
@ -674,12 +678,14 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
else
cleanup_status_page(engine);
i915_gem_render_state_fini(engine);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
if (engine->default_state)
i915_gem_object_put(engine->default_state);
if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
engine->context_unpin(engine, engine->i915->preempt_context);
engine->context_unpin(engine, engine->i915->kernel_context);
}
@ -1014,22 +1020,6 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
/* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_DG_MIRROR_FIX_ENABLE);
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
/*
* WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
* but we do that in per ctx batchbuffer as there is an issue
* with this register not getting restored on ctx restore
*/
}
/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
@ -1045,11 +1035,6 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
/* WaDisableMaskBasedCammingInRCC:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
@ -1079,8 +1064,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
if (IS_SKYLAKE(dev_priv) ||
IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv) ||
IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
IS_COFFEELAKE(dev_priv))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
@ -1204,72 +1188,35 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
u32 val;
int ret;
ret = gen9_init_workarounds(engine);
if (ret)
return ret;
/* WaStoreMultiplePTEenable:bxt */
/* This is a requirement according to Hardware specification */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
/* WaSetClckGatingDisableMedia:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
}
/* WaDisableThreadStallDopClockGating:bxt */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
STALL_DOP_GATING_DISABLE);
/* WaDisablePooledEuLoadBalancingFix:bxt */
if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
I915_WRITE(FF_SLICE_CS_CHICKEN2,
_MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
}
/* WaDisableSbeCacheDispatchPortSharing:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
/* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
/* WaDisableLSQCROPERFforOCL:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
if (ret)
return ret;
ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
if (ret)
return ret;
}
I915_WRITE(FF_SLICE_CS_CHICKEN2,
_MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
/* WaProgramL3SqcReg1DefaultForPerf:bxt */
if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
u32 val = I915_READ(GEN8_L3SQCREG1);
val &= ~L3_PRIO_CREDITS_MASK;
val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
I915_WRITE(GEN8_L3SQCREG1, val);
}
val = I915_READ(GEN8_L3SQCREG1);
val &= ~L3_PRIO_CREDITS_MASK;
val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
I915_WRITE(GEN8_L3SQCREG1, val);
/* WaToEnableHwFixForPushConstHWBug:bxt */
if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaInPlaceDecompressionHang:bxt */
if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
(I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
(I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
return 0;
}
@ -1585,6 +1532,34 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
return true;
}
/**
* intel_engine_has_kernel_context:
* @engine: the engine
*
* Returns true if the kernel context (#i915.kernel_context) is the last
* context seen by this engine: the last context still queued on it, or,
* if the engine is already idle, the last context it executed.
*/
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
{
const struct i915_gem_context * const kernel_context =
engine->i915->kernel_context;
struct drm_i915_gem_request *rq;
lockdep_assert_held(&engine->i915->drm.struct_mutex);
/*
* Check the last context seen by the engine. If active, it will be
* the last request that remains in the timeline. When idle, it is
* the last executed context as tracked by retirement.
*/
rq = __i915_gem_active_peek(&engine->timeline->last_request);
if (rq)
return rq->ctx == kernel_context;
else
return engine->last_retired_context == kernel_context;
}
void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
@ -1594,19 +1569,63 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915)
engine->set_default_submission(engine);
}
void intel_engines_mark_idle(struct drm_i915_private *i915)
/**
* intel_engines_park: called when the GT is transitioning from busy->idle
* @i915: the i915 device
*
* The GT is now idle and about to go to sleep (maybe never to wake again?).
* Time for us to tidy and put away our toys (release resources back to the
* system).
*/
void intel_engines_park(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
/* Flush the residual irq tasklets first. */
intel_engine_disarm_breadcrumbs(engine);
tasklet_kill(&engine->execlists.tasklet);
/*
* We are now committed to parking the engines; make sure no more
* interrupts will arrive later and that the engines are truly idle.
*/
if (wait_for(intel_engine_is_idle(engine), 10)) {
struct drm_printer p = drm_debug_printer(__func__);
dev_err(i915->drm.dev,
"%s is not idle before parking\n",
engine->name);
intel_engine_dump(engine, &p);
}
if (engine->park)
engine->park(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
tasklet_kill(&engine->execlists.irq_tasklet);
engine->execlists.no_priolist = false;
}
}
/**
* intel_engines_unpark: called when the GT is transitioning from idle->busy
* @i915: the i915 device
*
* The GT was idle and now about to fire up with some new user requests.
*/
void intel_engines_unpark(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
if (engine->unpark)
engine->unpark(engine);
}
}
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
switch (INTEL_GEN(engine->i915)) {
@ -1622,6 +1641,20 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
}
}
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int which;
which = 0;
for_each_engine(engine, i915, id)
if (engine->default_state)
which |= BIT(engine->uabi_class);
return which;
}
static void print_request(struct drm_printer *m,
struct drm_i915_gem_request *rq,
const char *prefix)
@ -1688,9 +1721,14 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
drm_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
rq ? rq->ring->tail : 0);
drm_printf(m, "\tRING_CTL: 0x%08x [%s]\n",
drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
I915_READ(RING_CTL(engine->mmio_base)),
I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
if (INTEL_GEN(engine->i915) > 2) {
drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
I915_READ(RING_MI_MODE(engine->mmio_base)),
I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
}
rcu_read_unlock();
@ -1781,6 +1819,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
}
spin_unlock_irq(&b->rb_lock);
drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
drm_printf(m, "\n");
}


@ -23,6 +23,7 @@
*/
#include "intel_guc.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
static void gen8_guc_raise_irq(struct intel_guc *guc)
@ -268,7 +269,6 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
int intel_guc_suspend(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
struct i915_gem_context *ctx;
u32 data[3];
if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
@ -276,14 +276,33 @@ int intel_guc_suspend(struct drm_i915_private *dev_priv)
gen9_disable_guc_interrupts(dev_priv);
ctx = dev_priv->kernel_context;
data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
/* any value greater than GUC_POWER_D0 */
data[1] = GUC_POWER_D1;
/* first page is shared data with GuC */
data[2] = guc_ggtt_offset(ctx->engine[RCS].state) +
LRC_GUCSHR_PN * PAGE_SIZE;
data[2] = guc_ggtt_offset(guc->shared_data);
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
/**
* intel_guc_reset_engine() - ask GuC to reset an engine
* @guc: intel_guc structure
* @engine: engine to be reset
*/
int intel_guc_reset_engine(struct intel_guc *guc,
struct intel_engine_cs *engine)
{
u32 data[7];
GEM_BUG_ON(!guc->execbuf_client);
data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET;
data[1] = engine->guc_id;
data[2] = 0;
data[3] = 0;
data[4] = 0;
data[5] = guc->execbuf_client->stage_id;
data[6] = guc_ggtt_offset(guc->shared_data);
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
@ -295,7 +314,6 @@ int intel_guc_suspend(struct drm_i915_private *dev_priv)
int intel_guc_resume(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
struct i915_gem_context *ctx;
u32 data[3];
if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
@ -304,13 +322,9 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
if (i915_modparams.guc_log_level >= 0)
gen9_enable_guc_interrupts(dev_priv);
ctx = dev_priv->kernel_context;
data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
data[1] = GUC_POWER_D0;
/* first page is shared data with GuC */
data[2] = guc_ggtt_offset(ctx->engine[RCS].state) +
LRC_GUCSHR_PN * PAGE_SIZE;
data[2] = guc_ggtt_offset(guc->shared_data);
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}


@ -34,9 +34,14 @@
#include "i915_guc_reg.h"
#include "i915_vma.h"
struct guc_preempt_work {
struct work_struct work;
struct intel_engine_cs *engine;
};
/*
* Top level structure of GuC. It handles firmware loading and manages client
* pool and doorbells. intel_guc owns a i915_guc_client to replace the legacy
* pool and doorbells. intel_guc owns an intel_guc_client to replace the legacy
* ExecList submission.
*/
struct intel_guc {
@ -54,8 +59,14 @@ struct intel_guc {
struct i915_vma *stage_desc_pool;
void *stage_desc_pool_vaddr;
struct ida stage_ids;
struct i915_vma *shared_data;
void *shared_data_vaddr;
struct i915_guc_client *execbuf_client;
struct intel_guc_client *execbuf_client;
struct intel_guc_client *preempt_client;
struct guc_preempt_work preempt_work[I915_NUM_ENGINES];
struct workqueue_struct *preempt_wq;
DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
/* Cyclic counter mod pagesize */


@ -198,6 +198,7 @@ static int ctch_open(struct intel_guc *guc,
err = ctch_init(guc, ctch);
if (unlikely(err))
goto err_out;
GEM_BUG_ON(!ctch->vma);
}
/* vma should already be allocated and mapped */


@ -97,23 +97,50 @@ int intel_guc_fw_select(struct intel_guc *guc)
return 0;
}
/*
* Read the GuC status register (GUC_STATUS) and store it in the
* specified location; then return a boolean indicating whether
* the value matches either of two values representing completion
* of the GuC boot process.
*
* This is used for polling the GuC status in a wait_for()
* loop below.
*/
static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
u32 *status)
static void guc_prepare_xfer(struct intel_guc *guc)
{
u32 val = I915_READ(GUC_STATUS);
u32 uk_val = val & GS_UKERNEL_MASK;
*status = val;
return (uk_val == GS_UKERNEL_READY ||
((val & GS_MIA_CORE_STATE) && uk_val == GS_UKERNEL_LAPIC_DONE));
struct drm_i915_private *dev_priv = guc_to_i915(guc);
/* Must program this register before loading the ucode with DMA */
I915_WRITE(GUC_SHIM_CONTROL, GUC_DISABLE_SRAM_INIT_TO_ZEROES |
GUC_ENABLE_READ_CACHE_LOGIC |
GUC_ENABLE_MIA_CACHING |
GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
GUC_ENABLE_MIA_CLOCK_GATING);
if (IS_GEN9_LP(dev_priv))
I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
else
I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
if (IS_GEN9(dev_priv)) {
/* DOP Clock Gating Enable for GuC clocks */
I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
I915_READ(GEN7_MISCCPCTL)));
/* allows for 5us (in 10ns units) before GT can go to RC6 */
I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
}
}
/* Copy RSA signature from the fw image to HW for verification */
static int guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_uc_fw *guc_fw = &guc->fw;
struct sg_table *sg = vma->pages;
u32 rsa[UOS_RSA_SCRATCH_MAX_COUNT];
int i;
if (sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa),
guc_fw->rsa_offset) != sizeof(rsa))
return -EINVAL;
for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
return 0;
}
/*
@ -122,29 +149,19 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
* Architecturally, the DMA engine is bidirectional, and can potentially even
* transfer between GTT locations. This functionality is left out of the API
* for now as there is no need for it.
*
* Note that GuC needs the CSS header plus uKernel code to be copied by the
* DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
*/
static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
struct i915_vma *vma)
static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
{
struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_uc_fw *guc_fw = &guc->fw;
unsigned long offset;
struct sg_table *sg = vma->pages;
u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
int i, ret = 0;
u32 status;
int ret;
/* where RSA signature starts */
offset = guc_fw->rsa_offset;
/* Copy RSA signature from the fw image to HW for verification */
sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), offset);
for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
/* The header plus uCode will be copied to WOPCM via DMA, excluding any
* other components */
/*
* The header plus uCode will be copied to WOPCM via DMA, excluding any
* other components
*/
I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
/* Set the source address for the new blob */
@ -162,33 +179,62 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
/* Finally start the DMA */
I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
/* Wait for DMA to finish */
ret = __intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0,
2, 100, &status);
DRM_DEBUG_DRIVER("GuC DMA status %#x\n", status);
return ret;
}
/*
* Read the GuC status register (GUC_STATUS) and store it in the
* specified location; then return a boolean indicating whether
* the value matches either of two values representing completion
* of the GuC boot process.
*
* This is used for polling the GuC status in a wait_for()
* loop below.
*/
static inline bool guc_ready(struct intel_guc *guc, u32 *status)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
u32 val = I915_READ(GUC_STATUS);
u32 uk_val = val & GS_UKERNEL_MASK;
*status = val;
return (uk_val == GS_UKERNEL_READY) ||
((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE));
}
static int guc_wait_ucode(struct intel_guc *guc)
{
u32 status;
int ret;
/*
* Wait for the DMA to complete & the GuC to start up.
* Wait for the GuC to start up.
* NB: Docs recommend not using the interrupt for completion.
* Measurements indicate this should take no more than 20ms, so a
* timeout here indicates that the GuC has failed and is unusable.
* (Higher levels of the driver will attempt to fall back to
* execlist mode if this happens.)
*/
ret = wait_for(guc_ucode_response(dev_priv, &status), 100);
DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
I915_READ(DMA_CTRL), status);
ret = wait_for(guc_ready(guc, &status), 100);
DRM_DEBUG_DRIVER("GuC status %#x\n", status);
if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
DRM_ERROR("GuC firmware signature verification failed\n");
ret = -ENOEXEC;
}
DRM_DEBUG_DRIVER("returning %d\n", ret);
return ret;
}
/*
* Load the GuC firmware blob into the MinuteIA.
*/
static int guc_ucode_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
{
struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@ -198,34 +244,24 @@ static int guc_ucode_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* Enable MIA caching. GuC clock gating is disabled. */
I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
guc_prepare_xfer(guc);
/* WaDisableMinuteIaClockGating:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
~GUC_ENABLE_MIA_CLOCK_GATING));
}
/*
* Note that GuC needs the CSS header plus uKernel code to be copied
* by the DMA engine in one operation, whereas the RSA signature is
* loaded via MMIO.
*/
ret = guc_xfer_rsa(guc, vma);
if (ret)
DRM_WARN("GuC firmware signature xfer error %d\n", ret);
/* WaC6DisallowByGfxPause:bxt */
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
ret = guc_xfer_ucode(guc, vma);
if (ret)
DRM_WARN("GuC firmware code xfer error %d\n", ret);
if (IS_GEN9_LP(dev_priv))
I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
else
I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
if (IS_GEN9(dev_priv)) {
/* DOP Clock Gating Enable for GuC clocks */
I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
I915_READ(GEN7_MISCCPCTL)));
/* allows for 5us (in 10ns units) before GT can go to RC6 */
I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
}
ret = guc_ucode_xfer_dma(dev_priv, vma);
ret = guc_wait_ucode(guc);
if (ret)
DRM_ERROR("GuC firmware xfer error %d\n", ret);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@ -247,5 +283,5 @@ static int guc_ucode_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
*/
int intel_guc_fw_upload(struct intel_guc *guc)
{
return intel_uc_fw_upload(&guc->fw, guc_ucode_xfer);
return intel_uc_fw_upload(&guc->fw, guc_fw_xfer);
}


@ -544,9 +544,37 @@ union guc_log_control {
u32 value;
} __packed;
struct guc_ctx_report {
u32 report_return_status;
u32 reserved1[64];
u32 affected_count;
u32 reserved2[2];
} __packed;
/* GuC Shared Context Data Struct */
struct guc_shared_ctx_data {
u32 addr_of_last_preempted_data_low;
u32 addr_of_last_preempted_data_high;
u32 addr_of_last_preempted_data_high_tmp;
u32 padding;
u32 is_mapped_to_proxy;
u32 proxy_ctx_id;
u32 engine_reset_ctx_id;
u32 media_reset_count;
u32 reserved1[8];
u32 uk_last_ctx_switch_reason;
u32 was_reset;
u32 lrca_gpu_addr;
u64 execlist_ctx;
u32 reserved2[66];
struct guc_ctx_report preempt_ctx_report[GUC_MAX_ENGINES_NUM];
} __packed;
/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
enum intel_guc_action {
INTEL_GUC_ACTION_DEFAULT = 0x0,
INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2,
INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3,
INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
@ -562,6 +590,18 @@ enum intel_guc_action {
INTEL_GUC_ACTION_LIMIT
};
enum intel_guc_preempt_options {
INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q = 0x4,
INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8,
};
enum intel_guc_report_status {
INTEL_GUC_REPORT_STATUS_UNKNOWN = 0x0,
INTEL_GUC_REPORT_STATUS_ACKED = 0x1,
INTEL_GUC_REPORT_STATUS_ERROR = 0x2,
INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4,
};
/*
* The GuC sends its response to a command by overwriting the
* command in SS0. The response is distinguishable from a command


@ -52,7 +52,7 @@ struct drm_i915_private;
* queue (a circular array of work items), again described in the process
* descriptor. Work queue pages are mapped momentarily as required.
*/
struct i915_guc_client {
struct intel_guc_client {
struct i915_vma *vma;
void *vaddr;
struct i915_gem_context *owner;
@ -67,14 +67,15 @@ struct i915_guc_client {
u16 doorbell_id;
unsigned long doorbell_offset;
/* Protects GuC client's WQ access */
spinlock_t wq_lock;
/* Per-engine counts of GuC submissions */
u64 submissions[I915_NUM_ENGINES];
};
int i915_guc_submission_init(struct drm_i915_private *dev_priv);
int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
int intel_guc_submission_init(struct intel_guc *guc);
int intel_guc_submission_enable(struct intel_guc *guc);
void intel_guc_submission_disable(struct intel_guc *guc);
void intel_guc_submission_fini(struct intel_guc *guc);
#endif


@ -186,7 +186,7 @@ static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
@ -245,7 +245,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
@ -362,7 +362,7 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->port))
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
@ -538,7 +538,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
assert_hdmi_port_disabled(intel_hdmi);
@ -689,7 +689,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
assert_hdmi_port_disabled(intel_hdmi);
@ -785,7 +785,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
u32 port = VIDEO_DIP_PORT(intel_dig_port->port);
u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
assert_hdmi_port_disabled(intel_hdmi);
@ -960,6 +960,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
u32 tmp, flags = 0;
int dotclock;
pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
tmp = I915_READ(intel_hdmi->hdmi_reg);
if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
@ -1207,7 +1209,8 @@ static void g4x_disable_hdmi(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
intel_audio_codec_disable(encoder,
old_crtc_state, old_conn_state);
intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
}
@ -1217,7 +1220,8 @@ static void pch_disable_hdmi(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
intel_audio_codec_disable(encoder,
old_crtc_state, old_conn_state);
}
static void pch_post_disable_hdmi(struct intel_encoder *encoder,
@ -1227,24 +1231,34 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder,
intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
}
static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv)
static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
{
if (IS_G4X(dev_priv))
return 165000;
else if (IS_GEMINILAKE(dev_priv))
return 594000;
else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
return 300000;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[encoder->port];
int max_tmds_clock;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
max_tmds_clock = 594000;
else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
max_tmds_clock = 300000;
else if (INTEL_GEN(dev_priv) >= 5)
max_tmds_clock = 225000;
else
return 225000;
max_tmds_clock = 165000;
if (info->max_tmds_clock)
max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock);
return max_tmds_clock;
}
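
The reworked helper starts from a per-platform ceiling and then clamps it by the optional VBT limit for the port. A self-contained model of that clamping with made-up numbers (the platform and VBT values below are examples, not real board data):

/* Model of the TMDS clamp: platform ceiling, optionally reduced by VBT. */
#include <stdio.h>

static int max_tmds_clock(int platform_max_khz, int vbt_limit_khz)
{
	int max = platform_max_khz;

	if (vbt_limit_khz && vbt_limit_khz < max)	/* 0 means no VBT limit */
		max = vbt_limit_khz;
	return max;
}

int main(void)
{
	/* e.g. a 594 MHz-capable source with a 300 MHz limit in the VBT */
	printf("%d kHz\n", max_tmds_clock(594000, 300000));	/* 300000 */
	/* and the same source with no VBT entry at all */
	printf("%d kHz\n", max_tmds_clock(594000, 0));		/* 594000 */
	return 0;
}
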
static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
bool respect_downstream_limits,
bool force_dvi)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
int max_tmds_clock = intel_hdmi_source_max_tmds_clock(to_i915(dev));
struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
int max_tmds_clock = intel_hdmi_source_max_tmds_clock(encoder);
if (respect_downstream_limits) {
struct intel_connector *connector = hdmi->attached_connector;
@ -1339,6 +1353,12 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
if (HAS_GMCH_DISPLAY(dev_priv))
return false;
if (crtc_state->pipe_bpp <= 8*3)
return false;
if (!crtc_state->has_hdmi_sink)
return false;
/*
* HDMI 12bpc affects the clocks, so it's only possible
* when not cloning with other encoder types.
@ -1464,9 +1484,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
* outputs. We also need to check that the higher clock still fits
* within limits.
*/
if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink && !force_dvi &&
hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true, force_dvi) == MODE_OK &&
hdmi_12bpc_possible(pipe_config)) {
if (hdmi_12bpc_possible(pipe_config) &&
hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true, force_dvi) == MODE_OK) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
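
For context on the reordered condition above: clock_12bpc (computed earlier in the function, not visible in this hunk) is the 8bpc pixel clock scaled by 12/8, so the deeper color mode is only picked if that larger clock still passes the TMDS limit. A tiny worked example (the 297 MHz mode is just an illustration):

/* Worked example: scaling an 8bpc pixel clock up for a 12bpc HDMI output. */
#include <stdio.h>

int main(void)
{
	int clock_8bpc = 297000;		/* kHz, example 4K mode */
	int clock_12bpc = clock_8bpc * 3 / 2;	/* 445500 kHz */

	printf("12bpc port clock: %d kHz\n", clock_12bpc);
	return 0;
}
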
@ -1495,7 +1514,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->lane_count = 4;
if (scdc->scrambling.supported && IS_GEMINILAKE(dev_priv)) {
if (scdc->scrambling.supported && (INTEL_GEN(dev_priv) >= 10 ||
IS_GEMINILAKE(dev_priv))) {
if (scdc->scrambling.low_rates)
pipe_config->hdmi_scrambling = true;
@ -1529,7 +1549,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
enum port port = hdmi_to_dig_port(hdmi)->port;
enum port port = hdmi_to_dig_port(hdmi)->base.port;
struct i2c_adapter *adapter =
intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter);
@ -1613,12 +1633,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
intel_hdmi_unset_edid(connector);
if (intel_hdmi_set_edid(connector)) {
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
if (intel_hdmi_set_edid(connector))
status = connector_status_connected;
} else
else
status = connector_status_disconnected;
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
@ -1629,8 +1646,6 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
static void
intel_hdmi_force(struct drm_connector *connector)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@ -1640,7 +1655,6 @@ intel_hdmi_force(struct drm_connector *connector)
return;
intel_hdmi_set_edid(connector);
hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
}
static int intel_hdmi_get_modes(struct drm_connector *connector)
@ -1673,10 +1687,9 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
vlv_phy_pre_encoder_enable(encoder);
vlv_phy_pre_encoder_enable(encoder, pipe_config);
/* HDMI 1.0V-2dB */
vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
@ -1697,7 +1710,7 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
{
intel_hdmi_prepare(encoder, pipe_config);
vlv_phy_pre_pll_enable(encoder);
vlv_phy_pre_pll_enable(encoder, pipe_config);
}
static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
@ -1706,14 +1719,14 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
{
intel_hdmi_prepare(encoder, pipe_config);
chv_phy_pre_pll_enable(encoder);
chv_phy_pre_pll_enable(encoder, pipe_config);
}
static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
chv_phy_post_pll_disable(encoder);
chv_phy_post_pll_disable(encoder, old_crtc_state);
}
static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
@ -1721,7 +1734,7 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
vlv_phy_reset_lanes(encoder);
vlv_phy_reset_lanes(encoder, old_crtc_state);
}
static void chv_hdmi_post_disable(struct intel_encoder *encoder,
@ -1734,7 +1747,7 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder,
mutex_lock(&dev_priv->sb_lock);
/* Assert data lane reset */
chv_data_lane_soft_reset(encoder, true);
chv_data_lane_soft_reset(encoder, old_crtc_state, true);
mutex_unlock(&dev_priv->sb_lock);
}
@ -1747,7 +1760,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
chv_phy_pre_encoder_enable(encoder);
chv_phy_pre_encoder_enable(encoder, pipe_config);
/* FIXME: Program the support xxx V-dB */
/* Use 800mV-0dB */
@ -2006,7 +2019,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
enum port port = intel_encoder->port;
DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
port_name(port));
@ -2024,7 +2037,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
connector->doublescan_allowed = 0;
connector->stereo_allowed = 1;
if (IS_GEMINILAKE(dev_priv))
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
connector->ycbcr_420_allowed = true;
intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
@ -2126,7 +2139,6 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
if (IS_G4X(dev_priv))
intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
intel_dig_port->port = port;
intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = 4;

@ -151,7 +151,7 @@ static int huc_ucode_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
/* Wait for DMA to finish */
ret = wait_for((I915_READ(DMA_CTRL) & START_DMA) == 0, 100);
ret = intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0, 100);
DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);
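
The open-coded wait_for() is replaced by the uncore helper, but the idea is unchanged: poll a register until the START_DMA bit clears or a timeout expires. A self-contained user-space model of that pattern (the fake register and timings are invented for the sketch):

/* Model of "poll until a bit clears, with timeout"; -1 stands in for -ETIMEDOUT. */
#include <stdio.h>
#include <time.h>

#define START_DMA (1u << 0)

static unsigned int fake_reg = START_DMA;

static unsigned int read_reg(void)
{
	static int reads;

	if (++reads > 3)		/* pretend the DMA finishes eventually */
		fake_reg &= ~START_DMA;
	return fake_reg;
}

static int wait_for_clear(unsigned int mask, int timeout_ms)
{
	struct timespec tick = { .tv_sec = 0, .tv_nsec = 1000000 };

	while (timeout_ms--) {
		if (!(read_reg() & mask))
			return 0;
		nanosleep(&tick, NULL);
	}
	return -1;
}

int main(void)
{
	printf("wait returned %d\n", wait_for_clear(START_DMA, 100));
	return 0;
}
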

@ -136,6 +136,7 @@
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_render_state.h"
#include "intel_mocs.h"
#define RING_EXECLIST_QFULL (1 << 0x2)
@ -354,7 +355,7 @@ static void unwind_wa_tail(struct drm_i915_gem_request *rq)
assert_ring_tail_valid(rq->ring, rq->tail);
}
static void unwind_incomplete_requests(struct intel_engine_cs *engine)
static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *rq, *rn;
struct i915_priolist *uninitialized_var(p);
@ -385,6 +386,17 @@ static void unwind_incomplete_requests(struct intel_engine_cs *engine)
}
}
void
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
{
struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
spin_lock_irq(&engine->timeline->lock);
__unwind_incomplete_requests(engine);
spin_unlock_irq(&engine->timeline->lock);
}
static inline void
execlists_context_status_change(struct drm_i915_gem_request *rq,
unsigned long status)
@ -455,6 +467,11 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
port_set(&port[n], port_pack(rq, count));
desc = execlists_update_context(rq);
GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
GEM_TRACE("%s in[%d]: ctx=%d.%d, seqno=%x\n",
engine->name, n,
rq->ctx->hw_id, count,
rq->global_seqno);
} else {
GEM_BUG_ON(!n);
desc = 0;
@ -509,17 +526,13 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
ce->ring->tail &= (ce->ring->size - 1);
ce->lrc_reg_state[CTX_RING_TAIL+1] = ce->ring->tail;
GEM_TRACE("\n");
for (n = execlists_num_ports(&engine->execlists); --n; )
elsp_write(0, elsp);
elsp_write(ce->lrc_desc, elsp);
}
static bool can_preempt(struct intel_engine_cs *engine)
{
return INTEL_INFO(engine->i915)->has_logical_ring_preemption;
}
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@ -567,7 +580,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (port_count(&port[0]) > 1)
goto unlock;
if (can_preempt(engine) &&
if (HAS_LOGICAL_RING_PREEMPTION(engine->i915) &&
rb_entry(rb, struct i915_priolist, node)->priority >
max(last->priotree.priority, 0)) {
/*
@ -690,8 +703,8 @@ unlock:
}
}
static void
execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
{
struct execlist_port *port = execlists->port;
unsigned int num_ports = execlists_num_ports(execlists);
@ -718,7 +731,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
spin_lock_irqsave(&engine->timeline->lock, flags);
/* Cancel the requests on the HW and clear the ELSP tracker. */
execlist_cancel_port_requests(execlists);
execlists_cancel_port_requests(execlists);
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->timeline->requests, link) {
@ -768,7 +781,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
*/
static void intel_lrc_irq_handler(unsigned long data)
static void execlists_submission_tasklet(unsigned long data)
{
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists;
@ -826,6 +839,10 @@ static void intel_lrc_irq_handler(unsigned long data)
head = execlists->csb_head;
tail = READ_ONCE(buf[write_idx]);
}
GEM_TRACE("%s cs-irq head=%d [%d], tail=%d [%d]\n",
engine->name,
head, GEN8_CSB_READ_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))),
tail, GEN8_CSB_WRITE_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))));
while (head != tail) {
struct drm_i915_gem_request *rq;
@ -853,16 +870,16 @@ static void intel_lrc_irq_handler(unsigned long data)
*/
status = READ_ONCE(buf[2 * head]); /* maybe mmio! */
GEM_TRACE("%s csb[%dd]: status=0x%08x:0x%08x\n",
engine->name, head,
status, buf[2*head + 1]);
if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
continue;
if (status & GEN8_CTX_STATUS_ACTIVE_IDLE &&
buf[2*head + 1] == PREEMPT_ID) {
execlist_cancel_port_requests(execlists);
spin_lock_irq(&engine->timeline->lock);
unwind_incomplete_requests(engine);
spin_unlock_irq(&engine->timeline->lock);
execlists_cancel_port_requests(execlists);
execlists_unwind_incomplete_requests(execlists);
GEM_BUG_ON(!execlists_is_active(execlists,
EXECLISTS_ACTIVE_PREEMPT));
@ -883,6 +900,10 @@ static void intel_lrc_irq_handler(unsigned long data)
GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
rq = port_unpack(port, &count);
GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%x\n",
engine->name,
rq->ctx->hw_id, count,
rq->global_seqno);
GEM_BUG_ON(count == 0);
if (--count == 0) {
GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
@ -926,7 +947,7 @@ static void insert_request(struct intel_engine_cs *engine,
list_add_tail(&pt->link, &ptr_mask_bits(p, 1)->requests);
if (ptr_unmask_bits(p, 1))
tasklet_hi_schedule(&engine->execlists.irq_tasklet);
tasklet_hi_schedule(&engine->execlists.tasklet);
}
static void execlists_submit_request(struct drm_i915_gem_request *request)
@ -1057,12 +1078,34 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
spin_unlock_irq(&engine->timeline->lock);
}
static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
{
unsigned int flags;
int err;
/*
* Clear this page out of any CPU caches for coherent swap-in/out.
* We only want to do this on the first bind so that we do not stall
* on an active context (which by nature is already on the GPU).
*/
if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
if (err)
return err;
}
flags = PIN_GLOBAL | PIN_HIGH;
if (ctx->ggtt_offset_bias)
flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias;
return i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags);
}
static struct intel_ring *
execlists_context_pin(struct intel_engine_cs *engine,
struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
unsigned int flags;
void *vaddr;
int ret;
@ -1079,11 +1122,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
}
GEM_BUG_ON(!ce->state);
flags = PIN_GLOBAL | PIN_HIGH;
if (ctx->ggtt_offset_bias)
flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias;
ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN, flags);
ret = __context_pin(ctx, ce->state);
if (ret)
goto err;
@ -1103,9 +1142,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
i915_ggtt_offset(ce->ring->vma);
ce->state->obj->mm.dirty = true;
ce->state->obj->pin_global++;
i915_gem_context_get(ctx);
out:
return ce->ring;
@ -1143,7 +1180,6 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct intel_context *ce = &request->ctx->engine[engine->id];
u32 *cs;
int ret;
GEM_BUG_ON(!ce->pin_count);
@ -1154,17 +1190,9 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
*/
request->reserved_space += EXECLISTS_REQUEST_SIZE;
cs = intel_ring_begin(request, 0);
if (IS_ERR(cs))
return PTR_ERR(cs);
if (!ce->initialised) {
ret = engine->init_context(request);
if (ret)
return ret;
ce->initialised = true;
}
ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
if (ret)
return ret;
/* Note that after this point, we have committed to using
* this request as it is being used to both track the
@ -1474,8 +1502,8 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
execlists->active = 0;
/* After a GPU reset, we may have requests to replay */
if (!i915_modparams.enable_guc_submission && execlists->first)
tasklet_schedule(&execlists->irq_tasklet);
if (execlists->first)
tasklet_schedule(&execlists->tasklet);
return 0;
}
@ -1531,10 +1559,10 @@ static void reset_common_ring(struct intel_engine_cs *engine,
* guessing the missed context-switch events by looking at what
* requests were completed.
*/
execlist_cancel_port_requests(execlists);
execlists_cancel_port_requests(execlists);
/* Push back any incomplete requests for replay after the reset. */
unwind_incomplete_requests(engine);
__unwind_incomplete_requests(engine);
spin_unlock_irqrestore(&engine->timeline->lock, flags);
@ -1794,10 +1822,8 @@ static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
*cs++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
*cs++ = 0;
*cs++ = request->global_seqno;
cs = gen8_emit_ggtt_write(cs, request->global_seqno,
intel_hws_seqno_address(request->engine));
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
request->tail = intel_ring_offset(request, cs);
@ -1807,24 +1833,14 @@ static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
}
static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
static void gen8_emit_breadcrumb_rcs(struct drm_i915_gem_request *request,
u32 *cs)
{
/* We're using qword write, seqno should be aligned to 8 bytes. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
/* w/a for post sync ops following a GPGPU operation we
* need a prior CS_STALL, which is emitted by the flush
* following the batch.
*/
*cs++ = GFX_OP_PIPE_CONTROL(6);
*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE;
*cs++ = intel_hws_seqno_address(request->engine);
*cs++ = 0;
*cs++ = request->global_seqno;
/* We're thrashing one dword of HWS. */
*cs++ = 0;
cs = gen8_emit_ggtt_write_rcs(cs, request->global_seqno,
intel_hws_seqno_address(request->engine));
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
request->tail = intel_ring_offset(request, cs);
@ -1832,7 +1848,7 @@ static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
gen8_emit_wa_tail(request, cs);
}
static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS;
static const int gen8_emit_breadcrumb_rcs_sz = 8 + WA_TAIL_DWORDS;
static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
{
@ -1865,8 +1881,9 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
* Tasklet cannot be active at this point due intel_mark_active/idle
* so this is just for documentation.
*/
if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->execlists.irq_tasklet.state)))
tasklet_kill(&engine->execlists.irq_tasklet);
if (WARN_ON(test_bit(TASKLET_STATE_SCHED,
&engine->execlists.tasklet.state)))
tasklet_kill(&engine->execlists.tasklet);
dev_priv = engine->i915;
@ -1890,7 +1907,10 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
engine->submit_request = execlists_submit_request;
engine->cancel_requests = execlists_cancel_requests;
engine->schedule = execlists_schedule;
engine->execlists.irq_tasklet.func = intel_lrc_irq_handler;
engine->execlists.tasklet.func = execlists_submission_tasklet;
engine->park = NULL;
engine->unpark = NULL;
}
static void
@ -1949,8 +1969,8 @@ logical_ring_setup(struct intel_engine_cs *engine)
engine->execlists.fw_domains = fw_domains;
tasklet_init(&engine->execlists.irq_tasklet,
intel_lrc_irq_handler, (unsigned long)engine);
tasklet_init(&engine->execlists.tasklet,
execlists_submission_tasklet, (unsigned long)engine);
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
@ -1988,8 +2008,8 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
engine->init_hw = gen8_init_render_ring;
engine->init_context = gen8_init_rcs_context;
engine->emit_flush = gen8_emit_flush_render;
engine->emit_breadcrumb = gen8_emit_breadcrumb_render;
engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz;
engine->emit_breadcrumb = gen8_emit_breadcrumb_rcs;
engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_rcs_sz;
ret = intel_engine_create_scratch(engine, PAGE_SIZE);
if (ret)
@ -2106,7 +2126,6 @@ static void execlists_init_reg_state(u32 *regs,
CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
(HAS_RESOURCE_STREAMER(dev_priv) ?
CTX_CTRL_RS_CTX_ENABLE : 0)));
CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0);
@ -2183,6 +2202,7 @@ populate_lr_context(struct i915_gem_context *ctx,
struct intel_ring *ring)
{
void *vaddr;
u32 *regs;
int ret;
ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
@ -2199,11 +2219,31 @@ populate_lr_context(struct i915_gem_context *ctx,
}
ctx_obj->mm.dirty = true;
if (engine->default_state) {
/*
* We only want to copy over the template context state;
* skipping over the headers reserved for GuC communication,
* leaving those as zero.
*/
const unsigned long start = LRC_HEADER_PAGES * PAGE_SIZE;
void *defaults;
defaults = i915_gem_object_pin_map(engine->default_state,
I915_MAP_WB);
if (IS_ERR(defaults))
return PTR_ERR(defaults);
memcpy(vaddr + start, defaults + start, engine->context_size);
i915_gem_object_unpin_map(engine->default_state);
}
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
ctx, engine, ring);
regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
execlists_init_reg_state(regs, ctx, engine, ring);
if (!engine->default_state)
regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
i915_gem_object_unpin_map(ctx_obj);
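
The new default-state path above copies a template context image but deliberately skips the leading pages reserved for GuC communication, leaving them zeroed. A stand-alone model of that selective copy (page count and sizes are invented for the sketch):

/* Model: copy a template image into a new context, skipping header pages. */
#include <stdio.h>
#include <string.h>

#define FAKE_PAGE_SIZE		4096
#define FAKE_HEADER_PAGES	1	/* stands in for LRC_HEADER_PAGES */

int main(void)
{
	static unsigned char defaults[4 * FAKE_PAGE_SIZE];
	static unsigned char ctx[4 * FAKE_PAGE_SIZE];	/* zero-initialized */
	size_t start = FAKE_HEADER_PAGES * FAKE_PAGE_SIZE;

	memset(defaults, 0xaa, sizeof(defaults));	/* fake template state */
	memcpy(ctx + start, defaults + start, sizeof(ctx) - start);

	printf("header byte %#x, state byte %#x\n",
	       (unsigned int)ctx[0], (unsigned int)ctx[start]);
	return 0;
}
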
@ -2256,7 +2296,6 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
ce->ring = ring;
ce->state = vma;
ce->initialised |= engine->init_context == NULL;
return 0;

@ -107,7 +107,6 @@ intel_lr_context_descriptor(struct i915_gem_context *ctx,
return ctx->engine[engine->id].lrc_desc;
}
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
int enable_execlists);

@ -125,6 +125,8 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
u32 tmp, flags = 0;
pipe_config->output_types |= BIT(INTEL_OUTPUT_LVDS);
tmp = I915_READ(lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
flags |= DRM_MODE_FLAG_NHSYNC;

@ -367,7 +367,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
if (intel_encoder->type == INTEL_OUTPUT_DSI)
port = 0;
else
port = intel_ddi_get_encoder_port(intel_encoder);
port = intel_encoder->port;
if (port == PORT_E) {
port = 0;
@ -383,7 +383,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
case INTEL_OUTPUT_ANALOG:
type = DISPLAY_TYPE_CRT;
break;
case INTEL_OUTPUT_UNKNOWN:
case INTEL_OUTPUT_DDI:
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_DP_MST:
