Merge tag 'drm-intel-fixes-2021-01-07' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

drm/i915 fixes for v5.11-rc3:
- Use per-connector PM QoS tracking for DP aux communication
- GuC firmware fix for older Cometlakes
- Clear the gpu reloc and shadow batches

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/877dop18zf.fsf@intel.com
commit 4e181dede9
@@ -1436,6 +1436,9 @@ struct intel_dp {
 		bool ycbcr_444_to_420;
 	} dfp;
 
+	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+	struct pm_qos_request pm_qos;
+
 	/* Display stream compression testing */
 	bool force_dsc_en;
 
@@ -1489,7 +1489,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
 	 * lowest possible wakeup latency and so prevent the cpu from going into
 	 * deep sleep states.
 	 */
-	cpu_latency_qos_update_request(&i915->pm_qos, 0);
+	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
 
 	intel_dp_check_edp(intel_dp);
 
@@ -1622,7 +1622,7 @@ done:
 
 	ret = recv_bytes;
 out:
-	cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
+	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
 
 	if (vdd)
 		edp_panel_vdd_off(intel_dp, false);
@@ -1898,6 +1898,9 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
 static void
 intel_dp_aux_fini(struct intel_dp *intel_dp)
 {
+	if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
+		cpu_latency_qos_remove_request(&intel_dp->pm_qos);
+
 	kfree(intel_dp->aux.name);
 }
 
@@ -1950,6 +1953,7 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
 		       encoder->base.name);
 
 	intel_dp->aux.transfer = intel_dp_aux_transfer;
+	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
 }
 
 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
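The five hunks above implement the first fix in the tag: the cpu-latency QoS request moves from the device into struct intel_dp, so each connector registers its own request in intel_dp_aux_init(), clamps it around an aux transfer, and drops it in intel_dp_aux_fini(). A minimal sketch of that lifecycle as a self-contained kernel module, using only the linux/pm_qos.h API visible in the diff (the demo_* names are invented for illustration):

#include <linux/module.h>
#include <linux/pm_qos.h>

/* Stand-in for a connector that owns its own latency request. */
struct demo_connector {
	struct pm_qos_request pm_qos;
};

static struct demo_connector demo;

static void demo_transfer(struct demo_connector *c)
{
	/* Forbid deep C-states while an irq-driven transfer is in flight. */
	cpu_latency_qos_update_request(&c->pm_qos, 0);
	/* ... perform the transfer ... */
	cpu_latency_qos_update_request(&c->pm_qos, PM_QOS_DEFAULT_VALUE);
}

static int __init demo_init(void)
{
	/* As in intel_dp_aux_init(): register with no constraint yet. */
	cpu_latency_qos_add_request(&demo.pm_qos, PM_QOS_DEFAULT_VALUE);
	demo_transfer(&demo);
	return 0;
}

static void __exit demo_exit(void)
{
	/* As in intel_dp_aux_fini(): drop the request if still registered. */
	if (cpu_latency_qos_request_active(&demo.pm_qos))
		cpu_latency_qos_remove_request(&demo.pm_qos);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Tying the request to the connector also matches the removal of the device-level request from i915_driver_hw_probe()/i915_driver_hw_remove() and struct drm_i915_private in the hunks further down.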
@@ -1046,7 +1046,7 @@ static void reloc_gpu_flush(struct i915_execbuffer *eb, struct reloc_cache *cache)
 	GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
 	cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
 
-	__i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
+	i915_gem_object_flush_map(obj);
 	i915_gem_object_unpin_map(obj);
 
 	intel_gt_chipset_flush(cache->rq->engine->gt);
@@ -1296,6 +1296,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		goto err_pool;
 	}
 
+	memset32(cmd, 0, pool->obj->base.size / sizeof(u32));
+
 	batch = i915_vma_instance(pool->obj, vma->vm, NULL);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
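Both reloc hunks are one fix: the pool buffer is zeroed with memset32() right after it is mapped, so the simpler i915_gem_object_flush_map() can flush the whole object instead of only the bytes up to MI_BATCH_BUFFER_END, and no stale pool contents are ever handed to the GPU. A hedged userspace sketch of the clear-then-emit pattern (memset32 is re-implemented locally since the kernel helper from linux/string.h is unavailable there; the terminator encoding follows i915's MI_INSTR(0x0a, 0) and is illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's memset32() from <linux/string.h>. */
static void memset32(uint32_t *s, uint32_t v, size_t count)
{
	while (count--)
		*s++ = v;
}

int main(void)
{
	uint32_t batch[64];                    /* stand-in for the pool object */
	const uint32_t batch_end = 0x0a << 23; /* MI_INSTR(0x0a, 0), illustrative */
	size_t used = 0;

	/* Clear the whole buffer up front, as __reloc_gpu_alloc() now does... */
	memset32(batch, 0, sizeof(batch) / sizeof(batch[0]));

	/* ...then emit commands; everything past the terminator is already 0. */
	batch[used++] = batch_end;

	printf("emitted %zu dword(s); tail is zero-filled\n", used);
	return 0;
}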
@@ -53,6 +53,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
 	fw_def(ELKHARTLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl, 9, 0, 0)) \
 	fw_def(ICELAKE, 0, guc_def(icl, 49, 0, 1), huc_def(icl, 9, 0, 0)) \
 	fw_def(COMETLAKE, 5, guc_def(cml, 49, 0, 1), huc_def(cml, 4, 0, 0)) \
+	fw_def(COMETLAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
 	fw_def(COFFEELAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
 	fw_def(GEMINILAKE, 0, guc_def(glk, 49, 0, 1), huc_def(glk, 4, 0, 0)) \
 	fw_def(KABYLAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
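The GuC fix is the single added row: Cometlake parts below stepping 5 now fall back to the Kabylake firmware (guc_def(kbl, 49, 0, 1)), while stepping 5 and up keep the dedicated cml blobs; ordering matters because the first matching row wins. The fw_def list is an X-macro table, and a small standalone illustration of the pattern follows (macro names and output format are invented, only the platform/stepping logic mirrors the diff):

#include <stdio.h>

/* X-macro table in the style of the fw_def list: platform, minimum
 * stepping/revid, firmware prefix. More specific rows come first. */
#define FW_TABLE(entry) \
	entry(COMETLAKE, 5, "cml") \
	entry(COMETLAKE, 0, "kbl") \
	entry(COFFEELAKE, 0, "kbl")

#define PRINT_ROW(plat, rev, prefix) \
	printf("%-11s rev >= %d -> %s firmware\n", #plat, rev, prefix);

int main(void)
{
	/* Expands to one printf per row, in table order. */
	FW_TABLE(PRINT_ROW)
	return 0;
}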
@@ -1166,7 +1166,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 		}
 	}
 	if (IS_ERR(src)) {
-		unsigned long x, n;
+		unsigned long x, n, remain;
 		void *ptr;
 
 		/*
@@ -1177,14 +1177,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 		 * We don't care about copying too much here as we only
 		 * validate up to the end of the batch.
 		 */
+		remain = length;
 		if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
-			length = round_up(length,
+			remain = round_up(remain,
 					  boot_cpu_data.x86_clflush_size);
 
 		ptr = dst;
 		x = offset_in_page(offset);
-		for (n = offset >> PAGE_SHIFT; length; n++) {
-			int len = min(length, PAGE_SIZE - x);
+		for (n = offset >> PAGE_SHIFT; remain; n++) {
+			int len = min(remain, PAGE_SIZE - x);
 
 			src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
 			if (needs_clflush)
@@ -1193,13 +1194,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 			kunmap_atomic(src);
 
 			ptr += len;
-			length -= len;
+			remain -= len;
 			x = 0;
 		}
 	}
 
 	i915_gem_object_unpin_pages(src_obj);
 
+	memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));
+
 	/* dst_obj is returned with vmap pinned */
 	return dst;
 }
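The three copy_batch() hunks fix a length mix-up: the byte count was rounded up to the clflush granularity for the copy loop, destroying the real batch length that the new tail memset32() needs. Keeping the loop counter (remain) separate from the true length lets the copy overrun harmlessly while the zero-fill still starts exactly at the end of the batch. A standalone sketch of that separation (buffer sizes and the copy stand-in are invented):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CLFLUSH_SIZE 64 /* stand-in for boot_cpu_data.x86_clflush_size */

static size_t round_up_to(size_t v, size_t a)
{
	return (v + a - 1) / a * a;
}

int main(void)
{
	uint8_t dst[4096];
	size_t length = 100; /* true size of the batch being copied */
	size_t remain = round_up_to(length, CLFLUSH_SIZE);

	/* The copy loop consumes `remain`; copying a little too much is
	 * fine since validation stops at the end of the batch anyway. */
	memset(dst, 0xaa, remain); /* stands in for the per-page copy loop */

	/* The tail is cleared from the unrounded `length`, so no stale
	 * bytes survive between the batch end and the end of the object. */
	memset(dst + length, 0, sizeof(dst) - length);

	printf("copied %zu bytes, zeroed tail from offset %zu\n", remain, length);
	return 0;
}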
@@ -1392,11 +1395,6 @@ static unsigned long *alloc_whitelist(u32 batch_length)
 
 #define LENGTH_BIAS 2
 
-static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
-{
-	return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
-}
-
 /**
  * intel_engine_cmd_parser() - parse a batch buffer for privilege violations
  * @engine: the engine on which the batch is to execute
@@ -1538,16 +1536,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 				ret = 0; /* allow execution */
 			}
 		}
-
-		if (shadow_needs_clflush(shadow->obj))
-			drm_clflush_virt_range(batch_end, 8);
 	}
 
-	if (shadow_needs_clflush(shadow->obj)) {
-		void *ptr = page_mask_bits(shadow->obj->mm.mapping);
-
-		drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
-	}
+	i915_gem_object_flush_map(shadow->obj);
 
 	if (!IS_ERR_OR_NULL(jump_whitelist))
 		kfree(jump_whitelist);
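With copy_batch() now zeroing the shadow's tail, the parser no longer needs the shadow_needs_clflush() helper removed above, nor the two hand-computed drm_clflush_virt_range() calls; a single i915_gem_object_flush_map() on the way out covers the whole mapping. A compact sketch of the before/after shape (types and the flush stub are invented, only the range arithmetic mirrors the removed code):

#include <stddef.h>
#include <stdint.h>

struct shadow { uint32_t *map; size_t count; };

/* Stub standing in for drm_clflush_virt_range()/i915_gem_object_flush_map(). */
static void flush_range(void *p, size_t len) { (void)p; (void)len; }

/* Before: two flushes over separately computed sub-ranges, which is
 * easy to get wrong whenever the parsed length changes. */
static void flush_old(struct shadow *s, uint32_t *batch_end, uint32_t *cmd)
{
	flush_range(batch_end, 8);
	flush_range(s->map, (char *)(cmd + 1) - (char *)s->map);
}

/* After: one unconditional flush of the entire shadow mapping. */
static void flush_new(struct shadow *s)
{
	flush_range(s->map, s->count * sizeof(*s->map));
}

int main(void)
{
	uint32_t buf[32] = { 0 };
	struct shadow s = { buf, 32 };

	flush_old(&s, buf + 8, buf + 8);
	flush_new(&s);
	return 0;
}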
@@ -578,8 +578,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 
 	pci_set_master(pdev);
 
-	cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
-
 	intel_gt_init_workarounds(dev_priv);
 
 	/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -626,7 +624,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 err_msi:
 	if (pdev->msi_enabled)
 		pci_disable_msi(pdev);
-	cpu_latency_qos_remove_request(&dev_priv->pm_qos);
 err_mem_regions:
 	intel_memory_regions_driver_release(dev_priv);
 err_ggtt:
@@ -648,8 +645,6 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
 
 	if (pdev->msi_enabled)
 		pci_disable_msi(pdev);
 
-	cpu_latency_qos_remove_request(&dev_priv->pm_qos);
 }
 
 /**
@@ -891,9 +891,6 @@ struct drm_i915_private {
 
 	bool display_irqs_enabled;
 
-	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
-	struct pm_qos_request pm_qos;
-
 	/* Sideband mailbox protection */
 	struct mutex sb_lock;
 	struct pm_qos_request sb_qos;