Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-12-18 08:35:08 +08:00)
drm fixes for 6.8-rc4

Merge tag 'drm-fixes-2024-02-09' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Regular weekly fixes, xe, amdgpu and msm are most of them, with some
  misc in i915, ivpu and nouveau, scattered but nothing too intense at
  this point.

  i915:
   - gvt: docs fix, uninit var, MAINTAINERS

  ivpu:
   - add aborted job status
   - disable d3 hot delay
   - mmu fixes

  nouveau:
   - fix gsp rpc size request
   - fix dma buffer leaks
   - use common code for gsp mem ctor

  xe:
   - Fix a loop in an error path
   - Fix a missing dma-fence reference
   - Fix a retry path on userptr REMAP
   - Workaround for a false gcc warning
   - Fix missing map of the usm batch buffer in the migrate vm
   - Fix a memory leak
   - Fix a bad assumption of used page size
   - Fix hitting a BUG() due to zero pages to map
   - Remove some leftover async bind queue relics

  amdgpu:
   - Misc NULL/bounds check fixes
   - ODM pipe policy fix
   - Aborted suspend fixes
   - JPEG 4.0.5 fix
   - DCN 3.5 fixes
   - PSP fix
   - DP MST fix
   - Phantom pipe fix
   - VRAM vendor fix
   - Clang fix
   - SR-IOV fix

  msm:
   - DPU:
     - fix for kernel doc warnings and smatch warnings in dpu_encoder
     - fix for smatch warning in dpu_encoder
     - fix the bus bandwidth value for SDM670
   - DP:
     - fixes to handle unknown bpc case correctly for DP
     - fix for MISC0 programming
   - GPU:
     - dmabuf vmap fix
     - a610 UBWC corruption fix (incorrect hbb)
     - revert a commit that was making GPU recovery unreliable"

* tag 'drm-fixes-2024-02-09' of git://anongit.freedesktop.org/drm/drm: (43 commits)
  drm/xe: Remove TEST_VM_ASYNC_OPS_ERROR
  drm/xe/vm: don't ignore error when in_kthread
  drm/xe: Assume large page size if VMA not yet bound
  drm/xe/display: Fix memleak in display initialization
  drm/xe: Map both mem.kernel_bb_pool and usm.bb_pool
  drm/xe: circumvent bogus stringop-overflow warning
  drm/xe: Pick correct userptr VMA to repin on REMAP op failure
  drm/xe: Take a reference in xe_exec_queue_last_fence_get()
  drm/xe: Fix loop in vm_bind_ioctl_ops_unwind
  drm/amdgpu: Fix HDP flush for VFs on nbio v7.9
  drm/amd/display: Implement bounds check for stream encoder creation in DCN301
  drm/amd/display: Increase frame-larger-than for all display_mode_vba files
  drm/amd/display: Clear phantom stream count and plane count
  drm/amdgpu: Avoid fetching VRAM vendor info
  drm/amd/display: Disable ODM by default for DCN35
  drm/amd/display: Update phantom pipe enable / disable sequence
  drm/amd/display: Fix MST Null Ptr for RV
  drm/amdgpu: Fix shared buff copy to user
  drm/amd/display: Increase eval/entry delay for DCN35
  drm/amdgpu: remove asymmetrical irq disabling in jpeg 4.0.5 suspend
  ...
This commit is contained in: commit c76b766ec5
@@ -10801,11 +10801,11 @@ F:	drivers/gpio/gpio-tangier.h (MAINTAINERS)

 INTEL GVT-g DRIVERS (Intel GPU Virtualization)
 M:	Zhenyu Wang <zhenyuw@linux.intel.com>
-M:	Zhi Wang <zhi.a.wang@intel.com>
+M:	Zhi Wang <zhi.wang.linux@gmail.com>
 L:	intel-gvt-dev@lists.freedesktop.org
 L:	intel-gfx@lists.freedesktop.org
 S:	Supported
-W:	https://01.org/igvt-g
+W:	https://github.com/intel/gvt-linux/wiki
 T:	git https://github.com/intel/gvt-linux.git
 F:	drivers/gpu/drm/i915/gvt/
@@ -480,9 +480,8 @@ static int ivpu_pci_init(struct ivpu_device *vdev)
    /* Clear any pending errors */
    pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);

-   /* VPU 37XX does not require 10m D3hot delay */
-   if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
-       pdev->d3hot_delay = 0;
+   /* NPU does not require 10m D3hot delay */
+   pdev->d3hot_delay = 0;

    ret = pcim_enable_device(pdev);
    if (ret) {

@@ -222,7 +222,6 @@ ivpu_fw_init_wa(struct ivpu_device *vdev)
    const struct vpu_firmware_header *fw_hdr = (const void *)vdev->fw->file->data;

    if (IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, BOOT, 3, 17) ||
-       (ivpu_hw_gen(vdev) > IVPU_HW_37XX) ||
        (ivpu_test_mode & IVPU_TEST_MODE_D0I3_MSG_DISABLE))
        vdev->wa.disable_d0i3_msg = true;

@@ -525,7 +525,7 @@ static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)   (37XX)
    u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);

    val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
-   val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
+   val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
    val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);

    REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);

@@ -530,7 +530,7 @@ static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)   (40XX)
    u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);

    val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
-   val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
+   val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
    val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);

    REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);

@@ -704,7 +704,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
@@ -725,10 +724,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
    The unused SKU boot-parameter setup is dropped from ivpu_hw_40xx_info_init():
    the local "u32 tile_enable", the "tile_enable = (~tile_disable) & TILE_MAX_MASK;"
    computation and the two REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, ...) /
    REG_SET_FLD_NUM(SKU, TILE, tile_enable, ...) assignments to hw->sku are removed.
    The fuse read, the "Fuse: All %d tiles enabled" debug print, hw->tile_fuse =
    tile_disable and hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT remain.

@@ -294,7 +294,7 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
    if (job->file_priv->has_mmu_faults)
-       job_status = VPU_JSM_STATUS_ABORTED;
+       job_status = DRM_IVPU_JOB_STATUS_ABORTED;

    job->bos[CMD_BUF_IDX]->job_status = job_status;
    dma_fence_signal(job->done_fence);

@@ -315,7 +315,7 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
    xa_for_each(&vdev->submitted_jobs_xa, id, job)
-       ivpu_job_signal_and_destroy(vdev, id, VPU_JSM_STATUS_ABORTED);
+       ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
 }

 static int ivpu_job_submit(struct ivpu_job *job)
@@ -72,10 +72,10 @@ (ivpu MMU queue defines)
 #define IVPU_MMU_Q_COUNT_LOG2	4 /* 16 entries */
 #define IVPU_MMU_Q_COUNT	((u32)1 << IVPU_MMU_Q_COUNT_LOG2)
-#define IVPU_MMU_Q_WRAP_BIT	(IVPU_MMU_Q_COUNT << 1)
-#define IVPU_MMU_Q_WRAP_MASK	(IVPU_MMU_Q_WRAP_BIT - 1)
-#define IVPU_MMU_Q_IDX_MASK	(IVPU_MMU_Q_COUNT - 1)
+#define IVPU_MMU_Q_WRAP_MASK	GENMASK(IVPU_MMU_Q_COUNT_LOG2, 0)
+#define IVPU_MMU_Q_IDX_MASK	(IVPU_MMU_Q_COUNT - 1)
 #define IVPU_MMU_Q_IDX(val)	((val) & IVPU_MMU_Q_IDX_MASK)
+#define IVPU_MMU_Q_WRP(val)	((val) & IVPU_MMU_Q_COUNT)

 #define IVPU_MMU_CMDQ_CMD_SIZE	16
 #define IVPU_MMU_CMDQ_SIZE	(IVPU_MMU_Q_COUNT * IVPU_MMU_CMDQ_CMD_SIZE)

@@ -475,20 +475,32 @@ static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
    Two helpers are added after ivpu_mmu_cmdq_wait_for_cons():

    static bool ivpu_mmu_queue_is_full(struct ivpu_mmu_queue *q)
    {
        return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
                (IVPU_MMU_Q_WRP(q->prod) != IVPU_MMU_Q_WRP(q->cons)));
    }

    static bool ivpu_mmu_queue_is_empty(struct ivpu_mmu_queue *q)
    {
        return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
                (IVPU_MMU_Q_WRP(q->prod) == IVPU_MMU_Q_WRP(q->cons)));
    }

    In ivpu_mmu_cmdq_cmd_write() the local queue pointer is renamed from q to cmdq,
    the old CIRC_SPACE(IVPU_MMU_Q_IDX(q->prod), IVPU_MMU_Q_IDX(q->cons),
    IVPU_MMU_Q_COUNT) fullness check is replaced by ivpu_mmu_queue_is_full(cmdq),
    and the producer index keeps its wrap bit:
        cmdq->prod = (cmdq->prod + 1) & IVPU_MMU_Q_WRAP_MASK;

@@ -560,7 +572,6 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
    A one-line removal in the command/event queue reset sequence (the hunk shows
    cmdq.cons = 0, the evtq memset/clflush_cache_range and evtq.prod/cons = 0 around it).

@@ -874,14 +885,10 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
    evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
-   if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
+   if (ivpu_mmu_queue_is_empty(evtq))
        return NULL;

    clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
    evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
-   REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, evtq->cons);
    return evt;

@@ -902,6 +909,7 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
    The consumer-register write moves into the IRQ handler instead:
    ivpu_mmu_user_context_mark_invalid(vdev, ssid);
+   REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
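The ivpu_mmu_queue_is_full()/is_empty() helpers above use the usual SMMU-style trick of keeping one extra wrap bit above the index bits, so a completely full queue and a completely empty queue (producer and consumer pointing at the same slot) can still be told apart. A minimal standalone sketch of that technique, with illustrative names and a 16-entry ring rather than the driver's actual structures:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define Q_COUNT_LOG2 4U                      /* 16 entries */
#define Q_COUNT      (1U << Q_COUNT_LOG2)
#define Q_WRAP_MASK  ((Q_COUNT << 1) - 1U)   /* index bits plus one wrap bit */
#define Q_IDX(v)     ((v) & (Q_COUNT - 1U))  /* slot within the ring */
#define Q_WRP(v)     ((v) & Q_COUNT)         /* the wrap bit itself */

struct ring { uint32_t prod, cons; };

/* Full: same slot, opposite wrap bits (producer has lapped the consumer once). */
static bool ring_is_full(const struct ring *q)
{
	return Q_IDX(q->prod) == Q_IDX(q->cons) && Q_WRP(q->prod) != Q_WRP(q->cons);
}

/* Empty: same slot, same wrap bit. */
static bool ring_is_empty(const struct ring *q)
{
	return Q_IDX(q->prod) == Q_IDX(q->cons) && Q_WRP(q->prod) == Q_WRP(q->cons);
}

int main(void)
{
	struct ring q = { 0, 0 };

	assert(ring_is_empty(&q) && !ring_is_full(&q));
	for (unsigned int i = 0; i < Q_COUNT; i++)      /* publish 16 entries */
		q.prod = (q.prod + 1) & Q_WRAP_MASK;
	assert(ring_is_full(&q) && !ring_is_empty(&q)); /* indexes equal, wrap differs */
	q.cons = (q.cons + 1) & Q_WRAP_MASK;            /* consume one entry */
	assert(!ring_is_full(&q) && !ring_is_empty(&q));
	return 0;
}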
@@ -1078,6 +1078,8 @@ struct amdgpu_device {
    A new field is added next to in_s3 / in_s4 / in_s0ix:
+   /* indicate amdgpu suspension status */
+   bool suspend_complete;

@@ -2476,6 +2476,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
+   adev->suspend_complete = false;
    is set at the start of amdgpu_pmops_suspend(), before the s0ix/s3 checks.

@@ -2490,6 +2491,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
+   adev->suspend_complete = true;
    is set before the amdgpu_acpi_should_gpu_reset() check in
    amdgpu_pmops_suspend_noirq().

@@ -362,7 +362,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
-   if (copy_to_user((char *)buf, context->mem_context.shared_buf, shared_buf_len))
+   if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len))
        ret = -EFAULT;

 err_free_shared_buf:
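The copy_to_user() fix above writes the TA shared buffer at the caller-visible position in the user buffer instead of clobbering its start. A small user-space analogue of the same offset pattern, using memcpy() in place of copy_to_user() (names and sizes are illustrative only):

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Append src at *copy_pos, i.e. &buf[*copy_pos] rather than buf itself. */
static int copy_chunk(char *buf, size_t buf_len, size_t *copy_pos,
		      const void *src, size_t len)
{
	if (*copy_pos + len > buf_len)
		return -1;			/* would overflow the destination */
	memcpy(&buf[*copy_pos], src, len);	/* not memcpy(buf, ...) */
	*copy_pos += len;
	return 0;
}

int main(void)
{
	char out[32];
	size_t pos = 0;

	assert(copy_chunk(out, sizeof(out), &pos, "status:", 7) == 0);
	assert(copy_chunk(out, sizeof(out), &pos, "OK", 2) == 0);
	/* Without the &out[pos] form, the second chunk would overwrite the first. */
	assert(memcmp(out, "status:OK", 9) == 0);
	printf("%.*s\n", (int)pos, out);
	return 0;
}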
@@ -3034,6 +3034,14 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
    After gfx_v9_0_cp_gfx_enable(adev, true), a quirk is added before the
    amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3) call:
+   /* Now only limit the quirk on the APU gfx9 series and already
+    * confirmed that the APU gfx10/gfx11 needn't such update.
+    */
+   if (adev->flags & AMD_IS_APU &&
+       adev->in_s3 && !adev->suspend_complete) {
+       DRM_INFO(" Will skip the CSB packet resubmit\n");
+       return 0;
+   }

@@ -1947,14 +1947,6 @@ static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
    gmc_v9_4_3_init_vram_info() no longer reads the regBIF_BIOS_SCRATCH_4 (0x50)
    register to fetch the VRAM vendor (the "Only for dGPU, vendor information is
    reliable" block guarded by !amdgpu_sriov_vf(adev) && !(adev->flags & AMD_IS_APU)
    is removed); only vram_type = AMDGPU_VRAM_TYPE_HBM and vram_width = 128 * 64 remain.

@@ -674,14 +674,6 @@ / @@ -765,7 +757,6 @@ (JPEG 4.0)
    The empty jpeg_v4_0_set_interrupt_state() stub is removed, and
    jpeg_v4_0_irq_funcs loses its ".set = jpeg_v4_0_set_interrupt_state," entry.

@@ -181,7 +181,6 @@ / @@ -516,14 +515,6 @@ / @@ -603,7 +594,6 @@ (JPEG 4.0.5)
    The asymmetrical amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0) call in
    jpeg_v4_0_5_hw_fini() is removed, along with the empty
    jpeg_v4_0_5_set_interrupt_state() stub and its ".set" entry in
    jpeg_v4_0_5_irq_funcs.

@@ -431,6 +431,12 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
    For SR-IOV VFs, adev->rmmio_remap.reg_offset is now set to
    SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2
    before the regXCC_DOORBELL_FENCE write, so the HDP flush uses the VF register.

@@ -1298,10 +1298,32 @@ static int soc15_common_suspend(void *handle)
    A new helper is added and used from soc15_common_resume():

    static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
    {
        u32 sol_reg;

        sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);

        /* Will reset for the following suspend abort cases.
         * 1) Only reset limit on APU side, dGPU hasn't checked yet.
         * 2) S3 suspend abort and TOS already launched.
         */
        if (adev->flags & AMD_IS_APU && adev->in_s3 &&
            !adev->suspend_complete &&
            sol_reg)
            return true;

        return false;
    }

    soc15_common_resume() now calls soc15_asic_reset(adev) when it returns true
    ("S3 suspend abort case, let's reset ASIC.") before soc15_common_hw_init(adev).
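The suspend_complete bookkeeping above lets the resume path tell a completed suspend apart from one that was aborted after the early callbacks ran. A reduced sketch of that pattern; the flags and the register stand-in are illustrative, not the amdgpu code:

#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	bool is_apu;
	bool in_s3;
	bool suspend_complete;	/* set only once the noirq phase was reached */
	unsigned int tos_reg;	/* stand-in for the "TOS already launched" register */
};

static void pm_suspend(struct dev_state *d)       { d->suspend_complete = false; d->in_s3 = true; }
static void pm_suspend_noirq(struct dev_state *d) { d->suspend_complete = true; }

/* Mirror of the decision: reset only on an aborted APU S3 suspend with TOS up. */
static bool need_reset_on_resume(const struct dev_state *d)
{
	return d->is_apu && d->in_s3 && !d->suspend_complete && d->tos_reg;
}

int main(void)
{
	struct dev_state d = { .is_apu = true, .tos_reg = 1 };

	pm_suspend(&d);		/* suspend starts ... */
	/* ... but is aborted before the noirq callback ever runs */
	printf("aborted suspend -> reset: %d\n", need_reset_on_resume(&d));   /* 1 */

	pm_suspend(&d);
	pm_suspend_noirq(&d);	/* this time the noirq phase was reached */
	printf("completed suspend -> reset: %d\n", need_reset_on_resume(&d)); /* 0 */
	return 0;
}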
@@ -10731,11 +10731,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
    The compute_mst_dsc_configs_for_state() call, together with its
    DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n") /
    ret = -EINVAL / goto fail error handling, is now wrapped in
        if (dc_resource_is_dsc_encoding_supported(dc)) { ... }
    before dm_update_mst_vcpi_slots_for_dsc() is called.

@@ -3817,7 +3817,9 @@ static void commit_planes_for_stream(struct dc *dc,
    After the comment about completing the plane disable for phantom pipes:
    dc->hwss.apply_ctx_to_hw(dc, context);
+   if (dc->hwss.disable_phantom_streams)
+       dc->hwss.disable_phantom_streams(dc, context);

@@ -291,11 +291,14 @@ void dc_state_destruct(struct dc_state *state)
    After releasing the phantom streams and phantom planes, the counters are cleared:
+   state->phantom_stream_count = 0;
+   state->phantom_plane_count = 0;
    before state->stream_mask and the res_ctx / pp_display_cfg memsets.

@@ -72,11 +72,11 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
    $(frame_warn_flag) is appended to the CFLAGS of
    dcn20/display_mode_vba_20.o, dcn20/display_mode_vba_20v2.o and
    dcn21/display_mode_vba_21.o, matching the existing
    dcn30/display_mode_vba_30.o := $(dml_ccflags) $(frame_warn_flag) line.
@@ -1288,7 +1288,7 @@ static bool update_pipes_with_split_flags(struct dc *dc, struct dc_state *contex
    should_allow_odm_power_optimization() is renamed to
    should_apply_odm_power_optimization(); the caller in
    dcn32_full_validate_bw_helper() (@@ -1580,7 +1586,7 @@) is updated to match.

@@ -1392,9 +1392,12 @@ static void try_odm_power_optimization_and_revalidate(
    A local "unsigned int cur_policy[MAX_PIPES];" is added and the policy-override
    loop now records the current value before forcing 2:1:
-   for (i = 0; i < pipe_cnt; i++)
+   for (i = 0; i < pipe_cnt; i++) {
+       cur_policy[i] = pipes[i].pipe.dest.odm_combine_policy;
        pipes[i].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+   }

    new_vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);

@@ -1403,6 +1406,9 @@ static void try_odm_power_optimization_and_revalidate(
    If the new voltage level does not validate, the saved policies are restored:
    *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, new_vlevel, split, merge);
    context->bw_ctx.dml.vba.VoltageLevel = *vlevel;
+   } else {
+       for (i = 0; i < pipe_cnt; i++)
+           pipes[i].pipe.dest.odm_combine_policy = cur_policy[i];
+   }

@@ -2209,7 +2215,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
    pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
-   dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);
+   if (!dc->config.enable_windowed_mpo_odm)
+       dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);

    /* repopulate_pipes = 1 means the pipes were either split or merged. In this case
     * we have to re-calculate the DET allocation and run through DML once more to ... */
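The cur_policy[] handling above is a save, try, restore-on-failure pattern: the per-pipe ODM policy is recorded before being forced to 2:1 for revalidation, and put back if the trial does not validate. A compact sketch of that pattern with made-up types (not the DC data structures):

#include <stdbool.h>
#include <stdio.h>

#define MAX_PIPES 6

enum odm_policy { POLICY_DAL, POLICY_2TO1 };

struct pipe { enum odm_policy policy; };

/* Stand-in for dml_get_voltage_level(): pretend the revalidation fails. */
static bool revalidate(const struct pipe *pipes, int n) { (void)pipes; (void)n; return false; }

static void try_policy_and_revalidate(struct pipe *pipes, int pipe_cnt)
{
	enum odm_policy cur_policy[MAX_PIPES];
	int i;

	for (i = 0; i < pipe_cnt; i++) {
		cur_policy[i] = pipes[i].policy;  /* save the current policy ... */
		pipes[i].policy = POLICY_2TO1;    /* ... then force the trial value */
	}

	if (!revalidate(pipes, pipe_cnt)) {
		/* Trial failed: restore exactly what was there before. */
		for (i = 0; i < pipe_cnt; i++)
			pipes[i].policy = cur_policy[i];
	}
}

int main(void)
{
	struct pipe pipes[2] = { { POLICY_DAL }, { POLICY_DAL } };

	try_policy_and_revalidate(pipes, 2);
	printf("pipe0 policy restored: %d\n", pipes[0].policy == POLICY_DAL); /* 1 */
	return 0;
}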
@@ -1476,7 +1476,7 @@ static enum dc_status dce110_enable_stream_timing(
    apply_single_controller_ctx_to_hw() loses its static qualifier and is renamed
    dce110_apply_single_controller_ctx_to_hw(struct pipe_ctx *pipe_ctx,
    struct dc_state *context, struct dc *dc). The caller in dce110_apply_ctx_to_hw()
    (@@ -2302,7 +2302,7 @@) is updated, and a declaration is added to the dce110
    hwseq header (@@ -39,6 +39,10 @@) next to dce110_enable_stream().

@@ -2561,7 +2561,7 @@ void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
    dcn20_reset_back_end_for_pipe(struct dc *dc, struct pipe_ctx *pipe_ctx,
    struct dc_state *context) likewise loses its static qualifier, and a declaration
    is added to the dcn20 hwseq header (@@ -84,6 +84,10 @@) after
    dcn20_setup_vupdate_interrupt() and before dcn20_init_blank().
@@ -206,28 +206,32 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
    dcn21_set_pipe() is reworked so it no longer dereferences the timing generator
    while declaring its locals. The old
        uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
    initializer is replaced by locals for tg and panel_cntl plus a plain
    "uint32_t otg_inst;", an early guard is added:
        if (!abm && !tg && !panel_cntl)
            return;
    and only then is otg_inst = tg->inst read. The existing behaviour is kept: the
    dce110_set_pipe() fallback when a DMCU is present, and SET_ABM_PIPE_NORMAL
    programming through abm->funcs->set_pipe_ex() when available, otherwise
    dmub_abm_set_pipe(), both passing panel_cntl->inst and panel_cntl->pwrseq_inst.

@@ -237,34 +241,35 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
    dcn21_set_backlight_level() gets the same treatment: locals for tg and
    panel_cntl, an early
        if (!abm && !tg && !panel_cntl)
            return false;
    guard, otg_inst taken from tg->inst afterwards instead of from
    pipe_ctx->stream_res.tg->inst inside the old "if (abm != NULL)" block, the
    dce110_set_backlight_level() fallback when a DMCU exists, the
    set_pipe_ex()/dmub_abm_set_pipe() call now done when abm and panel_cntl are
    both set, and the final backlight write guarded by
        if (abm->funcs && abm->funcs->set_backlight_level_pwm)
    (the previous check also tested abm itself).
@@ -1474,9 +1474,44 @@ void dcn32_update_dsc_pg(struct dc *dc,
    A new dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context) is
    added. It walks dc->current_state's pipes in reverse, skipping pipes with no
    stream, pipes that are not SUBVP_PHANTOM in the current state, and non-head
    pipes (top_pipe / prev_odm_pipe). For a phantom pipe that disappears, needs
    reprogramming (pipe_need_reprogram()), or stops being phantom in the new
    context, it calls hws->funcs.reset_back_end_for_pipe(),
    hws->funcs.enable_stream_gating(), and powers down the old clock source via
    old_clk->funcs->cs_power_down(old_clk).

@@ -1497,16 +1532,39 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
    dcn32_enable_phantom_streams() no longer re-applies the whole context
    (dc->hwss.apply_ctx_to_hw(dc, context) followed by break) when a phantom pipe
    is found. Instead it loops over the pipes, skips those with no stream, those
    that are not SUBVP_PHANTOM in the new context, unchanged pipes whose stream and
    link_state_valid match, pipes that do not need reprogramming, and non-head
    pipes, then applies just the phantom pipe via
    hws->funcs.apply_single_controller_ctx_to_hw(pipe_ctx, context, dc)
    (asserting the returned status is DC_OK) followed by
    hws->funcs.resync_fifo_dccg_dio() under CONFIG_DRM_AMD_DC_FP.

@@ -111,6 +111,8 @@ (dcn32 hwseq header)
+   void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context);
    is declared next to dcn32_enable_phantom_streams().

@@ -109,6 +109,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
+   .disable_phantom_streams = dcn32_disable_phantom_streams,
    is added after .enable_phantom_streams.

@@ -159,6 +160,8 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
+   .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
+   .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe,
    are added to the private function table.
@@ -379,6 +379,7 @@ struct hw_sequencer_funcs {
    void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
    void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);
+   void (*disable_phantom_streams)(struct dc *dc, struct dc_state *context);
    void (*subvp_pipe_control_lock)(struct dc *dc, ...

@@ -165,8 +165,15 @@ struct hwseq_private_funcs {
    Two hooks are added after resync_fifo_dccg_dio and around
    is_dp_dig_pixel_rate_div_policy:
+   enum dc_status (*apply_single_controller_ctx_to_hw)(
+           struct pipe_ctx *pipe_ctx,
+           struct dc_state *context,
+           struct dc *dc);
+   void (*reset_back_end_for_pipe)(struct dc *dc,
+           struct pipe_ctx *pipe_ctx,
+           struct dc_state *context);
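The hook-table additions above follow the usual DC pattern for optional function pointers: a new entry goes into the ops struct, only the generations that implement it fill it in, and every call site guards the call with a NULL check so older tables keep working. A generic sketch of that pattern; names are illustrative, not the DC API:

#include <stdio.h>

struct dc;		/* opaque for the sketch */
struct dc_state;

struct hw_sequencer_funcs {
	void (*enable_phantom_streams)(struct dc *dc, struct dc_state *ctx);
	void (*disable_phantom_streams)(struct dc *dc, struct dc_state *ctx); /* new, optional */
};

static void my_disable_phantom_streams(struct dc *dc, struct dc_state *ctx)
{
	(void)dc; (void)ctx;
	printf("phantom streams disabled\n");
}

/* Only generations that implement the hook wire it up. */
static const struct hw_sequencer_funcs new_gen_funcs = {
	.disable_phantom_streams = my_disable_phantom_streams,
};
static const struct hw_sequencer_funcs old_gen_funcs = { 0 };

static void commit(const struct hw_sequencer_funcs *hwss, struct dc *dc, struct dc_state *ctx)
{
	/* Call sites must tolerate a missing hook. */
	if (hwss->disable_phantom_streams)
		hwss->disable_phantom_streams(dc, ctx);
}

int main(void)
{
	commit(&new_gen_funcs, NULL, NULL);	/* prints the message */
	commit(&old_gen_funcs, NULL, NULL);	/* silently skipped */
	return 0;
}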
@@ -427,22 +427,18 @@ struct pipe_ctx *resource_get_primary_dpp_pipe(const struct pipe_ctx *dpp_pipe);
    The kernel-doc for resource_get_mpc_slice_count() and
    resource_get_odm_slice_count() is rewritten. The old wording ("Get number of
    MPC/ODM 'cuts' ... slice count is equal to splits + 1 ... return 0 if pipe is
    not used for ... combine") becomes:
        Get the number of MPC slices associated with the pipe.
        The function returns 0 if the pipe is not associated with an MPC combine
        pipe topology.
    (and the equivalent text for ODM), and the parameter names change from
    opp_head / otg_master to pipe:
-   int resource_get_mpc_slice_count(const struct pipe_ctx *opp_head);
+   int resource_get_mpc_slice_count(const struct pipe_ctx *pipe);
-   int resource_get_odm_slice_count(const struct pipe_ctx *otg_master);
+   int resource_get_odm_slice_count(const struct pipe_ctx *pipe);
    resource_get_odm_slice_index(const struct pipe_ctx *opp_head) is unchanged.
@@ -999,7 +999,7 @@ static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id
    vpg = dcn301_vpg_create(ctx, vpg_inst);
    afmt = dcn301_afmt_create(ctx, afmt_inst);

-   if (!enc1 || !vpg || !afmt) {
+   if (!enc1 || !vpg || !afmt || eng_id >= ARRAY_SIZE(stream_enc_regs)) {
        kfree(enc1);
        kfree(vpg);
        kfree(afmt);

@@ -1829,7 +1829,21 @@ int dcn32_populate_dml_pipes_from_context(
    Instead of always forcing dm_odm_combine_policy_dal, the ODM combine policy is
    derived from the current topology when windowed MPO+ODM is enabled:
        if (dc->config.enable_windowed_mpo_odm &&
            dc->debug.enable_single_display_2to1_odm_policy) {
            switch (resource_get_odm_slice_count(pipe)) {
            case 2:
                pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
                break;
            case 4:
                pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1;
                break;
            default:
                pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
            }
        } else {
            pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
        }
    The surrounding gpuvm_min_page_size_kbytes / unbounded_req_mode / lb_depth
    assignments are unchanged.

@@ -780,8 +780,8 @@ static const struct dc_debug_options debug_defaults_drv = {   (DCN35)
-   .ips2_eval_delay_us = 200,
-   .ips2_entry_delay_us = 400,
+   .ips2_eval_delay_us = 1650,
+   .ips2_entry_delay_us = 800,
    .static_screen_wait_frames = 2,

@@ -2130,6 +2130,7 @@ static bool dcn35_resource_construct(
-   dc->dml2_options.minimize_dispclk_using_odm = true;
+   dc->dml2_options.minimize_dispclk_using_odm = false;
+   if (dc->config.EnableMinDispClkODM)
+       dc->dml2_options.minimize_dispclk_using_odm = true;
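The DCN301 change above extends the allocation-failure check with an index bound so a caller-supplied engine id can never read past the register table. A standalone sketch of the same ARRAY_SIZE guard; the table contents are made up:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct enc_regs { unsigned int base; };

static const struct enc_regs stream_enc_regs[] = {
	{ 0x1000 }, { 0x2000 }, { 0x3000 },
};

static const struct enc_regs *enc_regs_get(unsigned int eng_id)
{
	/* Reject out-of-range ids up front instead of indexing past the table. */
	if (eng_id >= ARRAY_SIZE(stream_enc_regs))
		return NULL;
	return &stream_enc_regs[eng_id];
}

int main(void)
{
	printf("eng 1 base: %#x\n", enc_regs_get(1)->base);
	printf("eng 7 valid: %d\n", enc_regs_get(7) != NULL);	/* 0: rejected */
	return 0;
}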
@@ -140,7 +140,7 @@ config DRM_I915_GVT_KVMGT
    Note that this driver only supports newer device from Broadwell on.
    For further information and setup guide, you can visit:
-   http://01.org/igvt-g.
+   https://github.com/intel/gvt-linux/wiki.

    If in doubt, say "N".

@@ -2849,8 +2849,7 @@ static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
    for (i = start; i < end; i += 4) {
        p = intel_gvt_find_mmio_info(gvt, i);
        if (p) {
-           WARN(1, "dup mmio definition offset %x\n",
-                info->offset);
+           WARN(1, "dup mmio definition offset %x\n", i);
    The warning now prints the offset being iterated instead of the uninitialized
    info->offset; -EEXIST is still returned so a duplicated MMIO makes GVT-g load fail.

@@ -41,7 +41,7 @@ (intel_gvt.c header comment)
    * To virtualize GPU resources GVT-g driver depends on hypervisor technology
    * e.g KVM/VFIO/mdev, Xen, etc. to provide resource access trapping capability
    * and be virtualized within GVT-g device module. More architectural design
-   * doc is available on https://01.org/group/2230/documentation-list.
+   * doc is available on https://github.com/intel/gvt-linux/wiki.
    */
@@ -144,10 +144,6 @@ enum dpu_enc_rc_states {
    The stale @crtc_kickoff_cb ("Callback into CRTC that will flush & start all CTL
    paths") and @crtc_kickoff_cb_data kernel-doc entries are dropped from the dpu
    encoder struct documentation; @connector, @debugfs_root, @enc_lock and
    @frame_busy_mask remain.

@@ -2072,7 +2068,7 @@ / @@ -2103,7 +2099,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
    Both dereferences of phys_enc->hw_pp->merge_3d gain a NULL check on hw_pp:
-   if (phys_enc->hw_pp->merge_3d) {
+   if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
    for the merge-3D reset (setup_3d_mode(..., BLEND_3D_NONE) and
    update_pending_flush_merge_3d), and
-   if (phys_enc->hw_pp->merge_3d)
+   if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d)
    for the intf_cfg.merge_3d assignment before ctl->ops.reset_intf_cfg.

@@ -29,7 +29,6 @@ / @@ -204,6 +203,8 @@ (dpu_rm.c kernel-doc)
    One stale field entry is removed from the struct dpu_rm_requirements
    documentation (leaving @topology), and the _dpu_rm_get_lm_peer() comment gains
    a "Returns: lm peer mixed id on success or %-EINVAL on error" line.
@@ -135,11 +135,6 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
    The local fallback in dp_ctrl_config_ctrl() is removed, since
    dp_link_get_test_bits_depth() no longer returns UNKNOWN (see below):
-   if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) {
-       pr_debug("BIT_DEPTH not set. Configure default\n");
-       tbd = DP_TEST_BIT_DEPTH_8;
-   }
    config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT; is kept.

@@ -7,6 +7,7 @@ / @@ -1082,7 +1083,7 @@ int dp_link_get_colorimetry_config(struct dp_link *dp_link)
    dp_link.c now includes dp_reg.h, and the colorimetry config is returned as MISC0
    register values instead of raw test values:
-   u32 cc;
+   u32 cc = DP_MISC0_COLORIMERY_CFG_LEGACY_RGB;
    ...
-   if (dp_link_is_video_pattern_requested(link))
-       cc = link->dp_link.test_video.test_dyn_range;
-   else
-       cc = DP_TEST_DYNAMIC_RANGE_VESA;
+   if (dp_link_is_video_pattern_requested(link)) {
+       if (link->dp_link.test_video.test_dyn_range &
+           DP_TEST_DYNAMIC_RANGE_CEA)
+           cc = DP_MISC0_COLORIMERY_CFG_CEA_RGB;
+   }
    return cc;

@@ -1179,6 +1181,9 @@ u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
    The bpp switch now falls back to 8 bpc instead of the UNKNOWN sentinel, and a
    struct dp_link_private *link local (via container_of()) is added for the debug print:
-   default:
-       tbd = DP_TEST_BIT_DEPTH_UNKNOWN;
+   default:
+       drm_dbg_dp(link->drm_dev, "bpp=%d not supported, use bpc=8\n", bpp);
+       tbd = DP_TEST_BIT_DEPTH_8;
        break;
    and the shift becomes unconditional:
-   if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN)
-       tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
+   tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);

@@ -143,6 +143,9 @@ (dp_reg.h)
    New MISC0 colorimetry values are defined next to DP_MISC0_COLORIMETRY_CFG_SHIFT
    and DP_MISC0_TEST_BITS_DEPTH_SHIFT:
+   #define DP_MISC0_COLORIMERY_CFG_LEGACY_RGB	(0)
+   #define DP_MISC0_COLORIMERY_CFG_CEA_RGB	(0x04)

@@ -562,6 +562,7 @@ static const struct msm_mdss_data sdm670_data = {
    The sdm670_data entry (ubwc_enc_version / ubwc_dec_version = UBWC_2_0) gains one
    line; the hunk shows
        .highest_bank_bit = 1,
        .reg_bus_bw = 76800,
    with one of the two being the addition.
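The dp_link_get_test_bits_depth() change above turns the "unknown bpp" case into a safe default instead of a sentinel every caller has to special-case. A small sketch of that mapping with the same fallback shape; the values are illustrative, not the DP register encoding:

#include <stdio.h>

enum bit_depth { BIT_DEPTH_6 = 0, BIT_DEPTH_8 = 1, BIT_DEPTH_10 = 2 };

/* Map a total bpp to a per-component bit depth; unknown values fall back to 8. */
static enum bit_depth bpp_to_bit_depth(unsigned int bpp)
{
	switch (bpp) {
	case 18:
		return BIT_DEPTH_6;
	case 24:
		return BIT_DEPTH_8;
	case 30:
		return BIT_DEPTH_10;
	default:
		fprintf(stderr, "bpp=%u not supported, using 8 bpc\n", bpp);
		return BIT_DEPTH_8;	/* never hand an "unknown" sentinel to callers */
	}
}

int main(void)
{
	printf("bpp 30 -> %d\n", bpp_to_bit_depth(30));
	printf("bpp 16 -> %d\n", bpp_to_bit_depth(16));	/* falls back to BIT_DEPTH_8 */
	return 0;
}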
@@ -9,7 +9,7 @@ (nvkm gsp.h)
 #define GSP_PAGE_SIZE  BIT(GSP_PAGE_SHIFT)

 struct nvkm_gsp_mem {
-   u32 size;
+   size_t size;
    void *data;
    dma_addr_t addr;
 };

@@ -997,6 +997,32 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
    nvkm_gsp_mem_dtor() and nvkm_gsp_mem_ctor() are moved up ahead of their new
    users (the old copies at @@ -1532,27 +1565,6 @@ are deleted). The destructor now
    poisons the buffer before freeing it and clears the whole descriptor:

    static void
    nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem)
    {
        if (mem->data) {
            /*
             * Poison the buffer to catch any unexpected access from
             * GSP-RM if the buffer was prematurely freed.
             */
            memset(mem->data, 0xFF, mem->size);

            dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr);
            memset(mem, 0, sizeof(*mem));
        }
    }

    nvkm_gsp_mem_ctor() now takes a size_t size (was u32); the body is unchanged
    (dma_alloc_coherent() plus WARN_ON(!mem->data) returning -ENOMEM).

@@ -1024,6 +1050,13 @@ r535_gsp_postinit(struct nvkm_gsp *gsp)
    /* Release the DMA buffers that were needed only for boot and init */
+   nvkm_gsp_mem_dtor(gsp, &gsp->boot.fw);
+   nvkm_gsp_mem_dtor(gsp, &gsp->libos);
+   nvkm_gsp_mem_dtor(gsp, &gsp->rmargs);
+   nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta);

@@ -1938,20 +1950,20 @@ nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
    nvkm_gsp_radix3_sg() now takes a struct nvkm_gsp * instead of a struct
    nvkm_device *, and each radix3 level is allocated through nvkm_gsp_mem_ctor()
    instead of an open-coded dma_alloc_coherent():
        bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
        ret = nvkm_gsp_mem_ctor(gsp, bufsize, &rx3->mem[i]);
        if (ret)
            return ret;
    Both callers are updated: r535_gsp_fini() (@@ -1991,7 +2003,7 @@) and
    r535_gsp_oneinit() (@@ -2194,7 +2211,7 @@) now pass gsp instead of
    gsp->subdev.device / device.

@@ -2150,6 +2162,11 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
    After r535_gsp_dtor_fws(gsp), the remaining DMA buffers are freed as well:
+   nvkm_gsp_mem_dtor(gsp, &gsp->shm.mem);
+   nvkm_gsp_mem_dtor(gsp, &gsp->loginit);
+   nvkm_gsp_mem_dtor(gsp, &gsp->logintr);
+   nvkm_gsp_mem_dtor(gsp, &gsp->logrm);
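The reworked nvkm_gsp_mem_dtor() above poisons a DMA buffer before handing it back, so a late write or read by the device shows up as an obvious 0xFF pattern rather than silent corruption. A user-space sketch of the same idea, using malloc()/free() as stand-ins for dma_alloc_coherent()/dma_free_coherent():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mem { size_t size; void *data; };

static int mem_ctor(struct mem *mem, size_t size)
{
	mem->size = size;
	mem->data = malloc(size);		/* stand-in for dma_alloc_coherent() */
	return mem->data ? 0 : -1;
}

static void mem_dtor(struct mem *mem)
{
	if (mem->data) {
		/* Poison first so anything still looking at the buffer sees 0xFF,
		 * not plausible stale contents.
		 */
		memset(mem->data, 0xFF, mem->size);
		free(mem->data);		/* stand-in for dma_free_coherent() */
		memset(mem, 0, sizeof(*mem));	/* and make a double dtor harmless */
	}
}

int main(void)
{
	struct mem m;

	if (mem_ctor(&m, 64))
		return 1;
	memset(m.data, 0, m.size);
	mem_dtor(&m);
	mem_dtor(&m);	/* second call is a no-op thanks to the cleared descriptor */
	printf("size after dtor: %zu\n", m.size);	/* 0 */
	return 0;
}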
@@ -1178,21 +1178,24 @@ static void drm_sched_run_job_work(struct work_struct *w)
    The "find an entity with a ready job" while loop is replaced with a
    straight-line sequence. sched_job loses its NULL initializer, and after the
    pause_submit check the function now does:
        entity = drm_sched_select_entity(sched);
        if (!entity)
            return;	/* No more work */

        sched_job = drm_sched_entity_pop_job(entity);
        if (!sched_job) {
            complete_all(&entity->entity_idle);
            drm_sched_run_job_queue(sched);
            return;
        }
    before taking s_fence and adding sched_job->credits to sched->credit_count as
    before.
@@ -134,8 +134,6 @@ static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
@@ -145,10 +143,6 @@ int xe_display_init_nommio(struct xe_device *xe)
    xe_display_init_nommio() no longer declares a local err and no longer calls
    intel_power_domains_init(); it goes straight from intel_detect_pch(xe) to
    drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe), fixing the
    memleak in display initialization.

@@ -926,20 +926,24 @@ void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
    xe_exec_queue_last_fence_get() now returns a reference. The kernel-doc changes
    from "Get last fence, does not take a ref" to "Get last fence, takes a ref",
    and the tail of the function becomes:
-   return q->last_fence ? q->last_fence : dma_fence_get_stub();
+   fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
+   dma_fence_get(fence);
+   return fence;

@@ -437,7 +437,10 @@ static int all_fw_domain_init(struct xe_gt *gt)
    The USM suballocator pool is sized per platform:
-   gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), SZ_1M, 16);
+   struct xe_device *xe = gt_to_xe(gt);
+
+   gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
+                                           IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);

@@ -335,7 +335,7 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
    asid = FIELD_GET(PFD_ASID, msg[1]);
-   pf_queue = &gt->usm.pf_queue[asid % NUM_PF_QUEUE];
+   pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE);
    Equivalent addressing, rewritten to circumvent a bogus gcc stringop-overflow
    warning.
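With xe_exec_queue_last_fence_get() now taking a reference itself, every caller owns exactly one reference and must drop it, instead of some callers bolting on their own dma_fence_get(). A generic sketch of a "get returns a reference" accessor, using a plain atomic refcount rather than struct dma_fence:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj { atomic_int refcount; };

static struct obj *obj_get(struct obj *o) { atomic_fetch_add(&o->refcount, 1); return o; }

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);
}

struct queue { struct obj *last; };

/* The accessor hands out its own reference; callers balance it with obj_put(). */
static struct obj *queue_last_get(struct queue *q)
{
	return obj_get(q->last);
}

int main(void)
{
	struct queue q = { .last = calloc(1, sizeof(struct obj)) };

	atomic_store(&q.last->refcount, 1);	/* the queue's own reference */

	struct obj *o = queue_last_get(&q);	/* caller now holds +1 */
	printf("refcount while in use: %d\n", atomic_load(&o->refcount));	/* 2 */
	obj_put(o);				/* caller drops exactly one */

	obj_put(q.last);			/* queue drops its reference; object freed */
	return 0;
}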
@@ -170,11 +170,6 @@ / @@ -185,6 +180,24 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
    On integrated parts the USM batch buffer is no longer assumed to alias the
    kernel batch. The early block
        if (xe->info.has_usm) {
            batch = tile->primary_gt->usm.bb_pool->bo;
            m->usm_batch_base_ofs = m->batch_base_ofs;
        }
    is removed, and after the kernel batch PTEs have been written a second block
    maps the USM batch at its own offset:
        if (xe->info.has_usm) {
            xe_tile_assert(tile, batch->size == SZ_1M);

            batch = tile->primary_gt->usm.bb_pool->bo;
            m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
            xe_tile_assert(tile, batch->size == SZ_512K);
            ... (same pte_encode_bo() / xe_map_wr() loop as for the kernel batch)
        }

@@ -1204,8 +1217,11 @@ static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q,
    Since xe_exec_queue_last_fence_get() now returns a reference, no_in_syncs()
    drops it on both paths:
        fence = xe_exec_queue_last_fence_get(q, vm);
        if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
            dma_fence_put(fence);
            return false;
        }
        dma_fence_put(fence);

@@ -274,7 +274,6 @@ int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm)
@@ -307,7 +307,6 @@ / @@ -322,7 +321,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync, ...)
    The now-redundant explicit dma_fence_get() calls after
    xe_exec_queue_last_fence_get() are removed from
    xe_sched_job_last_fence_add_dep() and both call sites in xe_sync_in_fence_get().
@@ -37,8 +37,6 @@ (xe_vm.c)
    The leftover async bind queue test hooks are removed: the
    "#define TEST_VM_ASYNC_OPS_ERROR" in xe_vm.c, the FORCE_ASYNC_OP_ERROR injection
    block in vm_bind_ioctl_ops_create() (@@ -2111,15 +2106,6 @@, together with the
    now-unused struct xe_vma_op *op local at @@ -2064,7 +2060,6 @@), the
    op->inject_error check in xe_vma_op_execute() (@@ -2548,13 +2548,6 @@), the
    async variant of SUPPORTED_FLAGS (@@ -2714,16 +2707,9 @@), which now reads
        #define SUPPORTED_FLAGS \
            (DRM_XE_VM_BIND_FLAG_READONLY | \
             DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL)
    without the FORCE_ASYNC_OP_ERROR and 0xffff entries, and the corresponding
    define, FORCE_ASYNC_OP_ERROR bit and inject_error field in xe_vm_types.h
    (@@ -21,9 +21,6 @@ and @@ -360,11 +357,6 @@).

@@ -114,11 +112,8 @@ retry:
    In the userptr pinning loop, an error from the page pinning call is no longer
    swallowed when running in a kthread:
-   if (ret < 0) {
-       if (in_kthread)
-           ret = 0;
-       break;
-   }
+   if (ret < 0)
+       break;

@@ -1984,6 +1979,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
    A dma_fence_put(fence) is added after xe_sync_entry_signal() for the fence
    obtained from xe_exec_queue_last_fence_get(wait_exec_queue, vm).

@@ -2199,8 +2185,10 @@ static u64 xe_vma_max_pte_size(struct xe_vma *vma)
    A VMA whose PTE-size flags are not yet set is now assumed to use the largest
    page size instead of 4K:
    else if (vma->gpuva.flags & XE_VMA_PTE_2M)
        return SZ_2M;
+   else if (vma->gpuva.flags & XE_VMA_PTE_4K)
+       return SZ_4K;

-   return SZ_4K;
+   return SZ_1G;	/* Uninitialized, used max size */

@@ -2530,13 +2518,25 @@ retry_userptr:
    On an -EAGAIN from a bind, the retry now picks the VMA that actually needs
    repinning: the condition drops the xe_vma_is_userptr(vma) test, and for a
    DRM_GPUVA_OP_REMAP op the code selects the unmap VMA (when the unmap has not
    completed yet) or op->remap.prev / op->remap.next; only then, if that VMA is a
    userptr, does it call xe_vma_userptr_pin_pages() and goto retry_userptr,
    tracing xe_vma_fail(vma) on failure.

@@ -2669,7 +2662,7 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
-   for (i = num_ops_list - 1; i; ++i) {
+   for (i = num_ops_list - 1; i >= 0; --i) {
    so the unwind actually iterates from the last ops list down to index 0 (the old
    loop used the wrong condition and incremented instead of decrementing).
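The vm_bind_ioctl_ops_unwind() fix above is the classic reverse-iteration slip: "for (i = n - 1; i; ++i)" neither tests ">= 0" nor decrements, so index 0 is never visited and the loop walks the wrong way. A tiny standalone illustration of the broken and fixed loop shapes (the broken one is bounded here so the example terminates):

#include <stdio.h>

int main(void)
{
	const int n = 3;
	int visited = 0;

	/* Broken shape: with n == 1 this never runs (i starts at 0); with n > 1 it
	 * increments away from 0 instead of unwinding backwards.
	 */
	for (int i = n - 1; i && i < 100; ++i)
		visited++;
	printf("broken loop visited %d slots (expected %d)\n", visited, n);

	/* Fixed shape: walk from the last entry down to index 0 inclusive. */
	visited = 0;
	for (int i = n - 1; i >= 0; --i)
		visited++;
	printf("fixed loop visited %d slots\n", visited);	/* 3 */
	return 0;
}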
@@ -305,6 +305,7 @@ struct drm_ivpu_submit {   (include/uapi ivpu)

 /* drm_ivpu_bo_wait job status codes */
 #define DRM_IVPU_JOB_STATUS_SUCCESS 0
+#define DRM_IVPU_JOB_STATUS_ABORTED 256

 /**
  * struct drm_ivpu_bo_wait - Wait for BO to become inactive