drm fixes for 6.7-rc4

Merge tag 'drm-fixes-2023-12-01' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Weekly fixes, mostly amdgpu fixes with a scattering of nouveau, i915,
  and a couple of reverts. Hopefully it will quieten down in coming
  weeks.

  drm:
   - Revert unexport of prime helpers for fd/handle conversion

  dma_resv:
   - Do not double add fences in dma_resv_add_fence.

  gpuvm:
   - Fix GPUVM license identifier.

  i915:
   - Mark internal GSC engine with reserved uabi class
   - Take VGA converters into account in eDP probe
   - Fix intel_pre_plane_updates() call to ensure workarounds get applied

  panel:
   - Revert panel fixes as they require exporting device_is_dependent.

  nouveau:
   - fix oversized allocations in new vm path
   - fix zero-length array
   - remove a stray lock

  nt36523:
   - Fix error check for nt36523.

  amdgpu:
   - DMUB fix
   - DCN 3.5 fixes
   - XGMI fix
   - DCN 3.2 fixes
   - Vangogh suspend fix
   - NBIO 7.9 fix
   - GFX11 golden register fix
   - Backlight fix
   - NBIO 7.11 fix
   - IB test overflow fix
   - DCN 3.1.4 fixes
   - fix a runtime pm ref count
   - Retimer fix
   - ABM fix
   - DCN 3.1.5 fix
   - Fix AGP addressing
   - Fix possible memory leak in SMU error path
   - Make sure PME is enabled in D3
   - Fix possible NULL pointer dereference in debugfs
   - EEPROM fix
   - GC 9.4.3 fix

  amdkfd:
   - IP version check fix
   - Fix memory leak in pqm_uninit()"

* tag 'drm-fixes-2023-12-01' of git://anongit.freedesktop.org/drm/drm: (53 commits)
  Revert "drm/prime: Unexport helpers for fd/handle conversion"
  drm/amdgpu: Use another offset for GC 9.4.3 remap
  drm/amd/display: Fix some HostVM parameters in DML
  drm/amdkfd: Free gang_ctx_bo and wptr_bo in pqm_uninit
  drm/amdgpu: Update EEPROM I2C address for smu v13_0_0
  drm/amd/display: Allow DTBCLK disable for DCN35
  drm/amdgpu: Fix cat debugfs amdgpu_regs_didt causes kernel null pointer
  drm/amd: Enable PCIe PME from D3
  drm/amd/pm: fix a memleak in aldebaran_tables_init
  drm/amdgpu: fix AGP addressing when GART is not at 0
  drm/amd/display: update dcn315 lpddr pstate latency
  drm/amd/display: fix ABM disablement
  drm/amd/display: Fix black screen on video playback with embedded panel
  drm/amd/display: Fix conversions between bytes and KB
  drm/amdkfd: Use common function for IP version check
  drm/amd/display: Remove config update
  drm/amd/display: Update DCN35 clock table policy
  drm/amd/display: force toggle rate wa for first link training for a retimer
  drm/amdgpu: correct the amdgpu runtime dereference usage count
  drm/amd/display: Update min Z8 residency time to 2100 for DCN314
  ...
Merged by Linus Torvalds, 2023-12-02 08:18:59 +09:00
commit b1e51588aa
67 changed files with 537 additions and 302 deletions


@ -301,7 +301,7 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
if ((old->context == fence->context && old_usage >= usage &&
dma_fence_is_later(fence, old)) ||
dma_fence_is_later_or_same(fence, old)) ||
dma_fence_is_signaled(old)) {
dma_resv_list_set(fobj, i, fence, usage);
dma_fence_put(old);
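
A minimal userspace sketch of the comparison the hunk above switches to; the
struct, field, and helper names here are illustrative stand-ins, not the DRM
implementation:

#include <stdbool.h>
#include <stdint.h>

/* Stand-in fence; the kernel orders fences by context and sequence number,
 * and a bare seqno is enough to show the comparison. */
struct fence {
	uint64_t seqno;
};

static bool fence_is_later(const struct fence *a, const struct fence *b)
{
	return a->seqno > b->seqno;                /* strictly newer than b */
}

/* "Later or same" also matches when both pointers name the very same fence,
 * which is what lets the resv code replace a slot instead of adding the
 * identical fence a second time. */
static bool fence_is_later_or_same(const struct fence *a, const struct fence *b)
{
	return a == b || fence_is_later(a, b);
}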


@ -547,7 +547,7 @@ int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
struct amdgpu_device *adev = dst, *peer_adev;
int num_links;
if (adev->asic_type != CHIP_ALDEBARAN)
if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))
return 0;
if (src)


@ -638,6 +638,9 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
if (!adev->didt_rreg)
return -EOPNOTSUPP;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@ -694,6 +697,9 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
if (!adev->didt_wreg)
return -EOPNOTSUPP;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
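
The two hunks above add the same guard to the read and write handlers; a
compact userspace sketch of that pattern (names and types are stand-ins, not
the amdgpu code):

#include <errno.h>
#include <stdint.h>

struct dev {
	/* optional per-ASIC register accessor; NULL when the DIDT block is absent */
	uint32_t (*didt_rreg)(struct dev *d, uint32_t reg);
};

static int didt_read_one(struct dev *d, uint32_t reg, uint32_t *out)
{
	if (!d->didt_rreg)
		return -EOPNOTSUPP;    /* report "not supported" instead of calling NULL */
	*out = d->didt_rreg(d, reg);
	return 0;
}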


@ -4538,6 +4538,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, false);
r = amdgpu_dpm_notify_rlc_state(adev, false);
if (r)
return r;
return 0;
}


@ -340,14 +340,11 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
adev->have_disp_power_ref = true;
return ret;
}
/* if we have no active crtcs, then drop the power ref
* we got before
/* if we have no active crtcs, then go to
* drop the power ref we got before
*/
if (!active && adev->have_disp_power_ref) {
pm_runtime_put_autosuspend(dev->dev);
if (!active && adev->have_disp_power_ref)
adev->have_disp_power_ref = false;
}
out:
/* drop the power reference we got coming in here */
pm_runtime_put_autosuspend(dev->dev);


@ -2263,6 +2263,8 @@ retry_init:
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
pci_wake_from_d3(pdev, TRUE);
/*
* For runpm implemented via BACO, PMFW will handle the
* timing for BACO in and out:


@ -181,6 +181,9 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
if (!bo->ttm)
return AMDGPU_BO_INVALID_OFFSET;
if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
return AMDGPU_BO_INVALID_OFFSET;


@ -1527,10 +1527,14 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
uint64_t offset;
uint64_t offset = AMDGPU_BO_INVALID_OFFSET;
offset = (bo->tbo.resource->start << PAGE_SHIFT) +
amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
if (bo->tbo.resource->mem_type == TTM_PL_TT)
offset = amdgpu_gmc_agp_addr(&bo->tbo);
if (offset == AMDGPU_BO_INVALID_OFFSET)
offset = (bo->tbo.resource->start << PAGE_SHIFT) +
amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
return amdgpu_gmc_sign_extend(offset);
}
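
The change above prefers the AGP aperture address for system-memory BOs and
falls back to the placement-based offset when AGP addressing does not apply;
a self-contained sketch of that fallback shape (the sentinel, fields, and
values are illustrative, not the amdgpu API):

#include <stdint.h>
#include <stdio.h>

#define INVALID_OFFSET UINT64_MAX         /* stands in for AMDGPU_BO_INVALID_OFFSET */

struct bo {
	int      in_system_memory;        /* rough analogue of mem_type == TTM_PL_TT */
	uint64_t agp_addr;                /* INVALID_OFFSET when AGP cannot map it */
	uint64_t placement_offset;        /* start-of-placement fallback */
};

static uint64_t gpu_offset(const struct bo *bo)
{
	uint64_t offset = INVALID_OFFSET;

	if (bo->in_system_memory)
		offset = bo->agp_addr;            /* try the AGP aperture first */
	if (offset == INVALID_OFFSET)
		offset = bo->placement_offset;    /* otherwise use the placement */
	return offset;
}

int main(void)
{
	struct bo b = { 1, INVALID_OFFSET, 0x100000 };
	printf("0x%llx\n", (unsigned long long)gpu_offset(&b));
	return 0;
}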


@ -214,6 +214,12 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
control->i2c_address = EEPROM_I2C_MADDR_0;
return true;
case IP_VERSION(13, 0, 0):
if (strnstr(atom_ctx->vbios_pn, "D707",
sizeof(atom_ctx->vbios_pn)))
control->i2c_address = EEPROM_I2C_MADDR_0;
else
control->i2c_address = EEPROM_I2C_MADDR_4;
return true;
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
control->i2c_address = EEPROM_I2C_MADDR_4;


@ -959,10 +959,8 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
return 0;
addr = amdgpu_gmc_agp_addr(bo);
if (addr != AMDGPU_BO_INVALID_OFFSET) {
bo->resource->start = addr >> PAGE_SHIFT;
if (addr != AMDGPU_BO_INVALID_OFFSET)
return 0;
}
/* allocate GART space */
placement.num_placement = 1;


@ -89,6 +89,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_5_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_rlc.bin");
static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000)
};
static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
@ -304,6 +308,10 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
default:
break;
}
soc15_program_register_sequence(adev,
golden_settings_gc_11_0,
(const u32)ARRAY_SIZE(golden_settings_gc_11_0));
}
static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
@ -419,7 +427,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
cpu_ptr = &adev->wb.wb[index];
r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err1;


@ -883,8 +883,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 16,
AMDGPU_IB_POOL_DIRECT, &ib);
r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;


@ -1039,8 +1039,8 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 16,
AMDGPU_IB_POOL_DIRECT, &ib);
r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;


@ -297,8 +297,8 @@ static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 16,
AMDGPU_IB_POOL_DIRECT, &ib);
r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;


@ -259,17 +259,17 @@ const struct nbio_hdp_flush_reg nbio_v7_11_hdp_flush_reg = {
static void nbio_v7_11_init_registers(struct amdgpu_device *adev)
{
/* uint32_t def, data;
uint32_t def, data;
def = data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3);
data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3,
CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3,
CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);
def = data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3);
data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3,
CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3,
CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);
if (def != data)
WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3, data);
if (def != data)
WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3, data);
*/
}
static void nbio_v7_11_update_medium_grain_clock_gating(struct amdgpu_device *adev,


@ -611,11 +611,6 @@ static void nbio_v7_9_handle_ras_controller_intr_no_bifring(struct amdgpu_device
dev_info(adev->dev, "RAS controller interrupt triggered "
"by NBIF error\n");
/* ras_controller_int is dedicated for nbif ras error,
* not the global interrupt for sync flood
*/
amdgpu_ras_reset_gpu(adev);
}
amdgpu_ras_error_data_fini(&err_data);


@ -1161,6 +1161,11 @@ static int soc15_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x46;
/* GC 9.4.3 uses MMIO register region hole at a different offset */
if (!amdgpu_sriov_vf(adev)) {
adev->rmmio_remap.reg_offset = 0x1A000;
adev->rmmio_remap.bus_addr = adev->rmmio_base + 0x1A000;
}
break;
default:
/* FIXME: not supported yet */


@ -1128,7 +1128,7 @@ static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev,
struct kfd_dev *dev = adev->kfd.dev;
uint32_t i;
if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))
if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3))
return dev->nodes[0];
for (i = 0; i < dev->num_nodes; i++)


@ -169,16 +169,43 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
return 0;
}
static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
struct process_queue_node *pqn)
{
struct kfd_node *dev;
struct kfd_process_device *pdd;
dev = pqn->q->device;
pdd = kfd_get_process_device_data(dev, pqm->process);
if (!pdd) {
pr_err("Process device data doesn't exist\n");
return;
}
if (pqn->q->gws) {
if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
!dev->kfd->shared_resources.enable_mes)
amdgpu_amdkfd_remove_gws_from_process(
pqm->process->kgd_process_info, pqn->q->gws);
pdd->qpd.num_gws = 0;
}
if (dev->kfd->shared_resources.enable_mes) {
amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->gang_ctx_bo);
if (pqn->q->wptr_bo)
amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo);
}
}
void pqm_uninit(struct process_queue_manager *pqm)
{
struct process_queue_node *pqn, *next;
list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
if (pqn->q && pqn->q->gws &&
KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
!pqn->q->device->kfd->shared_resources.enable_mes)
amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
pqn->q->gws);
if (pqn->q)
pqm_clean_queue_resource(pqm, pqn);
kfd_procfs_del_queue(pqn->q);
uninit_queue(pqn->q);
list_del(&pqn->process_queue_list);
@ -461,22 +488,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
goto err_destroy_queue;
}
if (pqn->q->gws) {
if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
!dev->kfd->shared_resources.enable_mes)
amdgpu_amdkfd_remove_gws_from_process(
pqm->process->kgd_process_info,
pqn->q->gws);
pdd->qpd.num_gws = 0;
}
if (dev->kfd->shared_resources.enable_mes) {
amdgpu_amdkfd_free_gtt_mem(dev->adev,
pqn->q->gang_ctx_bo);
if (pqn->q->wptr_bo)
amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo);
}
pqm_clean_queue_resource(pqm, pqn);
uninit_queue(pqn->q);
}
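
Both teardown paths above now go through a single helper, which is what frees
gang_ctx_bo and wptr_bo during pqm_uninit() as well as on explicit destroy; a
minimal illustration of that shape (hypothetical names, not the KFD code):

#include <stdlib.h>

struct queue {
	void *gang_ctx_bo;
	void *wptr_bo;
};

/* One place that releases per-queue buffers, shared by every teardown path
 * so none of them can forget an allocation. */
static void clean_queue_resources(struct queue *q)
{
	free(q->gang_ctx_bo);
	q->gang_ctx_bo = NULL;
	free(q->wptr_bo);
	q->wptr_bo = NULL;
}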


@ -6267,7 +6267,7 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
dm_new_state->underscan_enable = val;
ret = 0;
} else if (property == adev->mode_info.abm_level_property) {
dm_new_state->abm_level = val;
dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE;
ret = 0;
}
@ -6312,7 +6312,8 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
*val = dm_state->underscan_enable;
ret = 0;
} else if (property == adev->mode_info.abm_level_property) {
*val = dm_state->abm_level;
*val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
dm_state->abm_level : 0;
ret = 0;
}
@ -6385,7 +6386,8 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
state->pbn = 0;
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
state->abm_level = amdgpu_dm_abm_level;
state->abm_level = amdgpu_dm_abm_level ?:
ABM_LEVEL_IMMEDIATE_DISABLE;
__drm_atomic_helper_connector_reset(connector, &state->base);
}
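
The abm_level hunks above use GCC's binary conditional, "val ?: fallback",
which yields val when val is non-zero and the fallback otherwise, evaluating
val only once; a small standalone example (the disable value is an arbitrary
placeholder here):

#include <stdio.h>

#define ABM_LEVEL_IMMEDIATE_DISABLE 255u    /* illustrative placeholder value */

int main(void)
{
	unsigned int val = 0;

	/* GNU extension used in the hunks above: fall back when val is 0. */
	unsigned int stored = val ?: ABM_LEVEL_IMMEDIATE_DISABLE;

	/* Portable spelling of the same expression. */
	unsigned int portable = val ? val : ABM_LEVEL_IMMEDIATE_DISABLE;

	printf("%u %u\n", stored, portable);
	return 0;
}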


@ -334,7 +334,7 @@ static struct wm_table lpddr5_wm_table = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@ -342,7 +342,7 @@ static struct wm_table lpddr5_wm_table = {
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@ -350,7 +350,7 @@ static struct wm_table lpddr5_wm_table = {
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@ -358,7 +358,7 @@ static struct wm_table lpddr5_wm_table = {
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,


@ -232,6 +232,10 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->work_arounds.skip_clock_update)
return;
/* DTBCLK is fixed, so set a default if unspecified. */
if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
new_clocks->ref_dtbclk_khz = 600000;
/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
* also if safe to lower is false, we just go in the higher state
@ -265,8 +269,10 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
dcn35_smu_set_dtbclk(clk_mgr, true);
dcn35_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
}
/* check that we're not already in D0 */
@ -314,17 +320,12 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
update_dispclk = true;
}
if (!new_clocks->dtbclk_en) {
new_clocks->ref_dtbclk_khz = 600000;
}
/* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */
if (!dc->debug.disable_dtb_ref_clk_switch &&
should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000, clk_mgr_base->clks.ref_dtbclk_khz / 1000)) {
/* DCCG requires KHz precision for DTBCLK */
dcn35_smu_set_dtbclk(clk_mgr, true);
dcn35_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000,
clk_mgr_base->clks.ref_dtbclk_khz / 1000)) {
dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
}
if (dpp_clock_lowered) {
@ -443,32 +444,32 @@ static struct wm_table ddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 9,
.sr_enter_plus_exit_time_us = 11,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 9,
.sr_enter_plus_exit_time_us = 11,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 9,
.sr_enter_plus_exit_time_us = 11,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 9,
.sr_enter_plus_exit_time_us = 11,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
}
@ -480,32 +481,32 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
}
@ -515,11 +516,6 @@ static DpmClocks_t_dcn35 dummy_clocks;
static struct dcn35_watermarks dummy_wms = { 0 };
static struct dcn35_ss_info_table ss_info_table = {
.ss_divider = 1000,
.ss_percentage = {0, 0, 375, 375, 375}
};
static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table)
{
int i, num_valid_sets;
@ -653,27 +649,47 @@ static unsigned int convert_wck_ratio(uint8_t wck_ratio)
return 1;
}
static inline uint32_t calc_dram_speed_mts(const MemPstateTable_t *entry)
{
return entry->UClk * convert_wck_ratio(entry->WckRatio) * 2;
}
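/* Worked example for the helper above (numbers are illustrative, not from a
 * real PMFW table): UClk = 800 MHz with a WCK ratio of 2 gives
 * 800 * 2 * 2 = 3200 MT/s of effective DRAM speed. */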
static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
struct integrated_info *bios_info,
DpmClocks_t_dcn35 *clock_table)
{
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
uint32_t max_pstate = 0, max_uclk = 0, max_fclk = 0;
uint32_t min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
uint32_t max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
uint32_t max_pstate = 0, max_dram_speed_mts = 0, min_dram_speed_mts = 0;
int i;
/* Determine min/max p-state values. */
for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
if (is_valid_clock_value(clock_table->MemPstateTable[i].UClk) &&
clock_table->MemPstateTable[i].UClk > max_uclk) {
max_uclk = clock_table->MemPstateTable[i].UClk;
uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts > max_dram_speed_mts) {
max_dram_speed_mts = dram_speed_mts;
max_pstate = i;
}
}
/* We expect the table to contain at least one valid Uclk entry. */
ASSERT(is_valid_clock_value(max_uclk));
min_dram_speed_mts = max_dram_speed_mts;
min_pstate = max_pstate;
for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts < min_dram_speed_mts) {
min_dram_speed_mts = dram_speed_mts;
min_pstate = i;
}
}
/* We expect the table to contain at least one valid P-state entry. */
ASSERT(clock_table->NumMemPstatesEnabled &&
is_valid_clock_value(max_dram_speed_mts) &&
is_valid_clock_value(min_dram_speed_mts));
/* dispclk and dppclk can be max at any voltage, same number of levels for both */
if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
@ -683,47 +699,46 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
max_dppclk = find_max_clk_value(clock_table->DppClocks,
clock_table->NumDispClkLevelsEnabled);
} else {
/* Invalid number of entries in the table from PMFW. */
ASSERT(0);
}
if (clock_table->NumFclkLevelsEnabled <= NUM_FCLK_DPM_LEVELS)
max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq,
clock_table->NumFclkLevelsEnabled);
for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
uint32_t min_uclk = clock_table->MemPstateTable[0].UClk;
/* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
ASSERT(clock_table->NumDcfClkLevelsEnabled > 0);
max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, clock_table->NumFclkLevelsEnabled);
for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
int j;
for (j = 1; j < clock_table->NumMemPstatesEnabled; j++) {
if (is_valid_clock_value(clock_table->MemPstateTable[j].UClk) &&
clock_table->MemPstateTable[j].UClk < min_uclk &&
clock_table->MemPstateTable[j].Voltage <= clock_table->SocVoltage[i]) {
min_uclk = clock_table->MemPstateTable[j].UClk;
min_pstate = j;
}
}
/* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
for (j = bw_params->clk_table.num_entries - 1; j > 0; j--)
if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])
break;
break;
bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;
bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz;
bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;
bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
/* Now update clocks we do read */
bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[min_pstate].MemClk;
bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[min_pstate].Voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
clock_table->MemPstateTable[min_pstate].WckRatio);
}
bw_params->clk_table.entries[i].wck_ratio =
convert_wck_ratio(clock_table->MemPstateTable[min_pstate].WckRatio);
/* Dcfclk and Fclk are tied, but at a different ratio */
bw_params->clk_table.entries[i].fclk_mhz = min(max_fclk, 2 * clock_table->DcfClocks[i]);
}
/* Make sure to include at least one entry at highest pstate */
if (max_pstate != min_pstate || i == 0) {
if (i > MAX_NUM_DPM_LVL - 1)
i = MAX_NUM_DPM_LVL - 1;
bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[max_pstate].MemClk;
bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[max_pstate].Voltage;
@ -739,6 +754,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
}
bw_params->clk_table.num_entries = i--;
/* Make sure all highest clocks are included*/
bw_params->clk_table.entries[i].socclk_mhz =
find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
bw_params->clk_table.entries[i].dispclk_mhz =
@ -757,6 +773,11 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
bw_params->clk_table.num_entries_per_clk.num_fclk_levels = clock_table->NumFclkLevelsEnabled;
bw_params->clk_table.num_entries_per_clk.num_memclk_levels = clock_table->NumMemPstatesEnabled;
bw_params->clk_table.num_entries_per_clk.num_socclk_levels = clock_table->NumSocClkLevelsEnabled;
/*
* Set any 0 clocks to max default setting. Not an issue for
* power since we aren't doing switching in such case anyway
*/
for (i = 0; i < bw_params->clk_table.num_entries; i++) {
if (!bw_params->clk_table.entries[i].fclk_mhz) {
bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
@ -965,21 +986,6 @@ struct clk_mgr_funcs dcn35_fpga_funcs = {
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
};
static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
{
uint32_t clock_source;
struct dc_context *ctx = clk_mgr->base.ctx;
REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
if (clk_mgr->dprefclk_ss_percentage != 0) {
clk_mgr->ss_on_dprefclk = true;
clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
}
}
void dcn35_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_dcn35 *clk_mgr,
@ -1043,17 +1049,11 @@ void dcn35_clk_mgr_construct(
dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base);
clk_mgr->base.base.clks.ref_dtbclk_khz = dcn35_smu_get_dtbclk(&clk_mgr->base);
clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
if (!clk_mgr->base.base.clks.ref_dtbclk_khz)
dcn35_smu_set_dtbclk(&clk_mgr->base, true);
clk_mgr->base.base.clks.dtbclk_en = true;
dce_clock_read_ss_info(&clk_mgr->base);
/*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/
dcn35_read_ss_info_from_lut(&clk_mgr->base);
clk_mgr->base.base.bw_params = &dcn35_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
@ -1129,7 +1129,6 @@ void dcn35_clk_mgr_construct(
ctx->dc->debug.disable_dpp_power_gate = false;
ctx->dc->debug.disable_hubp_power_gate = false;
ctx->dc->debug.disable_dsc_power_gate = false;
ctx->dc->debug.disable_hpo_power_gate = false;
} else {
/*let's reset the config control flag*/
ctx->dc->config.disable_ips = DMUB_IPS_DISABLE_ALL; /*pmfw not support it, disable it all*/


@ -874,6 +874,7 @@ struct dc_debug_options {
unsigned int seamless_boot_odm_combine;
unsigned int force_odm_combine_4to1; //bit vector based on otg inst
int minimum_z8_residency_time;
int minimum_z10_residency_time;
bool disable_z9_mpc;
unsigned int force_fclk_khz;
bool enable_tri_buf;
@ -1608,7 +1609,6 @@ struct dc_link {
enum edp_revision edp_revision;
union dpcd_sink_ext_caps dpcd_sink_ext_caps;
struct backlight_settings backlight_settings;
struct psr_settings psr_settings;
struct replay_settings replay_settings;


@ -991,10 +991,6 @@ struct link_mst_stream_allocation_table {
struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
};
struct backlight_settings {
uint32_t backlight_millinits;
};
/* PSR feature flags */
struct psr_settings {
bool psr_feature_enabled; // PSR is supported by sink


@ -871,7 +871,7 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = false,
.enable_z9_disable_interface = true,
.minimum_z8_residency_time = 2000,
.minimum_z8_residency_time = 2100,
.psr_skip_crtc_disable = true,
.replay_skip_crtc_disabled = true,
.disable_dmcu = true,


@ -261,6 +261,7 @@ void pg_cntl35_hpo_pg_control(struct pg_cntl *pg_cntl, bool power_on)
uint32_t power_gate = power_on ? 0 : 1;
uint32_t pwr_status = power_on ? 0 : 2;
uint32_t org_ip_request_cntl;
uint32_t power_forceon;
bool block_enabled;
if (pg_cntl->ctx->dc->debug.ignore_pg ||
@ -277,6 +278,10 @@ void pg_cntl35_hpo_pg_control(struct pg_cntl *pg_cntl, bool power_on)
return;
}
REG_GET(DOMAIN25_PG_CONFIG, DOMAIN_POWER_FORCEON, &power_forceon);
if (power_forceon)
return;
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
@ -304,6 +309,7 @@ void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on)
uint32_t power_gate = power_on ? 0 : 1;
uint32_t pwr_status = power_on ? 0 : 2;
uint32_t org_ip_request_cntl;
uint32_t power_forceon;
bool block_enabled;
if (pg_cntl->ctx->dc->debug.ignore_pg ||
@ -319,6 +325,10 @@ void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on)
return;
}
REG_GET(DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, &power_forceon);
if (power_forceon)
return;
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);


@ -1712,6 +1712,13 @@ static bool dcn35_validate_bandwidth(struct dc *dc,
out = dml2_validate(dc, context, fast_validate);
if (fast_validate)
return out;
DC_FP_START();
dcn35_decide_zstate_support(dc, context);
DC_FP_END();
return out;
}
@ -1857,7 +1864,7 @@ static bool dcn35_resource_construct(
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
dc->config.use_default_clock_table = false;
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {


@ -36,7 +36,7 @@
* Define the maximum amount of states supported by the ASIC. Every ASIC has a
* specific number of states; this macro defines the maximum number of states.
*/
#define DC__VOLTAGE_STATES 20
#define DC__VOLTAGE_STATES 40
#define DC__NUM_DPP__4 1
#define DC__NUM_DPP__0_PRESENT 1
#define DC__NUM_DPP__1_PRESENT 1


@ -950,10 +950,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
{
int plane_count;
int i;
unsigned int min_dst_y_next_start_us;
plane_count = 0;
min_dst_y_next_start_us = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
@ -975,26 +973,15 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
struct dc_stream_status *stream_status = &context->stream_status[0];
struct dc_stream_state *current_stream = context->streams[0];
int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
bool is_pwrseq0 = link->link_index == 0;
bool isFreesyncVideo;
isFreesyncVideo = current_stream->adjust.v_total_min == current_stream->adjust.v_total_max;
isFreesyncVideo = isFreesyncVideo && current_stream->timing.v_total < current_stream->adjust.v_total_min;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].stream == current_stream && isFreesyncVideo) {
min_dst_y_next_start_us = context->res_ctx.pipe_ctx[i].dlg_regs.min_dst_y_next_start_us;
break;
}
}
/* Don't support multi-plane configurations */
if (stream_status->plane_count > 1)
return DCN_ZSTATE_SUPPORT_DISALLOW;
if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || min_dst_y_next_start_us > 5000))
if (is_pwrseq0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
return DCN_ZSTATE_SUPPORT_ALLOW;
else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;


@ -1192,13 +1192,16 @@ static bool update_pipe_slice_table_with_split_flags(
*/
struct pipe_ctx *pipe;
bool odm;
int i;
int dc_pipe_idx, dml_pipe_idx = 0;
bool updated = false;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
for (dc_pipe_idx = 0;
dc_pipe_idx < dc->res_pool->pipe_count; dc_pipe_idx++) {
pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx];
if (resource_is_pipe_type(pipe, FREE_PIPE))
continue;
if (merge[i]) {
if (merge[dc_pipe_idx]) {
if (resource_is_pipe_type(pipe, OPP_HEAD))
/* merging OPP head means reducing ODM slice
* count by 1
@ -1213,17 +1216,18 @@ static bool update_pipe_slice_table_with_split_flags(
updated = true;
}
if (split[i]) {
odm = vba->ODMCombineEnabled[vba->pipe_plane[i]] !=
if (split[dc_pipe_idx]) {
odm = vba->ODMCombineEnabled[vba->pipe_plane[dml_pipe_idx]] !=
dm_odm_combine_mode_disabled;
if (odm && resource_is_pipe_type(pipe, OPP_HEAD))
update_slice_table_for_stream(
table, pipe->stream, split[i] - 1);
table, pipe->stream, split[dc_pipe_idx] - 1);
else if (!odm && resource_is_pipe_type(pipe, DPP_PIPE))
update_slice_table_for_plane(table, pipe,
pipe->plane_state, split[i] - 1);
pipe->plane_state, split[dc_pipe_idx] - 1);
updated = true;
}
dml_pipe_idx++;
}
return updated;
}
@ -2231,6 +2235,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
int i, pipe_idx, vlevel_temp = 0;
double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
double dram_speed_from_validation = context->bw_ctx.dml.vba.DRAMSpeed;
double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation;
bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
dm_dram_clock_change_unsupported;
@ -2418,7 +2423,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
}
if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
min_dram_speed_mts = dram_speed_from_validation;
min_dram_speed_mts_margin = 160;
context->bw_ctx.dml.soc.dram_clock_change_latency_us =


@ -164,10 +164,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
},
},
.num_states = 5,
.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
.sr_exit_z8_time_us = 50.0, /*changed from 442.0*/
.sr_enter_plus_exit_z8_time_us = 50.0,/*changed from 560.0*/
.sr_exit_time_us = 14.0,
.sr_enter_plus_exit_time_us = 16.0,
.sr_exit_z8_time_us = 525.0,
.sr_enter_plus_exit_z8_time_us = 715.0,
.fclk_change_latency_us = 20.0,
.usr_retraining_latency_us = 2,
.writeback_latency_us = 12.0,
@ -329,6 +329,48 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
/*temp till dml2 fully work without dml1*/
dml_init_instance(&dc->dml, &dcn3_5_soc, &dcn3_5_ip,
DML_PROJECT_DCN31);
/*copy to dml2, before dml2_create*/
if (clk_table->num_entries > 2) {
for (i = 0; i < clk_table->num_entries; i++) {
dc->dml2_options.bbox_overrides.clks_table.num_states =
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz =
clock_limits[i].dcfclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz =
clock_limits[i].fabricclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz =
clock_limits[i].dispclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz =
clock_limits[i].dppclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz =
clock_limits[i].socclk_mhz;
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels =
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels =
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels =
clk_table->num_entries;
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
clk_table->num_entries;
}
}
/* Update latency values */
dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = dcn3_5_soc.dram_clock_change_latency_us;
dc->dml2_options.bbox_overrides.sr_exit_latency_us = dcn3_5_soc.sr_exit_time_us;
dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = dcn3_5_soc.sr_enter_plus_exit_time_us;
dc->dml2_options.bbox_overrides.sr_exit_z8_time_us = dcn3_5_soc.sr_exit_z8_time_us;
dc->dml2_options.bbox_overrides.sr_enter_plus_exit_z8_time_us = dcn3_5_soc.sr_enter_plus_exit_z8_time_us;
}
static bool is_dual_plane(enum surface_pixel_format format)
@ -507,3 +549,37 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
return pipe_cnt;
}
void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
{
enum dcn_zstate_support_state support = DCN_ZSTATE_SUPPORT_DISALLOW;
unsigned int i, plane_count = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
}
if (plane_count == 0) {
support = DCN_ZSTATE_SUPPORT_ALLOW;
} else if (plane_count == 1 && context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
bool is_pwrseq0 = link && link->link_index == 0;
bool is_psr1 = link && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr;
int minmum_z8_residency =
dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
int minmum_z10_residency =
dc->debug.minimum_z10_residency_time > 0 ? dc->debug.minimum_z10_residency_time : 5000;
bool allow_z10 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z10_residency;
if (is_pwrseq0 && allow_z10)
support = DCN_ZSTATE_SUPPORT_ALLOW;
else if (is_pwrseq0 && is_psr1)
support = allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
else if (allow_z8)
support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
}
context->bw_ctx.bw.dcn.clk.zstate_support = support;
}


@ -39,4 +39,6 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
display_e2e_pipe_params_st *pipes,
bool fast_validate);
void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context);
#endif


@ -6329,7 +6329,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.NoOfDPPThisState,
mode_lib->ms.dpte_group_bytes,
s->HostVMInefficiencyFactor,
mode_lib->ms.soc.hostvm_min_page_size_kbytes,
mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
s->NextMaxVStartup = s->MaxVStartupAllPlanes[j];
@ -6542,7 +6542,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
mode_lib->ms.soc.hostvm_min_page_size_kbytes,
mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k],
mode_lib->ms.MetaRowBytes[j][k],
mode_lib->ms.DPTEBytesPerRow[j][k],
@ -7687,7 +7687,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = mode_lib->ms.PTEBufferSizeNotExceededPerState;
@ -7957,7 +7957,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
UseMinimumDCFCLK_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
UseMinimumDCFCLK_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
UseMinimumDCFCLK_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
UseMinimumDCFCLK_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
UseMinimumDCFCLK_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
UseMinimumDCFCLK_params->ImmediateFlipRequirement = s->ImmediateFlipRequiredFinal;
@ -8699,7 +8699,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = s->dummy_boolean_array[0];
@ -8805,7 +8805,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
mode_lib->ms.cache_display_cfg.hw.DPPPerSurface,
locals->dpte_group_bytes,
s->HostVMInefficiencyFactor,
mode_lib->ms.soc.hostvm_min_page_size_kbytes,
mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
locals->TCalc = 24.0 / locals->DCFCLKDeepSleep;
@ -8995,7 +8995,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
CalculatePrefetchSchedule_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable;
CalculatePrefetchSchedule_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
CalculatePrefetchSchedule_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
CalculatePrefetchSchedule_params->DynamicMetadataEnable = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataEnable[k];
CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataLinesBeforeActiveRequired[k];
@ -9240,7 +9240,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
mode_lib->ms.soc.hostvm_min_page_size_kbytes,
mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
locals->PDEAndMetaPTEBytesFrame[k],
locals->MetaRowByte[k],
locals->PixelPTEBytesPerRow[k],


@ -341,25 +341,42 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
break;
}
/* Override from passed values, mainly for debugging purposes, if available */
if (dml2->config.bbox_overrides.sr_exit_latency_us) {
p->in_states->state_array[0].sr_exit_time_us = dml2->config.bbox_overrides.sr_exit_latency_us;
}
/* Override from passed values, if available */
for (i = 0; i < p->in_states->num_states; i++) {
if (dml2->config.bbox_overrides.sr_exit_latency_us) {
p->in_states->state_array[i].sr_exit_time_us =
dml2->config.bbox_overrides.sr_exit_latency_us;
}
if (dml2->config.bbox_overrides.sr_enter_plus_exit_latency_us) {
p->in_states->state_array[0].sr_enter_plus_exit_time_us = dml2->config.bbox_overrides.sr_enter_plus_exit_latency_us;
}
if (dml2->config.bbox_overrides.sr_enter_plus_exit_latency_us) {
p->in_states->state_array[i].sr_enter_plus_exit_time_us =
dml2->config.bbox_overrides.sr_enter_plus_exit_latency_us;
}
if (dml2->config.bbox_overrides.urgent_latency_us) {
p->in_states->state_array[0].urgent_latency_pixel_data_only_us = dml2->config.bbox_overrides.urgent_latency_us;
}
if (dml2->config.bbox_overrides.sr_exit_z8_time_us) {
p->in_states->state_array[i].sr_exit_z8_time_us =
dml2->config.bbox_overrides.sr_exit_z8_time_us;
}
if (dml2->config.bbox_overrides.dram_clock_change_latency_us) {
p->in_states->state_array[0].dram_clock_change_latency_us = dml2->config.bbox_overrides.dram_clock_change_latency_us;
}
if (dml2->config.bbox_overrides.sr_enter_plus_exit_z8_time_us) {
p->in_states->state_array[i].sr_enter_plus_exit_z8_time_us =
dml2->config.bbox_overrides.sr_enter_plus_exit_z8_time_us;
}
if (dml2->config.bbox_overrides.fclk_change_latency_us) {
p->in_states->state_array[0].fclk_change_latency_us = dml2->config.bbox_overrides.fclk_change_latency_us;
if (dml2->config.bbox_overrides.urgent_latency_us) {
p->in_states->state_array[i].urgent_latency_pixel_data_only_us =
dml2->config.bbox_overrides.urgent_latency_us;
}
if (dml2->config.bbox_overrides.dram_clock_change_latency_us) {
p->in_states->state_array[i].dram_clock_change_latency_us =
dml2->config.bbox_overrides.dram_clock_change_latency_us;
}
if (dml2->config.bbox_overrides.fclk_change_latency_us) {
p->in_states->state_array[i].fclk_change_latency_us =
dml2->config.bbox_overrides.fclk_change_latency_us;
}
}
/* DCFCLK stas values are project specific */
@ -498,8 +515,8 @@ void dml2_translate_socbb_params(const struct dc *in, struct soc_bounding_box_st
out->do_urgent_latency_adjustment = in_soc_params->do_urgent_latency_adjustment;
out->dram_channel_width_bytes = (dml_uint_t)in_soc_params->dram_channel_width_bytes;
out->fabric_datapath_to_dcn_data_return_bytes = (dml_uint_t)in_soc_params->fabric_datapath_to_dcn_data_return_bytes;
out->gpuvm_min_page_size_kbytes = in_soc_params->gpuvm_min_page_size_bytes * 1024;
out->hostvm_min_page_size_kbytes = in_soc_params->hostvm_min_page_size_bytes * 1024;
out->gpuvm_min_page_size_kbytes = in_soc_params->gpuvm_min_page_size_bytes / 1024;
out->hostvm_min_page_size_kbytes = in_soc_params->hostvm_min_page_size_bytes / 1024;
out->mall_allocated_for_dcn_mbytes = (dml_uint_t)in_soc_params->mall_allocated_for_dcn_mbytes;
out->max_avg_dram_bw_use_normal_percent = in_soc_params->max_avg_dram_bw_use_normal_percent;
out->max_avg_fabric_bw_use_normal_percent = in_soc_params->max_avg_fabric_bw_use_normal_percent;
@ -1040,9 +1057,12 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
}
//Generally these are set by referencing our latest BB/IP params in dcn32_resource.c file
dml_dispcfg->plane.GPUVMEnable = true;
dml_dispcfg->plane.GPUVMMaxPageTableLevels = 4;
dml_dispcfg->plane.HostVMEnable = false;
dml_dispcfg->plane.GPUVMEnable = dml2->v20.dml_core_ctx.ip.gpuvm_enable;
dml_dispcfg->plane.GPUVMMaxPageTableLevels = dml2->v20.dml_core_ctx.ip.gpuvm_max_page_table_levels;
dml_dispcfg->plane.HostVMEnable = dml2->v20.dml_core_ctx.ip.hostvm_enable;
dml_dispcfg->plane.HostVMMaxPageTableLevels = dml2->v20.dml_core_ctx.ip.hostvm_max_page_table_levels;
if (dml2->v20.dml_core_ctx.ip.hostvm_enable)
dml2->v20.dml_core_ctx.policy.AllowForPStateChangeOrStutterInVBlankFinal = dml_prefetch_support_uclk_fclk_and_stutter;
dml2_populate_pipe_to_plane_index_mapping(dml2, context);
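
The kbytes fields touched above are stored as bytes / 1024 and expanded back
with * 1024 at the DML call sites; a trivial sanity check of that direction
(example value only):

#include <assert.h>

int main(void)
{
	unsigned int page_size_bytes  = 4096;                    /* example input */
	unsigned int page_size_kbytes = page_size_bytes / 1024;  /* store as KB   */
	unsigned int back_to_bytes    = page_size_kbytes * 1024; /* consume bytes */

	assert(page_size_kbytes == 4);
	assert(back_to_bytes == page_size_bytes);
	return 0;
}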


@ -139,6 +139,8 @@ struct dml2_soc_bbox_overrides {
double urgent_latency_us;
double sr_exit_latency_us;
double sr_enter_plus_exit_latency_us;
double sr_exit_z8_time_us;
double sr_enter_plus_exit_z8_time_us;
double dram_clock_change_latency_us;
double fclk_change_latency_us;
unsigned int dram_num_chan;


@ -487,8 +487,7 @@ bool dcn32_set_mcm_luts(
if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
lut_params = &plane_state->blend_tf->pwl;
else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
cm_helper_translate_curve_to_hw_format(plane_state->ctx,
plane_state->blend_tf,
cm3_helper_translate_curve_to_hw_format(plane_state->blend_tf,
&dpp_base->regamma_params, false);
lut_params = &dpp_base->regamma_params;
}
@ -503,8 +502,7 @@ bool dcn32_set_mcm_luts(
else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
// TODO: dpp_base replace
ASSERT(false);
cm_helper_translate_curve_to_hw_format(plane_state->ctx,
plane_state->in_shaper_func,
cm3_helper_translate_curve_to_hw_format(plane_state->in_shaper_func,
&dpp_base->shaper_params, true);
lut_params = &dpp_base->shaper_params;
}


@ -879,7 +879,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
(link->dpcd_sink_ext_caps.bits.oled == 1)) {
dpcd_set_source_specific_data(link);
msleep(post_oui_delay);
set_cached_brightness_aux(link);
set_default_brightness_aux(link);
}
return true;


@ -2142,8 +2142,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
set_cached_brightness_aux(link);
set_default_brightness_aux(link);
if (link->dpcd_sink_ext_caps.bits.oled == 1)
msleep(bl_oled_enable_delay);
edp_backlight_enable_aux(link, true);


@ -115,7 +115,7 @@ static enum link_training_result perform_fixed_vs_pe_nontransparent_training_seq
lt_settings->cr_pattern_time = 16000;
/* Fixed VS/PE specific: Toggle link rate */
apply_toggle_rate_wa = (link->vendor_specific_lttpr_link_rate_wa == target_rate);
apply_toggle_rate_wa = ((link->vendor_specific_lttpr_link_rate_wa == target_rate) || (link->vendor_specific_lttpr_link_rate_wa == 0));
target_rate = get_dpcd_link_rate(&lt_settings->link_settings);
toggle_rate = (target_rate == 0x6) ? 0xA : 0x6;
@ -271,7 +271,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
/* Vendor specific: Toggle link rate */
toggle_rate = (rate == 0x6) ? 0xA : 0x6;
if (link->vendor_specific_lttpr_link_rate_wa == rate) {
if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
core_link_write_dpcd(
link,
DP_LINK_BW_SET,
@ -617,7 +617,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
/* Vendor specific: Toggle link rate */
toggle_rate = (rate == 0x6) ? 0xA : 0x6;
if (link->vendor_specific_lttpr_link_rate_wa == rate) {
if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
core_link_write_dpcd(
link,
DP_LINK_BW_SET,


@ -170,7 +170,6 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
*(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
*(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
link->backlight_settings.backlight_millinits = backlight_millinits;
if (!link->dpcd_caps.panel_luminance_control) {
if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
@ -288,9 +287,9 @@ bool set_default_brightness_aux(struct dc_link *link)
if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
if (!read_default_bl_aux(link, &default_backlight))
default_backlight = 150000;
// if < 1 nits or > 5000, it might be wrong readback
if (default_backlight < 1000 || default_backlight > 5000000)
default_backlight = 150000; //
// if > 5000, it might be wrong readback
if (default_backlight > 5000000)
default_backlight = 150000;
return edp_set_backlight_level_nits(link, true,
default_backlight, 0);
@ -298,15 +297,6 @@ bool set_default_brightness_aux(struct dc_link *link)
return false;
}
bool set_cached_brightness_aux(struct dc_link *link)
{
if (link->backlight_settings.backlight_millinits)
return edp_set_backlight_level_nits(link, true,
link->backlight_settings.backlight_millinits, 0);
else
return set_default_brightness_aux(link);
return false;
}
bool edp_is_ilr_optimization_enabled(struct dc_link *link)
{
if (link->dpcd_caps.edp_supported_link_rates_count == 0 || !link->panel_config.ilr.optimize_edp_link_rate)


@ -30,7 +30,6 @@
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
bool set_default_brightness_aux(struct dc_link *link);
bool set_cached_brightness_aux(struct dc_link *link);
void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd);
int edp_get_backlight_level(const struct dc_link *link);
bool edp_get_backlight_level_nits(struct dc_link *link,


@ -1077,6 +1077,7 @@ enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t ti
ack = dmub->hw_funcs.read_inbox0_ack_register(dmub);
if (ack)
return DMUB_STATUS_OK;
udelay(1);
}
return DMUB_STATUS_TIMEOUT;
}
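
One plausible reading of the udelay(1) added above: the loop bounds the wait
by iterating once per microsecond of timeout, so each pass has to take
roughly a microsecond for the timeout to be meaningful. A generic userspace
sketch of that poll-with-timeout shape (the names and the assumption about
the loop bound are mine, not DMUB's):

#include <stdbool.h>
#include <unistd.h>

static bool read_ack(void)
{
	return false;                 /* stand-in for the ack-register read */
}

static int wait_for_ack(unsigned int timeout_us)
{
	for (unsigned int i = 0; i < timeout_us; i++) {
		if (read_ack())
			return 0;     /* acknowledged */
		usleep(1);            /* keep one iteration at roughly 1 us */
	}
	return -1;                    /* timed out */
}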


@ -6369,6 +6369,8 @@
#define regTCP_INVALIDATE_BASE_IDX 1
#define regTCP_STATUS 0x19a1
#define regTCP_STATUS_BASE_IDX 1
#define regTCP_CNTL 0x19a2
#define regTCP_CNTL_BASE_IDX 1
#define regTCP_CNTL2 0x19a3
#define regTCP_CNTL2_BASE_IDX 1
#define regTCP_DEBUG_INDEX 0x19a5


@ -781,6 +781,8 @@
#define regBIF_BIF256_CI256_RC3X4_USB4_PCIE_CNTL2_BASE_IDX 5
#define regBIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1 0x420187
#define regBIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1_BASE_IDX 5
#define regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3 0x4201c6
#define regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3_BASE_IDX 5
// addressBlock: nbio_nbif0_bif_cfg_dev0_rc_bifcfgdecp

@@ -24646,6 +24646,35 @@
//BIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK 0x00000001L
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK 0x00000008L
//BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_MAX_PAYLOAD_SIZE_MODE__SHIFT 0x8
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x9
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_10BIT_TAG_EN_OVERRIDE__SHIFT 0xb
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_10BIT_TAG_EN_OVERRIDE__SHIFT 0xd
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__MST_DROP_SYNC_FLOOD_EN__SHIFT 0xf
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_MAX_PAYLOAD_SIZE_MODE__SHIFT 0x10
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x11
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_MAX_READ_REQUEST_SIZE_MODE__SHIFT 0x14
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_PRIV_MAX_READ_REQUEST_SIZE__SHIFT 0x15
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_MAX_READ_SAFE_MODE__SHIFT 0x18
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x19
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_MODE__SHIFT 0x1b
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV__SHIFT 0x1c
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x1e
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_MAX_PAYLOAD_SIZE_MODE_MASK 0x00000100L
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_PRIV_MAX_PAYLOAD_SIZE_MASK 0x00000600L
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_10BIT_TAG_EN_OVERRIDE_MASK 0x00001800L
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_10BIT_TAG_EN_OVERRIDE_MASK 0x00006000L
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__MST_DROP_SYNC_FLOOD_EN_MASK 0x00008000L
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_MAX_PAYLOAD_SIZE_MODE_MASK 0x00010000L
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_PRIV_MAX_PAYLOAD_SIZE_MASK 0x000E0000L
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_MAX_READ_REQUEST_SIZE_MODE_MASK 0x00100000L
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_PRIV_MAX_READ_REQUEST_SIZE_MASK 0x00E00000L
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_MAX_READ_SAFE_MODE_MASK 0x01000000L
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_EXTENDED_TAG_EN_OVERRIDE_MASK 0x06000000L
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_MODE_MASK 0x08000000L
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV_MASK 0x30000000L
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_EXTENDED_TAG_EN_OVERRIDE_MASK 0xC0000000L
// addressBlock: nbio_nbif0_bif_cfg_dev0_rc_bifcfgdecp
//BIF_CFG_DEV0_RC0_VENDOR_ID

@@ -444,6 +444,7 @@ struct amd_pm_funcs {
struct dpm_clocks *clock_table);
int (*get_smu_prv_buf_details)(void *handle, void **addr, size_t *size);
void (*pm_compute_clocks)(void *handle);
int (*notify_rlc_state)(void *handle, bool en);
};
struct metrics_table_header {

@@ -181,6 +181,24 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
return ret;
}
int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
{
int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
if (pp_funcs && pp_funcs->notify_rlc_state) {
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->notify_rlc_state(
adev->powerplay.pp_handle,
en);
mutex_unlock(&adev->pm.mutex);
}
return ret;
}
bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

@@ -415,6 +415,8 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
enum pp_mp1_state mp1_state);
int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en);
int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev);
int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);

@@ -1710,6 +1710,16 @@ static int smu_disable_dpms(struct smu_context *smu)
}
}
/* Notify SMU RLC is going to be off, stop RLC and SMU interaction.
* otherwise SMU will hang while interacting with RLC if RLC is halted
* this is a WA for Vangogh asic which fix the SMU hang issue.
*/
ret = smu_notify_rlc_state(smu, false);
if (ret) {
dev_err(adev->dev, "Fail to notify rlc status!\n");
return ret;
}
if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&
!((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) &&
!amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)

@@ -1360,6 +1360,11 @@ struct pptable_funcs {
* management.
*/
int (*dpm_set_umsch_mm_enable)(struct smu_context *smu, bool enable);
/**
* @notify_rlc_state: Notify RLC power state to SMU.
*/
int (*notify_rlc_state)(struct smu_context *smu, bool en);
};
typedef enum {

@@ -2193,8 +2193,7 @@ static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clock
return 0;
}
static int vangogh_system_features_control(struct smu_context *smu, bool en)
static int vangogh_notify_rlc_state(struct smu_context *smu, bool en)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
@@ -2523,7 +2522,7 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.print_clk_levels = vangogh_common_print_clk_levels,
.set_default_dpm_table = vangogh_set_default_dpm_tables,
.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
.system_features_control = vangogh_system_features_control,
.notify_rlc_state = vangogh_notify_rlc_state,
.feature_is_enabled = smu_cmn_feature_is_enabled,
.set_power_profile_mode = vangogh_set_power_profile_mode,
.get_power_profile_mode = vangogh_get_power_profile_mode,

@@ -257,8 +257,11 @@ static int aldebaran_tables_init(struct smu_context *smu)
}
smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
if (!smu_table->ecc_table)
if (!smu_table->ecc_table) {
kfree(smu_table->metrics_table);
kfree(smu_table->gpu_metrics_table);
return -ENOMEM;
}
return 0;
}

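The aldebaran fix above releases the tables allocated earlier in the function when the ECC table allocation fails, closing the leak in the SMU error path. A purely illustrative way to express the same unwind with goto labels; the label name is invented for this sketch, and the applied patch keeps the inline kfree() calls shown above:

        smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
        if (!smu_table->ecc_table)
                goto err_free_metrics;

        return 0;

err_free_metrics:
        kfree(smu_table->gpu_metrics_table);    /* allocated just before ecc_table */
        kfree(smu_table->metrics_table);        /* allocated first in this function */
        return -ENOMEM;
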
@@ -97,6 +97,7 @@
#define smu_get_default_config_table_settings(smu, config_table) smu_ppt_funcs(get_default_config_table_settings, -EOPNOTSUPP, smu, config_table)
#define smu_set_config_table(smu, config_table) smu_ppt_funcs(set_config_table, -EOPNOTSUPP, smu, config_table)
#define smu_init_pptable_microcode(smu) smu_ppt_funcs(init_pptable_microcode, 0, smu)
#define smu_notify_rlc_state(smu, en) smu_ppt_funcs(notify_rlc_state, 0, smu, en)
#endif
#endif

@@ -4,8 +4,6 @@
* Copyright (C) 2017 Broadcom
*/
#include <linux/device.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
@@ -21,7 +19,6 @@ struct panel_bridge {
struct drm_bridge bridge;
struct drm_connector connector;
struct drm_panel *panel;
struct device_link *link;
u32 connector_type;
};
@@ -63,24 +60,13 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
struct drm_connector *connector = &panel_bridge->connector;
struct drm_panel *panel = panel_bridge->panel;
struct drm_device *drm_dev = bridge->dev;
int ret;
panel_bridge->link = device_link_add(drm_dev->dev, panel->dev,
DL_FLAG_STATELESS);
if (!panel_bridge->link) {
DRM_ERROR("Failed to add device link between %s and %s\n",
dev_name(drm_dev->dev), dev_name(panel->dev));
return -EINVAL;
}
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
return 0;
if (!bridge->encoder) {
DRM_ERROR("Missing encoder\n");
device_link_del(panel_bridge->link);
return -ENODEV;
}
@@ -92,7 +78,6 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
panel_bridge->connector_type);
if (ret) {
DRM_ERROR("Failed to initialize connector\n");
device_link_del(panel_bridge->link);
return ret;
}
@@ -115,8 +100,6 @@ static void panel_bridge_detach(struct drm_bridge *bridge)
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
struct drm_connector *connector = &panel_bridge->connector;
device_link_del(panel_bridge->link);
/*
* Cleanup the connector if we know it was initialized.
*

@@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Copyright (c) 2022 Red Hat.
*

@@ -278,7 +278,7 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
/*
/**
* drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
* @dev: drm_device to import into
* @file_priv: drm file-private structure
@@ -292,9 +292,9 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release);
*
* Returns 0 on success or a negative error code on failure.
*/
static int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv, int prime_fd,
uint32_t *handle)
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv, int prime_fd,
uint32_t *handle)
{
struct dma_buf *dma_buf;
struct drm_gem_object *obj;
@@ -360,6 +360,7 @@ out_put:
dma_buf_put(dma_buf);
return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -408,7 +409,7 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
return dmabuf;
}
/*
/**
* drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
* @dev: dev to export the buffer from
* @file_priv: drm file-private structure
@@ -421,10 +422,10 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
* The actual exporting from GEM object to a dma-buf is done through the
* &drm_gem_object_funcs.export callback.
*/
static int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle,
uint32_t flags,
int *prime_fd)
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle,
uint32_t flags,
int *prime_fd)
{
struct drm_gem_object *obj;
int ret = 0;
@@ -506,6 +507,7 @@ out_unlock:
return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -864,9 +866,9 @@ EXPORT_SYMBOL(drm_prime_get_contiguous_size);
* @obj: GEM object to export
* @flags: flags like DRM_CLOEXEC and DRM_RDWR
*
* This is the implementation of the &drm_gem_object_funcs.export functions
* for GEM drivers using the PRIME helpers. It is used as the default for
* drivers that do not set their own.
* This is the implementation of the &drm_gem_object_funcs.export functions for GEM drivers
* using the PRIME helpers. It is used as the default in
* drm_gem_prime_handle_to_fd().
*/
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
int flags)
@@ -962,9 +964,10 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev);
* @dev: drm_device to import into
* @dma_buf: dma-buf object to import
*
* This is the implementation of the gem_prime_import functions for GEM
* drivers using the PRIME helpers. It is the default for drivers that do
* not set their own &drm_driver.gem_prime_import.
* This is the implementation of the gem_prime_import functions for GEM drivers
* using the PRIME helpers. Drivers can use this as their
* &drm_driver.gem_prime_import implementation. It is used as the default
* implementation in drm_gem_prime_fd_to_handle().
*
* Drivers must arrange to call drm_prime_gem_destroy() from their
* &drm_gem_object_funcs.free hook when using this function.

@@ -6853,10 +6853,11 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
intel_pre_plane_update(state, crtc);
if (!old_crtc_state->hw.active)
continue;
intel_pre_plane_update(state, crtc);
intel_crtc_disable_planes(state, crtc);
}

@@ -6037,8 +6037,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* (eg. Acer Chromebook C710), so we'll check it only if multiple
* ports are attempting to use the same AUX CH, according to VBT.
*/
if (intel_bios_dp_has_shared_aux_ch(encoder->devdata) &&
!intel_digital_port_connected(encoder)) {
if (intel_bios_dp_has_shared_aux_ch(encoder->devdata)) {
/*
* If this fails, presume the DPCD answer came
* from some other port using the same AUX CH.
@@ -6046,10 +6045,27 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* FIXME maybe cleaner to check this before the
* DPCD read? Would need sort out the VDD handling...
*/
drm_info(&dev_priv->drm,
"[ENCODER:%d:%s] HPD is down, disabling eDP\n",
encoder->base.base.id, encoder->base.name);
goto out_vdd_off;
if (!intel_digital_port_connected(encoder)) {
drm_info(&dev_priv->drm,
"[ENCODER:%d:%s] HPD is down, disabling eDP\n",
encoder->base.base.id, encoder->base.name);
goto out_vdd_off;
}
/*
* Unfortunately even the HPD based detection fails on
* eg. Asus B360M-A (CFL+CNP), so as a last resort fall
* back to checking for a VGA branch device. Only do this
* on known affected platforms to minimize false positives.
*/
if (DISPLAY_VER(dev_priv) == 9 && drm_dp_is_branch(intel_dp->dpcd) &&
(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) ==
DP_DWN_STRM_PORT_TYPE_ANALOG) {
drm_info(&dev_priv->drm,
"[ENCODER:%d:%s] VGA converter detected, disabling eDP\n",
encoder->base.base.id, encoder->base.name);
goto out_vdd_off;
}
}
mutex_lock(&dev_priv->drm.mode_config.mutex);

@@ -41,12 +41,15 @@ void intel_engine_add_user(struct intel_engine_cs *engine)
llist_add(&engine->uabi_llist, &engine->i915->uabi_engines_llist);
}
static const u8 uabi_classes[] = {
#define I915_NO_UABI_CLASS ((u16)(-1))
static const u16 uabi_classes[] = {
[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
[COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
[OTHER_CLASS] = I915_NO_UABI_CLASS, /* Not exposed to users, no uabi class. */
};
static int engine_cmp(void *priv, const struct list_head *A,
@@ -200,6 +203,7 @@ static void engine_rename(struct intel_engine_cs *engine, const char *name, u16
void intel_engines_driver_register(struct drm_i915_private *i915)
{
u16 name_instance, other_instance = 0;
struct legacy_ring ring = {};
struct list_head *it, *next;
struct rb_node **p, *prev;
@@ -216,27 +220,28 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
if (intel_gt_has_unrecoverable_error(engine->gt))
continue; /* ignore incomplete engines */
/*
* We don't want to expose the GSC engine to the users, but we
* still rename it so it is easier to identify in the debug logs
*/
if (engine->id == GSC0) {
engine_rename(engine, "gsc", 0);
continue;
}
GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
engine->uabi_class = uabi_classes[engine->class];
if (engine->uabi_class == I915_NO_UABI_CLASS) {
name_instance = other_instance++;
} else {
GEM_BUG_ON(engine->uabi_class >=
ARRAY_SIZE(i915->engine_uabi_class_count));
name_instance =
i915->engine_uabi_class_count[engine->uabi_class]++;
}
engine->uabi_instance = name_instance;
GEM_BUG_ON(engine->uabi_class >=
ARRAY_SIZE(i915->engine_uabi_class_count));
engine->uabi_instance =
i915->engine_uabi_class_count[engine->uabi_class]++;
/* Replace the internal name with the final user facing name */
/*
* Replace the internal name with the final user and log facing
* name.
*/
engine_rename(engine,
intel_engine_class_repr(engine->class),
engine->uabi_instance);
name_instance);
if (engine->uabi_class == I915_NO_UABI_CLASS)
continue;
rb_link_node(&engine->uabi_node, prev, p);
rb_insert_color(&engine->uabi_node, &i915->uabi_engines);

@@ -38,7 +38,7 @@ typedef struct PACKED_REGISTRY_TABLE
{
NvU32 size;
NvU32 numEntries;
PACKED_REGISTRY_ENTRY entries[0];
PACKED_REGISTRY_ENTRY entries[] __counted_by(numEntries);
} PACKED_REGISTRY_TABLE;
#endif

@@ -318,8 +318,9 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
(!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
continue;
if (pi < 0)
pi = i;
/* pick the last one as it will be smallest. */
pi = i;
/* Stop once the buffer is larger than the current page size. */
if (*size >= 1ULL << vmm->page[i].shift)
break;

@@ -365,10 +365,8 @@ r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
}
ret = r535_gsp_cmdq_push(gsp, rpc);
if (ret) {
mutex_unlock(&gsp->cmdq.mutex);
if (ret)
return ERR_PTR(ret);
}
if (wait) {
msg = r535_gsp_msg_recv(gsp, fn, repc);
@@ -1048,7 +1046,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
char *strings;
int str_offset;
int i;
size_t rpc_size = sizeof(*rpc) + sizeof(rpc->entries[0]) * NV_GSP_REG_NUM_ENTRIES;
size_t rpc_size = struct_size(rpc, entries, NV_GSP_REG_NUM_ENTRIES);
/* add strings + null terminator */
for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++)

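The two nouveau GSP hunks above go together: the registry table's trailing array becomes a proper flexible array annotated with __counted_by(), and the allocation size switches from open-coded sizeof arithmetic to struct_size(), which covers the header and is checked against multiplication overflow. A self-contained sketch of that idiom, using invented type and field names rather than the nouveau ones:

#include <linux/overflow.h>     /* struct_size() */
#include <linux/slab.h>
#include <linux/types.h>

struct reg_entry {
        u32 key;
        u32 value;
};

struct reg_table {
        u32 num_entries;
        struct reg_entry entries[] __counted_by(num_entries);
};

static struct reg_table *reg_table_alloc(u32 n)
{
        /* sizeof(struct reg_table) + n * sizeof(struct reg_entry), overflow-safe */
        struct reg_table *t = kzalloc(struct_size(t, entries, n), GFP_KERNEL);

        if (t)
                t->num_entries = n;     /* keep the __counted_by bound in sync */
        return t;
}
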
@@ -1764,6 +1764,7 @@ static const struct panel_desc starry_qfh032011_53g_desc = {
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_LPM,
.init_cmds = starry_qfh032011_53g_init_cmd,
.lp11_before_reset = true,
};
static const struct drm_display_mode starry_himax83102_j02_default_mode = {

@@ -1254,9 +1254,9 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, -EPROBE_DEFER, "cannot get secondary DSI host\n");
pinfo->dsi[1] = mipi_dsi_device_register_full(dsi1_host, info);
if (!pinfo->dsi[1]) {
if (IS_ERR(pinfo->dsi[1])) {
dev_err(dev, "cannot get secondary DSI device\n");
return -ENODEV;
return PTR_ERR(pinfo->dsi[1]);
}
}

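The nt36523 error-check fix above matters because mipi_dsi_device_register_full() reports failure through ERR_PTR(), never NULL, so a NULL test lets an error pointer escape and drops the real error code. A minimal sketch of the ERR_PTR handling pattern; the dev_err_probe() form here is only an illustration, the actual fix keeps the dev_err() call shown above:

        pinfo->dsi[1] = mipi_dsi_device_register_full(dsi1_host, info);
        if (IS_ERR(pinfo->dsi[1]))
                return dev_err_probe(dev, PTR_ERR(pinfo->dsi[1]),
                                     "cannot get secondary DSI device\n");
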
@@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
#ifndef __DRM_GPUVM_H__
#define __DRM_GPUVM_H__

@@ -60,12 +60,19 @@ enum dma_data_direction;
struct drm_device;
struct drm_gem_object;
struct drm_file;
/* core prime functions */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
struct dma_buf_export_info *exp_info);
void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv, int prime_fd, uint32_t *handle);
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd);
/* helper functions for exporting */
int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach);

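With these declarations back in the public header, drivers that implement their own buffer-sharing paths can again call the PRIME conversion helpers directly. A minimal sketch of such a call site; the ioctl wrapper and its argument struct are hypothetical, only the drm_gem_prime_fd_to_handle() call reflects the exported API above:

struct foo_import_args {        /* hypothetical driver uapi struct */
        __s32 fd;
        __u32 handle;
};

static int foo_import_dmabuf_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file_priv)
{
        struct foo_import_args *args = data;
        uint32_t handle;
        int ret;

        /* Turn the incoming dma-buf fd into a GEM handle owned by file_priv. */
        ret = drm_gem_prime_fd_to_handle(dev, file_priv, args->fd, &handle);
        if (ret)
                return ret;

        args->handle = handle;
        return 0;
}
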
@@ -498,6 +498,21 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops);
}
/**
* dma_fence_is_later_or_same - return true if f1 is later or same as f2
* @f1: the first fence from the same context
* @f2: the second fence from the same context
*
* Returns true if f1 is chronologically later than f2 or the same fence. Both
* fences must be from the same context, since a seqno is not re-used across
* contexts.
*/
static inline bool dma_fence_is_later_or_same(struct dma_fence *f1,
struct dma_fence *f2)
{
return f1 == f2 || dma_fence_is_later(f1, f2);
}
/**
* dma_fence_later - return the chronologically later fence
* @f1: the first fence from the same context
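The new dma_fence_is_later_or_same() helper above folds the "same fence" case into the ordering check, which is what the dma_resv double-add fix relies on. A minimal caller-side sketch of how such a check can be used; the surrounding replace logic and the variable names are assumptions, not taken from dma-resv.c:

        /* Sketch: both fences come from the same context, keep the newest one. */
        if (dma_fence_is_later_or_same(new_fence, old_fence))
                slot = new_fence;       /* new_fence supersedes (or equals) old_fence */
        else
                slot = old_fence;       /* old_fence is strictly newer, keep it */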