Merge tag 'amd-drm-next-5.14-2021-06-09' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-5.14-2021-06-09:

amdgpu:
- SR-IOV fixes
- Smartshift updates
- GPUVM TLB flush updates
- 16bpc fixed point display fix for DCE11
- BACO cleanups and core refactoring
- Aldebaran updates
- Initial Yellow Carp support
- RAS fixes
- PM API cleanup
- DC visual confirm updates
- DC DP MST fixes
- DC DML fixes
- Misc code cleanups and bug fixes

amdkfd:
- Initial Yellow Carp support

radeon:
- memcpy_to/from_io fixes

UAPI:
- Add Yellow Carp chip family id
  Used internally in the kernel driver and by mesa

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210610031649.4006-1-alexander.deucher@amd.com
This commit is contained in:
commit c707b73f0c
@@ -300,4 +300,25 @@ pcie_replay_count
 .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
    :doc: pcie_replay_count
 
+GPU SmartShift Information
+============================
+
+GPU SmartShift information via sysfs
+
+smartshift_apu_power
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+   :doc: smartshift_apu_power
+
+smartshift_dgpu_power
+---------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+   :doc: smartshift_dgpu_power
+
+smartshift_bias
+---------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
+   :doc: smartshift_bias
@@ -75,7 +75,7 @@ amdgpu-y += \
 	vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
 	arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
 	nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o \
-	beige_goby_reg_init.o
+	beige_goby_reg_init.o yellow_carp_reg_init.o
 
 # add DF block
 amdgpu-y += \
@@ -211,6 +211,7 @@ extern int amdgpu_discovery;
 extern int amdgpu_mes;
 extern int amdgpu_noretry;
 extern int amdgpu_force_asic_type;
+extern int amdgpu_smartshift_bias;
 #ifdef CONFIG_HSA_AMD
 extern int sched_policy;
 extern bool debug_evictions;
@@ -268,6 +269,10 @@ extern int amdgpu_num_kcq;
 #define CIK_CURSOR_WIDTH 128
 #define CIK_CURSOR_HEIGHT 128
 
+/* smart shift bias level limits */
+#define AMDGPU_SMARTSHIFT_MAX_BIAS (100)
+#define AMDGPU_SMARTSHIFT_MIN_BIAS (-100)
+
 struct amdgpu_device;
 struct amdgpu_ib;
 struct amdgpu_cs_parser;
@@ -1280,6 +1285,11 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
 int amdgpu_device_baco_enter(struct drm_device *dev);
 int amdgpu_device_baco_exit(struct drm_device *dev);
 
+void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
+		struct amdgpu_ring *ring);
+void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
+		struct amdgpu_ring *ring);
+
 /* atpx handler */
 #if defined(CONFIG_VGA_SWITCHEROO)
 void amdgpu_register_atpx_handler(void);
@@ -269,7 +269,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
 		uint64_t *size);
 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
+		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed);
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_sync_memory(
@@ -95,8 +95,8 @@ static void program_sh_mem_settings_v10_3(struct kgd_dev *kgd, uint32_t vmid,
 
 	lock_srbm(kgd, 0, 0, 0, vmid);
 
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
+	WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
+	WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
 	/* APE1 no longer exists on GFX9 */
 
 	unlock_srbm(kgd);
@@ -129,7 +129,7 @@ static int init_interrupts_v10_3(struct kgd_dev *kgd, uint32_t pipe_id)
 
 	lock_srbm(kgd, mec, pipe, 0, 0);
 
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
+	WREG32_SOC15(GC, 0, mmCPC_INT_CNTL,
 		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
 		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
 
@@ -212,10 +212,10 @@ static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 
 		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
 			mec, pipe, queue_id);
-		value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
+		value = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
 		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
 			((mec << 5) | (pipe << 3) | queue_id | 0x80));
-		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
+		WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, value);
 	}
 
 	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
@@ -224,13 +224,13 @@ static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 
 	for (reg = hqd_base;
 	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
-		WREG32(reg, mqd_hqd[reg - hqd_base]);
+		WREG32_SOC15_IP(GC, reg, mqd_hqd[reg - hqd_base]);
 
 
 	/* Activate doorbell logic before triggering WPTR poll. */
 	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
 			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);
+	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, data);
 
 	if (wptr) {
 		/* Don't read wptr with get_user because the user
@@ -259,17 +259,17 @@ static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
 		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
 
-		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
+		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
 		       lower_32_bits(guessed_wptr));
-		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
+		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
 		       upper_32_bits(guessed_wptr));
-		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
+		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
 		       lower_32_bits((uint64_t)wptr));
-		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
+		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
 		       upper_32_bits((uint64_t)wptr));
 		pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
 			 (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
-		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
+		WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1,
 		       (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
 	}
 
@@ -279,7 +279,7 @@ static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
 
 	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);
+	WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data);
 
 	release_queue(kgd);
 
@@ -350,7 +350,7 @@ static int hqd_dump_v10_3(struct kgd_dev *kgd,
 		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
 			break;				\
 		(*dump)[i][0] = (addr) << 2;		\
-		(*dump)[i++][1] = RREG32(addr);		\
+		(*dump)[i++][1] = RREG32_SOC15_IP(GC, addr);		\
 	} while (0)
 
 	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
@@ -482,13 +482,13 @@ static bool hqd_is_occupied_v10_3(struct kgd_dev *kgd, uint64_t queue_address,
 	uint32_t low, high;
 
 	acquire_queue(kgd, pipe_id, queue_id);
-	act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
+	act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
 	if (act) {
 		low = lower_32_bits(queue_address >> 8);
 		high = upper_32_bits(queue_address >> 8);
 
-		if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
-		   high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
+		if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) &&
+		   high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
 			retval = true;
 	}
 	release_queue(kgd);
@@ -542,11 +542,11 @@ static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
 		break;
 	}
 
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);
+	WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, type);
 
 	end_jiffies = (utimeout * HZ / 1000) + jiffies;
 	while (true) {
-		temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
+		temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
 		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
 			break;
 		if (time_after(jiffies, end_jiffies)) {
@@ -626,7 +626,7 @@ static int wave_control_execute_v10_3(struct kgd_dev *kgd,
 
 	mutex_lock(&adev->grbm_idx_mutex);
 
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val);
+	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val);
 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);
 
 	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
@@ -636,7 +636,7 @@ static int wave_control_execute_v10_3(struct kgd_dev *kgd,
 	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
 		SE_BROADCAST_WRITES, 1);
 
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data);
+	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	return 0;
@@ -278,7 +278,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 	write_seqcount_end(&resv->seq);
 
 	/* Drop the references to the removed fences or move them to ef_list */
-	for (i = j, k = 0; i < old->shared_count; ++i) {
+	for (i = j; i < old->shared_count; ++i) {
 		struct dma_fence *f;
 
 		f = rcu_dereference_protected(new->shared[i],
@@ -1070,7 +1070,8 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
 
 static int update_gpuvm_pte(struct kgd_mem *mem,
 			    struct kfd_mem_attachment *entry,
-			    struct amdgpu_sync *sync)
+			    struct amdgpu_sync *sync,
+			    bool *table_freed)
 {
 	struct amdgpu_bo_va *bo_va = entry->bo_va;
 	struct amdgpu_device *adev = entry->adev;
@@ -1081,7 +1082,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
 		return ret;
 
 	/* Update the page tables  */
-	ret = amdgpu_vm_bo_update(adev, bo_va, false);
+	ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
 	if (ret) {
 		pr_err("amdgpu_vm_bo_update failed\n");
 		return ret;
@@ -1093,7 +1094,8 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
 static int map_bo_to_gpuvm(struct kgd_mem *mem,
 			   struct kfd_mem_attachment *entry,
 			   struct amdgpu_sync *sync,
-			   bool no_update_pte)
+			   bool no_update_pte,
+			   bool *table_freed)
 {
 	int ret;
 
@@ -1110,7 +1112,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
 	if (no_update_pte)
 		return 0;
 
-	ret = update_gpuvm_pte(mem, entry, sync);
+	ret = update_gpuvm_pte(mem, entry, sync, table_freed);
 	if (ret) {
 		pr_err("update_gpuvm_pte() failed\n");
 		goto update_gpuvm_pte_failed;
@@ -1608,7 +1610,8 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 }
 
 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
+		struct kgd_dev *kgd, struct kgd_mem *mem,
+		void *drm_priv, bool *table_freed)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
@@ -1696,7 +1699,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 			 entry->va, entry->va + bo_size, entry);
 
 		ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
-				      is_invalid_userptr);
+				      is_invalid_userptr, table_freed);
 		if (ret) {
 			pr_err("Failed to map bo to gpuvm\n");
 			goto out_unreserve;
@@ -2146,7 +2149,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 			continue;
 
 		kfd_mem_dmaunmap_attachment(mem, attachment);
-		ret = update_gpuvm_pte(mem, attachment, &sync);
+		ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
 		if (ret) {
 			pr_err("%s: update PTE failed\n", __func__);
 			/* make sure this gets validated again */
@@ -2352,7 +2355,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 			continue;
 
 		kfd_mem_dmaunmap_attachment(mem, attachment);
-		ret = update_gpuvm_pte(mem, attachment, &sync_obj);
+		ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
 		if (ret) {
 			pr_debug("Memory eviction: update PTE failed. Try again\n");
 			goto validate_map_fail;
@@ -662,7 +662,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 * @error:	error number
 * @backoff:	indicator to backoff the reservation
 *
- * If error is set than unvalidate buffer, otherwise just free memory
+ * If error is set then unvalidate buffer, otherwise just free memory
 * used by parsing context.
 **/
 static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
@@ -781,7 +781,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;
 
-	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
+	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL);
 	if (r)
 		return r;
 
@@ -792,7 +792,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
 		bo_va = fpriv->csa_va;
 		BUG_ON(!bo_va);
-		r = amdgpu_vm_bo_update(adev, bo_va, false);
+		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
 		if (r)
 			return r;
 
@@ -811,7 +811,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 		if (bo_va == NULL)
 			continue;
 
-		r = amdgpu_vm_bo_update(adev, bo_va, false);
+		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
 		if (r)
 			return r;
 
@@ -84,6 +84,7 @@ MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin");
 
 #define AMDGPU_RESUME_MS		2000
 
@@ -122,6 +123,7 @@ const char *amdgpu_asic_name[] = {
 	"VANGOGH",
 	"DIMGREY_CAVEFISH",
 	"BEIGE_GOBY",
+	"YELLOW_CARP",
 	"LAST",
 };
 
@@ -313,9 +315,9 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 		if (write) {
 			memcpy_toio(addr, buf, count);
 			mb();
-			amdgpu_asic_flush_hdp(adev, NULL);
+			amdgpu_device_flush_hdp(adev, NULL);
 		} else {
-			amdgpu_asic_invalidate_hdp(adev, NULL);
+			amdgpu_device_invalidate_hdp(adev, NULL);
 			mb();
 			memcpy_fromio(buf, addr, count);
 		}
@@ -1884,6 +1886,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 	case CHIP_VANGOGH:
 		chip_name = "vangogh";
 		break;
+	case CHIP_YELLOW_CARP:
+		chip_name = "yellow_carp";
+		break;
 	}
 
 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
@@ -2062,8 +2067,11 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 	case CHIP_DIMGREY_CAVEFISH:
 	case CHIP_BEIGE_GOBY:
 	case CHIP_VANGOGH:
+	case CHIP_YELLOW_CARP:
 		if (adev->asic_type == CHIP_VANGOGH)
 			adev->family = AMDGPU_FAMILY_VGH;
+		else if (adev->asic_type == CHIP_YELLOW_CARP)
+			adev->family = AMDGPU_FAMILY_YC;
 		else
 			adev->family = AMDGPU_FAMILY_NV;
 
@@ -3146,6 +3154,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 	case CHIP_DIMGREY_CAVEFISH:
 	case CHIP_BEIGE_GOBY:
 	case CHIP_VANGOGH:
+	case CHIP_YELLOW_CARP:
 #endif
 		return amdgpu_dc != 0;
 #endif
@@ -5475,4 +5484,31 @@ bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
 	return true;
 }
 
+void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
+		struct amdgpu_ring *ring)
+{
+#ifdef CONFIG_X86_64
+	if (adev->flags & AMD_IS_APU)
+		return;
+#endif
+	if (adev->gmc.xgmi.connected_to_cpu)
+		return;
+
+	if (ring && ring->funcs->emit_hdp_flush)
+		amdgpu_ring_emit_hdp_flush(ring);
+	else
+		amdgpu_asic_flush_hdp(adev, ring);
+}
+
+void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
+		struct amdgpu_ring *ring)
+{
+#ifdef CONFIG_X86_64
+	if (adev->flags & AMD_IS_APU)
+		return;
+#endif
+	if (adev->gmc.xgmi.connected_to_cpu)
+		return;
+
+	amdgpu_asic_invalidate_hdp(adev, ring);
+}
@@ -1056,7 +1056,7 @@ int amdgpu_display_gem_fb_init(struct drm_device *dev,
 
 	return 0;
 err:
-	drm_err(dev, "Failed to init gem fb: %d\n", ret);
+	drm_dbg_kms(dev, "Failed to init gem fb: %d\n", ret);
 	rfb->base.obj[0] = NULL;
 	return ret;
 }
@@ -1090,7 +1090,7 @@ int amdgpu_display_gem_fb_verify_and_init(
 
 	return 0;
 err:
-	drm_err(dev, "Failed to verify and init gem fb: %d\n", ret);
+	drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
 	rfb->base.obj[0] = NULL;
 	return ret;
 }
@@ -173,6 +173,7 @@ int amdgpu_tmz = -1; /* auto */
 uint amdgpu_freesync_vid_mode;
 int amdgpu_reset_method = -1; /* auto */
 int amdgpu_num_kcq = -1;
+int amdgpu_smartshift_bias;
 
 static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
 
@@ -121,6 +121,9 @@ static const struct file_operations amdgpu_fw_attestation_debugfs_ops = {
 
 static int amdgpu_is_fw_attestation_supported(struct amdgpu_device *adev)
 {
+	if (adev->flags & AMD_IS_APU)
+		return 0;
+
 	if (adev->asic_type >= CHIP_SIENNA_CICHLID)
 		return 1;
 
@@ -250,7 +250,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 		}
 	}
 	mb();
-	amdgpu_asic_flush_hdp(adev, NULL);
+	amdgpu_device_flush_hdp(adev, NULL);
 	for (i = 0; i < adev->num_vmhubs; i++)
 		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
 
@@ -337,7 +337,7 @@ void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev)
 	int i;
 
 	mb();
-	amdgpu_asic_flush_hdp(adev, NULL);
+	amdgpu_device_flush_hdp(adev, NULL);
 	for (i = 0; i < adev->num_vmhubs; i++)
 		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
 }
@@ -612,7 +612,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 
 	if (operation == AMDGPU_VA_OP_MAP ||
 	    operation == AMDGPU_VA_OP_REPLACE) {
-		r = amdgpu_vm_bo_update(adev, bo_va, false);
+		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
 		if (r)
 			goto error;
 	}
@@ -34,6 +34,7 @@ struct amdgpu_gfxhub_funcs {
 	void (*set_fault_enable_default)(struct amdgpu_device *adev, bool value);
 	void (*init)(struct amdgpu_device *adev);
 	int (*get_xgmi_info)(struct amdgpu_device *adev);
+	void (*utcl2_harvest)(struct amdgpu_device *adev);
 };
 
 struct amdgpu_gfxhub {
@@ -785,3 +785,22 @@ uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
 {
 	return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base;
 }
+
+void amdgpu_gmc_get_reserved_allocation(struct amdgpu_device *adev)
+{
+	/* Some ASICs need to reserve a region of video memory to avoid access
+	 * from driver */
+	adev->mman.stolen_reserved_offset = 0;
+	adev->mman.stolen_reserved_size = 0;
+
+	switch (adev->asic_type) {
+	case CHIP_YELLOW_CARP:
+		if (amdgpu_discovery == 0) {
+			adev->mman.stolen_reserved_offset = 0x1ffb0000;
+			adev->mman.stolen_reserved_size = 64 * PAGE_SIZE;
+		}
+		break;
+	default:
+		break;
+	}
+}
@@ -332,6 +332,7 @@ amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
 			      bool enable);
 
 void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev);
+void amdgpu_gmc_get_reserved_allocation(struct amdgpu_device *adev);
 
 void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev);
 uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr);
@@ -214,15 +214,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if (job && ring->funcs->init_cond_exec)
 		patch_offset = amdgpu_ring_init_cond_exec(ring);
 
-#ifdef CONFIG_X86_64
-	if (!(adev->flags & AMD_IS_APU))
-#endif
-	{
-		if (ring->funcs->emit_hdp_flush)
-			amdgpu_ring_emit_hdp_flush(ring);
-		else
-			amdgpu_asic_flush_hdp(adev, ring);
-	}
+	amdgpu_device_flush_hdp(adev, ring);
 
 	if (need_ctx_switch)
 		status |= AMDGPU_HAVE_CTX_SWITCH;
@@ -259,10 +251,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if (job && ring->funcs->emit_frame_cntl)
 		amdgpu_ring_emit_frame_cntl(ring, false, secure);
 
-#ifdef CONFIG_X86_64
-	if (!(adev->flags & AMD_IS_APU))
-#endif
-		amdgpu_asic_invalidate_hdp(adev, ring);
+	amdgpu_device_invalidate_hdp(adev, ring);
 
 	if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
 		fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
@@ -76,7 +76,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
 		kfree(ubo->metadata);
 	}
 
-	kfree(bo);
+	kvfree(bo);
 }
 
 /**
@@ -541,7 +541,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 	BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));
 
 	*bo_ptr = NULL;
-	bo = kzalloc(bp->bo_ptr_size, GFP_KERNEL);
+	bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
 	if (bo == NULL)
 		return -ENOMEM;
 	drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
@@ -118,6 +118,10 @@ static int psp_early_init(void *handle)
 	case CHIP_ALDEBARAN:
 		psp_v13_0_set_psp_funcs(psp);
 		break;
+	case CHIP_YELLOW_CARP:
+		psp_v13_0_set_psp_funcs(psp);
+		psp->autoload_supported = true;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -179,6 +183,12 @@ static int psp_sw_init(void *handle)
 			DRM_ERROR("Failed to load psp firmware!\n");
 			return ret;
 		}
+	} else if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_ALDEBARAN) {
+		ret = psp_init_ta_microcode(psp, "aldebaran");
+		if (ret) {
+			DRM_ERROR("Failed to initialize ta microcode!\n");
+			return ret;
+		}
 	}
 
 	ret = psp_memory_training_init(psp);
@@ -282,7 +292,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
 		goto exit;
 	}
 
-	amdgpu_asic_invalidate_hdp(psp->adev, NULL);
+	amdgpu_device_invalidate_hdp(psp->adev, NULL);
 	while (*((unsigned int *)psp->fence_buf) != index) {
 		if (--timeout == 0)
 			break;
@@ -295,7 +305,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
 		if (ras_intr)
 			break;
 		usleep_range(10, 100);
-		amdgpu_asic_invalidate_hdp(psp->adev, NULL);
+		amdgpu_device_invalidate_hdp(psp->adev, NULL);
 	}
 
 	/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
@@ -684,6 +694,8 @@ int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
 
 	psp_prep_reg_prog_cmd_buf(cmd, reg, value);
 	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+	if (ret)
+		DRM_ERROR("PSP failed to program reg id %d", reg);
 
 	kfree(cmd);
 	return ret;
@@ -2319,11 +2331,20 @@ static int psp_load_fw(struct amdgpu_device *adev)
 	if (!psp->cmd)
 		return -ENOMEM;
 
-	ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
-				      AMDGPU_GEM_DOMAIN_GTT,
-				      &psp->fw_pri_bo,
-				      &psp->fw_pri_mc_addr,
-				      &psp->fw_pri_buf);
+	if (amdgpu_sriov_vf(adev)) {
+		ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
+						AMDGPU_GEM_DOMAIN_VRAM,
+						&psp->fw_pri_bo,
+						&psp->fw_pri_mc_addr,
+						&psp->fw_pri_buf);
+	} else {
+		ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
+						AMDGPU_GEM_DOMAIN_GTT,
+						&psp->fw_pri_bo,
+						&psp->fw_pri_mc_addr,
+						&psp->fw_pri_buf);
+	}
 
 	if (ret)
 		goto failed;
 
@@ -2696,7 +2717,7 @@ int psp_ring_cmd_submit(struct psp_context *psp,
 	write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
 	write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
 	write_frame->fence_value = index;
-	amdgpu_asic_flush_hdp(adev, NULL);
+	amdgpu_device_flush_hdp(adev, NULL);
 
 	/* Update the write Pointer in DWORDs */
 	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
@@ -1725,6 +1725,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 				       NULL);
 	if (r)
 		return r;
+	r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset,
+				       adev->mman.stolen_reserved_size,
+				       AMDGPU_GEM_DOMAIN_VRAM,
+				       &adev->mman.stolen_reserved_memory,
+				       NULL);
+	if (r)
+		return r;
 
 	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
 		 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
@@ -1794,6 +1801,9 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
 	/* return the IP Discovery TMR memory back to VRAM */
 	amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
+	if (adev->mman.stolen_reserved_size)
+		amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
+				      NULL, NULL);
 	amdgpu_ttm_fw_reserve_vram_fini(adev);
 
 	amdgpu_vram_mgr_fini(adev);
@@ -84,6 +84,10 @@ struct amdgpu_mman {
 	struct amdgpu_bo	*stolen_extended_memory;
 	bool			keep_stolen_vga_memory;
 
+	struct amdgpu_bo	*stolen_reserved_memory;
+	uint64_t		stolen_reserved_offset;
+	uint64_t		stolen_reserved_size;
+
 	/* discovery */
 	uint8_t				*discovery_bin;
 	uint32_t			discovery_tmr_size;
@@ -404,6 +404,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
 	case CHIP_DIMGREY_CAVEFISH:
 	case CHIP_ALDEBARAN:
 	case CHIP_BEIGE_GOBY:
+	case CHIP_YELLOW_CARP:
 		if (!load_type)
 			return AMDGPU_FW_LOAD_DIRECT;
 		else
@@ -50,6 +50,7 @@
 #define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
 #define FIRMWARE_ALDEBARAN	"amdgpu/aldebaran_vcn.bin"
 #define FIRMWARE_BEIGE_GOBY	"amdgpu/beige_goby_vcn.bin"
+#define FIRMWARE_YELLOW_CARP	"amdgpu/yellow_carp_vcn.bin"
 
 MODULE_FIRMWARE(FIRMWARE_RAVEN);
 MODULE_FIRMWARE(FIRMWARE_PICASSO);
@@ -66,6 +67,7 @@ MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
 MODULE_FIRMWARE(FIRMWARE_VANGOGH);
 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
+MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
 
 static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
 
@@ -160,6 +162,12 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 			adev->vcn.indirect_sram = true;
 		break;
+	case CHIP_YELLOW_CARP:
+		fw_name = FIRMWARE_YELLOW_CARP;
+		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+			adev->vcn.indirect_sram = true;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -385,7 +393,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 	}
 
 	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
-		amdgpu_gfx_off_ctrl(adev, true);
 		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 		       AMD_PG_STATE_GATE);
 		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
@@ -405,7 +412,6 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 	atomic_inc(&adev->vcn.total_submission_cnt);
 
 	if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
-		amdgpu_gfx_off_ctrl(adev, false);
 		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
 				true);
 		if (r)
@@ -910,7 +910,7 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
 		return r;
 
 	bo = &(*vmbo)->bo;
-	if (vm->is_compute_context && (adev->flags & AMD_IS_APU)) {
+	if (vm->is_compute_context || (adev->flags & AMD_IS_APU)) {
 		(*vmbo)->shadow = NULL;
 		return 0;
 	}
@@ -1713,8 +1713,8 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (r)
 		goto error_unlock;
 
-	amdgpu_res_first(res, offset, (last - start + 1) * AMDGPU_GPU_PAGE_SIZE,
-			 &cursor);
+	amdgpu_res_first(pages_addr ? NULL : res, offset,
+			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
 	while (cursor.remaining) {
 		uint64_t tmp, num_entries, addr;
 
@@ -1764,12 +1764,12 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
 		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
 		start = tmp;
-	};
+	}
 
 	r = vm->update_funcs->commit(&params, fence);
 
 	if (table_freed)
-		*table_freed = params.table_freed;
+		*table_freed = *table_freed || params.table_freed;
 
 error_unlock:
 	amdgpu_vm_eviction_unlock(vm);
@@ -1827,6 +1827,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @clear: if true clear the entries
+ * @table_freed: return true if page table is freed
 *
 * Fill in the page table entries for @bo_va.
 *
@@ -1834,7 +1835,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
 * 0 for success, -EINVAL for failure.
 */
 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
-			bool clear)
+			bool clear, bool *table_freed)
 {
 	struct amdgpu_bo *bo = bo_va->base.bo;
 	struct amdgpu_vm *vm = bo_va->base.vm;
@@ -1913,7 +1914,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 						resv, mapping->start,
 						mapping->last, update_flags,
 						mapping->offset, mem,
-						pages_addr, last_update, NULL);
+						pages_addr, last_update, table_freed);
 		if (r)
 			return r;
 	}
@@ -2165,7 +2166,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 
 	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
 		/* Per VM BOs never need to bo cleared in the page tables */
-		r = amdgpu_vm_bo_update(adev, bo_va, false);
+		r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
 		if (r)
 			return r;
 	}
@@ -2184,7 +2185,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 		else
 			clear = true;
 
-		r = amdgpu_vm_bo_update(adev, bo_va, clear);
+		r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL);
 		if (r)
 			return r;
 
@@ -413,7 +413,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 				struct dma_fence **fence, bool *free_table);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			struct amdgpu_bo_va *bo_va,
-			bool clear);
+			bool clear, bool *table_freed);
 bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 			     struct amdgpu_bo *bo, bool evicted);
@@ -110,7 +110,7 @@ static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p,
 {
 	/* Flush HDP */
 	mb();
-	amdgpu_asic_flush_hdp(p->adev, NULL);
+	amdgpu_device_flush_hdp(p->adev, NULL);
 	return 0;
 }
 
@@ -173,6 +173,9 @@
 #define mmGC_THROTTLE_CTRL_Sienna_Cichlid	0x2030
 #define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX	0
 
+#define mmRLC_SPARE_INT_0_Sienna_Cichlid	0x4ca5
+#define mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX	1
+
 #define GFX_RLCG_GC_WRITE_OLD	(0x8 << 28)
 #define GFX_RLCG_GC_WRITE	(0x0 << 28)
 #define GFX_RLCG_GC_READ	(0x1 << 28)
@@ -239,6 +242,13 @@ MODULE_FIRMWARE("amdgpu/beige_goby_mec.bin");
 MODULE_FIRMWARE("amdgpu/beige_goby_mec2.bin");
 MODULE_FIRMWARE("amdgpu/beige_goby_rlc.bin");
 
+MODULE_FIRMWARE("amdgpu/yellow_carp_ce.bin");
+MODULE_FIRMWARE("amdgpu/yellow_carp_pfp.bin");
+MODULE_FIRMWARE("amdgpu/yellow_carp_me.bin");
+MODULE_FIRMWARE("amdgpu/yellow_carp_mec.bin");
+MODULE_FIRMWARE("amdgpu/yellow_carp_mec2.bin");
+MODULE_FIRMWARE("amdgpu/yellow_carp_rlc.bin");
+
 static const struct soc15_reg_golden golden_settings_gc_10_1[] =
 {
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
@@ -1485,8 +1495,15 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32
 		(adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG2) * 4;
 	scratch_reg3 = adev->rmmio +
 		(adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3) * 4;
-	spare_int = adev->rmmio +
-		(adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
+
+	if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+		spare_int = adev->rmmio +
+			(adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX]
+			 + mmRLC_SPARE_INT_0_Sienna_Cichlid) * 4;
+	} else {
+		spare_int = adev->rmmio +
+			(adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
+	}
 
 	grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
 	grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
@@ -3370,6 +3387,30 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG,  0x00000020, 0x00000020),
 };
 
+static const struct soc15_reg_golden golden_settings_gc_10_3_3[] =
+{
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0xff7f0fff, 0x78000100),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0x000000ff, 0x000000e4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000242),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CM_CTRL1, 0xff8fff0f, 0x580f1008),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xf7ffffff, 0x00f80988),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x000001ff, 0x00000020),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_CL_ENHANCE, 0xf17fffff, 0x01200007),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000820),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00100000)
+};
+
 static const struct soc15_reg_golden golden_settings_gc_10_3_4[] =
 {
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100),
@@ -3659,6 +3700,11 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
 						golden_settings_gc_10_3_vangogh,
 						(const u32)ARRAY_SIZE(golden_settings_gc_10_3_vangogh));
 		break;
+	case CHIP_YELLOW_CARP:
+		soc15_program_register_sequence(adev,
+						golden_settings_gc_10_3_3,
+						(const u32)ARRAY_SIZE(golden_settings_gc_10_3_3));
+		break;
 	case CHIP_DIMGREY_CAVEFISH:
 		soc15_program_register_sequence(adev,
 						golden_settings_gc_10_3_4,
@@ -3855,6 +3901,7 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
 	case CHIP_BEIGE_GOBY:
+	case CHIP_YELLOW_CARP:
 		adev->gfx.cp_fw_write_wait = true;
 		break;
 	default:
@@ -3972,6 +4019,9 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
 	case CHIP_BEIGE_GOBY:
 		chip_name = "beige_goby";
 		break;
+	case CHIP_YELLOW_CARP:
+		chip_name = "yellow_carp";
+		break;
 	default:
 		BUG();
 	}
@@ -4541,6 +4591,7 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
 	case CHIP_BEIGE_GOBY:
+	case CHIP_YELLOW_CARP:
 		adev->gfx.config.max_hw_contexts = 8;
 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -4666,6 +4717,7 @@ static int gfx_v10_0_sw_init(void *handle)
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
 	case CHIP_BEIGE_GOBY:
+	case CHIP_YELLOW_CARP:
 		adev->gfx.me.num_me = 1;
 		adev->gfx.me.num_pipe_per_me = 1;
 		adev->gfx.me.num_queue_per_pipe = 1;
@@ -4898,7 +4950,8 @@ static void gfx_v10_0_setup_rb(struct amdgpu_device *adev)
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
 			bitmap = i * adev->gfx.config.max_sh_per_se + j;
-			if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+			if (((adev->asic_type == CHIP_SIENNA_CICHLID) ||
+				(adev->asic_type == CHIP_YELLOW_CARP)) &&
 			    ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1))
 				continue;
 			gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
@@ -6174,6 +6227,7 @@ static void gfx_v10_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
 	case CHIP_BEIGE_GOBY:
+	case CHIP_YELLOW_CARP:
 		tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
 				    DOORBELL_RANGE_LOWER_Sienna_Cichlid, ring->doorbell_index);
 		WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
@@ -6310,6 +6364,7 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 		case CHIP_VANGOGH:
 		case CHIP_DIMGREY_CAVEFISH:
 		case CHIP_BEIGE_GOBY:
+		case CHIP_YELLOW_CARP:
 			WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid, 0);
 			break;
 		default:
@@ -6323,6 +6378,7 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 		case CHIP_VANGOGH:
 		case CHIP_DIMGREY_CAVEFISH:
 		case CHIP_BEIGE_GOBY:
+		case CHIP_YELLOW_CARP:
 			WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid,
 				     (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
 				      CP_MEC_CNTL__MEC_ME2_HALT_MASK));
@@ -6420,6 +6476,7 @@ static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
 	case CHIP_BEIGE_GOBY:
+	case CHIP_YELLOW_CARP:
 		tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
 		tmp &= 0xffffff00;
 		tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
@@ -7148,6 +7205,7 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
 		}
 		break;
 	case CHIP_VANGOGH:
+	case CHIP_YELLOW_CARP:
 		return true;
 	default:
 		data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE);
@@ -7182,6 +7240,7 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
 	case CHIP_BEIGE_GOBY:
+	case CHIP_YELLOW_CARP:
 		/* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */
 		data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) <<
 			GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
@@ -7406,9 +7465,15 @@ static int gfx_v10_0_hw_fini(void *handle)
 	if (amdgpu_sriov_vf(adev)) {
 		gfx_v10_0_cp_gfx_enable(adev, false);
 		/* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
-		tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
-		tmp &= 0xffffff00;
-		WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+		if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+			tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
+			tmp &= 0xffffff00;
+			WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
+		} else {
+			tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
+			tmp &= 0xffffff00;
+			WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
+		}
 
 		return 0;
 	}
@@ -7492,6 +7557,7 @@ static int gfx_v10_0_soft_reset(void *handle)
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
 	case CHIP_BEIGE_GOBY:
+	case CHIP_YELLOW_CARP:
 		if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY_Sienna_Cichlid))
 			grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
 							GRBM_SOFT_RESET,
@@ -7545,6 +7611,7 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
 	mutex_lock(&adev->gfx.gpu_clock_mutex);
 	switch (adev->asic_type) {
 	case CHIP_VANGOGH:
+	case CHIP_YELLOW_CARP:
 		clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) |
 			((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
 		break;
@@ -7602,6 +7669,7 @@ static int gfx_v10_0_early_init(void *handle)
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
 	case CHIP_BEIGE_GOBY:
+	case CHIP_YELLOW_CARP:
 		adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_Sienna_Cichlid;
 		break;
 	default:
@@ -7659,6 +7727,7 @@ static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev)
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
 	case CHIP_BEIGE_GOBY:
+	case CHIP_YELLOW_CARP:
 		WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data);
 
 		/* wait for RLC_SAFE_MODE */
@@ -7694,6 +7763,7 @@ static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev)
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
 	case CHIP_BEIGE_GOBY:
+	case CHIP_YELLOW_CARP:
 		WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data);
 		break;
 	default:
@@ -7996,12 +8066,23 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable)
 	 * in refclk count. Note that RLC FW is modified to take 16 bits from
 	 * RLC_PG_DELAY_3[15:0] as the hysteresis instead of just 8 bits.
 	 *
-	 * The recommendation from RLC team is setting RLC_PG_DELAY_3 to 200us(0x4E20)
-	 * as part of CGPG enablement starting point.
+	 * The recommendation from RLC team is setting RLC_PG_DELAY_3 to 200us as part
+	 * of CGPG enablement starting point.
	 * Power/performance team will optimize it and might give a new value later.
	 */
-	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && adev->asic_type == CHIP_VANGOGH) {
-		data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh;
-		WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data);
+	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
+		switch (adev->asic_type) {
+		case CHIP_VANGOGH:
+			data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh;
+			WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data);
+			break;
+		case CHIP_YELLOW_CARP:
+			data = 0x1388 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh;
+			WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data);
+			break;
+		default:
+			break;
+		}
 	}
 }
 
@@ -8065,6 +8146,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
 		amdgpu_gfx_off_ctrl(adev, enable);
 		break;
 	case CHIP_VANGOGH:
+	case CHIP_YELLOW_CARP:
 		gfx_v10_cntl_pg(adev, enable);
 		amdgpu_gfx_off_ctrl(adev, enable);
 		break;
@@ -8091,6 +8173,7 @@ static int gfx_v10_0_set_clockgating_state(void *handle,
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
 	case CHIP_BEIGE_GOBY:
+	case CHIP_YELLOW_CARP:
 		gfx_v10_0_update_gfx_clock_gating(adev,
 						  state == AMD_CG_STATE_GATE);
 		break;
@@ -9197,14 +9280,15 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
 	switch (adev->asic_type) {
 	case CHIP_NAVI10:
 	case CHIP_NAVI14:
+	case CHIP_SIENNA_CICHLID:
 	case CHIP_NAVY_FLOUNDER:
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
 	case CHIP_BEIGE_GOBY:
+	case CHIP_YELLOW_CARP:
 		adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
 		break;
 	case CHIP_NAVI12:
-	case CHIP_SIENNA_CICHLID:
 		adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs_sriov;
 		break;
 	default:
@@ -9287,7 +9371,8 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
 			bitmap = i * adev->gfx.config.max_sh_per_se + j;
-			if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+			if (((adev->asic_type == CHIP_SIENNA_CICHLID) ||
+			    (adev->asic_type == CHIP_YELLOW_CARP)) &&
 			    ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1))
 				continue;
 			mask = 1;
@@ -790,7 +790,8 @@ static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
 static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset,
 			       u32 v, u32 acc_flags, u32 hwip)
 {
-	if (amdgpu_sriov_fullaccess(adev)) {
+	if ((acc_flags & AMDGPU_REGS_RLC) &&
+	    amdgpu_sriov_fullaccess(adev)) {
 		gfx_v9_0_rlcg_w(adev, offset, v, acc_flags);
 
 		return;
@@ -321,18 +321,6 @@ static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev)
 
 static int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
 {
-	if (amdgpu_sriov_vf(adev) && adev->asic_type != CHIP_ARCTURUS) {
-		/*
-		 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
-		 * VF copy registers so vbios post doesn't program them, for
-		 * SRIOV driver need to program them
-		 */
-		WREG32_SOC15_RLC(GC, 0, mmMC_VM_FB_LOCATION_BASE,
-			     adev->gmc.vram_start >> 24);
-		WREG32_SOC15_RLC(GC, 0, mmMC_VM_FB_LOCATION_TOP,
-			     adev->gmc.vram_end >> 24);
-	}
-
 	/* GART Enable. */
 	gfxhub_v1_0_init_gart_aperture_regs(adev);
 	gfxhub_v1_0_init_system_aperture_regs(adev);
@@ -31,6 +31,9 @@
 
 #include "soc15_common.h"
 
+#define mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP	0x16f8
+#define mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP_BASE_IDX	0
+
 static const char *gfxhub_client_ids[] = {
 	"CB/DB",
 	"Reserved",
@@ -531,6 +534,42 @@ static int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)
 	return 0;
 }
 
+static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
+{
+	int i;
+	u32 tmp = 0, disabled_sa = 0;
+	u32 efuse_setting, vbios_setting;
+
+	u32 max_sa_mask = amdgpu_gfx_create_bitmask(
+				adev->gfx.config.max_sh_per_se *
+				adev->gfx.config.max_shader_engines);
+
+	if (adev->asic_type == CHIP_YELLOW_CARP) {
+		/* Get SA disabled bitmap from eFuse setting */
+		efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE);
+		efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK;
+		efuse_setting >>= CC_GC_SA_UNIT_DISABLE__SA_DISABLE__SHIFT;
+
+		/* Get SA disabled bitmap from VBIOS setting */
+		vbios_setting = RREG32_SOC15(GC, 0, mmGC_USER_SA_UNIT_DISABLE);
+		vbios_setting &= GC_USER_SA_UNIT_DISABLE__SA_DISABLE_MASK;
+		vbios_setting >>= GC_USER_SA_UNIT_DISABLE__SA_DISABLE__SHIFT;
+
+		disabled_sa |= efuse_setting | vbios_setting;
+		/* Make sure not to report harvested SAs beyond the max SA count */
+		disabled_sa &= max_sa_mask;
+
+		for (i = 0; disabled_sa > 0; i++) {
+			if (disabled_sa & 1)
+				tmp |= 0x3 << (i * 2);
+			disabled_sa >>= 1;
+		}
+		disabled_sa = tmp;
+
+		WREG32_SOC15(GC, 0, mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP, disabled_sa);
+	}
+}
+
 const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = {
 	.get_fb_location = gfxhub_v2_1_get_fb_location,
 	.get_mc_fb_offset = gfxhub_v2_1_get_mc_fb_offset,
@@ -540,4 +579,5 @@ const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = {
 	.set_fault_enable_default = gfxhub_v2_1_set_fault_enable_default,
 	.init = gfxhub_v2_1_init,
 	.get_xgmi_info = gfxhub_v2_1_get_xgmi_info,
+	.utcl2_harvest = gfxhub_v2_1_utcl2_harvest,
 };
@@ -675,6 +675,7 @@ static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
 	case CHIP_VANGOGH:
+	case CHIP_YELLOW_CARP:
 		adev->mmhub.funcs = &mmhub_v2_3_funcs;
 		break;
 	default:
@@ -691,6 +692,7 @@ static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
 	case CHIP_BEIGE_GOBY:
+	case CHIP_YELLOW_CARP:
 		adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
 		break;
 	default:
@@ -807,6 +809,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
 	case CHIP_BEIGE_GOBY:
+	case CHIP_YELLOW_CARP:
 	default:
 		adev->gmc.gart_size = 512ULL << 20;
 		break;
@@ -875,6 +878,7 @@ static int gmc_v10_0_sw_init(void *handle)
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
 	case CHIP_BEIGE_GOBY:
+	case CHIP_YELLOW_CARP:
 		adev->num_vmhubs = 2;
 		/*
 		 * To fulfill 4-level page support,
@@ -932,6 +936,7 @@ static int gmc_v10_0_sw_init(void *handle)
 		return r;
 
 	amdgpu_gmc_get_vbios_allocations(adev);
+	amdgpu_gmc_get_reserved_allocation(adev);
 
 	/* Memory manager */
 	r = amdgpu_bo_init(adev);
@@ -990,6 +995,7 @@ static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
 	case CHIP_VANGOGH:
 	case CHIP_DIMGREY_CAVEFISH:
 	case CHIP_BEIGE_GOBY:
+	case CHIP_YELLOW_CARP:
 		break;
 	default:
 		break;
@@ -1053,6 +1059,13 @@ static int gmc_v10_0_hw_init(void *handle)
 	/* The sequence of these two function calls matters.*/
 	gmc_v10_0_init_golden_registers(adev);
 
+	/*
+	 * harvestable groups in gc_utcl2 need to be programmed before any GFX block
+	 * register setup within GMC, or else system hang when harvesting SA.
+	 */
+	if (adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
+		adev->gfxhub.funcs->utcl2_harvest(adev);
+
 	r = gmc_v10_0_gart_enable(adev);
 	if (r)
 		return r;
@@ -1145,7 +1158,7 @@ static int gmc_v10_0_set_clockgating_state(void *handle,
 		return r;
 
 	if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
-	    adev->asic_type <= CHIP_DIMGREY_CAVEFISH)
+	    adev->asic_type <= CHIP_YELLOW_CARP)
 		return athub_v2_1_set_clockgating(adev, state);
 	else
 		return athub_v2_0_set_clockgating(adev, state);
@@ -1158,7 +1171,7 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
 	adev->mmhub.funcs->get_clockgating(adev, flags);
 
 	if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
-	    adev->asic_type <= CHIP_DIMGREY_CAVEFISH)
+	    adev->asic_type <= CHIP_YELLOW_CARP)
		athub_v2_1_get_clockgating(adev, flags);
	else
		athub_v2_0_get_clockgating(adev, flags);
@@ -1288,10 +1288,7 @@ static int gmc_v9_0_late_init(void *handle)
 static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
 					struct amdgpu_gmc *mc)
 {
-	u64 base = 0;
-
-	if (!amdgpu_sriov_vf(adev))
-		base = adev->mmhub.funcs->get_fb_location(adev);
+	u64 base = adev->mmhub.funcs->get_fb_location(adev);
 
 	/* add the xgmi offset of the physical node */
 	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
@@ -49,10 +49,13 @@ static int jpeg_v3_0_set_powergating_state(void *handle,
 static int jpeg_v3_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
 
-	if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
-		return -ENOENT;
+	if (adev->asic_type != CHIP_YELLOW_CARP) {
+		u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
+
+		if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
+			return -ENOENT;
+	}
 
 	adev->jpeg.num_jpeg_inst = 1;
 
@@ -111,6 +111,9 @@ static void mmhub_v1_7_init_system_aperture_regs(struct amdgpu_device *adev)
 	WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
 	WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
 
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	/* Program the system aperture low logical page number. */
 	WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
@@ -129,8 +132,6 @@ static void mmhub_v1_7_init_system_aperture_regs(struct amdgpu_device *adev)
 		WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
 		WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
 	}
-	if (amdgpu_sriov_vf(adev))
-		return;
 
 	/* Set default page address. */
 	value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
@@ -332,18 +333,6 @@ static void mmhub_v1_7_program_invalidation(struct amdgpu_device *adev)
 
 static int mmhub_v1_7_gart_enable(struct amdgpu_device *adev)
 {
-	if (amdgpu_sriov_vf(adev)) {
-		/*
-		 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
-		 * VF copy registers so vbios post doesn't program them, for
-		 * SRIOV driver need to program them
-		 */
-		WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE,
-			     adev->gmc.vram_start >> 24);
-		WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP,
-			     adev->gmc.vram_end >> 24);
-	}
-
 	/* GART Enable. */
 	mmhub_v1_7_init_gart_aperture_regs(adev);
 	mmhub_v1_7_init_system_aperture_regs(adev);
@@ -92,6 +92,7 @@ mmhub_v2_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
 		status);
 	switch (adev->asic_type) {
 	case CHIP_VANGOGH:
+	case CHIP_YELLOW_CARP:
 		mmhub_cid = mmhub_client_ids_vangogh[cid][rw];
 		break;
 	default:
@ -120,11 +120,23 @@ force_update_wptr_for_self_int(struct amdgpu_device *adev,
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
				   RB_USED_INT_THRESHOLD, threshold);

	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl))
			return;
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
	}

	ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
	ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
				   RB_USED_INT_THRESHOLD, threshold);
	WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
		if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, ih_rb_cntl))
			return;
	} else {
		WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
	}

	WREG32_SOC15(OSSSYS, 0, mmIH_CNTL2, ih_cntl);
}

@ -153,10 +165,8 @@ static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
	tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));

	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
		if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
			DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
		if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
			return -ETIMEDOUT;
		}
	} else {
		WREG32(ih_regs->ih_rb_cntl, tmp);
	}

@ -328,6 +338,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
	case CHIP_VANGOGH:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
		ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_Sienna_Cichlid);
		ih_chicken = REG_SET_FIELD(ih_chicken,
					   IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);

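The IH hunks above all apply the same rule: under SR-IOV with indirect IH access the guest may not write IH_RB_CNTL directly, so the value is handed to the PSP firmware instead, and a PSP failure aborts the path. A condensed sketch of that dispatch, where psp_program() and mmio_write() are stand-ins rather than the kernel's psp_reg_program()/WREG32 signatures:

/* Sketch: route privileged register writes through PSP when the VF
 * cannot touch them, fall back to direct MMIO on bare metal. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int psp_program(uint32_t reg, uint32_t val)
{
	(void)reg; (void)val;
	return 0;	/* 0 = firmware accepted the write */
}

static void mmio_write(uint32_t reg, uint32_t val) { (void)reg; (void)val; }

static int set_rb_cntl(bool sriov_indirect, uint32_t reg, uint32_t val)
{
	if (sriov_indirect)
		return psp_program(reg, val);	/* may time out -> error */
	mmio_write(reg, val);			/* bare metal fast path */
	return 0;
}

int main(void)
{
	printf("status=%d\n", set_rb_cntl(true, 0x1234, 0xcafe));
	return 0;
}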
@ -28,6 +28,25 @@
#include "nbio/nbio_7_2_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>

#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_YC		0x0015
#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_YC_BASE_IDX	2
#define regBIF_BX0_BIF_FB_EN_YC				0x0100
#define regBIF_BX0_BIF_FB_EN_YC_BASE_IDX		2
#define regBIF1_PCIE_MST_CTRL_3				0x4601c6
#define regBIF1_PCIE_MST_CTRL_3_BASE_IDX		5
#define BIF1_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_MODE__SHIFT \
		0x1b
#define BIF1_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV__SHIFT \
		0x1c
#define BIF1_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_MODE_MASK \
		0x08000000L
#define BIF1_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV_MASK \
		0x30000000L
#define regBIF1_PCIE_TX_POWER_CTRL_1			0x460187
#define regBIF1_PCIE_TX_POWER_CTRL_1_BASE_IDX		5
#define BIF1_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK	0x00000001L
#define BIF1_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK	0x00000008L

static void nbio_v7_2_remap_hdp_registers(struct amdgpu_device *adev)
{
	WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,

@ -38,7 +57,12 @@ static void nbio_v7_2_remap_hdp_registers(struct amdgpu_device *adev)

static u32 nbio_v7_2_get_rev_id(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
	u32 tmp;

	if (adev->asic_type == CHIP_YELLOW_CARP)
		tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_YC);
	else
		tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);

	tmp &= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
	tmp >>= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
@ -49,11 +73,19 @@ static u32 nbio_v7_2_get_rev_id(struct amdgpu_device *adev)
static void nbio_v7_2_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN,
			     BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK |
			     BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK);
		if (adev->asic_type == CHIP_YELLOW_CARP)
			WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN_YC,
				     BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK |
				     BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK);
		else
			WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN,
				     BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK |
				     BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK);
	else
		WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0);
		if (adev->asic_type == CHIP_YELLOW_CARP)
			WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN_YC, 0);
		else
			WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0);
}

static u32 nbio_v7_2_get_memsize(struct amdgpu_device *adev)

@ -92,13 +124,13 @@ static void nbio_v7_2_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       GDC0_BIF_VCN0_DOORBELL_RANGE, OFFSET,
					       doorbell_index);
				GDC0_BIF_VCN0_DOORBELL_RANGE, OFFSET,
				doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       GDC0_BIF_VCN0_DOORBELL_RANGE, SIZE, 8);
				GDC0_BIF_VCN0_DOORBELL_RANGE, SIZE, 8);
	} else {
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       GDC0_BIF_VCN0_DOORBELL_RANGE, SIZE, 0);
				GDC0_BIF_VCN0_DOORBELL_RANGE, SIZE, 0);
	}

	WREG32_PCIE_PORT(reg, doorbell_range);

@ -123,22 +155,22 @@ static void nbio_v7_2_enable_doorbell_selfring_aperture(struct amdgpu_device *ad

	if (enable) {
		tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
				    DOORBELL_SELFRING_GPA_APER_EN, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
				    DOORBELL_SELFRING_GPA_APER_MODE, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
				    DOORBELL_SELFRING_GPA_APER_SIZE, 0);
			DOORBELL_SELFRING_GPA_APER_EN, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
			DOORBELL_SELFRING_GPA_APER_MODE, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
			DOORBELL_SELFRING_GPA_APER_SIZE, 0);

		WREG32_SOC15(NBIO, 0,
			     regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
			     lower_32_bits(adev->doorbell.base));
			regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
			lower_32_bits(adev->doorbell.base));
		WREG32_SOC15(NBIO, 0,
			     regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
			     upper_32_bits(adev->doorbell.base));
			regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
			upper_32_bits(adev->doorbell.base));
	}

	WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
		     tmp);
		tmp);
}


@ -218,19 +250,42 @@ static void nbio_v7_2_update_medium_grain_light_sleep(struct amdgpu_device *adev
{
	uint32_t def, data;

	def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2));
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	} else {
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	}
	if (adev->asic_type == CHIP_YELLOW_CARP) {
		def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2));
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
			data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
		else
			data &= ~PCIE_CNTL2__SLV_MEM_LS_EN_MASK;

	if (def != data)
		WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2), data);
		if (def != data)
			WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2), data);

		data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_TX_POWER_CTRL_1));
		def = data;
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
			data |= (BIF1_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
				 BIF1_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
		else
			data &= ~(BIF1_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
				  BIF1_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);

		if (def != data)
			WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_TX_POWER_CTRL_1),
					 data);
	} else {
		def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2));
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
			data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
		else
			data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

		if (def != data)
			WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2), data);
	}
}

static void nbio_v7_2_get_clockgating_state(struct amdgpu_device *adev,

@ -297,14 +352,25 @@ const struct nbio_hdp_flush_reg nbio_v7_2_hdp_flush_reg = {
static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
{
	uint32_t def, data;
	if (adev->asic_type == CHIP_YELLOW_CARP) {
		def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3));
		data = REG_SET_FIELD(data, BIF1_PCIE_MST_CTRL_3,
				     CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
		data = REG_SET_FIELD(data, BIF1_PCIE_MST_CTRL_3,
				     CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);

	def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL));
	data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
	data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);
		if (def != data)
			WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3), data);
	} else {
		def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL));
		data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL,
				     CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
		data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL,
				     CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);

	if (def != data)
		WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL),
				 data);
		if (def != data)
			WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL), data);
	}
}

const struct amdgpu_nbio_funcs nbio_v7_2_funcs = {

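A recurring pattern throughout the nbio_v7_2 hunks above: Yellow Carp relocates a handful of BIF registers, so each accessor picks the _YC alias first and falls back to the common offset. A compact sketch of that dispatch, with both offsets and the read function invented for illustration:

/* Sketch of per-ASIC register aliasing; rreg() just echoes the offset
 * so the selected alias is visible. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REG_STRAP0	0x0010u
#define REG_STRAP0_YC	0x0015u	/* Yellow Carp alias */

static uint32_t rreg(uint32_t off) { return off; }

static uint32_t get_rev_reg(bool is_yellow_carp)
{
	/* choose the alias before doing the common mask/shift work */
	return rreg(is_yellow_carp ? REG_STRAP0_YC : REG_STRAP0);
}

int main(void)
{
	printf("rev reg = 0x%x\n", (unsigned)get_rev_reg(true));
	return 0;
}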
@ -329,6 +329,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
		if (encode)
			*codecs = &nv_video_codecs_encode;
		else

@ -633,6 +634,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)

	switch (adev->asic_type) {
	case CHIP_VANGOGH:
	case CHIP_YELLOW_CARP:
		return AMD_RESET_METHOD_MODE2;
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:

@ -782,6 +784,9 @@ legacy_init:
	case CHIP_BEIGE_GOBY:
		beige_goby_reg_base_init(adev);
		break;
	case CHIP_YELLOW_CARP:
		yellow_carp_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

@ -985,6 +990,26 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		break;
	case CHIP_YELLOW_CARP:
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

@ -1309,6 +1334,32 @@ static int nv_common_early_init(void *handle)
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x46;
		break;
	case CHIP_YELLOW_CARP:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		adev->external_rev_id = adev->rev_id + 0x01;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;

@ -37,4 +37,6 @@ int sienna_cichlid_reg_base_init(struct amdgpu_device *adev);
void vangogh_reg_base_init(struct amdgpu_device *adev);
int dimgrey_cavefish_reg_base_init(struct amdgpu_device *adev);
int beige_goby_reg_base_init(struct amdgpu_device *adev);
int yellow_carp_reg_base_init(struct amdgpu_device *adev);

#endif

@ -31,6 +31,9 @@

MODULE_FIRMWARE("amdgpu/aldebaran_sos.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_ta.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_asd.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_toc.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_ta.bin");

static int psp_v13_0_init_microcode(struct psp_context *psp)
{

@ -42,17 +45,37 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
	case CHIP_ALDEBARAN:
		chip_name = "aldebaran";
		break;
	case CHIP_YELLOW_CARP:
		chip_name = "yellow_carp";
		break;
	default:
		BUG();
	}
	switch (adev->asic_type) {
	case CHIP_ALDEBARAN:
		err = psp_init_sos_microcode(psp, chip_name);
		if (err)
			return err;
		err = psp_init_ta_microcode(&adev->psp, chip_name);
		if (err)
			return err;
		break;
	case CHIP_YELLOW_CARP:
		err = psp_init_asd_microcode(psp, chip_name);
		if (err)
			return err;
		err = psp_init_toc_microcode(psp, chip_name);
		if (err)
			return err;
		err = psp_init_ta_microcode(psp, chip_name);
		if (err)
			return err;
		break;
	default:
		BUG();
	}

	err = psp_init_sos_microcode(psp, chip_name);
	if (err)
		return err;

	err = psp_init_ta_microcode(&adev->psp, chip_name);

	return err;
	return 0;
}

static bool psp_v13_0_is_sos_alive(struct psp_context *psp)

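The psp_v13_0 hunk splits microcode init by ASIC: Aldebaran loads SOS and TA images, while Yellow Carp loads ASD, TOC and TA instead. A trimmed sketch of that per-chip firmware selection — load() below is a stand-in, not one of the kernel's psp_init_* helpers:

/* Sketch: each ASIC requests its own set of firmware images and the
 * first failure aborts the sequence. */
#include <stdio.h>

enum chip { CHIP_ALDEBARAN, CHIP_YELLOW_CARP };

static int load(const char *chip, const char *kind)
{
	printf("request amdgpu/%s_%s.bin\n", chip, kind);
	return 0;	/* stub: pretend the request succeeded */
}

static int init_microcode(enum chip asic)
{
	const char *name = (asic == CHIP_ALDEBARAN) ? "aldebaran"
						    : "yellow_carp";
	int err;

	if (asic == CHIP_ALDEBARAN) {
		if ((err = load(name, "sos")) || (err = load(name, "ta")))
			return err;
	} else {
		if ((err = load(name, "asd")) || (err = load(name, "toc")) ||
		    (err = load(name, "ta")))
			return err;
	}
	return 0;
}

int main(void) { return init_microcode(CHIP_YELLOW_CARP); }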
@ -50,6 +50,7 @@ MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_sdma.bin");
MODULE_FIRMWARE("amdgpu/beige_goby_sdma.bin");

MODULE_FIRMWARE("amdgpu/vangogh_sdma.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_sdma.bin");

#define SDMA1_REG_OFFSET 0x600
#define SDMA3_REG_OFFSET 0x400

@ -94,6 +95,7 @@ static void sdma_v5_2_init_golden_registers(struct amdgpu_device *adev)
	case CHIP_VANGOGH:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
		break;
	default:
		break;

@ -165,6 +167,9 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
	case CHIP_BEIGE_GOBY:
		chip_name = "beige_goby";
		break;
	case CHIP_YELLOW_CARP:
		chip_name = "yellow_carp";
		break;
	default:
		BUG();
	}

@ -495,12 +500,12 @@ static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
		amdgpu_ttm_set_buffer_funcs_status(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
		rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
		ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
		ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
	}
}

@ -558,11 +563,11 @@ static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					 AUTO_CTXSW_ENABLE, enable ? 1 : 0);
		if (enable && amdgpu_sdma_phase_quantum) {
			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
			       phase_quantum);
			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
			       phase_quantum);
			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
			       phase_quantum);
		}
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);

@ -620,62 +625,62 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
		ring = &adev->sdma.instance[i].ring;
		wb_offset = (ring->rptr_offs * 4);

		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
		rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
					RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);

		/* setup the wptr shadow polling */
		wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
		       lower_32_bits(wptr_gpu_addr));
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
		       upper_32_bits(wptr_gpu_addr));
		wptr_poll_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i,
		wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i,
							 mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
		wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
					       SDMA0_GFX_RB_WPTR_POLL_CNTL,
					       F32_POLL_ENABLE, 1);
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
		       wptr_poll_cntl);

		/* set the wb address whether it's enabled or not */
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);

		ring->wptr = 0;

		/* before programing wptr to a less value, need set minor_ptr_update first */
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);

		if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
		}

		doorbell = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
		doorbell_offset = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
		doorbell = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
		doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));

		if (ring->use_doorbell) {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);

@ -684,8 +689,8 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
		} else {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
		}
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);

		adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
						      ring->doorbell_index,

@ -695,7 +700,7 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
		sdma_v5_2_ring_set_wptr(ring);

		/* set minor_ptr_update to 0 after wptr programed */
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);

		/* set utc l1 enable flag always to 1 */
		temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));

@ -706,19 +711,19 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);

		/* Set up RESP_MODE to non-copy addresses */
		temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
		temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
		temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
		temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);

		/* program default cache read and write policy */
		temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
		temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
		/* clean read policy and write policy bits */
		temp &= 0xFF0FFF;
		temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
			 (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
			 SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);

		if (!amdgpu_sriov_vf(adev)) {
			/* unhalt engine */

@ -729,15 +734,15 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)

		/* enable DMA RB */
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);

		ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
		ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
		/* enable DMA IBs */
		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);

		ring->sched.ready = true;

@ -1229,6 +1234,7 @@ static int sdma_v5_2_early_init(void *handle)
		break;
	case CHIP_VANGOGH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
		adev->sdma.num_instances = 1;
		break;
	default:

@ -1630,6 +1636,7 @@ static int sdma_v5_2_set_clockgating_state(void *handle,
	case CHIP_VANGOGH:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
		sdma_v5_2_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		sdma_v5_2_update_medium_grain_light_sleep(adev,

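The bulk of the sdma_v5_2 changes above convert raw WREG32/RREG32 calls to the IP-aware WREG32_SOC15_IP/RREG32_SOC15_IP wrappers. The practical difference is that the IP-aware form can detour through an indirection callback (for example the RLC-mediated path under SR-IOV) while the plain form always hits MMIO. A condensed stand-in for that dispatch, not the kernel macro itself:

/* Sketch: an IP-aware write falls back to MMIO only when no
 * indirection callback is installed. */
#include <stdint.h>
#include <stdio.h>

typedef void (*indirect_wreg_t)(uint32_t reg, uint32_t val);

static indirect_wreg_t indirect_wreg;	/* set when writes must be routed */

static void mmio_wreg(uint32_t reg, uint32_t val)
{
	printf("MMIO 0x%04x <- 0x%08x\n", (unsigned)reg, (unsigned)val);
}

static void wreg_ip(uint32_t reg, uint32_t val)
{
	if (indirect_wreg)
		indirect_wreg(reg, val);	/* e.g. RLC-mediated write */
	else
		mmio_wreg(reg, val);		/* bare metal fast path */
}

int main(void)
{
	wreg_ip(0x600, 0x1);
	return 0;
}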
@ -28,12 +28,12 @@
#define SOC15_REG_OFFSET(ip, inst, reg) (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)

#define __WREG32_SOC15_RLC__(reg, value, flag, hwip) \
	((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->rlcg_wreg) ? \
	((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->rlcg_wreg) ? \
	 adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, flag, hwip) : \
	 WREG32(reg, value))

#define __RREG32_SOC15_RLC__(reg, flag, hwip) \
	((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->rlcg_rreg) ? \
	((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->rlcg_rreg) ? \
	 adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, flag, hwip) : \
	 RREG32(reg))

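The macro fix above adds a guard on adev->gfx.rlc.funcs itself before dereferencing its rlcg_wreg/rlcg_rreg members, so an SR-IOV path that runs before the RLC funcs table is installed no longer dereferences a NULL pointer. A reduced illustration of the same guard, with plain functions in place of the macros:

/* Sketch: guard the funcs table before the member, then fall back. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct rlc_funcs { void (*rlcg_wreg)(uint32_t reg, uint32_t val); };

static void safe_wreg(const struct rlc_funcs *funcs, int sriov,
		      uint32_t reg, uint32_t val)
{
	if (sriov && funcs && funcs->rlcg_wreg)	/* funcs checked first */
		funcs->rlcg_wreg(reg, val);
	else
		printf("direct write 0x%x <- 0x%x\n",
		       (unsigned)reg, (unsigned)val);
}

int main(void)
{
	safe_wreg(NULL, 1, 0x10, 0xff);	/* no funcs table: falls back */
	return 0;
}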
51	drivers/gpu/drm/amd/amdgpu/yellow_carp_reg_init.c	Normal file
@ -0,0 +1,51 @@
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "nv.h"

#include "soc15_common.h"
#include "soc15_hw_ip.h"
#include "yellow_carp_offset.h"

int yellow_carp_reg_base_init(struct amdgpu_device *adev)
{
	/* HW has more IP blocks, only initialized the block needed by driver */
	uint32_t i;
	for (i = 0 ; i < MAX_INSTANCE ; ++i) {
		adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
		adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
		adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
		adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
		adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
		adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
		adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
		adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
		adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
		adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i]));
		adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
		adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
		adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
		adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
	}
	return 0;
}
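The new file populates adev->reg_offset[ip][instance] with per-IP base tables, which the SOC15_REG_OFFSET macro (visible in the soc15_common.h hunk earlier) then indexes by segment and adds the register's segment-relative offset to. A simplified sketch of that consumption path, with a flat array standing in for the real per-IP base structs:

/* Sketch: absolute register offset = per-IP segment base + relative
 * register offset; values below are invented. */
#include <stdint.h>
#include <stdio.h>

#define MAX_SEG 6

static uint32_t gc_base[MAX_SEG] = { 0x0, 0x1260, 0xa000 };	/* fake bases */
static const uint32_t *reg_offset[1];				/* [ip][seg] */

#define REG_OFFSET(ip, seg, reg) (reg_offset[ip][seg] + (reg))

int main(void)
{
	reg_offset[0] = gc_base;	/* the *_reg_base_init step */
	printf("abs offset = 0x%x\n", (unsigned)REG_OFFSET(0, 1, 0x42));
	return 0;
}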
@ -1393,6 +1393,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
	long err = 0;
	int i;
	uint32_t *devices_arr = NULL;
	bool table_freed = false;

	dev = kfd_device_by_id(GET_GPU_ID(args->handle));
	if (!dev)

@ -1450,7 +1451,8 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
			goto get_mem_obj_from_handle_failed;
		}
		err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
			peer->kgd, (struct kgd_mem *)mem, peer_pdd->drm_priv);
			peer->kgd, (struct kgd_mem *)mem,
			peer_pdd->drm_priv, &table_freed);
		if (err) {
			pr_err("Failed to map to gpu %d/%d\n",
			       i, args->n_devices);

@ -1468,16 +1470,17 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
	}

	/* Flush TLBs after waiting for the page table updates to complete */
	for (i = 0; i < args->n_devices; i++) {
		peer = kfd_device_by_id(devices_arr[i]);
		if (WARN_ON_ONCE(!peer))
			continue;
		peer_pdd = kfd_get_process_device_data(peer, p);
		if (WARN_ON_ONCE(!peer_pdd))
			continue;
		kfd_flush_tlb(peer_pdd);
	if (table_freed) {
		for (i = 0; i < args->n_devices; i++) {
			peer = kfd_device_by_id(devices_arr[i]);
			if (WARN_ON_ONCE(!peer))
				continue;
			peer_pdd = kfd_get_process_device_data(peer, p);
			if (WARN_ON_ONCE(!peer_pdd))
				continue;
			kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
		}
	}

	kfree(devices_arr);

	return err;

@ -1563,6 +1566,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
			       i, args->n_devices);
			goto unmap_memory_from_gpu_failed;
		}
		kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
		args->n_success = i+1;
	}
	kfree(devices_arr);

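The map-to-GPU hunk makes the per-peer TLB flush conditional: the map call now reports through &table_freed whether page-table pages were actually freed and rebuilt, and the flush loop only runs when they were. A compact sketch of that optimisation with stubbed types; the flush kinds mirror the TLB_FLUSH_* values this series introduces:

/* Sketch: skip the expensive all-device flush when no page table
 * changed shape during the mapping. */
#include <stdbool.h>
#include <stdio.h>

enum tlb_flush_type { TLB_FLUSH_LEGACY, TLB_FLUSH_HEAVYWEIGHT };

static int map_memory(bool *table_freed)
{
	*table_freed = true;	/* pretend the mapping rebuilt a table */
	return 0;
}

static void flush_tlb(int dev, enum tlb_flush_type type)
{
	printf("flush dev %d type %d\n", dev, type);
}

int main(void)
{
	bool table_freed = false;
	int n_devices = 2, i;

	if (map_memory(&table_freed))
		return 1;
	if (table_freed)	/* otherwise the flush is skipped */
		for (i = 0; i < n_devices; i++)
			flush_tlb(i, TLB_FLUSH_LEGACY);
	return 0;
}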
@ -746,6 +746,54 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
	},
};

static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.cache_size = 32,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* GL1 Data Cache per SA */
		.cache_size = 128,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 6,
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.cache_size = 2048,
		.cache_level = 2,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 6,
	},
};

static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
		struct crat_subtype_computeunit *cu)
{

@ -1383,6 +1431,10 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
		pcache_info = beige_goby_cache_info;
		num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
		break;
	case CHIP_YELLOW_CARP:
		pcache_info = yellow_carp_cache_info;
		num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info);
		break;
	default:
		return -EINVAL;
	}

@ -83,6 +83,7 @@ static const struct kfd2kgd_calls *kfd2kgd_funcs[] = {
	[CHIP_VANGOGH] = &gfx_v10_3_kfd2kgd,
	[CHIP_DIMGREY_CAVEFISH] = &gfx_v10_3_kfd2kgd,
	[CHIP_BEIGE_GOBY] = &gfx_v10_3_kfd2kgd,
	[CHIP_YELLOW_CARP] = &gfx_v10_3_kfd2kgd,
};

#ifdef KFD_SUPPORT_IOMMU_V2

@ -577,6 +578,23 @@ static const struct kfd_device_info beige_goby_device_info = {
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info yellow_carp_device_info = {
	.asic_family = CHIP_YELLOW_CARP,
	.asic_name = "yellow_carp",
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 1,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

/* For each entry, [0] is regular and [1] is virtualisation device. */
static const struct kfd_device_info *kfd_supported_devices[][2] = {

@ -606,6 +624,7 @@ static const struct kfd_device_info *kfd_supported_devices[][2] = {
	[CHIP_VANGOGH] = {&vangogh_device_info, NULL},
	[CHIP_DIMGREY_CAVEFISH] = {&dimgrey_cavefish_device_info, &dimgrey_cavefish_device_info},
	[CHIP_BEIGE_GOBY] = {&beige_goby_device_info, &beige_goby_device_info},
	[CHIP_YELLOW_CARP] = {&yellow_carp_device_info, NULL},
};

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,

@ -248,7 +248,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
			qpd->vmid,
			qpd->page_table_base);
	/* invalidate the VM context after pasid and vmid mapping is set up */
	kfd_flush_tlb(qpd_to_pdd(qpd));
	kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);

	if (dqm->dev->kfd2kgd->set_scratch_backing_va)
		dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd,

@ -284,7 +284,7 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
	if (flush_texture_cache_nocpsch(q->device, qpd))
		pr_err("Failed to flush TC\n");

	kfd_flush_tlb(qpd_to_pdd(qpd));
	kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

@ -760,7 +760,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
				dqm->dev->kgd,
				qpd->vmid,
				qpd->page_table_base);
		kfd_flush_tlb(pdd);
		kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
	}

	/* Take a safe reference to the mm_struct, which may otherwise

@ -1937,6 +1937,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
	case CHIP_VANGOGH:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
		device_queue_manager_init_v10_navi10(&dqm->asic_ops);
		break;
	default:

@ -425,6 +425,7 @@ int kfd_init_apertures(struct kfd_process *process)
		case CHIP_VANGOGH:
		case CHIP_DIMGREY_CAVEFISH:
		case CHIP_BEIGE_GOBY:
		case CHIP_YELLOW_CARP:
			kfd_init_apertures_v9(pdd, id);
			break;
		default:

@ -249,8 +249,13 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
		   client_id == SOC15_IH_CLIENTID_SDMA5 ||
		   client_id == SOC15_IH_CLIENTID_SDMA6 ||
		   client_id == SOC15_IH_CLIENTID_SDMA7) {
		if (source_id == SOC15_INTSRC_SDMA_TRAP)
		if (source_id == SOC15_INTSRC_SDMA_TRAP) {
			kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
		} else if (source_id == SOC15_INTSRC_SDMA_ECC) {
			kfd_signal_poison_consumed_event(dev, pasid);
			amdgpu_amdkfd_gpu_reset(dev->kgd);
			return;
		}
	} else if (client_id == SOC15_IH_CLIENTID_VMC ||
		   client_id == SOC15_IH_CLIENTID_VMC1 ||
		   client_id == SOC15_IH_CLIENTID_UTCL2) {

@ -250,6 +250,7 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
	case CHIP_VANGOGH:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
		pm->pmf = &kfd_v9_pm_funcs;
		break;
	case CHIP_ALDEBARAN:

@ -1146,7 +1146,7 @@ void kfd_signal_reset_event(struct kfd_dev *dev);

void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid);

void kfd_flush_tlb(struct kfd_process_device *pdd);
void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);

int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);

@ -35,7 +35,6 @@
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu.h"
#include "kfd_svm.h"

struct mm_struct;

@ -672,7 +671,8 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
	if (err)
		goto err_alloc_mem;

	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->drm_priv);
	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem,
			pdd->drm_priv, NULL);
	if (err)
		goto err_map_mem;

@ -1838,7 +1838,7 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}

void kfd_flush_tlb(struct kfd_process_device *pdd)
void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
{
	struct kfd_dev *dev = pdd->dev;

@ -1851,7 +1851,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd)
				pdd->qpd.vmid);
	} else {
		amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd,
					pdd->process->pasid, TLB_FLUSH_LEGACY);
					pdd->process->pasid, type);
	}
}

|
||||
|
||||
prange->mapping.start = prange->start;
|
||||
prange->mapping.last = prange->last;
|
||||
prange->mapping.offset = prange->offset;
|
||||
prange->mapping.offset = prange->ttm_res ? prange->offset : 0;
|
||||
pte_flags = svm_range_get_pte_flags(adev, prange);
|
||||
|
||||
r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false, NULL,
|
||||
|
@ -1399,6 +1399,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
	case CHIP_VANGOGH:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_YELLOW_CARP:
		dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
			HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
			HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);

@ -30,6 +30,7 @@
#define SOC15_INTSRC_SQ_INTERRUPT_MSG	239
#define SOC15_INTSRC_VMC_FAULT		0
#define SOC15_INTSRC_SDMA_TRAP		224
#define SOC15_INTSRC_SDMA_ECC		220


#define SOC15_CLIENT_ID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) & 0xff)

@ -31,6 +31,13 @@ config DRM_AMD_DC_SI
	  by default. This includes Tahiti, Pitcairn, Cape Verde, Oland.
	  Hainan is not supported by AMD DC and it has no physical DCE6.

config DRM_AMD_DC_DCN3_1
	bool "DCN 3.1 family"
	depends on DRM_AMD_DC_DCN
	help
	  Choose this option if you want to have
	  DCN3.1 family support for display engine

config DEBUG_KERNEL_DC
	bool "Enable kgdb break in DC"
	depends on DRM_AMD_DC

@ -109,6 +109,10 @@ MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#endif

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

@ -1150,6 +1154,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	case CHIP_YELLOW_CARP:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;

@ -1407,6 +1416,9 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_VANGOGH:
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	case CHIP_YELLOW_CARP:
#endif
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;

@ -1525,6 +1537,12 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
		dmub_asic = DMUB_ASIC_DCN303;
		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	case CHIP_YELLOW_CARP:
		dmub_asic = DMUB_ASIC_DCN31;
		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
		break;
#endif

	default:
		/* ASIC doesn't support DMUB. */

@ -2219,6 +2237,15 @@ static int dm_resume(void *handle)
				= 0xffffffff;
		}
	}
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	/*
	 * Resource allocation happens for link encoders for newer ASIC in
	 * dc_validate_global_state, so we need to revalidate it.
	 *
	 * This shouldn't fail (it passed once before), so warn if it does.
	 */
	WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
#endif

	WARN_ON(!dc_commit_state(dm->dc, dc_state));

@ -3764,6 +3791,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	case CHIP_YELLOW_CARP:
#endif
	case CHIP_RENOIR:
		if (register_outbox_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");

@ -3868,6 +3898,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_BEIGE_GOBY:
	case CHIP_VANGOGH:
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	case CHIP_YELLOW_CARP:
#endif
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;

@ -4039,6 +4072,13 @@ static int dm_early_init(void *handle)
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	case CHIP_YELLOW_CARP:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
#endif
	case CHIP_NAVI14:
	case CHIP_DIMGREY_CAVEFISH:
		adev->mode_info.num_crtc = 5;

@ -4276,6 +4316,9 @@ fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
	    adev->asic_type == CHIP_BEIGE_GOBY ||
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	    adev->asic_type == CHIP_YELLOW_CARP ||
#endif
	    adev->asic_type == CHIP_VANGOGH)
		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}

@ -4695,6 +4738,7 @@ get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, u
		break;
	case AMDGPU_FAMILY_NV:
	case AMDGPU_FAMILY_VGH:
	case AMDGPU_FAMILY_YC:
		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
		else

@ -9633,7 +9677,8 @@ skip_modeset:
		BUG_ON(dm_new_crtc_state->stream == NULL);

		/* Scaling or underscan settings */
		if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
				drm_atomic_crtc_needs_modeset(new_crtc_state))
			update_stream_scaling_settings(
				&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

@ -456,6 +456,7 @@ struct dsc_preferred_settings {
	uint32_t dsc_num_slices_v;
	uint32_t dsc_num_slices_h;
	uint32_t dsc_bits_per_pixel;
	bool dsc_force_disable_passthrough;
};

struct amdgpu_dm_connector {

@ -887,6 +887,47 @@ unlock:
	return res;
}

/*
 * Example usage:
 * Disable dsc passthrough, i.e.,: have dsc decoding at converver, not external RX
 *   echo 1 /sys/kernel/debug/dri/0/DP-1/dsc_disable_passthrough
 * Enable dsc passthrough, i.e.,: have dsc passthrough to external RX
 *   echo 0 /sys/kernel/debug/dri/0/DP-1/dsc_disable_passthrough
 */
static ssize_t dp_dsc_passthrough_set(struct file *f, const char __user *buf,
				      size_t size, loff_t *pos)
{
	struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
	char *wr_buf = NULL;
	uint32_t wr_buf_size = 42;
	int max_param_num = 1;
	long param;
	uint8_t param_nums = 0;

	if (size == 0)
		return -EINVAL;

	wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);

	if (!wr_buf) {
		DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
		return -ENOSPC;
	}

	if (parse_write_buffer_into_params(wr_buf, size,
					   &param, buf,
					   max_param_num,
					   &param_nums)) {
		kfree(wr_buf);
		return -EINVAL;
	}

	aconnector->dsc_settings.dsc_force_disable_passthrough = param;

	kfree(wr_buf);
	return 0;
}

#ifdef CONFIG_DRM_AMD_DC_HDCP
/*
 * Returns the HDCP capability of the Display (1.4 for now).

@ -2535,6 +2576,12 @@ static const struct file_operations dp_max_bpc_debugfs_fops = {
	.llseek = default_llseek
};

static const struct file_operations dp_dsc_disable_passthrough_debugfs_fops = {
	.owner = THIS_MODULE,
	.write = dp_dsc_passthrough_set,
	.llseek = default_llseek
};

static const struct {
	char *name;
	const struct file_operations *fops;

@ -2559,7 +2606,8 @@ static const struct {
		{"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops},
		{"dsc_slice_bpg", &dp_dsc_slice_bpg_offset_debugfs_fops},
		{"dp_dsc_fec_support", &dp_dsc_fec_support_fops},
		{"max_bpc", &dp_max_bpc_debugfs_fops}
		{"max_bpc", &dp_max_bpc_debugfs_fops},
		{"dsc_disable_passthrough", &dp_dsc_disable_passthrough_debugfs_fops},
};

#ifdef CONFIG_DRM_AMD_DC_HDCP

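The new debugfs knob is write-only; its usage comment above shows the echo form. A small userspace driver of the same interface — the card index "0" and connector name "DP-1" are illustrative and will differ per system:

/* Userspace sketch: disable DSC passthrough via debugfs (needs root
 * and a mounted debugfs). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/dri/0/DP-1/dsc_disable_passthrough";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* "1" = decode at the converter, do not pass DSC to external RX */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}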
@ -222,10 +222,23 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
			 struct amdgpu_dm_connector *aconnector)
{
	struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
	struct drm_connector_state *conn_state = aconnector->base.state;

	mutex_lock(&hdcp_w->mutex);
	hdcp_w->aconnector = aconnector;

	/* the removal of display will invoke auth reset -> hdcp destroy and
	 * we'd expect the Content Protection (CP) property changed back to
	 * DESIRED if at the time ENABLED. CP property change should occur
	 * before the element removed from linked list.
	 */
	if (conn_state && conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

		DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP 2 -> 1, type %u, DPMS %u\n",
			 aconnector->base.index, conn_state->hdcp_content_type, aconnector->base.dpms);
	}

	mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);

	process_output(hdcp_w);

@ -454,6 +467,13 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
	display->dig_fe = config->dig_fe;
	link->dig_be = config->dig_be;
	link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	display->stream_enc_idx = config->stream_enc_idx;
	link->link_enc_idx = config->link_enc_idx;
	link->phy_idx = config->phy_idx;
	link->hdcp_supported_informational = dc_link_is_hdcp14(aconnector->dc_link,
			aconnector->dc_sink->sink_signal) ? 1 : 0;
#endif
	link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
	link->dp.assr_enabled = config->assr_enabled;
	link->dp.mst_enabled = config->mst_enabled;

@ -462,7 +482,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
	link->adjust.hdcp1.disable = 0;
	conn_state = aconnector->base.state;

	pr_debug("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index,
	DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index,
		(!!aconnector->base.state) ? aconnector->base.state->content_protection : -1,
		(!!aconnector->base.state) ? aconnector->base.state->hdcp_content_type : -1);

@ -637,6 +657,12 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
		INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);

		hdcp_work[i].hdcp.config.psp.handle = &adev->psp;
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
		if (dc->ctx->dce_version == DCN_VERSION_3_1) {
			hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1;
			hdcp_work[i].hdcp.config.psp.caps.opm_state_query_supported = false;
		}
#endif
		hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
		hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
		hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;

@ -655,6 +655,12 @@ void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
	/* TODO: something */
}

void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us)
{
	// TODO:
	//amdgpu_device_gpu_recover(dc_context->driver-context, NULL);
}

void *dm_helpers_allocate_gpu_mem(
		struct dc_context *ctx,
		enum dc_gpu_mem_alloc_type type,

@ -34,6 +34,9 @@ DC_LIBS += dcn30
DC_LIBS += dcn301
DC_LIBS += dcn302
DC_LIBS += dcn303
ifdef CONFIG_DRM_AMD_DC_DCN3_1
DC_LIBS += dcn31
endif
endif

DC_LIBS += dce120

@ -576,6 +576,13 @@ static struct device_id device_type_from_device_id(uint16_t device_id)
		result_device_id.device_type = DEVICE_TYPE_LCD;
		result_device_id.enum_id = 1;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)

	case ATOM_DISPLAY_LCD2_SUPPORT:
		result_device_id.device_type = DEVICE_TYPE_LCD;
		result_device_id.enum_id = 2;
		break;
#endif

	case ATOM_DISPLAY_DFP1_SUPPORT:
		result_device_id.device_type = DEVICE_TYPE_DFP;

@@ -2155,6 +2162,107 @@ static enum bp_result get_integrated_info_v2_1(
	return BP_RESULT_OK;
}

#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
static enum bp_result get_integrated_info_v2_2(
	struct bios_parser *bp,
	struct integrated_info *info)
{
	struct atom_integrated_system_info_v2_2 *info_v2_2;
	uint32_t i;

	info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2,
			DATA_TABLES(integratedsysteminfo));

	if (info_v2_2 == NULL)
		return BP_RESULT_BADBIOSTABLE;

	info->gpu_cap_info =
		le32_to_cpu(info_v2_2->gpucapinfo);
	/*
	 * system_config: Bit[0] = 0 : PCIE power gating disabled
	 *                       = 1 : PCIE power gating enabled
	 *                Bit[1] = 0 : DDR-PLL shut down disabled
	 *                       = 1 : DDR-PLL shut down enabled
	 *                Bit[2] = 0 : DDR-PLL power down disabled
	 *                       = 1 : DDR-PLL power down enabled
	 */
	info->system_config = le32_to_cpu(info_v2_2->system_config);
	info->cpu_cap_info = le32_to_cpu(info_v2_2->cpucapinfo);
	info->memory_type = info_v2_2->memorytype;
	info->ma_channel_number = info_v2_2->umachannelnumber;
	info->dp_ss_control =
		le16_to_cpu(info_v2_2->reserved1);

	for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
		info->ext_disp_conn_info.gu_id[i] =
				info_v2_2->extdispconninfo.guid[i];
	}

	for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) {
		info->ext_disp_conn_info.path[i].device_connector_id =
				object_id_from_bios_object_id(
				le16_to_cpu(info_v2_2->extdispconninfo.path[i].connectorobjid));

		info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
				object_id_from_bios_object_id(
					le16_to_cpu(
					info_v2_2->extdispconninfo.path[i].ext_encoder_objid));

		info->ext_disp_conn_info.path[i].device_tag =
				le16_to_cpu(
					info_v2_2->extdispconninfo.path[i].device_tag);
		info->ext_disp_conn_info.path[i].device_acpi_enum =
				le16_to_cpu(
					info_v2_2->extdispconninfo.path[i].device_acpi_enum);
		info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
				info_v2_2->extdispconninfo.path[i].auxddclut_index;
		info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
				info_v2_2->extdispconninfo.path[i].hpdlut_index;
		info->ext_disp_conn_info.path[i].channel_mapping.raw =
				info_v2_2->extdispconninfo.path[i].channelmapping;
		info->ext_disp_conn_info.path[i].caps =
				le16_to_cpu(info_v2_2->extdispconninfo.path[i].caps);
	}

	info->ext_disp_conn_info.checksum =
		info_v2_2->extdispconninfo.checksum;

	info->edp1_info.edp_backlight_pwm_hz =
		le16_to_cpu(info_v2_2->edp1_info.edp_backlight_pwm_hz);
	info->edp1_info.edp_ss_percentage =
		le16_to_cpu(info_v2_2->edp1_info.edp_ss_percentage);
	info->edp1_info.edp_ss_rate_10hz =
		le16_to_cpu(info_v2_2->edp1_info.edp_ss_rate_10hz);
	info->edp1_info.edp_pwr_on_off_delay =
		info_v2_2->edp1_info.edp_pwr_on_off_delay;
	info->edp1_info.edp_pwr_on_vary_bl_to_blon =
		info_v2_2->edp1_info.edp_pwr_on_vary_bl_to_blon;
	info->edp1_info.edp_pwr_down_bloff_to_vary_bloff =
		info_v2_2->edp1_info.edp_pwr_down_bloff_to_vary_bloff;
	info->edp1_info.edp_panel_bpc =
		info_v2_2->edp1_info.edp_panel_bpc;
	info->edp1_info.edp_bootup_bl_level =
		info_v2_2->edp1_info.edp_bootup_bl_level;

	info->edp2_info.edp_backlight_pwm_hz =
		le16_to_cpu(info_v2_2->edp2_info.edp_backlight_pwm_hz);
	info->edp2_info.edp_ss_percentage =
		le16_to_cpu(info_v2_2->edp2_info.edp_ss_percentage);
	info->edp2_info.edp_ss_rate_10hz =
		le16_to_cpu(info_v2_2->edp2_info.edp_ss_rate_10hz);
	info->edp2_info.edp_pwr_on_off_delay =
		info_v2_2->edp2_info.edp_pwr_on_off_delay;
	info->edp2_info.edp_pwr_on_vary_bl_to_blon =
		info_v2_2->edp2_info.edp_pwr_on_vary_bl_to_blon;
	info->edp2_info.edp_pwr_down_bloff_to_vary_bloff =
		info_v2_2->edp2_info.edp_pwr_down_bloff_to_vary_bloff;
	info->edp2_info.edp_panel_bpc =
		info_v2_2->edp2_info.edp_panel_bpc;
	info->edp2_info.edp_bootup_bl_level =
		info_v2_2->edp2_info.edp_bootup_bl_level;

	return BP_RESULT_OK;
}
#endif
/*
 * construct_integrated_info
 *
@@ -2202,6 +2310,11 @@ static enum bp_result construct_integrated_info(
	case 1:
		result = get_integrated_info_v2_1(bp, info);
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	case 2:
		result = get_integrated_info_v2_2(bp, info);
		break;
#endif
	default:
		return result;
	}
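
The v2_2 parser above funnels every multi-byte VBIOS field through le16_to_cpu()/le32_to_cpu(), because ATOM tables are stored little endian regardless of host byte order. A small self-contained illustration of that idiom follows; sketch_le16_to_cpu() and the sample bytes are invented for this example and are not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for the kernel's le16_to_cpu(): assemble the value from
 * explicit byte positions so the result is independent of host endianness. */
static uint16_t sketch_le16_to_cpu(const uint8_t *le_bytes)
{
	return (uint16_t)(le_bytes[0] | ((uint16_t)le_bytes[1] << 8));
}

int main(void)
{
	/* a VBIOS table stores 0x1234 as the byte sequence 0x34 0x12 */
	const uint8_t raw[2] = { 0x34, 0x12 };

	printf("decoded: 0x%04x\n", sketch_le16_to_cpu(raw));
	return 0;
}
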
@@ -82,9 +82,6 @@ void bios_set_scratch_critical_state(
 uint32_t bios_get_vga_enabled_displays(
 	struct dc_bios *bios)
 {
-	uint32_t active_disp = 1;
-
-	active_disp = REG_READ(BIOS_SCRATCH_3) & 0XFFFF;
-	return active_disp;
+	return REG_READ(BIOS_SCRATCH_3) & 0XFFFF;
 }

@@ -72,6 +72,11 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
	case DCN_VERSION_2_1:
	case DCN_VERSION_3_0:
	case DCN_VERSION_3_01:
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	case DCN_VERSION_3_1:
		*h = dal_cmd_tbl_helper_dce112_get_table2();
		return true;
#endif
	case DCN_VERSION_3_02:
	case DCN_VERSION_3_03:
		*h = dal_cmd_tbl_helper_dce112_get_table2();
@@ -136,3 +136,14 @@ AMD_DAL_CLK_MGR_DCN301 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn301/,$(CLK_MGR_

AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN301)
endif

ifdef CONFIG_DRM_AMD_DC_DCN3_1
###############################################################################
# DCN31
###############################################################################
CLK_MGR_DCN31 = dcn31_smu.o dcn31_clk_mgr.o

AMD_DAL_CLK_MGR_DCN31 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn31/,$(CLK_MGR_DCN31))

AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN31)
endif
@@ -41,6 +41,9 @@
#include "dcn21/rn_clk_mgr.h"
#include "dcn30/dcn30_clk_mgr.h"
#include "dcn301/vg_clk_mgr.h"
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
#include "dcn31/dcn31_clk_mgr.h"
#endif


int clk_mgr_helper_get_active_display_cnt(
@@ -90,15 +93,20 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m
 	struct dc_link *edp_links[MAX_NUM_EDP];
 	struct dc_link *edp_link = NULL;
 	int edp_num;
+	unsigned int panel_inst;

 	get_edp_links(dc, edp_links, &edp_num);
 	if (dc->hwss.exit_optimized_pwr_state)
 		dc->hwss.exit_optimized_pwr_state(dc, dc->current_state);

 	if (edp_num) {
-		edp_link = edp_links[0];
-		clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active;
-		dc_link_set_psr_allow_active(edp_link, false, false, false);
+		for (panel_inst = 0; panel_inst < edp_num; panel_inst++) {
+			edp_link = edp_links[panel_inst];
+			if (!edp_link->psr_settings.psr_feature_enabled)
+				continue;
+			clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active;
+			dc_link_set_psr_allow_active(edp_link, false, false, false);
+		}
 	}

 }
@@ -108,12 +116,17 @@ void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
 	struct dc_link *edp_links[MAX_NUM_EDP];
 	struct dc_link *edp_link = NULL;
 	int edp_num;
+	unsigned int panel_inst;

 	get_edp_links(dc, edp_links, &edp_num);
 	if (edp_num) {
-		edp_link = edp_links[0];
-		dc_link_set_psr_allow_active(edp_link,
-				clk_mgr->psr_allow_active_cache, false, false);
+		for (panel_inst = 0; panel_inst < edp_num; panel_inst++) {
+			edp_link = edp_links[panel_inst];
+			if (!edp_link->psr_settings.psr_feature_enabled)
+				continue;
+			dc_link_set_psr_allow_active(edp_link,
+					clk_mgr->psr_allow_active_cache, false, false);
+		}
 	}

 	if (dc->hwss.optimize_pwr_state)
@@ -261,6 +274,26 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
		}
		break;
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	case FAMILY_YELLOW_CARP: {
		struct clk_mgr_dcn31 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}
		if (ASICREV_IS_YELLOW_CARP(asic_id.hw_internal_rev)) {
			/* TODO: to add DCN31 clk_mgr support, once CLK IP header files are available,
			 * for now use DCN3.0 clk mgr.
			 */
			dcn31_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
			return &clk_mgr->base.base;
		}
		return &clk_mgr->base.base;
	}
#endif

	default:
		ASSERT(0); /* Unknown Asic */
		break;
@@ -292,6 +325,13 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
		vg_clk_mgr_destroy(clk_mgr);
		break;

#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	case FAMILY_YELLOW_CARP:
		if (ASICREV_IS_YELLOW_CARP(clk_mgr_base->ctx->asic_id.hw_internal_rev))
			dcn31_clk_mgr_destroy(clk_mgr);
		break;
#endif

	default:
		break;
	}
@@ -324,6 +324,10 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
	// Both fclk and ref_dppclk run on the same scemi clock.
	clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz;

#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	/* TODO: set dtbclk in correct place */
	clk_mgr->clks.dtbclk_en = false;
#endif
	dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks);
}
@@ -835,66 +835,47 @@ static struct wm_table lpddr4_wm_table_rn = {
 	}
 };

-static unsigned int find_max_fclk_for_voltage(struct dpm_clocks *clock_table,
-		unsigned int voltage)
+static unsigned int find_socclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
 {
 	int i;
-	uint32_t max_clk = 0;
-
-	for (i = 0; i < PP_SMU_NUM_FCLK_DPM_LEVELS; i++) {
-		if (clock_table->FClocks[i].Vol <= voltage) {
-			max_clk = clock_table->FClocks[i].Freq > max_clk ?
-				clock_table->FClocks[i].Freq : max_clk;
-		}
-	}
-
-	return max_clk;
-}
-
-static unsigned int find_max_memclk_for_voltage(struct dpm_clocks *clock_table,
-		unsigned int voltage)
-{
-	int i;
-	uint32_t max_clk = 0;
-
-	for (i = 0; i < PP_SMU_NUM_MEMCLK_DPM_LEVELS; i++) {
-		if (clock_table->MemClocks[i].Vol <= voltage) {
-			max_clk = clock_table->MemClocks[i].Freq > max_clk ?
-				clock_table->MemClocks[i].Freq : max_clk;
-		}
-	}
-
-	return max_clk;
-}
-
-static unsigned int find_max_socclk_for_voltage(struct dpm_clocks *clock_table,
-		unsigned int voltage)
-{
-	int i;
-	uint32_t max_clk = 0;

 	for (i = 0; i < PP_SMU_NUM_SOCCLK_DPM_LEVELS; i++) {
-		if (clock_table->SocClocks[i].Vol <= voltage) {
-			max_clk = clock_table->SocClocks[i].Freq > max_clk ?
-				clock_table->SocClocks[i].Freq : max_clk;
-		}
+		if (clock_table->SocClocks[i].Vol == voltage)
+			return clock_table->SocClocks[i].Freq;
 	}

-	return max_clk;
+	ASSERT(0);
+	return 0;
+}
+
+static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
+{
+	int i;
+
+	for (i = 0; i < PP_SMU_NUM_DCFCLK_DPM_LEVELS; i++) {
+		if (clock_table->DcfClocks[i].Vol == voltage)
+			return clock_table->DcfClocks[i].Freq;
+	}
+
+	ASSERT(0);
+	return 0;
 }

 static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct integrated_info *bios_info)
 {
 	int i, j = 0;
-	unsigned int volt;

-	/* Find max DPM */
-	for (i = 0; i < PP_SMU_NUM_DCFCLK_DPM_LEVELS; ++i) {
-		if (clock_table->DcfClocks[i].Freq != 0 &&
-				clock_table->DcfClocks[i].Vol != 0)
-			j = i;
-	}
+	j = -1;
+
+	ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL);
+
+	/* Find lowest DPM, FCLK is filled in reverse order */
+
+	for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) {
+		if (clock_table->FClocks[i].Freq != 0 && clock_table->FClocks[i].Vol != 0) {
+			j = i;
+			break;
+		}
+	}

 	if (j == -1) {
@@ -905,18 +886,13 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params

 	bw_params->clk_table.num_entries = j + 1;

-	for (i = 0; i < bw_params->clk_table.num_entries; i++) {
-		volt = clock_table->DcfClocks[i].Vol;
-
-		bw_params->clk_table.entries[i].voltage = volt;
-		bw_params->clk_table.entries[i].dcfclk_mhz =
-			clock_table->DcfClocks[i].Freq;
-		bw_params->clk_table.entries[i].fclk_mhz =
-			find_max_fclk_for_voltage(clock_table, volt);
-		bw_params->clk_table.entries[i].memclk_mhz =
-			find_max_memclk_for_voltage(clock_table, volt);
-		bw_params->clk_table.entries[i].socclk_mhz =
-			find_max_socclk_for_voltage(clock_table, volt);
+	for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
+		bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[j].Freq;
+		bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[j].Freq;
+		bw_params->clk_table.entries[i].voltage = clock_table->FClocks[j].Vol;
+		bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol);
+		bw_params->clk_table.entries[i].socclk_mhz = find_socclk_for_voltage(clock_table,
+				bw_params->clk_table.entries[i].voltage);
 	}

 	bw_params->vram_type = bios_info->memory_type;
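
The rework above keys every derived clock off the voltage of the fundamental-clock (FCLK) DPM level instead of taking a per-level maximum, and asserts when the firmware table has no entry at exactly that voltage. Below is a toy, self-contained version of that exact-match lookup; the struct, table values and names are invented for illustration only.

#include <assert.h>
#include <stdio.h>

#define LEVELS 4

struct level { unsigned int freq_mhz, vol; };

/* Return the clock advertised at exactly this voltage; assert if the
 * firmware table has no matching level (mirrors the ASSERT(0) above). */
static unsigned int find_freq_for_voltage(const struct level *tbl, unsigned int voltage)
{
	int i;

	for (i = 0; i < LEVELS; i++) {
		if (tbl[i].vol == voltage)
			return tbl[i].freq_mhz;
	}
	assert(0);	/* every FCLK voltage must have a matching entry */
	return 0;
}

int main(void)
{
	const struct level dcf[LEVELS] = {
		{ 300, 1 }, { 400, 2 }, { 500, 3 }, { 600, 4 },
	};

	/* an FCLK level at voltage 3 pairs with the 500 MHz DCFCLK level */
	printf("dcfclk @ vol 3: %u MHz\n", find_freq_for_voltage(dcf, 3));
	return 0;
}
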
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c (new file, 673 lines)
@@ -0,0 +1,673 @@
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dccg.h"
#include "clk_mgr_internal.h"

// For dce12_get_dp_ref_freq_khz
#include "dce100/dce_clk_mgr.h"

// For dcn20_update_clocks_update_dpp_dto
#include "dcn20/dcn20_clk_mgr.h"

#include "dcn31_clk_mgr.h"

#include "reg_helper.h"
#include "core_types.h"
#include "dcn31_smu.h"
#include "dm_helpers.h"

/* TODO: remove this include once we have ported over the remaining clk mgr functions */
#include "dcn30/dcn30_clk_mgr.h"

#include "dc_dmub_srv.h"

#define TO_CLK_MGR_DCN31(clk_mgr)\
	container_of(clk_mgr, struct clk_mgr_dcn31, base)

int dcn31_get_active_display_cnt_wa(
		struct dc *dc,
		struct dc_state *context)
{
	int i, display_count;
	bool tmds_present = false;

	display_count = 0;
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_stream_state *stream = context->streams[i];

		if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
				stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
				stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
			tmds_present = true;
	}

	for (i = 0; i < dc->link_count; i++) {
		const struct dc_link *link = dc->links[i];

		/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
		if (link->link_enc->funcs->is_dig_enabled &&
				link->link_enc->funcs->is_dig_enabled(link->link_enc))
			display_count++;
	}

	/* WA for hang on HDMI after the display is turned off and back on */
	if (display_count == 0 && tmds_present)
		display_count = 1;

	return display_count;
}

static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
{
	struct dc *dc = clk_mgr_base->ctx->dc;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; ++i) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->top_pipe || pipe->prev_odm_pipe)
			continue;
		if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
			if (disable)
				pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
			else
				pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
		}
	}
}

static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
			struct dc_state *context,
			bool safe_to_lower)
{
	union dmub_rb_cmd cmd;
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
	struct dc *dc = clk_mgr_base->ctx->dc;
	int display_count;
	bool update_dppclk = false;
	bool update_dispclk = false;
	bool dpp_clock_lowered = false;

	if (dc->work_arounds.skip_clock_update)
		return;

	/*
	 * if it is safe to lower, but we are already in the lower state, we don't have to do anything
	 * also if safe to lower is false, we just go in the higher state
	 */
	if (safe_to_lower) {
		if (new_clocks->z9_support == DCN_Z9_SUPPORT_ALLOW &&
				new_clocks->z9_support != clk_mgr_base->clks.z9_support) {
			dcn31_smu_set_Z9_support(clk_mgr, true);
			clk_mgr_base->clks.z9_support = new_clocks->z9_support;
		}

		if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
			dcn31_smu_set_dtbclk(clk_mgr, false);
			clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
		}
		/* check that we're not already in lower */
		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
			display_count = dcn31_get_active_display_cnt_wa(dc, context);
			/* if we can go lower, go lower */
			if (display_count == 0) {
				union display_idle_optimization_u idle_info = { 0 };
				idle_info.idle_info.df_request_disabled = 1;
				idle_info.idle_info.phy_ref_clk_off = 1;
				dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
				/* update power state */
				clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
			}
		}
	} else {
		if (new_clocks->z9_support == DCN_Z9_SUPPORT_DISALLOW &&
				new_clocks->z9_support != clk_mgr_base->clks.z9_support) {
			dcn31_smu_set_Z9_support(clk_mgr, false);
			clk_mgr_base->clks.z9_support = new_clocks->z9_support;
		}

		if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
			dcn31_smu_set_dtbclk(clk_mgr, true);
			clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
		}

		/* check that we're not already in D0 */
		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
			union display_idle_optimization_u idle_info = { 0 };
			dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
			/* update power state */
			clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
		}
	}

	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
		clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
		dcn31_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
	}

	if (should_set_clock(safe_to_lower,
			new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
		clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
		dcn31_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
	}

	// workaround: limit dppclk to no less than 100 MHz to avoid underflow when
	// switching from a low-resolution eDP panel to eDP plus a 4K monitor.
	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
		if (new_clocks->dppclk_khz < 100000)
			new_clocks->dppclk_khz = 100000;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
		if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
			dpp_clock_lowered = true;
		clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
		update_dppclk = true;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
		dcn31_disable_otg_wa(clk_mgr_base, true);

		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
		dcn31_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
		dcn31_disable_otg_wa(clk_mgr_base, false);

		update_dispclk = true;
	}

	/* TODO: add back DTO programming when DPPCLK restore is fixed in FSDL*/
	if (dpp_clock_lowered) {
		// increase per DPP DTO before lowering global dppclk
		dcn31_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
	} else {
		// increase global DPPCLK before lowering per DPP DTO
		if (update_dppclk || update_dispclk)
			dcn31_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
	}

	// notify DMCUB of latest clocks
	memset(&cmd, 0, sizeof(cmd));
	cmd.notify_clocks.header.type = DMUB_CMD__CLK_MGR;
	cmd.notify_clocks.header.sub_type = DMUB_CMD__CLK_MGR_NOTIFY_CLOCKS;
	cmd.notify_clocks.clocks.dcfclk_khz = clk_mgr_base->clks.dcfclk_khz;
	cmd.notify_clocks.clocks.dcfclk_deep_sleep_khz =
		clk_mgr_base->clks.dcfclk_deep_sleep_khz;
	cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
	cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;

	dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
	dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
	dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
}

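
dcn31_update_clocks() above gates every clock write with should_set_clock(): raising a clock is always allowed, lowering it only when the caller says it is safe. The real helper lives in clk_mgr_internal.h; the following is only a standalone sketch of that gating, with invented sketch_* names, not the driver's definition.

#include <stdbool.h>
#include <stdio.h>

/* Raise anytime; lower only once safe_to_lower is set, so the clocks stay
 * high through the transition and drop in a second, "safe" pass. */
static bool sketch_should_set_clock(bool safe_to_lower, int new_khz, int cur_khz)
{
	return (safe_to_lower && new_khz < cur_khz) || new_khz > cur_khz;
}

int main(void)
{
	printf("%d\n", sketch_should_set_clock(false, 400000, 600000)); /* 0: no lowering yet */
	printf("%d\n", sketch_should_set_clock(true,  400000, 600000)); /* 1: safe to lower   */
	printf("%d\n", sketch_should_set_clock(false, 800000, 600000)); /* 1: raise anytime   */
	return 0;
}
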
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
{
	return 0;
}

static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	dcn31_smu_enable_pme_wa(clk_mgr);
}

static void dcn31_init_clocks(struct clk_mgr *clk_mgr)
{
	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
	// Assumption is that boot state always supports pstate
	clk_mgr->clks.p_state_change_support = true;
	clk_mgr->clks.prev_p_state_change_support = true;
	clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
	clk_mgr->clks.z9_support = DCN_Z9_SUPPORT_UNKNOWN;
}

static bool dcn31_are_clock_states_equal(struct dc_clocks *a,
		struct dc_clocks *b)
{
	if (a->dispclk_khz != b->dispclk_khz)
		return false;
	else if (a->dppclk_khz != b->dppclk_khz)
		return false;
	else if (a->dcfclk_khz != b->dcfclk_khz)
		return false;
	else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
		return false;
	else if (a->z9_support != b->z9_support)
		return false;
	else if (a->dtbclk_en != b->dtbclk_en)
		return false;

	return true;
}

static void dcn31_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
		struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
	return;
}

static struct clk_bw_params dcn31_bw_params = {
	.vram_type = Ddr4MemType,
	.num_channels = 1,
	.clk_table = {
		.num_entries = 4,
	},

};

static struct wm_table ddr4_wm_table = {
	.entries = {
		{
			.wm_inst = WM_A,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 6.09,
			.sr_enter_plus_exit_time_us = 7.14,
			.valid = true,
		},
		{
			.wm_inst = WM_B,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 10.12,
			.sr_enter_plus_exit_time_us = 11.48,
			.valid = true,
		},
		{
			.wm_inst = WM_C,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 10.12,
			.sr_enter_plus_exit_time_us = 11.48,
			.valid = true,
		},
		{
			.wm_inst = WM_D,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 10.12,
			.sr_enter_plus_exit_time_us = 11.48,
			.valid = true,
		},
	}
};

static struct wm_table lpddr5_wm_table = {
	.entries = {
		{
			.wm_inst = WM_A,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 5.32,
			.sr_enter_plus_exit_time_us = 6.38,
			.valid = true,
		},
		{
			.wm_inst = WM_B,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 9.82,
			.sr_enter_plus_exit_time_us = 11.196,
			.valid = true,
		},
		{
			.wm_inst = WM_C,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 9.89,
			.sr_enter_plus_exit_time_us = 11.24,
			.valid = true,
		},
		{
			.wm_inst = WM_D,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 9.748,
			.sr_enter_plus_exit_time_us = 11.102,
			.valid = true,
		},
	}
};

static DpmClocks_t dummy_clocks;

static struct dcn31_watermarks dummy_wms = { 0 };

static void dcn31_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn31_watermarks *table)
{
	int i, num_valid_sets;

	num_valid_sets = 0;

	for (i = 0; i < WM_SET_COUNT; i++) {
		/* skip empty entries, the smu array has no holes */
		if (!bw_params->wm_table.entries[i].valid)
			continue;

		table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;
		table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;
		/* We will not select WM based on fclk, so leave it as unconstrained */
		table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
		table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;

		if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) {
			if (i == 0)
				table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0;
			else {
				/* add 1 to make it non-overlapping with next lvl */
				table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk =
						bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;
			}
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk =
					bw_params->clk_table.entries[i].dcfclk_mhz;

		} else {
			/* unconstrained for memory retraining */
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;

			/* Modify previous watermark range to cover up to max */
			table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
		}
		num_valid_sets++;
	}

	ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */

	/* modify the min and max to make sure we cover the whole range */
	table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0;
	table->WatermarkRow[WM_DCFCLK][0].MinClock = 0;
	table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF;
	table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;

	/* This is for writeback only, does not matter currently as no writeback support */
	table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A;
	table->WatermarkRow[WM_SOCCLK][0].MinClock = 0;
	table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF;
	table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0;
	table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF;
}

static void dcn31_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct clk_mgr_dcn31 *clk_mgr_dcn31 = TO_CLK_MGR_DCN31(clk_mgr);
	struct dcn31_watermarks *table = clk_mgr_dcn31->smu_wm_set.wm_set;

	if (!clk_mgr->smu_ver)
		return;

	if (!table || clk_mgr_dcn31->smu_wm_set.mc_address.quad_part == 0)
		return;

	memset(table, 0, sizeof(*table));

	dcn31_build_watermark_ranges(clk_mgr_base->bw_params, table);

	dcn31_smu_set_dram_addr_high(clk_mgr,
			clk_mgr_dcn31->smu_wm_set.mc_address.high_part);
	dcn31_smu_set_dram_addr_low(clk_mgr,
			clk_mgr_dcn31->smu_wm_set.mc_address.low_part);
	dcn31_smu_transfer_wm_table_dram_2_smu(clk_mgr);
}

static void dcn31_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
		struct dcn31_smu_dpm_clks *smu_dpm_clks)
{
	DpmClocks_t *table = smu_dpm_clks->dpm_clks;

	if (!clk_mgr->smu_ver)
		return;

	if (!table || smu_dpm_clks->mc_address.quad_part == 0)
		return;

	memset(table, 0, sizeof(*table));

	dcn31_smu_set_dram_addr_high(clk_mgr,
			smu_dpm_clks->mc_address.high_part);
	dcn31_smu_set_dram_addr_low(clk_mgr,
			smu_dpm_clks->mc_address.low_part);
	dcn31_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}

static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
	uint32_t max = 0;
	int i;

	for (i = 0; i < num_clocks; ++i) {
		if (clocks[i] > max)
			max = clocks[i];
	}

	return max;
}

static unsigned int find_clk_for_voltage(
		const DpmClocks_t *clock_table,
		const uint32_t clocks[],
		unsigned int voltage)
{
	int i;

	for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
		if (clock_table->SocVoltage[i] == voltage)
			return clocks[i];
	}

	ASSERT(0);
	return 0;
}

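
dcn31_build_watermark_ranges() above turns the sorted clock table into non-overlapping [MinMclk, MaxMclk] spans: each lower bound starts 1 MHz past the previous level's DCFCLK and the first/last ranges are opened to 0 and the 0xFFFF sentinel. A self-contained sketch of that arithmetic, with invented table values:

#include <stdio.h>

#define SETS 4

int main(void)
{
	const unsigned int dcfclk_mhz[SETS] = { 400, 600, 800, 1000 };
	unsigned int min_clk, max_clk;
	int i;

	for (i = 0; i < SETS; i++) {
		/* first range starts at 0; later ranges start 1 MHz above
		 * the previous level so consecutive ranges never overlap */
		min_clk = (i == 0) ? 0 : dcfclk_mhz[i - 1] + 1;
		/* the last range is opened up to the 0xFFFF sentinel */
		max_clk = (i == SETS - 1) ? 0xFFFF : dcfclk_mhz[i];
		printf("WM_%c: [%u, %u] MHz\n", 'A' + i, min_clk, max_clk);
	}
	return 0;
}
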
void dcn31_clk_mgr_helper_populate_bw_params(
		struct clk_mgr_internal *clk_mgr,
		struct integrated_info *bios_info,
		const DpmClocks_t *clock_table)
{
	int i, j;
	struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
	uint32_t max_dispclk = 0, max_dppclk = 0;

	j = -1;

	ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);

	/* Find lowest DPM, FCLK is filled in reverse order */

	for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) {
		if (clock_table->DfPstateTable[i].FClk != 0) {
			j = i;
			break;
		}
	}

	if (j == -1) {
		/* clock table is all 0s, just use our own hardcode */
		ASSERT(0);
		return;
	}

	bw_params->clk_table.num_entries = j + 1;

	/* dispclk and dppclk can be max at any voltage, same number of levels for both */
	if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
	    clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
		max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
		max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
	} else {
		ASSERT(0);
	}

	for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
		bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk;
		bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk;
		bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage;
		switch (clock_table->DfPstateTable[j].WckRatio) {
		case WCK_RATIO_1_2:
			bw_params->clk_table.entries[i].wck_ratio = 2;
			break;
		case WCK_RATIO_1_4:
			bw_params->clk_table.entries[i].wck_ratio = 4;
			break;
		default:
			bw_params->clk_table.entries[i].wck_ratio = 1;
		}
		bw_params->clk_table.entries[i].dcfclk_mhz = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
		bw_params->clk_table.entries[i].socclk_mhz = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage);
		bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
		bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
	}

	bw_params->vram_type = bios_info->memory_type;
	bw_params->num_channels = bios_info->ma_channel_number;

	for (i = 0; i < WM_SET_COUNT; i++) {
		bw_params->wm_table.entries[i].wm_inst = i;

		if (i >= bw_params->clk_table.num_entries) {
			bw_params->wm_table.entries[i].valid = false;
			continue;
		}

		bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG;
		bw_params->wm_table.entries[i].valid = true;
	}
}

static struct clk_mgr_funcs dcn31_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.update_clocks = dcn31_update_clocks,
	.init_clocks = dcn31_init_clocks,
	.enable_pme_wa = dcn31_enable_pme_wa,
	.are_clock_states_equal = dcn31_are_clock_states_equal,
	.notify_wm_ranges = dcn31_notify_wm_ranges
};
extern struct clk_mgr_funcs dcn3_fpga_funcs;

void dcn31_clk_mgr_construct(
		struct dc_context *ctx,
		struct clk_mgr_dcn31 *clk_mgr,
		struct pp_smu_funcs *pp_smu,
		struct dccg *dccg)
{
	struct dcn31_smu_dpm_clks smu_dpm_clks = { 0 };

	clk_mgr->base.base.ctx = ctx;
	clk_mgr->base.base.funcs = &dcn31_funcs;

	clk_mgr->base.pp_smu = pp_smu;

	clk_mgr->base.dccg = dccg;
	clk_mgr->base.dfs_bypass_disp_clk = 0;

	clk_mgr->base.dprefclk_ss_percentage = 0;
	clk_mgr->base.dprefclk_ss_divider = 1000;
	clk_mgr->base.ss_on_dprefclk = false;

	clk_mgr->smu_wm_set.wm_set = (struct dcn31_watermarks *)dm_helpers_allocate_gpu_mem(
				clk_mgr->base.base.ctx,
				DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
				sizeof(struct dcn31_watermarks),
				&clk_mgr->smu_wm_set.mc_address.quad_part);

	if (clk_mgr->smu_wm_set.wm_set == 0) {
		clk_mgr->smu_wm_set.wm_set = &dummy_wms;
		clk_mgr->smu_wm_set.mc_address.quad_part = 0;
	}
	ASSERT(clk_mgr->smu_wm_set.wm_set);

	smu_dpm_clks.dpm_clks = (DpmClocks_t *)dm_helpers_allocate_gpu_mem(
				clk_mgr->base.base.ctx,
				DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
				sizeof(DpmClocks_t),
				&smu_dpm_clks.mc_address.quad_part);

	if (smu_dpm_clks.dpm_clks == NULL) {
		smu_dpm_clks.dpm_clks = &dummy_clocks;
		smu_dpm_clks.mc_address.quad_part = 0;
	}

	ASSERT(smu_dpm_clks.dpm_clks);

	if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
		clk_mgr->base.base.funcs = &dcn3_fpga_funcs;
	} else {
		struct clk_log_info log_info = {0};

		clk_mgr->base.smu_ver = dcn31_smu_get_smu_version(&clk_mgr->base);

		if (clk_mgr->base.smu_ver)
			clk_mgr->base.smu_present = true;

		/* TODO: Check we get what we expect during bringup */
		clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);

		if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
			dcn31_bw_params.wm_table = lpddr5_wm_table;
		} else {
			dcn31_bw_params.wm_table = ddr4_wm_table;
		}
		/* Saved clocks configured at boot for debug purposes */
		dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);

	}

	clk_mgr->base.base.dprefclk_khz = 600000;
	clk_mgr->base.dccg->ref_dtbclk_khz = 600000;
	dce_clock_read_ss_info(&clk_mgr->base);

	clk_mgr->base.base.bw_params = &dcn31_bw_params;

	if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
		dcn31_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);

		if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
			dcn31_clk_mgr_helper_populate_bw_params(
					&clk_mgr->base,
					ctx->dc_bios->integrated_info,
					smu_dpm_clks.dpm_clks);
		}
	}

	if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
		dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
				smu_dpm_clks.dpm_clks);
}

void dcn31_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int)
{
	struct clk_mgr_dcn31 *clk_mgr = TO_CLK_MGR_DCN31(clk_mgr_int);

	if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0)
		dm_helpers_free_gpu_mem(clk_mgr_int->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
				clk_mgr->smu_wm_set.wm_set);
}
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h (new file, 103 lines)
@@ -0,0 +1,103 @@
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef __DCN31_CLK_MGR_H__
#define __DCN31_CLK_MGR_H__
#include "clk_mgr_internal.h"

//CLK1_CLK_PLL_REQ
#ifndef CLK11_CLK1_CLK_PLL_REQ__FbMult_int__SHIFT
#define CLK11_CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
#define CLK11_CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
#define CLK11_CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
#define CLK11_CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
#define CLK11_CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK11_CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
//CLK1_CLK0_DFS_CNTL
#define CLK11_CLK1_CLK0_DFS_CNTL__CLK0_DIVIDER__SHIFT 0x0
#define CLK11_CLK1_CLK0_DFS_CNTL__CLK0_DIVIDER_MASK 0x0000007FL
/* DPREF clock related */
#define CLK0_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT 0x0
#define CLK0_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK 0x0000007FL
#define CLK1_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT 0x0
#define CLK1_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK 0x0000007FL
#define CLK2_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT 0x0
#define CLK2_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK 0x0000007FL
#define CLK3_CLK3_DFS_CNTL__CLK3_DIVIDER__SHIFT 0x0
#define CLK3_CLK3_DFS_CNTL__CLK3_DIVIDER_MASK 0x0000007FL

//CLK3_0_CLK3_CLK_PLL_REQ
#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
#define CLK3_0_CLK3_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
#define CLK3_0_CLK3_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK3_0_CLK3_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L

#define mmCLK0_CLK3_DFS_CNTL 0x16C60
#define mmCLK00_CLK0_CLK3_DFS_CNTL 0x16C60
#define mmCLK01_CLK0_CLK3_DFS_CNTL 0x16E60
#define mmCLK02_CLK0_CLK3_DFS_CNTL 0x17060
#define mmCLK03_CLK0_CLK3_DFS_CNTL 0x17260

#define mmCLK0_CLK_PLL_REQ 0x16C10
#define mmCLK00_CLK0_CLK_PLL_REQ 0x16C10
#define mmCLK01_CLK0_CLK_PLL_REQ 0x16E10
#define mmCLK02_CLK0_CLK_PLL_REQ 0x17010
#define mmCLK03_CLK0_CLK_PLL_REQ 0x17210

#define mmCLK1_CLK_PLL_REQ 0x1B00D
#define mmCLK10_CLK1_CLK_PLL_REQ 0x1B00D
#define mmCLK11_CLK1_CLK_PLL_REQ 0x1B20D
#define mmCLK12_CLK1_CLK_PLL_REQ 0x1B40D
#define mmCLK13_CLK1_CLK_PLL_REQ 0x1B60D

#define mmCLK2_CLK_PLL_REQ 0x17E0D

/* AMCLK */
#define mmCLK11_CLK1_CLK0_DFS_CNTL 0x1B23F
#define mmCLK11_CLK1_CLK_PLL_REQ 0x1B20D
#endif

struct dcn31_watermarks;

struct dcn31_smu_watermark_set {
	struct dcn31_watermarks *wm_set;
	union large_integer mc_address;
};

struct clk_mgr_dcn31 {
	struct clk_mgr_internal base;
	struct dcn31_smu_watermark_set smu_wm_set;
};

void dcn31_clk_mgr_construct(struct dc_context *ctx,
		struct clk_mgr_dcn31 *clk_mgr,
		struct pp_smu_funcs *pp_smu,
		struct dccg *dccg);

void dcn31_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);

#endif //__DCN31_CLK_MGR_H__
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c (new file, 333 lines)
@@ -0,0 +1,333 @@
/*
 * Copyright 2012-16 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/delay.h>
#include "core_types.h"
#include "clk_mgr_internal.h"
#include "reg_helper.h"
#include "dm_helpers.h"
#include "dcn31_smu.h"

#include "yellow_carp_offset.h"
#include "mp/mp_13_0_1_offset.h"
#include "mp/mp_13_0_1_sh_mask.h"

#define REG(reg_name) \
	(MP0_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)

#define FN(reg_name, field) \
	FD(reg_name##__##field)

#define VBIOSSMC_MSG_TestMessage 0x1
#define VBIOSSMC_MSG_GetSmuVersion 0x2
#define VBIOSSMC_MSG_PowerUpGfx 0x3
#define VBIOSSMC_MSG_SetDispclkFreq 0x4
#define VBIOSSMC_MSG_SetDprefclkFreq 0x5 //Not used. DPRef is constant
#define VBIOSSMC_MSG_SetDppclkFreq 0x6
#define VBIOSSMC_MSG_SetHardMinDcfclkByFreq 0x7
#define VBIOSSMC_MSG_SetMinDeepSleepDcfclk 0x8
#define VBIOSSMC_MSG_SetPhyclkVoltageByFreq 0x9 //Keep it in case VMIN does not support phy clk
#define VBIOSSMC_MSG_GetFclkFrequency 0xA
#define VBIOSSMC_MSG_SetDisplayCount 0xB //Not used anymore
#define VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0xC //Not used anymore
#define VBIOSSMC_MSG_UpdatePmeRestore 0xD
#define VBIOSSMC_MSG_SetVbiosDramAddrHigh 0xE //Used for WM table txfr
#define VBIOSSMC_MSG_SetVbiosDramAddrLow 0xF
#define VBIOSSMC_MSG_TransferTableSmu2Dram 0x10
#define VBIOSSMC_MSG_TransferTableDram2Smu 0x11
#define VBIOSSMC_MSG_SetDisplayIdleOptimizations 0x12
#define VBIOSSMC_MSG_GetDprefclkFreq 0x13
#define VBIOSSMC_MSG_GetDtbclkFreq 0x14
#define VBIOSSMC_MSG_AllowZstatesEntry 0x15
#define VBIOSSMC_MSG_DisallowZstatesEntry 0x16
#define VBIOSSMC_MSG_SetDtbClk 0x17
#define VBIOSSMC_Message_Count 0x18

#define VBIOSSMC_Status_BUSY 0x0
#define VBIOSSMC_Result_OK 0x1
#define VBIOSSMC_Result_Failed 0xFF
#define VBIOSSMC_Result_UnknownCmd 0xFE
#define VBIOSSMC_Result_CmdRejectedPrereq 0xFD
#define VBIOSSMC_Result_CmdRejectedBusy 0xFC

/*
 * Function to be used instead of REG_WAIT macro because the wait ends when
 * the register is NOT EQUAL to zero, and because the translation in msg_if.h
 * won't work with REG_WAIT.
 */
static uint32_t dcn31_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries)
{
	uint32_t res_val = VBIOSSMC_Status_BUSY;

	do {
		res_val = REG_READ(MP1_SMN_C2PMSG_91);
		if (res_val != VBIOSSMC_Status_BUSY)
			break;

		if (delay_us >= 1000)
			msleep(delay_us/1000);
		else if (delay_us > 0)
			udelay(delay_us);
	} while (max_retries--);

	return res_val;
}

int dcn31_smu_send_msg_with_param(
		struct clk_mgr_internal *clk_mgr,
		unsigned int msg_id, unsigned int param)
{
	uint32_t result;

	result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000);
	ASSERT(result == VBIOSSMC_Result_OK);

	if (result == VBIOSSMC_Status_BUSY) {
		return -1;
	}

	/* First clear response register */
	REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY);

	/* Set the parameter register for the SMU message, unit is MHz */
	REG_WRITE(MP1_SMN_C2PMSG_83, param);

	/* Trigger the message transaction by writing the message ID */
	REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);

	result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000);

	if (IS_SMU_TIMEOUT(result)) {
		ASSERT(0);
		dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000);
	}

	return REG_READ(MP1_SMN_C2PMSG_83);
}

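
The dcn31_smu_set_*() wrappers that follow all take a request in kHz, round it up to whole MHz with (khz + 999) / 1000 before writing the SMU parameter register, and scale the SMU's MHz reply back to kHz. A standalone sketch of that conversion; khz_to_mhz_round_up() and the sample value are invented for illustration.

#include <stdio.h>

/* Round up so a request is never silently lowered by truncation;
 * equivalent in spirit to the kernel's DIV_ROUND_UP(khz, 1000). */
static int khz_to_mhz_round_up(int khz)
{
	return (khz + 999) / 1000;
}

int main(void)
{
	int requested_khz = 600001;
	int msg_param_mhz = khz_to_mhz_round_up(requested_khz);

	/* the SMU echoes the frequency it actually set, in MHz */
	printf("request %d kHz -> param %d MHz -> actual %d kHz\n",
	       requested_khz, msg_param_mhz, msg_param_mhz * 1000);
	return 0;
}
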
int dcn31_smu_get_smu_version(struct clk_mgr_internal *clk_mgr)
{
	return dcn31_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_GetSmuVersion,
			0);
}


int dcn31_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz)
{
	int actual_dispclk_set_mhz = -1;

	if (!clk_mgr->smu_present)
		return requested_dispclk_khz;

	/* Unit of SMU msg parameter is MHz */
	actual_dispclk_set_mhz = dcn31_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetDispclkFreq,
			(requested_dispclk_khz + 999) / 1000);

	return actual_dispclk_set_mhz * 1000;
}

int dcn31_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
{
	int actual_dprefclk_set_mhz = -1;

	if (!clk_mgr->smu_present)
		return clk_mgr->base.dprefclk_khz;

	actual_dprefclk_set_mhz = dcn31_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetDprefclkFreq,
			(clk_mgr->base.dprefclk_khz + 999) / 1000);

	/* TODO: add code for programming DP DTO; currently this is done by the command table */

	return actual_dprefclk_set_mhz * 1000;
}

int dcn31_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz)
{
	int actual_dcfclk_set_mhz = -1;

	if (!clk_mgr->base.ctx->dc->debug.pstate_enabled)
		return -1;

	if (!clk_mgr->smu_present)
		return requested_dcfclk_khz;

	actual_dcfclk_set_mhz = dcn31_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
			(requested_dcfclk_khz + 999) / 1000);

	return actual_dcfclk_set_mhz * 1000;
}

int dcn31_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz)
{
	int actual_min_ds_dcfclk_mhz = -1;

	if (!clk_mgr->base.ctx->dc->debug.pstate_enabled)
		return -1;

	if (!clk_mgr->smu_present)
		return requested_min_ds_dcfclk_khz;

	actual_min_ds_dcfclk_mhz = dcn31_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetMinDeepSleepDcfclk,
			(requested_min_ds_dcfclk_khz + 999) / 1000);

	return actual_min_ds_dcfclk_mhz * 1000;
}

int dcn31_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz)
{
	int actual_dppclk_set_mhz = -1;

	if (!clk_mgr->smu_present)
		return requested_dpp_khz;

	actual_dppclk_set_mhz = dcn31_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetDppclkFreq,
			(requested_dpp_khz + 999) / 1000);

	return actual_dppclk_set_mhz * 1000;
}

void dcn31_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, uint32_t idle_info)
{
	if (!clk_mgr->base.ctx->dc->debug.pstate_enabled)
		return;

	if (!clk_mgr->smu_present)
		return;

	//TODO: Work with smu team to define optimization options.
	dcn31_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetDisplayIdleOptimizations,
			idle_info);
}

void dcn31_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
{
	union display_idle_optimization_u idle_info = { 0 };

	if (!clk_mgr->smu_present)
		return;

	if (enable) {
		idle_info.idle_info.df_request_disabled = 1;
		idle_info.idle_info.phy_ref_clk_off = 1;
	}

	dcn31_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetDisplayIdleOptimizations,
			idle_info.data);
}

void dcn31_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
{
	if (!clk_mgr->smu_present)
		return;

	dcn31_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_UpdatePmeRestore,
			0);
}

void dcn31_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
{
	if (!clk_mgr->smu_present)
		return;

	dcn31_smu_send_msg_with_param(clk_mgr,
			VBIOSSMC_MSG_SetVbiosDramAddrHigh, addr_high);
}

void dcn31_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low)
{
	if (!clk_mgr->smu_present)
		return;

	dcn31_smu_send_msg_with_param(clk_mgr,
			VBIOSSMC_MSG_SetVbiosDramAddrLow, addr_low);
}

void dcn31_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr)
{
	if (!clk_mgr->smu_present)
		return;

	dcn31_smu_send_msg_with_param(clk_mgr,
			VBIOSSMC_MSG_TransferTableSmu2Dram, TABLE_DPMCLOCKS);
}

void dcn31_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
{
	if (!clk_mgr->smu_present)
		return;

	dcn31_smu_send_msg_with_param(clk_mgr,
			VBIOSSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS);
}

void dcn31_smu_set_Z9_support(struct clk_mgr_internal *clk_mgr, bool support)
{
	//TODO: Work with smu team to define optimization options.
	unsigned int msg_id;

	if (!clk_mgr->smu_present)
		return;

	if (support)
		msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
	else
		msg_id = VBIOSSMC_MSG_DisallowZstatesEntry;

	dcn31_smu_send_msg_with_param(
			clk_mgr,
			msg_id,
			0);

}

/* Arg = 1: turn DTB clock on; 0: turn DTB clock off. When on, it runs at 600 MHz. */
void dcn31_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
{
	if (!clk_mgr->smu_present)
		return;

	dcn31_smu_send_msg_with_param(
			clk_mgr,
			VBIOSSMC_MSG_SetDtbClk,
			enable);
}
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.h (new file, 271 lines)
@@ -0,0 +1,271 @@
|
||||
/*
|
||||
* Copyright 2018 Advanced Micro Devices, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: AMD
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef DAL_DC_31_SMU_H_
|
||||
#define DAL_DC_31_SMU_H_
|
||||
|
||||
#ifndef PMFW_DRIVER_IF_H
|
||||
#define PMFW_DRIVER_IF_H
|
||||
#define PMFW_DRIVER_IF_VERSION 4
|
||||
|
||||
typedef struct {
|
||||
int32_t value;
|
||||
uint32_t numFractionalBits;
|
||||
} FloatInIntFormat_t;
|
||||
|
||||
typedef enum {
|
||||
DSPCLK_DCFCLK = 0,
|
||||
DSPCLK_DISPCLK,
|
||||
DSPCLK_PIXCLK,
|
||||
DSPCLK_PHYCLK,
|
||||
DSPCLK_COUNT,
|
||||
} DSPCLK_e;
|
||||
|
||||
typedef struct {
|
||||
uint16_t Freq; // in MHz
|
||||
uint16_t Vid; // min voltage in SVI3 VID
|
||||
} DisplayClockTable_t;
|
||||
|
||||
typedef struct {
|
||||
uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz)
|
||||
uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz)
|
||||
uint16_t MinMclk;
|
||||
uint16_t MaxMclk;
|
||||
|
||||
uint8_t WmSetting;
|
||||
uint8_t WmType; // Used for normal pstate change or memory retraining
|
||||
uint8_t Padding[2];
|
||||
} WatermarkRowGeneric_t;
|
||||
|
||||
#define NUM_WM_RANGES 4
|
||||
#define WM_PSTATE_CHG 0
|
||||
#define WM_RETRAINING 1
|
||||
|
||||
typedef enum {
|
||||
WM_SOCCLK = 0,
|
||||
WM_DCFCLK,
|
||||
WM_COUNT,
|
||||
} WM_CLOCK_e;
|
||||
|
||||
typedef struct {
|
||||
// Watermarks
|
||||
WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];
|
||||
|
||||
uint32_t MmHubPadding[7]; // SMU internal use
|
||||
} Watermarks_t;
typedef enum {
	CUSTOM_DPM_SETTING_GFXCLK,
	CUSTOM_DPM_SETTING_CCLK,
	CUSTOM_DPM_SETTING_FCLK_CCX,
	CUSTOM_DPM_SETTING_FCLK_GFX,
	CUSTOM_DPM_SETTING_FCLK_STALLS,
	CUSTOM_DPM_SETTING_LCLK,
	CUSTOM_DPM_SETTING_COUNT,
} CUSTOM_DPM_SETTING_e;

typedef struct {
	uint8_t             ActiveHystLimit;
	uint8_t             IdleHystLimit;
	uint8_t             FPS;
	uint8_t             MinActiveFreqType;
	FloatInIntFormat_t  MinActiveFreq;
	FloatInIntFormat_t  PD_Data_limit;
	FloatInIntFormat_t  PD_Data_time_constant;
	FloatInIntFormat_t  PD_Data_error_coeff;
	FloatInIntFormat_t  PD_Data_error_rate_coeff;
} DpmActivityMonitorCoeffExt_t;

typedef struct {
	DpmActivityMonitorCoeffExt_t DpmActivityMonitorCoeff[CUSTOM_DPM_SETTING_COUNT];
} CustomDpmSettings_t;

#define NUM_DCFCLK_DPM_LEVELS   8
#define NUM_DISPCLK_DPM_LEVELS  8
#define NUM_DPPCLK_DPM_LEVELS   8
#define NUM_SOCCLK_DPM_LEVELS   8
#define NUM_VCN_DPM_LEVELS      8
#define NUM_SOC_VOLTAGE_LEVELS  8
#define NUM_DF_PSTATE_LEVELS    4

typedef enum{
	WCK_RATIO_1_1 = 0,  // DDR5, Wck:ck is always 1:1;
	WCK_RATIO_1_2,
	WCK_RATIO_1_4,
	WCK_RATIO_MAX
} WCK_RATIO_e;

typedef struct {
	uint32_t FClk;
	uint32_t MemClk;
	uint32_t Voltage;
	uint8_t  WckRatio;
	uint8_t  Spare[3];
} DfPstateTable_t;

//Freq in MHz
//Voltage in milli volts with 2 fractional bits
typedef struct {
	uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
	uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
	uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
	uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
	uint32_t VClocks[NUM_VCN_DPM_LEVELS];
	uint32_t DClocks[NUM_VCN_DPM_LEVELS];
	uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
	DfPstateTable_t DfPstateTable[NUM_DF_PSTATE_LEVELS];

	uint8_t  NumDcfClkLevelsEnabled;
	uint8_t  NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
	uint8_t  NumSocClkLevelsEnabled;
	uint8_t  VcnClkLevelsEnabled;     //Applies to both Vclk and Dclk
	uint8_t  NumDfPstatesEnabled;
	uint8_t  spare[3];

	uint32_t MinGfxClk;
	uint32_t MaxGfxClk;
} DpmClocks_t;
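/* Illustrative only: the *LevelsEnabled counters bound how much of each
 * fixed-size table is valid, so a consumer scanning for the peak DCFCLK
 * would do something like:
 *
 *	uint32_t i, max_dcfclk_mhz = 0;
 *
 *	for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++)
 *		if (clock_table->DcfClocks[i] > max_dcfclk_mhz)
 *			max_dcfclk_mhz = clock_table->DcfClocks[i];
 */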
// Throttler Status Bitmask
#define THROTTLER_STATUS_BIT_SPL         0
#define THROTTLER_STATUS_BIT_FPPT        1
#define THROTTLER_STATUS_BIT_SPPT        2
#define THROTTLER_STATUS_BIT_SPPT_APU    3
#define THROTTLER_STATUS_BIT_THM_CORE    4
#define THROTTLER_STATUS_BIT_THM_GFX     5
#define THROTTLER_STATUS_BIT_THM_SOC     6
#define THROTTLER_STATUS_BIT_TDC_VDD     7
#define THROTTLER_STATUS_BIT_TDC_SOC     8
#define THROTTLER_STATUS_BIT_PROCHOT_CPU 9
#define THROTTLER_STATUS_BIT_PROCHOT_GFX 10
#define THROTTLER_STATUS_BIT_EDC_CPU     11
#define THROTTLER_STATUS_BIT_EDC_GFX     12

typedef struct {
	uint16_t GfxclkFrequency;      //[MHz]
	uint16_t SocclkFrequency;      //[MHz]
	uint16_t VclkFrequency;        //[MHz]
	uint16_t DclkFrequency;        //[MHz]
	uint16_t MemclkFrequency;      //[MHz]
	uint16_t spare;

	uint16_t GfxActivity;          //[centi]
	uint16_t UvdActivity;          //[centi]

	uint16_t Voltage[2];           //[mV] indices: VDDCR_VDD, VDDCR_SOC
	uint16_t Current[2];           //[mA] indices: VDDCR_VDD, VDDCR_SOC
	uint16_t Power[2];             //[mW] indices: VDDCR_VDD, VDDCR_SOC

	//3rd party tools in Windows need this info in the case of APUs
	uint16_t CoreFrequency[8];     //[MHz]
	uint16_t CorePower[8];         //[mW]
	uint16_t CoreTemperature[8];   //[centi-Celsius]
	uint16_t L3Frequency;          //[MHz]
	uint16_t L3Temperature;        //[centi-Celsius]

	uint16_t GfxTemperature;       //[centi-Celsius]
	uint16_t SocTemperature;       //[centi-Celsius]
	uint16_t ThrottlerStatus;

	uint16_t CurrentSocketPower;   //[mW]
	uint16_t StapmOriginalLimit;   //[W]
	uint16_t StapmCurrentLimit;    //[W]
	uint16_t ApuPower;             //[W]
	uint16_t dGpuPower;            //[W]

	uint16_t VddTdcValue;          //[mA]
	uint16_t SocTdcValue;          //[mA]
	uint16_t VddEdcValue;          //[mA]
	uint16_t SocEdcValue;          //[mA]

	uint16_t InfrastructureCpuMaxFreq; //[MHz]
	uint16_t InfrastructureGfxMaxFreq; //[MHz]
} SmuMetrics_t;
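/* Illustrative only: ThrottlerStatus is a bitmask over the bit positions
 * defined above, so a hypothetical decode helper would be:
 *
 *	static inline bool smu_throttler_active(const SmuMetrics_t *m,
 *			unsigned int bit)
 *	{
 *		return (m->ThrottlerStatus >> bit) & 0x1;
 *	}
 *
 * e.g. smu_throttler_active(m, THROTTLER_STATUS_BIT_THM_GFX) reports GFX
 * thermal throttling.
 */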

// Workload bits
#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0
#define WORKLOAD_PPLIB_VIDEO_BIT          2
#define WORKLOAD_PPLIB_VR_BIT             3
#define WORKLOAD_PPLIB_COMPUTE_BIT        4
#define WORKLOAD_PPLIB_CUSTOM_BIT         5
#define WORKLOAD_PPLIB_COUNT              6

#define TABLE_BIOS_IF      0 // Called by BIOS
#define TABLE_WATERMARKS   1 // Called by DAL through VBIOS
#define TABLE_CUSTOM_DPM   2 // Called by Driver
#define TABLE_SPARE1       3
#define TABLE_DPMCLOCKS    4 // Called by Driver
#define TABLE_MOMENTARY_PM 5 // Called by Tools
#define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log
#define TABLE_SMU_METRICS  7 // Called by Driver
#define TABLE_COUNT        8

#endif
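/* Illustrative only: the driver-owned tables above are exchanged through a
 * DRAM buffer whose address is handed to the SMU first, roughly:
 *
 *	dcn31_smu_set_dram_addr_high(clk_mgr, upper_32_bits(mc_addr));
 *	dcn31_smu_set_dram_addr_low(clk_mgr, lower_32_bits(mc_addr));
 *	dcn31_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
 *
 * (prototypes below; upper/lower_32_bits are the usual kernel macros).
 */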
struct dcn31_watermarks {
	// Watermarks
	WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];

	uint32_t MmHubPadding[7]; // SMU internal use
};

struct dcn31_smu_dpm_clks {
	DpmClocks_t *dpm_clks;
	union large_integer mc_address;
};

/* TODO: taken from vgh, may not be correct */
struct display_idle_optimization {
	unsigned int df_request_disabled : 1;
	unsigned int phy_ref_clk_off     : 1;
	unsigned int s0i2_rdy            : 1;
	unsigned int reserved            : 29;
};

union display_idle_optimization_u {
	struct display_idle_optimization idle_info;
	uint32_t data;
};

int dcn31_smu_get_smu_version(struct clk_mgr_internal *clk_mgr);
int dcn31_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispclk_khz);
int dcn31_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr);
int dcn31_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_dcfclk_khz);
int dcn31_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz);
int dcn31_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz);
void dcn31_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, uint32_t idle_info);
void dcn31_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable);
void dcn31_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr);
void dcn31_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high);
void dcn31_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low);
void dcn31_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr);
void dcn31_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);

void dcn31_smu_set_Z9_support(struct clk_mgr_internal *clk_mgr, bool support);
void dcn31_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable);

#endif /* DAL_DC_31_SMU_H_ */
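For context, a minimal usage sketch of the idle-optimization union declared above; the bitfields are packed into a u32 before being sent through dcn31_smu_set_display_idle_optimization() (clk_mgr is assumed to be a valid struct clk_mgr_internal pointer):

	union display_idle_optimization_u idle = { .data = 0 };

	idle.idle_info.df_request_disabled = 1;
	idle.idle_info.phy_ref_clk_off = 1;
	dcn31_smu_set_display_idle_optimization(clk_mgr, idle.data);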
@ -325,6 +325,48 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
	return ret;
}

/**
 *****************************************************************************
 *  Function: dc_stream_get_last_used_drr_vtotal
 *
 *  @brief
 *     Looks up the pipe context of dc_stream_state and gets the
 *     last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 *  @param [in] dc: dc reference
 *  @param [in] stream: Initial dc stream state
 *  @param [out] refresh_rate: Receives the last VTOTAL used by DRR
 *****************************************************************************
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
		struct dc_stream_state *stream,
		uint32_t *refresh_rate)
{
	bool status = false;

	int i = 0;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			/* Only execute if a function pointer has been defined for
			 * the DC version in question
			 */
			if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
				pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

				status = true;

				break;
			}
		}
	}

	return status;
}

bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
@ -1482,6 +1524,13 @@ static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
	return stream_mask;
}

#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
void dc_z10_restore(struct dc *dc)
{
	if (dc->hwss.z10_restore)
		dc->hwss.z10_restore(dc);
}
#endif
/*
 * Applies given context to HW and copy it into current context.
 * It's up to the user to release the src context afterwards.
@ -1495,6 +1544,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	dc_z10_restore(dc);
#endif
	dc_allow_idle_optimizations(dc, false);
#endif

@ -1917,8 +1969,13 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
	if (u->plane_info->dcc.enable != u->surface->dcc.enable
			|| u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
			|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
		/* During DCC on/off, stutter period is calculated before
		 * DCC has fully transitioned. This results in incorrect
		 * stutter period calculation. Triggering a full update will
		 * recalculate stutter period.
		 */
		update_flags->bits.dcc_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (resource_pixel_format_to_bpp(u->plane_info->format) !=
@ -2569,6 +2626,10 @@ static void commit_planes_for_stream(struct dc *dc,
	int i, j;
	struct pipe_ctx *top_pipe_to_program = NULL;

#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	dc_z10_restore(dc);
#endif

	if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
		/* Optimize seamless boot flag keeps clocks and watermarks high until
		 * first flip. After first flip, optimization is required to lower
@ -3024,6 +3085,9 @@ void dc_set_power_state(
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
		dc_z10_restore(dc);
#endif
		if (dc->ctx->dmub_srv)
			dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);

@ -3256,10 +3320,13 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable)
			continue;

		if (link->psr_settings.psr_feature_enabled) {
			if (enable && !link->psr_settings.psr_allow_active)
				return dc_link_set_psr_allow_active(link, true, false, false);
			else if (!enable && link->psr_settings.psr_allow_active)
				return dc_link_set_psr_allow_active(link, false, true, false);
			if (enable && !link->psr_settings.psr_allow_active) {
				if (!dc_link_set_psr_allow_active(link, true, false, false))
					return false;
			} else if (!enable && link->psr_settings.psr_allow_active) {
				if (!dc_link_set_psr_allow_active(link, false, true, false))
					return false;
			}
		}
	}

@ -291,3 +291,136 @@ bool hwss_wait_for_blank_complete(

	return true;
}

void get_mpctree_visual_confirm_color(
		struct pipe_ctx *pipe_ctx,
		struct tg_color *color)
{
	const struct tg_color pipe_colors[6] = {
			{MAX_TG_COLOR_VALUE, 0, 0}, /* red */
			{MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE / 4, 0}, /* orange */
			{MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, /* yellow */
			{0, MAX_TG_COLOR_VALUE, 0}, /* green */
			{0, 0, MAX_TG_COLOR_VALUE}, /* blue */
			{MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, /* purple */
	};

	struct pipe_ctx *top_pipe = pipe_ctx;

	while (top_pipe->top_pipe)
		top_pipe = top_pipe->top_pipe;

	*color = pipe_colors[top_pipe->pipe_idx];
}

void get_surface_visual_confirm_color(
		const struct pipe_ctx *pipe_ctx,
		struct tg_color *color)
{
	uint32_t color_value = MAX_TG_COLOR_VALUE;

	switch (pipe_ctx->plane_res.scl_data.format) {
	case PIXEL_FORMAT_ARGB8888:
		/* set border color to red */
		color->color_r_cr = color_value;
		if (pipe_ctx->plane_state->layer_index > 0) {
			/* set border color to pink */
			color->color_b_cb = color_value;
			color->color_g_y = color_value * 0.5;
		}
		break;

	case PIXEL_FORMAT_ARGB2101010:
		/* set border color to blue */
		color->color_b_cb = color_value;
		if (pipe_ctx->plane_state->layer_index > 0) {
			/* set border color to cyan */
			color->color_g_y = color_value;
		}
		break;
	case PIXEL_FORMAT_420BPP8:
		/* set border color to green */
		color->color_g_y = color_value;
		break;
	case PIXEL_FORMAT_420BPP10:
		/* set border color to yellow */
		color->color_g_y = color_value;
		color->color_r_cr = color_value;
		break;
	case PIXEL_FORMAT_FP16:
		/* set border color to white */
		color->color_r_cr = color_value;
		color->color_b_cb = color_value;
		color->color_g_y = color_value;
		if (pipe_ctx->plane_state->layer_index > 0) {
			/* set border color to orange */
			color->color_g_y = 0.22 * color_value;
			color->color_b_cb = 0;
		}
		break;
	default:
		break;
	}
}

void get_hdr_visual_confirm_color(
		struct pipe_ctx *pipe_ctx,
		struct tg_color *color)
{
	uint32_t color_value = MAX_TG_COLOR_VALUE;

	/* Determine the overscan color based on the top-most (desktop) plane's context */
	struct pipe_ctx *top_pipe_ctx = pipe_ctx;

	while (top_pipe_ctx->top_pipe != NULL)
		top_pipe_ctx = top_pipe_ctx->top_pipe;

	switch (top_pipe_ctx->plane_res.scl_data.format) {
	case PIXEL_FORMAT_ARGB2101010:
		if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
			/* HDR10, ARGB2101010 - set border color to red */
			color->color_r_cr = color_value;
		} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
			/* FreeSync 2 ARGB2101010 - set border color to pink */
			color->color_r_cr = color_value;
			color->color_b_cb = color_value;
		}
		break;
	case PIXEL_FORMAT_FP16:
		if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
			/* HDR10, FP16 - set border color to blue */
			color->color_b_cb = color_value;
		} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
			/* FreeSync 2 HDR - set border color to green */
			color->color_g_y = color_value;
		}
		break;
	default:
		/* SDR - set border color to Gray */
		color->color_r_cr = color_value/2;
		color->color_b_cb = color_value/2;
		color->color_g_y = color_value/2;
		break;
	}
}

void get_surface_tile_visual_confirm_color(
		struct pipe_ctx *pipe_ctx,
		struct tg_color *color)
{
	uint32_t color_value = MAX_TG_COLOR_VALUE;
	/* Determine the overscan color based on the top-most (desktop) plane's context */
	struct pipe_ctx *top_pipe_ctx = pipe_ctx;

	while (top_pipe_ctx->top_pipe != NULL)
		top_pipe_ctx = top_pipe_ctx->top_pipe;

	switch (top_pipe_ctx->plane_state->tiling_info.gfx9.swizzle) {
	case DC_SW_LINEAR:
		/* LINEAR Surface - set border color to red */
		color->color_r_cr = color_value;
		break;
	default:
		break;
	}
}

@ -1099,24 +1099,6 @@ static bool dc_link_detect_helper(struct dc_link *link,
			dc_is_dvi_signal(link->connector_signal)) {
			if (prev_sink)
				dc_sink_release(prev_sink);
			link_disconnect_sink(link);

			return false;
		}
		/*
		 * Abort detection for DP connectors if we have
		 * no EDID and connector is active converter
		 * as there are no display downstream
		 *
		 */
		if (dc_is_dp_sst_signal(link->connector_signal) &&
			(link->dpcd_caps.dongle_type ==
					DISPLAY_DONGLE_DP_VGA_CONVERTER ||
			link->dpcd_caps.dongle_type ==
					DISPLAY_DONGLE_DP_DVI_CONVERTER)) {
			if (prev_sink)
				dc_sink_release(prev_sink);
			link_disconnect_sink(link);

			return false;
		}
@ -2701,16 +2683,24 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active,
	struct dc *dc = link->ctx->dc;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dmub_psr *psr = dc->res_pool->psr;
	unsigned int panel_inst;

	if (psr == NULL && force_static)
		return false;

	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
		return false;

	link->psr_settings.psr_allow_active = allow_active;
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	if (!allow_active)
		dc_z10_restore(dc);
#endif

	if (psr != NULL && link->psr_settings.psr_feature_enabled) {
		if (force_static && psr->funcs->psr_force_static)
			psr->funcs->psr_force_static(psr);
		psr->funcs->psr_enable(psr, allow_active, wait);
			psr->funcs->psr_force_static(psr, panel_inst);
		psr->funcs->psr_enable(psr, allow_active, wait, panel_inst);
	} else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_settings.psr_feature_enabled)
		dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
	else
@ -2724,9 +2714,13 @@ bool dc_link_get_psr_state(const struct dc_link *link, enum dc_psr_state *state)
	struct dc *dc = link->ctx->dc;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dmub_psr *psr = dc->res_pool->psr;
	unsigned int panel_inst;

	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
		return false;

	if (psr != NULL && link->psr_settings.psr_feature_enabled)
		psr->funcs->psr_get_state(psr, state);
		psr->funcs->psr_get_state(psr, state, panel_inst);
	else if (dmcu != NULL && link->psr_settings.psr_feature_enabled)
		dmcu->funcs->get_psr_state(dmcu, state);

@ -2776,6 +2770,7 @@ bool dc_link_setup_psr(struct dc_link *link,
	struct dmcu *dmcu;
	struct dmub_psr *psr;
	int i;
	unsigned int panel_inst;
	/* updateSinkPsrDpcdConfig*/
	union dpcd_psr_configuration psr_configuration;

@ -2791,6 +2786,9 @@ bool dc_link_setup_psr(struct dc_link *link,
	if (!dmcu && !psr)
		return false;

	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
		return false;


	memset(&psr_configuration, 0, sizeof(psr_configuration));

@ -2875,8 +2873,16 @@ bool dc_link_setup_psr(struct dc_link *link,
	psr_context->psr_level.u32all = 0;

	/*skip power down the single pipe since it blocks the cstate*/
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	if (link->ctx->asic_id.chip_family >= FAMILY_RV) {
		psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
		if (link->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && !dc->debug.disable_z10)
			psr_context->psr_level.bits.SKIP_CRTC_DISABLE = false;
	}
#else
	if (link->ctx->asic_id.chip_family >= FAMILY_RV)
		psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
#endif

	/* SMU will perform additional powerdown sequence.
	 * For unsupported ASICs, set psr_level flag to skip PSR
@ -2897,7 +2903,8 @@ bool dc_link_setup_psr(struct dc_link *link,
	psr_context->frame_delay = 0;

	if (psr)
		link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context);
		link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr,
			link, psr_context, panel_inst);
	else
		link->psr_settings.psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);

@ -2915,10 +2922,14 @@ void dc_link_get_psr_residency(const struct dc_link *link, uint32_t *residency)
{
	struct dc *dc = link->ctx->dc;
	struct dmub_psr *psr = dc->res_pool->psr;
	unsigned int panel_inst;

	// PSR residency measurements only supported on DMCUB
	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
		return;

	/* PSR residency measurements only supported on DMCUB */
	if (psr != NULL && link->psr_settings.psr_feature_enabled)
		psr->funcs->psr_get_residency(psr, residency);
		psr->funcs->psr_get_residency(psr, residency, panel_inst);
	else
		*residency = 0;
}
@ -3208,8 +3219,14 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
			dp_get_panel_mode(pipe_ctx->stream->link);

	config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
	/*stream_enc_inst*/
	config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
	config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
	config.link_enc_idx = pipe_ctx->stream->link->link_enc->transmitter - TRANSMITTER_UNIPHY_A;
	config.phy_idx = pipe_ctx->stream->link->link_enc->transmitter - TRANSMITTER_UNIPHY_A;
#endif
	config.dpms_off = dpms_off;
	config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
	config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP);

@ -435,7 +435,7 @@ bool dp_is_cr_done(enum dc_lane_count ln_count,
	return true;
}

static bool is_ch_eq_done(enum dc_lane_count ln_count,
bool dp_is_ch_eq_done(enum dc_lane_count ln_count,
		union lane_status *dpcd_lane_status)
{
	bool done = true;

@ -446,7 +446,7 @@ static bool is_ch_eq_done(enum dc_lane_count ln_count,
	return done;
}

static bool is_symbol_locked(enum dc_lane_count ln_count,
bool dp_is_symbol_locked(enum dc_lane_count ln_count,
		union lane_status *dpcd_lane_status)
{
	bool locked = true;

@ -457,7 +457,7 @@ static bool is_symbol_locked(enum dc_lane_count ln_count,
	return locked;
}

static inline bool is_interlane_aligned(union lane_align_status_updated align_status)
bool dp_is_interlane_aligned(union lane_align_status_updated align_status)
{
	return align_status.bits.INTERLANE_ALIGN_DONE == 1;
}

@ -865,9 +865,9 @@ static bool perform_post_lt_adj_req_sequence(
			if (!dp_is_cr_done(lane_count, dpcd_lane_status))
				return false;

			if (!is_ch_eq_done(lane_count, dpcd_lane_status) ||
					!is_symbol_locked(lane_count, dpcd_lane_status) ||
					!is_interlane_aligned(dpcd_lane_status_updated))
			if (!dp_is_ch_eq_done(lane_count, dpcd_lane_status) ||
					!dp_is_symbol_locked(lane_count, dpcd_lane_status) ||
					!dp_is_interlane_aligned(dpcd_lane_status_updated))
				return false;

			for (lane = 0; lane < (uint32_t)(lane_count); lane++) {

@ -913,7 +913,7 @@ static bool perform_post_lt_adj_req_sequence(
}

/* Only used for channel equalization */
static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval)
uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval)
{
	unsigned int aux_rd_interval_us = 400;

@ -998,7 +998,7 @@ static enum link_training_result perform_channel_equalization_sequence(

		if (is_repeater(link, offset))
			wait_time_microsec =
					translate_training_aux_read_interval(
					dp_translate_training_aux_read_interval(
						link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);

		dp_wait_for_training_aux_rd_interval(

@ -1021,9 +1021,9 @@ static enum link_training_result perform_channel_equalization_sequence(
			return LINK_TRAINING_EQ_FAIL_CR;

		/* 6. check CHEQ done*/
		if (is_ch_eq_done(lane_count, dpcd_lane_status) &&
				is_symbol_locked(lane_count, dpcd_lane_status) &&
				is_interlane_aligned(dpcd_lane_status_updated))
		if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
				dp_is_symbol_locked(lane_count, dpcd_lane_status) &&
				dp_is_interlane_aligned(dpcd_lane_status_updated))
			return LINK_TRAINING_SUCCESS;

		/* 7. update VS/PE/PC2 in lt_settings*/

@ -1917,6 +1917,8 @@ static void set_dp_mst_mode(struct dc_link *link, bool mst_enable)
		link->type = dc_connection_single;
		link->local_sink = link->remote_sinks[0];
		link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT;
		dc_sink_retain(link->local_sink);
		dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
	} else if (mst_enable == true &&
			link->type == dc_connection_single &&
			link->remote_sinks[0] != NULL) {

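The helpers renamed above all follow the same per-lane pattern; a generic sketch of that pattern (not the exact kernel code; union lane_status is assumed to expose the usual .bits flags from dc_dp_types.h):

	static bool all_lanes_ready(enum dc_lane_count ln_count,
			const union lane_status *dpcd_lane_status)
	{
		uint32_t lane;

		/* a training phase is done only when every active lane agrees */
		for (lane = 0; lane < (uint32_t)ln_count; lane++)
			if (!dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0)
				return false;
		return true;
	}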
@ -332,7 +332,16 @@ void dp_set_hw_test_pattern(
	uint32_t custom_pattern_size)
{
	struct encoder_set_dp_phy_pattern_param pattern_param = {0};
	struct link_encoder *encoder = link->link_enc;
	struct link_encoder *encoder;

	/* Access link encoder based on whether it is statically
	 * or dynamically assigned to a link.
	 */
	if (link->is_dig_mapping_flexible &&
			link->dc->res_pool->funcs->link_encs_assign)
		encoder = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
	else
		encoder = link->link_enc;

	pattern_param.dp_phy_pattern = test_pattern;
	pattern_param.custom_pattern = custom_pattern;

@ -58,6 +58,9 @@
#include "dcn301/dcn301_resource.h"
#include "dcn302/dcn302_resource.h"
#include "dcn303/dcn303_resource.h"
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
#include "../dcn31/dcn31_resource.h"
#endif
#endif

#define DC_LOGGER_INIT(logger)
@ -139,6 +142,14 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
		dc_version = DCN_VERSION_3_01;
		break;
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	case FAMILY_YELLOW_CARP:
		if (ASICREV_IS_YELLOW_CARP(asic_id.hw_internal_rev))
			dc_version = DCN_VERSION_3_1;
		break;
#endif

	default:
		dc_version = DCE_VERSION_UNKNOWN;
		break;
@ -222,6 +233,11 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
	case DCN_VERSION_3_03:
		res_pool = dcn303_create_resource_pool(init_data, dc);
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	case DCN_VERSION_3_1:
		res_pool = dcn31_create_resource_pool(init_data, dc);
		break;
#endif
#endif
	default:
		break;
@ -783,6 +799,11 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx)
	if (split_idx == split_count) {
		/* rightmost pipe is the remainder recout */
		data->recout.width -= data->h_active * split_count - data->recout.x;

		/* in ODM combine cases with MPO we can get negative widths */
		if (data->recout.width < 0)
			data->recout.width = 0;

		data->recout.x = 0;
	} else
		data->recout.width = data->h_active - data->recout.x;
@ -1042,9 +1063,16 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
	 * on certain displays, such as the Sharp 4k. 36bpp is needed
	 * to support SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 and
	 * SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616 with actual > 10 bpc
	 * precision on at least DCN display engines.
	 * precision on at least DCN display engines. However, at least
	 * Carrizo with DCE_VERSION_11_0 does not like 36 bpp lb depth,
	 * so use only 30 bpp on DCE_VERSION_11_0. Testing with DCE 11.2 and 8.3
	 * did not show such problems, so this seems to be the exception.
	 */
	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
	if (plane_state->ctx->dce_version != DCE_VERSION_11_0)
		pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
	else
		pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;

	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;

	if (pipe_ctx->plane_res.xfm != NULL)
@ -2114,6 +2142,16 @@ enum dc_status dc_validate_global_state(

	if (!new_ctx)
		return DC_ERROR_UNEXPECTED;
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)

	/*
	 * Update link encoder to stream assignment.
	 * TODO: Split out reason allocation from validation.
	 */
	if (dc->res_pool->funcs->link_encs_assign)
		dc->res_pool->funcs->link_encs_assign(
			dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
#endif

	if (dc->res_pool->funcs->validate_global) {
		result = dc->res_pool->funcs->validate_global(dc, new_ctx);

@ -294,6 +294,9 @@ bool dc_stream_set_cursor_attributes(
	stream->cursor_attributes = *attributes;

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	dc_z10_restore(dc);
#endif
	/* disable idle optimizations while updating cursor */
	if (dc->idle_optimizations_allowed) {
		dc_allow_idle_optimizations(dc, false);
@ -355,6 +358,9 @@ bool dc_stream_set_cursor_position(
	dc = stream->ctx->dc;
	res_ctx = &dc->current_state->res_ctx;
#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	dc_z10_restore(dc);
#endif

	/* disable idle optimizations if enabling cursor */
	if (dc->idle_optimizations_allowed && !stream->cursor_position.enable && position->enable) {

@ -45,7 +45,7 @@
/* forward declaration */
struct aux_payload;

#define DC_VER "3.2.137"
#define DC_VER "3.2.139"

#define MAX_SURFACES 3
#define MAX_PLANES 6
@ -318,6 +318,7 @@ enum visual_confirm {
	VISUAL_CONFIRM_HDR = 2,
	VISUAL_CONFIRM_MPCTREE = 4,
	VISUAL_CONFIRM_PSR = 5,
	VISUAL_CONFIRM_SWIZZLE = 9,
};

enum dcc_option {
@ -350,6 +351,13 @@ enum dcn_pwr_state {
	DCN_PWR_STATE_LOW_POWER = 3,
};

#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
enum dcn_z9_support_state {
	DCN_Z9_SUPPORT_UNKNOWN,
	DCN_Z9_SUPPORT_ALLOW,
	DCN_Z9_SUPPORT_DISALLOW,
};
#endif
/*
 * For any clocks that may differ per pipe
 * only the max is stored in this structure
@ -367,6 +375,10 @@ struct dc_clocks {
	int phyclk_khz;
	int dramclk_khz;
	bool p_state_change_support;
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	enum dcn_z9_support_state z9_support;
	bool dtbclk_en;
#endif
	enum dcn_pwr_state pwr_state;
	/*
	 * Elements below are not compared for the purposes of
@ -433,6 +445,7 @@ struct dc_bw_validation_profile {

union mem_low_power_enable_options {
	struct {
		bool vga: 1;
		bool i2c: 1;
		bool dmcu: 1;
		bool dscl: 1;
@ -487,6 +500,9 @@ struct dc_debug_options {
	bool disable_pplib_clock_request;
	bool disable_clock_gate;
	bool disable_mem_low_power;
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	bool pstate_enabled;
#endif
	bool disable_dmcu;
	bool disable_psr;
	bool force_abm_enable;
@ -505,6 +521,9 @@ struct dc_debug_options {
	unsigned int force_odm_combine; //bit vector based on otg inst
#if defined(CONFIG_DRM_AMD_DC_DCN)
	unsigned int force_odm_combine_4to1; //bit vector based on otg inst
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	bool disable_z9_mpc;
#endif
	unsigned int force_fclk_khz;
	bool enable_tri_buf;
@ -547,6 +566,10 @@ struct dc_debug_options {
	bool force_enable_edp_fec;
	/* FEC/PSR1 sequence enable delay in 100us */
	uint8_t fec_enable_delay_in100us;
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	bool disable_z10;
	bool enable_sw_cntl_psr;
#endif
};

struct dc_debug_data {
@ -571,6 +594,9 @@ struct dc_phy_addr_space_config {
		uint64_t page_table_start_addr;
		uint64_t page_table_end_addr;
		uint64_t page_table_base_addr;
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
		bool base_addr_is_mc_addr;
#endif
	} gart_config;

	bool valid;
@ -1308,6 +1334,9 @@ void dc_hardware_release(struct dc *dc);
#endif

bool dc_set_psr_allow_active(struct dc *dc, bool enable);
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
void dc_z10_restore(struct dc *dc);
#endif

bool dc_enable_dmub_notifications(struct dc *dc);

@ -180,6 +180,29 @@ bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
			dmub, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
			stream_mask, timeout) == DMUB_STATUS_OK;
}

#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;
	struct dc_context *dc_ctx;
	union dmub_fw_boot_status boot_status;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;
	dc_ctx = dc_dmub_srv->ctx;

	status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
		return false;
	}

	return boot_status.bits.restore_required;
}
#endif
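One plausible consumer of this query (wiring assumed, not shown in this hunk) is a DCN3.1 hardware-init path pairing the DMUB boot status with the z10 restore hook added earlier in this series:

	if (dc_dmub_srv_is_restore_required(dc->ctx->dmub_srv))
		dc_z10_restore(dc);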

bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_buf_entry *entry)
{
@ -62,6 +62,9 @@ bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub
bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
				    unsigned int stream_mask);

#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv);
#endif
bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_buf_entry *entry);

void dc_dmub_trace_event_control(struct dc *dc, bool enable);

@ -465,6 +465,10 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
				struct dc_stream_state *stream,
				struct dc_crtc_timing_adjust *adjust);

bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
		struct dc_stream_state *stream,
		uint32_t *refresh_rate);

bool dc_stream_get_crtc_position(struct dc *dc,
				 struct dc_stream_state **stream,
				 int num_streams,

@ -635,6 +635,7 @@ struct dce_hwseq_registers {
	uint32_t HPO_TOP_CLOCK_CONTROL;
	uint32_t ODM_MEM_PWR_CTRL3;
	uint32_t DMU_MEM_PWR_CNTL;
	uint32_t MMHUBBUB_MEM_PWR_CNTL;
};
 /* set field name */
#define HWS_SF(blk_name, reg_name, field_name, post_fix)\
@ -875,7 +876,8 @@ struct dce_hwseq_registers {
	HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \
	HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \
	HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \
	HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh)
	HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \
	HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh)

#define HWSEQ_DCN301_MASK_SH_LIST(mask_sh)\
	HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@ -1092,7 +1094,8 @@ struct dce_hwseq_registers {
	type AZALIA_AUDIO_DTO_MODULE; \
	type ODM_MEM_UNASSIGNED_PWR_MODE; \
	type ODM_MEM_VBLANK_PWR_MODE; \
	type DMCU_ERAM_MEM_PWR_FORCE;
	type DMCU_ERAM_MEM_PWR_FORCE; \
	type VGA_MEM_PWR_FORCE;

#define HWSEQ_DCN3_REG_FIELD_LIST(type) \
	type HPO_HDMISTREAMCLK_GATE_DIS;
@ -1103,11 +1106,22 @@ struct dce_hwseq_registers {
	type PANEL_DIGON_OVRD;\
	type PANEL_PWRSEQ_TARGET_STATE_R;

#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
#define HWSEQ_DCN31_REG_FIELD_LIST(type) \
	type DOMAIN_POWER_FORCEON;\
	type DOMAIN_POWER_GATE;\
	type DOMAIN_PGFSM_PWR_STATUS;\
	type HPO_HDMISTREAMCLK_G_GATE_DIS;

#endif
struct dce_hwseq_shift {
	HWSEQ_REG_FIELD_LIST(uint8_t)
	HWSEQ_DCN_REG_FIELD_LIST(uint8_t)
	HWSEQ_DCN3_REG_FIELD_LIST(uint8_t)
	HWSEQ_DCN301_REG_FIELD_LIST(uint8_t)
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	HWSEQ_DCN31_REG_FIELD_LIST(uint8_t)
#endif
};

struct dce_hwseq_mask {
@ -1115,6 +1129,9 @@ struct dce_hwseq_mask {
	HWSEQ_DCN_REG_FIELD_LIST(uint32_t)
	HWSEQ_DCN3_REG_FIELD_LIST(uint32_t)
	HWSEQ_DCN301_REG_FIELD_LIST(uint32_t)
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
	HWSEQ_DCN31_REG_FIELD_LIST(uint32_t)
#endif
};

@ -297,6 +297,7 @@ struct dce_mem_input_registers {
	MI_DCP_PTE_MASK_SH_LIST(mask_sh, )

#define MI_GFX9_TILE_MASK_SH_LIST(mask_sh, blk)\
	SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\
	SFB(blk, GRPH_CONTROL, GRPH_SW_MODE, mask_sh),\
	SFB(blk, GRPH_CONTROL, GRPH_SE_ENABLE, mask_sh),\
	SFB(blk, GRPH_CONTROL, GRPH_NUM_SHADER_ENGINES, mask_sh),\
@ -77,7 +77,7 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state)
/*
 * Get PSR state from firmware.
 */
static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)
static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state, uint8_t panel_inst)
{
	struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
	uint32_t raw_state = 0;

@ -86,7 +86,7 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)

	do {
		// Send gpint command and wait for ack
		status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
		status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, panel_inst, 30);

		if (status == DMUB_STATUS_OK) {
			// GPINT was executed, get response

@ -105,7 +105,7 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)
/*
 * Set PSR version.
 */
static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *stream)
static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *stream, uint8_t panel_inst)
{
	union dmub_rb_cmd cmd;
	struct dc_context *dc = dmub->ctx;

@ -125,6 +125,8 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
		cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_UNSUPPORTED;
		break;
	}
	cmd.psr_set_version.psr_set_version_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
	cmd.psr_set_version.psr_set_version_data.panel_inst = panel_inst;
	cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);

	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);

@ -137,7 +139,7 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
/*
 * Enable/Disable PSR.
 */
static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait)
static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait, uint8_t panel_inst)
{
	union dmub_rb_cmd cmd;
	struct dc_context *dc = dmub->ctx;

@ -147,6 +149,9 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait)
	memset(&cmd, 0, sizeof(cmd));
	cmd.psr_enable.header.type = DMUB_CMD__PSR;

	cmd.psr_enable.data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
	cmd.psr_enable.data.panel_inst = panel_inst;

	if (enable)
		cmd.psr_enable.header.sub_type = DMUB_CMD__PSR_ENABLE;
	else

@ -164,7 +169,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait)
	 */
	if (wait) {
		for (retry_count = 0; retry_count <= 1000; retry_count++) {
			dmub_psr_get_state(dmub, &state);
			dmub_psr_get_state(dmub, &state, panel_inst);

			if (enable) {
				if (state != PSR_STATE0)

@ -186,13 +191,13 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait)
/*
 * Set PSR level.
 */
static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_t panel_inst)
{
	union dmub_rb_cmd cmd;
	enum dc_psr_state state = PSR_STATE0;
	struct dc_context *dc = dmub->ctx;

	dmub_psr_get_state(dmub, &state);
	dmub_psr_get_state(dmub, &state, panel_inst);

	if (state == PSR_STATE0)
		return;

@ -202,7 +207,8 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
	cmd.psr_set_level.header.sub_type = DMUB_CMD__PSR_SET_LEVEL;
	cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data);
	cmd.psr_set_level.psr_set_level_data.psr_level = psr_level;

	cmd.psr_set_level.psr_set_level_data.cmd_version = PSR_VERSION_1;
	cmd.psr_set_level.psr_set_level_data.panel_inst = panel_inst;
	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
	dc_dmub_srv_cmd_execute(dc->dmub_srv);
	dc_dmub_srv_wait_idle(dc->dmub_srv);

@ -213,7 +219,8 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
 */
static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
		struct dc_link *link,
		struct psr_context *psr_context)
		struct psr_context *psr_context,
		uint8_t panel_inst)
{
	union dmub_rb_cmd cmd;
	struct dc_context *dc = dmub->ctx;

@ -237,7 +244,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
		return false;

	// First, set the psr version
	if (!dmub_psr_set_version(dmub, pipe_ctx->stream))
	if (!dmub_psr_set_version(dmub, pipe_ctx->stream, panel_inst))
		return false;

	// Program DP DPHY fast training registers

@ -286,6 +293,8 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
	copy_settings_data->debug.bitfields.use_hw_lock_mgr = 1;
	copy_settings_data->fec_enable_status = (link->fec_state == dc_link_fec_enabled);
	copy_settings_data->fec_enable_delay_in100us = link->dc->debug.fec_enable_delay_in100us;
	copy_settings_data->cmd_version = PSR_VERSION_1;
	copy_settings_data->panel_inst = panel_inst;

	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
	dc_dmub_srv_cmd_execute(dc->dmub_srv);

@ -297,12 +306,15 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
/*
 * Send command to PSR to force static ENTER and ignore all state changes until exit
 */
static void dmub_psr_force_static(struct dmub_psr *dmub)
static void dmub_psr_force_static(struct dmub_psr *dmub, uint8_t panel_inst)
{
	union dmub_rb_cmd cmd;
	struct dc_context *dc = dmub->ctx;

	memset(&cmd, 0, sizeof(cmd));

	cmd.psr_force_static.psr_force_static_data.panel_inst = panel_inst;
	cmd.psr_force_static.psr_force_static_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
	cmd.psr_force_static.header.type = DMUB_CMD__PSR;
	cmd.psr_force_static.header.sub_type = DMUB_CMD__PSR_FORCE_STATIC;
	cmd.psr_enable.header.payload_bytes = 0;

@ -315,12 +327,13 @@ static void dmub_psr_force_static(struct dmub_psr *dmub)
/*
 * Get PSR residency from firmware.
 */
static void dmub_psr_get_residency(struct dmub_psr *dmub, uint32_t *residency)
static void dmub_psr_get_residency(struct dmub_psr *dmub, uint32_t *residency, uint8_t panel_inst)
{
	struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
	uint16_t param = (uint16_t)(panel_inst << 8);

	// Send gpint command and wait for ack
	dmub_srv_send_gpint_command(srv, DMUB_GPINT__PSR_RESIDENCY, 0, 30);
	/* Send gpint command and wait for ack */
	dmub_srv_send_gpint_command(srv, DMUB_GPINT__PSR_RESIDENCY, param, 30);

	dmub_srv_get_gpint_response(srv, residency);
}
@ -35,12 +35,17 @@ struct dmub_psr {
};

struct dmub_psr_funcs {
	bool (*psr_copy_settings)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
	void (*psr_enable)(struct dmub_psr *dmub, bool enable, bool wait);
	void (*psr_get_state)(struct dmub_psr *dmub, enum dc_psr_state *dc_psr_state);
	void (*psr_set_level)(struct dmub_psr *dmub, uint16_t psr_level);
	void (*psr_force_static)(struct dmub_psr *dmub);
	void (*psr_get_residency)(struct dmub_psr *dmub, uint32_t *residency);
	bool (*psr_copy_settings)(struct dmub_psr *dmub, struct dc_link *link,
			struct psr_context *psr_context, uint8_t panel_inst);
	void (*psr_enable)(struct dmub_psr *dmub, bool enable, bool wait,
			uint8_t panel_inst);
	void (*psr_get_state)(struct dmub_psr *dmub, enum dc_psr_state *dc_psr_state,
			uint8_t panel_inst);
	void (*psr_set_level)(struct dmub_psr *dmub, uint16_t psr_level,
			uint8_t panel_inst);
	void (*psr_force_static)(struct dmub_psr *dmub, uint8_t panel_inst);
	void (*psr_get_residency)(struct dmub_psr *dmub, uint32_t *residency,
			uint8_t panel_inst);
};

struct dmub_psr *dmub_psr_create(struct dc_context *ctx);

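A sketch of how dc_link.c drives this table after the panel_inst plumbing above (condensed from the call sites earlier in this diff, not a new API):

	unsigned int panel_inst;

	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
		return false;

	if (force_static && psr->funcs->psr_force_static)
		psr->funcs->psr_force_static(psr, panel_inst);
	psr->funcs->psr_enable(psr, allow_active, wait, panel_inst);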
@ -32,7 +32,6 @@
#include "core_status.h"
#include "resource.h"
#include "dm_helpers.h"
#include "dce110_hw_sequencer.h"
#include "dce110_timing_generator.h"
#include "dce/dce_hwseq.h"
#include "gpio_service_interface.h"

@ -49,6 +48,9 @@
#include "link_encoder.h"
#include "link_hwss.h"
#include "dc_link_dp.h"
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
#include "dccg.h"
#endif
#include "clock_source.h"
#include "clk_mgr.h"
#include "abm.h"
@ -1310,41 +1312,6 @@ static void build_audio_output(
		pipe_ctx->pll_settings.ss_percentage;
}

static void get_surface_visual_confirm_color(const struct pipe_ctx *pipe_ctx,
		struct tg_color *color)
{
	uint32_t color_value = MAX_TG_COLOR_VALUE * (4 - pipe_ctx->stream_res.tg->inst) / 4;

	switch (pipe_ctx->plane_res.scl_data.format) {
	case PIXEL_FORMAT_ARGB8888:
		/* set border color to red */
		color->color_r_cr = color_value;
		break;

	case PIXEL_FORMAT_ARGB2101010:
		/* set border color to blue */
		color->color_b_cb = color_value;
		break;
	case PIXEL_FORMAT_420BPP8:
		/* set border color to green */
		color->color_g_y = color_value;
		break;
	case PIXEL_FORMAT_420BPP10:
		/* set border color to yellow */
		color->color_g_y = color_value;
		color->color_r_cr = color_value;
		break;
	case PIXEL_FORMAT_FP16:
		/* set border color to white */
		color->color_r_cr = color_value;
		color->color_b_cb = color_value;
		color->color_g_y = color_value;
		break;
	default:
		break;
	}
}

static void program_scaler(const struct dc *dc,
		const struct pipe_ctx *pipe_ctx)
{
@ -2124,11 +2091,31 @@ static void dce110_setup_audio_dto(
|
||||
|
||||
build_audio_output(context, pipe_ctx, &audio_output);
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
|
||||
/* For DCN3.1, audio to HPO FRL encoder is using audio DTBCLK DTO */
|
||||
if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->set_audio_dtbclk_dto) {
|
||||
/* disable audio DTBCLK DTO */
|
||||
dc->res_pool->dccg->funcs->set_audio_dtbclk_dto(
|
||||
dc->res_pool->dccg, 0);
|
||||
|
||||
pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
|
||||
pipe_ctx->stream_res.audio,
|
||||
pipe_ctx->stream->signal,
|
||||
&audio_output.crtc_info,
|
||||
&audio_output.pll_info);
|
||||
} else
|
||||
pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
|
||||
pipe_ctx->stream_res.audio,
|
||||
pipe_ctx->stream->signal,
|
||||
&audio_output.crtc_info,
|
||||
&audio_output.pll_info);
|
||||
#else
|
||||
pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
|
||||
pipe_ctx->stream_res.audio,
|
||||
pipe_ctx->stream->signal,
|
||||
&audio_output.crtc_info,
|
||||
&audio_output.pll_info);
|
||||
#endif
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -2231,6 +2231,7 @@ static const struct timing_generator_funcs dce110_tg_funcs = {
|
||||
dce110_timing_generator_enable_advanced_request,
|
||||
.set_drr =
|
||||
dce110_timing_generator_set_drr,
|
||||
.get_last_used_drr_vtotal = NULL,
|
||||
.set_static_screen_control =
|
||||
dce110_timing_generator_set_static_screen_control,
|
||||
.set_test_pattern = dce110_timing_generator_set_test_pattern,
|
||||
|
@ -1190,6 +1190,7 @@ static const struct timing_generator_funcs dce120_tg_funcs = {
|
||||
.tear_down_global_swap_lock = dce120_timing_generator_tear_down_global_swap_lock,
|
||||
.enable_advanced_request = dce120_timing_generator_enable_advanced_request,
|
||||
.set_drr = dce120_timing_generator_set_drr,
|
||||
.get_last_used_drr_vtotal = NULL,
|
||||
.set_static_screen_control = dce120_timing_generator_set_static_screen_control,
|
||||
.set_test_pattern = dce120_timing_generator_set_test_pattern,
|
||||
.arm_vert_intr = dce120_arm_vert_intr,
|
||||
|
@ -209,6 +209,7 @@ static const struct timing_generator_funcs dce80_tg_funcs = {
|
||||
.tear_down_global_swap_lock =
|
||||
dce110_timing_generator_tear_down_global_swap_lock,
|
||||
.set_drr = dce110_timing_generator_set_drr,
|
||||
.get_last_used_drr_vtotal = NULL,
|
||||
.set_static_screen_control =
|
||||
dce110_timing_generator_set_static_screen_control,
|
||||
.set_test_pattern = dce110_timing_generator_set_test_pattern,
|
||||
|
@ -139,6 +139,23 @@ struct dcn_hubbub_registers {
|
||||
uint32_t DCHVM_CLK_CTRL;
|
||||
uint32_t DCHVM_RIOMMU_CTRL0;
|
||||
uint32_t DCHVM_RIOMMU_STAT0;
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
|
||||
uint32_t DCHUBBUB_DET0_CTRL;
|
||||
uint32_t DCHUBBUB_DET1_CTRL;
|
||||
uint32_t DCHUBBUB_DET2_CTRL;
|
||||
uint32_t DCHUBBUB_DET3_CTRL;
|
||||
uint32_t DCHUBBUB_COMPBUF_CTRL;
|
||||
uint32_t COMPBUF_RESERVED_SPACE;
|
||||
uint32_t DCHUBBUB_DEBUG_CTRL_0;
|
||||
uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A;
|
||||
uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A;
|
||||
uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B;
|
||||
uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B;
|
||||
uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C;
|
||||
uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C;
|
||||
uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D;
|
||||
uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D;
|
||||
#endif
|
||||
};
|
||||
|
||||
/* set field name */
|
||||
@ -275,17 +292,48 @@ struct dcn_hubbub_registers {
|
||||
type HOSTVM_POWERSTATUS; \
|
||||
type RIOMMU_ACTIVE; \
|
||||
type HOSTVM_PREFETCH_DONE
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
|
||||
#define HUBBUB_RET_REG_FIELD_LIST(type) \
|
||||
type DET_DEPTH;\
|
||||
type DET0_SIZE;\
|
||||
type DET1_SIZE;\
|
||||
type DET2_SIZE;\
|
||||
type DET3_SIZE;\
|
||||
type DET0_SIZE_CURRENT;\
|
||||
type DET1_SIZE_CURRENT;\
|
||||
type DET2_SIZE_CURRENT;\
|
||||
type DET3_SIZE_CURRENT;\
|
||||
type COMPBUF_SIZE;\
|
||||
type COMPBUF_SIZE_CURRENT;\
|
||||
type COMPBUF_RESERVED_SPACE_64B;\
|
||||
type COMPBUF_RESERVED_SPACE_ZS;\
|
||||
type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A;\
|
||||
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A;\
|
||||
type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B;\
|
||||
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B;\
|
||||
type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C;\
|
||||
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C;\
|
||||
type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D;\
|
||||
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D
|
||||
#endif
|
||||
|
||||
|
||||
struct dcn_hubbub_shift {
|
||||
DCN_HUBBUB_REG_FIELD_LIST(uint8_t);
|
||||
HUBBUB_STUTTER_REG_FIELD_LIST(uint8_t);
|
||||
HUBBUB_HVM_REG_FIELD_LIST(uint8_t);
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
|
||||
HUBBUB_RET_REG_FIELD_LIST(uint8_t);
|
||||
#endif
|
||||
};
|
||||
|
||||
struct dcn_hubbub_mask {
|
||||
DCN_HUBBUB_REG_FIELD_LIST(uint32_t);
|
||||
HUBBUB_STUTTER_REG_FIELD_LIST(uint32_t);
|
||||
HUBBUB_HVM_REG_FIELD_LIST(uint32_t);
|
||||
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
|
||||
HUBBUB_RET_REG_FIELD_LIST(uint32_t);
|
||||
#endif
|
||||
};
|
||||
|
||||
struct dc;
|
||||
|
@ -2407,83 +2407,6 @@ void dcn10_program_output_csc(struct dc *dc,
        }
}

void dcn10_get_surface_visual_confirm_color(
                const struct pipe_ctx *pipe_ctx,
                struct tg_color *color)
{
        uint32_t color_value = MAX_TG_COLOR_VALUE;

        switch (pipe_ctx->plane_res.scl_data.format) {
        case PIXEL_FORMAT_ARGB8888:
                /* set border color to red */
                color->color_r_cr = color_value;
                break;

        case PIXEL_FORMAT_ARGB2101010:
                /* set border color to blue */
                color->color_b_cb = color_value;
                break;
        case PIXEL_FORMAT_420BPP8:
                /* set border color to green */
                color->color_g_y = color_value;
                break;
        case PIXEL_FORMAT_420BPP10:
                /* set border color to yellow */
                color->color_g_y = color_value;
                color->color_r_cr = color_value;
                break;
        case PIXEL_FORMAT_FP16:
                /* set border color to white */
                color->color_r_cr = color_value;
                color->color_b_cb = color_value;
                color->color_g_y = color_value;
                break;
        default:
                break;
        }
}

void dcn10_get_hdr_visual_confirm_color(
                struct pipe_ctx *pipe_ctx,
                struct tg_color *color)
{
        uint32_t color_value = MAX_TG_COLOR_VALUE;

        // Determine the overscan color based on the top-most (desktop) plane's context
        struct pipe_ctx *top_pipe_ctx = pipe_ctx;

        while (top_pipe_ctx->top_pipe != NULL)
                top_pipe_ctx = top_pipe_ctx->top_pipe;

        switch (top_pipe_ctx->plane_res.scl_data.format) {
        case PIXEL_FORMAT_ARGB2101010:
                if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
                        /* HDR10, ARGB2101010 - set border color to red */
                        color->color_r_cr = color_value;
                } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
                        /* FreeSync 2 ARGB2101010 - set border color to pink */
                        color->color_r_cr = color_value;
                        color->color_b_cb = color_value;
                }
                break;
        case PIXEL_FORMAT_FP16:
                if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
                        /* HDR10, FP16 - set border color to blue */
                        color->color_b_cb = color_value;
                } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
                        /* FreeSync 2 HDR - set border color to green */
                        color->color_g_y = color_value;
                }
                break;
        default:
                /* SDR - set border color to Gray */
                color->color_r_cr = color_value/2;
                color->color_b_cb = color_value/2;
                color->color_g_y = color_value/2;
                break;
        }
}

static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
{
        struct dc_bias_and_scale bns_params = {0};
@ -2502,9 +2425,26 @@ static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state
        dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}

void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
{
        struct mpc *mpc = dc->res_pool->mpc;

        if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
                get_hdr_visual_confirm_color(pipe_ctx, color);
        else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
                get_surface_visual_confirm_color(pipe_ctx, color);
        else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
                get_surface_tile_visual_confirm_color(pipe_ctx, color);
        else
                color_space_to_black_color(
                                dc, pipe_ctx->stream->output_color_space, color);

        if (mpc->funcs->set_bg_color)
                mpc->funcs->set_bg_color(mpc, color, mpcc_id);
}
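dcn10_update_visual_confirm_color() becomes the single entry point for the visual-confirm debug feature: it picks a border color by debug mode (surface mode keys off the pixel format, HDR mode off the transfer function, swizzle off the tiling, with everything else falling back to the stream's black color) and programs it as the MPC background for the given MPCC. A stripped-down sketch of the same dispatch shape, with hypothetical minimal types standing in for the real dc/pipe_ctx/mpc structures:

        #include <stdio.h>

        enum confirm_mode { CONFIRM_DISABLE, CONFIRM_SURFACE, CONFIRM_HDR };

        struct color { unsigned r, g, b; };

        static void surface_color(struct color *c) { *c = (struct color){ 255, 0, 0 }; }
        static void hdr_color(struct color *c) { *c = (struct color){ 0, 0, 255 }; }
        static void black_color(struct color *c) { *c = (struct color){ 0, 0, 0 }; }

        /* Mirrors the dispatch above: choose a color source by debug mode,
         * then hand the result to one sink (in real DC,
         * mpc->funcs->set_bg_color). */
        static void update_confirm_color(enum confirm_mode mode, struct color *c)
        {
                if (mode == CONFIRM_HDR)
                        hdr_color(c);
                else if (mode == CONFIRM_SURFACE)
                        surface_color(c);
                else
                        black_color(c);
        }

        int main(void)
        {
                struct color c;

                update_confirm_color(CONFIRM_SURFACE, &c);
                printf("bg = (%u, %u, %u)\n", c.r, c.g, c.b);
                return 0;
        }

Centralizing this lets dcn10_update_mpcc() below drop its own copy of the mode checks and call through dc->hwss instead.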
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
        struct dce_hwseq *hws = dc->hwseq;
        struct hubp *hubp = pipe_ctx->plane_res.hubp;
        struct mpcc_blnd_cfg blnd_cfg = {{0}};
        bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
@ -2513,18 +2453,6 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
        struct mpc *mpc = dc->res_pool->mpc;
        struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

        if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
                hws->funcs.get_hdr_visual_confirm_color(
                                pipe_ctx, &blnd_cfg.black_color);
        } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
                hws->funcs.get_surface_visual_confirm_color(
                                pipe_ctx, &blnd_cfg.black_color);
        } else {
                color_space_to_black_color(
                                dc, pipe_ctx->stream->output_color_space,
                                &blnd_cfg.black_color);
        }

        if (per_pixel_alpha)
                blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
        else
@ -2559,6 +2487,7 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
        /* If there is no full update, don't need to touch MPC tree */
        if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
                mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
                dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
                return;
        }

@ -2580,6 +2509,7 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
                        NULL,
                        hubp->inst,
                        mpcc_id);
        dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);

        ASSERT(new_mpcc != NULL);

@ -189,12 +189,6 @@ void dcn10_bios_golden_init(struct dc *dc);
void dcn10_plane_atomic_power_down(struct dc *dc,
                struct dpp *dpp,
                struct hubp *hubp);
void dcn10_get_surface_visual_confirm_color(
                const struct pipe_ctx *pipe_ctx,
                struct tg_color *color);
void dcn10_get_hdr_visual_confirm_color(
                struct pipe_ctx *pipe_ctx,
                struct tg_color *color);
bool dcn10_disconnect_pipes(
                struct dc *dc,
                struct dc_state *context);
@ -206,4 +200,10 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc);

void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits);

void dcn10_update_visual_confirm_color(
                struct dc *dc,
                struct pipe_ctx *pipe_ctx,
                struct tg_color *color,
                int mpcc_id);

#endif /* __DC_HWSS_DCN10_H__ */
@ -82,6 +82,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
        .set_pipe = dce110_set_pipe,
        .get_dcc_en_bits = dcn10_get_dcc_en_bits,
        .update_visual_confirm_color = dcn10_update_visual_confirm_color,
};

static const struct hwseq_private_funcs dcn10_private_funcs = {
@ -111,8 +112,6 @@ static const struct hwseq_private_funcs dcn10_private_funcs = {
        .dpp_pg_control = dcn10_dpp_pg_control,
        .hubp_pg_control = dcn10_hubp_pg_control,
        .dsc_pg_control = NULL,
        .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
        .get_hdr_visual_confirm_color = dcn10_get_hdr_visual_confirm_color,
        .set_hdr_multiplier = dcn10_set_hdr_multiplier,
        .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
};
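The wiring above follows DC's two-table convention: cross-generation public hooks live in hw_sequencer_funcs, implementation details in hwseq_private_funcs. With update_visual_confirm_color in the public table, callers reach it through dc->hwss without knowing which DCN generation is underneath. A minimal sketch of the idea (hypothetical, trimmed-down types):

        #include <stddef.h>
        #include <stdio.h>

        struct dc; /* opaque to callers */

        /* Public hook table: one slot per operation, filled per generation. */
        struct hwss_funcs {
                void (*update_visual_confirm_color)(struct dc *dc, int mpcc_id);
        };

        static void dcn10_impl(struct dc *dc, int mpcc_id)
        {
                (void)dc;
                printf("dcn10 implementation, mpcc %d\n", mpcc_id);
        }

        static const struct hwss_funcs dcn10_funcs = {
                .update_visual_confirm_color = dcn10_impl,
        };

        int main(void)
        {
                /* Callers go through the table, never the concrete function. */
                dcn10_funcs.update_visual_confirm_color(NULL, 0);
                return 0;
        }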
@ -160,6 +160,14 @@ struct dcn10_link_enc_registers {
        uint32_t PHYA_LINK_CNTL2;
        uint32_t PHYB_LINK_CNTL2;
        uint32_t PHYC_LINK_CNTL2;
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
        uint32_t DIO_LINKA_CNTL;
        uint32_t DIO_LINKB_CNTL;
        uint32_t DIO_LINKC_CNTL;
        uint32_t DIO_LINKD_CNTL;
        uint32_t DIO_LINKE_CNTL;
        uint32_t DIO_LINKF_CNTL;
#endif
};

#define LE_SF(reg_name, field_name, post_fix)\
@ -459,17 +467,29 @@ struct dcn10_link_enc_registers {
        type DPCS_TX_DATA_SWAP_10_BIT;\
        type DPCS_TX_DATA_ORDER_INVERT_18_BIT;\
        type RDPCS_TX_CLK_EN
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
#define DCN31_LINK_ENCODER_REG_FIELD_LIST(type) \
        type ENC_TYPE_SEL;\
        type HPO_DP_ENC_SEL;\
        type HPO_HDMI_ENC_SEL
#endif

struct dcn10_link_enc_shift {
        DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
        DCN20_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
        DCN30_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
        DCN31_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
#endif
};

struct dcn10_link_enc_mask {
        DCN_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
        DCN20_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
        DCN30_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
#if defined(CONFIG_DRM_AMD_DC_DCN3_1)
        DCN31_LINK_ENCODER_REG_FIELD_LIST(uint32_t);
#endif
};

struct dcn10_link_encoder {
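The DCN 3.1 hunks above show how DC grows a register interface without disturbing older builds: the new registers and field-list macro sit behind CONFIG_DRM_AMD_DC_DCN3_1, and the shared shift/mask structs pick them up only when that option is compiled in. A compile-time sketch of the same guard pattern (demo names; a plain define stands in for the Kconfig symbol):

        #include <stdint.h>
        #include <stdio.h>

        #define CONFIG_DEMO_DCN3_1 1 /* stand-in for the Kconfig symbol */

        #define BASE_FIELD_LIST(type) \
                type TX_CLK_EN

        #if defined(CONFIG_DEMO_DCN3_1)
        #define DEMO31_FIELD_LIST(type) \
                type HPO_DP_ENC_SEL
        #endif

        /* With the option enabled the struct gains the extra member;
         * without it, it compiles exactly as before. */
        struct demo_shift {
                BASE_FIELD_LIST(uint8_t);
        #if defined(CONFIG_DEMO_DCN3_1)
                DEMO31_FIELD_LIST(uint8_t);
        #endif
        };

        int main(void)
        {
                printf("demo_shift carries %zu byte(s) of fields\n",
                       sizeof(struct demo_shift));
                return 0;
        }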
@ -45,6 +45,8 @@ void mpc1_set_bg_color(struct mpc *mpc,
        struct mpcc *bottommost_mpcc = mpc1_get_mpcc(mpc, mpcc_id);
        uint32_t bg_r_cr, bg_g_y, bg_b_cb;

        bottommost_mpcc->blnd_cfg.black_color = *bg_color;

        /* find bottommost mpcc. */
        while (bottommost_mpcc->mpcc_bot) {
                bottommost_mpcc = bottommost_mpcc->mpcc_bot;
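mpc1_set_bg_color() caches the color in the addressed MPCC's blend config, then follows mpcc_bot links to the end of the blend chain, since the background color is only applied beneath the bottommost layer. The traversal in isolation (hypothetical minimal node type):

        #include <stddef.h>
        #include <stdio.h>

        struct mpcc_node {
                int id;
                struct mpcc_node *bot; /* next layer down, NULL at the bottom */
        };

        static struct mpcc_node *find_bottommost(struct mpcc_node *n)
        {
                while (n->bot)
                        n = n->bot;
                return n;
        }

        int main(void)
        {
                struct mpcc_node c = { 2, NULL };
                struct mpcc_node b = { 1, &c };
                struct mpcc_node a = { 0, &b };

                printf("bottommost mpcc: %d\n", find_bottommost(&a)->id); /* 2 */
                return 0;
        }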
@ -81,7 +83,6 @@ static void mpc1_update_blending(
                MPCC_GLOBAL_ALPHA, blnd_cfg->global_alpha,
                MPCC_GLOBAL_GAIN, blnd_cfg->global_gain);

        mpc1_set_bg_color(mpc, &blnd_cfg->black_color, mpcc_id);
        mpcc->blnd_cfg = *blnd_cfg;
}

@ -495,6 +496,7 @@ static const struct mpc_funcs dcn10_mpc_funcs = {
        .set_output_csc = NULL,
        .set_output_gamma = NULL,
        .get_mpc_out_mux = mpc1_get_mpc_out_mux,
        .set_bg_color = mpc1_set_bg_color,
};

void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
@ -968,6 +968,17 @@ void optc1_set_drr(
        }
}

void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max)
{
        struct optc *optc1 = DCN10TG_FROM_TG(optc);

        REG_SET(OTG_V_TOTAL_MAX, 0,
                OTG_V_TOTAL_MAX, vtotal_max);

        REG_SET(OTG_V_TOTAL_MIN, 0,
                OTG_V_TOTAL_MIN, vtotal_min);
}
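optc1_set_vtotal_min_max() programs the VTOTAL clamp that dynamic refresh rate (DRR) operates within: the OTG stretches or shrinks the vertical total between these bounds to hit a target refresh rate. A back-of-envelope sketch of how a refresh-rate range maps onto the two register values (illustrative timing numbers, not taken from this diff):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                /* Illustrative 1080p-style timing: 148.5 MHz pixel clock, htotal 2200. */
                const uint64_t pix_clk_hz = 148500000ULL;
                const uint32_t htotal = 2200;

                /* vtotal = pixel_clock / (htotal * refresh_rate); the highest
                 * refresh rate yields the smallest vtotal and vice versa. */
                uint32_t vtotal_min = (uint32_t)(pix_clk_hz / ((uint64_t)htotal * 60)); /* 1125 */
                uint32_t vtotal_max = (uint32_t)(pix_clk_hz / ((uint64_t)htotal * 48)); /* 1406 */

                printf("OTG_V_TOTAL_MIN = %u, OTG_V_TOTAL_MAX = %u\n",
                       vtotal_min, vtotal_max);
                return 0;
        }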
static void optc1_set_test_pattern(
        struct timing_generator *optc,
        /* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode'
@ -1543,6 +1554,7 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
        .unlock = optc1_unlock,
        .enable_optc_clock = optc1_enable_optc_clock,
        .set_drr = optc1_set_drr,
        .get_last_used_drr_vtotal = NULL,
        .set_static_screen_control = optc1_set_static_screen_control,
        .set_test_pattern = optc1_set_test_pattern,
        .program_stereo = optc1_program_stereo,
@ -171,6 +171,7 @@ struct dcn_optc_registers {
        uint32_t OPTC_DATA_FORMAT_CONTROL;
        uint32_t OPTC_BYTES_PER_PIXEL;
        uint32_t OPTC_WIDTH_CONTROL;
        uint32_t OTG_DRR_CONTROL;
        uint32_t OTG_BLANK_DATA_COLOR;
        uint32_t OTG_BLANK_DATA_COLOR_EXT;
        uint32_t OTG_DRR_TRIGGER_WINDOW;
@ -517,7 +518,8 @@ struct dcn_optc_registers {
        type OTG_CRC_DSC_MODE;\
        type OTG_CRC_DATA_STREAM_COMBINE_MODE;\
        type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
        type OTG_CRC_DATA_FORMAT;
        type OTG_CRC_DATA_FORMAT;\
        type OTG_V_TOTAL_LAST_USED_BY_DRR;

struct dcn_optc_shift {
@ -666,6 +668,8 @@ void optc1_set_drr(
        struct timing_generator *optc,
        const struct drr_params *params);

void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max);

void optc1_set_static_screen_control(
        struct timing_generator *optc,
        uint32_t event_triggers,
Some files were not shown because too many files have changed in this diff.