From 4dd9f5404c7180f573b911f034df1a144abb78be Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Thu, 12 Oct 2023 11:24:10 +0800 Subject: [PATCH 01/66] drm/amd/pm: record mca debug mode in RAS Call amdgpu_ras_set_mca_debug_mode when we set mca debug mode in smu v13_0_6. Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 60eb6f8af187..f42b48b31927 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -1478,6 +1478,7 @@ static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable) if (smu->smc_fw_version < 0x554800) return 0; + amdgpu_ras_set_mca_debug_mode(smu->adev, enable); return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead, enable ? 0 : ClearMcaOnRead_UE_FLAG_MASK | ClearMcaOnRead_CE_POLL_MASK, NULL); From bf795156105150a7a242389c56fca382ddc984c5 Mon Sep 17 00:00:00 2001 From: Hugo Hu Date: Tue, 22 Aug 2023 17:01:39 +0800 Subject: [PATCH 02/66] drm/amd/display: reprogram det size while seamless boot [Why] During system boot in second screen only mode on a seamless boot system, there is a chance that the pipe's det size might not be reset. [How] Reset the det size while resetting the pipe during seamless boot. Reviewed-by: Dmytro Laktyushkin Acked-by: Roman Li Signed-off-by: Hugo Hu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn31/dcn31_hubbub.c | 23 +++++++++++++++++++ .../amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 9 ++++++++ .../gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 1 + 3 files changed, 33 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c index f6b59c29cee2..5b5b5e0775fa 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c @@ -109,6 +109,28 @@ static void dcn31_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigne + hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs); } +static void dcn31_wait_for_det_apply(struct hubbub *hubbub, int hubp_inst) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + switch (hubp_inst) { + case 0: + REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1000, 30); + break; + case 1: + REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1000, 30); + break; + case 2: + REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1000, 30); + break; + case 3: + REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1000, 30); + break; + default: + break; + } +} + static void dcn31_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase) { struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); @@ -1041,6 +1063,7 @@ static const struct hubbub_funcs hubbub31_funcs = { .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, .verify_allow_pstate_change_high = hubbub31_verify_allow_pstate_change_high, .program_det_size = dcn31_program_det_size, + .wait_for_det_apply = dcn31_wait_for_det_apply, .program_compbuf_size = dcn31_program_compbuf_size, .init_crb = dcn31_init_crb, .hubbub_read_state = hubbub2_read_state, diff --git 
a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index f369f7af6b3a..a22cd2aee286 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -584,6 +584,15 @@ void dcn31_reset_hw_ctx_wrap( pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) { struct clock_source *old_clk = pipe_ctx_old->clock_source; + /* Reset pipe which is seamless boot stream. */ + if (!pipe_ctx_old->plane_state) { + dc->res_pool->hubbub->funcs->program_det_size( + dc->res_pool->hubbub, pipe_ctx_old->plane_res.hubp->inst, 0); + /* Wait det size changed. */ + dc->res_pool->hubbub->funcs->wait_for_det_apply( + dc->res_pool->hubbub, pipe_ctx_old->plane_res.hubp->inst); + } + dcn31_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); if (hws->funcs.enable_stream_gating) hws->funcs.enable_stream_gating(dc, pipe_ctx_old); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index cea05843990c..901891316dfb 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -188,6 +188,7 @@ struct hubbub_funcs { * compressed or detiled buffers. */ void (*program_det_size)(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_in_kbyte); + void (*wait_for_det_apply)(struct hubbub *hubbub, int hubp_inst); void (*program_compbuf_size)(struct hubbub *hubbub, unsigned compbuf_size_kb, bool safe_to_increase); void (*init_crb)(struct hubbub *hubbub); void (*force_usr_retraining_allow)(struct hubbub *hubbub, bool allow); From 53d4d7792757d195979a630a6402f272d3fd2a47 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Fri, 20 Oct 2023 10:12:07 +0800 Subject: [PATCH 03/66] drm/amdgpu: fix find ras error node error the origin function might return the wrong node. Fixes: 5b1270beb380 ("drm/amdgpu: add ras_err_info to identify RAS error source") Signed-off-by: Yang Wang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 3c83a2b8fb2c..88e1b47c3d3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -3529,11 +3529,10 @@ static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data for_each_ras_error(err_node, err_data) { ref_id = &err_node->err_info.mcm_info; - if ((mcm_info->socket_id >= 0 && mcm_info->socket_id != ref_id->socket_id) || - (mcm_info->die_id >= 0 && mcm_info->die_id != ref_id->die_id)) - continue; - return err_node; + if (mcm_info->socket_id == ref_id->socket_id && + mcm_info->die_id == ref_id->die_id) + return err_node; } return NULL; From ec3e0a9167e2cc97a9b12d9f2a619afd78b77223 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Thu, 19 Oct 2023 18:16:14 +0800 Subject: [PATCH 04/66] drm/amdgpu: refine ras error kernel log print refine ras error kernel log to avoid user-ridden ambiguity. 
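As an illustrative aside, not part of the patch: the reworked print path reports two things per socket/die pair, the errors found by the current query and the totals accumulated in the RAS manager. The standalone C sketch below uses hypothetical types and plain printf purely to show that two-pass pattern; it is not the driver's ras_err_info structure or API.

        #include <stdio.h>

        /* Hypothetical per-(socket, die) record; the field names are made up. */
        struct die_err {
                int socket_id;
                int die_id;
                unsigned long long new_ue;      /* found by the current query */
                unsigned long long total_ue;    /* accumulated since driver load */
        };

        static void report_ue(const struct die_err *e, int n, const char *blk)
        {
                int i;

                /* First pass: only dies that contributed new errors in this query. */
                for (i = 0; i < n; i++)
                        if (e[i].new_ue)
                                printf("socket: %d, die: %d, %llu new uncorrectable hardware errors detected in %s block\n",
                                       e[i].socket_id, e[i].die_id, e[i].new_ue, blk);

                /* Second pass: running totals for every tracked die. */
                for (i = 0; i < n; i++)
                        printf("socket: %d, die: %d, %llu uncorrectable hardware errors detected in total in %s block\n",
                               e[i].socket_id, e[i].die_id, e[i].total_ue, blk);
        }

        int main(void)
        {
                const struct die_err sample[] = {
                        { .socket_id = 0, .die_id = 0, .new_ue = 1, .total_ue = 3 },
                        { .socket_id = 0, .die_id = 1, .new_ue = 0, .total_ue = 2 },
                };

                report_ue(sample, 2, "umc");
                return 0;
        }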
Signed-off-by: Yang Wang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 116 +++++++++++++++++------- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 5 +- 2 files changed, 82 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 88e1b47c3d3f..fe6b44eb6602 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -635,8 +635,11 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev, static inline void put_obj(struct ras_manager *obj) { - if (obj && (--obj->use == 0)) + if (obj && (--obj->use == 0)) { list_del(&obj->node); + amdgpu_ras_error_data_fini(&obj->err_data); + } + if (obj && (obj->use < 0)) DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head)); } @@ -666,6 +669,9 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev, if (alive_obj(obj)) return NULL; + if (amdgpu_ras_error_data_init(&obj->err_data)) + return NULL; + obj->head = *head; obj->adev = adev; list_add(&obj->node, &con->head); @@ -1023,44 +1029,68 @@ static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_d } static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev, - struct ras_query_if *query_if, + struct ras_manager *ras_mgr, struct ras_err_data *err_data, + const char *blk_name, bool is_ue) { - struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head); - const char *blk_name = get_ras_block_str(&query_if->head); struct amdgpu_smuio_mcm_config_info *mcm_info; struct ras_err_node *err_node; struct ras_err_info *err_info; - if (is_ue) - dev_info(adev->dev, "%ld uncorrectable hardware errors detected in %s block\n", - ras_mgr->err_data.ue_count, blk_name); - else - dev_info(adev->dev, "%ld correctable hardware errors detected in %s block\n", - ras_mgr->err_data.ce_count, blk_name); + if (is_ue) { + for_each_ras_error(err_node, err_data) { + err_info = &err_node->err_info; + mcm_info = &err_info->mcm_info; + if (err_info->ue_count) { + dev_info(adev->dev, "socket: %d, die: %d, " + "%lld new uncorrectable hardware errors detected in %s block\n", + mcm_info->socket_id, + mcm_info->die_id, + err_info->ue_count, + blk_name); + } + } - for_each_ras_error(err_node, err_data) { - err_info = &err_node->err_info; - mcm_info = &err_info->mcm_info; - if (is_ue && err_info->ue_count) { - dev_info(adev->dev, "socket: %d, die: %d " - "%lld uncorrectable hardware errors detected in %s block\n", - mcm_info->socket_id, - mcm_info->die_id, - err_info->ue_count, - blk_name); - } else if (!is_ue && err_info->ce_count) { - dev_info(adev->dev, "socket: %d, die: %d " - "%lld correctable hardware errors detected in %s block\n", - mcm_info->socket_id, - mcm_info->die_id, - err_info->ce_count, - blk_name); + for_each_ras_error(err_node, &ras_mgr->err_data) { + err_info = &err_node->err_info; + mcm_info = &err_info->mcm_info; + dev_info(adev->dev, "socket: %d, die: %d, " + "%lld uncorrectable hardware errors detected in total in %s block\n", + mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name); + } + + } else { + for_each_ras_error(err_node, err_data) { + err_info = &err_node->err_info; + mcm_info = &err_info->mcm_info; + if (err_info->ce_count) { + dev_info(adev->dev, "socket: %d, die: %d, " + "%lld new correctable hardware errors detected in %s block, " + "no user action is needed\n", + mcm_info->socket_id, + mcm_info->die_id, + err_info->ce_count, + 
blk_name); + } + } + + for_each_ras_error(err_node, &ras_mgr->err_data) { + err_info = &err_node->err_info; + mcm_info = &err_info->mcm_info; + dev_info(adev->dev, "socket: %d, die: %d, " + "%lld correctable hardware errors detected in total in %s block, " + "no user action is needed\n", + mcm_info->socket_id, mcm_info->die_id, err_info->ce_count, blk_name); } } } +static inline bool err_data_has_source_info(struct ras_err_data *data) +{ + return !list_empty(&data->err_node_list); +} + static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev, struct ras_query_if *query_if, struct ras_err_data *err_data) @@ -1069,9 +1099,8 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev, const char *blk_name = get_ras_block_str(&query_if->head); if (err_data->ce_count) { - if (!list_empty(&err_data->err_node_list)) { - amdgpu_ras_error_print_error_data(adev, query_if, - err_data, false); + if (err_data_has_source_info(err_data)) { + amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, blk_name, false); } else if (!adev->aid_mask && adev->smuio.funcs && adev->smuio.funcs->get_socket_id && @@ -1094,9 +1123,8 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev, } if (err_data->ue_count) { - if (!list_empty(&err_data->err_node_list)) { - amdgpu_ras_error_print_error_data(adev, query_if, - err_data, true); + if (err_data_has_source_info(err_data)) { + amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, blk_name, true); } else if (!adev->aid_mask && adev->smuio.funcs && adev->smuio.funcs->get_socket_id && @@ -1118,6 +1146,25 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev, } +static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data) +{ + struct ras_err_node *err_node; + struct ras_err_info *err_info; + + if (err_data_has_source_info(err_data)) { + for_each_ras_error(err_node, err_data) { + err_info = &err_node->err_info; + + amdgpu_ras_error_statistic_ce_count(&obj->err_data, &err_info->mcm_info, err_info->ce_count); + amdgpu_ras_error_statistic_ue_count(&obj->err_data, &err_info->mcm_info, err_info->ue_count); + } + } else { + /* for legacy asic path which doesn't has error source info */ + obj->err_data.ue_count += err_data->ue_count; + obj->err_data.ce_count += err_data->ce_count; + } +} + /* query/inject/cure begin */ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info) @@ -1156,8 +1203,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, } } - obj->err_data.ue_count += err_data.ue_count; - obj->err_data.ce_count += err_data.ce_count; + amdgpu_rasmgr_error_data_statistic_update(obj, &err_data); info->ue_count = obj->err_data.ue_count; info->ce_count = obj->err_data.ce_count; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 2fdfef62ee27..665414c22ca9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -515,10 +515,7 @@ struct ras_manager { /* IH data */ struct ras_ih_data ih_data; - struct { - unsigned long ue_count; - unsigned long ce_count; - } err_data; + struct ras_err_data err_data; }; struct ras_badpage { From 282c1d793076c2edac6c3db51b7e8ed2b41d60a5 Mon Sep 17 00:00:00 2001 From: Jesse Zhang Date: Fri, 20 Oct 2023 09:43:51 +0800 Subject: [PATCH 05/66] drm/amdkfd: Fix shift out-of-bounds issue [ 567.613292] shift exponent 255 is too large for 64-bit type 'long unsigned int' [ 567.614498] CPU: 5 PID: 
238 Comm: kworker/5:1 Tainted: G OE 6.2.0-34-generic #34~22.04.1-Ubuntu [ 567.614502] Hardware name: AMD Splinter/Splinter-RPL, BIOS WS43927N_871 09/25/2023 [ 567.614504] Workqueue: events send_exception_work_handler [amdgpu] [ 567.614748] Call Trace: [ 567.614750] [ 567.614753] dump_stack_lvl+0x48/0x70 [ 567.614761] dump_stack+0x10/0x20 [ 567.614763] __ubsan_handle_shift_out_of_bounds+0x156/0x310 [ 567.614769] ? srso_alias_return_thunk+0x5/0x7f [ 567.614773] ? update_sd_lb_stats.constprop.0+0xf2/0x3c0 [ 567.614780] svm_range_split_by_granularity.cold+0x2b/0x34 [amdgpu] [ 567.615047] ? srso_alias_return_thunk+0x5/0x7f [ 567.615052] svm_migrate_to_ram+0x185/0x4d0 [amdgpu] [ 567.615286] do_swap_page+0x7b6/0xa30 [ 567.615291] ? srso_alias_return_thunk+0x5/0x7f [ 567.615294] ? __free_pages+0x119/0x130 [ 567.615299] handle_pte_fault+0x227/0x280 [ 567.615303] __handle_mm_fault+0x3c0/0x720 [ 567.615311] handle_mm_fault+0x119/0x330 [ 567.615314] ? lock_mm_and_find_vma+0x44/0x250 [ 567.615318] do_user_addr_fault+0x1a9/0x640 [ 567.615323] exc_page_fault+0x81/0x1b0 [ 567.615328] asm_exc_page_fault+0x27/0x30 [ 567.615332] RIP: 0010:__get_user_8+0x1c/0x30 Signed-off-by: Jesse Zhang Suggested-by: Philip Yang Reviewed-by: Yifan Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index bda88dc6e2fa..bf041b5a08a5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -783,7 +783,7 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange, prange->flags &= ~attrs[i].value; break; case KFD_IOCTL_SVM_ATTR_GRANULARITY: - prange->granularity = attrs[i].value; + prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F); break; default: WARN_ONCE(1, "svm_range_check_attrs wasn't called?"); From 493c75bbe3a89b5b178e61ef80f185a9614cbfaf Mon Sep 17 00:00:00 2001 From: Li Ma Date: Wed, 18 Oct 2023 13:34:29 +0800 Subject: [PATCH 06/66] drm/amdgpu: modify if condition in nbio_v7_7.c remove unnecessary "enable" in if condition. Signed-off-by: Li Ma Reviewed-by: Tim Huang Reviewed-by: Yifan Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c index def89379b51a..4df1055e640a 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c @@ -254,7 +254,7 @@ static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *ade { uint32_t def, data; - if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) + if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) return; def = data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL); @@ -283,7 +283,7 @@ static void nbio_v7_7_update_medium_grain_light_sleep(struct amdgpu_device *adev { uint32_t def, data; - if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) + if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) return; def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2); From af0b7df70b4738f89061a8084015f7f93078bb71 Mon Sep 17 00:00:00 2001 From: Jiadong Zhu Date: Wed, 30 Aug 2023 16:36:37 +0800 Subject: [PATCH 07/66] drm/amd/pm: drop unneeded dpm features disablement for SMU 14.0.0 PMFW will handle the features disablement properly for gpu reset case, driver involvement may cause some unexpected issues. 
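As an illustrative aside, not part of the patch: the change simply extends the early-return whitelist in smu_disable_dpms() with one more MP1 version. The sketch below is plain C with a made-up version-packing macro (not the driver's real IP_VERSION definition), only to show the shape of that check.

        #include <stdbool.h>
        #include <stdint.h>

        /* Made-up packing of major/minor/rev into one word, for illustration only. */
        #define VER(mj, mn, rv) (((uint32_t)(mj) << 16) | ((uint32_t)(mn) << 8) | (uint32_t)(rv))

        /*
         * Returns true when the PMFW owns DPM feature disablement for this MP1
         * version, so the driver skips it during GPU reset or S0i3 entry.
         */
        bool pmfw_handles_dpm_disable(uint32_t mp1_version)
        {
                switch (mp1_version) {
                case VER(13, 0, 4):
                case VER(13, 0, 11):
                case VER(14, 0, 0):     /* added by this patch */
                        return true;
                default:
                        return false;
                }
        }

In the driver the value being switched on comes from amdgpu_ip_version(adev, MP1_HWIP, 0), as in the hunk below.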
Signed-off-by: Jiadong Zhu Reviewed-by: Yifan Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 7087f9840ab7..a0b8d5d78beb 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1678,13 +1678,14 @@ static int smu_disable_dpms(struct smu_context *smu) } /* - * For SMU 13.0.4/11, PMFW will handle the features disablement properly + * For SMU 13.0.4/11 and 14.0.0, PMFW will handle the features disablement properly * for gpu reset and S0i3 cases. Driver involvement is unnecessary. */ if (amdgpu_in_reset(adev) || adev->in_s0ix) { switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(13, 0, 4): case IP_VERSION(13, 0, 11): + case IP_VERSION(14, 0, 0): return 0; default: break; From 037fb9c600240fd4e7e525c7e08e42645a44b2f7 Mon Sep 17 00:00:00 2001 From: Jiadong Zhu Date: Wed, 6 Sep 2023 08:58:02 +0800 Subject: [PATCH 08/66] drm/amdgpu: add tmz support for GC IP v11.5.0 Add tmz support for GC 11.5.0. Signed-off-by: Jiadong Zhu Reviewed-by: Yifan Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index a02992bff6af..2dce338b0f1e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -786,6 +786,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) /* YELLOW_CARP*/ case IP_VERSION(10, 3, 3): case IP_VERSION(11, 0, 4): + case IP_VERSION(11, 5, 0): /* Don't enable it by default yet. */ if (amdgpu_tmz < 1) { From 8eece69acee335580449ced3356f150610916fba Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 12 Oct 2023 15:07:41 +0530 Subject: [PATCH 09/66] drm/amdgpu: Add API to get full IP version Fetch the full version of IP including variant and subrevision. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 8df702eaa2ad..0cd447cf2d3e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1119,6 +1119,13 @@ static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev, return adev->ip_versions[ip][inst] & ~0xFFU; } +static inline uint32_t amdgpu_ip_version_full(const struct amdgpu_device *adev, + uint8_t ip, uint8_t inst) +{ + /* This returns full version - major/minor/rev/variant/subrevision */ + return adev->ip_versions[ip][inst]; +} + static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev) { return container_of(ddev, struct amdgpu_device, ddev); From d8da213478bcd0b2dde7a4591a0a6924a97592c7 Mon Sep 17 00:00:00 2001 From: Ma Jun Date: Wed, 18 Oct 2023 15:17:40 +0800 Subject: [PATCH 10/66] drm/amd/pm: Fix the return value in default case Fix the return value in default case and drop redundant 'break'. 
Signed-off-by: Ma Jun Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index a0b8d5d78beb..9f86c1fecbb1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -2435,7 +2435,6 @@ int smu_get_power_limit(void *handle, break; default: return -EOPNOTSUPP; - break; } switch (pp_limit_level) { @@ -2453,7 +2452,6 @@ int smu_get_power_limit(void *handle, break; default: return -EOPNOTSUPP; - break; } if (limit_type != SMU_DEFAULT_PPT_LIMIT) { @@ -2487,7 +2485,7 @@ int smu_get_power_limit(void *handle, *limit = smu->min_power_limit; break; default: - break; + return -EINVAL; } } From 79de4d9ade7411ffdddf0b69c87020311731d155 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Fri, 20 Oct 2023 15:17:09 -0600 Subject: [PATCH 11/66] drm/amd/display: Set the DML2 attribute to false in all DCNs older than version 3.5 When DML2 was introduced, it targeted only new DCN versions. For controlling which ASIC should use this new version of DML, it was introduced the using_dml2 attribute. To avoid ambiguities, this commit explicitly sets using_dml2 to false in all ASICs that do not support DML2. Cc: Vitaly Prosyak Cc: Roman Li Cc: Qingqing Zhuo Cc: Daniel Wheeler Cc: Alex Deucher Reviewed-by: Harry Wentland Signed-off-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 1 + drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 1 + drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c | 1 + drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 1 + drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 1 + drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 3 ++- drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c | 1 + drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c | 1 + drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 1 + drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c | 3 ++- drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c | 1 + drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c | 1 + drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c | 1 + 13 files changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index d1d8e904346e..b94c5c97eee7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -554,6 +554,7 @@ static const struct dc_debug_options debug_defaults_drv = { .max_downscale_src_width = 3840, .underflow_assert_delay_us = 0xFFFFFFFF, .enable_legacy_fast_update = true, + .using_dml2 = false, }; static const struct dc_debug_options debug_defaults_diags = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 7eda4bbcd8ac..0a422fbb14bc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -723,6 +723,7 @@ static const struct dc_debug_options debug_defaults_drv = { .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, .enable_legacy_fast_update = true, + .using_dml2 = false, }; void dcn20_dpp_destroy(struct dpp **dpp) diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c index a11b2f6afe4a..bca22d867696 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c @@ -614,6 +614,7 @@ static const struct dc_debug_options debug_defaults_drv = { .underflow_assert_delay_us = 0xFFFFFFFF, .enable_tri_buf = false, .enable_legacy_fast_update = true, + .using_dml2 = false, }; static void dcn201_dpp_destroy(struct dpp **dpp) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 58a0d37e9523..42277b280586 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -654,6 +654,7 @@ static const struct dc_debug_options debug_defaults_drv = { .dmub_command_table = true, .use_max_lb = true, .enable_legacy_fast_update = true, + .using_dml2 = false, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 473581cff06b..7b259cb5f418 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -729,6 +729,7 @@ static const struct dc_debug_options debug_defaults_drv = { .use_max_lb = true, .exit_idle_opt_for_cursor_updates = true, .enable_legacy_fast_update = false, + .using_dml2 = false, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index b4b3b52990b9..f3b75f283aa2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -701,7 +701,8 @@ static const struct dc_debug_options debug_defaults_drv = { .dwb_fi_phase = -1, // -1 = disable .dmub_command_table = true, .use_max_lb = false, - .exit_idle_opt_for_cursor_updates = true + .exit_idle_opt_for_cursor_updates = true, + .using_dml2 = false, }; static void dcn301_dpp_destroy(struct dpp **dpp) diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index 06332bd4e625..63ac984a04f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -99,6 +99,7 @@ static const struct dc_debug_options debug_defaults_drv = { .use_max_lb = true, .exit_idle_opt_for_cursor_updates = true, .enable_legacy_fast_update = false, + .using_dml2 = false, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 0d91291a54a9..3f0b52f50d93 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -81,6 +81,7 @@ static const struct dc_debug_options debug_defaults_drv = { .dmub_command_table = true, .exit_idle_opt_for_cursor_updates = true, .disable_idle_power_optimizations = false, + .using_dml2 = false, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index cdf005f91869..79416cfb22f0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -893,6 +893,7 @@ static const struct dc_debug_options debug_defaults_drv = { .enable_legacy_fast_update = true, .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE, + .using_dml2 = false, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index 2d7436f2ea82..ab301ea7c10b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -924,7 +924,8 @@ static const struct dc_debug_options debug_defaults_drv = { } }, - .seamless_boot_odm_combine = true + .seamless_boot_odm_combine = true, + .using_dml2 = false, }; static const struct dc_debug_options debug_defaults_diags = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c index c11dbb1f4033..cb8024eee8e4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -889,6 +889,7 @@ static const struct dc_debug_options debug_defaults_drv = { }, .enable_legacy_fast_update = true, .psr_power_use_phy_fsm = 0, + .using_dml2 = false, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c index 4220fe4cae4a..b9753d4606f8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c @@ -885,6 +885,7 @@ static const struct dc_debug_options debug_defaults_drv = { } }, .enable_legacy_fast_update = true, + .using_dml2 = false, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index 44caf6711589..0b1ce6e28e11 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -732,6 +732,7 @@ static const struct dc_debug_options debug_defaults_drv = { .fpo_vactive_max_blank_us = 1000, .enable_legacy_fast_update = false, .disable_dc_mode_overwrite = true, + .using_dml2 = false, }; static struct dce_aux *dcn321_aux_engine_create( From b0399e22ada096435de3e3e73899aa8bc026820d Mon Sep 17 00:00:00 2001 From: Agustin Gutierrez Date: Mon, 2 Oct 2023 10:21:08 -0400 Subject: [PATCH 12/66] drm/amd/display: Remove power sequencing check [Why] Some ASICs keep backlight powered on after dpms off command has been issued. [How] The check for no edp power sequencing was never going to pass. The value is never changed from what it is set by design. 
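As an illustrative aside, not part of the patch: when one term of a guard is fixed by design it contributes nothing, and the condition collapses to the remaining per-link flag. The few lines below use a hypothetical struct and a corrected spelling of the flag name purely to show the simplified check in disable_link_dp().

        #include <stdbool.h>

        struct edp_link {
                bool skip_implicit_edp_power_control;   /* hypothetical field name */
        };

        /*
         * On the affected ASICs edp_no_power_sequencing is set by design and never
         * changes, so "!edp_no_power_sequencing && ..." could never be true and the
         * panel was never powered down.  Dropping that term leaves only the
         * per-link skip flag in charge of the power-down decision.
         */
        bool edp_should_power_down(const struct edp_link *link)
        {
                return !link->skip_implicit_edp_power_control;
        }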
Cc: stable@vger.kernel.org # 6.1+ Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2765 Reviewed-by: Swapnil Patel Acked-by: Roman Li Signed-off-by: Agustin Gutierrez Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 4538451945b4..34a4a8c0e18c 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -1932,8 +1932,7 @@ static void disable_link_dp(struct dc_link *link, dp_disable_link_phy(link, link_res, signal); if (link->connector_signal == SIGNAL_TYPE_EDP) { - if (!link->dc->config.edp_no_power_sequencing && - !link->skip_implict_edp_power_control) + if (!link->skip_implict_edp_power_control) link->dc->hwss.edp_power_control(link, false); } From e5f52a84bf0a817016ecd13e320fe3c3c807a83c Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Fri, 20 Oct 2023 10:26:29 -0500 Subject: [PATCH 13/66] drm/amd: Disable ASPM for VI w/ all Intel systems Originally we were quirking ASPM disabled specifically for VI when used with Alder Lake, but it appears to have problems with Rocket Lake as well. Like we've done in the case of dpm for newer platforms, disable ASPM for all Intel systems. Cc: stable@vger.kernel.org # 5.15+ Fixes: 0064b0ce85bb ("drm/amd/pm: enable ASPM by default") Reported-and-tested-by: Paolo Gentili Closes: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/2036742 Signed-off-by: Mario Limonciello Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 6a8494f98d3e..fe8ba9e9837b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1124,7 +1124,7 @@ static void vi_program_aspm(struct amdgpu_device *adev) bool bL1SS = false; bool bClkReqSupport = true; - if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk()) + if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_pcie_dynamic_switching_supported()) return; if (adev->flags & AMD_IS_APU || From 210aa6650c10ee4aae60e7533303b7b28947c684 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Fri, 20 Oct 2023 10:06:50 -0600 Subject: [PATCH 14/66] drm/amd/display: Fix DMUB errors introduced by DML2 When DML 2 was introduced, it changed part of the generic sequence of DC, which caused issues on previous DCNs with DMUB support. This commit ensures the new sequence only works for new DCNs from 3.5 and above. Changes since V1: - Harry: Use the attribute using_dml2 instead of check the DCN version. 
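As an illustrative aside, not part of the patch: the fix gates the newer DML2-era refclk query behind the per-ASIC using_dml2 debug flag instead of running it everywhere. The sketch below uses hypothetical types and a function-pointer stand-in for hubbub->funcs->get_dchub_ref_freq to show that gating shape.

        #include <stdbool.h>
        #include <stddef.h>

        struct ref_clocks {
                int dccg_ref_khz;
                int dchub_ref_khz;
        };

        /* Stand-in for the optional hardware hook; NULL when the DCN lacks it. */
        typedef void (*get_dchub_ref_freq_t)(int dccg_ref_khz, int *dchub_ref_khz);

        /*
         * Only ASICs that opted into DML2 run the new query; older DCNs keep the
         * previously derived value, which is what avoids the DMUB errors.
         */
        void update_dchub_ref(bool using_dml2, get_dchub_ref_freq_t hook,
                              struct ref_clocks *clk)
        {
                if (using_dml2 && hook)
                        hook(clk->dccg_ref_khz, &clk->dchub_ref_khz);
        }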
Cc: Vitaly Prosyak Cc: Roman Li Cc: Qingqing Zhuo Cc: Daniel Wheeler Cc: Alex Deucher Tested-by: Daniel Wheeler Reviewed-by: Harry Wentland Fixes: 7966f319c66d ("drm/amd/display: Introduce DML2") Signed-off-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 97f402123fbb..f9e472f08e21 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -321,10 +321,11 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc, res_pool->ref_clocks.xtalin_clock_inKhz; res_pool->ref_clocks.dchub_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz; - if (res_pool->hubbub && res_pool->hubbub->funcs->get_dchub_ref_freq) - res_pool->hubbub->funcs->get_dchub_ref_freq(res_pool->hubbub, - res_pool->ref_clocks.dccg_ref_clock_inKhz, - &res_pool->ref_clocks.dchub_ref_clock_inKhz); + if (dc->debug.using_dml2) + if (res_pool->hubbub && res_pool->hubbub->funcs->get_dchub_ref_freq) + res_pool->hubbub->funcs->get_dchub_ref_freq(res_pool->hubbub, + res_pool->ref_clocks.dccg_ref_clock_inKhz, + &res_pool->ref_clocks.dchub_ref_clock_inKhz); } else ASSERT_CRITICAL(false); } From fc4981b69c59b8c8ddedf0df47520cb592894c03 Mon Sep 17 00:00:00 2001 From: Lang Yu Date: Mon, 23 Oct 2023 17:04:19 +0800 Subject: [PATCH 15/66] drm/amdgpu/vpe: correct queue stop programing Otherwise IB test would fail during GPU reset. Signed-off-by: Lang Yu Reviewed-by: Yifan Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c index 756f39348dd9..174f13eff575 100644 --- a/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c @@ -205,19 +205,21 @@ static int vpe_v6_1_ring_start(struct amdgpu_vpe *vpe) static int vpe_v_6_1_ring_stop(struct amdgpu_vpe *vpe) { struct amdgpu_device *adev = vpe->ring.adev; - uint32_t rb_cntl, ib_cntl; + uint32_t queue_reset; + int ret; - rb_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_CNTL)); - rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_ENABLE, 0); - WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_CNTL), rb_cntl); + queue_reset = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE_RESET_REQ)); + queue_reset = REG_SET_FIELD(queue_reset, VPEC_QUEUE_RESET_REQ, QUEUE0_RESET, 1); + WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE_RESET_REQ), queue_reset); - ib_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_IB_CNTL)); - ib_cntl = REG_SET_FIELD(ib_cntl, VPEC_QUEUE0_IB_CNTL, IB_ENABLE, 0); - WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_IB_CNTL), ib_cntl); + ret = SOC15_WAIT_ON_RREG(VPE, 0, regVPEC_QUEUE_RESET_REQ, 0, + VPEC_QUEUE_RESET_REQ__QUEUE0_RESET_MASK); + if (ret) + dev_err(adev->dev, "VPE queue reset failed\n"); vpe->ring.sched.ready = false; - return 0; + return ret; } static int vpe_v6_1_set_trap_irq_state(struct amdgpu_device *adev, From f3a3bbf1566c7b6b0f9ac36e8e597c73dc0afdf8 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Fri, 20 Oct 2023 17:21:58 +0800 Subject: [PATCH 16/66] drm/amdgpu: enable RAS poison mode for APU Enable it by default on APU platform. 
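As an illustrative aside, not part of the patch: the poison-supported decision now treats APU-style parts like CPU-coherent ones. The sketch below is plain C with hypothetical fields; the non-APU fallback (a data-fabric query, truncated in the hunk below) is reduced to a single boolean for illustration.

        #include <stdbool.h>

        struct gpu_caps {
                bool connected_to_cpu;  /* GPU coherently attached to the CPU */
                bool is_app_apu;        /* APU-style platform */
                bool df_reports_poison; /* stand-in for the data-fabric query result */
        };

        /* CPU-coherent parts and, after this patch, APUs default to supporting
         * RAS poison consumption; otherwise the fabric query result decides. */
        bool poison_supported(const struct gpu_caps *c)
        {
                if (c->connected_to_cpu || c->is_app_apu)
                        return true;
                return c->df_reports_poison;
        }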
Signed-off-by: Tao Zhou Reviewed-by: Stanley.Yang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index fe6b44eb6602..f4c2c737b12f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -2738,7 +2738,8 @@ static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev) return; /* Init poison supported flag, the default value is false */ - if (adev->gmc.xgmi.connected_to_cpu) { + if (adev->gmc.xgmi.connected_to_cpu || + adev->gmc.is_app_apu) { /* enabled by default when GPU is connected to CPU */ con->poison_supported = true; } else if (adev->df.funcs && From 73582be11ac8f6d6765e185bf48f22efb9d28c3b Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Thu, 12 Oct 2023 14:33:37 +0800 Subject: [PATCH 17/66] drm/amdgpu: bypass RAS error reset in some conditions PMFW is responsible for RAS error reset in some conditions, driver can skip the operation. v2: add check for ras->in_recovery, it's set earlier than amdgpu_in_reset. v3: fix error in gpu reset check. Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index f4c2c737b12f..303fbb6a48b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1220,6 +1220,8 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block block) { struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0); + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; if (!block_obj || !block_obj->hw_ops) { dev_dbg_once(adev->dev, "%s doesn't config RAS function\n", @@ -1227,7 +1229,13 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev, return -EOPNOTSUPP; } - if (!amdgpu_ras_is_supported(adev, block)) + /* skip ras error reset in gpu reset */ + if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery)) && + mca_funcs && mca_funcs->mca_set_debug_mode) + return -EOPNOTSUPP; + + if (!amdgpu_ras_is_supported(adev, block) || + !amdgpu_ras_get_mca_debug_mode(adev)) return -EOPNOTSUPP; if (block_obj->hw_ops->reset_ras_error_count) From 5104fdf50d326db2c1a994f8b35dcd46e63ae4ad Mon Sep 17 00:00:00 2001 From: Qu Huang Date: Mon, 23 Oct 2023 12:56:37 +0000 Subject: [PATCH 18/66] drm/amdgpu: Fix a null pointer access when the smc_rreg pointer is NULL In certain types of chips, such as VEGA20, reading the amdgpu_regs_smc file could result in an abnormal null pointer access when the smc_rreg pointer is NULL. Below are the steps to reproduce this issue and the corresponding exception log: 1. Navigate to the directory: /sys/kernel/debug/dri/0 2. Execute command: cat amdgpu_regs_smc 3. 
Exception Log:: [4005007.702554] BUG: kernel NULL pointer dereference, address: 0000000000000000 [4005007.702562] #PF: supervisor instruction fetch in kernel mode [4005007.702567] #PF: error_code(0x0010) - not-present page [4005007.702570] PGD 0 P4D 0 [4005007.702576] Oops: 0010 [#1] SMP NOPTI [4005007.702581] CPU: 4 PID: 62563 Comm: cat Tainted: G OE 5.15.0-43-generic #46-Ubunt u [4005007.702590] RIP: 0010:0x0 [4005007.702598] Code: Unable to access opcode bytes at RIP 0xffffffffffffffd6. [4005007.702600] RSP: 0018:ffffa82b46d27da0 EFLAGS: 00010206 [4005007.702605] RAX: 0000000000000000 RBX: 0000000000000000 RCX: ffffa82b46d27e68 [4005007.702609] RDX: 0000000000000001 RSI: 0000000000000000 RDI: ffff9940656e0000 [4005007.702612] RBP: ffffa82b46d27dd8 R08: 0000000000000000 R09: ffff994060c07980 [4005007.702615] R10: 0000000000020000 R11: 0000000000000000 R12: 00007f5e06753000 [4005007.702618] R13: ffff9940656e0000 R14: ffffa82b46d27e68 R15: 00007f5e06753000 [4005007.702622] FS: 00007f5e0755b740(0000) GS:ffff99479d300000(0000) knlGS:0000000000000000 [4005007.702626] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [4005007.702629] CR2: ffffffffffffffd6 CR3: 00000003253fc000 CR4: 00000000003506e0 [4005007.702633] Call Trace: [4005007.702636] [4005007.702640] amdgpu_debugfs_regs_smc_read+0xb0/0x120 [amdgpu] [4005007.703002] full_proxy_read+0x5c/0x80 [4005007.703011] vfs_read+0x9f/0x1a0 [4005007.703019] ksys_read+0x67/0xe0 [4005007.703023] __x64_sys_read+0x19/0x20 [4005007.703028] do_syscall_64+0x5c/0xc0 [4005007.703034] ? do_user_addr_fault+0x1e3/0x670 [4005007.703040] ? exit_to_user_mode_prepare+0x37/0xb0 [4005007.703047] ? irqentry_exit_to_user_mode+0x9/0x20 [4005007.703052] ? irqentry_exit+0x19/0x30 [4005007.703057] ? exc_page_fault+0x89/0x160 [4005007.703062] ? asm_exc_page_fault+0x8/0x30 [4005007.703068] entry_SYSCALL_64_after_hwframe+0x44/0xae [4005007.703075] RIP: 0033:0x7f5e07672992 [4005007.703079] Code: c0 e9 b2 fe ff ff 50 48 8d 3d fa b2 0c 00 e8 c5 1d 02 00 0f 1f 44 00 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 0f 05 <48> 3d 00 f0 ff ff 77 56 c3 0f 1f 44 00 00 48 83 e c 28 48 89 54 24 [4005007.703083] RSP: 002b:00007ffe03097898 EFLAGS: 00000246 ORIG_RAX: 0000000000000000 [4005007.703088] RAX: ffffffffffffffda RBX: 0000000000020000 RCX: 00007f5e07672992 [4005007.703091] RDX: 0000000000020000 RSI: 00007f5e06753000 RDI: 0000000000000003 [4005007.703094] RBP: 00007f5e06753000 R08: 00007f5e06752010 R09: 00007f5e06752010 [4005007.703096] R10: 0000000000000022 R11: 0000000000000246 R12: 0000000000022000 [4005007.703099] R13: 0000000000000003 R14: 0000000000020000 R15: 0000000000020000 [4005007.703105] [4005007.703107] Modules linked in: nf_tables libcrc32c nfnetlink algif_hash af_alg binfmt_misc nls_ iso8859_1 ipmi_ssif ast intel_rapl_msr intel_rapl_common drm_vram_helper drm_ttm_helper amd64_edac t tm edac_mce_amd kvm_amd ccp mac_hid k10temp kvm acpi_ipmi ipmi_si rapl sch_fq_codel ipmi_devintf ipm i_msghandler msr parport_pc ppdev lp parport mtd pstore_blk efi_pstore ramoops pstore_zone reed_solo mon ip_tables x_tables autofs4 ib_uverbs ib_core amdgpu(OE) amddrm_ttm_helper(OE) amdttm(OE) iommu_v 2 amd_sched(OE) amdkcl(OE) drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops cec rc_core drm igb ahci xhci_pci libahci i2c_piix4 i2c_algo_bit xhci_pci_renesas dca [4005007.703184] CR2: 0000000000000000 [4005007.703188] ---[ end trace ac65a538d240da39 ]--- [4005007.800865] RIP: 0010:0x0 [4005007.800871] Code: Unable to access opcode bytes at RIP 0xffffffffffffffd6. 
[4005007.800874] RSP: 0018:ffffa82b46d27da0 EFLAGS: 00010206 [4005007.800878] RAX: 0000000000000000 RBX: 0000000000000000 RCX: ffffa82b46d27e68 [4005007.800881] RDX: 0000000000000001 RSI: 0000000000000000 RDI: ffff9940656e0000 [4005007.800883] RBP: ffffa82b46d27dd8 R08: 0000000000000000 R09: ffff994060c07980 [4005007.800886] R10: 0000000000020000 R11: 0000000000000000 R12: 00007f5e06753000 [4005007.800888] R13: ffff9940656e0000 R14: ffffa82b46d27e68 R15: 00007f5e06753000 [4005007.800891] FS: 00007f5e0755b740(0000) GS:ffff99479d300000(0000) knlGS:0000000000000000 [4005007.800895] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [4005007.800898] CR2: ffffffffffffffd6 CR3: 00000003253fc000 CR4: 00000000003506e0 Signed-off-by: Qu Huang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 3136a0774dd9..a53f436fa9f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -748,6 +748,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, ssize_t result = 0; int r; + if (!adev->smc_rreg) + return -EPERM; + if (size & 0x3 || *pos & 0x3) return -EINVAL; @@ -804,6 +807,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * ssize_t result = 0; int r; + if (!adev->smc_wreg) + return -EPERM; + if (size & 0x3 || *pos & 0x3) return -EINVAL; From 0300882ed6238bfd6343bbd06eb776eb65dedece Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Mon, 23 Oct 2023 21:35:30 +0530 Subject: [PATCH 19/66] drm/amdkfd: Address 'remap_list' not described in 'svm_range_add' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes the below: drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_svm.c:2073: warning: Function parameter or member 'remap_list' not described in 'svm_range_add' Cc: Felix Kuehling Cc: Christian König Cc: Alex Deucher Cc: "Pan, Xinhui" Signed-off-by: Srinivasan Shanmugam Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index bf041b5a08a5..3b04dd3c89d7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -2035,6 +2035,7 @@ svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last, * @update_list: output, the ranges need validate and update GPU mapping * @insert_list: output, the ranges need insert to svms * @remove_list: output, the ranges are replaced and need remove from svms + * @remap_list: output, remap unaligned svm ranges * * Check if the virtual address range has overlap with any existing ranges, * split partly overlapping ranges and add new ranges in the gaps. All changes From 9ee819285c2c13fb9283c4cf8b1b9b69fbba986f Mon Sep 17 00:00:00 2001 From: "Lin.Cao" Date: Wed, 18 Oct 2023 10:15:06 +0800 Subject: [PATCH 20/66] drm/amdgpu remove restriction of sriov max_pfn on Vega10 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove restriction of sriov max_pfn so that TBA and TMA can move to high 47 bits address. Regression test: change range alloc flag of libdrm as AMDGPU_VA_RANGE_HIGH and there is no flr occur when testing amdgpu_test of drm. 
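As an aside on the numbers, not part of the patch: the VM size both branches passed, 256 * 1024 GiB, is exactly a 48-bit byte-address space (the "256TB (48bit)" in the code comment below), so dropping the SR-IOV special case changes only the usable VA width, from 47 to 48 bits, not the VM size. A two-line check:

        #include <assert.h>
        #include <stdint.h>

        int main(void)
        {
                /* 256 * 1024 GiB, the value passed to the VM size adjustment. */
                uint64_t vm_size_bytes = 256ULL * 1024 * (1ULL << 30);

                /* ...is exactly the 256 TiB that a 48-bit address space covers. */
                assert(vm_size_bytes == 1ULL << 48);
                return 0;
        }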
Signed-off-by: Lin.Cao Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 5fed01e34928..fee3141bb607 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -2018,11 +2018,8 @@ static int gmc_v9_0_sw_init(void *handle) * vm size is 256TB (48bit), maximum size of Vega10, * block size 512 (9bit) */ - /* sriov restrict max_pfn below AMDGPU_GMC_HOLE */ - if (amdgpu_sriov_vf(adev)) - amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47); - else - amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); + + amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2)) adev->gmc.translate_further = adev->vm_manager.num_level > 1; break; From b258a4d5b383f0c087dd231dee2662126f3d0d83 Mon Sep 17 00:00:00 2001 From: Stylon Wang Date: Thu, 19 Oct 2023 22:43:05 +0800 Subject: [PATCH 21/66] drm/amd/display: Add missing copyright notice in DMUB [Why & How] Add missing/incomplete copyright notice in DMUB files Reviewed-by: Harry Wentland Signed-off-by: Stylon Wang Signed-off-by: Alex Deucher --- .../drm/amd/display/dmub/src/dmub_dcn303.c | 19 +++++++++++++++++++ .../drm/amd/display/dmub/src/dmub_dcn303.h | 19 +++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.c index b42369984473..878700160fa9 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.c @@ -2,7 +2,26 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * * Authors: AMD + * */ #include "../dmub_srv.h" diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.h index 84141d450256..abe087251cc1 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.h @@ -2,7 +2,26 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. 
* + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * * Authors: AMD + * */ #ifndef _DMUB_DCN303_H_ From d30a584cd70ebc5a8be3bd38ea1f184018bff151 Mon Sep 17 00:00:00 2001 From: Stylon Wang Date: Thu, 19 Oct 2023 22:46:51 +0800 Subject: [PATCH 22/66] drm/amd/display: Fix copyright notice in DML2 code [Why & How] Fix incomplete copyright notice in DML2 code. Reviewed-by: Harry Wentland Signed-off-by: Stylon Wang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml2/Makefile | 4 +++- drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h | 2 ++ drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c | 2 ++ .../gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h | 2 ++ .../gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h | 2 ++ drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c | 2 ++ drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h | 2 ++ drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c | 2 ++ drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h | 2 ++ drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h | 2 ++ drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h | 2 ++ drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c | 2 ++ drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h | 2 ++ drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c | 2 ++ drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 2 ++ drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h | 2 ++ drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c | 2 ++ drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 2 ++ drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 2 ++ drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h | 2 ++ drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h | 2 ++ drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h | 2 ++ 22 files changed, 45 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile index 66431525f2a0..70ae5eba624e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile @@ -20,7 +20,9 @@ # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # -# makefile for dml2 +# Authors: AMD +# +# Makefile for dml2. 
ifdef CONFIG_X86 dml2_ccflags-$(CONFIG_CC_IS_GCC) := -mhard-float diff --git a/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h b/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h index 5450aa5295f7..e450445bc05d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __CMNTYPES_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c index 5f54251a559c..510be909cd75 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "display_mode_core.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h index c2fa28ff57ab..b274bfb4225f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DISPLAY_MODE_CORE_STRUCT_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h index 99bdb2ddd8ab..de63364be01d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DISPLAY_MODE_LIB_DEFINES_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c index 7dd1f8a12582..c247aee89caf 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "display_mode_util.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h index fb74385e1060..113b0265e1d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DISPLAY_MODE_UTIL_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c index f45fbe820445..393ecad64636 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* + * Authors: AMD + * */ #include "dml2_mall_phantom.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h index a78de194793d..2f91244a7b01 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DML2_DC_RESOURCE_MGMT_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h index 191b2e63ce6e..e85866db80ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h index bcb59bcd9179..ed5b767d46e0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DML2_INTERNAL_TYPES_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c index 02797cb2667e..32f8a43af3d6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "dml2_dc_types.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h index c14b4de29d73..9d64851f54e7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DML2_MALL_PHANTOM_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c index f8e9aa32ceab..c4c52173ef22 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "dml2_policy.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index e5ccd2887c94..331f6bd97d38 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* + * Authors: AMD + * */ #include "display_mode_core.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h index 1bcfca51e665..dac6d27b14cd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DML2_TRANSLATION_HELPER_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c index ac6bf776bad0..69fd96f4f3b0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ //#include "dml2_utils.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 9a5e145168bc..0a06bf3b135a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "display_mode_core.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index f3b85b0891d3..252442ea9d3d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef _DML2_WRAPPER_H_ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h b/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h index b5e463aa61af..17f0972b1af7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DML_ASSERT_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h b/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h index d8c7f7497e9c..f7d30b47beff 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ /* This header intentinally does not include an #ifdef guard as it only contains includes for other headers*/ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h b/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h index 890c0a072012..2a2f84e07ca8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* + * Authors: AMD + * */ #ifndef __DML_LOGGING_H__ #define __DML_LOGGING_H__ From 78964fcac47fc1525ecb4c37cd5fbc873c28320b Mon Sep 17 00:00:00 2001 From: Stylon Wang Date: Thu, 19 Oct 2023 22:49:15 +0800 Subject: [PATCH 23/66] drm/amd/display: Fix copyright notice in DC code [Why & How] Fix incomplete copyright notice in DC code. Reviewed-by: Harry Wentland Signed-off-by: Stylon Wang Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn303/dcn303_dccg.h | 18 ++++++++++++++++++ .../drm/amd/display/dc/dcn303/dcn303_init.c | 18 ++++++++++++++++++ .../drm/amd/display/dc/dcn303/dcn303_init.h | 18 ++++++++++++++++++ .../amd/display/dc/dcn303/dcn303_resource.c | 18 ++++++++++++++++++ .../amd/display/dc/dcn303/dcn303_resource.h | 18 ++++++++++++++++++ drivers/gpu/drm/amd/display/dc/dcn31/Makefile | 2 +- .../gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c | 2 ++ .../gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h | 2 ++ .../gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c | 2 ++ .../gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h | 2 ++ .../gpu/drm/amd/display/dc/dcn35/dcn35_dwb.h | 2 ++ .../drm/amd/display/dc/dcn35/dcn35_hubbub.c | 2 ++ .../drm/amd/display/dc/dcn35/dcn35_hubbub.h | 2 ++ .../gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c | 2 ++ .../gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h | 2 ++ .../gpu/drm/amd/display/dc/dcn35/dcn35_init.c | 2 ++ .../gpu/drm/amd/display/dc/dcn35/dcn35_init.h | 2 ++ .../drm/amd/display/dc/dcn35/dcn35_mmhubbub.c | 2 ++ .../drm/amd/display/dc/dcn35/dcn35_mmhubbub.h | 2 ++ .../gpu/drm/amd/display/dc/dcn35/dcn35_opp.c | 2 ++ .../gpu/drm/amd/display/dc/dcn35/dcn35_opp.h | 2 ++ .../gpu/drm/amd/display/dc/dcn35/dcn35_optc.c | 2 ++ .../gpu/drm/amd/display/dc/dcn35/dcn35_optc.h | 2 ++ .../drm/amd/display/dc/dcn35/dcn35_pg_cntl.c | 2 ++ .../drm/amd/display/dc/dcn35/dcn35_pg_cntl.h | 2 ++ .../drm/amd/display/dc/dcn35/dcn35_resource.c | 2 ++ .../drm/amd/display/dc/dcn35/dcn35_resource.h | 2 ++ drivers/gpu/drm/amd/display/dc/hdcp/Makefile | 2 +- .../amd/display/dc/hwss/dcn303/dcn303_hwseq.c | 19 +++++++++++++++++++ .../amd/display/dc/hwss/dcn303/dcn303_hwseq.h | 19 +++++++++++++++++++ .../amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 2 ++ .../amd/display/dc/hwss/dcn35/dcn35_hwseq.h | 2 ++ .../dc/irq/dcn201/irq_service_dcn201.c | 2 +- .../dc/irq/dcn303/irq_service_dcn303.c | 19 +++++++++++++++++++ .../dc/irq/dcn303/irq_service_dcn303.h | 19 +++++++++++++++++++ 35 files changed, 215 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h index 294bd757bcb5..2e12fb643005 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h @@ -2,6 +2,24 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * * Authors: AMD */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c index 39cf7a50bd26..edb4d68b8187 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c @@ -2,6 +2,24 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * * Authors: AMD */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h index 66b1e3604f07..4949981126d7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h @@ -2,6 +2,24 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * * Authors: AMD */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 3f0b52f50d93..49cb7fde416a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -2,6 +2,24 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * * Authors: AMD */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h index 9c7d79540900..37cf1525820b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h @@ -2,6 +2,24 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * * Authors: AMD */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile index 996d8c1e9d2a..96e45c9efb46 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile @@ -1,5 +1,5 @@ # -# (c) Copyright 2020 Advanced Micro Devices, Inc. All the rights reserved +# Copyright 2020 Advanced Micro Devices, Inc. All the rights reserved # # All rights reserved. 
This notice is intended as a precaution against # inadvertent publication and does not imply publication or any waiver diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c index 84a9afb7098a..3341ef71009b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "core_types.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h index 17f5344994f0..09b84307cd9e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DCN35_DPP_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c index d7915c96bcd1..71d2dff9986d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "dcn35_dsc.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h index c19c2e022f12..133ad38842cc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DCN35_DSC_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dwb.h index 877f93c8168e..886e727ed080 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dwb.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dwb.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DCN35_DWB_H diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c index f8e63bd541bc..339bf0c722dd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.h index d57ed580305e..54cf00ffceb8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* + * Authors: AMD + * */ #ifndef __DC_HUBBUB_DCN35_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c index 2ae7b151b56c..1ed58660779e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "dcn35_hubp.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h index a8879c3db447..3d830f93141e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DC_HUBP_DCN35_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c index 534223dbe595..296bf3a38cb9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "dce110/dce110_hwseq.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h index ccfc28225cff..b67015032c35 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DC_DCN35_INIT_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.c index ea1042cdc88d..4317100564a4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "dcn35_mmhubbub.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.h index e7b5b6703e73..098e13e07272 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_mmhubbub.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DCN35_MMHUBBUB_H diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.c index d79e8c6365c1..3542b51c9aac 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* + * Authors: AMD + * */ #include "dcn35_opp.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.h index 9dd21b104287..a9a413527801 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_opp.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DCN35_OPP_H diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c index b0c068240a94..a4a39f1638cf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "dcn35_optc.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h index 7e7a5f4b85b0..1f422e4c468f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DC_OPTC_DCN35_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c index e62a192c595e..0f60c40e1fc5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "reg_helper.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h index d073ce5cc6f3..3de240884d22 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef _DCN35_PG_CNTL_H_ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c index 99d55b958977..3edc57acab78 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "dm_services.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h index 5ec70d46a38f..99aea102e3f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef _DCN35_RESOURCE_H_ diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/Makefile b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile index 4170b6eb9ec0..c1c47a6cefe1 100644 --- a/drivers/gpu/drm/amd/display/dc/hdcp/Makefile +++ b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile @@ -1,4 +1,4 @@ -# Copyright 2019 Advanced Micro Devices, Inc. +# Copyright 2022 Advanced Micro Devices, Inc. 
# # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.c index b48b732aa647..3bc56ac346f3 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.c @@ -2,7 +2,26 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * * Authors: AMD + * */ #include "dcn303_hwseq.h" diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.h index 8b69a3b76c11..7fdfc4175f80 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.h @@ -2,7 +2,26 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * * Authors: AMD + * */ #ifndef __DC_HWSS_DCN303_H__ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 0e218f9e2a86..d3429134b2e3 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #include "dm_services.h" diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h index 14bbdb0fa634..0dff10d179b8 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h @@ -20,6 +20,8 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * + * Authors: AMD + * */ #ifndef __DC_HWSS_DCN35_H__ diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c index 5171d04519ee..4fb9cd6708d5 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c @@ -1,5 +1,5 @@ /* - * Copyright 2018 Advanced Micro Devices, Inc. + * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c index e09ca4594ec3..262bb8b74b15 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c @@ -2,7 +2,26 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * * Authors: AMD + * */ #include "dm_services.h" diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.h b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.h index fd64e3848ff3..be8fe836b3f1 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.h +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.h @@ -2,7 +2,26 @@ /* * Copyright (C) 2021 Advanced Micro Devices, Inc. 
* + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * * Authors: AMD + * */ #ifndef __DAL_IRQ_SERVICE_DCN303_H__ From afaec204d2912305d907abeac14c640f1cad2592 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Mon, 23 Oct 2023 16:05:24 -0400 Subject: [PATCH 24/66] Revert "drm/amdkfd:remove unused code" This reverts commit f9caf6cdd5cc1f4006fd7b6b113658c0b0159f23. Needed for the next revert patch. Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 60 ++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 3 ++ 2 files changed, 63 insertions(+) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 3b04dd3c89d7..77259d8fb671 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1147,6 +1147,66 @@ svm_range_add_child(struct svm_range *prange, struct mm_struct *mm, list_add_tail(&pchild->child_list, &prange->child_list); } +/** + * svm_range_split_by_granularity - collect ranges within granularity boundary + * + * @p: the process with svms list + * @mm: mm structure + * @addr: the vm fault address in pages, to split the prange + * @parent: parent range if prange is from child list + * @prange: prange to split + * + * Trims @prange to be a single aligned block of prange->granularity if + * possible. The head and tail are added to the child_list in @parent. + * + * Context: caller must hold mmap_read_lock and prange->lock + * + * Return: + * 0 - OK, otherwise error code + */ +int +svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm, + unsigned long addr, struct svm_range *parent, + struct svm_range *prange) +{ + struct svm_range *head, *tail; + unsigned long start, last, size; + int r; + + /* Align splited range start and size to granularity size, then a single + * PTE will be used for whole range, this reduces the number of PTE + * updated and the L1 TLB space used for translation. 
+ */ + size = 1UL << prange->granularity; + start = ALIGN_DOWN(addr, size); + last = ALIGN(addr + 1, size) - 1; + + pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n", + prange->svms, prange->start, prange->last, start, last, size); + + if (start > prange->start) { + r = svm_range_split(prange, start, prange->last, &head); + if (r) + return r; + svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE); + } + + if (last < prange->last) { + r = svm_range_split(prange, prange->start, last, &tail); + if (r) + return r; + svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE); + } + + /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */ + if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) { + prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP; + pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n", + prange, prange->start, prange->last, + SVM_OP_ADD_RANGE_AND_MAP); + } + return 0; +} static bool svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 026863a0abcd..be11ba0c4289 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -172,6 +172,9 @@ struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange, int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, bool clear); void svm_range_vram_node_free(struct svm_range *prange); +int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm, + unsigned long addr, struct svm_range *parent, + struct svm_range *prange); int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, uint32_t vmid, uint32_t node_id, uint64_t addr, bool write_fault); From 541c341d2ee351f8deabef467dab4ba68bfb024f Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Mon, 23 Oct 2023 16:08:34 -0400 Subject: [PATCH 25/66] Revert "drm/amdkfd: Use partial migrations in GPU page faults" This reverts commit dc427a473e5d119232ddb27530920d9796cdea70. The change prevents migrating the entire range to VRAM because retry fault restore_pages map the remaining system memory range to GPUs. It will work correctly to submit together with partial mapping to GPU patch later. 
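For context, the partial-migration path removed by this revert clamped each CPU page fault migration to a granularity-aligned window around the faulting address. A minimal sketch of that logic, mirroring the svm_migrate_to_ram() hunk further below (shown only for illustration; it is not new code added by this revert):

	unsigned long size, start, last;

	/* Limit the migration window to one prange->granularity block
	 * around the faulting page, clamped to [prange->start, prange->last].
	 */
	size = 1UL << prange->granularity;
	start = max(ALIGN_DOWN(addr, size), prange->start);
	last = min(ALIGN(addr + 1, size) - 1, prange->last);

	r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last,
				    KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page);

After the revert, svm_migrate_vram_to_ram() takes no start/last window and operates on the whole prange again, with the granularity handling done up front by svm_range_split_by_granularity().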
Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 150 ++++++++++------------- drivers/gpu/drm/amd/amdkfd/kfd_migrate.h | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 83 +++---------- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 6 +- 4 files changed, 85 insertions(+), 160 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 81d25a679427..6c25dab051d5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -442,10 +442,10 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, goto out_free; } if (cpages != npages) - pr_debug("partial migration, 0x%lx/0x%llx pages collected\n", + pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n", cpages, npages); else - pr_debug("0x%lx pages collected\n", cpages); + pr_debug("0x%lx pages migrated\n", cpages); r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset); migrate_vma_pages(&migrate); @@ -479,8 +479,6 @@ out: * svm_migrate_ram_to_vram - migrate svm range from system to device * @prange: range structure * @best_loc: the device to migrate to - * @start_mgr: start page to migrate - * @last_mgr: last page to migrate * @mm: the process mm structure * @trigger: reason of migration * @@ -491,7 +489,6 @@ out: */ static int svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, - unsigned long start_mgr, unsigned long last_mgr, struct mm_struct *mm, uint32_t trigger) { unsigned long addr, start, end; @@ -501,30 +498,23 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, unsigned long cpages = 0; long r = 0; - if (!best_loc) { - pr_debug("svms 0x%p [0x%lx 0x%lx] migrate to sys ram\n", - prange->svms, start_mgr, last_mgr); + if (prange->actual_loc == best_loc) { + pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n", + prange->svms, prange->start, prange->last, best_loc); return 0; } - if (start_mgr < prange->start || last_mgr > prange->last) { - pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n", - start_mgr, last_mgr, prange->start, prange->last); - return -EFAULT; - } - node = svm_range_get_node_by_id(prange, best_loc); if (!node) { pr_debug("failed to get kfd node by id 0x%x\n", best_loc); return -ENODEV; } - pr_debug("svms 0x%p [0x%lx 0x%lx] in [0x%lx 0x%lx] to gpu 0x%x\n", - prange->svms, start_mgr, last_mgr, prange->start, prange->last, - best_loc); + pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms, + prange->start, prange->last, best_loc); - start = start_mgr << PAGE_SHIFT; - end = (last_mgr + 1) << PAGE_SHIFT; + start = prange->start << PAGE_SHIFT; + end = (prange->last + 1) << PAGE_SHIFT; r = svm_range_vram_node_new(node, prange, true); if (r) { @@ -554,11 +544,8 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, if (cpages) { prange->actual_loc = best_loc; - prange->vram_pages = prange->vram_pages + cpages; - } else if (!prange->actual_loc) { - /* if no page migrated and all pages from prange are at - * sys ram drop svm_bo got from svm_range_vram_node_new - */ + svm_range_dma_unmap(prange); + } else { svm_range_vram_node_free(prange); } @@ -676,8 +663,9 @@ out_oom: * Context: Process context, caller hold mmap read lock, prange->migrate_mutex * * Return: + * 0 - success with all pages migrated * negative values - indicate error - * positive values or zero - number of pages got migrated + * positive values - partial migration, 
number of pages not migrated */ static long svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, @@ -688,7 +676,6 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, uint64_t npages = (end - start) >> PAGE_SHIFT; unsigned long upages = npages; unsigned long cpages = 0; - unsigned long mpages = 0; struct amdgpu_device *adev = node->adev; struct kfd_process_device *pdd; struct dma_fence *mfence = NULL; @@ -738,10 +725,10 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, goto out_free; } if (cpages != npages) - pr_debug("partial migration, 0x%lx/0x%llx pages collected\n", + pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n", cpages, npages); else - pr_debug("0x%lx pages collected\n", cpages); + pr_debug("0x%lx pages migrated\n", cpages); r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence, scratch, npages); @@ -764,21 +751,17 @@ out_free: kvfree(buf); out: if (!r && cpages) { - mpages = cpages - upages; pdd = svm_range_get_pdd_by_node(prange, node); if (pdd) - WRITE_ONCE(pdd->page_out, pdd->page_out + mpages); + WRITE_ONCE(pdd->page_out, pdd->page_out + cpages); } - - return r ? r : mpages; + return r ? r : upages; } /** * svm_migrate_vram_to_ram - migrate svm range from device to system * @prange: range structure * @mm: process mm, use current->mm if NULL - * @start_mgr: start page need be migrated to sys ram - * @last_mgr: last page need be migrated to sys ram * @trigger: reason of migration * @fault_page: is from vmf->page, svm_migrate_to_ram(), this is CPU page fault callback * @@ -788,7 +771,6 @@ out: * 0 - OK, otherwise error code */ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, - unsigned long start_mgr, unsigned long last_mgr, uint32_t trigger, struct page *fault_page) { struct kfd_node *node; @@ -796,33 +778,26 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, unsigned long addr; unsigned long start; unsigned long end; - unsigned long mpages = 0; + unsigned long upages = 0; long r = 0; - /* this pragne has no any vram page to migrate to sys ram */ if (!prange->actual_loc) { pr_debug("[0x%lx 0x%lx] already migrated to ram\n", prange->start, prange->last); return 0; } - if (start_mgr < prange->start || last_mgr > prange->last) { - pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n", - start_mgr, last_mgr, prange->start, prange->last); - return -EFAULT; - } - node = svm_range_get_node_by_id(prange, prange->actual_loc); if (!node) { pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc); return -ENODEV; } pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n", - prange->svms, prange, start_mgr, last_mgr, + prange->svms, prange, prange->start, prange->last, prange->actual_loc); - start = start_mgr << PAGE_SHIFT; - end = (last_mgr + 1) << PAGE_SHIFT; + start = prange->start << PAGE_SHIFT; + end = (prange->last + 1) << PAGE_SHIFT; for (addr = start; addr < end;) { unsigned long next; @@ -841,21 +816,14 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, pr_debug("failed %ld to migrate prange %p\n", r, prange); break; } else { - mpages += r; + upages += r; } addr = next; } - if (r >= 0) { - prange->vram_pages -= mpages; - - /* prange does not have vram page set its actual_loc to system - * and drop its svm_bo ref - */ - if (prange->vram_pages == 0 && prange->ttm_res) { - prange->actual_loc = 0; - svm_range_vram_node_free(prange); - } + if (r >= 0 && !upages) { + 
svm_range_vram_node_free(prange); + prange->actual_loc = 0; } return r < 0 ? r : 0; @@ -865,23 +833,17 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, * svm_migrate_vram_to_vram - migrate svm range from device to device * @prange: range structure * @best_loc: the device to migrate to - * @start: start page need be migrated to sys ram - * @last: last page need be migrated to sys ram * @mm: process mm, use current->mm if NULL * @trigger: reason of migration * * Context: Process context, caller hold mmap read lock, svms lock, prange lock * - * migrate all vram pages in prange to sys ram, then migrate - * [start, last] pages from sys ram to gpu node best_loc. - * * Return: * 0 - OK, otherwise error code */ static int svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, - unsigned long start, unsigned long last, - struct mm_struct *mm, uint32_t trigger) + struct mm_struct *mm, uint32_t trigger) { int r, retries = 3; @@ -893,8 +855,7 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc); do { - r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last, - trigger, NULL); + r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL); if (r) return r; } while (prange->actual_loc && --retries); @@ -902,21 +863,17 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, if (prange->actual_loc) return -EDEADLK; - return svm_migrate_ram_to_vram(prange, best_loc, start, last, mm, trigger); + return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger); } int svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc, - unsigned long start, unsigned long last, - struct mm_struct *mm, uint32_t trigger) + struct mm_struct *mm, uint32_t trigger) { - if (!prange->actual_loc || prange->actual_loc == best_loc) - return svm_migrate_ram_to_vram(prange, best_loc, start, last, - mm, trigger); - + if (!prange->actual_loc) + return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger); else - return svm_migrate_vram_to_vram(prange, best_loc, start, last, - mm, trigger); + return svm_migrate_vram_to_vram(prange, best_loc, mm, trigger); } @@ -932,9 +889,10 @@ svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc, */ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf) { - unsigned long start, last, size; unsigned long addr = vmf->address; struct svm_range_bo *svm_bo; + enum svm_work_list_ops op; + struct svm_range *parent; struct svm_range *prange; struct kfd_process *p; struct mm_struct *mm; @@ -971,31 +929,51 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf) mutex_lock(&p->svms.lock); - prange = svm_range_from_addr(&p->svms, addr, NULL); + prange = svm_range_from_addr(&p->svms, addr, &parent); if (!prange) { pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr); r = -EFAULT; goto out_unlock_svms; } - mutex_lock(&prange->migrate_mutex); + mutex_lock(&parent->migrate_mutex); + if (prange != parent) + mutex_lock_nested(&prange->migrate_mutex, 1); if (!prange->actual_loc) goto out_unlock_prange; - /* Align migration range start and size to granularity size */ - size = 1UL << prange->granularity; - start = max(ALIGN_DOWN(addr, size), prange->start); - last = min(ALIGN(addr + 1, size) - 1, prange->last); + svm_range_lock(parent); + if (prange != parent) + mutex_lock_nested(&prange->lock, 1); + r = svm_range_split_by_granularity(p, mm, addr, parent, prange); + if (prange != parent) + mutex_unlock(&prange->lock); + 
svm_range_unlock(parent); + if (r) { + pr_debug("failed %d to split range by granularity\n", r); + goto out_unlock_prange; + } - r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last, - KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page); + r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, + KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, + vmf->page); if (r) pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n", - r, prange->svms, prange, start, last); + r, prange->svms, prange, prange->start, prange->last); + + /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */ + if (p->xnack_enabled && parent == prange) + op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP; + else + op = SVM_OP_UPDATE_RANGE_NOTIFIER; + svm_range_add_list_work(&p->svms, parent, mm, op); + schedule_deferred_list_work(&p->svms); out_unlock_prange: - mutex_unlock(&prange->migrate_mutex); + if (prange != parent) + mutex_unlock(&prange->migrate_mutex); + mutex_unlock(&parent->migrate_mutex); out_unlock_svms: mutex_unlock(&p->svms.lock); out_unref_process: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h index 9e48d10e848e..487f26368164 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h @@ -41,13 +41,9 @@ enum MIGRATION_COPY_DIR { }; int svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc, - unsigned long start, unsigned long last, struct mm_struct *mm, uint32_t trigger); - int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, - unsigned long start, unsigned long last, - uint32_t trigger, struct page *fault_page); - + uint32_t trigger, struct page *fault_page); unsigned long svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 77259d8fb671..3560a5a58090 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -158,13 +158,12 @@ svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr) static int svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, unsigned long offset, unsigned long npages, - unsigned long *hmm_pfns, uint32_t gpuidx, uint64_t *vram_pages) + unsigned long *hmm_pfns, uint32_t gpuidx) { enum dma_data_direction dir = DMA_BIDIRECTIONAL; dma_addr_t *addr = prange->dma_addr[gpuidx]; struct device *dev = adev->dev; struct page *page; - uint64_t vram_pages_dev; int i, r; if (!addr) { @@ -174,7 +173,6 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, prange->dma_addr[gpuidx] = addr; } - vram_pages_dev = 0; addr += offset; for (i = 0; i < npages; i++) { if (svm_is_valid_dma_mapping_addr(dev, addr[i])) @@ -184,7 +182,6 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, if (is_zone_device_page(page)) { struct amdgpu_device *bo_adev = prange->svm_bo->node->adev; - vram_pages_dev++; addr[i] = (hmm_pfns[i] << PAGE_SHIFT) + bo_adev->vm_manager.vram_base_offset - bo_adev->kfd.pgmap.range.start; @@ -201,14 +198,13 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n", addr[i] >> PAGE_SHIFT, page_to_pfn(page)); } - *vram_pages = vram_pages_dev; return 0; } static int svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap, unsigned long offset, unsigned long npages, - unsigned long *hmm_pfns, uint64_t *vram_pages) + unsigned long *hmm_pfns) { struct 
kfd_process *p; uint32_t gpuidx; @@ -227,7 +223,7 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap, } r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages, - hmm_pfns, gpuidx, vram_pages); + hmm_pfns, gpuidx); if (r) break; } @@ -353,7 +349,6 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start, INIT_LIST_HEAD(&prange->child_list); atomic_set(&prange->invalid, 0); prange->validate_timestamp = 0; - prange->vram_pages = 0; mutex_init(&prange->migrate_mutex); mutex_init(&prange->lock); @@ -400,8 +395,6 @@ static void svm_range_bo_release(struct kref *kref) prange->start, prange->last); mutex_lock(&prange->lock); prange->svm_bo = NULL; - /* prange should not hold vram page now */ - WARN_ON(prange->actual_loc); mutex_unlock(&prange->lock); spin_lock(&svm_bo->list_lock); @@ -982,11 +975,6 @@ svm_range_split_nodes(struct svm_range *new, struct svm_range *old, new->svm_bo = svm_range_bo_ref(old->svm_bo); new->ttm_res = old->ttm_res; - /* set new's vram_pages as old range's now, the acurate vram_pages - * will be updated during mapping - */ - new->vram_pages = min(old->vram_pages, new->npages); - spin_lock(&new->svm_bo->list_lock); list_add(&new->svm_bo_list, &new->svm_bo->range_list); spin_unlock(&new->svm_bo->list_lock); @@ -1630,7 +1618,6 @@ static int svm_range_validate_and_map(struct mm_struct *mm, struct svm_validate_context *ctx; unsigned long start, end, addr; struct kfd_process *p; - uint64_t vram_pages; void *owner; int32_t idx; int r = 0; @@ -1699,13 +1686,11 @@ static int svm_range_validate_and_map(struct mm_struct *mm, } } - vram_pages = 0; start = prange->start << PAGE_SHIFT; end = (prange->last + 1) << PAGE_SHIFT; for (addr = start; !r && addr < end; ) { struct hmm_range *hmm_range; struct vm_area_struct *vma; - uint64_t vram_pages_vma; unsigned long next = 0; unsigned long offset; unsigned long npages; @@ -1734,11 +1719,9 @@ static int svm_range_validate_and_map(struct mm_struct *mm, if (!r) { offset = (addr - start) >> PAGE_SHIFT; r = svm_range_dma_map(prange, ctx->bitmap, offset, npages, - hmm_range->hmm_pfns, &vram_pages_vma); + hmm_range->hmm_pfns); if (r) pr_debug("failed %d to dma map range\n", r); - else - vram_pages += vram_pages_vma; } svm_range_lock(prange); @@ -1764,19 +1747,6 @@ static int svm_range_validate_and_map(struct mm_struct *mm, addr = next; } - if (addr == end) { - prange->vram_pages = vram_pages; - - /* if prange does not include any vram page and it - * has not released svm_bo drop its svm_bo reference - * and set its actaul_loc to sys ram - */ - if (!vram_pages && prange->ttm_res) { - prange->actual_loc = 0; - svm_range_vram_node_free(prange); - } - } - svm_range_unreserve_bos(ctx); if (!r) prange->validate_timestamp = ktime_get_boottime(); @@ -2029,7 +1999,6 @@ static struct svm_range *svm_range_clone(struct svm_range *old) new->actual_loc = old->actual_loc; new->granularity = old->granularity; new->mapped_to_gpu = old->mapped_to_gpu; - new->vram_pages = old->vram_pages; bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE); bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE); @@ -2937,7 +2906,6 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, uint32_t vmid, uint32_t node_id, uint64_t addr, bool write_fault) { - unsigned long start, last, size; struct mm_struct *mm = NULL; struct svm_range_list *svms; struct svm_range *prange; @@ -3073,35 +3041,32 @@ retry_write_locked: kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr, write_fault, 
timestamp); - if (prange->actual_loc != 0 || best_loc != 0) { + if (prange->actual_loc != best_loc) { migration = true; - /* Align migration range start and size to granularity size */ - size = 1UL << prange->granularity; - start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start); - last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last); - if (best_loc) { - r = svm_migrate_to_vram(prange, best_loc, start, last, - mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU); + r = svm_migrate_to_vram(prange, best_loc, mm, + KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU); if (r) { pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n", r, addr); /* Fallback to system memory if migration to * VRAM failed */ - if (prange->actual_loc && prange->actual_loc != best_loc) - r = svm_migrate_vram_to_ram(prange, mm, start, last, - KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL); + if (prange->actual_loc) + r = svm_migrate_vram_to_ram(prange, mm, + KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, + NULL); else r = 0; } } else { - r = svm_migrate_vram_to_ram(prange, mm, start, last, - KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL); + r = svm_migrate_vram_to_ram(prange, mm, + KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, + NULL); } if (r) { pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n", - r, svms, start, last); + r, svms, prange->start, prange->last); goto out_unlock_range; } } @@ -3455,24 +3420,18 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange, *migrated = false; best_loc = svm_range_best_prefetch_location(prange); - /* when best_loc is a gpu node and same as prange->actual_loc - * we still need do migration as prange->actual_loc !=0 does - * not mean all pages in prange are vram. hmm migrate will pick - * up right pages during migration. - */ - if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) || - (best_loc == 0 && prange->actual_loc == 0)) + if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED || + best_loc == prange->actual_loc) return 0; if (!best_loc) { - r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last, + r = svm_migrate_vram_to_ram(prange, mm, KFD_MIGRATE_TRIGGER_PREFETCH, NULL); *migrated = !r; return r; } - r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last, - mm, KFD_MIGRATE_TRIGGER_PREFETCH); + r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH); *migrated = !r; return r; @@ -3527,11 +3486,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work) mutex_lock(&prange->migrate_mutex); do { - /* migrate all vram pages in this prange to sys ram - * after that prange->actual_loc should be zero - */ r = svm_migrate_vram_to_ram(prange, mm, - prange->start, prange->last, KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL); } while (!r && prange->actual_loc && --retries); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index be11ba0c4289..c528df1d0ba2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -78,7 +78,6 @@ struct svm_work_list_item { * @update_list:link list node used to add to update_list * @mapping: bo_va mapping structure to create and update GPU page table * @npages: number of pages - * @vram_pages: vram pages number in this svm_range * @dma_addr: dma mapping address on each GPU for system memory physical page * @ttm_res: vram ttm resource map * @offset: range start offset within mm_nodes @@ -89,9 +88,7 @@ struct svm_work_list_item { * @flags: flags defined as KFD_IOCTL_SVM_FLAG_* * @perferred_loc: perferred location, 0 
for CPU, or GPU id * @perfetch_loc: last prefetch location, 0 for CPU, or GPU id - * @actual_loc: this svm_range location. 0: all pages are from sys ram; - * GPU id: this svm_range may include vram pages from GPU with - * id actual_loc. + * @actual_loc: the actual location, 0 for CPU, or GPU id * @granularity:migration granularity, log2 num pages * @invalid: not 0 means cpu page table is invalidated * @validate_timestamp: system timestamp when range is validated @@ -115,7 +112,6 @@ struct svm_range { struct list_head list; struct list_head update_list; uint64_t npages; - uint64_t vram_pages; dma_addr_t *dma_addr[MAX_GPU_INSTANCE]; struct ttm_resource *ttm_res; uint64_t offset; From fbf1035b033a51eee48d5f42e781b02fff272ca0 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Mon, 23 Oct 2023 15:42:00 -0500 Subject: [PATCH 26/66] drm/amd: Disable PP_PCIE_DPM_MASK when dynamic speed switching not supported Rather than individual ASICs checking for the quirk, set the quirk at the driver level. Signed-off-by: Mario Limonciello Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++ drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 4 +--- drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 2 +- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 9340e8dc0413..286f3f60c25b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2315,6 +2315,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) adev->pm.pp_feature &= ~PP_GFXOFF_MASK; if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK; + if (!amdgpu_device_pcie_dynamic_switching_supported()) + adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK; total = true; for (i = 0; i < adev->num_ip_blocks; i++) { diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index 5a2371484a58..11372fcc59c8 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -1823,9 +1823,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true; data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true; - data->pcie_dpm_key_disabled = - !amdgpu_device_pcie_dynamic_switching_supported() || - !(hwmgr->feature_mask & PP_PCIE_DPM_MASK); + data->pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK); /* need to set voltage control types before EVV patching */ data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE; data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 090249b6422a..97a5c9b3e941 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -2115,7 +2115,7 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, min_lane_width = min_lane_width > max_lane_width ? 
max_lane_width : min_lane_width; - if (!amdgpu_device_pcie_dynamic_switching_supported()) { + if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { pcie_table->pcie_gen[0] = max_gen_speed; pcie_table->pcie_lane[0] = max_lane_width; } else { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index ba3ef3c2918a..3917ae5e681a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -2438,7 +2438,7 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu, uint32_t smu_pcie_arg; int ret, i; - if (!amdgpu_device_pcie_dynamic_switching_supported()) { + if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap) pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1]; From 1a6513de493d13f8d7501611fcc5bbaea4c799b3 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Mon, 23 Oct 2023 15:47:43 -0500 Subject: [PATCH 27/66] drm/amd: Move AMD_IS_APU check for ASPM into top level function There is no need for every ASIC driver to perform the same check. Move the duplicated code into amdgpu_device_should_use_aspm(). Signed-off-by: Mario Limonciello Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++ drivers/gpu/drm/amd/amdgpu/cik.c | 4 ---- drivers/gpu/drm/amd/amdgpu/nv.c | 3 +-- drivers/gpu/drm/amd/amdgpu/si.c | 2 -- drivers/gpu/drm/amd/amdgpu/soc15.c | 3 +-- drivers/gpu/drm/amd/amdgpu/soc21.c | 3 +-- drivers/gpu/drm/amd/amdgpu/vi.c | 3 +-- 7 files changed, 6 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 286f3f60c25b..9c51a31864a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1496,6 +1496,8 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev) default: return false; } + if (adev->flags & AMD_IS_APU) + return false; return pcie_aspm_enabled(adev->pdev); } diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index e63abdf52b6c..4dfaa017cf7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1709,10 +1709,6 @@ static void cik_program_aspm(struct amdgpu_device *adev) if (pci_is_root_bus(adev->pdev->bus)) return; - /* XXX double check APUs */ - if (adev->flags & AMD_IS_APU) - return; - orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL); data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK; data |= (0x24 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT) | diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 1995c7459f20..9fa220de1490 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -516,8 +516,7 @@ static void nv_program_aspm(struct amdgpu_device *adev) if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk()) return; - if (!(adev->flags & AMD_IS_APU) && - (adev->nbio.funcs->program_aspm)) + if (adev->nbio.funcs->program_aspm) adev->nbio.funcs->program_aspm(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 4b81f29e5fd5..a757526153e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -2440,8 +2440,6 @@ static void si_program_aspm(struct amdgpu_device *adev) if (!amdgpu_device_should_use_aspm(adev)) return; - if (adev->flags & AMD_IS_APU) - return; orig = data = 
RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL); data &= ~LC_XMIT_N_FTS_MASK; data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 66ed28136bc8..d4b8d62f4294 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -646,8 +646,7 @@ static void soc15_program_aspm(struct amdgpu_device *adev) if (!amdgpu_device_should_use_aspm(adev)) return; - if (!(adev->flags & AMD_IS_APU) && - (adev->nbio.funcs->program_aspm)) + if (adev->nbio.funcs->program_aspm) adev->nbio.funcs->program_aspm(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 8c6cab641a1c..d5083c549330 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -433,8 +433,7 @@ static void soc21_program_aspm(struct amdgpu_device *adev) if (!amdgpu_device_should_use_aspm(adev)) return; - if (!(adev->flags & AMD_IS_APU) && - (adev->nbio.funcs->program_aspm)) + if (adev->nbio.funcs->program_aspm) adev->nbio.funcs->program_aspm(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index fe8ba9e9837b..1a08052bade3 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1127,8 +1127,7 @@ static void vi_program_aspm(struct amdgpu_device *adev) if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_pcie_dynamic_switching_supported()) return; - if (adev->flags & AMD_IS_APU || - adev->asic_type < CHIP_POLARIS10) + if (adev->asic_type < CHIP_POLARIS10) return; orig = data = RREG32_PCIE(ixPCIE_LC_CNTL); From 2757a848cb0f184850d3e0a33b4a69e8014fdc5d Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Mon, 23 Oct 2023 15:50:05 -0500 Subject: [PATCH 28/66] drm/amd: Explicitly disable ASPM when dynamic switching disabled Currently there are separate but related checks: * amdgpu_device_should_use_aspm() * amdgpu_device_aspm_support_quirk() * amdgpu_device_pcie_dynamic_switching_supported() Simplify into checking whether DPM was enabled or not in the auto case. This works because amdgpu_device_pcie_dynamic_switching_supported() populates that value. 
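For reference, after this change the gate in amdgpu_device_should_use_aspm() reduces to roughly the following (sketch only, condensed from the hunks below; not the verbatim function body):

	if (adev->flags & AMD_IS_APU)
		return false;	/* APUs keep their early return */
	if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
		return false;	/* PCIe DPM off implies no dynamic speed switching */
	return pcie_aspm_enabled(adev->pdev);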
Signed-off-by: Mario Limonciello Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 -- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 21 ++++++--------------- drivers/gpu/drm/amd/amdgpu/nv.c | 7 +++---- drivers/gpu/drm/amd/amdgpu/vi.c | 2 +- 4 files changed, 10 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 0cd447cf2d3e..91820838b63b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1340,9 +1340,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev); int amdgpu_device_pci_reset(struct amdgpu_device *adev); bool amdgpu_device_need_post(struct amdgpu_device *adev); bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev); -bool amdgpu_device_pcie_dynamic_switching_supported(void); bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev); -bool amdgpu_device_aspm_support_quirk(void); void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, u64 num_vis_bytes); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 9c51a31864a7..46112f8250f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1456,14 +1456,14 @@ bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev) } /* - * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic - * speed switching. Until we have confirmation from Intel that a specific host - * supports it, it's safer that we keep it disabled for all. + * Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids + * don't support dynamic speed switching. Until we have confirmation from Intel + * that a specific host supports it, it's safer that we keep it disabled for all. 
* * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/ * https://gitlab.freedesktop.org/drm/amd/-/issues/2663 */ -bool amdgpu_device_pcie_dynamic_switching_supported(void) +static bool amdgpu_device_pcie_dynamic_switching_supported(void) { #if IS_ENABLED(CONFIG_X86) struct cpuinfo_x86 *c = &cpu_data(0); @@ -1498,20 +1498,11 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev) } if (adev->flags & AMD_IS_APU) return false; + if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) + return false; return pcie_aspm_enabled(adev->pdev); } -bool amdgpu_device_aspm_support_quirk(void) -{ -#if IS_ENABLED(CONFIG_X86) - struct cpuinfo_x86 *c = &cpu_data(0); - - return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE); -#else - return true; -#endif -} - /* if we get transitioned to only one device, take VGA back */ /** * amdgpu_device_vga_set_decode - enable/disable vga decode diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 9fa220de1490..4d7976b77767 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -513,7 +513,7 @@ static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) static void nv_program_aspm(struct amdgpu_device *adev) { - if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk()) + if (!amdgpu_device_should_use_aspm(adev)) return; if (adev->nbio.funcs->program_aspm) @@ -608,9 +608,8 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev, if (adev->gfx.funcs->update_perfmon_mgcg) adev->gfx.funcs->update_perfmon_mgcg(adev, !enter); - if (!(adev->flags & AMD_IS_APU) && - (adev->nbio.funcs->enable_aspm) && - amdgpu_device_should_use_aspm(adev)) + if (adev->nbio.funcs->enable_aspm && + amdgpu_device_should_use_aspm(adev)) adev->nbio.funcs->enable_aspm(adev, !enter); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 1a08052bade3..1a98812981f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1124,7 +1124,7 @@ static void vi_program_aspm(struct amdgpu_device *adev) bool bL1SS = false; bool bClkReqSupport = true; - if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_pcie_dynamic_switching_supported()) + if (!amdgpu_device_should_use_aspm(adev)) return; if (adev->asic_type < CHIP_POLARIS10) From 543068f0e3721e1cbd6cee48c17f277950f59670 Mon Sep 17 00:00:00 2001 From: Fangzhi Zuo Date: Mon, 2 Oct 2023 16:38:02 -0400 Subject: [PATCH 29/66] drm/amd/display: Fix MST Multi-Stream Not Lighting Up on dcn35 dcn35 misses .enable_symclk_se hook that makes MST DSC not functional when having multiple FE clk to be enabled. 
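A rough sketch of the added guard in dcn20_enable_stream() (condensed from the hunk below):

	/* program the per stream encoder SYMCLK only when the DCCG provides the hook */
	if (dccg->funcs->enable_symclk_se)
		dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
					      link_enc->transmitter - TRANSMITTER_UNIPHY_A);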
Reviewed-by: Rodrigo Siqueira Acked-by: Roman Li Signed-off-by: Fangzhi Zuo Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index 4d6493e0ccfc..608221b0dd5d 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -2746,6 +2746,8 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) struct dce_hwseq *hws = dc->hwseq; unsigned int k1_div = PIXEL_RATE_DIV_NA; unsigned int k2_div = PIXEL_RATE_DIV_NA; + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link); + struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { if (dc->hwseq->funcs.setup_hpo_hw_control) @@ -2765,6 +2767,10 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) dto_params.timing = &pipe_ctx->stream->timing; dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + } else { + if (dccg->funcs->enable_symclk_se) + dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst, + link_enc->transmitter - TRANSMITTER_UNIPHY_A); } if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) { hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div); From d5f9a92bd1e234b8a7cf6f350b5bc0169221ae59 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Tue, 3 Oct 2023 12:18:44 -0400 Subject: [PATCH 30/66] drm/amd/display: Revert "Improve x86 and dmub ips handshake" This reverts commit 1288d702080949f87688d49dfeeacc99f40adc9b. Causes intermittent hangs during reboot stress testing. 
Reviewed-by: Duncan Ma Acked-by: Roman Li Signed-off-by: Nicholas Kazlauskas Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 37 ------------ .../amd/display/dc/clk_mgr/dcn35/dcn35_smu.c | 14 +---- .../amd/display/dc/clk_mgr/dcn35/dcn35_smu.h | 4 +- drivers/gpu/drm/amd/display/dc/dc.h | 2 - drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 57 ++++--------------- .../gpu/drm/amd/display/dc/dcn35/dcn35_init.c | 2 - .../drm/amd/display/dc/dcn35/dcn35_resource.c | 2 - .../amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 30 ++++------ .../amd/display/dc/hwss/dcn35/dcn35_hwseq.h | 3 - .../drm/amd/display/dc/hwss/hw_sequencer.h | 3 +- .../gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 2 - 11 files changed, 27 insertions(+), 129 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index f80917f6153b..302a3d348c76 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -808,34 +808,6 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base) } } -static void dcn35_set_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - struct dc *dc = clk_mgr_base->ctx->dc; - uint32_t val = dcn35_smu_read_ips_scratch(clk_mgr); - - if (dc->config.disable_ips == 0) { - val |= DMUB_IPS1_ALLOW_MASK; - val |= DMUB_IPS2_ALLOW_MASK; - } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) { - val = val & ~DMUB_IPS1_ALLOW_MASK; - val = val & ~DMUB_IPS2_ALLOW_MASK; - } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) { - val |= DMUB_IPS1_ALLOW_MASK; - val = val & ~DMUB_IPS2_ALLOW_MASK; - } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) { - val |= DMUB_IPS1_ALLOW_MASK; - val |= DMUB_IPS2_ALLOW_MASK; - } - - if (!allow_idle) { - val = val & ~DMUB_IPS1_ALLOW_MASK; - val = val & ~DMUB_IPS2_ALLOW_MASK; - } - - dcn35_smu_write_ips_scratch(clk_mgr, val); -} - static void dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); @@ -855,13 +827,6 @@ static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base) return ips_supported; } -static uint32_t dcn35_get_idle_state(struct clk_mgr *clk_mgr_base) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - - return dcn35_smu_read_ips_scratch(clk_mgr); -} - static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr) { dcn35_init_clocks(clk_mgr); @@ -949,8 +914,6 @@ static struct clk_mgr_funcs dcn35_funcs = { .set_low_power_state = dcn35_set_low_power_state, .exit_low_power_state = dcn35_exit_low_power_state, .is_ips_supported = dcn35_is_ips_supported, - .set_idle_state = dcn35_set_idle_state, - .get_idle_state = dcn35_get_idle_state }; struct clk_mgr_funcs dcn35_fpga_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c index b6b8c3ca1572..b20b3a5eb3c4 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c @@ -444,9 +444,9 @@ void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *cl enable); } -int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr) +void dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr) { - return 
dcn35_smu_send_msg_with_param( + dcn35_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_DispPsrExit, 0); @@ -459,13 +459,3 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr) VBIOSSMC_MSG_QueryIPS2Support, 0); } - -void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param) -{ - REG_WRITE(MP1_SMN_C2PMSG_71, param); -} - -uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr) -{ - return REG_READ(MP1_SMN_C2PMSG_71); -} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h index 2b8e6959a03d..38b7a4420d6c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h @@ -194,10 +194,8 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable); void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable); -int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr); +void dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr); int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr); int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr); int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr); -void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param); -uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr); #endif /* DAL_DC_35_SMU_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 582d94c759f6..342022b7ab37 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -975,8 +975,6 @@ struct dc_debug_options { bool replay_skip_crtc_disabled; bool ignore_pg;/*do nothing, let pmfw control it*/ bool psp_disabled_wa; - unsigned int ips2_eval_delay_us; - unsigned int ips2_entry_delay_us; }; struct gpu_info_soc_bounding_box_v1_0; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 4fd3f09432be..a388f34c6d04 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -1100,64 +1100,31 @@ void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle; - if (allow_idle) { - if (dc->hwss.set_idle_state) - dc->hwss.set_idle_state(dc, true); - } - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + + if (allow_idle) + udelay(500); } void dc_dmub_srv_exit_low_power_state(const struct dc *dc) { - uint32_t allow_state = 0; - uint32_t commit_state = 0; - if (dc->debug.dmcub_emulation) return; if (!dc->idle_optimizations_allowed) return; - if (dc->hwss.get_idle_state && - dc->hwss.set_idle_state && - dc->clk_mgr->funcs->exit_low_power_state) { + // Tell PMFW to exit low power state + if (dc->clk_mgr->funcs->exit_low_power_state) + dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); - allow_state = dc->hwss.get_idle_state(dc); - dc->hwss.set_idle_state(dc, false); + // Wait for dmcub to load up + dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true); - if (allow_state & DMUB_IPS2_ALLOW_MASK) { - // Wait for evaluation time - udelay(dc->debug.ips2_eval_delay_us); - commit_state = dc->hwss.get_idle_state(dc); - if (commit_state & DMUB_IPS2_COMMIT_MASK) { - // Tell PMFW to exit low power state - 
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); + // Notify dmcub disallow idle + dc_dmub_srv_notify_idle(dc, false); - // Wait for IPS2 entry upper bound - udelay(dc->debug.ips2_entry_delay_us); - dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); - - do { - commit_state = dc->hwss.get_idle_state(dc); - } while (commit_state & DMUB_IPS2_COMMIT_MASK); - - if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) - ASSERT(0); - - return; - } - } - - dc_dmub_srv_notify_idle(dc, false); - if (allow_state & DMUB_IPS1_ALLOW_MASK) { - do { - commit_state = dc->hwss.get_idle_state(dc); - } while (commit_state & DMUB_IPS1_COMMIT_MASK); - } - } - - if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) - ASSERT(0); + // Confirm dmu is powered up + dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c index 296bf3a38cb9..3ccf1c8cedda 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c @@ -120,8 +120,6 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate, .block_power_control = dcn35_block_power_control, .root_clock_control = dcn35_root_clock_control, - .set_idle_state = dcn35_set_idle_state, - .get_idle_state = dcn35_get_idle_state }; static const struct hwseq_private_funcs dcn35_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c index 3edc57acab78..f9166c1a6a34 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c @@ -748,8 +748,6 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_z10 = false, .ignore_pg = true, .psp_disabled_wa = true, - .ips2_eval_delay_us = 200, - .ips2_entry_delay_us = 400 }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index d3429134b2e3..8f717db01f85 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -652,10 +652,18 @@ bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable) // TODO: review other cases when idle optimization is allowed + if (!enable) { + // Tell PMFW to exit low power state + if (dc->clk_mgr->funcs->exit_low_power_state) + dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); + + dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true); + } + + dc_dmub_srv_notify_idle(dc, enable); + if (!enable) - dc_dmub_srv_exit_low_power_state(dc); - else - dc_dmub_srv_notify_idle(dc, enable); + dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true); return true; } @@ -1189,19 +1197,3 @@ void dcn35_optimize_bandwidth( dc->hwss.root_clock_control(dc, &pg_update_state, false); } } - -void dcn35_set_idle_state(const struct dc *dc, bool allow_idle) -{ - // TODO: Find a more suitable communcation - if (dc->clk_mgr->funcs->set_idle_state) - dc->clk_mgr->funcs->set_idle_state(dc->clk_mgr, allow_idle); -} - -uint32_t dcn35_get_idle_state(const struct dc *dc) -{ - // TODO: Find a more suitable communcation - if (dc->clk_mgr->funcs->get_idle_state) - return dc->clk_mgr->funcs->get_idle_state(dc->clk_mgr); - - return 0; -} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h 
b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h index 0dff10d179b8..9b66ab0c909c 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h @@ -81,7 +81,4 @@ void dcn35_dsc_pg_control( struct dce_hwseq *hws, unsigned int dsc_inst, bool power_on); - -void dcn35_set_idle_state(const struct dc *dc, bool allow_idle); -uint32_t dcn35_get_idle_state(const struct dc *dc); #endif /* __DC_HWSS_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index 452680fe9aab..d45302035e3f 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -418,8 +418,7 @@ struct hw_sequencer_funcs { struct pg_block_update *update_state, bool power_on); void (*root_clock_control)(struct dc *dc, struct pg_block_update *update_state, bool power_on); - void (*set_idle_state)(const struct dc *dc, bool allow_idle); - uint32_t (*get_idle_state)(const struct dc *dc); + bool (*is_pipe_topology_transition_seamless)(struct dc *dc, const struct dc_state *cur_ctx, const struct dc_state *new_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index fa9614bcb160..cb2dc3f75ae2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -262,8 +262,6 @@ struct clk_mgr_funcs { void (*set_low_power_state)(struct clk_mgr *clk_mgr); void (*exit_low_power_state)(struct clk_mgr *clk_mgr); bool (*is_ips_supported)(struct clk_mgr *clk_mgr); - void (*set_idle_state)(struct clk_mgr *clk_mgr, bool allow_idle); - uint32_t (*get_idle_state)(struct clk_mgr *clk_mgr); void (*init_clocks)(struct clk_mgr *clk_mgr); From d591284288c29f04e52ae4f3d605e2f39c3e316c Mon Sep 17 00:00:00 2001 From: Sung Joon Kim Date: Fri, 29 Sep 2023 12:12:47 -0400 Subject: [PATCH 31/66] drm/amd/display: Add a check for idle power optimization [why] Need a helper function to check idle power is allowed so that dc doesn't access any registers that are power-gated. [how] Implement helper function to check idle power optimization. Enable a hook to check if detection is allowed. V2: Add function hooks for set and get idle states. Check if function hook was properly initialized. 
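The intended calling pattern is sketched below; the helper name comes from the hunks that follow, while the surrounding caller is only an illustration:

	/* sketch: skip register access while idle power states are allowed */
	if (dc_dmub_is_ips_idle_state(dc))
		return false;	/* IPS1/IPS2 allow bits set, registers may be power-gated */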
Reviewed-by: Aric Cyr Reviewed-by: Nicholas Choi Acked-by: Roman Li Signed-off-by: Sung Joon Kim Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 23 +++++++++++++++++++ drivers/gpu/drm/amd/display/dc/dc.h | 1 + .../amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 4 +++- .../amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 8 ++----- .../drm/amd/display/dc/hwss/hw_sequencer.h | 2 ++ .../gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 2 ++ .../gpu/drm/amd/display/dmub/src/dmub_srv.c | 1 + 7 files changed, 34 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index e13d8bab0b33..d13904548505 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -4884,6 +4884,9 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow) if (dc->debug.disable_idle_power_optimizations) return; + if (dc->caps.ips_support && dc->config.disable_ips) + return; + if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present) if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr)) return; @@ -4895,6 +4898,26 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow) dc->idle_optimizations_allowed = allow; } +bool dc_dmub_is_ips_idle_state(struct dc *dc) +{ + uint32_t idle_state = 0; + + if (dc->debug.disable_idle_power_optimizations) + return false; + + if (!dc->caps.ips_support || dc->config.disable_ips) + return false; + + if (dc->hwss.get_idle_state) + idle_state = dc->hwss.get_idle_state(dc); + + if ((idle_state & DMUB_IPS1_ALLOW_MASK) || + (idle_state & DMUB_IPS2_ALLOW_MASK)) + return true; + + return false; +} + /* set min and max memory clock to lowest and highest DPM level, respectively */ void dc_unlock_memory_clock_frequency(struct dc *dc) { diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 342022b7ab37..eab9a0be3328 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -2315,6 +2315,7 @@ bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_ struct dc_cursor_attributes *cursor_attr); void dc_allow_idle_optimizations(struct dc *dc, bool allow); +bool dc_dmub_is_ips_idle_state(struct dc *dc); /* set min and max memory clock to lowest and highest DPM level, respectively */ void dc_unlock_memory_clock_frequency(struct dc *dc); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index a22cd2aee286..97798cee876e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -585,7 +585,9 @@ void dcn31_reset_hw_ctx_wrap( struct clock_source *old_clk = pipe_ctx_old->clock_source; /* Reset pipe which is seamless boot stream. */ - if (!pipe_ctx_old->plane_state) { + if (!pipe_ctx_old->plane_state && + dc->res_pool->hubbub->funcs->program_det_size && + dc->res_pool->hubbub->funcs->wait_for_det_apply) { dc->res_pool->hubbub->funcs->program_det_size( dc->res_pool->hubbub, pipe_ctx_old->plane_res.hubp->inst, 0); /* Wait det size changed. 
*/ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 8f717db01f85..ece806a63d8d 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -629,12 +629,8 @@ void dcn35_power_down_on_boot(struct dc *dc) if (dc->clk_mgr->funcs->set_low_power_state) dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr); - if (dc->clk_mgr->clks.pwr_state == DCN_PWR_STATE_LOW_POWER) { - if (!dc->idle_optimizations_allowed) { - dc_dmub_srv_notify_idle(dc, true); - dc->idle_optimizations_allowed = true; - } - } + if (dc->clk_mgr->clks.pwr_state == DCN_PWR_STATE_LOW_POWER) + dc_allow_idle_optimizations(dc, true); } bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index d45302035e3f..c43d1f6c2a06 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -418,6 +418,8 @@ struct hw_sequencer_funcs { struct pg_block_update *update_state, bool power_on); void (*root_clock_control)(struct dc *dc, struct pg_block_update *update_state, bool power_on); + void (*set_idle_state)(const struct dc *dc, bool allow_idle); + uint32_t (*get_idle_state)(const struct dc *dc); bool (*is_pipe_topology_transition_seamless)(struct dc *dc, const struct dc_state *cur_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index cb2dc3f75ae2..fa9614bcb160 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -262,6 +262,8 @@ struct clk_mgr_funcs { void (*set_low_power_state)(struct clk_mgr *clk_mgr); void (*exit_low_power_state)(struct clk_mgr *clk_mgr); bool (*is_ips_supported)(struct clk_mgr *clk_mgr); + void (*set_idle_state)(struct clk_mgr *clk_mgr, bool allow_idle); + uint32_t (*get_idle_state)(struct clk_mgr *clk_mgr); void (*init_clocks)(struct clk_mgr *clk_mgr); diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index b99db771e071..e43e8d4bfe37 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -352,6 +352,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) funcs->init_reg_offsets = dmub_srv_dcn35_regs_init; funcs->is_hw_powered_up = dmub_dcn35_is_hw_powered_up; + funcs->should_detect = dmub_dcn35_should_detect; break; default: From a67f7a0b18c09d5b62eafb6d5c2f54e6f6ea6cf1 Mon Sep 17 00:00:00 2001 From: George Shen Date: Mon, 2 Oct 2023 15:31:16 -0400 Subject: [PATCH 32/66] drm/amd/display: Update SDP VSC colorimetry from DP test automation request [Why] Certain test equipment vendors check the SDP VSC for colorimetry against the value from the test request during certain DP link layer tests for YCbCr test cases. [How] Update SDP VSC with colorimetry from test automation request. 
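Condensed from the hunk below, the test pattern path now patches byte 16 of the stream's VSC infopacket, whose low nibble is assumed here to hold the colorimetry format:

	if (color_space == COLOR_SPACE_YCBCR601_LIMITED)
		pipe_ctx->stream->vsc_infopacket.sb[16] &= 0xf0;	/* BT.601 */
	else if (color_space == COLOR_SPACE_YCBCR709_LIMITED)
		pipe_ctx->stream->vsc_infopacket.sb[16] |= 1;		/* BT.709 */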
Reviewed-by: Wenjing Liu Acked-by: Roman Li Signed-off-by: George Shen Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index 0894e6aef3dd..21a39afd274b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -839,6 +839,12 @@ bool dp_set_test_pattern( pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range else pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7); + + if (color_space == COLOR_SPACE_YCBCR601_LIMITED) + pipe_ctx->stream->vsc_infopacket.sb[16] &= 0xf0; + else if (color_space == COLOR_SPACE_YCBCR709_LIMITED) + pipe_ctx->stream->vsc_infopacket.sb[16] |= 1; + resource_build_info_frame(pipe_ctx); link->dc->hwss.update_info_frame(pipe_ctx); } From 5edb7cdff85af8f8c5fda5b88310535ab823f663 Mon Sep 17 00:00:00 2001 From: Swapnil Patel Date: Wed, 4 Oct 2023 15:58:57 -0400 Subject: [PATCH 33/66] drm/amd/display: Reduce default backlight min from 5 nits to 1 nits [Why & How] Currently set_default_brightness_aux function uses 5 nits as lower limit to check for valid default_backlight setting. However some newer panels can support even lower default settings Reviewed-by: Agustin Gutierrez Acked-by: Roman Li Signed-off-by: Swapnil Patel Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../amd/display/dc/link/protocols/link_edp_panel_control.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 938df1f0f7da..86f97ddcc595 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -283,8 +283,8 @@ bool set_default_brightness_aux(struct dc_link *link) if (link && link->dpcd_sink_ext_caps.bits.oled == 1) { if (!read_default_bl_aux(link, &default_backlight)) default_backlight = 150000; - // if < 5 nits or > 5000, it might be wrong readback - if (default_backlight < 5000 || default_backlight > 5000000) + // if < 1 nits or > 5000, it might be wrong readback + if (default_backlight < 1000 || default_backlight > 5000000) default_backlight = 150000; // return edp_set_backlight_level_nits(link, true, From 2c071cae6bb0f942136a530039faaa707c48893c Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Wed, 4 Oct 2023 12:12:57 -0400 Subject: [PATCH 34/66] drm/amd/display: add pipe resource management callbacks to DML2 [why] Need DML2 to support new pipe resource management APIs. 
Reviewed-by: Chaitanya Dhere Acked-by: Roman Li Signed-off-by: Wenjing Liu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 5 +++++ .../drm/amd/display/dc/dcn321/dcn321_resource.c | 5 +++++ .../gpu/drm/amd/display/dc/dcn35/dcn35_resource.c | 5 +++++ .../gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 15 +++++++++++++++ 4 files changed, 30 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 81b0588fa80b..02d3168f1673 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -2445,6 +2445,11 @@ static bool dcn32_resource_construct( dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; + dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; + dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; + dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; + dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; + dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; dc->dml2_options.svp_pstate.callbacks.dc = dc; dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context; diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index 0b1ce6e28e11..7d0e5e9d611f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -1999,6 +1999,11 @@ static bool dcn321_resource_construct( dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; + dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; + dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; + dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; + dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; + dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; dc->dml2_options.svp_pstate.callbacks.dc = dc; dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context; diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c index f9166c1a6a34..4e333e2a3147 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c @@ -2084,6 +2084,11 @@ static bool dcn35_resource_construct( dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = 
&dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; + dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; + dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; + dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; + dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; + dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; dc->dml2_options.max_segments_per_hubp = 24; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index 252442ea9d3d..20e7c74af6e2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -73,6 +73,21 @@ struct dml2_dc_callbacks { bool (*build_scaling_params)(struct pipe_ctx *pipe_ctx); bool (*can_support_mclk_switch_using_fw_based_vblank_stretch)(struct dc *dc, struct dc_state *context); bool (*acquire_secondary_pipe_for_mpc_odm)(const struct dc *dc, struct dc_state *state, struct pipe_ctx *pri_pipe, struct pipe_ctx *sec_pipe, bool odm); + bool (*update_pipes_for_stream_with_slice_count)( + struct dc_state *new_ctx, + const struct dc_state *cur_ctx, + const struct resource_pool *pool, + const struct dc_stream_state *stream, + int new_slice_count); + bool (*update_pipes_for_plane_with_slice_count)( + struct dc_state *new_ctx, + const struct dc_state *cur_ctx, + const struct resource_pool *pool, + const struct dc_plane_state *plane, + int slice_count); + int (*get_odm_slice_index)(const struct pipe_ctx *opp_head); + int (*get_mpc_slice_index)(const struct pipe_ctx *dpp_pipe); + struct pipe_ctx *(*get_opp_head)(const struct pipe_ctx *pipe_ctx); }; struct dml2_dc_svp_callbacks { From 488bb99d42e607a40524ee1514b0b1246b1f69c8 Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Wed, 4 Oct 2023 14:30:30 -0400 Subject: [PATCH 35/66] drm/amd/display: implement map dc pipe with callback in DML2 [why] Unify pipe resource management logic in dc resource layer. V2: Add default case for switch. 
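The ordering used by the callback path is sketched below (simplified from map_dc_pipes_for_stream() in the hunks that follow, with the ctx->config.callbacks prefix dropped for brevity). ODM and MPC combine are mutually exclusive by DML design, so ODM is shrunk first to free pipes for MPC combine and only grown after the MPC factors are applied:

	if (odm_factor == 1)	/* release ODM pipes before growing MPC */
		result &= update_pipes_for_stream_with_slice_count(state, existing_state,
								   pool, stream, odm_factor);
	for (plane_idx = 0; plane_idx < status->plane_count; plane_idx++)
		result &= update_pipes_for_plane_with_slice_count(state, existing_state,
								   pool, status->plane_states[plane_idx],
								   mpc_factors[plane_idx]);
	if (odm_factor > 1)	/* acquire the extra ODM pipes last */
		result &= update_pipes_for_stream_with_slice_count(state, existing_state,
								   pool, stream, odm_factor);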
CC: Hamza Mahfooz Reviewed-by: Chaitanya Dhere Signed-off-by: Wenjing Liu Reviewed-by: Rodrigo Siqueira Reviewed-by: Jun Lei Acked-by: Roman Li Signed-off-by: Qingqing Zhuo Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn32/dcn32_resource.c | 1 + .../display/dc/dml2/dml2_dc_resource_mgmt.c | 146 ++++++++++++++++++ .../drm/amd/display/dc/dml2/dml2_wrapper.h | 1 + 3 files changed, 148 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 02d3168f1673..0e1d395a9340 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -2481,6 +2481,7 @@ static bool dcn32_resource_construct( dc->dml2_options.max_segments_per_hubp = 18; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE; + dc->dml2_options.map_dc_pipes_with_callbacks = true; if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0)) dc->config.sdpif_request_limit_words_per_umc = 16; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c index 393ecad64636..d2046e770c50 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c @@ -758,6 +758,148 @@ static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state free_unused_pipes_for_plane(ctx, state, plane, &scratch->pipe_pool, stream->stream_id); } +static unsigned int get_mpc_factor(struct dml2_context *ctx, + const struct dc_state *state, + const struct dml_display_cfg_st *disp_cfg, + struct dml2_dml_to_dc_pipe_mapping *mapping, + const struct dc_stream_status *status, unsigned int stream_id, + int plane_idx) +{ + unsigned int plane_id; + unsigned int cfg_idx; + + get_plane_id(state, status->plane_states[plane_idx], stream_id, &plane_id); + cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id); + if (ctx->architecture == dml2_architecture_20) + return (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx]; + ASSERT(false); + return 1; +} + +static unsigned int get_odm_factor( + const struct dml2_context *ctx, + const struct dml_display_cfg_st *disp_cfg, + struct dml2_dml_to_dc_pipe_mapping *mapping, + const struct dc_stream_state *stream) +{ + unsigned int cfg_idx = find_disp_cfg_idx_by_stream_id( + mapping, stream->stream_id); + + if (ctx->architecture == dml2_architecture_20) + switch (disp_cfg->hw.ODMMode[cfg_idx]) { + case dml_odm_mode_bypass: + return 1; + case dml_odm_mode_combine_2to1: + return 2; + case dml_odm_mode_combine_4to1: + return 4; + default: + break; + } + ASSERT(false); + return 1; +} + +static void populate_mpc_factors_for_stream( + struct dml2_context *ctx, + const struct dml_display_cfg_st *disp_cfg, + struct dml2_dml_to_dc_pipe_mapping *mapping, + const struct dc_state *state, + unsigned int stream_idx, + unsigned int odm_factor, + unsigned int mpc_factors[MAX_PIPES]) +{ + const struct dc_stream_status *status = &state->stream_status[stream_idx]; + unsigned int stream_id = state->streams[stream_idx]->stream_id; + int i; + + for (i = 0; i < status->plane_count; i++) + if (odm_factor == 1) + mpc_factors[i] = get_mpc_factor( + ctx, state, disp_cfg, mapping, status, + stream_id, i); + else + mpc_factors[i] = 1; +} + +static void populate_odm_factors(const struct dml2_context *ctx, + const struct dml_display_cfg_st *disp_cfg, + struct 
dml2_dml_to_dc_pipe_mapping *mapping, + const struct dc_state *state, + unsigned int odm_factors[MAX_PIPES]) +{ + int i; + + for (i = 0; i < state->stream_count; i++) + odm_factors[i] = get_odm_factor( + ctx, disp_cfg, mapping, state->streams[i]); +} + +static bool map_dc_pipes_for_stream(struct dml2_context *ctx, + struct dc_state *state, + const struct dc_state *existing_state, + const struct dc_stream_state *stream, + const struct dc_stream_status *status, + unsigned int odm_factor, + unsigned int mpc_factors[MAX_PIPES]) +{ + int plane_idx; + bool result = true; + + if (odm_factor == 1) + /* + * ODM and MPC combines are by DML design mutually exclusive. + * ODM factor of 1 means MPC factors may be greater than 1. + * In this case, we want to set ODM factor to 1 first to free up + * pipe resources from previous ODM configuration before setting + * up MPC combine to acquire more pipe resources. + */ + result &= ctx->config.callbacks.update_pipes_for_stream_with_slice_count( + state, + existing_state, + ctx->config.callbacks.dc->res_pool, + stream, + odm_factor); + for (plane_idx = 0; plane_idx < status->plane_count; plane_idx++) + result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count( + state, + existing_state, + ctx->config.callbacks.dc->res_pool, + status->plane_states[plane_idx], + mpc_factors[plane_idx]); + if (odm_factor > 1) + result &= ctx->config.callbacks.update_pipes_for_stream_with_slice_count( + state, + existing_state, + ctx->config.callbacks.dc->res_pool, + stream, + odm_factor); + return result; +} + +static bool map_dc_pipes_with_callbacks(struct dml2_context *ctx, + struct dc_state *state, + const struct dml_display_cfg_st *disp_cfg, + struct dml2_dml_to_dc_pipe_mapping *mapping, + const struct dc_state *existing_state) +{ + unsigned int odm_factors[MAX_PIPES]; + unsigned int mpc_factors_for_stream[MAX_PIPES]; + int i; + bool result = true; + + populate_odm_factors(ctx, disp_cfg, mapping, state, odm_factors); + for (i = 0; i < state->stream_count; i++) { + populate_mpc_factors_for_stream(ctx, disp_cfg, mapping, state, + i, odm_factors[i], mpc_factors_for_stream); + result &= map_dc_pipes_for_stream(ctx, state, existing_state, + state->streams[i], + &state->stream_status[i], + odm_factors[i], mpc_factors_for_stream); + } + return result; +} + bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, const struct dc_state *existing_state) { int stream_index, plane_index, i; @@ -772,6 +914,10 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s unsigned int odm_mode_array[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0}, dpp_per_surface_array[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0}; struct dc_pipe_mapping_scratch scratch; + if (ctx->config.map_dc_pipes_with_callbacks) + return map_dc_pipes_with_callbacks( + ctx, state, disp_cfg, mapping, existing_state); + if (ctx->architecture == dml2_architecture_21) { /* * Extract ODM and DPP outputs from DML2.1 and map them in an array as required for pipe mapping in dml2_map_dc_pipes. 
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index 20e7c74af6e2..317f90776d97 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -169,6 +169,7 @@ struct dml2_configuration_options { struct dml2_soc_bbox_overrides bbox_overrides; unsigned int max_segments_per_hubp; unsigned int det_segment_size; + bool map_dc_pipes_with_callbacks; }; /* From da2d16fcdda344b18ec9a4a55dff9805d5d781d2 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Thu, 5 Oct 2023 11:48:44 -0400 Subject: [PATCH 36/66] drm/amd/display: Fix IPS handshake for idle optimizations [Why] Intermittent reboot hangs are observed introduced by "Improve x86 and dmub ips handshake". [How] Bring back the commit but fix the polling. Avoid hanging in place forever by bounding the delay and ensure that we still message DMCUB on IPS2 exit to notify driver idle has been cleared. Reviewed-by: Duncan Ma Reviewed-by: Jun Lei Acked-by: Roman Li Signed-off-by: Nicholas Kazlauskas Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 37 ++++++++++ .../amd/display/dc/clk_mgr/dcn35/dcn35_smu.c | 14 +++- .../amd/display/dc/clk_mgr/dcn35/dcn35_smu.h | 4 +- drivers/gpu/drm/amd/display/dc/dc.h | 2 + drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 73 ++++++++++++++++--- .../gpu/drm/amd/display/dc/dcn35/dcn35_init.c | 2 + .../drm/amd/display/dc/dcn35/dcn35_resource.c | 2 + .../amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 30 +++++--- .../amd/display/dc/hwss/dcn35/dcn35_hwseq.h | 3 + .../drm/amd/display/dc/hwss/hw_sequencer.h | 1 - 10 files changed, 141 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index 302a3d348c76..f80917f6153b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -808,6 +808,34 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base) } } +static void dcn35_set_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct dc *dc = clk_mgr_base->ctx->dc; + uint32_t val = dcn35_smu_read_ips_scratch(clk_mgr); + + if (dc->config.disable_ips == 0) { + val |= DMUB_IPS1_ALLOW_MASK; + val |= DMUB_IPS2_ALLOW_MASK; + } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) { + val = val & ~DMUB_IPS1_ALLOW_MASK; + val = val & ~DMUB_IPS2_ALLOW_MASK; + } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) { + val |= DMUB_IPS1_ALLOW_MASK; + val = val & ~DMUB_IPS2_ALLOW_MASK; + } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) { + val |= DMUB_IPS1_ALLOW_MASK; + val |= DMUB_IPS2_ALLOW_MASK; + } + + if (!allow_idle) { + val = val & ~DMUB_IPS1_ALLOW_MASK; + val = val & ~DMUB_IPS2_ALLOW_MASK; + } + + dcn35_smu_write_ips_scratch(clk_mgr, val); +} + static void dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); @@ -827,6 +855,13 @@ static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base) return ips_supported; } +static uint32_t dcn35_get_idle_state(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + return dcn35_smu_read_ips_scratch(clk_mgr); +} + static void dcn35_init_clocks_fpga(struct clk_mgr 
*clk_mgr) { dcn35_init_clocks(clk_mgr); @@ -914,6 +949,8 @@ static struct clk_mgr_funcs dcn35_funcs = { .set_low_power_state = dcn35_set_low_power_state, .exit_low_power_state = dcn35_exit_low_power_state, .is_ips_supported = dcn35_is_ips_supported, + .set_idle_state = dcn35_set_idle_state, + .get_idle_state = dcn35_get_idle_state }; struct clk_mgr_funcs dcn35_fpga_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c index b20b3a5eb3c4..b6b8c3ca1572 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c @@ -444,9 +444,9 @@ void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *cl enable); } -void dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr) +int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr) { - dcn35_smu_send_msg_with_param( + return dcn35_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_DispPsrExit, 0); @@ -459,3 +459,13 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr) VBIOSSMC_MSG_QueryIPS2Support, 0); } + +void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param) +{ + REG_WRITE(MP1_SMN_C2PMSG_71, param); +} + +uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr) +{ + return REG_READ(MP1_SMN_C2PMSG_71); +} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h index 38b7a4420d6c..2b8e6959a03d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h @@ -194,8 +194,10 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable); void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable); -void dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr); +int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr); int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr); int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr); int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr); +void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param); +uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr); #endif /* DAL_DC_35_SMU_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index eab9a0be3328..72ba62d1a01e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -975,6 +975,8 @@ struct dc_debug_options { bool replay_skip_crtc_disabled; bool ignore_pg;/*do nothing, let pmfw control it*/ bool psp_disabled_wa; + unsigned int ips2_eval_delay_us; + unsigned int ips2_entry_delay_us; }; struct gpu_info_soc_bounding_box_v1_0; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index a388f34c6d04..ba142bef626b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -1100,31 +1100,80 @@ void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle; - dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + if (allow_idle) { + if (dc->hwss.set_idle_state) + dc->hwss.set_idle_state(dc, true); + } - if (allow_idle) - 
udelay(500); + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } void dc_dmub_srv_exit_low_power_state(const struct dc *dc) { + const uint32_t max_num_polls = 10000; + uint32_t allow_state = 0; + uint32_t commit_state = 0; + uint32_t i; + if (dc->debug.dmcub_emulation) return; if (!dc->idle_optimizations_allowed) return; - // Tell PMFW to exit low power state - if (dc->clk_mgr->funcs->exit_low_power_state) - dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); + if (dc->hwss.get_idle_state && + dc->hwss.set_idle_state && + dc->clk_mgr->funcs->exit_low_power_state) { - // Wait for dmcub to load up - dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true); + allow_state = dc->hwss.get_idle_state(dc); + dc->hwss.set_idle_state(dc, false); - // Notify dmcub disallow idle - dc_dmub_srv_notify_idle(dc, false); + if (allow_state & DMUB_IPS2_ALLOW_MASK) { + // Wait for evaluation time + udelay(dc->debug.ips2_eval_delay_us); + commit_state = dc->hwss.get_idle_state(dc); + if (commit_state & DMUB_IPS2_COMMIT_MASK) { + // Tell PMFW to exit low power state + dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); - // Confirm dmu is powered up - dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true); + // Wait for IPS2 entry upper bound + udelay(dc->debug.ips2_entry_delay_us); + dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); + + for (i = 0; i < max_num_polls; ++i) { + commit_state = dc->hwss.get_idle_state(dc); + if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) + break; + + udelay(1); + } + ASSERT(i < max_num_polls); + + if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) + ASSERT(0); + + /* TODO: See if we can return early here - IPS2 should go + * back directly to IPS0 and clear the flags, but it will + * be safer to directly notify DMCUB of this. + */ + allow_state = dc->hwss.get_idle_state(dc); + } + } + + dc_dmub_srv_notify_idle(dc, false); + if (allow_state & DMUB_IPS1_ALLOW_MASK) { + for (i = 0; i < max_num_polls; ++i) { + commit_state = dc->hwss.get_idle_state(dc); + if (!(commit_state & DMUB_IPS1_COMMIT_MASK)) + break; + + udelay(1); + } + ASSERT(i < max_num_polls); + } + } + + if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) + ASSERT(0); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c index 3ccf1c8cedda..296bf3a38cb9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c @@ -120,6 +120,8 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate, .block_power_control = dcn35_block_power_control, .root_clock_control = dcn35_root_clock_control, + .set_idle_state = dcn35_set_idle_state, + .get_idle_state = dcn35_get_idle_state }; static const struct hwseq_private_funcs dcn35_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c index 4e333e2a3147..3c7c810bab1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c @@ -748,6 +748,8 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_z10 = false, .ignore_pg = true, .psp_disabled_wa = true, + .ips2_eval_delay_us = 200, + .ips2_entry_delay_us = 400 }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index ece806a63d8d..34737d60b965 
100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -648,18 +648,10 @@ bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable) // TODO: review other cases when idle optimization is allowed - if (!enable) { - // Tell PMFW to exit low power state - if (dc->clk_mgr->funcs->exit_low_power_state) - dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); - - dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true); - } - - dc_dmub_srv_notify_idle(dc, enable); - if (!enable) - dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true); + dc_dmub_srv_exit_low_power_state(dc); + else + dc_dmub_srv_notify_idle(dc, enable); return true; } @@ -1193,3 +1185,19 @@ void dcn35_optimize_bandwidth( dc->hwss.root_clock_control(dc, &pg_update_state, false); } } + +void dcn35_set_idle_state(const struct dc *dc, bool allow_idle) +{ + // TODO: Find a more suitable communication + if (dc->clk_mgr->funcs->set_idle_state) + dc->clk_mgr->funcs->set_idle_state(dc->clk_mgr, allow_idle); +} + +uint32_t dcn35_get_idle_state(const struct dc *dc) +{ + // TODO: Find a more suitable communication + if (dc->clk_mgr->funcs->get_idle_state) + return dc->clk_mgr->funcs->get_idle_state(dc->clk_mgr); + + return 0; +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h index 9b66ab0c909c..0dff10d179b8 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h @@ -81,4 +81,7 @@ void dcn35_dsc_pg_control( struct dce_hwseq *hws, unsigned int dsc_inst, bool power_on); + +void dcn35_set_idle_state(const struct dc *dc, bool allow_idle); +uint32_t dcn35_get_idle_state(const struct dc *dc); #endif /* __DC_HWSS_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index c43d1f6c2a06..452680fe9aab 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -420,7 +420,6 @@ struct hw_sequencer_funcs { struct pg_block_update *update_state, bool power_on); void (*set_idle_state)(const struct dc *dc, bool allow_idle); uint32_t (*get_idle_state)(const struct dc *dc); - bool (*is_pipe_topology_transition_seamless)(struct dc *dc, const struct dc_state *cur_ctx, const struct dc_state *new_ctx); From 2a6a491dfc0073b2bd28a69d1270c5bb8d3fc33a Mon Sep 17 00:00:00 2001 From: Sung Joon Kim Date: Thu, 5 Oct 2023 14:56:24 -0400 Subject: [PATCH 37/66] drm/amd/display: Fix HDMI framepack 3D test issue [why] Bandwidth validation failure on framepack tests. Need to double pixel clock when 3D format is framepack. Also for HDMI displays, we need to keep the ITC flag set to 1 by default. [how] Double the pixel clock when using framepack 3D format. Set the HDMI ITC bit to 1.
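For reference (a standalone sketch, not part of the patch): HDMI frame packing carries both eye views in one transmitted frame, so the vertical total and hence the pixel clock double. With the standard 1080p24 timing (2750 x 1125 total at 24 Hz) the factor of two looks like this:

#include <stdio.h>

int main(void)
{
	/* CTA-861 total timing for 1920x1080p24 */
	const unsigned long h_total = 2750;
	const unsigned long v_total = 1125;
	const unsigned long refresh_hz = 24;

	unsigned long pix_clk_2d = h_total * v_total * refresh_hz;       /* 74,250,000 Hz */
	/* Frame packing stacks both eye views, doubling the vertical total. */
	unsigned long pix_clk_3d = h_total * (2 * v_total) * refresh_hz; /* 148,500,000 Hz */

	printf("2D: %lu Hz, frame-packed 3D: %lu Hz\n", pix_clk_2d, pix_clk_3d);
	return 0;
}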
Reviewed-by: Charlene Liu Acked-by: Roman Li Signed-off-by: Sung Joon Kim Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2 +- drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index f9e472f08e21..1d48278cba96 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -4229,7 +4229,7 @@ static void set_avi_info_frame( switch (stream->content_type) { case DISPLAY_CONTENT_TYPE_NO_DATA: hdmi_info.bits.CN0_CN1 = 0; - hdmi_info.bits.ITC = 0; + hdmi_info.bits.ITC = 1; break; case DISPLAY_CONTENT_TYPE_GRAPHICS: hdmi_info.bits.CN0_CN1 = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index 331f6bd97d38..89836f175a13 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -571,6 +571,8 @@ static void populate_dml_timing_cfg_from_stream_state(struct dml_timing_cfg_st * out->RefreshRate[location] = ((in->timing.pix_clk_100hz * 100) / in->timing.h_total) / in->timing.v_total; out->VFrontPorch[location] = in->timing.v_front_porch; out->PixelClock[location] = in->timing.pix_clk_100hz / 10000.00; + if (in->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + out->PixelClock[location] *= 2; out->HTotal[location] = in->timing.h_total; out->VTotal[location] = in->timing.v_total; out->Interlace[location] = in->timing.flags.INTERLACE; From 0604ffead6e5927d2e70698df6bcb1c68690ad0e Mon Sep 17 00:00:00 2001 From: Alex Hung Date: Tue, 3 Oct 2023 09:48:11 -0600 Subject: [PATCH 38/66] drm/amd/display: Revert "drm/amd/display: allow edp updates for virtual signal" This reverts commit 4ad3ee5ccc77aa3f9d702f7b9ad4d9cfeca6c443. [WHY & HOW] Virtual signal is not supported as audio capable by DC. Reviewed-by: Chao-kai Wang Acked-by: Roman Li Signed-off-by: Alex Hung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/include/signal_types.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h index 325c5ba4c82a..1b14b17a79c7 100644 --- a/drivers/gpu/drm/amd/display/include/signal_types.h +++ b/drivers/gpu/drm/amd/display/include/signal_types.h @@ -132,7 +132,6 @@ static inline bool dc_is_audio_capable_signal(enum signal_type signal) { return (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST || - signal == SIGNAL_TYPE_VIRTUAL || dc_is_hdmi_signal(signal)); } From fc0479ac5dd9ac48673ade462622a4efbda30223 Mon Sep 17 00:00:00 2001 From: Alex Hung Date: Tue, 3 Oct 2023 15:25:30 -0600 Subject: [PATCH 39/66] drm/amd/display: Set emulated sink type to HDMI accordingly. [WHY & HOW] Virtual sink is not audio-capable and this causes kms_hdmi_inject's inject-audio to fail. Set it to HDMI according to EDID. 
Reviewed-by: Chao-kai Wang Acked-by: Roman Li Signed-off-by: Alex Hung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 9a712791f309..4f6c8af4f046 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6513,6 +6513,9 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector) return; } + if (drm_detect_hdmi_monitor(edid)) + init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A; + aconnector->edid = edid; aconnector->dc_em_sink = dc_link_add_remote_sink( From 0d93f39516b0608384317923f9feda6d1ae210fb Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Thu, 5 Oct 2023 10:58:51 -0600 Subject: [PATCH 40/66] drm/amd/display: Correct enum typo This commit just replaces dc_interrupt_po*r*larity with its correct name, which is dc_interrupt_polarity. Reviewed-by: Aurabindo Pillai Acked-by: Roman Li Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/irq_types.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h index 530c2578db40..93354bff456a 100644 --- a/drivers/gpu/drm/amd/display/dc/irq_types.h +++ b/drivers/gpu/drm/amd/display/dc/irq_types.h @@ -178,7 +178,7 @@ enum dc_interrupt_context { INTERRUPT_CONTEXT_NUMBER }; -enum dc_interrupt_porlarity { +enum dc_interrupt_polarity { INTERRUPT_POLARITY_DEFAULT = 0, INTERRUPT_POLARITY_LOW = INTERRUPT_POLARITY_DEFAULT, INTERRUPT_POLARITY_HIGH, @@ -199,12 +199,12 @@ struct dc_interrupt_params { /* The polarity *change* which will trigger an interrupt. * If 'requested_polarity == INTERRUPT_POLARITY_BOTH', then * 'current_polarity' must be initialised. */ - enum dc_interrupt_porlarity requested_polarity; + enum dc_interrupt_polarity requested_polarity; /* If 'requested_polarity == INTERRUPT_POLARITY_BOTH', * 'current_polarity' should contain the current state, which means * the interrupt will be triggered when state changes from what is, * in 'current_polarity'. */ - enum dc_interrupt_porlarity current_polarity; + enum dc_interrupt_polarity current_polarity; enum dc_irq_source irq_source; enum dc_interrupt_context int_context; }; From 6ce4f9ee25ffc1f6be693a103c37d6d47edb0f0d Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Thu, 5 Oct 2023 08:27:45 -0600 Subject: [PATCH 41/66] drm/amd/display: Add prefix to amdgpu crtc functions The ftrace debug feature allows filtering functions based on a prefix, which can be helpful in some complex debug scenarios. The driver can benefit more from this feature if the function name follows some patterns; for this reason, this commit adds the prefix amdgpu_dm_crtc_ to all the functions that do not have it in the amdgpu_dm_crtc.c file. 
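With the shared amdgpu_dm_crtc_ prefix in place, every handler in this file can be selected at once from tracefs, for example by writing 'amdgpu_dm_crtc_*' to /sys/kernel/tracing/set_ftrace_filter before enabling the function tracer.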
Reviewed-by: Aurabindo Pillai Acked-by: Roman Li Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 48 +++++++++---------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 97b7a0b8a1c2..cb0b48bb2a7d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -96,7 +96,7 @@ bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state) dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; } -static void vblank_control_worker(struct work_struct *work) +static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) { struct vblank_control_work *vblank_work = container_of(work, struct vblank_control_work, work); @@ -151,7 +151,7 @@ static void vblank_control_worker(struct work_struct *work) kfree(vblank_work); } -static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) +static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable) { struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); struct amdgpu_device *adev = drm_to_adev(crtc->dev); @@ -191,7 +191,7 @@ skip: if (!work) return -ENOMEM; - INIT_WORK(&work->work, vblank_control_worker); + INIT_WORK(&work->work, amdgpu_dm_crtc_vblank_control_worker); work->dm = dm; work->acrtc = acrtc; work->enable = enable; @@ -209,15 +209,15 @@ skip: int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc) { - return dm_set_vblank(crtc, true); + return amdgpu_dm_crtc_set_vblank(crtc, true); } void amdgpu_dm_crtc_disable_vblank(struct drm_crtc *crtc) { - dm_set_vblank(crtc, false); + amdgpu_dm_crtc_set_vblank(crtc, false); } -static void dm_crtc_destroy_state(struct drm_crtc *crtc, +static void amdgpu_dm_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state) { struct dm_crtc_state *cur = to_dm_crtc_state(state); @@ -233,7 +233,7 @@ static void dm_crtc_destroy_state(struct drm_crtc *crtc, kfree(state); } -static struct drm_crtc_state *dm_crtc_duplicate_state(struct drm_crtc *crtc) +static struct drm_crtc_state *amdgpu_dm_crtc_duplicate_state(struct drm_crtc *crtc) { struct dm_crtc_state *state, *cur; @@ -273,12 +273,12 @@ static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc) kfree(crtc); } -static void dm_crtc_reset_state(struct drm_crtc *crtc) +static void amdgpu_dm_crtc_reset_state(struct drm_crtc *crtc) { struct dm_crtc_state *state; if (crtc->state) - dm_crtc_destroy_state(crtc, crtc->state); + amdgpu_dm_crtc_destroy_state(crtc, crtc->state); state = kzalloc(sizeof(*state), GFP_KERNEL); if (WARN_ON(!state)) @@ -298,12 +298,12 @@ static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc) /* Implemented only the options currently available for the driver */ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { - .reset = dm_crtc_reset_state, + .reset = amdgpu_dm_crtc_reset_state, .destroy = amdgpu_dm_crtc_destroy, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, - .atomic_duplicate_state = dm_crtc_duplicate_state, - .atomic_destroy_state = dm_crtc_destroy_state, + .atomic_duplicate_state = amdgpu_dm_crtc_duplicate_state, + .atomic_destroy_state = amdgpu_dm_crtc_destroy_state, .set_crc_source = amdgpu_dm_crtc_set_crc_source, .verify_crc_source = amdgpu_dm_crtc_verify_crc_source, .get_crc_sources = amdgpu_dm_crtc_get_crc_sources, @@ -316,11 +316,11 @@ static const struct 
drm_crtc_funcs amdgpu_dm_crtc_funcs = { #endif }; -static void dm_crtc_helper_disable(struct drm_crtc *crtc) +static void amdgpu_dm_crtc_helper_disable(struct drm_crtc *crtc) { } -static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) +static int amdgpu_dm_crtc_count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) { struct drm_atomic_state *state = new_crtc_state->state; struct drm_plane *plane; @@ -352,8 +352,8 @@ static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) return num_active; } -static void dm_update_crtc_active_planes(struct drm_crtc *crtc, - struct drm_crtc_state *new_crtc_state) +static void amdgpu_dm_crtc_update_crtc_active_planes(struct drm_crtc *crtc, + struct drm_crtc_state *new_crtc_state) { struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); @@ -364,18 +364,18 @@ static void dm_update_crtc_active_planes(struct drm_crtc *crtc, return; dm_new_crtc_state->active_planes = - count_crtc_active_planes(new_crtc_state); + amdgpu_dm_crtc_count_crtc_active_planes(new_crtc_state); } -static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc, +static bool amdgpu_dm_crtc_helper_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; } -static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, - struct drm_atomic_state *state) +static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) { struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); @@ -386,7 +386,7 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, trace_amdgpu_dm_crtc_atomic_check(crtc_state); - dm_update_crtc_active_planes(crtc, crtc_state); + amdgpu_dm_crtc_update_crtc_active_planes(crtc, crtc_state); if (WARN_ON(unlikely(!dm_crtc_state->stream && amdgpu_dm_crtc_modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) { @@ -429,9 +429,9 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, } static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = { - .disable = dm_crtc_helper_disable, - .atomic_check = dm_crtc_helper_atomic_check, - .mode_fixup = dm_crtc_helper_mode_fixup, + .disable = amdgpu_dm_crtc_helper_disable, + .atomic_check = amdgpu_dm_crtc_helper_atomic_check, + .mode_fixup = amdgpu_dm_crtc_helper_mode_fixup, .get_scanout_position = amdgpu_crtc_get_scanout_position, }; From c4066d8be4d8c7c01d74ba1872cab2bc589d4912 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Thu, 5 Oct 2023 10:41:33 -0600 Subject: [PATCH 42/66] drm/amd/display: Add prefix for plane functions This commit adds the amdgpu_dm_plane_ prefix for all functions in the amdgpu_dm_plane.c. This change enables an easy way to filter code paths via ftrace. 
Reviewed-by: Aurabindo Pillai Acked-by: Roman Li Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +- .../amd/display/amdgpu_dm/amdgpu_dm_plane.c | 516 +++++++++--------- .../amd/display/amdgpu_dm/amdgpu_dm_plane.h | 2 +- 3 files changed, 262 insertions(+), 258 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 4f6c8af4f046..f08202abab4e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -9880,7 +9880,7 @@ static int dm_update_plane_state(struct dc *dc, /* Block top most plane from being a video plane */ if (plane->type == DRM_PLANE_TYPE_OVERLAY) { - if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) + if (amdgpu_dm_plane_is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) return -EINVAL; *is_top_most_overlay = false; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index 03df26bd8e83..116121e647ca 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -139,7 +139,7 @@ void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state } } -static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod) +static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod) { if (!*mods) return; @@ -164,12 +164,12 @@ static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_ *size += 1; } -static bool modifier_has_dcc(uint64_t modifier) +static bool amdgpu_dm_plane_modifier_has_dcc(uint64_t modifier) { return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier); } -static unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier) +static unsigned int amdgpu_dm_plane_modifier_gfx9_swizzle_mode(uint64_t modifier) { if (modifier == DRM_FORMAT_MOD_LINEAR) return 0; @@ -177,8 +177,8 @@ static unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier) return AMD_FMT_MOD_GET(TILE, modifier); } -static void fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, - uint64_t tiling_flags) +static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, + uint64_t tiling_flags) { /* Fill GFX8 params */ if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) { @@ -209,8 +209,8 @@ static void fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); } -static void fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, - union dc_tiling_info *tiling_info) +static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, + union dc_tiling_info *tiling_info) { /* Fill GFX9 params */ tiling_info->gfx9.num_pipes = @@ -230,9 +230,9 @@ static void fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs; } -static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev, - union dc_tiling_info *tiling_info, - uint64_t modifier) +static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev, + union dc_tiling_info *tiling_info, + uint64_t modifier) { unsigned int 
mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier); unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier); @@ -241,7 +241,7 @@ static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev pipes_log2 = min(5u, mod_pipe_xor_bits); - fill_gfx9_tiling_info_from_device(adev, tiling_info); + amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info); if (!IS_AMD_FMT_MOD(modifier)) return; @@ -258,13 +258,13 @@ static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev } } -static int validate_dcc(struct amdgpu_device *adev, - const enum surface_pixel_format format, - const enum dc_rotation_angle rotation, - const union dc_tiling_info *tiling_info, - const struct dc_plane_dcc_param *dcc, - const struct dc_plane_address *address, - const struct plane_size *plane_size) +static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const union dc_tiling_info *tiling_info, + const struct dc_plane_dcc_param *dcc, + const struct dc_plane_address *address, + const struct plane_size *plane_size) { struct dc *dc = adev->dm.dc; struct dc_dcc_surface_param input; @@ -303,23 +303,23 @@ static int validate_dcc(struct amdgpu_device *adev, return 0; } -static int fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, - const struct amdgpu_framebuffer *afb, - const enum surface_pixel_format format, - const enum dc_rotation_angle rotation, - const struct plane_size *plane_size, - union dc_tiling_info *tiling_info, - struct dc_plane_dcc_param *dcc, - struct dc_plane_address *address, - const bool force_disable_dcc) +static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, + const struct amdgpu_framebuffer *afb, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const struct plane_size *plane_size, + union dc_tiling_info *tiling_info, + struct dc_plane_dcc_param *dcc, + struct dc_plane_address *address, + const bool force_disable_dcc) { const uint64_t modifier = afb->base.modifier; int ret = 0; - fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier); - tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier); + amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier); + tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier); - if (modifier_has_dcc(modifier) && !force_disable_dcc) { + if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) { uint64_t dcc_address = afb->address + afb->base.offsets[1]; bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier); bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier); @@ -347,60 +347,64 @@ static int fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, address->grph.meta_addr.high_part = upper_32_bits(dcc_address); } - ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); + ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); if (ret) - drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret); + drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret); return ret; } -static void add_gfx10_1_modifiers(const struct amdgpu_device *adev, - uint64_t **mods, uint64_t *size, uint64_t *capacity) +static void amdgpu_dm_plane_add_gfx10_1_modifiers(const struct 
amdgpu_device *adev, + uint64_t **mods, + uint64_t *size, + uint64_t *capacity) { int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); - /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + /* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */ + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, 
AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); } -static void add_gfx9_modifiers(const struct amdgpu_device *adev, - uint64_t **mods, uint64_t *size, uint64_t *capacity) +static void amdgpu_dm_plane_add_gfx9_modifiers(const struct amdgpu_device *adev, + uint64_t **mods, + uint64_t *size, + uint64_t *capacity) { int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); int pipe_xor_bits = min(8, pipes + @@ -421,163 +425,164 @@ static void add_gfx9_modifiers(const struct amdgpu_device *adev, */ if (has_constant_encode) { - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1)); } - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0)); if (has_constant_encode) { - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | - - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(RB, rb) | - AMD_FMT_MOD_SET(PIPE, pipes)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + 
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(RB, rb) | + AMD_FMT_MOD_SET(PIPE, pipes)); } - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) | - AMD_FMT_MOD_SET(RB, rb) | - AMD_FMT_MOD_SET(PIPE, pipes)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) | + AMD_FMT_MOD_SET(RB, rb) | + AMD_FMT_MOD_SET(PIPE, pipes)); } /* * Only supported for 64bpp on Raven, will be filtered on format in - * dm_plane_format_mod_supported. + * amdgpu_dm_plane_format_mod_supported. */ - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); if (adev->family == AMDGPU_FAMILY_RV) { - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); } /* * Only supported for 64bpp on Raven, will be filtered on format in - * dm_plane_format_mod_supported. + * amdgpu_dm_plane_format_mod_supported. 
*/ - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); if (adev->family == AMDGPU_FAMILY_RV) { - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); } } -static void add_gfx10_3_modifiers(const struct amdgpu_device *adev, - uint64_t **mods, uint64_t *size, uint64_t *capacity) +static void amdgpu_dm_plane_add_gfx10_3_modifiers(const struct amdgpu_device *adev, + uint64_t **mods, + uint64_t *size, + uint64_t *capacity) { int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs); - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - 
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs) | - AMD_FMT_MOD_SET(DCC, 1) | - AMD_FMT_MOD_SET(DCC_RETILE, 1) | - AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | - AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs)); - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | - AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | - AMD_FMT_MOD_SET(PACKERS, pkrs)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs)); - /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + /* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */ + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | + AMD_FMT_MOD_SET(TILE_VERSION, 
AMD_FMT_MOD_TILE_VER_GFX9)); } -static void add_gfx11_modifiers(struct amdgpu_device *adev, +static void amdgpu_dm_plane_add_gfx11_modifiers(struct amdgpu_device *adev, uint64_t **mods, uint64_t *size, uint64_t *capacity) { int num_pipes = 0; @@ -628,21 +633,21 @@ static void add_gfx11_modifiers(struct amdgpu_device *adev, AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B); - add_modifier(mods, size, capacity, modifier_dcc_best); - add_modifier(mods, size, capacity, modifier_dcc_4k); + amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best); + amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k); - add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1)); - add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1)); - add_modifier(mods, size, capacity, modifier_r_x); + amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_r_x); } - add_modifier(mods, size, capacity, AMD_FMT_MOD | - AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | - AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D)); + amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D)); } -static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) +static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) { uint64_t size = 0, capacity = 128; *mods = NULL; @@ -654,15 +659,15 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL); if (plane_type == DRM_PLANE_TYPE_CURSOR) { - add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); - add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); + amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); + amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); return *mods ? 0 : -ENOMEM; } switch (adev->family) { case AMDGPU_FAMILY_AI: case AMDGPU_FAMILY_RV: - add_gfx9_modifiers(adev, mods, &size, &capacity); + amdgpu_dm_plane_add_gfx9_modifiers(adev, mods, &size, &capacity); break; case AMDGPU_FAMILY_NV: case AMDGPU_FAMILY_VGH: @@ -670,21 +675,21 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty case AMDGPU_FAMILY_GC_10_3_6: case AMDGPU_FAMILY_GC_10_3_7: if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) - add_gfx10_3_modifiers(adev, mods, &size, &capacity); + amdgpu_dm_plane_add_gfx10_3_modifiers(adev, mods, &size, &capacity); else - add_gfx10_1_modifiers(adev, mods, &size, &capacity); + amdgpu_dm_plane_add_gfx10_1_modifiers(adev, mods, &size, &capacity); break; case AMDGPU_FAMILY_GC_11_0_0: case AMDGPU_FAMILY_GC_11_0_1: case AMDGPU_FAMILY_GC_11_5_0: - add_gfx11_modifiers(adev, mods, &size, &capacity); + amdgpu_dm_plane_add_gfx11_modifiers(adev, mods, &size, &capacity); break; } - add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); + amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); /* INVALID marks the end of the list. 
*/ - add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); + amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); if (!*mods) return -ENOMEM; @@ -692,9 +697,9 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty return 0; } -static int get_plane_formats(const struct drm_plane *plane, - const struct dc_plane_cap *plane_cap, - uint32_t *formats, int max_formats) +static int amdgpu_dm_plane_get_plane_formats(const struct drm_plane *plane, + const struct dc_plane_cap *plane_cap, + uint32_t *formats, int max_formats) { int i, num_formats = 0; @@ -818,22 +823,22 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev, } if (adev->family >= AMDGPU_FAMILY_AI) { - ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format, - rotation, plane_size, - tiling_info, dcc, - address, - force_disable_dcc); + ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format, + rotation, plane_size, + tiling_info, dcc, + address, + force_disable_dcc); if (ret) return ret; } else { - fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags); + amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags); } return 0; } -static int dm_plane_helper_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *new_state) +static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) { struct amdgpu_framebuffer *afb; struct drm_gem_object *obj; @@ -928,8 +933,8 @@ error_unlock: return r; } -static void dm_plane_helper_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) +static void amdgpu_dm_plane_helper_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) { struct amdgpu_bo *rbo; int r; @@ -949,7 +954,7 @@ static void dm_plane_helper_cleanup_fb(struct drm_plane *plane, amdgpu_bo_unref(&rbo); } -static void get_min_max_dc_plane_scaling(struct drm_device *dev, +static void amdgpu_dm_plane_get_min_max_dc_plane_scaling(struct drm_device *dev, struct drm_framebuffer *fb, int *min_downscale, int *max_upscale) { @@ -1030,8 +1035,8 @@ int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state, } /* Get min/max allowed scaling factors from plane caps. */ - get_min_max_dc_plane_scaling(state->crtc->dev, fb, - &min_downscale, &max_upscale); + amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->crtc->dev, fb, + &min_downscale, &max_upscale); /* * Convert to drm convention: 16.16 fixed point, instead of dc's * 1.0 == 1000. 
Also drm scaling is src/dst instead of dc's @@ -1101,8 +1106,8 @@ int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev, /* Validate scaling per-format with DC plane caps */ if (state->plane && state->plane->dev && state->fb) { - get_min_max_dc_plane_scaling(state->plane->dev, state->fb, - &min_downscale, &max_upscale); + amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->plane->dev, state->fb, + &min_downscale, &max_upscale); } else { min_downscale = 250; max_upscale = 16000; @@ -1128,8 +1133,8 @@ int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev, return 0; } -static int dm_plane_atomic_check(struct drm_plane *plane, - struct drm_atomic_state *state) +static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) { struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); @@ -1167,8 +1172,8 @@ static int dm_plane_atomic_check(struct drm_plane *plane, return -EINVAL; } -static int dm_plane_atomic_async_check(struct drm_plane *plane, - struct drm_atomic_state *state) +static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane, + struct drm_atomic_state *state) { /* Only support async updates on cursor planes. */ if (plane->type != DRM_PLANE_TYPE_CURSOR) @@ -1177,8 +1182,8 @@ static int dm_plane_atomic_async_check(struct drm_plane *plane, return 0; } -static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, - struct dc_cursor_position *position) +static int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, + struct dc_cursor_position *position) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); int x, y; @@ -1241,7 +1246,7 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane, amdgpu_crtc->crtc_id, plane->state->crtc_w, plane->state->crtc_h); - ret = get_cursor_position(plane, crtc, &position); + ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position); if (ret) return; @@ -1290,8 +1295,8 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane, } } -static void dm_plane_atomic_async_update(struct drm_plane *plane, - struct drm_atomic_state *state) +static void amdgpu_dm_plane_atomic_async_update(struct drm_plane *plane, + struct drm_atomic_state *state) { struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); @@ -1315,14 +1320,14 @@ static void dm_plane_atomic_async_update(struct drm_plane *plane, } static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { - .prepare_fb = dm_plane_helper_prepare_fb, - .cleanup_fb = dm_plane_helper_cleanup_fb, - .atomic_check = dm_plane_atomic_check, - .atomic_async_check = dm_plane_atomic_async_check, - .atomic_async_update = dm_plane_atomic_async_update + .prepare_fb = amdgpu_dm_plane_helper_prepare_fb, + .cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb, + .atomic_check = amdgpu_dm_plane_atomic_check, + .atomic_async_check = amdgpu_dm_plane_atomic_async_check, + .atomic_async_update = amdgpu_dm_plane_atomic_async_update }; -static void dm_drm_plane_reset(struct drm_plane *plane) +static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane) { struct dm_plane_state *amdgpu_state = NULL; @@ -1336,8 +1341,7 @@ static void dm_drm_plane_reset(struct drm_plane *plane) __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); } -static struct drm_plane_state * -dm_drm_plane_duplicate_state(struct drm_plane *plane) +static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct drm_plane *plane) 
{ struct dm_plane_state *dm_plane_state, *old_dm_plane_state; @@ -1356,15 +1360,15 @@ dm_drm_plane_duplicate_state(struct drm_plane *plane) return &dm_plane_state->base; } -static bool dm_plane_format_mod_supported(struct drm_plane *plane, - uint32_t format, - uint64_t modifier) +static bool amdgpu_dm_plane_format_mod_supported(struct drm_plane *plane, + uint32_t format, + uint64_t modifier) { struct amdgpu_device *adev = drm_to_adev(plane->dev); const struct drm_format_info *info = drm_format_info(format); int i; - enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3; + enum dm_micro_swizzle microtile = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier) & 3; if (!info) return false; @@ -1401,7 +1405,7 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane, info->cpp[0] < 8) return false; - if (modifier_has_dcc(modifier)) { + if (amdgpu_dm_plane_modifier_has_dcc(modifier)) { /* Per radeonsi comments 16/64 bpp are more complicated. */ if (info->cpp[0] != 4) return false; @@ -1415,8 +1419,8 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane, return true; } -static void dm_drm_plane_destroy_state(struct drm_plane *plane, - struct drm_plane_state *state) +static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) { struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); @@ -1430,10 +1434,10 @@ static const struct drm_plane_funcs dm_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_plane_helper_destroy, - .reset = dm_drm_plane_reset, - .atomic_duplicate_state = dm_drm_plane_duplicate_state, - .atomic_destroy_state = dm_drm_plane_destroy_state, - .format_mod_supported = dm_plane_format_mod_supported, + .reset = amdgpu_dm_plane_drm_plane_reset, + .atomic_duplicate_state = amdgpu_dm_plane_drm_plane_duplicate_state, + .atomic_destroy_state = amdgpu_dm_plane_drm_plane_destroy_state, + .format_mod_supported = amdgpu_dm_plane_format_mod_supported, }; int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, @@ -1447,10 +1451,10 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, unsigned int supported_rotations; uint64_t *modifiers = NULL; - num_formats = get_plane_formats(plane, plane_cap, formats, - ARRAY_SIZE(formats)); + num_formats = amdgpu_dm_plane_get_plane_formats(plane, plane_cap, formats, + ARRAY_SIZE(formats)); - res = get_plane_modifiers(dm->adev, plane->type, &modifiers); + res = amdgpu_dm_plane_get_plane_modifiers(dm->adev, plane->type, &modifiers); if (res) return res; @@ -1520,7 +1524,7 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, return 0; } -bool is_video_format(uint32_t format) +bool amdgpu_dm_plane_is_video_format(uint32_t format) { int i; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h index 930f1572f898..b51a6b57bd9b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h @@ -62,5 +62,5 @@ void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state bool *per_pixel_alpha, bool *pre_multiplied_alpha, bool *global_alpha, int *global_alpha_value); -bool is_video_format(uint32_t format); +bool amdgpu_dm_plane_is_video_format(uint32_t format); #endif From 79f3f1b66753b3a3a269d73676bf50987921f267 Mon Sep 17 00:00:00 2001 From: Samson Tam Date: Thu, 5 Oct 2023 01:31:12 -0400 Subject: [PATCH 
43/66] drm/amd/display: fix num_ways overflow error [Why] Helper function calculates num_ways using 32-bit. But is returned as 8-bit. If num_ways exceeds 8-bit, then it reports back the incorrect num_ways and erroneously uses MALL when it should not [How] Make returned value 32-bit and convert after it checks against caps.cache_num_ways, which is under 8-bit Reviewed-by: Alvin Lee Acked-by: Roman Li Signed-off-by: Samson Tam Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 68dc99034eba..2173d84e4953 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -217,7 +217,7 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc) static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx) { int i; - uint8_t num_ways = 0; + uint32_t num_ways = 0; uint32_t mall_ss_size_bytes = 0; mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes; @@ -247,7 +247,8 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) { union dmub_rb_cmd cmd; - uint8_t ways, i; + uint8_t i; + uint32_t ways; int j; bool mall_ss_unsupported = false; struct dc_plane_state *plane = NULL; @@ -307,7 +308,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS; cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB; cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); - cmd.cab.cab_alloc_ways = ways; + cmd.cab.cab_alloc_ways = (uint8_t)ways; dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); From f583db812bc9a97384303761932768e44d1d92a3 Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Fri, 6 Oct 2023 18:01:24 -0400 Subject: [PATCH 44/66] drm/amd/display: Update FAMS sequence for DCN30 & DCN32 Provide DCN32 specific sequence and update DCN30 sequence Reviewed-by: Samson Tam Acked-by: Roman Li Signed-off-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dcn32/dcn32_init.c | 2 +- .../amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 21 ++----------- .../amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 31 +++++++++++++++++++ .../amd/display/dc/hwss/dcn32/dcn32_hwseq.h | 3 ++ 4 files changed, 38 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c index 90f061edb64c..427cfc8c24a4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c @@ -60,7 +60,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = { .pipe_control_lock = dcn20_pipe_control_lock, .interdependent_update_lock = dcn10_lock_all_pipes, .cursor_lock = dcn10_cursor_lock, - .prepare_bandwidth = dcn30_prepare_bandwidth, + .prepare_bandwidth = dcn32_prepare_bandwidth, .optimize_bandwidth = dcn20_optimize_bandwidth, .update_bandwidth = dcn20_update_bandwidth, .set_drr = dcn10_set_drr, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index 1c839e52bae5..d71faf2ecd41 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ 
b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -993,11 +993,7 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc, void dcn30_prepare_bandwidth(struct dc *dc, struct dc_state *context) { - bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support; - /* Any transition into an FPO config should disable MCLK switching first to avoid - * driver and FW P-State synchronization issues. - */ - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && !dc->clk_mgr->clks.fw_based_mclk_switching) { dc->optimized_required = true; context->bw_ctx.bw.dcn.clk.p_state_change_support = false; } @@ -1008,20 +1004,9 @@ void dcn30_prepare_bandwidth(struct dc *dc, dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); dcn20_prepare_bandwidth(dc, context); - /* - * enabled -> enabled: do not disable - * enabled -> disabled: disable - * disabled -> enabled: don't care - * disabled -> disabled: don't care - */ - if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) - dc_dmub_srv_p_state_delegate(dc, false, context); - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { - /* After disabling P-State, restore the original value to ensure we get the correct P-State - * on the next optimize. */ - context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; - } + if (!dc->clk_mgr->clks.fw_based_mclk_switching) + dc_dmub_srv_p_state_delegate(dc, false, context); } void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 2173d84e4953..e837554b8a28 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -50,6 +50,7 @@ #include "dce/dmub_hw_lock_mgr.h" #include "dcn32/dcn32_resource.h" #include "link.h" +#include "../dcn20/dcn20_hwseq.h" #define DC_LOGGER_INIT(logger) @@ -1677,3 +1678,33 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc, return is_seamless; } + +void dcn32_prepare_bandwidth(struct dc *dc, + struct dc_state *context) +{ + bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support; + /* Any transition into an FPO config should disable MCLK switching first to avoid + * driver and FW P-State synchronization issues. 
+ */ + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { + dc->optimized_required = true; + context->bw_ctx.bw.dcn.clk.p_state_change_support = false; + } + + if (dc->clk_mgr->dc_mode_softmax_enabled) + if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && + context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) + dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); + + dcn20_prepare_bandwidth(dc, context); + + if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) + dc_dmub_srv_p_state_delegate(dc, false, context); + + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { + /* After disabling P-State, restore the original value to ensure we get the correct P-State + * on the next optimize. + */ + context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h index 9992e40acd21..cecf7f0f5671 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h @@ -124,4 +124,7 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc, const struct dc_state *cur_ctx, const struct dc_state *new_ctx); +void dcn32_prepare_bandwidth(struct dc *dc, + struct dc_state *context); + #endif /* __DC_HWSS_DCN32_H__ */ From 8d0f4cd2ae44ebe50ff85a49fb248e64f28b6d66 Mon Sep 17 00:00:00 2001 From: Samson Tam Date: Fri, 6 Oct 2023 17:36:16 -0400 Subject: [PATCH 45/66] drm/amd/display: add null check for invalid opps [Why] In cases where number of pipes available is less than num_opp, there will opp instances that are null [How] Add null check to skip over these opp instances Fixes: 40de8403b998 ("drm/amd/display: Update OPP counter from new interface") Reviewed-by: Alvin Lee Acked-by: Roman Li Signed-off-by: Samson Tam Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index d13904548505..74c21d98b4de 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -3575,7 +3575,8 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state mpcc_inst = hubp->inst; // MPCC inst is equal to pipe index in practice for (opp_inst = 0; opp_inst < opp_count; opp_inst++) { - if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) { + if ((dc->res_pool->opps[opp_inst] != NULL) && + (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) { dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst); dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false; break; From 85ca6e85303c10019710f31d4abedafab7994d68 Mon Sep 17 00:00:00 2001 From: Ilya Bakoulin Date: Fri, 6 Oct 2023 15:57:28 -0400 Subject: [PATCH 46/66] drm/amd/display: Fix shaper using bad LUT params [Why] LUT params are not cleared after setting blend TF, which can lead to same params being used for the shaper, if the shaper func is bypassed. [How] Set lut_params to NULL after program_1dlut. 
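A minimal userspace sketch of the hazard, using stand-in names rather than the real MPC interface (program_1dlut() and program_shaper() below are hypothetical stubs, not driver functions):

  #include <stdio.h>

  struct pwl_params { int id; };

  /* Stand-ins for the MPC programming calls, just to show the call flow. */
  static void program_1dlut(const struct pwl_params *p)  { printf("1dlut:  %s\n", p ? "params" : "bypass"); }
  static void program_shaper(const struct pwl_params *p) { printf("shaper: %s\n", p ? "params" : "bypass"); }

  int main(void)
  {
          struct pwl_params blend_lut = { 1 };
          struct pwl_params *lut_params = &blend_lut;   /* set up for the blend TF */

          program_1dlut(lut_params);
          lut_params = NULL;   /* the fix: drop the blend params once they are programmed */

          /* Shaper func is bypassed here; without the reset above, the stale
           * blend params would be handed to the shaper instead of NULL. */
          program_shaper(lut_params);
          return 0;
  }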
Reviewed-by: Krunoslav Kovac Acked-by: Roman Li Signed-off-by: Ilya Bakoulin Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index e837554b8a28..1b9f21fd4f17 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -494,6 +494,7 @@ bool dcn32_set_mcm_luts( } } result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id); + lut_params = NULL; // Shaper if (plane_state->in_shaper_func) { From 1b9ec7cb424441de67d09c3abad46467f82ff161 Mon Sep 17 00:00:00 2001 From: Michael Strauss Date: Tue, 10 Oct 2023 10:47:55 -0400 Subject: [PATCH 47/66] drm/amd/display: Disable SYMCLK32_SE RCO on DCN314 [WHY] Currently causes some DP link layer failures, backing out until the failures are root caused. Reviewed-by: Nicholas Kazlauskas Acked-by: Roman Li Signed-off-by: Michael Strauss Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index ab301ea7c10b..677361d74a4e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -916,7 +916,7 @@ static const struct dc_debug_options debug_defaults_drv = { .hdmistream = true, .hdmichar = true, .dpstream = true, - .symclk32_se = true, + .symclk32_se = false, .symclk32_le = true, .symclk_fe = true, .physymclk = true, From b231933da7d6be53d08139f8adf2560a90b47ca9 Mon Sep 17 00:00:00 2001 From: Iswara Nagulendran Date: Fri, 29 Sep 2023 11:20:46 -0400 Subject: [PATCH 48/66] drm/amd/display: Read before writing Backlight Mode Set Register [HOW&WHY] Reading the value from DP_EDP_BACKLIGHT_MODE_SET_REGISTER, DPCD 0x721 before setting the BP_EDP_PANEL_LUMINANC_CONTROL_ENABLE bit to ensure there are no accidental overwrites. 
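A minimal sketch of the read-modify-write idea (the starting register value and the bit position below are illustrative only, not the actual DPCD 0x721 layout):

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          /* Hypothetical current value of DPCD 0x721 with unrelated bits already set. */
          uint8_t dpcd_721 = 0x05;
          const uint8_t luminance_control_enable = 0x08;   /* illustrative bit */

          uint8_t blind_write = luminance_control_enable;              /* clobbers the other bits */
          uint8_t rmw_write   = dpcd_721 | luminance_control_enable;   /* preserves them */

          printf("blind: 0x%02x  read-modify-write: 0x%02x\n", blind_write, rmw_write);
          return 0;
  }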
Reviewed-by: Sreeja Golui Reviewed-by: Harry Vanzylldejong Acked-by: Roman Li Signed-off-by: Iswara Nagulendran Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../amd/display/dc/link/protocols/link_edp_panel_control.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 86f97ddcc595..e32a7974a4bc 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -182,7 +182,7 @@ bool edp_set_backlight_level_nits(struct dc_link *link, &backlight_control, 1) != DC_OK) return false; } else { - const uint8_t backlight_enable = DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE; + uint8_t backlight_enable = 0; struct target_luminance_value *target_luminance = NULL; //if target luminance value is greater than 24 bits, clip the value to 24 bits @@ -191,6 +191,11 @@ bool edp_set_backlight_level_nits(struct dc_link *link, target_luminance = (struct target_luminance_value *)&backlight_millinits; + core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &backlight_enable, sizeof(uint8_t)); + + backlight_enable |= DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE; + if (core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &backlight_enable, sizeof(backlight_enable)) != DC_OK) From 670da29faf5ff160043a1f02e6ac2ed8345b5d7e Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Tue, 10 Oct 2023 16:32:23 -0400 Subject: [PATCH 49/66] drm/amd/display: add interface to query SubVP status [Why&How] To enable automated testing through IGT, expose an API that is accessible through debugfs to query current status of SubVP feature. Reviewed-by: Alvin Lee Acked-by: Roman Li Signed-off-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 4 ++++ drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 3 ++- drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c | 3 ++- 4 files changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 1259d6351c50..13a177d34376 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -3645,7 +3645,9 @@ static int capabilities_show(struct seq_file *m, void *unused) struct amdgpu_device *adev = (struct amdgpu_device *)m->private; struct dc *dc = adev->dm.dc; bool mall_supported = dc->caps.mall_size_total; + bool subvp_supported = dc->caps.subvp_fw_processing_delay_us; unsigned int mall_in_use = false; + unsigned int subvp_in_use = dc->cap_funcs.get_subvp_en(dc, dc->current_state); struct hubbub *hubbub = dc->res_pool->hubbub; if (hubbub->funcs->get_mall_en) @@ -3653,6 +3655,8 @@ static int capabilities_show(struct seq_file *m, void *unused) seq_printf(m, "mall supported: %s, enabled: %s\n", mall_supported ? "yes" : "no", mall_in_use ? "yes" : "no"); + seq_printf(m, "sub-viewport supported: %s, enabled: %s\n", + subvp_supported ? "yes" : "no", subvp_in_use ? 
"yes" : "no"); return 0; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 72ba62d1a01e..2b5c0361819b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -382,6 +382,7 @@ struct dc_cap_funcs { bool (*get_dcc_compression_cap)(const struct dc *dc, const struct dc_dcc_surface_param *input, struct dc_surface_dcc_cap *output); + bool (*get_subvp_en)(struct dc *dc, struct dc_state *context); }; struct link_training_settings; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 0e1d395a9340..89b072447dba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -1993,7 +1993,8 @@ int dcn32_populate_dml_pipes_from_context( } static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn20_get_dcc_compression_cap + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap, + .get_subvp_en = dcn32_subvp_in_use, }; void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index 7d0e5e9d611f..f7de3eca1225 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -1571,7 +1571,8 @@ static void dcn321_destroy_resource_pool(struct resource_pool **pool) } static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = dcn20_get_dcc_compression_cap + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap, + .get_subvp_en = dcn32_subvp_in_use, }; static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) From ae8cffe353b510d0bbb12488f7ed0ea01ace4823 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Mon, 9 Oct 2023 13:11:32 -0400 Subject: [PATCH 50/66] drm/amd/display: 3.2.256 DC v3.2.256 Summary: * Fixes null-deref regression after "drm/amd/display: Update OPP counter from new interface" * Fixes display flashing when VSR and HDR enabled on dcn32 * Fixes dcn3x intermittent hangs due to FPO * Fixes MST Multi-Stream light up on dcn35 * Fixes green screen on DCN31x when DVI and HDMI monitors attached * Adds DML2 improvements * Adds idle power optimization improvements * Accommodates panels with lower nit backlight * Updates SDP VSC colorimetry from DP test automation request * Reverts "drm/amd/display: allow edp updates for virtual signal" Acked-by: Roman Li Signed-off-by: Aric Cyr Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 2b5c0361819b..6e54ca055fcb 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -49,7 +49,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.255" +#define DC_VER "3.2.256" #define MAX_SURFACES 3 #define MAX_PLANES 6 From dd2687f5d9b2cf950fbe17fbc7c4f64489b19cd6 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 12 Oct 2023 15:09:38 +0530 Subject: [PATCH 51/66] drm/amdgpu: Use discovery table's subrevision Use subrevision of IP version in discovery table to identify SOC revision id for NBIO v7.9 SOCs. Only newer bootloaders update subrevision field. 
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index eccb006e78aa..23f26f8caad4 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -56,8 +56,15 @@ static u32 nbio_v7_9_get_rev_id(struct amdgpu_device *adev) { u32 tmp; + tmp = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0)); + /* If it is VF or subrevision holds a non-zero value, that should be used */ + if (tmp || amdgpu_sriov_vf(adev)) + return tmp; + + /* If discovery subrev is not updated, use register version */ tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0); - tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0, STRAP_ATI_REV_ID_DEV0_F0); + tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0, + STRAP_ATI_REV_ID_DEV0_F0); return tmp; } From 2cea7bb9110d3c52e55977824f79875777b574b4 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 11 Sep 2023 18:05:22 +0800 Subject: [PATCH 52/66] drm/amdgpu: get RAS poison status from DF v4_6_2 Add DF block and RAS poison mode query for DF v4_6_2. Signed-off-by: Tao Zhou Reviewed-by: Stanley.Yang Acked-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 4 +++ drivers/gpu/drm/amd/amdgpu/df_v4_6_2.c | 34 +++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/df_v4_6_2.h | 31 +++++++++++++++++ 4 files changed, 71 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/df_v4_6_2.c create mode 100644 drivers/gpu/drm/amd/amdgpu/df_v4_6_2.h diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index d58e74ae8ade..2afecc55090f 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -104,7 +104,8 @@ amdgpu-y += \ amdgpu-y += \ df_v1_7.o \ df_v3_6.o \ - df_v4_3.o + df_v4_3.o \ + df_v4_6_2.o # add GMC block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index b6cddcad122f..5f9d75900bfa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -35,6 +35,7 @@ #include "df_v1_7.h" #include "df_v3_6.h" #include "df_v4_3.h" +#include "df_v4_6_2.h" #include "nbio_v6_1.h" #include "nbio_v7_0.h" #include "nbio_v7_4.h" @@ -2559,6 +2560,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(4, 3, 0): adev->df.funcs = &df_v4_3_funcs; break; + case IP_VERSION(4, 6, 2): + adev->df.funcs = &df_v4_6_2_funcs; + break; default: break; } diff --git a/drivers/gpu/drm/amd/amdgpu/df_v4_6_2.c b/drivers/gpu/drm/amd/amdgpu/df_v4_6_2.c new file mode 100644 index 000000000000..a47960a0babd --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/df_v4_6_2.c @@ -0,0 +1,34 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "df_v4_6_2.h" + +static bool df_v4_6_2_query_ras_poison_mode(struct amdgpu_device *adev) +{ + /* return true since related regs are inaccessible */ + return true; +} + +const struct amdgpu_df_funcs df_v4_6_2_funcs = { + .query_ras_poison_mode = df_v4_6_2_query_ras_poison_mode, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/df_v4_6_2.h b/drivers/gpu/drm/amd/amdgpu/df_v4_6_2.h new file mode 100644 index 000000000000..3bc3e6d216e2 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/df_v4_6_2.h @@ -0,0 +1,31 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __DF_V4_6_2_H__ +#define __DF_V4_6_2_H__ + +#include "soc15_common.h" + +extern const struct amdgpu_df_funcs df_v4_6_2_funcs; + +#endif From 5bd8e05fe203aa33721cf301a6883b28493f73ab Mon Sep 17 00:00:00 2001 From: Yifan Zhang Date: Tue, 24 Oct 2023 21:16:26 +0800 Subject: [PATCH 53/66] drm/amd/pm: call smu_cmn_get_smc_version in is_mode1_reset_supported. is_mode1_reset_supported may be called before smu init, when smu_context is unitialized in driver load/unload test. Call smu_cmn_get_smc_version explicitly in is_mode1_reset_supported. 
v2: apply to aldebaran in case is_mode1_reset_supported will be uncommented (Candice Li) Fixes: 710d9caec70c ("drm/amd/pm: drop most smu_cmn_get_smc_version in smu") Signed-off-by: Yifan Zhang Reviewed-by: Candice Li Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- .../gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 8 +++++++- drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 10 +++++++++- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 8 +++++++- 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 97a5c9b3e941..1de9f8b5cc5f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -2461,12 +2461,18 @@ static bool sienna_cichlid_is_mode1_reset_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; uint32_t val; + uint32_t smu_version; + int ret; /** * SRIOV env will not support SMU mode1 reset * PM FW support mode1 reset from 58.26 */ - if (amdgpu_sriov_vf(adev) || (smu->smc_fw_version < 0x003a1a00)) + ret = smu_cmn_get_smc_version(smu, NULL, &smu_version); + if (ret) + return false; + + if (amdgpu_sriov_vf(adev) || (smu_version < 0x003a1a00)) return false; /** diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index f082cd4b40c1..1a6675d70a4b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1931,11 +1931,19 @@ static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu) #if 0 struct amdgpu_device *adev = smu->adev; uint32_t val; + uint32_t smu_version; + int ret; + /** * PM FW version support mode1 reset from 68.07 */ - if ((smu->smc_fw_version < 0x00440700)) + ret = smu_cmn_get_smc_version(smu, NULL, &smu_version); + if (ret) return false; + + if ((smu_version < 0x00440700)) + return false; + /** * mode1 reset relies on PSP, so we should check if * PSP is alive. diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index bcd7b39a3a1b..34bd99b0e137 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -2615,13 +2615,19 @@ static int smu_v13_0_0_baco_exit(struct smu_context *smu) static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; + u32 smu_version; + int ret; /* SRIOV does not support SMU mode1 reset */ if (amdgpu_sriov_vf(adev)) return false; /* PMFW support is available since 78.41 */ - if (smu->smc_fw_version < 0x004e2900) + ret = smu_cmn_get_smc_version(smu, NULL, &smu_version); + if (ret) + return false; + + if (smu_version < 0x004e2900) return false; return true; From 3f69d5860f5beeb7714922b0c4a653db7d667190 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Fri, 20 Oct 2023 12:23:37 +0530 Subject: [PATCH 54/66] drm/amdgpu: Add a read to GFX v9.4.3 ring test Issue a read to confirm the register write before ringing doorbell. With multiple XCCs there is chance for race condition. 
Signed-off-by: Lijo Lazar Acked-by: Hawking Zhang Reviewed-by: Asad Kamal Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 362bf51ab1d2..41bbabd9ad4d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -256,6 +256,7 @@ static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring) xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0); scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0); WREG32(scratch_reg0_offset, 0xCAFEDEAD); + tmp = RREG32(scratch_reg0_offset); r = amdgpu_ring_alloc(ring, 3); if (r) From 406e8845356d18bdf3d3a23b347faf67706472ec Mon Sep 17 00:00:00 2001 From: "Lin.Cao" Date: Wed, 25 Oct 2023 11:32:41 +0800 Subject: [PATCH 55/66] drm/amd: check num of link levels when update pcie param In SR-IOV environment, the value of pcie_table->num_of_link_levels will be 0, and num_of_levels - 1 will cause array index out of bounds Signed-off-by: Lin.Cao Acked-by: Jingwen Chen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 3917ae5e681a..a49e5adf7cc3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -2438,6 +2438,9 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu, uint32_t smu_pcie_arg; int ret, i; + if (!num_of_levels) + return 0; + if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap) pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1]; From d055714a21cc0287c7e1b15c355795c42fb3a5cf Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 19 Oct 2023 12:49:57 +0530 Subject: [PATCH 56/66] drm/amdgpu: Use pcie domain of xcc acpi objects PCI domain/segment information of xccs is available through ACPI DSM methods. Consider that also while looking for devices. 
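A minimal sketch of why the lookup key grows from bdf to sbdf, using hypothetical values: two XCCs on different PCI segments can share the same bus/dev/fn and would collide under a 16-bit key.

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          /* Hypothetical XCC: PCI segment (domain) 0x0001, bus 0xc1, device 0, function 0. */
          uint32_t domain = 0x0001;
          uint16_t bdf = (0xc1 << 8) | (0x00 << 3) | 0x0;   /* bus[15:8] dev[7:3] fn[2:0] */

          uint16_t key_bdf  = bdf;                    /* old key: collides across segments */
          uint32_t key_sbdf = (domain << 16) | bdf;   /* new key: segment folded in */

          printf("bdf key:  0x%04x\n", key_bdf);
          printf("sbdf key: 0x%08x\n", key_sbdf);
          return 0;
  }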
Signed-off-by: Lijo Lazar Reviewed-by: Rajneesh Bhardwaj Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 40 +++++++++++++----------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 2bca37044ad0..d62e49758635 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -68,7 +68,7 @@ struct amdgpu_acpi_xcc_info { struct amdgpu_acpi_dev_info { struct list_head list; struct list_head xcc_list; - uint16_t bdf; + uint32_t sbdf; uint16_t supp_xcp_mode; uint16_t xcp_mode; uint16_t mem_mode; @@ -927,7 +927,7 @@ static acpi_status amdgpu_acpi_get_node_id(acpi_handle handle, #endif } -static struct amdgpu_acpi_dev_info *amdgpu_acpi_get_dev(u16 bdf) +static struct amdgpu_acpi_dev_info *amdgpu_acpi_get_dev(u32 sbdf) { struct amdgpu_acpi_dev_info *acpi_dev; @@ -935,14 +935,14 @@ static struct amdgpu_acpi_dev_info *amdgpu_acpi_get_dev(u16 bdf) return NULL; list_for_each_entry(acpi_dev, &amdgpu_acpi_dev_list, list) - if (acpi_dev->bdf == bdf) + if (acpi_dev->sbdf == sbdf) return acpi_dev; return NULL; } static int amdgpu_acpi_dev_init(struct amdgpu_acpi_dev_info **dev_info, - struct amdgpu_acpi_xcc_info *xcc_info, u16 bdf) + struct amdgpu_acpi_xcc_info *xcc_info, u32 sbdf) { struct amdgpu_acpi_dev_info *tmp; union acpi_object *obj; @@ -955,7 +955,7 @@ static int amdgpu_acpi_dev_init(struct amdgpu_acpi_dev_info **dev_info, INIT_LIST_HEAD(&tmp->xcc_list); INIT_LIST_HEAD(&tmp->list); - tmp->bdf = bdf; + tmp->sbdf = sbdf; obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0, AMD_XCC_DSM_GET_SUPP_MODE, NULL, @@ -1007,7 +1007,7 @@ static int amdgpu_acpi_dev_init(struct amdgpu_acpi_dev_info **dev_info, DRM_DEBUG_DRIVER( "New dev(%x): Supported xcp mode: %x curr xcp_mode : %x mem mode : %x, tmr base: %llx tmr size: %llx ", - tmp->bdf, tmp->supp_xcp_mode, tmp->xcp_mode, tmp->mem_mode, + tmp->sbdf, tmp->supp_xcp_mode, tmp->xcp_mode, tmp->mem_mode, tmp->tmr_base, tmp->tmr_size); list_add_tail(&tmp->list, &amdgpu_acpi_dev_list); *dev_info = tmp; @@ -1023,7 +1023,7 @@ out: } static int amdgpu_acpi_get_xcc_info(struct amdgpu_acpi_xcc_info *xcc_info, - u16 *bdf) + u32 *sbdf) { union acpi_object *obj; acpi_status status; @@ -1054,8 +1054,10 @@ static int amdgpu_acpi_get_xcc_info(struct amdgpu_acpi_xcc_info *xcc_info, xcc_info->phy_id = (obj->integer.value >> 32) & 0xFF; /* xcp node of this xcc [47:40] */ xcc_info->xcp_node = (obj->integer.value >> 40) & 0xFF; + /* PF domain of this xcc [31:16] */ + *sbdf = (obj->integer.value) & 0xFFFF0000; /* PF bus/dev/fn of this xcc [63:48] */ - *bdf = (obj->integer.value >> 48) & 0xFFFF; + *sbdf |= (obj->integer.value >> 48) & 0xFFFF; ACPI_FREE(obj); obj = NULL; @@ -1079,7 +1081,7 @@ static int amdgpu_acpi_enumerate_xcc(void) struct acpi_device *acpi_dev; char hid[ACPI_ID_LEN]; int ret, id; - u16 bdf; + u32 sbdf; INIT_LIST_HEAD(&amdgpu_acpi_dev_list); xa_init(&numa_info_xa); @@ -1107,16 +1109,16 @@ static int amdgpu_acpi_enumerate_xcc(void) xcc_info->handle = acpi_device_handle(acpi_dev); acpi_dev_put(acpi_dev); - ret = amdgpu_acpi_get_xcc_info(xcc_info, &bdf); + ret = amdgpu_acpi_get_xcc_info(xcc_info, &sbdf); if (ret) { kfree(xcc_info); continue; } - dev_info = amdgpu_acpi_get_dev(bdf); + dev_info = amdgpu_acpi_get_dev(sbdf); if (!dev_info) - ret = amdgpu_acpi_dev_init(&dev_info, xcc_info, bdf); + ret = amdgpu_acpi_dev_init(&dev_info, xcc_info, sbdf); if (ret == -ENOMEM) 
return ret; @@ -1136,13 +1138,14 @@ int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset, u64 *tmr_size) { struct amdgpu_acpi_dev_info *dev_info; - u16 bdf; + u32 sbdf; if (!tmr_offset || !tmr_size) return -EINVAL; - bdf = pci_dev_id(adev->pdev); - dev_info = amdgpu_acpi_get_dev(bdf); + sbdf = (pci_domain_nr(adev->pdev->bus) << 16); + sbdf |= pci_dev_id(adev->pdev); + dev_info = amdgpu_acpi_get_dev(sbdf); if (!dev_info) return -ENOENT; @@ -1157,13 +1160,14 @@ int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id, { struct amdgpu_acpi_dev_info *dev_info; struct amdgpu_acpi_xcc_info *xcc_info; - u16 bdf; + u32 sbdf; if (!numa_info) return -EINVAL; - bdf = pci_dev_id(adev->pdev); - dev_info = amdgpu_acpi_get_dev(bdf); + sbdf = (pci_domain_nr(adev->pdev->bus) << 16); + sbdf |= pci_dev_id(adev->pdev); + dev_info = amdgpu_acpi_get_dev(sbdf); if (!dev_info) return -ENOENT; From f0b8f65b482548c9d1d87c20fa4850c61305ff47 Mon Sep 17 00:00:00 2001 From: Li Ma Date: Tue, 24 Oct 2023 18:28:24 +0800 Subject: [PATCH 57/66] drm/amd/amdgpu: fix the GPU power print error in pm info Modify the print format of the fractional part to avoid display error. Signed-off-by: Li Ma Reviewed-by: Yifan Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 358bb5e485f2..517b9fb4624c 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -4290,10 +4290,10 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a seq_printf(m, "\t%u mV (VDDNB)\n", value); size = sizeof(uint32_t); if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) - seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff); + seq_printf(m, "\t%u.%02u W (average GPU)\n", query >> 8, query & 0xff); size = sizeof(uint32_t); if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) - seq_printf(m, "\t%u.%u W (current GPU)\n", query >> 8, query & 0xff); + seq_printf(m, "\t%u.%02u W (current GPU)\n", query >> 8, query & 0xff); size = sizeof(value); seq_printf(m, "\n"); From e2ae32d8c2a303af58d22ee61b3b7aa7021e54c9 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Thu, 7 Sep 2023 10:41:00 -0400 Subject: [PATCH 58/66] drm/amdxcp: fix amdxcp unloads incompletely amdxcp unloads incompletely, and below error will be seen during load/unload, sysfs: cannot create duplicate filename '/devices/platform/amdgpu_xcp.0' devres_release_group will free xcp device at first, platform device will be unregistered later in platform_device_unregister. 
Signed-off-by: James Zhu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c index 353597fc908d..90ddd8371176 100644 --- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c +++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c @@ -89,9 +89,10 @@ EXPORT_SYMBOL(amdgpu_xcp_drm_dev_alloc); void amdgpu_xcp_drv_release(void) { for (--pdev_num; pdev_num >= 0; --pdev_num) { - devres_release_group(&xcp_dev[pdev_num]->pdev->dev, NULL); - platform_device_unregister(xcp_dev[pdev_num]->pdev); - xcp_dev[pdev_num]->pdev = NULL; + struct platform_device *pdev = xcp_dev[pdev_num]->pdev; + + devres_release_group(&pdev->dev, NULL); + platform_device_unregister(pdev); xcp_dev[pdev_num] = NULL; } pdev_num = 0; From 1efdd37cc015ed1cade8c1c12227ad25ebb17c77 Mon Sep 17 00:00:00 2001 From: Hamza Mahfooz Date: Thu, 26 Oct 2023 11:50:45 -0400 Subject: [PATCH 59/66] drm/amd/display: fix S/G display enablement An assignment statement was reversed during a refactor which effectively disabled S/G display outright. Since, we use adev->mode_info.gpu_vm_support to indicate to the rest of the driver that S/G display should be enabled and currently it is always set to false. So, to fix this set adev->mode_info.gpu_vm_support's value to that of init_data.flags.gpu_vm_support (and not vice versa). Fixes: 098c13079c6f ("drm/amd/display: enable S/G display for for recent APUs by default") Reported-by: Mark Broadworth Tested-by: Mark Broadworth Acked-by: Alex Deucher Reviewed-by: Harry Wentland Reviewed-by: Yifan Zhang Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f08202abab4e..6f99f6754c11 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1642,7 +1642,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU); } - init_data.flags.gpu_vm_support = adev->mode_info.gpu_vm_support; + adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support; if (amdgpu_dc_feature_mask & DC_FBC_MASK) init_data.flags.fbc_support = true; From f7a17b2b36043a4cc9e2d0b0eea7647133f78b13 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Thu, 26 Oct 2023 13:52:23 -0400 Subject: [PATCH 60/66] drm/amdgpu: Fix typo in IP discovery parsing Fix a typo in parsing of the GC info table header when reading the IP discovery table. 
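A small standalone illustration of why the misplaced parenthesis matters (le16_to_cpu_be() below is a byte-swapping stand-in for le16_to_cpu() on a big-endian host, where the conversion is not a no-op):

  #include <stdio.h>
  #include <stdint.h>

  static uint16_t le16_to_cpu_be(uint16_t raw)
  {
          return (uint16_t)((raw >> 8) | (raw << 8));
  }

  int main(void)
  {
          uint16_t raw_version_minor = 0x0100;   /* little-endian encoding of 1 */

          /* Buggy form: compares the raw value first, then byte-swaps the 0/1 result. */
          int buggy = le16_to_cpu_be(raw_version_minor == 1) != 0;
          /* Fixed form: converts the field to CPU order, then compares. */
          int fixed = le16_to_cpu_be(raw_version_minor) == 1;

          printf("buggy: %d  fixed: %d\n", buggy, fixed);   /* buggy: 0  fixed: 1 */
          return 0;
  }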
Fixes: 0e64c9aad031 ("drm/amdgpu: add type conversion for gc info") Signed-off-by: Mukul Joshi Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 5f9d75900bfa..b6a53e8429b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1488,7 +1488,7 @@ static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) / le32_to_cpu(gc_info->v2.gc_num_sh_per_se); adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc); - if (le16_to_cpu(gc_info->v2.header.version_minor == 1)) { + if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) { adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh); adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu); adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */ From d59fcfb0848b49d5efc62079d3aad4bbaf760aa1 Mon Sep 17 00:00:00 2001 From: Candice Li Date: Wed, 25 Oct 2023 17:27:16 +0800 Subject: [PATCH 61/66] drm/amdgpu: Identify data parity error corrected in replay mode Use ErrorCodeExt field to identify data parity error in replay mode. Signed-off-by: Candice Li Reviewed-by: Tao Zhou Reviewed-by: Yang Wang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 32 ++++++++++++++++++-------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index 025e6aeb058d..743d2f68b090 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -88,6 +88,27 @@ static void umc_v12_0_reset_error_count(struct amdgpu_device *adev) umc_v12_0_reset_error_count_per_channel, NULL); } +static bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status) +{ + return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)); +} + +static bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status) +{ + return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 || + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 && + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 0) || + /* Identify data parity error in replay mode */ + ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0x5 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0xb) && + !(umc_v12_0_is_uncorrectable_error(mc_umc_status))))); +} + static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev, uint64_t umc_reg_offset, unsigned long *error_count) @@ -104,10 +125,7 @@ static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev, mc_umc_status = RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4); - if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 
&& - (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 || - (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 && - REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 0))) + if (umc_v12_0_is_correctable_error(mc_umc_status)) *error_count += 1; } @@ -125,11 +143,7 @@ static void umc_v12_0_query_uncorrectable_error_count(struct amdgpu_device *adev mc_umc_status = RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4); - if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && - (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || - REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || - REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 || - REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) + if (umc_v12_0_is_uncorrectable_error(mc_umc_status)) *error_count += 1; } From a395f7ffcebe59477d80f049889cb652d80db040 Mon Sep 17 00:00:00 2001 From: Candice Li Date: Thu, 26 Oct 2023 12:28:15 +0800 Subject: [PATCH 62/66] drm/amdgpu: Retrieve CE count from ce_count_lo_chip in EccInfo table Retrieve correctable error count from ce_count_lo_chip instead of mca_umc_status. Signed-off-by: Candice Li Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umc_v8_10.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c index 46bfdee79bfd..c4c77257710c 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c @@ -336,7 +336,7 @@ static void umc_v8_10_ecc_info_query_correctable_error_count(struct amdgpu_devic uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst, unsigned long *error_count) { - uint64_t mc_umc_status; + uint16_t ecc_ce_cnt; uint32_t eccinfo_table_idx; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); @@ -345,12 +345,10 @@ static void umc_v8_10_ecc_info_query_correctable_error_count(struct amdgpu_devic umc_inst * adev->umc.channel_inst_num + ch_inst; - /* check the MCUMC_STATUS */ - mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status; - if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && - REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) { - *error_count += 1; - } + /* Retrieve CE count */ + ecc_ce_cnt = ras->umc_ecc.ecc[eccinfo_table_idx].ce_count_lo_chip; + if (ecc_ce_cnt) + *error_count += ecc_ce_cnt; } static void umc_v8_10_ecc_info_query_uncorrectable_error_count(struct amdgpu_device *adev, From 142262a1c02ad4d334ca1152dc4a0f6db3ef3bfc Mon Sep 17 00:00:00 2001 From: David Francis Date: Thu, 12 Oct 2023 10:35:20 -0400 Subject: [PATCH 63/66] drm/amdgpu: Add EXT_COHERENT support for APU and NUMA systems On gfx943 APU, EXT_COHERENT should give MTYPE_CC for local and MTYPE_UC for nonlocal memory. On NUMA systems, local memory gets the local mtype, set by an override callback. If EXT_COHERENT is set, memory will be set as MTYPE_UC by default, with local memory MTYPE_CC. Add an option in the override function for this case, and add a check to ensure it is not used on UNCACHED memory. 
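A simplified sketch of the intended attribute selection (the enum values and pick_mtype() are stand-ins, and the existing non-coherent local-memory override choices are omitted):

  #include <stdio.h>

  enum mtype { MTYPE_NC, MTYPE_UC, MTYPE_CC };
  static const char * const mtype_str[] = { "NC", "UC", "CC" };

  /* With EXT_COHERENT the default is UC and only NUMA-local memory is
   * promoted to CC; UNCACHED allocations are never overridden. */
  static enum mtype pick_mtype(int ext_coherent, int is_local, int uncached)
  {
          if (uncached)
                  return MTYPE_UC;
          if (ext_coherent)
                  return is_local ? MTYPE_CC : MTYPE_UC;
          return MTYPE_NC;
  }

  int main(void)
  {
          printf("ext-coherent, local:  %s\n", mtype_str[pick_mtype(1, 1, 0)]);
          printf("ext-coherent, remote: %s\n", mtype_str[pick_mtype(1, 0, 0)]);
          printf("uncached:             %s\n", mtype_str[pick_mtype(1, 1, 1)]);
          return 0;
  }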
V2: Combined APU and NUMA code into one patch V3: Fixed a potential nullptr in amdgpu_vm_bo_update Signed-off-by: David Francis Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 17 +++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 8 +++++- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 33 +++++++++++++++-------- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 8 +++--- 5 files changed, 45 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index f3c9f93d8899..3cd5977c0709 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -844,6 +844,7 @@ static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence, * @immediate: immediate submission in a page fault * @unlocked: unlocked invalidation during MM callback * @flush_tlb: trigger tlb invalidation after update completed + * @allow_override: change MTYPE for local NUMA nodes * @resv: fences we need to sync to * @start: start of mapped range * @last: last mapped entry @@ -860,7 +861,7 @@ static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence, * 0 for success, negative erro code for failure. */ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, - bool immediate, bool unlocked, bool flush_tlb, + bool immediate, bool unlocked, bool flush_tlb, bool allow_override, struct dma_resv *resv, uint64_t start, uint64_t last, uint64_t flags, uint64_t offset, uint64_t vram_base, struct ttm_resource *res, dma_addr_t *pages_addr, @@ -898,6 +899,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, params.immediate = immediate; params.pages_addr = pages_addr; params.unlocked = unlocked; + params.allow_override = allow_override; /* Implicitly sync to command submissions in the same VM before * unmapping. Sync to moving fences before mapping. 
@@ -1073,6 +1075,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, struct ttm_resource *mem; struct dma_fence **last_update; bool flush_tlb = clear; + bool uncached; struct dma_resv *resv; uint64_t vram_base; uint64_t flags; @@ -1110,9 +1113,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bo_adev = amdgpu_ttm_adev(bo->tbo.bdev); vram_base = bo_adev->vm_manager.vram_base_offset; + uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0; } else { flags = 0x0; vram_base = 0; + uncached = false; } if (clear || (bo && bo->tbo.base.resv == @@ -1146,7 +1151,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, trace_amdgpu_vm_bo_update(mapping); r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, - resv, mapping->start, mapping->last, + !uncached, resv, mapping->start, mapping->last, update_flags, mapping->offset, vram_base, mem, pages_addr, last_update); @@ -1341,8 +1346,8 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, mapping->start < AMDGPU_GMC_HOLE_START) init_pte_value = AMDGPU_PTE_DEFAULT_ATC; - r = amdgpu_vm_update_range(adev, vm, false, false, true, resv, - mapping->start, mapping->last, + r = amdgpu_vm_update_range(adev, vm, false, false, true, false, + resv, mapping->start, mapping->last, init_pte_value, 0, 0, NULL, NULL, &f); amdgpu_vm_free_mapping(adev, vm, mapping, f); @@ -2618,8 +2623,8 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, goto error_unlock; } - r = amdgpu_vm_update_range(adev, vm, true, false, false, NULL, addr, - addr, flags, value, 0, NULL, NULL, NULL); + r = amdgpu_vm_update_range(adev, vm, true, false, false, false, + NULL, addr, addr, flags, value, 0, NULL, NULL, NULL); if (r) goto error_unlock; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 411d42fecfb6..9c7b5d33b56e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -246,6 +246,12 @@ struct amdgpu_vm_update_params { * @table_freed: return true if page table is freed when updating */ bool table_freed; + + /** + * @allow_override: true for memory that is not uncached: allows MTYPE + * to be overridden for NUMA local memory. 
+ */ + bool allow_override; }; struct amdgpu_vm_update_funcs { @@ -441,7 +447,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, struct amdgpu_vm *vm, struct amdgpu_bo *bo); int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, - bool immediate, bool unlocked, bool flush_tlb, + bool immediate, bool unlocked, bool flush_tlb, bool allow_override, struct dma_resv *resv, uint64_t start, uint64_t last, uint64_t flags, uint64_t offset, uint64_t vram_base, struct ttm_resource *res, dma_addr_t *pages_addr, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index 9b025fd17b84..a2287bb25223 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -843,7 +843,7 @@ static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params, */ if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) && adev->gmc.gmc_funcs->override_vm_pte_flags && - num_possible_nodes() > 1 && !params->pages_addr) + num_possible_nodes() > 1 && !params->pages_addr && params->allow_override) amdgpu_gmc_override_vm_pte_flags(adev, params->vm, addr, &flags); params->vm->update_funcs->update(params, pt, pe, addr, count, incr, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index fee3141bb607..b66c5f7e1c56 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1251,12 +1251,15 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, return; } - /* Only override mappings with MTYPE_NC, which is the safe default for - * cacheable memory. + /* MTYPE_NC is the same default and can be overridden. + * MTYPE_UC will be present if the memory is extended-coherent + * and can also be overridden. 
*/ if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) != - AMDGPU_PTE_MTYPE_VG10(MTYPE_NC)) { - dev_dbg_ratelimited(adev->dev, "MTYPE is not NC\n"); + AMDGPU_PTE_MTYPE_VG10(MTYPE_NC) && + (*flags & AMDGPU_PTE_MTYPE_VG10_MASK) != + AMDGPU_PTE_MTYPE_VG10(MTYPE_UC)) { + dev_dbg_ratelimited(adev->dev, "MTYPE is not NC or UC\n"); return; } @@ -1283,15 +1286,23 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, vm->mem_id, local_node, nid); if (nid == local_node) { uint64_t old_flags = *flags; - unsigned int mtype_local = MTYPE_RW; + if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) == + AMDGPU_PTE_MTYPE_VG10(MTYPE_NC)) { + unsigned int mtype_local = MTYPE_RW; - if (amdgpu_mtype_local == 1) - mtype_local = MTYPE_NC; - else if (amdgpu_mtype_local == 2) - mtype_local = MTYPE_CC; + if (amdgpu_mtype_local == 1) + mtype_local = MTYPE_NC; + else if (amdgpu_mtype_local == 2) + mtype_local = MTYPE_CC; + + *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) | + AMDGPU_PTE_MTYPE_VG10(mtype_local); + } else { + /* MTYPE_UC case */ + *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) | + AMDGPU_PTE_MTYPE_VG10(MTYPE_CC); + } - *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) | - AMDGPU_PTE_MTYPE_VG10(mtype_local); dev_dbg_ratelimited(adev->dev, "flags updated from %llx to %llx\n", old_flags, *flags); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 3560a5a58090..e67d06a42809 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1282,7 +1282,7 @@ svm_range_get_pte_flags(struct kfd_node *node, if (num_possible_nodes() <= 1) mapping_flags |= mtype_local; else - mapping_flags |= AMDGPU_VM_MTYPE_NC; + mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; /* system memory accessed by the dGPU */ } else { mapping_flags |= AMDGPU_VM_MTYPE_UC; @@ -1317,7 +1317,7 @@ svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, pr_debug("[0x%llx 0x%llx]\n", start, last); - return amdgpu_vm_update_range(adev, vm, false, true, true, NULL, start, + return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, start, last, init_pte_value, 0, 0, NULL, NULL, fence); } @@ -1424,8 +1424,8 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange, * different memory partition based on fpfn/lpfn, we should use * same vm_manager.vram_base_offset regardless memory partition. */ - r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL, - last_start, prange->start + i, + r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, true, + NULL, last_start, prange->start + i, pte_flags, (last_start - prange->start) << PAGE_SHIFT, bo_adev ? 
bo_adev->vm_manager.vram_base_offset : 0, From 3ea8dd3758ba551f0e3999faefd5b0bb80cbf2f1 Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Tue, 24 Oct 2023 11:20:27 +0800 Subject: [PATCH 64/66] drm/amd/amdgpu: avoid to disable gfxhub interrupt when driver is unloaded avoid to disable gfxhub interrupt when driver is unloaded on gmc 11 Signed-off-by: Kenneth Feng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 19eaada35ede..4713a62ad586 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -73,7 +73,8 @@ gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * fini/suspend, so the overall state doesn't * change over the course of suspend/resume. */ - if (!adev->in_s0ix) + if (!adev->in_s0ix && (adev->in_runpm || adev->in_suspend || + amdgpu_in_reset(adev))) amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false); break; case AMDGPU_IRQ_STATE_ENABLE: From e8e696c307c36ef2d5addb65fc3ba42d54ca2dbb Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Thu, 26 Oct 2023 22:05:49 +0000 Subject: [PATCH 65/66] drm/amdgpu: Remove duplicate fdinfo fields MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some of the fields that are handled by drm_show_fdinfo() crept back in when rebasing the patch. Remove them again. Fixes: 376c25f8ca47 ("drm/amdgpu: Switch to fdinfo helper") Reviewed-by: Christian König Signed-off-by: Rob Clark Reviewed-by: Co-developed-by: Umio Yasuno Signed-off-by: Umio Yasuno Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c index cba7e6cdc7cc..b5f9c9e29612 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c @@ -88,9 +88,6 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file) */ drm_printf(p, "pasid:\t%u\n", fpriv->vm.pasid); - drm_printf(p, "drm-driver:\t%s\n", file->minor->dev->driver->name); - drm_printf(p, "drm-pdev:\t%04x:%02x:%02x.%d\n", domain, bus, dev, fn); - drm_printf(p, "drm-client-id:\t%llu\n", vm->immediate.fence_context); drm_printf(p, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL); drm_printf(p, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL); drm_printf(p, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL); From dd3dd9829bf9a4ecd55482050745efdd9f7f97fc Mon Sep 17 00:00:00 2001 From: Umio Yasuno Date: Thu, 26 Oct 2023 22:05:57 +0000 Subject: [PATCH 66/66] drm/amdgpu: Remove unused variables from amdgpu_show_fdinfo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove unused variables from amdgpu_show_fdinfo Reviewed-by: Christian König Signed-off-by: Umio Yasuno Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c index b5f9c9e29612..5706b282a0c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c @@ -56,21 +56,15 @@ static const char *amdgpu_ip_name[AMDGPU_HW_IP_NUM] = { void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file) { - struct amdgpu_device *adev = 
drm_to_adev(file->minor->dev); struct amdgpu_fpriv *fpriv = file->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_mem_stats stats; ktime_t usage[AMDGPU_HW_IP_NUM]; - uint32_t bus, dev, fn, domain; unsigned int hw_ip; int ret; memset(&stats, 0, sizeof(stats)); - bus = adev->pdev->bus->number; - domain = pci_domain_nr(adev->pdev->bus); - dev = PCI_SLOT(adev->pdev->devfn); - fn = PCI_FUNC(adev->pdev->devfn); ret = amdgpu_bo_reserve(vm->root.bo, false); if (ret)