Merge tag 'amd-drm-next-5.19-2022-04-29' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-5.19-2022-04-29:

amdgpu:
- RAS updates
- SI dpm deadlock fix
- Misc code cleanups
- HDCP fixes
- PSR fixes
- DSC fixes
- SDMA doorbell cleanups
- S0ix fix
- DC FP fix
- Zen dom0 regression fix for APUs
- IP discovery updates
- Initial SoC21 support
- Support for new vbios tables
- Runtime PM fixes
- Add PSP TA debugfs interface

amdkfd:
- Misc code cleanups
- Ignore bogus MEC signals more efficiently
- SVM fixes
- Use bitmap helpers

radeon:
- Misc code cleanups
- Spelling/grammar fixes

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220429144853.5742-1-alexander.deucher@amd.com
This commit is contained in:
Dave Airlie 2022-05-06 15:05:27 +10:00
commit b900352f9d
82 changed files with 62775 additions and 799 deletions

View File

@ -74,7 +74,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce
amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o
# add DF block
amdgpu-y += \

View File

@ -666,10 +666,13 @@ enum amd_hw_ip_block_type {
MAX_HWIP
};
#define HWIP_MAX_INSTANCE 10
#define HWIP_MAX_INSTANCE 11
#define HW_ID_MAX 300
#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))
#define IP_VERSION_MAJ(ver) ((ver) >> 16)
#define IP_VERSION_MIN(ver) (((ver) >> 8) & 0xFF)
#define IP_VERSION_REV(ver) ((ver) & 0xFF)
struct amd_powerplay {
void *pp_handle;

View File

@ -162,12 +162,14 @@ union vram_info {
struct atom_vram_info_header_v2_4 v24;
struct atom_vram_info_header_v2_5 v25;
struct atom_vram_info_header_v2_6 v26;
struct atom_vram_info_header_v3_0 v30;
};
union vram_module {
struct atom_vram_module_v9 v9;
struct atom_vram_module_v10 v10;
struct atom_vram_module_v11 v11;
struct atom_vram_module_v3_0 v30;
};
static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
@ -294,88 +296,116 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
vram_info = (union vram_info *)
(mode_info->atom_context->bios + data_offset);
module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
switch (crev) {
case 3:
if (module_id > vram_info->v23.vram_module_num)
module_id = 0;
vram_module = (union vram_module *)vram_info->v23.vram_module;
while (i < module_id) {
vram_module = (union vram_module *)
((u8 *)vram_module + vram_module->v9.vram_module_size);
i++;
if (frev == 3) {
switch (crev) {
/* v30 */
case 0:
vram_module = (union vram_module *)vram_info->v30.vram_module;
mem_vendor = (vram_module->v30.dram_vendor_id) & 0xF;
if (vram_vendor)
*vram_vendor = mem_vendor;
mem_type = vram_info->v30.memory_type;
if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
mem_channel_number = vram_info->v30.channel_num;
mem_channel_width = vram_info->v30.channel_width;
if (vram_width)
*vram_width = mem_channel_number * mem_channel_width;
break;
default:
return -EINVAL;
}
mem_type = vram_module->v9.memory_type;
if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
mem_channel_number = vram_module->v9.channel_num;
mem_channel_width = vram_module->v9.channel_width;
if (vram_width)
*vram_width = mem_channel_number * (1 << mem_channel_width);
mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
if (vram_vendor)
*vram_vendor = mem_vendor;
break;
case 4:
if (module_id > vram_info->v24.vram_module_num)
module_id = 0;
vram_module = (union vram_module *)vram_info->v24.vram_module;
while (i < module_id) {
vram_module = (union vram_module *)
((u8 *)vram_module + vram_module->v10.vram_module_size);
i++;
} else if (frev == 2) {
switch (crev) {
/* v23 */
case 3:
if (module_id > vram_info->v23.vram_module_num)
module_id = 0;
vram_module = (union vram_module *)vram_info->v23.vram_module;
while (i < module_id) {
vram_module = (union vram_module *)
((u8 *)vram_module + vram_module->v9.vram_module_size);
i++;
}
mem_type = vram_module->v9.memory_type;
if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
mem_channel_number = vram_module->v9.channel_num;
mem_channel_width = vram_module->v9.channel_width;
if (vram_width)
*vram_width = mem_channel_number * (1 << mem_channel_width);
mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
if (vram_vendor)
*vram_vendor = mem_vendor;
break;
/* v24 */
case 4:
if (module_id > vram_info->v24.vram_module_num)
module_id = 0;
vram_module = (union vram_module *)vram_info->v24.vram_module;
while (i < module_id) {
vram_module = (union vram_module *)
((u8 *)vram_module + vram_module->v10.vram_module_size);
i++;
}
mem_type = vram_module->v10.memory_type;
if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
mem_channel_number = vram_module->v10.channel_num;
mem_channel_width = vram_module->v10.channel_width;
if (vram_width)
*vram_width = mem_channel_number * (1 << mem_channel_width);
mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
if (vram_vendor)
*vram_vendor = mem_vendor;
break;
/* v25 */
case 5:
if (module_id > vram_info->v25.vram_module_num)
module_id = 0;
vram_module = (union vram_module *)vram_info->v25.vram_module;
while (i < module_id) {
vram_module = (union vram_module *)
((u8 *)vram_module + vram_module->v11.vram_module_size);
i++;
}
mem_type = vram_module->v11.memory_type;
if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
mem_channel_number = vram_module->v11.channel_num;
mem_channel_width = vram_module->v11.channel_width;
if (vram_width)
*vram_width = mem_channel_number * (1 << mem_channel_width);
mem_vendor = (vram_module->v11.vender_rev_id) & 0xF;
if (vram_vendor)
*vram_vendor = mem_vendor;
break;
/* v26 */
case 6:
if (module_id > vram_info->v26.vram_module_num)
module_id = 0;
vram_module = (union vram_module *)vram_info->v26.vram_module;
while (i < module_id) {
vram_module = (union vram_module *)
((u8 *)vram_module + vram_module->v9.vram_module_size);
i++;
}
mem_type = vram_module->v9.memory_type;
if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
mem_channel_number = vram_module->v9.channel_num;
mem_channel_width = vram_module->v9.channel_width;
if (vram_width)
*vram_width = mem_channel_number * (1 << mem_channel_width);
mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
if (vram_vendor)
*vram_vendor = mem_vendor;
break;
default:
return -EINVAL;
}
mem_type = vram_module->v10.memory_type;
if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
mem_channel_number = vram_module->v10.channel_num;
mem_channel_width = vram_module->v10.channel_width;
if (vram_width)
*vram_width = mem_channel_number * (1 << mem_channel_width);
mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
if (vram_vendor)
*vram_vendor = mem_vendor;
break;
case 5:
if (module_id > vram_info->v25.vram_module_num)
module_id = 0;
vram_module = (union vram_module *)vram_info->v25.vram_module;
while (i < module_id) {
vram_module = (union vram_module *)
((u8 *)vram_module + vram_module->v11.vram_module_size);
i++;
}
mem_type = vram_module->v11.memory_type;
if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
mem_channel_number = vram_module->v11.channel_num;
mem_channel_width = vram_module->v11.channel_width;
if (vram_width)
*vram_width = mem_channel_number * (1 << mem_channel_width);
mem_vendor = (vram_module->v11.vender_rev_id) & 0xF;
if (vram_vendor)
*vram_vendor = mem_vendor;
break;
case 6:
if (module_id > vram_info->v26.vram_module_num)
module_id = 0;
vram_module = (union vram_module *)vram_info->v26.vram_module;
while (i < module_id) {
vram_module = (union vram_module *)
((u8 *)vram_module + vram_module->v9.vram_module_size);
i++;
}
mem_type = vram_module->v9.memory_type;
if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
mem_channel_number = vram_module->v9.channel_num;
mem_channel_width = vram_module->v9.channel_width;
if (vram_width)
*vram_width = mem_channel_number * (1 << mem_channel_width);
mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
if (vram_vendor)
*vram_vendor = mem_vendor;
break;
default:
} else {
/* invalid frev */
return -EINVAL;
}
}
@ -528,6 +558,13 @@ union smu_info {
struct atom_smu_info_v3_1 v31;
};
union gfx_info {
struct atom_gfx_info_v2_2 v22;
struct atom_gfx_info_v2_4 v24;
struct atom_gfx_info_v2_7 v27;
struct atom_gfx_info_v3_0 v30;
};
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
@ -609,22 +646,26 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
gfx_info);
if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
struct atom_gfx_info_v2_2 *gfx_info = (struct atom_gfx_info_v2_2*)
union gfx_info *gfx_info = (union gfx_info *)
(mode_info->atom_context->bios + data_offset);
if ((frev == 2) && (crev >= 2))
spll->reference_freq = le32_to_cpu(gfx_info->rlc_gpu_timer_refclk);
ret = 0;
if ((frev == 3) ||
(frev == 2 && crev == 6)) {
spll->reference_freq = le32_to_cpu(gfx_info->v30.golden_tsc_count_lower_refclk);
ret = 0;
} else if ((frev == 2) &&
(crev >= 2) &&
(crev != 6)) {
spll->reference_freq = le32_to_cpu(gfx_info->v22.rlc_gpu_timer_refclk);
ret = 0;
} else {
BUG();
}
}
}
return ret;
}
union gfx_info {
struct atom_gfx_info_v2_4 v24;
struct atom_gfx_info_v2_7 v27;
};
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
@ -638,42 +679,58 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
&frev, &crev, &data_offset)) {
union gfx_info *gfx_info = (union gfx_info *)
(mode_info->atom_context->bios + data_offset);
switch (crev) {
case 4:
adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
adev->gfx.config.gs_prim_buffer_depth =
le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
adev->gfx.config.double_offchip_lds_buf =
gfx_info->v24.gc_double_offchip_lds_buffer;
adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
return 0;
case 7:
adev->gfx.config.max_shader_engines = gfx_info->v27.max_shader_engines;
adev->gfx.config.max_cu_per_sh = gfx_info->v27.max_cu_per_sh;
adev->gfx.config.max_sh_per_se = gfx_info->v27.max_sh_per_se;
adev->gfx.config.max_backends_per_se = gfx_info->v27.max_backends_per_se;
adev->gfx.config.max_texture_channel_caches = gfx_info->v27.max_texture_channel_caches;
adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v27.gc_num_gprs);
adev->gfx.config.max_gs_threads = gfx_info->v27.gc_num_max_gs_thds;
adev->gfx.config.gs_vgt_table_depth = gfx_info->v27.gc_gs_table_depth;
adev->gfx.config.gs_prim_buffer_depth = le16_to_cpu(gfx_info->v27.gc_gsprim_buff_depth);
adev->gfx.config.double_offchip_lds_buf = gfx_info->v27.gc_double_offchip_lds_buffer;
adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v27.gc_wave_size);
adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v27.gc_max_waves_per_simd);
adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v27.gc_max_scratch_slots_per_cu;
adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v27.gc_lds_size);
return 0;
default:
if (frev == 2) {
switch (crev) {
case 4:
adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
adev->gfx.config.gs_prim_buffer_depth =
le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
adev->gfx.config.double_offchip_lds_buf =
gfx_info->v24.gc_double_offchip_lds_buffer;
adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
return 0;
case 7:
adev->gfx.config.max_shader_engines = gfx_info->v27.max_shader_engines;
adev->gfx.config.max_cu_per_sh = gfx_info->v27.max_cu_per_sh;
adev->gfx.config.max_sh_per_se = gfx_info->v27.max_sh_per_se;
adev->gfx.config.max_backends_per_se = gfx_info->v27.max_backends_per_se;
adev->gfx.config.max_texture_channel_caches = gfx_info->v27.max_texture_channel_caches;
adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v27.gc_num_gprs);
adev->gfx.config.max_gs_threads = gfx_info->v27.gc_num_max_gs_thds;
adev->gfx.config.gs_vgt_table_depth = gfx_info->v27.gc_gs_table_depth;
adev->gfx.config.gs_prim_buffer_depth = le16_to_cpu(gfx_info->v27.gc_gsprim_buff_depth);
adev->gfx.config.double_offchip_lds_buf = gfx_info->v27.gc_double_offchip_lds_buffer;
adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v27.gc_wave_size);
adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v27.gc_max_waves_per_simd);
adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v27.gc_max_scratch_slots_per_cu;
adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v27.gc_lds_size);
return 0;
default:
return -EINVAL;
}
} else if (frev == 3) {
switch (crev) {
case 0:
adev->gfx.config.max_shader_engines = gfx_info->v30.max_shader_engines;
adev->gfx.config.max_cu_per_sh = gfx_info->v30.max_cu_per_sh;
adev->gfx.config.max_sh_per_se = gfx_info->v30.max_sh_per_se;
adev->gfx.config.max_backends_per_se = gfx_info->v30.max_backends_per_se;
adev->gfx.config.max_texture_channel_caches = gfx_info->v30.max_texture_channel_caches;
return 0;
default:
return -EINVAL;
}
} else {
return -EINVAL;
}
@ -731,3 +788,67 @@ int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
return fw_reserved_fb_size;
}
/*
* Helper function to execute asic_init table
*
* @adev: amdgpu_device pointer
* @fb_reset: flag to indicate whether fb is reset or not
*
* Return 0 on success, or a negative error code on failure
*/
int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
struct atom_context *ctx;
uint8_t frev, crev;
uint16_t data_offset;
uint32_t bootup_sclk_in10khz, bootup_mclk_in10khz;
struct asic_init_ps_allocation_v2_1 asic_init_ps_v2_1;
int index;
if (!mode_info)
return -EINVAL;
ctx = mode_info->atom_context;
if (!ctx)
return -EINVAL;
/* query bootup sclk/mclk from firmware_info table */
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
firmwareinfo);
if (amdgpu_atom_parse_data_header(ctx, index, NULL,
&frev, &crev, &data_offset)) {
union firmware_info *firmware_info =
(union firmware_info *)(ctx->bios +
data_offset);
bootup_sclk_in10khz =
le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
bootup_mclk_in10khz =
le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);
} else {
return -EINVAL;
}
index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
asic_init);
if (amdgpu_atom_parse_cmd_header(mode_info->atom_context, index, &frev, &crev)) {
if (frev == 2 && crev >= 1) {
memset(&asic_init_ps_v2_1, 0, sizeof(asic_init_ps_v2_1));
asic_init_ps_v2_1.param.engineparam.sclkfreqin10khz = bootup_sclk_in10khz;
asic_init_ps_v2_1.param.memparam.mclkfreqin10khz = bootup_mclk_in10khz;
asic_init_ps_v2_1.param.engineparam.engineflag = b3NORMAL_ENGINE_INIT;
if (!fb_reset)
asic_init_ps_v2_1.param.memparam.memflag = b3DRAM_SELF_REFRESH_EXIT;
else
asic_init_ps_v2_1.param.memparam.memflag = 0;
} else {
return -EINVAL;
}
} else {
return -EINVAL;
}
return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1);
}

View File

@ -40,5 +40,6 @@ bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_a
bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev);
int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset);
#endif

View File

@ -471,6 +471,7 @@ bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
{
u32 *dw_ptr;
u32 i, length_dw;
u32 rom_offset;
u32 rom_index_offset;
u32 rom_data_offset;
@ -494,8 +495,16 @@ bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
rom_data_offset =
adev->smuio.funcs->get_rom_data_offset(adev);
/* set rom index to 0 */
WREG32(rom_index_offset, 0);
if (adev->nbio.funcs &&
adev->nbio.funcs->get_rom_offset) {
rom_offset = adev->nbio.funcs->get_rom_offset(adev);
rom_offset = rom_offset << 17;
} else {
rom_offset = 0;
}
/* set rom index to rom_offset */
WREG32(rom_index_offset, rom_offset);
/* read out the rom data */
for (i = 0; i < length_dw; i++)
dw_ptr[i] = RREG32(rom_data_offset);

View File

@ -552,7 +552,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (r) {
kvfree(e->user_pages);
e->user_pages = NULL;
return r;
goto out_free_user_pages;
}
for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
@ -569,7 +569,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
goto out;
goto out_free_user_pages;
}
amdgpu_bo_list_for_each_entry(e, p->bo_list) {
@ -638,7 +638,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
error_validate:
if (r)
ttm_eu_backoff_reservation(&p->ticket, &p->validated);
out:
out_free_user_pages:
if (r) {
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
if (!e->user_pages)
continue;
amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
kvfree(e->user_pages);
e->user_pages = NULL;
}
}
return r;
}

View File

@ -913,7 +913,10 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
amdgpu_asic_pre_asic_init(adev);
return amdgpu_atom_asic_init(adev->mode_info.atom_context);
if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
return amdgpu_atomfirmware_asic_init(adev, true);
else
return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}
/**
@ -1926,11 +1929,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
if (adev->mman.discovery_bin) {
amdgpu_discovery_get_gfx_info(adev);
/*
* FIXME: The bounding box is still needed by Navi12, so
* temporarily read it from gpu_info firmware. Should be droped
* temporarily read it from gpu_info firmware. Should be dropped
* when DAL no longer needs it.
*/
if (adev->asic_type != CHIP_NAVI12)

View File

@ -271,8 +271,6 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
struct table_info *info;
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct gpu_info_header *ghdr;
uint16_t offset;
uint16_t size;
uint16_t checksum;
@ -290,7 +288,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
if(!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
dev_warn(adev->dev, "get invalid ip discovery binary signature from vram\n");
/* retry read ip discovery binary from file */
r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
@ -324,31 +322,110 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
info = &bhdr->table_list[IP_DISCOVERY];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
dev_err(adev->dev, "invalid ip discovery data table signature\n");
r = -EINVAL;
goto out;
}
if (offset) {
struct ip_discovery_header *ihdr =
(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
dev_err(adev->dev, "invalid ip discovery data table signature\n");
r = -EINVAL;
goto out;
}
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
le16_to_cpu(ihdr->size), checksum)) {
dev_err(adev->dev, "invalid ip discovery data table checksum\n");
r = -EINVAL;
goto out;
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
le16_to_cpu(ihdr->size), checksum)) {
dev_err(adev->dev, "invalid ip discovery data table checksum\n");
r = -EINVAL;
goto out;
}
}
info = &bhdr->table_list[GC];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
ghdr = (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
le32_to_cpu(ghdr->size), checksum)) {
dev_err(adev->dev, "invalid gc data table checksum\n");
r = -EINVAL;
goto out;
if (offset) {
struct gpu_info_header *ghdr =
(struct gpu_info_header *)(adev->mman.discovery_bin + offset);
if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery gc table id\n");
r = -EINVAL;
goto out;
}
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
le32_to_cpu(ghdr->size), checksum)) {
dev_err(adev->dev, "invalid gc data table checksum\n");
r = -EINVAL;
goto out;
}
}
info = &bhdr->table_list[HARVEST_INFO];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
if (offset) {
struct harvest_info_header *hhdr =
(struct harvest_info_header *)(adev->mman.discovery_bin + offset);
if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
r = -EINVAL;
goto out;
}
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
sizeof(struct harvest_table), checksum)) {
dev_err(adev->dev, "invalid harvest data table checksum\n");
r = -EINVAL;
goto out;
}
}
info = &bhdr->table_list[VCN_INFO];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
if (offset) {
struct vcn_info_header *vhdr =
(struct vcn_info_header *)(adev->mman.discovery_bin + offset);
if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery vcn table id\n");
r = -EINVAL;
goto out;
}
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
le32_to_cpu(vhdr->size_bytes), checksum)) {
dev_err(adev->dev, "invalid vcn data table checksum\n");
r = -EINVAL;
goto out;
}
}
info = &bhdr->table_list[MALL_INFO];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
if (0 && offset) {
struct mall_info_header *mhdr =
(struct mall_info_header *)(adev->mman.discovery_bin + offset);
if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery mall table id\n");
r = -EINVAL;
goto out;
}
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
le32_to_cpu(mhdr->size_bytes), checksum)) {
dev_err(adev->dev, "invalid mall data table checksum\n");
r = -EINVAL;
goto out;
}
}
return 0;
@ -436,15 +513,24 @@ next_ip:
}
static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
uint32_t *vcn_harvest_count)
uint32_t *vcn_harvest_count,
uint32_t *umc_harvest_count)
{
struct binary_header *bhdr;
struct harvest_table *harvest_info;
u16 offset;
int i;
bhdr = (struct binary_header *)adev->mman.discovery_bin;
harvest_info = (struct harvest_table *)(adev->mman.discovery_bin +
le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset));
offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
if (!offset) {
dev_err(adev->dev, "invalid harvest table offset\n");
return;
}
harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
for (i = 0; i < 32; i++) {
if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
break;
@ -460,6 +546,9 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
case DMU_HWID:
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
break;
case UMC_HWID:
(*umc_harvest_count)++;
break;
default:
break;
}
@ -957,7 +1046,7 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
/* ================================================== */
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
@ -1033,6 +1122,9 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
le16_to_cpu(ip->hw_id) == SDMA3_HWID)
adev->sdma.num_instances++;
if (le16_to_cpu(ip->hw_id) == UMC_HWID)
adev->gmc.num_umc++;
for (k = 0; k < num_base_address; k++) {
/*
* convert the endianness of base addresses in place,
@ -1120,9 +1212,10 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int n
return -EINVAL;
}
void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
int vcn_harvest_count = 0;
int umc_harvest_count = 0;
/*
* Harvest table does not fit Navi1x and legacy GPUs,
@ -1141,7 +1234,8 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
&vcn_harvest_count);
} else {
amdgpu_discovery_read_from_harvest_table(adev,
&vcn_harvest_count);
&vcn_harvest_count,
&umc_harvest_count);
}
amdgpu_discovery_harvest_config_quirk(adev);
@ -1150,17 +1244,24 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
}
if (umc_harvest_count < adev->gmc.num_umc) {
adev->gmc.num_umc -= umc_harvest_count;
}
}
union gc_info {
struct gc_info_v1_0 v1;
struct gc_info_v1_1 v1_1;
struct gc_info_v1_2 v1_2;
struct gc_info_v2_0 v2;
};
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
union gc_info *gc_info;
u16 offset;
if (!adev->mman.discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
@ -1168,9 +1269,14 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
}
bhdr = (struct binary_header *)adev->mman.discovery_bin;
gc_info = (union gc_info *)(adev->mman.discovery_bin +
le16_to_cpu(bhdr->table_list[GC].offset));
switch (gc_info->v1.header.version_major) {
offset = le16_to_cpu(bhdr->table_list[GC].offset);
if (!offset)
return 0;
gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
switch (le16_to_cpu(gc_info->v1.header.version_major)) {
case 1:
adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
@ -1190,6 +1296,21 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
if (gc_info->v1.header.version_minor >= 1) {
adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
}
if (gc_info->v1.header.version_minor >= 2) {
adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
}
break;
case 2:
adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
@ -1213,8 +1334,105 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
default:
dev_err(adev->dev,
"Unhandled GC info table %d.%d\n",
gc_info->v1.header.version_major,
gc_info->v1.header.version_minor);
le16_to_cpu(gc_info->v1.header.version_major),
le16_to_cpu(gc_info->v1.header.version_minor));
return -EINVAL;
}
return 0;
}
union mall_info {
struct mall_info_v1_0 v1;
};
int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
union mall_info *mall_info;
u32 u, mall_size_per_umc, m_s_present, half_use;
u64 mall_size;
u16 offset;
if (!adev->mman.discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
bhdr = (struct binary_header *)adev->mman.discovery_bin;
offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
if (!offset)
return 0;
mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
switch (le16_to_cpu(mall_info->v1.header.version_major)) {
case 1:
mall_size = 0;
mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
half_use = le32_to_cpu(mall_info->v1.m_half_use);
for (u = 0; u < adev->gmc.num_umc; u++) {
if (m_s_present & (1 << u))
mall_size += mall_size_per_umc * 2;
else if (half_use & (1 << u))
mall_size += mall_size_per_umc / 2;
else
mall_size += mall_size_per_umc;
}
adev->gmc.mall_size = mall_size;
break;
default:
dev_err(adev->dev,
"Unhandled MALL info table %d.%d\n",
le16_to_cpu(mall_info->v1.header.version_major),
le16_to_cpu(mall_info->v1.header.version_minor));
return -EINVAL;
}
return 0;
}
union vcn_info {
struct vcn_info_v1_0 v1;
};
static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
union vcn_info *vcn_info;
u16 offset;
int v;
if (!adev->mman.discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
dev_err(adev->dev, "invalid vcn instances\n");
return -EINVAL;
}
bhdr = (struct binary_header *)adev->mman.discovery_bin;
offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
if (!offset)
return 0;
vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
case 1:
for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
adev->vcn.vcn_codec_disable_mask[v] =
le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
}
break;
default:
dev_err(adev->dev,
"Unhandled VCN info table %d.%d\n",
le16_to_cpu(vcn_info->v1.header.version_major),
le16_to_cpu(vcn_info->v1.header.version_minor));
return -EINVAL;
}
return 0;
@ -1650,6 +1868,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_VEGA10:
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 4;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
@ -1671,6 +1890,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_VEGA12:
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 4;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
@ -1693,6 +1913,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
vega10_reg_base_init(adev);
adev->sdma.num_instances = 1;
adev->vcn.num_vcn_inst = 1;
adev->gmc.num_umc = 2;
if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
@ -1730,6 +1951,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_VEGA20:
vega20_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 8;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
@ -1753,6 +1975,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
arct_reg_base_init(adev);
adev->sdma.num_instances = 8;
adev->vcn.num_vcn_inst = 2;
adev->gmc.num_umc = 8;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
@ -1780,6 +2003,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
aldebaran_reg_base_init(adev);
adev->sdma.num_instances = 5;
adev->vcn.num_vcn_inst = 2;
adev->gmc.num_umc = 4;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
@ -1807,6 +2031,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
return -EINVAL;
amdgpu_discovery_harvest_ip(adev);
amdgpu_discovery_get_gfx_info(adev);
amdgpu_discovery_get_mall_info(adev);
amdgpu_discovery_get_vcn_info(adev);
break;
}

View File

@ -28,12 +28,8 @@
#define DISCOVERY_TMR_OFFSET (64 << 10)
void amdgpu_discovery_fini(struct amdgpu_device *adev);
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev);
void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev);
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance,
int *major, int *minor, int *revision);
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev);
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev);
#endif /* __AMDGPU_DISCOVERY__ */

View File

@ -183,6 +183,17 @@ struct amdgpu_gfx_config {
uint32_t num_packer_per_sc;
uint32_t pa_sc_tile_steering_override;
uint64_t tcc_disabled_mask;
uint32_t gc_num_tcp_per_sa;
uint32_t gc_num_sdp_interface;
uint32_t gc_num_tcps;
uint32_t gc_num_tcp_per_wpg;
uint32_t gc_tcp_l1_size;
uint32_t gc_num_sqc_per_wgp;
uint32_t gc_l1_instruction_cache_size_per_sqc;
uint32_t gc_l1_data_cache_size_per_sqc;
uint32_t gc_gl1c_per_sa;
uint32_t gc_gl1c_size_per_instance;
uint32_t gc_gl2c_per_gpu;
};
struct amdgpu_cu_info {

View File

@ -257,6 +257,11 @@ struct amdgpu_gmc {
struct amdgpu_bo *pdb0_bo;
/* CPU kmapped address of pdb0*/
void *ptr_pdb0;
/* MALL size */
u64 mall_size;
/* number of UMC instances */
int num_umc;
};
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))

View File

@ -193,20 +193,7 @@ static irqreturn_t amdgpu_irq_handler(int irq, void *arg)
if (ret == IRQ_HANDLED)
pm_runtime_mark_last_busy(dev->dev);
/* For the hardware that cannot enable bif ring for both ras_controller_irq
* and ras_err_evnet_athub_irq ih cookies, the driver has to poll status
* register to check whether the interrupt is triggered or not, and properly
* ack the interrupt if it is there
*/
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) {
if (adev->nbio.ras &&
adev->nbio.ras->handle_ras_controller_intr_no_bifring)
adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
if (adev->nbio.ras &&
adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
}
amdgpu_ras_interrupt_fatal_error_handler(adev);
return ret;
}

View File

@ -43,6 +43,17 @@
#include "amdgpu_display.h"
#include "amdgpu_ras.h"
static void amdgpu_runtime_pm_quirk(struct amdgpu_device *adev)
{
/*
* Add below quirk on several sienna_cichlid cards to disable
* runtime pm to fix EMI failures.
*/
if (((adev->pdev->device == 0x73A1) && (adev->pdev->revision == 0x00)) ||
((adev->pdev->device == 0x73BF) && (adev->pdev->revision == 0xCF)))
adev->runpm = false;
}
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
struct amdgpu_gpu_instance *gpu_instance;
@ -180,6 +191,9 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
*/
if (adev->is_fw_fb)
adev->runpm = false;
amdgpu_runtime_pm_quirk(adev);
if (adev->runpm)
dev_info(adev->dev, "Using BACO for runtime pm\n");
}

View File

@ -93,6 +93,7 @@ struct amdgpu_nbio_funcs {
void (*apply_lc_spc_mode_wa)(struct amdgpu_device *adev);
void (*apply_l1_link_width_reconfig_wa)(struct amdgpu_device *adev);
void (*clear_doorbell_interrupt)(struct amdgpu_device *adev);
u32 (*get_rom_offset)(struct amdgpu_device *adev);
};
struct amdgpu_nbio {

View File

@ -276,6 +276,7 @@ static int psp_init_sriov_microcode(struct psp_context *psp)
break;
case IP_VERSION(13, 0, 2):
ret = psp_init_cap_microcode(psp, "aldebaran");
ret &= psp_init_ta_microcode(psp, "aldebaran");
break;
default:
BUG();

View File

@ -24,12 +24,7 @@
#include "amdgpu.h"
#include "amdgpu_psp_ta.h"
static const char *TA_IF_FS_NAME = "ta_if";
struct dentry *dir;
static struct dentry *ta_load_debugfs_dentry;
static struct dentry *ta_unload_debugfs_dentry;
static struct dentry *ta_invoke_debugfs_dentry;
#if defined(CONFIG_DEBUG_FS)
static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf,
size_t len, loff_t *off);
@ -38,7 +33,6 @@ static ssize_t ta_if_unload_debugfs_write(struct file *fp, const char *buf,
static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf,
size_t len, loff_t *off);
static uint32_t get_bin_version(const uint8_t *bin)
{
const struct common_firmware_header *hdr =
@ -74,19 +68,19 @@ static bool is_ta_type_valid(enum ta_type_id ta_type)
}
static const struct file_operations ta_load_debugfs_fops = {
.write = ta_if_load_debugfs_write,
.write = ta_if_load_debugfs_write,
.llseek = default_llseek,
.owner = THIS_MODULE
};
static const struct file_operations ta_unload_debugfs_fops = {
.write = ta_if_unload_debugfs_write,
.write = ta_if_unload_debugfs_write,
.llseek = default_llseek,
.owner = THIS_MODULE
};
static const struct file_operations ta_invoke_debugfs_fops = {
.write = ta_if_invoke_debugfs_write,
.write = ta_if_invoke_debugfs_write,
.llseek = default_llseek,
.owner = THIS_MODULE
};
@ -159,9 +153,10 @@ static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t
ta_bin = kzalloc(ta_bin_len, GFP_KERNEL);
if (!ta_bin)
ret = -ENOMEM;
ret = copy_from_user((void *)ta_bin, &buf[copy_pos], ta_bin_len);
if (ret)
if (copy_from_user((void *)ta_bin, &buf[copy_pos], ta_bin_len)) {
ret = -EFAULT;
goto err_free_bin;
}
ret = psp_ras_terminate(psp);
if (ret) {
@ -180,11 +175,14 @@ static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t
if (ret || context.resp_status) {
dev_err(adev->dev, "TA load via debugfs failed (%d) status %d\n",
ret, context.resp_status);
if (!ret)
ret = -EINVAL;
goto err_free_bin;
}
context.initialized = true;
ret = copy_to_user((char *)buf, (void *)&context.session_id, sizeof(uint32_t));
if (copy_to_user((char *)buf, (void *)&context.session_id, sizeof(uint32_t)))
ret = -EFAULT;
err_free_bin:
kfree(ta_bin);
@ -250,10 +248,11 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
shared_buf = kzalloc(shared_buf_len, GFP_KERNEL);
if (!shared_buf)
ret = -ENOMEM;
ret = copy_from_user((void *)shared_buf, &buf[copy_pos], shared_buf_len);
if (ret)
return -ENOMEM;
if (copy_from_user((void *)shared_buf, &buf[copy_pos], shared_buf_len)) {
ret = -EFAULT;
goto err_free_shared_buf;
}
context.session_id = ta_id;
@ -264,10 +263,13 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
if (ret || context.resp_status) {
dev_err(adev->dev, "TA invoke via debugfs failed (%d) status %d\n",
ret, context.resp_status);
if (!ret)
ret = -EINVAL;
goto err_free_ta_shared_buf;
}
ret = copy_to_user((char *)buf, context.mem_context.shared_buf, shared_buf_len);
if (copy_to_user((char *)buf, context.mem_context.shared_buf, shared_buf_len))
ret = -EFAULT;
err_free_ta_shared_buf:
psp_ta_free_shared_buf(&context.mem_context);
@ -278,31 +280,25 @@ err_free_shared_buf:
return ret;
}
static struct dentry *amdgpu_ta_if_debugfs_create(struct amdgpu_device *adev)
void amdgpu_ta_if_debugfs_init(struct amdgpu_device *adev)
{
struct drm_minor *minor = adev_to_drm(adev)->primary;
dir = debugfs_create_dir(TA_IF_FS_NAME, minor->debugfs_root);
struct dentry *dir = debugfs_create_dir("ta_if", minor->debugfs_root);
ta_load_debugfs_dentry = debugfs_create_file("ta_load", 0200, dir, adev,
&ta_load_debugfs_fops);
debugfs_create_file("ta_load", 0200, dir, adev,
&ta_load_debugfs_fops);
ta_unload_debugfs_dentry = debugfs_create_file("ta_unload", 0200, dir,
adev, &ta_unload_debugfs_fops);
debugfs_create_file("ta_unload", 0200, dir,
adev, &ta_unload_debugfs_fops);
ta_invoke_debugfs_dentry = debugfs_create_file("ta_invoke", 0200, dir,
adev, &ta_invoke_debugfs_fops);
return dir;
debugfs_create_file("ta_invoke", 0200, dir,
adev, &ta_invoke_debugfs_fops);
}
#else
void amdgpu_ta_if_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
dir = amdgpu_ta_if_debugfs_create(adev);
#endif
}
void amdgpu_ta_if_debugfs_remove(void)
{
debugfs_remove_recursive(dir);
}
#endif

View File

@ -25,6 +25,5 @@
#define __AMDGPU_PSP_TA_H__
void amdgpu_ta_if_debugfs_init(struct amdgpu_device *adev);
void amdgpu_ta_if_debugfs_remove(void);
#endif

View File

@ -1515,12 +1515,97 @@ static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
/* ras fs end */
/* ih begin */
/* For the hardware that cannot enable bif ring for both ras_controller_irq
* and ras_err_evnet_athub_irq ih cookies, the driver has to poll status
* register to check whether the interrupt is triggered or not, and properly
* ack the interrupt if it is there
*/
/*
 * Poll-and-ack path for fatal RAS interrupts on hardware that cannot enable
 * the BIF ring for both ras_controller_irq and ras_err_event_athub_irq ih
 * cookies; the driver polls the status registers and acks pending interrupts.
 */
void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
{
	/* Nothing to poll if PCIE_BIF RAS is not supported on this ASIC. */
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF))
		return;

	if (adev->nbio.ras) {
		/* Both NBIO callbacks are optional; check each independently. */
		if (adev->nbio.ras->handle_ras_controller_intr_no_bifring)
			adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);

		if (adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
			adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
	}
}
/*
 * Handle an interrupt reporting that poisoned data has been consumed by an
 * IP block.  Lets the block confirm the poison status and attempt its own
 * recovery; falls back to a full GPU reset when it cannot.
 */
static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	bool poison_stat = true, need_reset = true;
	struct amdgpu_device *adev = obj->adev;
	struct ras_err_data err_data = {0, 0, 0, NULL};
	struct amdgpu_ras_block_object *block_obj =
		amdgpu_ras_get_ras_block(adev, obj->head.block, 0);

	/* Run the UMC poison handler only when GMC is not XGMI-connected to the CPU. */
	if (!adev->gmc.xgmi.connected_to_cpu)
		amdgpu_umc_poison_handler(adev, &err_data, false);

	/* both query_poison_status and handle_poison_consumption are optional */
	if (block_obj && block_obj->hw_ops) {
		if (block_obj->hw_ops->query_poison_status) {
			poison_stat = block_obj->hw_ops->query_poison_status(adev);

			if (!poison_stat)
				dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
						block_obj->ras_comm.name);
		}

		if (poison_stat && block_obj->hw_ops->handle_poison_consumption) {
			/* The block's handler decides whether a reset is still required. */
			poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
			need_reset = poison_stat;
		}
	}

	/* gpu reset is fallback for all failed cases */
	if (need_reset)
		amdgpu_ras_reset_gpu(adev);
}
/*
 * Handle an interrupt reporting that RAS poison was created.  Creation is
 * only logged; action is deferred until the poison is actually consumed.
 */
static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	dev_info(obj->adev->dev,
			"Poison is created, no user action is needed.\n");
}
/*
 * Dispatch one UMC RAS IV entry to the block's registered callback and
 * fold the error counts it reports into the manager's totals.
 */
static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	struct ras_ih_data *ih = &obj->ih_data;
	struct ras_err_data err_data = {0};

	/* No per-IP callback registered: nothing to dispatch. */
	if (!ih->cb)
		return;

	/* Let IP handle its data, maybe we need get the output
	 * from the callback to update the error type/count, etc
	 */
	if (ih->cb(obj->adev, &err_data, entry) != AMDGPU_RAS_SUCCESS)
		return;

	/* ue will trigger an interrupt, and in that case
	 * we need do a reset to recovery the whole system.
	 * But leave IP do that recovery, here we just dispatch
	 * the error.
	 *
	 * These counts could be left as 0 if some blocks do not
	 * count error number.
	 */
	obj->err_data.ue_count += err_data.ue_count;
	obj->err_data.ce_count += err_data.ce_count;
}
static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
{
struct ras_ih_data *data = &obj->ih_data;
struct amdgpu_iv_entry entry;
int ret;
struct ras_err_data err_data = {0, 0, 0, NULL};
while (data->rptr != data->wptr) {
rmb();
@ -1531,30 +1616,17 @@ static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
data->rptr = (data->aligned_element_size +
data->rptr) % data->ring_size;
if (data->cb) {
if (amdgpu_ras_is_poison_mode_supported(obj->adev) &&
obj->head.block == AMDGPU_RAS_BLOCK__UMC)
dev_info(obj->adev->dev,
"Poison is created, no user action is needed.\n");
else {
/* Let IP handle its data, maybe we need get the output
* from the callback to udpate the error type/count, etc
*/
memset(&err_data, 0, sizeof(err_data));
ret = data->cb(obj->adev, &err_data, &entry);
/* ue will trigger an interrupt, and in that case
* we need do a reset to recovery the whole system.
* But leave IP do that recovery, here we just dispatch
* the error.
*/
if (ret == AMDGPU_RAS_SUCCESS) {
/* these counts could be left as 0 if
* some blocks do not count error number
*/
obj->err_data.ue_count += err_data.ue_count;
obj->err_data.ce_count += err_data.ce_count;
}
}
if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
else
amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
} else {
if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
amdgpu_ras_interrupt_umc_handler(obj, &entry);
else
dev_warn(obj->adev->dev,
"No RAS interrupt handler for non-UMC block with poison disabled.\n");
}
}
}
@ -1849,7 +1921,6 @@ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
if (!bps) {
kfree(bps);
return -ENOMEM;
}

View File

@ -509,6 +509,7 @@ struct amdgpu_ras_block_hw_ops {
void (*reset_ras_error_count)(struct amdgpu_device *adev);
void (*reset_ras_error_status)(struct amdgpu_device *adev);
bool (*query_poison_status)(struct amdgpu_device *adev);
bool (*handle_poison_consumption)(struct amdgpu_device *adev);
};
/* work flow
@ -682,4 +683,5 @@ int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_co
int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
struct amdgpu_ras_block_object *ras_block_obj);
void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev);
#endif

View File

@ -760,3 +760,36 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
}
return 0;
}
/**
 * amdgpu_ucode_ip_version_decode - build a firmware name prefix from an IP version
 * @adev: amdgpu device pointer
 * @block_type: hardware IP block (GC_HWIP, SDMA0_HWIP, MP0_HWIP, MP1_HWIP or UVD_HWIP)
 * @ucode_prefix: output buffer receiving the "<ip>_<maj>_<min>_<rev>" string
 * @len: size of @ucode_prefix in bytes (output is truncated to fit)
 *
 * Decodes adev->ip_versions[block_type][0] into the canonical prefix used
 * when constructing firmware file names.  BUG()s on an unhandled @block_type.
 */
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len)
{
	int maj, min, rev;
	/* const: ip_name only ever points at string literals */
	const char *ip_name;
	uint32_t version = adev->ip_versions[block_type][0];

	switch (block_type) {
	case GC_HWIP:
		ip_name = "gc";
		break;
	case SDMA0_HWIP:
		ip_name = "sdma";
		break;
	case MP0_HWIP:
		ip_name = "psp";
		break;
	case MP1_HWIP:
		ip_name = "smu";
		break;
	case UVD_HWIP:
		ip_name = "vcn";
		break;
	default:
		BUG();
	}

	maj = IP_VERSION_MAJ(version);
	min = IP_VERSION_MIN(version);
	rev = IP_VERSION_REV(version);

	snprintf(ucode_prefix, len, "%s_%d_%d_%d", ip_name, maj, min, rev);
}

View File

@ -463,4 +463,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type);
const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id);
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len);
#endif

View File

@ -166,6 +166,11 @@
#define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER 0x00000001
#define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER 0x00000001
#define VCN_CODEC_DISABLE_MASK_AV1 (1 << 0)
#define VCN_CODEC_DISABLE_MASK_VP9 (1 << 1)
#define VCN_CODEC_DISABLE_MASK_HEVC (1 << 2)
#define VCN_CODEC_DISABLE_MASK_H264 (1 << 3)
enum fw_queue_mode {
FW_QUEUE_RING_RESET = 1,
FW_QUEUE_DPG_HOLD_OFF = 2,
@ -250,6 +255,7 @@ struct amdgpu_vcn {
uint8_t num_vcn_inst;
struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
uint8_t vcn_config[AMDGPU_MAX_VCN_INSTANCES];
uint32_t vcn_codec_disable_mask[AMDGPU_MAX_VCN_INSTANCES];
struct amdgpu_vcn_reg internal;
struct mutex vcn_pg_lock;
struct mutex vcn1_jpeg1_workaround;

View File

@ -28,6 +28,7 @@
#endif
#include <drm/drm_drv.h>
#include <xen/xen.h>
#include "amdgpu.h"
#include "amdgpu_ras.h"
@ -714,7 +715,8 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
if (!reg) {
if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
/* passthrough mode exclus sriov mod */
if (is_virtual_machine() && !xen_initial_domain())
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}

View File

@ -195,7 +195,7 @@ static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me],
(lower_32_bits(ring->wptr) << 2) & 0x3fffc);
(ring->wptr << 2) & 0x3fffc);
}
static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
@ -487,7 +487,7 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
ring->wptr = 0;
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
/* enable DMA RB */
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i],

View File

@ -3830,8 +3830,7 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 16,
AMDGPU_IB_POOL_DIRECT, &ib);
r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;

View File

@ -883,6 +883,24 @@ static int gmc_v10_0_sw_init(void *handle)
adev->gmc.vram_vendor = vram_vendor;
}
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(10, 3, 0):
adev->gmc.mall_size = 128 * 1024 * 1024;
break;
case IP_VERSION(10, 3, 2):
adev->gmc.mall_size = 96 * 1024 * 1024;
break;
case IP_VERSION(10, 3, 4):
adev->gmc.mall_size = 32 * 1024 * 1024;
break;
case IP_VERSION(10, 3, 5):
adev->gmc.mall_size = 16 * 1024 * 1024;
break;
default:
adev->gmc.mall_size = 0;
break;
}
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(10, 1, 10):
case IP_VERSION(10, 1, 1):

View File

@ -223,7 +223,7 @@ static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], ring->wptr << 2);
}
static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
@ -465,7 +465,7 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
ring->wptr = 0;
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
/* enable DMA RB */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);

View File

@ -389,14 +389,14 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell) {
u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
/* XXX check if swapping is necessary on BE */
WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr) << 2);
WRITE_ONCE(*wb, ring->wptr << 2);
WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
} else if (ring->use_pollmem) {
u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
WRITE_ONCE(*wb, ring->wptr << 2);
} else {
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], ring->wptr << 2);
}
}

View File

@ -772,8 +772,8 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
DRM_DEBUG("Using doorbell -- "
"wptr_offs == 0x%08x "
"lower_32_bits(ring->wptr) << 2 == 0x%08x "
"upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
"lower_32_bits(ring->wptr << 2) == 0x%08x "
"upper_32_bits(ring->wptr << 2) == 0x%08x\n",
ring->wptr_offs,
lower_32_bits(ring->wptr << 2),
upper_32_bits(ring->wptr << 2));

View File

@ -394,8 +394,8 @@ static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell) {
DRM_DEBUG("Using doorbell -- "
"wptr_offs == 0x%08x "
"lower_32_bits(ring->wptr) << 2 == 0x%08x "
"upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
"lower_32_bits(ring->wptr << 2) == 0x%08x "
"upper_32_bits(ring->wptr << 2) == 0x%08x\n",
ring->wptr_offs,
lower_32_bits(ring->wptr << 2),
upper_32_bits(ring->wptr << 2));
@ -774,9 +774,9 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR),
lower_32_bits(ring->wptr) << 2);
lower_32_bits(ring->wptr << 2));
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI),
upper_32_bits(ring->wptr) << 2);
upper_32_bits(ring->wptr << 2));
}
doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));

View File

@ -295,8 +295,8 @@ static void sdma_v5_2_ring_set_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell) {
DRM_DEBUG("Using doorbell -- "
"wptr_offs == 0x%08x "
"lower_32_bits(ring->wptr) << 2 == 0x%08x "
"upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
"lower_32_bits(ring->wptr << 2) == 0x%08x "
"upper_32_bits(ring->wptr << 2) == 0x%08x\n",
ring->wptr_offs,
lower_32_bits(ring->wptr << 2),
upper_32_bits(ring->wptr << 2));
@ -672,8 +672,8 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
}
doorbell = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));

View File

@ -56,8 +56,7 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
WREG32(DMA_RB_WPTR + sdma_offsets[me],
(lower_32_bits(ring->wptr) << 2) & 0x3fffc);
WREG32(DMA_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
}
static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
@ -175,7 +174,7 @@ static int si_dma_start(struct amdgpu_device *adev)
WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);
ring->wptr = 0;
WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
ring->sched.ready = true;

View File

@ -701,25 +701,12 @@ static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
static void soc15_reg_base_init(struct amdgpu_device *adev)
{
int r;
/* Set IP register base before any HW register access */
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_RAVEN:
vega10_reg_base_init(adev);
break;
case CHIP_RENOIR:
/* It's safe to do ip discovery here for Renoir,
* it doesn't support SRIOV. */
if (amdgpu_discovery) {
r = amdgpu_discovery_reg_base_init(adev);
if (r == 0)
break;
DRM_WARN("failed to init reg base from ip discovery table, "
"fallback to legacy init method\n");
}
vega10_reg_base_init(adev);
break;
case CHIP_VEGA20:

View File

@ -45,6 +45,14 @@
~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \
0, ip##_HWIP)
#define WREG32_FIELD15_PREREG(ip, idx, reg_name, field, val) \
__WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][reg##reg_name##_BASE_IDX] + reg##reg_name, \
(__RREG32_SOC15_RLC__( \
adev->reg_offset[ip##_HWIP][idx][reg##reg_name##_BASE_IDX] + reg##reg_name, \
0, ip##_HWIP) & \
~REG_FIELD_MASK(reg_name, field)) | (val) << REG_FIELD_SHIFT(reg_name, field), \
0, ip##_HWIP)
#define RREG32_SOC15(ip, inst, reg) \
__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
0, ip##_HWIP)

View File

@ -0,0 +1,620 @@
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "mp/mp_13_0_0_offset.h"
#include "soc15.h"
#include "soc15_common.h"
static const struct amd_ip_funcs soc21_common_ip_funcs;
/*
* Indirect registers accessor
*/
/* Read a PCIE register indirectly via the NBIO index/data aperture. */
static u32 soc21_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long index_offset = adev->nbio.funcs->get_pcie_index_offset(adev);
	unsigned long data_offset = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg(adev, index_offset, data_offset, reg);
}
/* Write a PCIE register indirectly via the NBIO index/data aperture. */
static void soc21_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long index_offset = adev->nbio.funcs->get_pcie_index_offset(adev);
	unsigned long data_offset = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg(adev, index_offset, data_offset, reg, v);
}
/* 64-bit read of a PCIE register via the NBIO index/data aperture. */
static u64 soc21_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long index_offset = adev->nbio.funcs->get_pcie_index_offset(adev);
	unsigned long data_offset = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg64(adev, index_offset, data_offset, reg);
}
/* 64-bit write of a PCIE register via the NBIO index/data aperture. */
static void soc21_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long index_offset = adev->nbio.funcs->get_pcie_index_offset(adev);
	unsigned long data_offset = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg64(adev, index_offset, data_offset, reg, v);
}
/* Read a DIDT register through the GC indirect index/data register pair. */
static u32 soc21_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	unsigned long idx_reg = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_INDEX);
	unsigned long data_reg = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_DATA);
	u32 val;

	/* Serialize index/data accesses against concurrent DIDT users. */
	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(idx_reg, reg);
	val = RREG32(data_reg);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);

	return val;
}
/* Write a DIDT register through the GC indirect index/data register pair. */
static void soc21_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;
	unsigned long idx_reg = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_INDEX);
	unsigned long data_reg = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_DATA);

	/* Serialize index/data accesses against concurrent DIDT users. */
	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(idx_reg, reg);
	WREG32(data_reg, v);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
/* Return the config memory size as reported by the NBIO block. */
static u32 soc21_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}
/* Return the SPLL reference clock (xclk) frequency. */
static u32 soc21_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
/*
 * soc21_grbm_select - program GRBM_GFX_CNTL so that subsequent GC register
 * accesses target the given ME/pipe/queue/VMID combination.
 */
void soc21_grbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 cntl = 0;

	cntl = REG_SET_FIELD(cntl, GRBM_GFX_CNTL, MEID, me);
	cntl = REG_SET_FIELD(cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	cntl = REG_SET_FIELD(cntl, GRBM_GFX_CNTL, QUEUEID, queue);
	cntl = REG_SET_FIELD(cntl, GRBM_GFX_CNTL, VMID, vmid);

	WREG32(SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL), cntl);
}
/* Enable/disable VGA access. Stub: not implemented for SOC21 yet. */
static void soc21_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
/*
 * Read the VBIOS while the ASIC is disabled.
 * Stub: not implemented for SOC21 yet, always reports failure.
 */
static bool soc21_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
/* Whitelist of registers readable through soc21_read_register(). */
static struct soc15_allowed_register_entry soc21_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, regSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, regGB_ADDR_CONFIG)},
};
/*
 * Read a GRBM-indexed register for a specific SE/SH (0xffffffff selects
 * broadcast).  Serialized by grbm_idx_mutex; broadcast mode is restored
 * before returning.
 */
static uint32_t soc21_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
				    u32 sh_num, u32 reg_offset)
{
	uint32_t data;
	bool narrow = (se_num != 0xffffffff || sh_num != 0xffffffff);

	mutex_lock(&adev->grbm_idx_mutex);
	/* Narrow GRBM to the requested SE/SH before reading. */
	if (narrow)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	data = RREG32(reg_offset);

	/* Restore broadcast mode so later accesses see all SEs/SHs. */
	if (narrow)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return data;
}
/*
 * Fetch a register value, honoring GRBM indexing when requested and
 * preferring the cached GB_ADDR_CONFIG over a hardware read when present.
 */
static uint32_t soc21_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed)
		return soc21_read_indexed_register(adev, se_num, sh_num, reg_offset);

	/* Return the cached GB_ADDR_CONFIG when one has been recorded. */
	if (reg_offset == SOC15_REG_OFFSET(GC, 0, regGB_ADDR_CONFIG) &&
	    adev->gfx.config.gb_addr_config)
		return adev->gfx.config.gb_addr_config;

	return RREG32(reg_offset);
}
/*
 * Read a whitelisted register on behalf of userspace.  Returns 0 with
 * *value filled on success, or -EINVAL if @reg_offset is not in
 * soc21_allowed_read_registers[].
 */
static int soc21_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc21_allowed_read_registers); i++) {
		struct soc15_allowed_register_entry *en =
			&soc21_allowed_read_registers[i];
		uint32_t offset =
			adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset;

		if (reg_offset != offset)
			continue;

		*value = soc21_get_register_value(adev, en->grbm_indexed,
						  se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}
#if 0
/*
 * Mode1 reset via SMU (when supported) or PSP.
 * NOTE(review): compiled out (#if 0) — presumably pending SOC21 reset
 * bring-up; confirm before enabling.
 */
static int soc21_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
		dev_info(adev->dev, "GPU smu mode1 reset\n");
		ret = amdgpu_dpm_mode1_reset(adev);
	} else {
		dev_info(adev->dev, "GPU psp mode1 reset\n");
		ret = psp_gpu_reset(adev);
	}

	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");
	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		/* memsize reads back as all-ones while the ASIC is in reset */
		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
#endif
/* Select the reset method for this ASIC.
 *
 * A method forced via the amdgpu_reset_method module parameter is
 * honoured when it is MODE1 or BACO; any other explicit value draws a
 * warning and automatic selection (keyed off the MP1/SMU IP version)
 * is used instead.
 */
static enum amd_reset_method
soc21_asic_reset_method(struct amdgpu_device *adev)
{
if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
amdgpu_reset_method == AMD_RESET_METHOD_BACO)
return amdgpu_reset_method;
if (amdgpu_reset_method != -1)
dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
amdgpu_reset_method);
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 0):
return AMD_RESET_METHOD_MODE1;
default:
/* fall back to BACO when supported, else mode1 */
if (amdgpu_dpm_is_baco_supported(adev))
return AMD_RESET_METHOD_BACO;
else
return AMD_RESET_METHOD_MODE1;
}
}
/* Perform an ASIC reset using the method chosen by
 * soc21_asic_reset_method().  Returns 0 on success or a negative errno
 * propagated from the underlying reset implementation.
 */
static int soc21_asic_reset(struct amdgpu_device *adev)
{
int ret = 0;
switch (soc21_asic_reset_method(adev)) {
case AMD_RESET_METHOD_PCI:
dev_info(adev->dev, "PCI reset\n");
ret = amdgpu_device_pci_reset(adev);
break;
case AMD_RESET_METHOD_BACO:
dev_info(adev->dev, "BACO reset\n");
ret = amdgpu_dpm_baco_reset(adev);
break;
default:
/* MODE1 is the catch-all reset method */
dev_info(adev->dev, "MODE1 reset\n");
ret = amdgpu_device_mode1_reset(adev);
break;
}
return ret;
}
/* Set UVD video (vclk) and decode (dclk) clocks.
 * Not implemented for SoC21 yet; reports success so callers proceed.
 */
static int soc21_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
/* todo */
return 0;
}
/* Set VCE encode (evclk) and core (ecclk) clocks.
 * Not implemented for SoC21 yet; reports success so callers proceed.
 */
static int soc21_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
/* todo */
return 0;
}
/* Enable PCIe gen2/gen3 link speeds.
 *
 * Bails out when the device sits on a root bus, when the
 * amdgpu_pcie_gen2 module parameter disables it, or when the platform
 * reports no gen2/gen3 link-speed support.  The actual link
 * programming is still a todo.
 */
static void soc21_pcie_gen3_enable(struct amdgpu_device *adev)
{
if (pci_is_root_bus(adev->pdev->bus))
return;
if (amdgpu_pcie_gen2 == 0)
return;
if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
return;
/* todo */
}
/* Program PCIe ASPM (active state power management).
 * Skipped when disabled via the amdgpu_aspm module parameter; the
 * actual register programming is still a todo.
 */
static void soc21_program_aspm(struct amdgpu_device *adev)
{
if (amdgpu_aspm == 0)
return;
/* todo */
}
/* Enable or disable both the doorbell aperture and the doorbell
 * self-ring aperture through the NBIO block callbacks.
 */
static void soc21_enable_doorbell_aperture(struct amdgpu_device *adev,
bool enable)
{
adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
/* Registration record for the SoC21 "common" IP block (v1.0.0);
 * referenced by the IP discovery code when building the block list.
 */
const struct amdgpu_ip_block_version soc21_common_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_COMMON,
.major = 1,
.minor = 0,
.rev = 0,
.funcs = &soc21_common_ip_funcs,
};
/* Fetch the ASIC revision id via the NBIO block callback. */
static uint32_t soc21_get_rev_id(struct amdgpu_device *adev)
{
return adev->nbio.funcs->get_rev_id(adev);
}
/* SoC21 currently always reports that a full reset is required. */
static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
return true;
}
/* Decide whether the ASIC must be reset during driver init.
 *
 * dGPUs need a reset when the PSP sign-of-life scratch register is
 * non-zero, i.e. the system driver and sOS were already loaded by a
 * previous driver instance.  APUs never reset on init.
 */
static bool soc21_need_reset_on_init(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * are already been loaded.
	 */
	return RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81) != 0;
}
/* Report the PCIe replay count for the pcie_replay_count sysfs file.
 * Dummy implementation: always returns 0 until real counter support
 * is wired up.
 */
static uint64_t soc21_get_pcie_replay_count(struct amdgpu_device *adev)
{
/* TODO
* dummy implement for pcie_replay_count sysfs interface
* */
return 0;
}
/* Populate the doorbell index assignments for SoC21.
 * Reuses the NAVI10 doorbell layout constants for compute (MEC) rings,
 * user queues, gfx rings, MES, SDMA, IH and VCN.
 */
static void soc21_init_doorbell_index(struct amdgpu_device *adev)
{
adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
adev->doorbell_index.mes_ring = AMDGPU_NAVI10_DOORBELL_MES_RING;
adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;
/* NOTE(review): the << 1 presumably converts a 64-bit doorbell index
* to 32-bit units, as on other SOC15-family ASICs -- confirm */
adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
adev->doorbell_index.sdma_doorbell_range = 20;
}
/* Hook called before ASIC init; nothing to do on SoC21. */
static void soc21_pre_asic_init(struct amdgpu_device *adev)
{
}
/* ASIC-level callback table for SoC21, installed on adev->asic_funcs
 * during common early init.
 */
static const struct amdgpu_asic_funcs soc21_asic_funcs =
{
.read_disabled_bios = &soc21_read_disabled_bios,
.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
.read_register = &soc21_read_register,
.reset = &soc21_asic_reset,
.reset_method = &soc21_asic_reset_method,
.set_vga_state = &soc21_vga_set_state,
.get_xclk = &soc21_get_xclk,
.set_uvd_clocks = &soc21_set_uvd_clocks,
.set_vce_clocks = &soc21_set_vce_clocks,
.get_config_memsize = &soc21_get_config_memsize,
.init_doorbell_index = &soc21_init_doorbell_index,
.need_full_reset = &soc21_need_full_reset,
.need_reset_on_init = &soc21_need_reset_on_init,
.get_pcie_replay_count = &soc21_get_pcie_replay_count,
.supports_baco = &amdgpu_dpm_is_baco_supported,
.pre_asic_init = &soc21_pre_asic_init,
};
/* Early init for the common IP block: set up the indirect register
 * access helpers, the remapped HDP register window, the ASIC function
 * table, and the per-GC-version clock/power gating flags and external
 * revision id.  Returns -EINVAL for unsupported GC IP versions.
 */
static int soc21_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
/* no SMC register access on SoC21 */
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
adev->pcie_rreg = &soc21_pcie_rreg;
adev->pcie_wreg = &soc21_pcie_wreg;
adev->pcie_rreg64 = &soc21_pcie_rreg64;
adev->pcie_wreg64 = &soc21_pcie_wreg64;
/* TODO: will add them during VCN v2 implementation */
adev->uvd_ctx_rreg = NULL;
adev->uvd_ctx_wreg = NULL;
adev->didt_rreg = &soc21_didt_rreg;
adev->didt_wreg = &soc21_didt_wreg;
adev->asic_funcs = &soc21_asic_funcs;
adev->rev_id = soc21_get_rev_id(adev);
adev->external_rev_id = 0xff;
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
/* no clock/power gating features enabled yet */
adev->cg_flags = 0;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x1; // TODO: need update
break;
default:
/* FIXME: not supported yet */
return -EINVAL;
}
return 0;
}
/* Late init for the common IP block; nothing to do on SoC21. */
static int soc21_common_late_init(void *handle)
{
return 0;
}
/* Software init for the common IP block; nothing to do on SoC21. */
static int soc21_common_sw_init(void *handle)
{
return 0;
}
/* Software teardown for the common IP block; nothing to do on SoC21. */
static int soc21_common_sw_fini(void *handle)
{
return 0;
}
/* Hardware init for the common IP block: bring up the PCIe link and
 * ASPM (both currently stubs), program the NBIO registers, remap the
 * HDP registers, and open the doorbell apertures.  Always returns 0.
 */
static int soc21_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* enable pcie gen2/3 link */
soc21_pcie_gen3_enable(adev);
/* enable aspm */
soc21_program_aspm(adev);
/* setup nbio registers */
adev->nbio.funcs->init_registers(adev);
/* remap HDP registers to a hole in mmio space,
* for the purpose of expose those registers
* to process space
*/
if (adev->nbio.funcs->remap_hdp_registers)
adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
soc21_enable_doorbell_aperture(adev, true);
return 0;
}
/* Hardware teardown for the common IP block: close the doorbell
 * apertures.  Always returns 0.
 */
static int soc21_common_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* disable the doorbell aperture */
soc21_enable_doorbell_aperture(adev, false);
return 0;
}
/* Suspend is identical to hw_fini for the common block.
 * (handle and adev are the same pointer, so passing adev on is fine.)
 */
static int soc21_common_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return soc21_common_hw_fini(adev);
}
/* Resume is identical to hw_init for the common block.
 * (handle and adev are the same pointer, so passing adev on is fine.)
 */
static int soc21_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return soc21_common_hw_init(adev);
}
/* The common block has no busy state; always report idle. */
static bool soc21_common_is_idle(void *handle)
{
return true;
}
/* Nothing to wait for; the common block is always idle. */
static int soc21_common_wait_for_idle(void *handle)
{
return 0;
}
/* No soft reset implemented for the common block; reports success. */
static int soc21_common_soft_reset(void *handle)
{
return 0;
}
/* Enable or disable clock gating for the common block.
 * For NBIO v4.3.0 this toggles NBIO medium-grain clock gating, NBIO
 * light sleep and HDP clock gating according to the requested state;
 * other NBIO versions are a no-op.  Always returns 0.
 */
static int soc21_common_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
switch (adev->ip_versions[NBIO_HWIP][0]) {
case IP_VERSION(4, 3, 0):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
default:
break;
}
return 0;
}
/* Power gating control for the common block; not implemented yet. */
static int soc21_common_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
/* TODO */
return 0;
}
/* Report the currently enabled clockgating features by letting the
 * NBIO and HDP blocks OR their AMD_CG_SUPPORT_* bits into *flags.
 *
 * Fix: dropped the redundant bare `return;` at the end of this void
 * function (checkpatch: "void function return statements are not
 * generally useful").
 */
static void soc21_common_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->nbio.funcs->get_clockgating_state(adev, flags);
	adev->hdp.funcs->get_clock_gating_state(adev, flags);
}
/* IP-block function table for the SoC21 common block, referenced by
 * soc21_common_ip_block above.
 */
static const struct amd_ip_funcs soc21_common_ip_funcs = {
.name = "soc21_common",
.early_init = soc21_common_early_init,
.late_init = soc21_common_late_init,
.sw_init = soc21_common_sw_init,
.sw_fini = soc21_common_sw_fini,
.hw_init = soc21_common_hw_init,
.hw_fini = soc21_common_hw_fini,
.suspend = soc21_common_suspend,
.resume = soc21_common_resume,
.is_idle = soc21_common_is_idle,
.wait_for_idle = soc21_common_wait_for_idle,
.soft_reset = soc21_common_soft_reset,
.set_clockgating_state = soc21_common_set_clockgating_state,
.set_powergating_state = soc21_common_set_powergating_state,
.get_clockgating_state = soc21_common_get_clockgating_state,
};

View File

@ -0,0 +1,30 @@
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __SOC21_H__
#define __SOC21_H__
/* Registration record for the SoC21 common IP block (defined in soc21.c) */
extern const struct amdgpu_ip_block_version soc21_common_ip_block;
/* Select the GRBM me/pipe/queue/vmid instance for subsequent register access */
void soc21_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
#endif

View File

@ -944,8 +944,6 @@ err_drm_file:
bool kfd_dev_is_large_bar(struct kfd_dev *dev)
{
struct kfd_local_mem_info mem_info;
if (debug_largebar) {
pr_debug("Simulate large-bar allocation on non large-bar machine\n");
return true;
@ -954,9 +952,8 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev)
if (dev->use_iommu_v2)
return false;
amdgpu_amdkfd_get_local_mem_info(dev->adev, &mem_info);
if (mem_info.local_mem_size_private == 0 &&
mem_info.local_mem_size_public > 0)
if (dev->local_mem_info.local_mem_size_private == 0 &&
dev->local_mem_info.local_mem_size_public > 0)
return true;
return false;
}

View File

@ -2152,7 +2152,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
* report the total FB size (public+private) as a single
* private heap.
*/
amdgpu_amdkfd_get_local_mem_info(kdev->adev, &local_mem_info);
local_mem_info = kdev->local_mem_info;
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
sub_type_hdr->length);

View File

@ -575,6 +575,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
if (kfd_resume(kfd))
goto kfd_resume_error;
amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info);
if (kfd_topology_add_device(kfd)) {
dev_err(kfd_device, "Error adding device to topology\n");
goto kfd_topology_add_device_error;
@ -873,8 +875,6 @@ out:
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size)
{
unsigned int num_of_longs;
if (WARN_ON(buf_size < chunk_size))
return -EINVAL;
if (WARN_ON(buf_size == 0))
@ -885,11 +885,8 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
kfd->gtt_sa_chunk_size = chunk_size;
kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;
num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) /
BITS_PER_LONG;
kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);
kfd->gtt_sa_bitmap = bitmap_zalloc(kfd->gtt_sa_num_of_chunks,
GFP_KERNEL);
if (!kfd->gtt_sa_bitmap)
return -ENOMEM;
@ -899,13 +896,12 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
mutex_init(&kfd->gtt_sa_lock);
return 0;
}
static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
mutex_destroy(&kfd->gtt_sa_lock);
kfree(kfd->gtt_sa_bitmap);
bitmap_free(kfd->gtt_sa_bitmap);
}
static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
@ -973,7 +969,7 @@ kfd_gtt_restart_search:
/* If we need only one chunk, mark it as allocated and get out */
if (size <= kfd->gtt_sa_chunk_size) {
pr_debug("Single bit\n");
set_bit(found, kfd->gtt_sa_bitmap);
__set_bit(found, kfd->gtt_sa_bitmap);
goto kfd_gtt_out;
}
@ -1011,10 +1007,8 @@ kfd_gtt_restart_search:
(*mem_obj)->range_start, (*mem_obj)->range_end);
/* Mark the chunks as allocated */
for (found = (*mem_obj)->range_start;
found <= (*mem_obj)->range_end;
found++)
set_bit(found, kfd->gtt_sa_bitmap);
bitmap_set(kfd->gtt_sa_bitmap, (*mem_obj)->range_start,
(*mem_obj)->range_end - (*mem_obj)->range_start + 1);
kfd_gtt_out:
mutex_unlock(&kfd->gtt_sa_lock);
@ -1029,8 +1023,6 @@ kfd_gtt_no_free_chunk:
int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
unsigned int bit;
/* Act like kfree when trying to free a NULL object */
if (!mem_obj)
return 0;
@ -1041,10 +1033,8 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
mutex_lock(&kfd->gtt_sa_lock);
/* Mark the chunks as free */
for (bit = mem_obj->range_start;
bit <= mem_obj->range_end;
bit++)
clear_bit(bit, kfd->gtt_sa_bitmap);
bitmap_clear(kfd->gtt_sa_bitmap, mem_obj->range_start,
mem_obj->range_end - mem_obj->range_start + 1);
mutex_unlock(&kfd->gtt_sa_lock);

View File

@ -238,12 +238,24 @@ static int create_other_event(struct kfd_process *p, struct kfd_event *ev, const
return 0;
}
void kfd_event_init_process(struct kfd_process *p)
int kfd_event_init_process(struct kfd_process *p)
{
int id;
mutex_init(&p->event_mutex);
idr_init(&p->event_idr);
p->signal_page = NULL;
p->signal_event_count = 0;
p->signal_event_count = 1;
/* Allocate event ID 0. It is used for a fast path to ignore bogus events
* that are sent by the CP without a context ID
*/
id = idr_alloc(&p->event_idr, NULL, 0, 1, GFP_KERNEL);
if (id < 0) {
idr_destroy(&p->event_idr);
mutex_destroy(&p->event_mutex);
return id;
}
return 0;
}
static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
@ -271,8 +283,10 @@ static void destroy_events(struct kfd_process *p)
uint32_t id;
idr_for_each_entry(&p->event_idr, ev, id)
destroy_event(p, ev);
if (ev)
destroy_event(p, ev);
idr_destroy(&p->event_idr);
mutex_destroy(&p->event_mutex);
}
/*
@ -749,7 +763,7 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
* iterate over the signal slots and lookup
* only signaled events from the IDR.
*/
for (id = 0; id < KFD_SIGNAL_EVENT_LIMIT; id++)
for (id = 1; id < KFD_SIGNAL_EVENT_LIMIT; id++)
if (READ_ONCE(slots[id]) != UNSIGNALED_EVENT_SLOT) {
ev = lookup_event_by_id(p, id);
set_event_from_interrupt(p, ev);

View File

@ -141,6 +141,25 @@ static void event_interrupt_poison_consumption(struct kfd_dev *dev,
}
}
static bool context_id_expected(struct kfd_dev *dev)
{
switch (KFD_GC_VERSION(dev)) {
case IP_VERSION(9, 0, 1):
return dev->mec_fw_version >= 0x817a;
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 1):
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 3, 0):
case IP_VERSION(9, 4, 0):
return dev->mec_fw_version >= 0x17a;
default:
/* Other GFXv9 and later GPUs always sent valid context IDs
* on legitimate events
*/
return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 1);
}
}
static bool event_interrupt_isr_v9(struct kfd_dev *dev,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre,
@ -206,6 +225,20 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
return false;
/* Workaround CP firmware sending bogus signals with 0 context_id.
* Those can be safely ignored on hardware and firmware versions that
* include a valid context_id on legitimate signals. This avoids the
* slow path in kfd_signal_event_interrupt that scans all event slots
* for signaled events.
*/
if (source_id == SOC15_INTSRC_CP_END_OF_PIPE) {
uint32_t context_id =
SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
if (context_id == 0 && context_id_expected(dev))
return false;
}
/* Interrupt types we care about: various signals and faults.
* They will be forwarded to a work queue (see below).
*/

View File

@ -410,7 +410,6 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma migrate;
unsigned long cpages = 0;
dma_addr_t *scratch;
size_t size;
void *buf;
int r = -ENOMEM;
@ -421,9 +420,9 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
size = 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t);
size *= npages;
buf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
buf = kvcalloc(npages,
2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
GFP_KERNEL);
if (!buf)
goto out;
@ -665,7 +664,6 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct dma_fence *mfence = NULL;
struct migrate_vma migrate;
dma_addr_t *scratch;
size_t size;
void *buf;
int r = -ENOMEM;
@ -676,9 +674,10 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
size = 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t);
size *= npages;
buf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
buf = kvcalloc(npages,
2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
GFP_KERNEL);
if (!buf)
goto out;

View File

@ -272,6 +272,7 @@ struct kfd_dev {
struct kgd2kfd_shared_resources shared_resources;
struct kfd_vmid_info vm_info;
struct kfd_local_mem_info local_mem_info;
const struct kfd2kgd_calls *kfd2kgd;
struct mutex doorbell_mutex;
@ -1295,7 +1296,7 @@ extern const struct kfd_event_interrupt_class event_interrupt_class_v9;
extern const struct kfd_device_global_init_class device_global_init_class_cik;
void kfd_event_init_process(struct kfd_process *p);
int kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,

View File

@ -1370,12 +1370,16 @@ static struct kfd_process *create_process(const struct task_struct *thread)
INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
process->last_restore_timestamp = get_jiffies_64();
kfd_event_init_process(process);
err = kfd_event_init_process(process);
if (err)
goto err_event_init;
process->is_32bit_user_mode = in_compat_syscall();
process->pasid = kfd_pasid_alloc();
if (process->pasid == 0)
if (process->pasid == 0) {
err = -ENOSPC;
goto err_alloc_pasid;
}
err = pqm_init(&process->pqm, process);
if (err != 0)
@ -1424,6 +1428,8 @@ err_init_apertures:
err_process_pqm_init:
kfd_pasid_free(process->pasid);
err_alloc_pasid:
kfd_event_free_process(process);
err_event_init:
mutex_destroy(&process->mutex);
kfree(process);
err_alloc_process:

View File

@ -149,8 +149,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
int i, r;
if (!addr) {
addr = kvmalloc_array(prange->npages, sizeof(*addr),
GFP_KERNEL | __GFP_ZERO);
addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL);
if (!addr)
return -ENOMEM;
prange->dma_addr[gpuidx] = addr;
@ -686,7 +685,8 @@ svm_range_check_attr(struct kfd_process *p,
static void
svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
bool *update_mapping)
{
uint32_t i;
int gpuidx;
@ -702,6 +702,7 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
case KFD_IOCTL_SVM_ATTR_ACCESS:
case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
*update_mapping = true;
gpuidx = kfd_process_gpuidx_from_gpuid(p,
attrs[i].value);
if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
@ -716,9 +717,11 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
}
break;
case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
*update_mapping = true;
prange->flags |= attrs[i].value;
break;
case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
*update_mapping = true;
prange->flags &= ~attrs[i].value;
break;
case KFD_IOCTL_SVM_ATTR_GRANULARITY:
@ -951,6 +954,7 @@ svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
new->prefetch_loc = old->prefetch_loc;
new->actual_loc = old->actual_loc;
new->granularity = old->granularity;
new->mapped_to_gpu = old->mapped_to_gpu;
bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
@ -1204,6 +1208,17 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
uint32_t gpuidx;
int r = 0;
if (!prange->mapped_to_gpu) {
pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
prange, prange->start, prange->last);
return 0;
}
if (prange->start == start && prange->last == last) {
pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
prange->mapped_to_gpu = false;
}
bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
MAX_GPU_INSTANCE);
p = container_of(prange->svms, struct kfd_process, svms);
@ -1239,7 +1254,7 @@ static int
svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
unsigned long offset, unsigned long npages, bool readonly,
dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
struct dma_fence **fence)
struct dma_fence **fence, bool flush_tlb)
{
struct amdgpu_device *adev = pdd->dev->adev;
struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
@ -1277,7 +1292,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
(last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
pte_flags);
r = amdgpu_vm_update_range(adev, vm, false, false, false, NULL,
r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
last_start, prange->start + i,
pte_flags,
last_start - prange->start,
@ -1311,7 +1326,7 @@ out:
static int
svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
unsigned long npages, bool readonly,
unsigned long *bitmap, bool wait)
unsigned long *bitmap, bool wait, bool flush_tlb)
{
struct kfd_process_device *pdd;
struct amdgpu_device *bo_adev;
@ -1346,7 +1361,8 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
prange->dma_addr[gpuidx],
bo_adev, wait ? &fence : NULL);
bo_adev, wait ? &fence : NULL,
flush_tlb);
if (r)
break;
@ -1467,8 +1483,8 @@ static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
* 5. Release page table (and SVM BO) reservation
*/
static int svm_range_validate_and_map(struct mm_struct *mm,
struct svm_range *prange,
int32_t gpuidx, bool intr, bool wait)
struct svm_range *prange, int32_t gpuidx,
bool intr, bool wait, bool flush_tlb)
{
struct svm_validate_context ctx;
unsigned long start, end, addr;
@ -1507,8 +1523,12 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
prange->bitmap_aip, MAX_GPU_INSTANCE);
}
if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE))
return 0;
if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE)) {
if (!prange->mapped_to_gpu)
return 0;
bitmap_copy(ctx.bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
}
if (prange->actual_loc && !prange->ttm_res) {
/* This should never happen. actual_loc gets set by
@ -1580,7 +1600,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
}
r = svm_range_map_to_gpus(prange, offset, npages, readonly,
ctx.bitmap, wait);
ctx.bitmap, wait, flush_tlb);
unlock_out:
svm_range_unlock(prange);
@ -1588,8 +1608,10 @@ unlock_out:
addr = next;
}
if (addr == end)
if (addr == end) {
prange->validated_once = true;
prange->mapped_to_gpu = true;
}
unreserve_out:
svm_range_unreserve_bos(&ctx);
@ -1674,7 +1696,7 @@ static void svm_range_restore_work(struct work_struct *work)
mutex_lock(&prange->migrate_mutex);
r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
false, true);
false, true, false);
if (r)
pr_debug("failed %d to map 0x%lx to gpus\n", r,
prange->start);
@ -1820,6 +1842,7 @@ static struct svm_range *svm_range_clone(struct svm_range *old)
new->prefetch_loc = old->prefetch_loc;
new->actual_loc = old->actual_loc;
new->granularity = old->granularity;
new->mapped_to_gpu = old->mapped_to_gpu;
bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
@ -2811,7 +2834,7 @@ retry_write_locked:
}
}
r = svm_range_validate_and_map(mm, prange, gpuidx, false, false);
r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
if (r)
pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
r, svms, prange->start, prange->last);
@ -3224,6 +3247,8 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
struct svm_range_list *svms;
struct svm_range *prange;
struct svm_range *next;
bool update_mapping = false;
bool flush_tlb;
int r = 0;
pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
@ -3262,7 +3287,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
svm_range_add_notifier_locked(mm, prange);
}
list_for_each_entry(prange, &update_list, update_list) {
svm_range_apply_attrs(p, prange, nattr, attrs);
svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
/* TODO: unmap ranges from GPU that lost access */
}
list_for_each_entry_safe(prange, next, &remove_list, update_list) {
@ -3295,8 +3320,15 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
continue;
}
if (!migrated && !update_mapping) {
mutex_unlock(&prange->migrate_mutex);
continue;
}
flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
true, true);
true, true, flush_tlb);
if (r)
pr_debug("failed %d to map svm range\n", r);

View File

@ -133,6 +133,7 @@ struct svm_range {
DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
bool validated_once;
bool mapped_to_gpu;
};
static inline void svm_range_lock(struct svm_range *prange)

View File

@ -1112,15 +1112,12 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
uint32_t buf[7];
uint64_t local_mem_size;
int i;
struct kfd_local_mem_info local_mem_info;
if (!gpu)
return 0;
amdgpu_amdkfd_get_local_mem_info(gpu->adev, &local_mem_info);
local_mem_size = local_mem_info.local_mem_size_private +
local_mem_info.local_mem_size_public;
local_mem_size = gpu->local_mem_info.local_mem_size_private +
gpu->local_mem_info.local_mem_size_public;
buf[0] = gpu->pdev->devfn;
buf[1] = gpu->pdev->subsystem_vendor |
@ -1534,13 +1531,13 @@ static void kfd_topology_update_io_links(int proximity_domain)
list_del(&iolink->list);
dev->io_link_count--;
dev->node_props.io_links_count--;
} else if (iolink->node_from > proximity_domain) {
iolink->node_from--;
} else if (iolink->node_to > proximity_domain) {
iolink->node_to--;
} else {
if (iolink->node_from > proximity_domain)
iolink->node_from--;
if (iolink->node_to > proximity_domain)
iolink->node_to--;
}
}
}
}

View File

@ -94,6 +94,8 @@ static int dcn301_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
{
uint32_t result;
result = dcn301_smu_wait_for_response(clk_mgr, 10, 200000);
/* First clear response register */
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY);

View File

@ -615,7 +615,7 @@ static void dcn31_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
}
}
void dcn31_set_low_power_state(struct clk_mgr *clk_mgr_base)
static void dcn31_set_low_power_state(struct clk_mgr *clk_mgr_base)
{
int display_count;
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

View File

@ -219,8 +219,50 @@ static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *reg
static struct clk_bw_params dcn315_bw_params = {
.vram_type = Ddr4MemType,
.num_channels = 1,
.num_channels = 2,
.clk_table = {
.entries = {
{
.voltage = 0,
.dispclk_mhz = 640,
.dppclk_mhz = 640,
.phyclk_mhz = 810,
.phyclk_d18_mhz = 667,
.dtbclk_mhz = 600,
},
{
.voltage = 1,
.dispclk_mhz = 739,
.dppclk_mhz = 739,
.phyclk_mhz = 810,
.phyclk_d18_mhz = 667,
.dtbclk_mhz = 600,
},
{
.voltage = 2,
.dispclk_mhz = 960,
.dppclk_mhz = 960,
.phyclk_mhz = 810,
.phyclk_d18_mhz = 667,
.dtbclk_mhz = 600,
},
{
.voltage = 3,
.dispclk_mhz = 1200,
.dppclk_mhz = 1200,
.phyclk_mhz = 810,
.phyclk_d18_mhz = 667,
.dtbclk_mhz = 600,
},
{
.voltage = 4,
.dispclk_mhz = 1372,
.dppclk_mhz = 1372,
.phyclk_mhz = 810,
.phyclk_d18_mhz = 667,
.dtbclk_mhz = 600,
},
},
.num_entries = 5,
},
@ -300,8 +342,8 @@ static struct wm_table lpddr5_wm_table = {
}
};
static DpmClocks_315_t dummy_clocks;
/* Temporary Place holder until we can get them from fuse */
static DpmClocks_315_t dummy_clocks = { 0 };
static struct dcn315_watermarks dummy_wms = { 0 };
static void dcn315_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn315_watermarks *table)
@ -415,22 +457,6 @@ static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
return max;
}
static unsigned int find_clk_for_voltage(
const DpmClocks_315_t *clock_table,
const uint32_t clocks[],
unsigned int voltage)
{
int i;
for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
if (clock_table->SocVoltage[i] == voltage)
return clocks[i];
}
ASSERT(0);
return 0;
}
static void dcn315_clk_mgr_helper_populate_bw_params(
struct clk_mgr_internal *clk_mgr,
struct integrated_info *bios_info,
@ -438,13 +464,9 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
{
int i;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
uint32_t max_dispclk, max_dppclk, max_pstate, max_socclk, max_fclk = 0, min_pstate = 0;
uint32_t max_pstate = 0, max_fclk = 0, min_pstate = 0;
struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
max_socclk = find_max_clk_value(clock_table->SocClocks, clock_table->NumSocClkLevelsEnabled);
/* Find highest fclk pstate */
for (i = 0; i < clock_table->NumDfPstatesEnabled; i++) {
if (clock_table->DfPstateTable[i].FClk > max_fclk) {
@ -466,35 +488,44 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
}
}
/* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
for (j = bw_params->clk_table.num_entries - 1; j > 0; j--)
if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])
break;
bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;
bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz;
bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;
/* Now update clocks we do read */
bw_params->clk_table.entries[i].fclk_mhz = min_fclk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[min_pstate].MemClk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[min_pstate].Voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
bw_params->clk_table.entries[i].dispclk_mhz = clock_table->DispClocks[i];
bw_params->clk_table.entries[i].dppclk_mhz = clock_table->DppClocks[i];
bw_params->clk_table.entries[i].wck_ratio = 1;
};
/* Make sure to include at least one entry and highest pstate */
if (max_pstate != min_pstate) {
if (max_pstate != min_pstate || i == 0) {
bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[max_pstate].MemClk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[max_pstate].Voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = find_clk_for_voltage(
clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[max_pstate].Voltage);
bw_params->clk_table.entries[i].socclk_mhz = find_clk_for_voltage(
clock_table, clock_table->SocClocks, clock_table->DfPstateTable[max_pstate].Voltage);
bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS);
bw_params->clk_table.entries[i].wck_ratio = 1;
i++;
}
bw_params->clk_table.num_entries = i;
bw_params->clk_table.num_entries = i--;
/* Include highest socclk */
if (bw_params->clk_table.entries[i-1].socclk_mhz < max_socclk)
bw_params->clk_table.entries[i-1].socclk_mhz = max_socclk;
/* Make sure all highest clocks are included*/
bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
bw_params->clk_table.entries[i].dispclk_mhz = find_max_clk_value(clock_table->DispClocks, NUM_DISPCLK_DPM_LEVELS);
bw_params->clk_table.entries[i].dppclk_mhz = find_max_clk_value(clock_table->DppClocks, NUM_DPPCLK_DPM_LEVELS);
ASSERT(clock_table->DcfClocks[i] == find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS));
bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
/* Set any 0 clocks to max default setting. Not an issue for
* power since we aren't doing switching in such case anyway
@ -513,9 +544,18 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz;
if (!bw_params->clk_table.entries[i].dppclk_mhz)
bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz;
if (!bw_params->clk_table.entries[i].phyclk_mhz)
bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
if (!bw_params->clk_table.entries[i].phyclk_d18_mhz)
bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
if (!bw_params->clk_table.entries[i].dtbclk_mhz)
bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
}
ASSERT(bw_params->clk_table.entries[i].dcfclk_mhz);
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
if (!bw_params->num_channels)
bw_params->num_channels = 2;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;

View File

@ -3317,9 +3317,14 @@ bool dc_link_setup_psr(struct dc_link *link,
*/
psr_context->frame_delay = 0;
if (psr)
if (psr) {
link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr,
link, psr_context, panel_inst);
if (link->psr_settings.psr_feature_enabled) {
link->psr_settings.psr_power_opt = 0;
link->psr_settings.psr_allow_active = 0;
}
}
else
link->psr_settings.psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
@ -3966,8 +3971,12 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
if (is_dp_128b_132b_signal(pipe_ctx))
config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;
/* dio output index */
config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
/* dio output index is dpia index for DPIA endpoint & dcio index by default */
if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
config.dio_output_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1;
else
config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
/* phy index */
config.phy_idx = resource_transmitter_to_phy_idx(

View File

@ -543,15 +543,9 @@ bool dal_ddc_service_query_ddc_data(
uint32_t payloads_num = write_payloads + read_payloads;
if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE)
return false;
if (!payloads_num)
return false;
/*TODO: len of payload data for i2c and aux is uint8!!!!,
* but we want to read 256 over i2c!!!!*/
if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
struct aux_payload payload;

View File

@ -4576,6 +4576,7 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
{
int i;
struct pipe_ctx *pipe_ctx;
struct dc_link_settings prev_link_settings = link->preferred_link_setting;
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
@ -4586,6 +4587,10 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
return;
/* toggle stream state with the preference for current link settings */
dc_link_set_preferred_training_settings((struct dc *)link->dc,
&link->cur_link_settings, NULL, link, true);
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
@ -4601,6 +4606,10 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
core_link_enable_stream(link->dc->current_state, pipe_ctx);
}
}
/* restore previous link settings preference */
dc_link_set_preferred_training_settings((struct dc *)link->dc,
&prev_link_settings, NULL, link, true);
}
bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
@ -5822,6 +5831,10 @@ void detect_edp_sink_caps(struct dc_link *link)
core_link_read_dpcd(link, DP_PSR_SUPPORT,
&link->dpcd_caps.psr_info.psr_version,
sizeof(link->dpcd_caps.psr_info.psr_version));
if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8)
core_link_read_dpcd(link, DP_FORCE_PSRSU_CAPABILITY,
&link->dpcd_caps.psr_info.force_psrsu_cap,
sizeof(link->dpcd_caps.psr_info.force_psrsu_cap));
core_link_read_dpcd(link, DP_PSR_CAPS,
&link->dpcd_caps.psr_info.psr_dpcd_caps.raw,
sizeof(link->dpcd_caps.psr_info.psr_dpcd_caps.raw));
@ -7565,6 +7578,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_u
DC_LOG_DSC(" ");
dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps));
if (dc_is_dp_signal(stream->signal)) {
DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
if (is_dp_128b_132b_signal(pipe_ctx))
@ -7582,6 +7596,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_u
}
} else {
/* disable DSC PPS in stream encoder */
memset(&stream->dsc_packed_pps[0], 0, sizeof(stream->dsc_packed_pps));
if (dc_is_dp_signal(stream->signal)) {
if (is_dp_128b_132b_signal(pipe_ctx))
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(

View File

@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
#define DC_VER "3.2.181"
#define DC_VER "3.2.183"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@ -359,6 +359,12 @@ enum dc_psr_power_opts {
psr_power_opt_ds_disable_allow = 0x100,
};
enum dml_hostvm_override_opts {
DML_HOSTVM_NO_OVERRIDE = 0x0,
DML_HOSTVM_OVERRIDE_FALSE = 0x1,
DML_HOSTVM_OVERRIDE_TRUE = 0x2,
};
enum dcc_option {
DCC_ENABLE = 0,
DCC_DISABLE = 1,
@ -417,7 +423,6 @@ struct dc_clocks {
#if defined(CONFIG_DRM_AMD_DC_DCN)
enum dcn_zstate_support_state zstate_support;
bool dtbclk_en;
int dtbclk_khz;
#endif
enum dcn_pwr_state pwr_state;
/*
@ -733,6 +738,7 @@ struct dc_debug_options {
bool extended_blank_optimization;
union aux_wake_wa_options aux_wake_wa;
uint8_t psr_power_use_phy_fsm;
enum dml_hostvm_override_opts dml_hostvm_override;
};
struct gpu_info_soc_bounding_box_v1_0;

View File

@ -162,7 +162,7 @@ struct dc_stream_state {
struct dc_info_packet vrr_infopacket;
struct dc_info_packet vsc_infopacket;
struct dc_info_packet vsp_infopacket;
uint8_t dsc_packed_pps[128];
struct rect src; /* composition area */
struct rect dst; /* stream addressable area */

View File

@ -231,7 +231,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_
dc_dmub_srv_wait_idle(dc->dmub_srv);
}
/**
/*
* Set PSR power optimization flags.
*/
static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt, uint8_t panel_inst)

View File

@ -25,6 +25,7 @@
#include <linux/delay.h>
#include "dm_services.h"
#include "dc_bios_types.h"
#include "dcn10_stream_encoder.h"
#include "reg_helper.h"

View File

@ -92,6 +92,8 @@
SRI(DP_VID_STREAM_CNTL, DP, id), \
SRI(DP_VID_TIMING, DP, id), \
SRI(DP_SEC_AUD_N, DP, id), \
SRI(DP_SEC_AUD_N_READBACK, DP, id), \
SRI(DP_SEC_AUD_M_READBACK, DP, id), \
SRI(DP_SEC_TIMESTAMP, DP, id), \
SRI(DIG_CLOCK_PATTERN, DIG, id)
@ -140,6 +142,8 @@ struct dcn10_stream_enc_registers {
uint32_t DP_VID_STREAM_CNTL;
uint32_t DP_VID_TIMING;
uint32_t DP_SEC_AUD_N;
uint32_t DP_SEC_AUD_N_READBACK;
uint32_t DP_SEC_AUD_M_READBACK;
uint32_t DP_SEC_TIMESTAMP;
uint32_t HDMI_CONTROL;
uint32_t HDMI_GC;
@ -256,6 +260,8 @@ struct dcn10_stream_enc_registers {
SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\
SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\
SE_SF(DP0_DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\
SE_SF(DP0_DP_SEC_AUD_N_READBACK, DP_SEC_AUD_N_READBACK, mask_sh),\
SE_SF(DP0_DP_SEC_AUD_M_READBACK, DP_SEC_AUD_M_READBACK, mask_sh),\
SE_SF(DP0_DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\
SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\
SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\
@ -473,6 +479,8 @@ struct dcn10_stream_enc_registers {
type AFMT_60958_CS_CHANNEL_NUMBER_6;\
type AFMT_60958_CS_CHANNEL_NUMBER_7;\
type DP_SEC_AUD_N;\
type DP_SEC_AUD_N_READBACK;\
type DP_SEC_AUD_M_READBACK;\
type DP_SEC_TIMESTAMP_MODE;\
type DP_SEC_ASP_ENABLE;\
type DP_SEC_ATP_ENABLE;\

View File

@ -95,8 +95,6 @@ static void gpu_addr_to_uma(struct dce_hwseq *hwseq,
} else if (hwseq->fb_offset.quad_part <= addr->quad_part &&
addr->quad_part <= hwseq->uma_top.quad_part) {
is_in_uma = true;
} else if (addr->quad_part == 0) {
is_in_uma = false;
} else {
is_in_uma = false;
}

View File

@ -95,6 +95,8 @@
SRI(DP_VID_STREAM_CNTL, DP, id), \
SRI(DP_VID_TIMING, DP, id), \
SRI(DP_SEC_AUD_N, DP, id), \
SRI(DP_SEC_AUD_N_READBACK, DP, id), \
SRI(DP_SEC_AUD_M_READBACK, DP, id), \
SRI(DP_SEC_TIMESTAMP, DP, id), \
SRI(DP_DSC_CNTL, DP, id), \
SRI(DP_DSC_BYTES_PER_PIXEL, DP, id), \
@ -157,6 +159,8 @@
SE_SF(DIG0_HDMI_ACR_48_0, HDMI_ACR_CTS_48, mask_sh),\
SE_SF(DIG0_HDMI_ACR_48_1, HDMI_ACR_N_48, mask_sh),\
SE_SF(DP0_DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\
SE_SF(DP0_DP_SEC_AUD_N_READBACK, DP_SEC_AUD_N_READBACK, mask_sh),\
SE_SF(DP0_DP_SEC_AUD_M_READBACK, DP_SEC_AUD_M_READBACK, mask_sh),\
SE_SF(DP0_DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\
SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\
SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\

View File

@ -890,6 +890,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.enable_sw_cntl_psr = true,
.apply_vendor_specific_lttpr_wa = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
.dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
};
static const struct dc_debug_options debug_defaults_diags = {
@ -1666,7 +1667,6 @@ int dcn31_populate_dml_pipes_from_context(
* intermittently experienced depending on peak b/w requirements.
*/
pipes[pipe_cnt].pipe.src.immediate_flip = true;
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
pipes[pipe_cnt].pipe.src.gpuvm = true;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
@ -1675,6 +1675,13 @@ int dcn31_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
if (dc->debug.dml_hostvm_override == DML_HOSTVM_NO_OVERRIDE)
pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
else if (dc->debug.dml_hostvm_override == DML_HOSTVM_OVERRIDE_FALSE)
pipes[pipe_cnt].pipe.src.hostvm = false;
else if (dc->debug.dml_hostvm_override == DML_HOSTVM_OVERRIDE_TRUE)
pipes[pipe_cnt].pipe.src.hostvm = true;
if (pipes[pipe_cnt].dout.dsc_enable) {
switch (timing->display_color_depth) {
case COLOR_DEPTH_888:
@ -1735,6 +1742,27 @@ void dcn31_calculate_wm_and_dlg(
DC_FP_END();
}
void
dcn31_populate_dml_writeback_from_context(struct dc *dc,
struct resource_context *res_ctx,
display_e2e_pipe_params_st *pipes)
{
DC_FP_START();
dcn30_populate_dml_writeback_from_context(dc, res_ctx, pipes);
DC_FP_END();
}
void
dcn31_set_mcif_arb_params(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt)
{
DC_FP_START();
dcn30_set_mcif_arb_params(dc, context, pipes, pipe_cnt);
DC_FP_END();
}
bool dcn31_validate_bandwidth(struct dc *dc,
struct dc_state *context,
bool fast_validate)
@ -1808,8 +1836,8 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
.set_mcif_arb_params = dcn30_set_mcif_arb_params,
.populate_dml_writeback_from_context = dcn31_populate_dml_writeback_from_context,
.set_mcif_arb_params = dcn31_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,

View File

@ -50,6 +50,15 @@ int dcn31_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate);
void
dcn31_populate_dml_writeback_from_context(struct dc *dc,
struct resource_context *res_ctx,
display_e2e_pipe_params_st *pipes);
void
dcn31_set_mcif_arb_params(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
struct resource_pool *dcn31_create_resource_pool(

View File

@ -1726,8 +1726,8 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
.set_mcif_arb_params = dcn30_set_mcif_arb_params,
.populate_dml_writeback_from_context = dcn31_populate_dml_writeback_from_context,
.set_mcif_arb_params = dcn31_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,

View File

@ -1728,8 +1728,8 @@ static struct resource_funcs dcn316_res_pool_funcs = {
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
.set_mcif_arb_params = dcn30_set_mcif_arb_params,
.populate_dml_writeback_from_context = dcn31_populate_dml_writeback_from_context,
.set_mcif_arb_params = dcn31_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,

View File

@ -260,55 +260,6 @@ struct _vcs_dpi_ip_params_st dcn3_15_ip = {
};
struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
/*TODO: correct dispclk/dppclk voltage level determination*/
.clock_limits = {
{
.state = 0,
.dispclk_mhz = 1372.0,
.dppclk_mhz = 1372.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 417.0,
.dtbclk_mhz = 600.0,
},
{
.state = 1,
.dispclk_mhz = 1372.0,
.dppclk_mhz = 1372.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 417.0,
.dtbclk_mhz = 600.0,
},
{
.state = 2,
.dispclk_mhz = 1372.0,
.dppclk_mhz = 1372.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 417.0,
.dtbclk_mhz = 600.0,
},
{
.state = 3,
.dispclk_mhz = 1372.0,
.dppclk_mhz = 1372.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 417.0,
.dtbclk_mhz = 600.0,
},
{
.state = 4,
.dispclk_mhz = 1372.0,
.dppclk_mhz = 1372.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.dscclk_mhz = 417.0,
.dtbclk_mhz = 600.0,
},
},
.num_states = 5,
.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
.sr_exit_z8_time_us = 50.0,
@ -696,80 +647,50 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
struct clk_limit_table *clk_table = &bw_params->clk_table;
struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
unsigned int i, closest_clk_lvl;
int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
int j;
int i, max_dispclk_mhz = 0, max_dppclk_mhz = 0;
dc_assert_fp_enabled();
// Default clock levels are used for diags, which may lead to overclocking.
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
dcn3_15_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
dcn3_15_ip.max_num_dpp = dc->res_pool->pipe_count;
dcn3_15_soc.num_chans = bw_params->num_channels;
dcn3_15_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
dcn3_15_ip.max_num_dpp = dc->res_pool->pipe_count;
dcn3_15_soc.num_chans = bw_params->num_channels;
ASSERT(clk_table->num_entries);
ASSERT(clk_table->num_entries);
/* Prepass to find max clocks independent of voltage level. */
for (i = 0; i < clk_table->num_entries; ++i) {
if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
}
for (i = 0; i < clk_table->num_entries; i++) {
/* loop backwards*/
for (closest_clk_lvl = 0, j = dcn3_15_soc.num_states - 1; j >= 0; j--) {
if ((unsigned int) dcn3_15_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
closest_clk_lvl = j;
break;
}
}
if (clk_table->num_entries == 1) {
/*smu gives one DPM level, let's take the highest one*/
closest_clk_lvl = dcn3_15_soc.num_states - 1;
}
clock_limits[i].state = i;
/* Clocks dependent on voltage level. */
clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
if (clk_table->num_entries == 1 &&
clock_limits[i].dcfclk_mhz < dcn3_15_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
/*SMU fix not released yet*/
clock_limits[i].dcfclk_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
}
clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
/* Clocks independent of voltage level. */
clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
dcn3_15_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
dcn3_15_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
clock_limits[i].dram_bw_per_chan_gbps = dcn3_15_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
clock_limits[i].dscclk_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
clock_limits[i].dtbclk_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
clock_limits[i].phyclk_d18_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
clock_limits[i].phyclk_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
}
for (i = 0; i < clk_table->num_entries; i++)
dcn3_15_soc.clock_limits[i] = clock_limits[i];
if (clk_table->num_entries) {
dcn3_15_soc.num_states = clk_table->num_entries;
}
/* Setup soc to always use max dispclk/dppclk to avoid odm-to-lower-voltage */
for (i = 0; i < clk_table->num_entries; ++i) {
if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
}
if (max_dispclk_mhz) {
dcn3_15_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
for (i = 0; i < clk_table->num_entries; i++) {
dcn3_15_soc.clock_limits[i].state = i;
/* Clocks dependent on voltage level. */
dcn3_15_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
dcn3_15_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
dcn3_15_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
dcn3_15_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
/* These aren't actually read from smu, but rather set in clk_mgr defaults */
dcn3_15_soc.clock_limits[i].dtbclk_mhz = clk_table->entries[i].dtbclk_mhz;
dcn3_15_soc.clock_limits[i].phyclk_d18_mhz = clk_table->entries[i].phyclk_d18_mhz;
dcn3_15_soc.clock_limits[i].phyclk_mhz = clk_table->entries[i].phyclk_mhz;
/* Clocks independent of voltage level. */
dcn3_15_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
dcn3_15_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
dcn3_15_soc.clock_limits[i].dscclk_mhz = max_dispclk_mhz / 3.0;
}
dcn3_15_soc.num_states = clk_table->num_entries;
/* Set vco to max_dispclk * 2 to make sure the highest dispclk is always available for dml calcs,
* no impact outside of dml validation
*/
dcn3_15_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31);

View File

@ -91,6 +91,7 @@ struct clk_limit_table_entry {
unsigned int dispclk_mhz;
unsigned int dppclk_mhz;
unsigned int phyclk_mhz;
unsigned int phyclk_d18_mhz;
unsigned int wck_ratio;
};

View File

@ -39,6 +39,8 @@
#define DP_BRANCH_HW_REV_20 0x20
#define DP_DEVICE_ID_38EC11 0x38EC11
#define DP_FORCE_PSRSU_CAPABILITY 0x40F
enum ddc_result {
DDC_RESULT_UNKNOWN = 0,
DDC_RESULT_SUCESSFULL,

View File

@ -234,6 +234,8 @@ enum DC_FEATURE_MASK {
DC_EDP_NO_POWER_SEQUENCING = (1 << 4), //0x10, disabled by default
DC_DISABLE_LTTPR_DP1_4A = (1 << 5), //0x20, disabled by default
DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
};
enum DC_DEBUG_MASK {

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,461 @@
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _mp_13_0_0_OFFSET_HEADER
#define _mp_13_0_0_OFFSET_HEADER
// addressBlock: mp_SmuMp0_SmnDec
// base address: 0x0
#define regMP0_SMN_C2PMSG_32 0x0060
#define regMP0_SMN_C2PMSG_32_BASE_IDX 0
#define regMP0_SMN_C2PMSG_33 0x0061
#define regMP0_SMN_C2PMSG_33_BASE_IDX 0
#define regMP0_SMN_C2PMSG_34 0x0062
#define regMP0_SMN_C2PMSG_34_BASE_IDX 0
#define regMP0_SMN_C2PMSG_35 0x0063
#define regMP0_SMN_C2PMSG_35_BASE_IDX 0
#define regMP0_SMN_C2PMSG_36 0x0064
#define regMP0_SMN_C2PMSG_36_BASE_IDX 0
#define regMP0_SMN_C2PMSG_37 0x0065
#define regMP0_SMN_C2PMSG_37_BASE_IDX 0
#define regMP0_SMN_C2PMSG_38 0x0066
#define regMP0_SMN_C2PMSG_38_BASE_IDX 0
#define regMP0_SMN_C2PMSG_39 0x0067
#define regMP0_SMN_C2PMSG_39_BASE_IDX 0
#define regMP0_SMN_C2PMSG_40 0x0068
#define regMP0_SMN_C2PMSG_40_BASE_IDX 0
#define regMP0_SMN_C2PMSG_41 0x0069
#define regMP0_SMN_C2PMSG_41_BASE_IDX 0
#define regMP0_SMN_C2PMSG_42 0x006a
#define regMP0_SMN_C2PMSG_42_BASE_IDX 0
#define regMP0_SMN_C2PMSG_43 0x006b
#define regMP0_SMN_C2PMSG_43_BASE_IDX 0
#define regMP0_SMN_C2PMSG_44 0x006c
#define regMP0_SMN_C2PMSG_44_BASE_IDX 0
#define regMP0_SMN_C2PMSG_45 0x006d
#define regMP0_SMN_C2PMSG_45_BASE_IDX 0
#define regMP0_SMN_C2PMSG_46 0x006e
#define regMP0_SMN_C2PMSG_46_BASE_IDX 0
#define regMP0_SMN_C2PMSG_47 0x006f
#define regMP0_SMN_C2PMSG_47_BASE_IDX 0
#define regMP0_SMN_C2PMSG_48 0x0070
#define regMP0_SMN_C2PMSG_48_BASE_IDX 0
#define regMP0_SMN_C2PMSG_49 0x0071
#define regMP0_SMN_C2PMSG_49_BASE_IDX 0
#define regMP0_SMN_C2PMSG_50 0x0072
#define regMP0_SMN_C2PMSG_50_BASE_IDX 0
#define regMP0_SMN_C2PMSG_51 0x0073
#define regMP0_SMN_C2PMSG_51_BASE_IDX 0
#define regMP0_SMN_C2PMSG_52 0x0074
#define regMP0_SMN_C2PMSG_52_BASE_IDX 0
#define regMP0_SMN_C2PMSG_53 0x0075
#define regMP0_SMN_C2PMSG_53_BASE_IDX 0
#define regMP0_SMN_C2PMSG_54 0x0076
#define regMP0_SMN_C2PMSG_54_BASE_IDX 0
#define regMP0_SMN_C2PMSG_55 0x0077
#define regMP0_SMN_C2PMSG_55_BASE_IDX 0
#define regMP0_SMN_C2PMSG_56 0x0078
#define regMP0_SMN_C2PMSG_56_BASE_IDX 0
#define regMP0_SMN_C2PMSG_57 0x0079
#define regMP0_SMN_C2PMSG_57_BASE_IDX 0
#define regMP0_SMN_C2PMSG_58 0x007a
#define regMP0_SMN_C2PMSG_58_BASE_IDX 0
#define regMP0_SMN_C2PMSG_59 0x007b
#define regMP0_SMN_C2PMSG_59_BASE_IDX 0
#define regMP0_SMN_C2PMSG_60 0x007c
#define regMP0_SMN_C2PMSG_60_BASE_IDX 0
#define regMP0_SMN_C2PMSG_61 0x007d
#define regMP0_SMN_C2PMSG_61_BASE_IDX 0
#define regMP0_SMN_C2PMSG_62 0x007e
#define regMP0_SMN_C2PMSG_62_BASE_IDX 0
#define regMP0_SMN_C2PMSG_63 0x007f
#define regMP0_SMN_C2PMSG_63_BASE_IDX 0
#define regMP0_SMN_C2PMSG_64 0x0080
#define regMP0_SMN_C2PMSG_64_BASE_IDX 0
#define regMP0_SMN_C2PMSG_65 0x0081
#define regMP0_SMN_C2PMSG_65_BASE_IDX 0
#define regMP0_SMN_C2PMSG_66 0x0082
#define regMP0_SMN_C2PMSG_66_BASE_IDX 0
#define regMP0_SMN_C2PMSG_67 0x0083
#define regMP0_SMN_C2PMSG_67_BASE_IDX 0
#define regMP0_SMN_C2PMSG_68 0x0084
#define regMP0_SMN_C2PMSG_68_BASE_IDX 0
#define regMP0_SMN_C2PMSG_69 0x0085
#define regMP0_SMN_C2PMSG_69_BASE_IDX 0
#define regMP0_SMN_C2PMSG_70 0x0086
#define regMP0_SMN_C2PMSG_70_BASE_IDX 0
#define regMP0_SMN_C2PMSG_71 0x0087
#define regMP0_SMN_C2PMSG_71_BASE_IDX 0
#define regMP0_SMN_C2PMSG_72 0x0088
#define regMP0_SMN_C2PMSG_72_BASE_IDX 0
#define regMP0_SMN_C2PMSG_73 0x0089
#define regMP0_SMN_C2PMSG_73_BASE_IDX 0
#define regMP0_SMN_C2PMSG_74 0x008a
#define regMP0_SMN_C2PMSG_74_BASE_IDX 0
#define regMP0_SMN_C2PMSG_75 0x008b
#define regMP0_SMN_C2PMSG_75_BASE_IDX 0
#define regMP0_SMN_C2PMSG_76 0x008c
#define regMP0_SMN_C2PMSG_76_BASE_IDX 0
#define regMP0_SMN_C2PMSG_77 0x008d
#define regMP0_SMN_C2PMSG_77_BASE_IDX 0
#define regMP0_SMN_C2PMSG_78 0x008e
#define regMP0_SMN_C2PMSG_78_BASE_IDX 0
#define regMP0_SMN_C2PMSG_79 0x008f
#define regMP0_SMN_C2PMSG_79_BASE_IDX 0
#define regMP0_SMN_C2PMSG_80 0x0090
#define regMP0_SMN_C2PMSG_80_BASE_IDX 0
#define regMP0_SMN_C2PMSG_81 0x0091
#define regMP0_SMN_C2PMSG_81_BASE_IDX 0
#define regMP0_SMN_C2PMSG_82 0x0092
#define regMP0_SMN_C2PMSG_82_BASE_IDX 0
#define regMP0_SMN_C2PMSG_83 0x0093
#define regMP0_SMN_C2PMSG_83_BASE_IDX 0
#define regMP0_SMN_C2PMSG_84 0x0094
#define regMP0_SMN_C2PMSG_84_BASE_IDX 0
#define regMP0_SMN_C2PMSG_85 0x0095
#define regMP0_SMN_C2PMSG_85_BASE_IDX 0
#define regMP0_SMN_C2PMSG_86 0x0096
#define regMP0_SMN_C2PMSG_86_BASE_IDX 0
#define regMP0_SMN_C2PMSG_87 0x0097
#define regMP0_SMN_C2PMSG_87_BASE_IDX 0
#define regMP0_SMN_C2PMSG_88 0x0098
#define regMP0_SMN_C2PMSG_88_BASE_IDX 0
#define regMP0_SMN_C2PMSG_89 0x0099
#define regMP0_SMN_C2PMSG_89_BASE_IDX 0
#define regMP0_SMN_C2PMSG_90 0x009a
#define regMP0_SMN_C2PMSG_90_BASE_IDX 0
#define regMP0_SMN_C2PMSG_91 0x009b
#define regMP0_SMN_C2PMSG_91_BASE_IDX 0
#define regMP0_SMN_C2PMSG_92 0x009c
#define regMP0_SMN_C2PMSG_92_BASE_IDX 0
#define regMP0_SMN_C2PMSG_93 0x009d
#define regMP0_SMN_C2PMSG_93_BASE_IDX 0
#define regMP0_SMN_C2PMSG_94 0x009e
#define regMP0_SMN_C2PMSG_94_BASE_IDX 0
#define regMP0_SMN_C2PMSG_95 0x009f
#define regMP0_SMN_C2PMSG_95_BASE_IDX 0
#define regMP0_SMN_C2PMSG_96 0x00a0
#define regMP0_SMN_C2PMSG_96_BASE_IDX 0
#define regMP0_SMN_C2PMSG_97 0x00a1
#define regMP0_SMN_C2PMSG_97_BASE_IDX 0
#define regMP0_SMN_C2PMSG_98 0x00a2
#define regMP0_SMN_C2PMSG_98_BASE_IDX 0
#define regMP0_SMN_C2PMSG_99 0x00a3
#define regMP0_SMN_C2PMSG_99_BASE_IDX 0
#define regMP0_SMN_C2PMSG_100 0x00a4
#define regMP0_SMN_C2PMSG_100_BASE_IDX 0
#define regMP0_SMN_C2PMSG_101 0x00a5
#define regMP0_SMN_C2PMSG_101_BASE_IDX 0
#define regMP0_SMN_C2PMSG_102 0x00a6
#define regMP0_SMN_C2PMSG_102_BASE_IDX 0
#define regMP0_SMN_C2PMSG_103 0x00a7
#define regMP0_SMN_C2PMSG_103_BASE_IDX 0
#define regMP0_SMN_IH_CREDIT 0x00c1
#define regMP0_SMN_IH_CREDIT_BASE_IDX 0
#define regMP0_SMN_IH_SW_INT 0x00c2
#define regMP0_SMN_IH_SW_INT_BASE_IDX 0
#define regMP0_SMN_IH_SW_INT_CTRL 0x00c3
#define regMP0_SMN_IH_SW_INT_CTRL_BASE_IDX 0
// addressBlock: mp_SmuMp1_SmnDec
// base address: 0x0
#define regMP1_SMN_C2PMSG_32 0x0260
#define regMP1_SMN_C2PMSG_32_BASE_IDX 0
#define regMP1_SMN_C2PMSG_33 0x0261
#define regMP1_SMN_C2PMSG_33_BASE_IDX 0
#define regMP1_SMN_C2PMSG_34 0x0262
#define regMP1_SMN_C2PMSG_34_BASE_IDX 0
#define regMP1_SMN_C2PMSG_35 0x0263
#define regMP1_SMN_C2PMSG_35_BASE_IDX 0
#define regMP1_SMN_C2PMSG_36 0x0264
#define regMP1_SMN_C2PMSG_36_BASE_IDX 0
#define regMP1_SMN_C2PMSG_37 0x0265
#define regMP1_SMN_C2PMSG_37_BASE_IDX 0
#define regMP1_SMN_C2PMSG_38 0x0266
#define regMP1_SMN_C2PMSG_38_BASE_IDX 0
#define regMP1_SMN_C2PMSG_39 0x0267
#define regMP1_SMN_C2PMSG_39_BASE_IDX 0
#define regMP1_SMN_C2PMSG_40 0x0268
#define regMP1_SMN_C2PMSG_40_BASE_IDX 0
#define regMP1_SMN_C2PMSG_41 0x0269
#define regMP1_SMN_C2PMSG_41_BASE_IDX 0
#define regMP1_SMN_C2PMSG_42 0x026a
#define regMP1_SMN_C2PMSG_42_BASE_IDX 0
#define regMP1_SMN_C2PMSG_43 0x026b
#define regMP1_SMN_C2PMSG_43_BASE_IDX 0
#define regMP1_SMN_C2PMSG_44 0x026c
#define regMP1_SMN_C2PMSG_44_BASE_IDX 0
#define regMP1_SMN_C2PMSG_45 0x026d
#define regMP1_SMN_C2PMSG_45_BASE_IDX 0
#define regMP1_SMN_C2PMSG_46 0x026e
#define regMP1_SMN_C2PMSG_46_BASE_IDX 0
#define regMP1_SMN_C2PMSG_47 0x026f
#define regMP1_SMN_C2PMSG_47_BASE_IDX 0
#define regMP1_SMN_C2PMSG_48 0x0270
#define regMP1_SMN_C2PMSG_48_BASE_IDX 0
#define regMP1_SMN_C2PMSG_49 0x0271
#define regMP1_SMN_C2PMSG_49_BASE_IDX 0
#define regMP1_SMN_C2PMSG_50 0x0272
#define regMP1_SMN_C2PMSG_50_BASE_IDX 0
#define regMP1_SMN_C2PMSG_51 0x0273
#define regMP1_SMN_C2PMSG_51_BASE_IDX 0
#define regMP1_SMN_C2PMSG_52 0x0274
#define regMP1_SMN_C2PMSG_52_BASE_IDX 0
#define regMP1_SMN_C2PMSG_53 0x0275
#define regMP1_SMN_C2PMSG_53_BASE_IDX 0
#define regMP1_SMN_C2PMSG_54 0x0276
#define regMP1_SMN_C2PMSG_54_BASE_IDX 0
#define regMP1_SMN_C2PMSG_55 0x0277
#define regMP1_SMN_C2PMSG_55_BASE_IDX 0
#define regMP1_SMN_C2PMSG_56 0x0278
#define regMP1_SMN_C2PMSG_56_BASE_IDX 0
#define regMP1_SMN_C2PMSG_57 0x0279
#define regMP1_SMN_C2PMSG_57_BASE_IDX 0
#define regMP1_SMN_C2PMSG_58 0x027a
#define regMP1_SMN_C2PMSG_58_BASE_IDX 0
#define regMP1_SMN_C2PMSG_59 0x027b
#define regMP1_SMN_C2PMSG_59_BASE_IDX 0
#define regMP1_SMN_C2PMSG_60 0x027c
#define regMP1_SMN_C2PMSG_60_BASE_IDX 0
#define regMP1_SMN_C2PMSG_61 0x027d
#define regMP1_SMN_C2PMSG_61_BASE_IDX 0
#define regMP1_SMN_C2PMSG_62 0x027e
#define regMP1_SMN_C2PMSG_62_BASE_IDX 0
#define regMP1_SMN_C2PMSG_63 0x027f
#define regMP1_SMN_C2PMSG_63_BASE_IDX 0
#define regMP1_SMN_C2PMSG_64 0x0280
#define regMP1_SMN_C2PMSG_64_BASE_IDX 0
#define regMP1_SMN_C2PMSG_65 0x0281
#define regMP1_SMN_C2PMSG_65_BASE_IDX 0
#define regMP1_SMN_C2PMSG_66 0x0282
#define regMP1_SMN_C2PMSG_66_BASE_IDX 0
#define regMP1_SMN_C2PMSG_67 0x0283
#define regMP1_SMN_C2PMSG_67_BASE_IDX 0
#define regMP1_SMN_C2PMSG_68 0x0284
#define regMP1_SMN_C2PMSG_68_BASE_IDX 0
#define regMP1_SMN_C2PMSG_69 0x0285
#define regMP1_SMN_C2PMSG_69_BASE_IDX 0
#define regMP1_SMN_C2PMSG_70 0x0286
#define regMP1_SMN_C2PMSG_70_BASE_IDX 0
#define regMP1_SMN_C2PMSG_71 0x0287
#define regMP1_SMN_C2PMSG_71_BASE_IDX 0
#define regMP1_SMN_C2PMSG_72 0x0288
#define regMP1_SMN_C2PMSG_72_BASE_IDX 0
#define regMP1_SMN_C2PMSG_73 0x0289
#define regMP1_SMN_C2PMSG_73_BASE_IDX 0
#define regMP1_SMN_C2PMSG_74 0x028a
#define regMP1_SMN_C2PMSG_74_BASE_IDX 0
#define regMP1_SMN_C2PMSG_75 0x028b
#define regMP1_SMN_C2PMSG_75_BASE_IDX 0
#define regMP1_SMN_C2PMSG_76 0x028c
#define regMP1_SMN_C2PMSG_76_BASE_IDX 0
#define regMP1_SMN_C2PMSG_77 0x028d
#define regMP1_SMN_C2PMSG_77_BASE_IDX 0
#define regMP1_SMN_C2PMSG_78 0x028e
#define regMP1_SMN_C2PMSG_78_BASE_IDX 0
#define regMP1_SMN_C2PMSG_79 0x028f
#define regMP1_SMN_C2PMSG_79_BASE_IDX 0
#define regMP1_SMN_C2PMSG_80 0x0290
#define regMP1_SMN_C2PMSG_80_BASE_IDX 0
#define regMP1_SMN_C2PMSG_81 0x0291
#define regMP1_SMN_C2PMSG_81_BASE_IDX 0
#define regMP1_SMN_C2PMSG_82 0x0292
#define regMP1_SMN_C2PMSG_82_BASE_IDX 0
#define regMP1_SMN_C2PMSG_83 0x0293
#define regMP1_SMN_C2PMSG_83_BASE_IDX 0
#define regMP1_SMN_C2PMSG_84 0x0294
#define regMP1_SMN_C2PMSG_84_BASE_IDX 0
#define regMP1_SMN_C2PMSG_85 0x0295
#define regMP1_SMN_C2PMSG_85_BASE_IDX 0
#define regMP1_SMN_C2PMSG_86 0x0296
#define regMP1_SMN_C2PMSG_86_BASE_IDX 0
#define regMP1_SMN_C2PMSG_87 0x0297
#define regMP1_SMN_C2PMSG_87_BASE_IDX 0
#define regMP1_SMN_C2PMSG_88 0x0298
#define regMP1_SMN_C2PMSG_88_BASE_IDX 0
#define regMP1_SMN_C2PMSG_89 0x0299
#define regMP1_SMN_C2PMSG_89_BASE_IDX 0
#define regMP1_SMN_C2PMSG_90 0x029a
#define regMP1_SMN_C2PMSG_90_BASE_IDX 0
#define regMP1_SMN_C2PMSG_91 0x029b
#define regMP1_SMN_C2PMSG_91_BASE_IDX 0
#define regMP1_SMN_C2PMSG_92 0x029c
#define regMP1_SMN_C2PMSG_92_BASE_IDX 0
#define regMP1_SMN_C2PMSG_93 0x029d
#define regMP1_SMN_C2PMSG_93_BASE_IDX 0
#define regMP1_SMN_C2PMSG_94 0x029e
#define regMP1_SMN_C2PMSG_94_BASE_IDX 0
#define regMP1_SMN_C2PMSG_95 0x029f
#define regMP1_SMN_C2PMSG_95_BASE_IDX 0
#define regMP1_SMN_C2PMSG_96 0x02a0
#define regMP1_SMN_C2PMSG_96_BASE_IDX 0
#define regMP1_SMN_C2PMSG_97 0x02a1
#define regMP1_SMN_C2PMSG_97_BASE_IDX 0
#define regMP1_SMN_C2PMSG_98 0x02a2
#define regMP1_SMN_C2PMSG_98_BASE_IDX 0
#define regMP1_SMN_C2PMSG_99 0x02a3
#define regMP1_SMN_C2PMSG_99_BASE_IDX 0
#define regMP1_SMN_C2PMSG_100 0x02a4
#define regMP1_SMN_C2PMSG_100_BASE_IDX 0
#define regMP1_SMN_C2PMSG_101 0x02a5
#define regMP1_SMN_C2PMSG_101_BASE_IDX 0
#define regMP1_SMN_C2PMSG_102 0x02a6
#define regMP1_SMN_C2PMSG_102_BASE_IDX 0
#define regMP1_SMN_C2PMSG_103 0x02a7
#define regMP1_SMN_C2PMSG_103_BASE_IDX 0
#define regMP1_SMN_C2PMSG_104 0x02a8
#define regMP1_SMN_C2PMSG_104_BASE_IDX 0
#define regMP1_SMN_C2PMSG_105 0x02a9
#define regMP1_SMN_C2PMSG_105_BASE_IDX 0
#define regMP1_SMN_C2PMSG_106 0x02aa
#define regMP1_SMN_C2PMSG_106_BASE_IDX 0
#define regMP1_SMN_C2PMSG_107 0x02ab
#define regMP1_SMN_C2PMSG_107_BASE_IDX 0
#define regMP1_SMN_C2PMSG_108 0x02ac
#define regMP1_SMN_C2PMSG_108_BASE_IDX 0
#define regMP1_SMN_C2PMSG_109 0x02ad
#define regMP1_SMN_C2PMSG_109_BASE_IDX 0
#define regMP1_SMN_C2PMSG_110 0x02ae
#define regMP1_SMN_C2PMSG_110_BASE_IDX 0
#define regMP1_SMN_C2PMSG_111 0x02af
#define regMP1_SMN_C2PMSG_111_BASE_IDX 0
#define regMP1_SMN_C2PMSG_112 0x02b0
#define regMP1_SMN_C2PMSG_112_BASE_IDX 0
#define regMP1_SMN_C2PMSG_113 0x02b1
#define regMP1_SMN_C2PMSG_113_BASE_IDX 0
#define regMP1_SMN_C2PMSG_114 0x02b2
#define regMP1_SMN_C2PMSG_114_BASE_IDX 0
#define regMP1_SMN_C2PMSG_115 0x02b3
#define regMP1_SMN_C2PMSG_115_BASE_IDX 0
#define regMP1_SMN_C2PMSG_116 0x02b4
#define regMP1_SMN_C2PMSG_116_BASE_IDX 0
#define regMP1_SMN_C2PMSG_117 0x02b5
#define regMP1_SMN_C2PMSG_117_BASE_IDX 0
#define regMP1_SMN_C2PMSG_118 0x02b6
#define regMP1_SMN_C2PMSG_118_BASE_IDX 0
#define regMP1_SMN_C2PMSG_119 0x02b7
#define regMP1_SMN_C2PMSG_119_BASE_IDX 0
#define regMP1_SMN_C2PMSG_120 0x02b8
#define regMP1_SMN_C2PMSG_120_BASE_IDX 0
#define regMP1_SMN_C2PMSG_121 0x02b9
#define regMP1_SMN_C2PMSG_121_BASE_IDX 0
#define regMP1_SMN_C2PMSG_122 0x02ba
#define regMP1_SMN_C2PMSG_122_BASE_IDX 0
#define regMP1_SMN_C2PMSG_123 0x02bb
#define regMP1_SMN_C2PMSG_123_BASE_IDX 0
#define regMP1_SMN_C2PMSG_124 0x02bc
#define regMP1_SMN_C2PMSG_124_BASE_IDX 0
#define regMP1_SMN_C2PMSG_125 0x02bd
#define regMP1_SMN_C2PMSG_125_BASE_IDX 0
#define regMP1_SMN_C2PMSG_126 0x02be
#define regMP1_SMN_C2PMSG_126_BASE_IDX 0
#define regMP1_SMN_C2PMSG_127 0x02bf
#define regMP1_SMN_C2PMSG_127_BASE_IDX 0
#define regMP1_SMN_IH_CREDIT 0x02c1
#define regMP1_SMN_IH_CREDIT_BASE_IDX 0
#define regMP1_SMN_IH_SW_INT 0x02c2
#define regMP1_SMN_IH_SW_INT_BASE_IDX 0
#define regMP1_SMN_IH_SW_INT_CTRL 0x02c3
#define regMP1_SMN_IH_SW_INT_CTRL_BASE_IDX 0
#define regMP1_SMN_FPS_CNT 0x02c4
#define regMP1_SMN_FPS_CNT_BASE_IDX 0
#define regMP1_SMN_PUB_CTRL 0x02c5
#define regMP1_SMN_PUB_CTRL_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH0 0x0340
#define regMP1_SMN_EXT_SCRATCH0_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH1 0x0341
#define regMP1_SMN_EXT_SCRATCH1_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH2 0x0342
#define regMP1_SMN_EXT_SCRATCH2_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH3 0x0343
#define regMP1_SMN_EXT_SCRATCH3_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH4 0x0344
#define regMP1_SMN_EXT_SCRATCH4_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH5 0x0345
#define regMP1_SMN_EXT_SCRATCH5_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH6 0x0346
#define regMP1_SMN_EXT_SCRATCH6_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH7 0x0347
#define regMP1_SMN_EXT_SCRATCH7_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH8 0x0348
#define regMP1_SMN_EXT_SCRATCH8_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH10 0x034a
#define regMP1_SMN_EXT_SCRATCH10_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH11 0x034b
#define regMP1_SMN_EXT_SCRATCH11_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH12 0x034c
#define regMP1_SMN_EXT_SCRATCH12_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH13 0x034d
#define regMP1_SMN_EXT_SCRATCH13_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH14 0x034e
#define regMP1_SMN_EXT_SCRATCH14_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH15 0x034f
#define regMP1_SMN_EXT_SCRATCH15_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH16 0x0350
#define regMP1_SMN_EXT_SCRATCH16_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH17 0x0351
#define regMP1_SMN_EXT_SCRATCH17_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH18 0x0352
#define regMP1_SMN_EXT_SCRATCH18_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH19 0x0353
#define regMP1_SMN_EXT_SCRATCH19_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH20 0x0354
#define regMP1_SMN_EXT_SCRATCH20_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH21 0x0355
#define regMP1_SMN_EXT_SCRATCH21_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH22 0x0356
#define regMP1_SMN_EXT_SCRATCH22_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH23 0x0357
#define regMP1_SMN_EXT_SCRATCH23_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH24 0x0358
#define regMP1_SMN_EXT_SCRATCH24_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH25 0x0359
#define regMP1_SMN_EXT_SCRATCH25_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH26 0x035a
#define regMP1_SMN_EXT_SCRATCH26_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH27 0x035b
#define regMP1_SMN_EXT_SCRATCH27_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH28 0x035c
#define regMP1_SMN_EXT_SCRATCH28_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH29 0x035d
#define regMP1_SMN_EXT_SCRATCH29_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH30 0x035e
#define regMP1_SMN_EXT_SCRATCH30_BASE_IDX 0
#define regMP1_SMN_EXT_SCRATCH31 0x035f
#define regMP1_SMN_EXT_SCRATCH31_BASE_IDX 0
// addressBlock: mp_SmuMp1Pub_CruDec
// base address: 0x0
#define regMP1_FIRMWARE_FLAGS 0xbee009
#define regMP1_FIRMWARE_FLAGS_BASE_IDX 0
// addressBlock: mp_SmuMpIOPub_CruDec
// base address: 0x0
// NOTE(review): same raw offset (0xbee009) as regMP1_FIRMWARE_FLAGS above;
// presumably the two are distinguished by a different aperture/address block
// selected elsewhere — confirm against the register spec before treating
// them as independent registers.
#define regMPIO_FIRMWARE_FLAGS 0xbee009
#define regMPIO_FIRMWARE_FLAGS_BASE_IDX 0
#endif
/*
 * File boundary: the mp_13_0_0 register-offset header ends above (#endif);
 * the mp_13_0_0 shift/mask header (_mp_13_0_0_SH_MASK_HEADER) begins below.
 * The lines replaced here ("View File", "@ -0,0 +1,682 @@") were web-view
 * extraction artifacts, not source content.
 */
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
// Register bit-field shift/mask definitions for the MP (SMU) IP block,
// version 13.0.0. Each register field gets a pair of macros:
//   <REG>__<FIELD>__SHIFT — the field's starting bit position
//   <REG>__<FIELD>_MASK   — the field's bit mask within the 32-bit register
// NOTE(review): this header appears auto-generated from the hardware register
// database — do not hand-edit individual values; regenerate instead (confirm
// the generation source before modifying).
#ifndef _mp_13_0_0_SH_MASK_HEADER
#define _mp_13_0_0_SH_MASK_HEADER
// addressBlock: mp_SmuMp0_SmnDec
//MP0_SMN_C2PMSG_32
#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_33
#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_34
#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_35
#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_36
#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_37
#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_38
#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_39
#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_40
#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_41
#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_42
#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_43
#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_44
#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_45
#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_46
#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_47
#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_48
#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_49
#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_50
#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_51
#define MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_52
#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_53
#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_54
#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_55
#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_56
#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_57
#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_58
#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_59
#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_60
#define MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_61
#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_62
#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_63
#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_64
#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_65
#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_66
#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_67
#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_68
#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_69
#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_70
#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_71
#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_72
#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_73
#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_74
#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_75
#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_76
#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_77
#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_78
#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_79
#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_80
#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_81
#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_82
#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_83
#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_84
#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_85
#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_86
#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_87
#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_88
#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_89
#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_90
#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_91
#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_92
#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_93
#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_94
#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_95
#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_96
#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_97
#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_98
#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_99
#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_100
#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_101
#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_102
#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_C2PMSG_103
#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
//MP0_SMN_IH_CREDIT
#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
//MP0_SMN_IH_SW_INT
#define MP0_SMN_IH_SW_INT__ID__SHIFT 0x0
#define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x8
#define MP0_SMN_IH_SW_INT__ID_MASK 0x000000FFL
#define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000100L
//MP0_SMN_IH_SW_INT_CTRL
#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
// addressBlock: mp_SmuMp1_SmnDec
//MP1_SMN_C2PMSG_32
#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_33
#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_34
#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_35
#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_36
#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_37
#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_38
#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_39
#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_40
#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_41
#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_42
#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_43
#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_44
#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_45
#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_46
#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_47
#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_48
#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_49
#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_50
#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_51
#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_52
#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_53
#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_54
#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_55
#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_56
#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_57
#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_58
#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_59
#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_60
#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_61
#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_62
#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_63
#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_64
#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_65
#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_66
#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_67
#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_68
#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_69
#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_70
#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_71
#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_72
#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_73
#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_74
#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_75
#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_76
#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_77
#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_78
#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_79
#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_80
#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_81
#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_82
#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_83
#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_84
#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_85
#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_86
#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_87
#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_88
#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_89
#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_90
#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_91
#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_92
#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_93
#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_94
#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_95
#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_96
#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_97
#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_98
#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_99
#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_100
#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_101
#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_102
#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_103
#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_104
#define MP1_SMN_C2PMSG_104__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_104__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_105
#define MP1_SMN_C2PMSG_105__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_105__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_106
#define MP1_SMN_C2PMSG_106__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_106__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_107
#define MP1_SMN_C2PMSG_107__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_107__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_108
#define MP1_SMN_C2PMSG_108__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_108__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_109
#define MP1_SMN_C2PMSG_109__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_109__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_110
#define MP1_SMN_C2PMSG_110__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_110__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_111
#define MP1_SMN_C2PMSG_111__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_111__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_112
#define MP1_SMN_C2PMSG_112__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_112__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_113
#define MP1_SMN_C2PMSG_113__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_113__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_114
#define MP1_SMN_C2PMSG_114__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_114__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_115
#define MP1_SMN_C2PMSG_115__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_115__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_116
#define MP1_SMN_C2PMSG_116__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_116__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_117
#define MP1_SMN_C2PMSG_117__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_117__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_118
#define MP1_SMN_C2PMSG_118__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_118__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_119
#define MP1_SMN_C2PMSG_119__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_119__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_120
#define MP1_SMN_C2PMSG_120__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_120__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_121
#define MP1_SMN_C2PMSG_121__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_121__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_122
#define MP1_SMN_C2PMSG_122__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_122__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_123
#define MP1_SMN_C2PMSG_123__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_123__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_124
#define MP1_SMN_C2PMSG_124__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_124__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_125
#define MP1_SMN_C2PMSG_125__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_125__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_126
#define MP1_SMN_C2PMSG_126__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_126__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_C2PMSG_127
#define MP1_SMN_C2PMSG_127__CONTENT__SHIFT 0x0
#define MP1_SMN_C2PMSG_127__CONTENT_MASK 0xFFFFFFFFL
//MP1_SMN_IH_CREDIT
#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
//MP1_SMN_IH_SW_INT
#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0
#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8
#define MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL
#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L
//MP1_SMN_IH_SW_INT_CTRL
#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
//MP1_SMN_FPS_CNT
#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0
#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
//MP1_SMN_PUB_CTRL
#define MP1_SMN_PUB_CTRL__LX3_RESET__SHIFT 0x0
#define MP1_SMN_PUB_CTRL__LX3_RESET_MASK 0x00000001L
//MP1_SMN_EXT_SCRATCH0
#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH1
#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH2
#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH3
#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH4
#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH5
#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH6
#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH7
#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH8
#define MP1_SMN_EXT_SCRATCH8__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH8__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH10
#define MP1_SMN_EXT_SCRATCH10__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH10__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH11
#define MP1_SMN_EXT_SCRATCH11__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH11__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH12
#define MP1_SMN_EXT_SCRATCH12__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH12__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH13
#define MP1_SMN_EXT_SCRATCH13__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH13__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH14
#define MP1_SMN_EXT_SCRATCH14__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH14__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH15
#define MP1_SMN_EXT_SCRATCH15__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH15__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH16
#define MP1_SMN_EXT_SCRATCH16__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH16__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH17
#define MP1_SMN_EXT_SCRATCH17__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH17__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH18
#define MP1_SMN_EXT_SCRATCH18__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH18__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH19
#define MP1_SMN_EXT_SCRATCH19__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH19__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH20
#define MP1_SMN_EXT_SCRATCH20__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH20__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH21
#define MP1_SMN_EXT_SCRATCH21__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH21__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH22
#define MP1_SMN_EXT_SCRATCH22__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH22__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH23
#define MP1_SMN_EXT_SCRATCH23__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH23__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH24
#define MP1_SMN_EXT_SCRATCH24__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH24__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH25
#define MP1_SMN_EXT_SCRATCH25__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH25__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH26
#define MP1_SMN_EXT_SCRATCH26__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH26__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH27
#define MP1_SMN_EXT_SCRATCH27__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH27__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH28
#define MP1_SMN_EXT_SCRATCH28__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH28__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH29
#define MP1_SMN_EXT_SCRATCH29__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH29__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH30
#define MP1_SMN_EXT_SCRATCH30__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH30__DATA_MASK 0xFFFFFFFFL
//MP1_SMN_EXT_SCRATCH31
#define MP1_SMN_EXT_SCRATCH31__DATA__SHIFT 0x0
#define MP1_SMN_EXT_SCRATCH31__DATA_MASK 0xFFFFFFFFL
// addressBlock: mp_SmuMp1Pub_CruDec
//MP1_FIRMWARE_FLAGS
#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
#define MP1_FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x00000001L
#define MP1_FIRMWARE_FLAGS__RESERVED_MASK 0xFFFFFFFEL
// addressBlock: mp_SmuMpIOPub_CruDec
//MPIO_FIRMWARE_FLAGS
#define MPIO_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0
#define MPIO_FIRMWARE_FLAGS__RESERVED__SHIFT 0x1
#define MPIO_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x00000001L
#define MPIO_FIRMWARE_FLAGS__RESERVED_MASK 0xFFFFFFFEL
#endif

View File

@ -4665,7 +4665,7 @@ typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
UCHAR Reserved[3]; // for potential expansion
}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
//Related definitions, all records are differnt but they have a commond header
//Related definitions, all records are different but they have a common header
typedef struct _ATOM_COMMON_RECORD_HEADER
{
UCHAR ucRecordType; //An enum to indicate the record type

View File

@ -3,7 +3,7 @@
* File Name atomfirmware.h
* Project This is an interface header file between atombios and OS GPU drivers for SoC15 products
*
* Description header file of general definitions for OS nd pre-OS video drivers
* Description header file of general definitions for OS and pre-OS video drivers
*
* Copyright 2014 Advanced Micro Devices, Inc.
*
@ -1673,6 +1673,39 @@ struct atom_gfx_info_v2_7 {
uint32_t reserved2[6];
};
/*
 * GFX IP parameter table, v3.0.  This mirrors a data table laid down by the
 * vbios, so the field order, types and total size are ABI: do not reorder,
 * resize or insert fields.
 */
struct atom_gfx_info_v3_0 {
	struct atom_common_table_header table_header;	/* standard atom table header */
	uint8_t gfxip_min_ver;			/* lowest GFX IP version this table applies to */
	uint8_t gfxip_max_ver;			/* highest GFX IP version this table applies to */
	uint8_t max_shader_engines;
	uint8_t max_tile_pipes;
	uint8_t max_cu_per_sh;			/* compute units per shader array */
	uint8_t max_sh_per_se;			/* shader arrays per shader engine */
	uint8_t max_backends_per_se;		/* render backends per shader engine */
	uint8_t max_texture_channel_caches;
	/* Register offsets for the LSDMA queue-0 ring buffer and control regs */
	uint32_t regaddr_lsdma_queue0_rb_rptr;
	uint32_t regaddr_lsdma_queue0_rb_rptr_hi;
	uint32_t regaddr_lsdma_queue0_rb_wptr;
	uint32_t regaddr_lsdma_queue0_rb_wptr_hi;
	uint32_t regaddr_lsdma_command;
	uint32_t regaddr_lsdma_status;
	uint32_t regaddr_golden_tsc_count_lower;
	uint32_t golden_tsc_count_lower_refclk;	/* refclk for the golden TSC counter — units not stated here; confirm against vbios spec */
	/* Actually enabled (post-harvest) resource counts */
	uint8_t active_wgp_per_se;
	uint8_t active_rb_per_se;
	uint8_t active_se;
	uint8_t reserved1;
	uint32_t sram_rm_fuses_val;
	uint32_t sram_custom_rm_fuses_val;
	uint32_t inactive_sa_mask;		/* bitmask of harvested shader arrays */
	uint32_t gc_config;
	uint8_t inactive_wgp[16];		/* per-SA harvested-WGP masks — TODO confirm indexing convention */
	uint8_t inactive_rb[16];		/* per-SA harvested-RB masks — TODO confirm indexing convention */
	uint32_t gdfll_as_wait_ctrl_val;
	uint32_t gdfll_as_step_ctrl_val;
	uint32_t reserved[8];			/* pad for future expansion; keeps struct size stable */
};
/*
***************************************************************************
Data Table smu_info structure
@ -2792,6 +2825,51 @@ struct atom_vram_info_header_v2_3 {
struct atom_vram_module_v9 vram_module[16]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
};
/*
***************************************************************************
Data Table vram_info v3.0 structure
***************************************************************************
*/
/*
 * Per-VRAM-module descriptor for vram_info v3.0.  One entry per populated
 * memory module; binary layout is fixed by the vbios — do not alter fields.
 */
struct atom_vram_module_v3_0 {
	uint8_t density;			/* memory density code */
	uint8_t tunningset_id;			/* tuning-set selector (spelling follows vbios table) */
	uint8_t ext_memory_id;
	uint8_t dram_vendor_id;
	/* Offsets to per-module sub-tables — presumably relative to the
	 * vram_info table start; verify against the atomfirmware spec. */
	uint16_t dram_info_offset;
	uint16_t mem_tuning_offset;
	uint16_t tmrs_seq_offset;		/* TMRS (mode-register) sequence offset */
	uint16_t reserved1;
	uint32_t dram_size_per_ch;		/* DRAM size per channel — units not stated here */
	uint32_t reserved[3];
	char dram_pnstring[40];			/* DRAM part-number string */
};
/*
 * vram_info data table header, v3.0.  Describes the board's memory
 * configuration and locates the tuning/init sub-tables.  Binary layout is
 * ABI with the vbios — do not reorder or resize fields.
 */
struct atom_vram_info_header_v3_0 {
	struct atom_common_table_header table_header;	/* standard atom table header */
	/* Sub-table offsets — presumably relative to the start of this table;
	 * confirm against the atomfirmware spec before use. */
	uint16_t mem_tuning_table_offset;
	uint16_t dram_info_table_offset;
	uint16_t tmrs_table_offset;
	uint16_t mc_init_table_offset;
	uint16_t dram_data_remap_table_offset;
	uint16_t umc_emuinittable_offset;
	uint16_t reserved_sub_table_offset[2];
	uint8_t vram_module_num;		/* number of valid entries in vram_module[] below */
	uint8_t umcip_min_ver;			/* lowest UMC IP version covered */
	uint8_t umcip_max_ver;			/* highest UMC IP version covered */
	uint8_t mc_phy_tile_num;
	uint8_t memory_type;			/* memory type code (e.g. GDDR/HBM enum from vbios) — confirm enum source */
	uint8_t channel_num;
	uint8_t channel_width;
	uint8_t reserved1;
	uint32_t channel_enable;		/* channel enable bitmask */
	uint32_t channel1_enable;		/* second channel-enable word — role vs channel_enable not stated here */
	uint32_t feature_enable;
	uint32_t feature1_enable;
	uint32_t hardcode_mem_size;
	uint32_t reserved4[4];
	struct atom_vram_module_v3_0 vram_module[8];	/* sized for allocation; valid count is vram_module_num */
};
struct atom_umc_register_addr_info{
uint32_t umc_register_addr:24;
uint32_t umc_reg_type_ind:1;

View File

@ -27,15 +27,19 @@
#define PSP_HEADER_SIZE 256
#define BINARY_SIGNATURE 0x28211407
#define DISCOVERY_TABLE_SIGNATURE 0x53445049
#define GC_TABLE_ID 0x4347
#define HARVEST_TABLE_SIGNATURE 0x56524148
#define VCN_INFO_TABLE_ID 0x004E4356
#define MALL_INFO_TABLE_ID 0x4D414C4C
typedef enum
{
IP_DISCOVERY = 0,
GC,
HARVEST_INFO,
TABLE_4,
VCN_INFO,
MALL_INFO,
RESERVED_1,
RESERVED_2,
TOTAL_TABLES = 6
} table;
@ -96,6 +100,24 @@ typedef struct ip
uint32_t base_address[]; /* variable number of Addresses */
} ip;
/*
 * IP discovery list entry, v3.  Parsed directly out of the discovery binary
 * in VRAM, so the field layout (including the endian-dependent bitfield
 * order below) must match the blob exactly.
 */
typedef struct ip_v3
{
	uint16_t hw_id;                         /* Hardware ID */
	uint8_t instance_number;                /* Instance number for the IP */
	uint8_t num_base_address;               /* Number of base addresses*/
	uint8_t major;                          /* Hardware ID.major version */
	uint8_t minor;                          /* Hardware ID.minor version */
	uint8_t revision;                       /* Hardware ID.revision version */
#if defined(__BIG_ENDIAN)
	uint8_t variant : 4;                    /* HW variant */
	uint8_t sub_revision : 4;               /* HCID Sub-Revision */
#else
	uint8_t sub_revision : 4;               /* HCID Sub-Revision */
	uint8_t variant : 4;                    /* HW variant */
#endif
	/*
	 * NOTE(review): declared as [1] rather than a C99 flexible array
	 * member, so sizeof(ip_v3) already includes one address entry; any
	 * code walking the list by sizeof must account for that.
	 */
	uint32_t base_address[1];               /* Base Address list. Corresponds to the num_base_address field*/
} ip_v3;
typedef struct die_header
{
uint16_t die_id;
@ -108,7 +130,11 @@ typedef struct ip_structure
struct die
{
die_header *die_header;
ip *ip_list;
union
{
ip *ip_list;
ip_v3 *ip_v3_list;
}; /* IP list. Variable size*/
} die;
} ip_structure;
@ -170,6 +196,40 @@ struct gc_info_v1_1 {
uint32_t gc_num_tcps;
};
/*
 * GC (graphics core) configuration info, v1.2, read from the discovery
 * binary.  Extends v1.1 with TCP/SQC/GL1C/GL2C cache geometry.  Field
 * layout is ABI with the discovery table — do not reorder or resize.
 */
struct gc_info_v1_2 {
	struct gpu_info_header header;		/* common table id/version/size header */
	uint32_t gc_num_se;			/* shader engines */
	uint32_t gc_num_wgp0_per_sa;		/* WGPs (group 0) per shader array */
	uint32_t gc_num_wgp1_per_sa;		/* WGPs (group 1) per shader array */
	uint32_t gc_num_rb_per_se;		/* render backends per shader engine */
	uint32_t gc_num_gl2c;
	uint32_t gc_num_gprs;
	uint32_t gc_num_max_gs_thds;
	uint32_t gc_gs_table_depth;
	uint32_t gc_gsprim_buff_depth;
	uint32_t gc_parameter_cache_depth;
	uint32_t gc_double_offchip_lds_buffer;
	uint32_t gc_wave_size;
	uint32_t gc_max_waves_per_simd;
	uint32_t gc_max_scratch_slots_per_cu;
	uint32_t gc_lds_size;
	uint32_t gc_num_sc_per_se;
	uint32_t gc_num_sa_per_se;
	uint32_t gc_num_packer_per_sc;
	uint32_t gc_num_gl2a;
	uint32_t gc_num_tcp_per_sa;
	uint32_t gc_num_sdp_interface;
	uint32_t gc_num_tcps;
	/* Fields below are new relative to gc_info_v1_1 */
	uint32_t gc_num_tcp_per_wpg;
	uint32_t gc_tcp_l1_size;
	uint32_t gc_num_sqc_per_wgp;
	uint32_t gc_l1_instruction_cache_size_per_sqc;
	uint32_t gc_l1_data_cache_size_per_sqc;
	uint32_t gc_gl1c_per_sa;
	uint32_t gc_gl1c_size_per_instance;
	uint32_t gc_gl2c_per_gpu;
};
struct gc_info_v2_0 {
struct gpu_info_header header;
@ -208,6 +268,54 @@ typedef struct harvest_table {
harvest_info list[32];
} harvest_table;
/* Header preceding the MALL (memory access at last level) info table
 * in the discovery binary; identifies the table and its payload size. */
struct mall_info_header {
	uint32_t table_id; /* table ID */
	uint16_t version_major; /* table version */
	uint16_t version_minor; /* table version */
	uint32_t size_bytes; /* size of the entire header+data in bytes */
};
/* MALL info table, v1.0.  Binary layout is ABI with the discovery table. */
struct mall_info_v1_0 {
	struct mall_info_header header;
	uint32_t mall_size_per_m;	/* MALL size per memory instance — units not stated here; confirm */
	uint32_t m_s_present;
	uint32_t m_half_use;
	uint32_t m_mall_config;
	uint32_t reserved[5];		/* pad for future expansion */
};
/* Fixed capacity of vcn_info_v1_0.instance_info[]; the valid entry count
 * is carried separately in vcn_info_v1_0.num_of_instances. */
#define VCN_INFO_TABLE_MAX_NUM_INSTANCES 4

/* Header preceding the VCN (video codec) info table in the discovery
 * binary; identifies the table and its payload size. */
struct vcn_info_header {
	uint32_t table_id; /* table ID */
	uint16_t version_major; /* table version */
	uint16_t version_minor; /* table version */
	uint32_t size_bytes; /* size of the entire header+data in bytes */
};
/*
 * Per-VCN-instance capability record.  fuse_data carries per-codec disable
 * fuses (a set bit marks that codec as fused off on this instance).
 */
struct vcn_instance_info_v1_0
{
	uint32_t instance_num;		/* VCN IP instance number. 0 - VCN0; 1 - VCN1 etc*/
	union _fuse_data {
		struct {
			uint32_t av1_disabled : 1;
			uint32_t vp9_disabled : 1;
			uint32_t hevc_disabled : 1;
			uint32_t h264_disabled : 1;
			uint32_t reserved : 28;
		} bits;
		uint32_t all_bits;	/* whole fuse word, for raw access */
	} fuse_data;
	uint32_t reserved[2];
};
/* VCN info table, v1.0: fixed-capacity array of per-instance records;
 * only the first num_of_instances entries are valid. */
struct vcn_info_v1_0 {
	struct vcn_info_header header;
	uint32_t num_of_instances; /* number of entries used in instance_info below*/
	struct vcn_instance_info_v1_0 instance_info[VCN_INFO_TABLE_MAX_NUM_INSTANCES];
	uint32_t reserved[4];
};
#pragma pack()
#endif

View File

@ -1623,19 +1623,7 @@ static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate)
static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)
{
u8 i;
struct amdgpu_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
for (i = 0; i < table->count; i++) {
if (table->entries[i].clk >= 0) /* XXX */
break;
}
if (i >= table->count)
i = table->count - 1;
return i;
return 0;
}
static void kv_update_acp_boot_level(struct amdgpu_device *adev)

View File

@ -44,7 +44,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
evergreen.o evergreen_cs.o \
evergreen_hdmi.o radeon_trace_points.o ni.o \
atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
radeon_prime.o cik.o cik_blit_shaders.o \
radeon_prime.o cik.o \
r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \

View File

@ -3599,7 +3599,7 @@ typedef struct _ATOM_LCD_RTS_RECORD
UCHAR ucRTSValue;
}ATOM_LCD_RTS_RECORD;
//!! If the record below exits, it shoud always be the first record for easy use in command table!!!
//!! If the record below exists, it should always be the first record for easy use in command table!!!
// The record below is only used when LVDS_Info is present. From ATOM_LVDS_INFO_V12, use ucLCDPanel_SpecialHandlingCap instead.
typedef struct _ATOM_LCD_MODE_CONTROL_CAP
{
@ -3823,7 +3823,7 @@ typedef struct _ATOM_DPCD_INFO
// Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm
// at running time.
// note2: From RV770, the memory is more than 32bit addressable, so we will change
// ucTableFormatRevision=1,ucTableContentRevision=4, the strcuture remains
// ucTableFormatRevision=1,ucTableContentRevision=4, the structure remains
// exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware
// (in offset to start of memory address) is KB aligned instead of byte aligned.
/***********************************************************************************/
@ -3858,7 +3858,7 @@ typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE
ATOM_FIRMWARE_VRAM_RESERVE_INFO asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
}ATOM_VRAM_USAGE_BY_FIRMWARE;
// change verion to 1.5, when allow driver to allocate the vram area for command table access.
// change version to 1.5, to allow the driver to allocate the vram area for command table access.
typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5
{
ULONG ulStartAddrUsedByFirmware;

View File

@ -1,246 +0,0 @@
/*
* Copyright 2012 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Alex Deucher <alexander.deucher@amd.com>
*/
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/kernel.h>
const u32 cik_default_state[] =
{
0xc0066900,
0x00000000,
0x00000060, /* DB_RENDER_CONTROL */
0x00000000, /* DB_COUNT_CONTROL */
0x00000000, /* DB_DEPTH_VIEW */
0x0000002a, /* DB_RENDER_OVERRIDE */
0x00000000, /* DB_RENDER_OVERRIDE2 */
0x00000000, /* DB_HTILE_DATA_BASE */
0xc0046900,
0x00000008,
0x00000000, /* DB_DEPTH_BOUNDS_MIN */
0x00000000, /* DB_DEPTH_BOUNDS_MAX */
0x00000000, /* DB_STENCIL_CLEAR */
0x00000000, /* DB_DEPTH_CLEAR */
0xc0036900,
0x0000000f,
0x00000000, /* DB_DEPTH_INFO */
0x00000000, /* DB_Z_INFO */
0x00000000, /* DB_STENCIL_INFO */
0xc0016900,
0x00000080,
0x00000000, /* PA_SC_WINDOW_OFFSET */
0xc00d6900,
0x00000083,
0x0000ffff, /* PA_SC_CLIPRECT_RULE */
0x00000000, /* PA_SC_CLIPRECT_0_TL */
0x20002000, /* PA_SC_CLIPRECT_0_BR */
0x00000000,
0x20002000,
0x00000000,
0x20002000,
0x00000000,
0x20002000,
0xaaaaaaaa, /* PA_SC_EDGERULE */
0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
0x0000000f, /* CB_TARGET_MASK */
0x0000000f, /* CB_SHADER_MASK */
0xc0226900,
0x00000094,
0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x00000000, /* PA_SC_VPORT_ZMIN_0 */
0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
0xc0046900,
0x00000100,
0xffffffff, /* VGT_MAX_VTX_INDX */
0x00000000, /* VGT_MIN_VTX_INDX */
0x00000000, /* VGT_INDX_OFFSET */
0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
0xc0046900,
0x00000105,
0x00000000, /* CB_BLEND_RED */
0x00000000, /* CB_BLEND_GREEN */
0x00000000, /* CB_BLEND_BLUE */
0x00000000, /* CB_BLEND_ALPHA */
0xc0016900,
0x000001e0,
0x00000000, /* CB_BLEND0_CONTROL */
0xc00c6900,
0x00000200,
0x00000000, /* DB_DEPTH_CONTROL */
0x00000000, /* DB_EQAA */
0x00cc0010, /* CB_COLOR_CONTROL */
0x00000210, /* DB_SHADER_CONTROL */
0x00010000, /* PA_CL_CLIP_CNTL */
0x00000004, /* PA_SU_SC_MODE_CNTL */
0x00000100, /* PA_CL_VTE_CNTL */
0x00000000, /* PA_CL_VS_OUT_CNTL */
0x00000000, /* PA_CL_NANINF_CNTL */
0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
0xc0116900,
0x00000280,
0x00000000, /* PA_SU_POINT_SIZE */
0x00000000, /* PA_SU_POINT_MINMAX */
0x00000008, /* PA_SU_LINE_CNTL */
0x00000000, /* PA_SC_LINE_STIPPLE */
0x00000000, /* VGT_OUTPUT_PATH_CNTL */
0x00000000, /* VGT_HOS_CNTL */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000, /* VGT_GS_MODE */
0xc0026900,
0x00000292,
0x00000000, /* PA_SC_MODE_CNTL_0 */
0x00000000, /* PA_SC_MODE_CNTL_1 */
0xc0016900,
0x000002a1,
0x00000000, /* VGT_PRIMITIVEID_EN */
0xc0016900,
0x000002a5,
0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
0xc0026900,
0x000002a8,
0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
0x00000000,
0xc0026900,
0x000002ad,
0x00000000, /* VGT_REUSE_OFF */
0x00000000,
0xc0016900,
0x000002d5,
0x00000000, /* VGT_SHADER_STAGES_EN */
0xc0016900,
0x000002dc,
0x0000aa00, /* DB_ALPHA_TO_MASK */
0xc0066900,
0x000002de,
0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0xc0026900,
0x000002e5,
0x00000000, /* VGT_STRMOUT_CONFIG */
0x00000000,
0xc01b6900,
0x000002f5,
0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
0x00000000, /* PA_SC_LINE_CNTL */
0x00000000, /* PA_SC_AA_CONFIG */
0x00000005, /* PA_SU_VTX_CNTL */
0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
0xffffffff,
0xc0026900,
0x00000316,
0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
0x00000010, /* */
};
const u32 cik_default_size = ARRAY_SIZE(cik_default_state);

View File

@ -20,13 +20,228 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Alex Deucher <alexander.deucher@amd.com>
*/
#ifndef CIK_BLIT_SHADERS_H
#define CIK_BLIT_SHADERS_H
extern const u32 cik_default_state[];
static const u32 cik_default_state[] =
{
0xc0066900,
0x00000000,
0x00000060, /* DB_RENDER_CONTROL */
0x00000000, /* DB_COUNT_CONTROL */
0x00000000, /* DB_DEPTH_VIEW */
0x0000002a, /* DB_RENDER_OVERRIDE */
0x00000000, /* DB_RENDER_OVERRIDE2 */
0x00000000, /* DB_HTILE_DATA_BASE */
extern const u32 cik_default_size;
0xc0046900,
0x00000008,
0x00000000, /* DB_DEPTH_BOUNDS_MIN */
0x00000000, /* DB_DEPTH_BOUNDS_MAX */
0x00000000, /* DB_STENCIL_CLEAR */
0x00000000, /* DB_DEPTH_CLEAR */
0xc0036900,
0x0000000f,
0x00000000, /* DB_DEPTH_INFO */
0x00000000, /* DB_Z_INFO */
0x00000000, /* DB_STENCIL_INFO */
0xc0016900,
0x00000080,
0x00000000, /* PA_SC_WINDOW_OFFSET */
0xc00d6900,
0x00000083,
0x0000ffff, /* PA_SC_CLIPRECT_RULE */
0x00000000, /* PA_SC_CLIPRECT_0_TL */
0x20002000, /* PA_SC_CLIPRECT_0_BR */
0x00000000,
0x20002000,
0x00000000,
0x20002000,
0x00000000,
0x20002000,
0xaaaaaaaa, /* PA_SC_EDGERULE */
0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
0x0000000f, /* CB_TARGET_MASK */
0x0000000f, /* CB_SHADER_MASK */
0xc0226900,
0x00000094,
0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x00000000, /* PA_SC_VPORT_ZMIN_0 */
0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
0xc0046900,
0x00000100,
0xffffffff, /* VGT_MAX_VTX_INDX */
0x00000000, /* VGT_MIN_VTX_INDX */
0x00000000, /* VGT_INDX_OFFSET */
0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
0xc0046900,
0x00000105,
0x00000000, /* CB_BLEND_RED */
0x00000000, /* CB_BLEND_GREEN */
0x00000000, /* CB_BLEND_BLUE */
0x00000000, /* CB_BLEND_ALPHA */
0xc0016900,
0x000001e0,
0x00000000, /* CB_BLEND0_CONTROL */
0xc00c6900,
0x00000200,
0x00000000, /* DB_DEPTH_CONTROL */
0x00000000, /* DB_EQAA */
0x00cc0010, /* CB_COLOR_CONTROL */
0x00000210, /* DB_SHADER_CONTROL */
0x00010000, /* PA_CL_CLIP_CNTL */
0x00000004, /* PA_SU_SC_MODE_CNTL */
0x00000100, /* PA_CL_VTE_CNTL */
0x00000000, /* PA_CL_VS_OUT_CNTL */
0x00000000, /* PA_CL_NANINF_CNTL */
0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
0xc0116900,
0x00000280,
0x00000000, /* PA_SU_POINT_SIZE */
0x00000000, /* PA_SU_POINT_MINMAX */
0x00000008, /* PA_SU_LINE_CNTL */
0x00000000, /* PA_SC_LINE_STIPPLE */
0x00000000, /* VGT_OUTPUT_PATH_CNTL */
0x00000000, /* VGT_HOS_CNTL */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000, /* VGT_GS_MODE */
0xc0026900,
0x00000292,
0x00000000, /* PA_SC_MODE_CNTL_0 */
0x00000000, /* PA_SC_MODE_CNTL_1 */
0xc0016900,
0x000002a1,
0x00000000, /* VGT_PRIMITIVEID_EN */
0xc0016900,
0x000002a5,
0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
0xc0026900,
0x000002a8,
0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
0x00000000,
0xc0026900,
0x000002ad,
0x00000000, /* VGT_REUSE_OFF */
0x00000000,
0xc0016900,
0x000002d5,
0x00000000, /* VGT_SHADER_STAGES_EN */
0xc0016900,
0x000002dc,
0x0000aa00, /* DB_ALPHA_TO_MASK */
0xc0066900,
0x000002de,
0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0xc0026900,
0x000002e5,
0x00000000, /* VGT_STRMOUT_CONFIG */
0x00000000,
0xc01b6900,
0x000002f5,
0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
0x00000000, /* PA_SC_LINE_CNTL */
0x00000000, /* PA_SC_AA_CONFIG */
0x00000005, /* PA_SU_VTX_CNTL */
0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
0xffffffff,
0xc0026900,
0x00000316,
0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
0x00000010, /* */
};
static const u32 cik_default_size = ARRAY_SIZE(cik_default_state);
#endif

View File

@ -329,7 +329,7 @@ static const struct si_dte_data dte_data_malta =
true
};
struct si_cac_config_reg cac_weights_pitcairn[] =
static struct si_cac_config_reg cac_weights_pitcairn[] =
{
{ 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
@ -1085,7 +1085,7 @@ static const struct si_dte_data dte_data_venus_pro =
true
};
struct si_cac_config_reg cac_weights_oland[] =
static struct si_cac_config_reg cac_weights_oland[] =
{
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },