Merge tag 'amd-drm-fixes-6.11-2024-08-08' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.11-2024-08-08:

amdgpu:
- DMCUB fix
- Fix DET programming on some DCNs
- DCC fixes
- DCN 4.0.1 fixes
- SMU 14.0.x update
- MMHUB fix
- DCN 3.1.4 fix
- GC 12.0 fixes
- Fix soft recovery error propagation
- SDMA 7.0 fixes
- DSC fix

drm buddy:
- Add start address to trim function

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240808161134.1227671-1-alexander.deucher@amd.com
commit a507e750a1
@@ -156,6 +156,8 @@ struct amdgpu_gmc_funcs {
                                uint64_t addr, uint64_t *flags);
        /* get the amount of memory used by the vbios for pre-OS console */
        unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev);
        /* get the DCC buffer alignment */
        unsigned int (*get_dcc_alignment)(struct amdgpu_device *adev);

        enum amdgpu_memory_partition (*query_mem_partition_mode)(
                        struct amdgpu_device *adev);

@@ -363,6 +365,10 @@ struct amdgpu_gmc {
        (adev)->gmc.gmc_funcs->override_vm_pte_flags \
                ((adev), (vm), (addr), (pte_flags))
#define amdgpu_gmc_get_vbios_fb_size(adev) (adev)->gmc.gmc_funcs->get_vbios_fb_size((adev))
#define amdgpu_gmc_get_dcc_alignment(adev) ({ \
        typeof(adev) _adev = (adev); \
        _adev->gmc.gmc_funcs->get_dcc_alignment(_adev); \
})

/**
 * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
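The amdgpu_gmc_get_dcc_alignment() macro added above is a GNU C statement expression: the typeof() temporary guarantees the adev argument is evaluated only once before the get_dcc_alignment callback is invoked. Below is a minimal user-space sketch of the same pattern; the demo_* names are invented for illustration and are not amdgpu types (builds with GCC or Clang, which provide the statement-expression extension).

#include <stdio.h>

/* Illustrative stand-ins for the driver structs; not the real amdgpu types. */
struct demo_funcs {
        unsigned int (*get_dcc_alignment)(void *dev);
};

struct demo_dev {
        const struct demo_funcs *funcs;
};

/* Same shape as the macro in the hunk above: one evaluation of 'dev'. */
#define demo_get_dcc_alignment(dev) ({                  \
        typeof(dev) _dev = (dev);                       \
        _dev->funcs->get_dcc_alignment(_dev);           \
})

static unsigned int demo_alignment(void *dev)
{
        (void)dev;
        return 4096; /* arbitrary example value */
}

int main(void)
{
        const struct demo_funcs funcs = { .get_dcc_alignment = demo_alignment };
        struct demo_dev dev = { .funcs = &funcs };

        printf("alignment: %u\n", demo_get_dcc_alignment(&dev));
        return 0;
}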
@@ -264,9 +264,8 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
        struct dma_fence *fence = NULL;
        int r;

        /* Ignore soft recovered fences here */
        r = drm_sched_entity_error(s_entity);
        if (r && r != -ENODATA)
        if (r)
                goto error;

        if (!fence && job->gang_submit)
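The job hunk above stops filtering out -ENODATA (the value used for soft-recovered fences), so any entity error now aborts job preparation and is propagated to userspace. A tiny user-space model of the old versus new guard, purely for illustration (61 is ENODATA on Linux; the value is only used here as an example):

#include <stdbool.h>
#include <stdio.h>

#define SOFT_RECOVERY_ERR (-61)   /* stands in for -ENODATA */

static bool old_guard_aborts(int r) { return r && r != SOFT_RECOVERY_ERR; }
static bool new_guard_aborts(int r) { return r != 0; }

int main(void)
{
        int r = SOFT_RECOVERY_ERR; /* error from a soft-recovered fence */

        printf("old aborts: %d, new aborts: %d\n",
               old_guard_aborts(r), new_guard_aborts(r));
        return 0;
}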
@@ -456,6 +456,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
        u64 vis_usage = 0, max_bytes, min_block_size;
        struct amdgpu_vram_mgr_resource *vres;
        u64 size, remaining_size, lpfn, fpfn;
        unsigned int adjust_dcc_size = 0;
        struct drm_buddy *mm = &mgr->mm;
        struct drm_buddy_block *block;
        unsigned long pages_per_block;

@@ -511,7 +512,19 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
        /* Allocate blocks in desired range */
        vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;

        if (bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC &&
            adev->gmc.gmc_funcs->get_dcc_alignment)
                adjust_dcc_size = amdgpu_gmc_get_dcc_alignment(adev);

        remaining_size = (u64)vres->base.size;
        if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
                unsigned int dcc_size;

                dcc_size = roundup_pow_of_two(vres->base.size + adjust_dcc_size);
                remaining_size = (u64)dcc_size;

                vres->flags |= DRM_BUDDY_TRIM_DISABLE;
        }

        mutex_lock(&mgr->lock);
        while (remaining_size) {

@@ -521,8 +534,11 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
                        min_block_size = mgr->default_page_size;

                size = remaining_size;
                if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
                    !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))

                if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size)
                        min_block_size = size;
                else if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
                         !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
                        min_block_size = (u64)pages_per_block << PAGE_SHIFT;

                BUG_ON(min_block_size < mm->chunk_size);

@@ -553,6 +569,22 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
        }
        mutex_unlock(&mgr->lock);

        if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
                struct drm_buddy_block *dcc_block;
                unsigned long dcc_start;
                u64 trim_start;

                dcc_block = amdgpu_vram_mgr_first_block(&vres->blocks);
                /* Adjust the start address for DCC buffers only */
                dcc_start =
                        roundup((unsigned long)amdgpu_vram_mgr_block_start(dcc_block),
                                adjust_dcc_size);
                trim_start = (u64)dcc_start;
                drm_buddy_block_trim(mm, &trim_start,
                                     (u64)vres->base.size,
                                     &vres->blocks);
        }

        vres->base.start = 0;
        size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
                     vres->base.size);
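For contiguous GFX12 DCC buffers, the hunk above first over-allocates to roundup_pow_of_two(size + DCC alignment) with the buddy allocator's automatic trim disabled, then trims the block back down to the requested size at a start address rounded up to the DCC alignment. A small stand-alone sketch of that size/start arithmetic follows; all values in main() are made-up examples, not real hardware numbers.

#include <stdint.h>
#include <stdio.h>

/* Round v up to the next power of two (v must be non-zero). */
static uint64_t pow2_roundup(uint64_t v)
{
        uint64_t p = 1;

        while (p < v)
                p <<= 1;
        return p;
}

/* Round v up to the next multiple of align. */
static uint64_t align_roundup(uint64_t v, uint64_t align)
{
        return ((v + align - 1) / align) * align;
}

int main(void)
{
        uint64_t bo_size     = 9ull  << 20;  /* requested buffer: 9 MiB (example)  */
        uint64_t dcc_align   = 2ull  << 20;  /* DCC alignment: 2 MiB (example)     */
        uint64_t block_start = 48ull << 20;  /* start of the buddy block (example) */

        /* Oversized allocation, as computed in the hunk above. */
        uint64_t alloc_size = pow2_roundup(bo_size + dcc_align);
        /* Aligned start address that the driver trims from afterwards. */
        uint64_t trim_start = align_roundup(block_start, dcc_align);

        printf("allocate %llu MiB, then trim to %llu MiB starting at %llu MiB\n",
               (unsigned long long)(alloc_size >> 20),
               (unsigned long long)(bo_size >> 20),
               (unsigned long long)(trim_start >> 20));
        return 0;
}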
@@ -202,6 +202,12 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = {
        SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
};

static const struct soc15_reg_golden golden_settings_gc_12_0[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x0000000f, 0x0000000f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, regCB_HW_CONTROL_1, 0x03000000, 0x03000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL5, 0x00000070, 0x00000020)
};

#define DEFAULT_SH_MEM_CONFIG \
        ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
         (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \

@@ -3432,6 +3438,24 @@ static void gfx_v12_0_disable_gpa_mode(struct amdgpu_device *adev)
        WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data);
}

static void gfx_v12_0_init_golden_registers(struct amdgpu_device *adev)
{
        if (amdgpu_sriov_vf(adev))
                return;

        switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
        case IP_VERSION(12, 0, 0):
        case IP_VERSION(12, 0, 1):
                if (adev->rev_id == 0)
                        soc15_program_register_sequence(adev,
                                        golden_settings_gc_12_0,
                                        (const u32)ARRAY_SIZE(golden_settings_gc_12_0));
                break;
        default:
                break;
        }
}

static int gfx_v12_0_hw_init(void *handle)
{
        int r;

@@ -3472,6 +3496,9 @@ static int gfx_v12_0_hw_init(void *handle)
                }
        }

        if (!amdgpu_emu_mode)
                gfx_v12_0_init_golden_registers(adev);

        adev->gfx.is_poweron = true;

        if (get_gb_addr_config(adev))
@@ -542,6 +542,23 @@ static unsigned gmc_v12_0_get_vbios_fb_size(struct amdgpu_device *adev)
        return 0;
}

static unsigned int gmc_v12_0_get_dcc_alignment(struct amdgpu_device *adev)
{
        unsigned int max_tex_channel_caches, alignment;

        if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 0) &&
            amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 1))
                return 0;

        max_tex_channel_caches = adev->gfx.config.max_texture_channel_caches;
        if (is_power_of_2(max_tex_channel_caches))
                alignment = (unsigned int)(max_tex_channel_caches / SZ_4);
        else
                alignment = roundup_pow_of_two(max_tex_channel_caches);

        return (unsigned int)(alignment * max_tex_channel_caches * SZ_1K);
}

static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = {
        .flush_gpu_tlb = gmc_v12_0_flush_gpu_tlb,
        .flush_gpu_tlb_pasid = gmc_v12_0_flush_gpu_tlb_pasid,

@@ -551,6 +568,7 @@ static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = {
        .get_vm_pde = gmc_v12_0_get_vm_pde,
        .get_vm_pte = gmc_v12_0_get_vm_pte,
        .get_vbios_fb_size = gmc_v12_0_get_vbios_fb_size,
        .get_dcc_alignment = gmc_v12_0_get_dcc_alignment,
};

static void gmc_v12_0_set_gmc_funcs(struct amdgpu_device *adev)
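The value returned by gmc_v12_0_get_dcc_alignment() above depends only on max_texture_channel_caches (SZ_4 and SZ_1K are simply 4 and 1024 in the kernel). The helper below mirrors that formula in plain C so the math can be checked outside the driver; the input used in main() is just an example, not a real ASIC configuration.

#include <stdbool.h>
#include <stdio.h>

static bool is_pow2(unsigned int n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

static unsigned int pow2_roundup(unsigned int n)
{
        unsigned int p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

/* Mirrors the formula in gmc_v12_0_get_dcc_alignment() above. */
static unsigned int dcc_alignment(unsigned int max_tex_channel_caches)
{
        unsigned int alignment;

        if (is_pow2(max_tex_channel_caches))
                alignment = max_tex_channel_caches / 4;        /* SZ_4  */
        else
                alignment = pow2_roundup(max_tex_channel_caches);

        return alignment * max_tex_channel_caches * 1024;      /* SZ_1K */
}

int main(void)
{
        /* 16 texture channel caches is an example value only. */
        printf("alignment: %u bytes\n", dcc_alignment(16));
        return 0;
}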
@@ -80,7 +80,8 @@ static uint32_t mmhub_v4_1_0_get_invalidate_req(unsigned int vmid,
        /* invalidate using legacy mode on vmid*/
        req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
                            PER_VMID_INVALIDATE_REQ, 1 << vmid);
        req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
        /* Only use legacy inv on mmhub side */
        req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
        req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
        req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
        req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
@@ -1575,8 +1575,7 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
        ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
                SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
                SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0) |
                SDMA_PKT_COPY_LINEAR_HEADER_CPV((copy_flags &
                        (AMDGPU_COPY_FLAGS_READ_DECOMPRESSED | AMDGPU_COPY_FLAGS_WRITE_COMPRESSED)) ? 1 : 0);
                SDMA_PKT_COPY_LINEAR_HEADER_CPV(1);

        ib->ptr[ib->length_dw++] = byte_count - 1;
        ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */

@@ -1590,6 +1589,8 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
                        ((copy_flags & AMDGPU_COPY_FLAGS_READ_DECOMPRESSED) ? SDMA_DCC_READ_CM(2) : 0) |
                        ((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(1) : 0) |
                        SDMA_DCC_MAX_COM(max_com) | SDMA_DCC_MAX_UCOM(1);
        else
                ib->ptr[ib->length_dw++] = 0;
}

/**

@@ -1616,7 +1617,7 @@ static void sdma_v7_0_emit_fill_buffer(struct amdgpu_ib *ib,

static const struct amdgpu_buffer_funcs sdma_v7_0_buffer_funcs = {
        .copy_max_bytes = 0x400000,
        .copy_num_dw = 7,
        .copy_num_dw = 8,
        .emit_copy_buffer = sdma_v7_0_emit_copy_buffer,
        .fill_max_bytes = 0x400000,
        .fill_num_dw = 5,
@@ -1270,6 +1270,9 @@ static bool is_dsc_need_re_compute(
                }
        }

        if (new_stream_on_link_num == 0)
                return false;

        /* check current_state if there stream on link but it is not in
         * new request state
         */
@@ -185,8 +185,7 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
        else
                copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0;


        dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
        dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

        return true;
}
@@ -83,6 +83,8 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_rcfla
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_32.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/display_rq_dlg_calc_32.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn314/display_mode_vba_314.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn314/display_rq_dlg_calc_314.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_rcflags)
@@ -1402,6 +1402,8 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
                if (hubbub && hubp) {
                        if (hubbub->funcs->program_det_size)
                                hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
                        if (hubbub->funcs->program_det_segments)
                                hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0);
                }
        }

@@ -771,6 +771,8 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
                if (hubbub && hubp) {
                        if (hubbub->funcs->program_det_size)
                                hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
                        if (hubbub->funcs->program_det_segments)
                                hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0);
                }
        }

@@ -723,6 +723,7 @@ static const struct dc_debug_options debug_defaults_drv = {
        .min_prefetch_in_strobe_ns = 60000, // 60us
        .disable_unbounded_requesting = false,
        .enable_legacy_fast_update = false,
        .dcc_meta_propagation_delay_us = 10,
        .fams2_config = {
                .bits = {
                        .enable = true,
@@ -138,7 +138,9 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
        SRI_ARR(DCHUBP_MALL_CONFIG, HUBP, id), \
        SRI_ARR(DCHUBP_VMPG_CONFIG, HUBP, id), \
        SRI_ARR(UCLK_PSTATE_FORCE, HUBPREQ, id), \
        HUBP_3DLUT_FL_REG_LIST_DCN401(id)
        HUBP_3DLUT_FL_REG_LIST_DCN401(id), \
        SRI_ARR(DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE, HUBP, id), \
        SRI_ARR(DCHUBP_MCACHEID_CONFIG, HUBP, id)

/* ABM */
#define ABM_DCN401_REG_LIST_RI(id) \
@@ -27,7 +27,8 @@

#pragma pack(push, 1)

#define SMU_14_0_2_TABLE_FORMAT_REVISION 3
#define SMU_14_0_2_TABLE_FORMAT_REVISION 23
#define SMU_14_0_2_CUSTOM_TABLE_FORMAT_REVISION 1

// POWERPLAYTABLE::ulPlatformCaps
#define SMU_14_0_2_PP_PLATFORM_CAP_POWERPLAY 0x1 // This cap indicates whether CCC need to show Powerplay page.

@@ -43,6 +44,7 @@
#define SMU_14_0_2_PP_THERMALCONTROLLER_NONE 0

#define SMU_14_0_2_PP_OVERDRIVE_VERSION 0x1 // TODO: FIX OverDrive Version TBD
#define SMU_14_0_2_PP_CUSTOM_OVERDRIVE_VERSION 0x1
#define SMU_14_0_2_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00

enum SMU_14_0_2_OD_SW_FEATURE_CAP

@@ -107,6 +109,7 @@ enum SMU_14_0_2_PWRMODE_SETTING
        SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_BALANCE,
        SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_TURBO,
        SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_RAGE,
        SMU_14_0_2_PMSETTING_COUNT
};
#define SMU_14_0_2_MAX_PMSETTING 32 // Maximum Number of PowerMode Settings

@@ -127,17 +130,24 @@ struct smu_14_0_2_overdrive_table
        int16_t pm_setting[SMU_14_0_2_MAX_PMSETTING]; // Optimized power mode feature settings
};

enum smu_14_0_3_pptable_source {
        PPTABLE_SOURCE_IFWI = 0,
        PPTABLE_SOURCE_DRIVER_HARDCODED = 1,
        PPTABLE_SOURCE_PPGEN_REGISTRY = 2,
        PPTABLE_SOURCE_MAX = PPTABLE_SOURCE_PPGEN_REGISTRY,
};

struct smu_14_0_2_powerplay_table
{
        struct atom_common_table_header header; // header.format_revision = 3 (HAS TO MATCH SMU_14_0_2_TABLE_FORMAT_REVISION), header.content_revision = ? structuresize is calculated by PPGen.
        uint8_t table_revision; // PPGen use only: table_revision = 3
        uint8_t padding; // Padding 1 byte to align table_size offset to 6 bytes (pmfw_start_offset, for PMFW to know the starting offset of PPTable_t).
        uint8_t pptable_source; // PPGen UI dropdown box
        uint16_t pmfw_pptable_start_offset; // The start offset of the pmfw portion. i.e. start of PPTable_t (start of SkuTable_t)
        uint16_t pmfw_pptable_size; // The total size of pmfw_pptable, i.e PPTable_t.
        uint16_t pmfw_pfe_table_start_offset; // The start offset of the PFE_Settings_t within pmfw_pptable.
        uint16_t pmfw_pfe_table_size; // The size of PFE_Settings_t.
        uint16_t pmfw_board_table_start_offset; // The start offset of the BoardTable_t within pmfw_pptable.
        uint16_t pmfw_board_table_size; // The size of BoardTable_t.
        uint16_t pmfw_sku_table_start_offset; // DO NOT CHANGE ORDER; The absolute start offset of the SkuTable_t (within smu_14_0_3_powerplay_table).
        uint16_t pmfw_sku_table_size; // DO NOT CHANGE ORDER; The size of SkuTable_t.
        uint16_t pmfw_board_table_start_offset; // The start offset of the BoardTable_t
        uint16_t pmfw_board_table_size; // The size of BoardTable_t.
        uint16_t pmfw_custom_sku_table_start_offset; // The start offset of the CustomSkuTable_t within pmfw_pptable.
        uint16_t pmfw_custom_sku_table_size; // The size of the CustomSkuTable_t.
        uint32_t golden_pp_id; // PPGen use only: PP Table ID on the Golden Data Base

@@ -159,6 +169,36 @@ struct smu_14_0_2_powerplay_table
        PPTable_t smc_pptable; // PPTable_t in driver_if.h -- as requested by PMFW, this offset should start at a 32-byte boundary, and the table_size above should remain at offset=6 bytes
};

enum SMU_14_0_2_CUSTOM_OD_SW_FEATURE_CAP {
        SMU_14_0_2_CUSTOM_ODCAP_POWER_MODE = 0,
        SMU_14_0_2_CUSTOM_ODCAP_COUNT
};

enum SMU_14_0_2_CUSTOM_OD_FEATURE_SETTING_ID {
        SMU_14_0_2_CUSTOM_ODSETTING_POWER_MODE = 0,
        SMU_14_0_2_CUSTOM_ODSETTING_COUNT,
};

struct smu_14_0_2_custom_overdrive_table {
        uint8_t revision;
        uint8_t reserve[3];
        uint8_t cap[SMU_14_0_2_CUSTOM_ODCAP_COUNT];
        int32_t max[SMU_14_0_2_CUSTOM_ODSETTING_COUNT];
        int32_t min[SMU_14_0_2_CUSTOM_ODSETTING_COUNT];
        int16_t pm_setting[SMU_14_0_2_PMSETTING_COUNT];
};

struct smu_14_0_3_custom_powerplay_table {
        uint8_t custom_table_revision;
        uint16_t custom_table_size;
        uint16_t custom_sku_table_offset;
        uint32_t custom_platform_caps;
        uint16_t software_shutdown_temp;
        struct smu_14_0_2_custom_overdrive_table custom_overdrive_table;
        uint32_t reserve[8];
        CustomSkuTable_t custom_sku_table_pmfw;
};

#pragma pack(pop)

#endif
@@ -851,6 +851,7 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm,
 * drm_buddy_block_trim - free unused pages
 *
 * @mm: DRM buddy manager
 * @start: start address to begin the trimming.
 * @new_size: original size requested
 * @blocks: Input and output list of allocated blocks.
 * MUST contain single block as input to be trimmed.

@@ -866,11 +867,13 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm,
 * 0 on success, error code on failure.
 */
int drm_buddy_block_trim(struct drm_buddy *mm,
                         u64 *start,
                         u64 new_size,
                         struct list_head *blocks)
{
        struct drm_buddy_block *parent;
        struct drm_buddy_block *block;
        u64 block_start, block_end;
        LIST_HEAD(dfs);
        u64 new_start;
        int err;

@@ -882,6 +885,9 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
                                 struct drm_buddy_block,
                                 link);

        block_start = drm_buddy_block_offset(block);
        block_end = block_start + drm_buddy_block_size(mm, block);

        if (WARN_ON(!drm_buddy_block_is_allocated(block)))
                return -EINVAL;

@@ -894,6 +900,20 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
        if (new_size == drm_buddy_block_size(mm, block))
                return 0;

        new_start = block_start;
        if (start) {
                new_start = *start;

                if (new_start < block_start)
                        return -EINVAL;

                if (!IS_ALIGNED(new_start, mm->chunk_size))
                        return -EINVAL;

                if (range_overflows(new_start, new_size, block_end))
                        return -EINVAL;
        }

        list_del(&block->link);
        mark_free(mm, block);
        mm->avail += drm_buddy_block_size(mm, block);

@@ -904,7 +924,6 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
        parent = block->parent;
        block->parent = NULL;

        new_start = drm_buddy_block_offset(block);
        list_add(&block->tmp_link, &dfs);
        err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
        if (err) {

@@ -1066,7 +1085,8 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
        } while (1);

        /* Trim the allocated block to the required size */
        if (original_size != size) {
        if (!(flags & DRM_BUDDY_TRIM_DISABLE) &&
            original_size != size) {
                struct list_head *trim_list;
                LIST_HEAD(temp);
                u64 trim_size;

@@ -1083,6 +1103,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
        }

        drm_buddy_block_trim(mm,
                             NULL,
                             trim_size,
                             trim_list);

@@ -150,7 +150,7 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
        } while (remaining_size);

        if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
                if (!drm_buddy_block_trim(mm, vres->base.size, &vres->blocks))
                if (!drm_buddy_block_trim(mm, NULL, vres->base.size, &vres->blocks))
                        size = vres->base.size;
        }

@@ -27,6 +27,7 @@
#define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2)
#define DRM_BUDDY_CLEAR_ALLOCATION BIT(3)
#define DRM_BUDDY_CLEARED BIT(4)
#define DRM_BUDDY_TRIM_DISABLE BIT(5)

struct drm_buddy_block {
#define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)

@@ -155,6 +156,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
                           unsigned long flags);

int drm_buddy_block_trim(struct drm_buddy *mm,
                         u64 *start,
                         u64 new_size,
                         struct list_head *blocks);
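With the new signature, drm_buddy_block_trim() takes an optional start address: callers that pass NULL (drm_buddy_alloc_blocks() and the xe VRAM manager above) keep the old behaviour of trimming from the block's own offset, while amdgpu passes an aligned trim_start for DCC buffers. The snippet below is a user-space model of the extra start-address validation the function gains; the names and example numbers are illustrative, not the kernel implementation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the checks applied when a start address is supplied. */
static bool trim_start_is_valid(uint64_t block_start, uint64_t block_size,
                                uint64_t chunk_size, uint64_t new_start,
                                uint64_t new_size)
{
        uint64_t block_end = block_start + block_size;

        if (new_start < block_start)
                return false;           /* must lie inside the allocated block     */
        if (new_start % chunk_size)
                return false;           /* like IS_ALIGNED(new_start, chunk_size)  */
        if (new_start + new_size > block_end)
                return false;           /* like the range_overflows() check        */
        return true;
}

int main(void)
{
        /* Trim a 4 MiB block down to 1 MiB starting 2 MiB in (example numbers). */
        uint64_t block_start = 0, block_size = 4ull << 20;
        uint64_t chunk_size = 4096, new_start = 2ull << 20, new_size = 1ull << 20;

        printf("valid: %d\n",
               trim_start_is_valid(block_start, block_size, chunk_size,
                                   new_start, new_size));
        return 0;
}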