
Merge tag 'amd-drm-next-6.2-2022-11-18' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-6.2-2022-11-18:

amdgpu:
- SR-IOV fixes
- Clean up DC checks
- DCN 3.2.x fixes
- DCN 3.1.x fixes
- Don't enable degamma on asics which don't support it
- IP discovery fixes
- BACO fixes
- Fix vbios allocation handling when vkms is enabled
- Drop buggy tdr advanced mode GPU reset handling
- Fix the build when DCN is not set in kconfig
- MST DSC fixes
- Userptr fixes
- FRU and RAS EEPROM fixes
- VCN 4.x RAS support
- Aldebaran CU occupancy reporting fix
- PSP ring cleanup

amdkfd:
- Memory limit fix
- Enable cooperative launch on gfx 10.3

amd-drm-next-6.2-2022-11-11:

amdgpu:
- SMU 13.x updates
- GPUVM TLB race fix
- DCN 3.1.4 updates
- DCN 3.2.x updates
- PSR fixes
- Kerneldoc fix
- Vega10 fan fix
- GPUVM locking fixes in error paths
- BACO fix for Beige Goby
- EEPROM I2C address cleanup
- GFXOFF fix
- Fix DC memory leak in error paths
- Flexible array updates
- Mtype fix for GPUVM PTEs
- Move Kconfig into amdgpu directory
- SR-IOV updates
- Fix possible memory leak in CS IOCTL error path

amdkfd:
- Fix possible memory overrun
- CRIU fixes

radeon:
- ACPI ref count fix
- HDA audio notifier support
- Move Kconfig into radeon directory

UAPI:
- Add new GEM_CREATE flags to help transition more KFD functionality to the DRM UAPI.
  These are used internally in the driver to align location-based memory coherency
  requirements from memory allocated in the KFD with how we manage GPUVM PTEs. They
  are currently blocked in the GEM_CREATE IOCTL as we don't have a user right now;
  for the moment they are only used internally in the kernel driver for existing KFD
  memory allocations. So this is a change to the UAPI header, but no functional change
  in the UAPI.
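
  A minimal sketch, for context, of how these KFD allocation flags translate to
  the new GEM_CREATE flags inside the driver (mirroring the
  amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu hunk further down; the helper name is
  illustrative, not a driver API):

	/* Hypothetical helper: map KFD alloc flags onto GEM create flags. */
	static uint64_t kfd_alloc_to_gem_flags(uint64_t kfd_flags)
	{
		uint64_t alloc_flags = 0;

		if (kfd_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT)
			alloc_flags |= AMDGPU_GEM_CREATE_COHERENT;
		if (kfd_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED)
			alloc_flags |= AMDGPU_GEM_CREATE_UNCACHED;
		return alloc_flags;
	}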

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221118170807.6505-1-alexander.deucher@amd.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
Committed by Dave Airlie on 2022-11-22 13:41:11 +10:00 in commit fc58764bbf
176 changed files with 2697 additions and 1654 deletions


@ -233,64 +233,8 @@ source "drivers/gpu/drm/i2c/Kconfig"
source "drivers/gpu/drm/arm/Kconfig"
config DRM_RADEON
tristate "ATI Radeon"
depends on DRM && PCI && MMU
depends on AGP || !AGP
select FW_LOADER
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select DRM_TTM
select DRM_TTM_HELPER
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
# radeon depends on ACPI_VIDEO when ACPI is enabled, for select to work
# ACPI_VIDEO's dependencies must also be selected.
select INPUT if ACPI
select ACPI_VIDEO if ACPI
# On x86 ACPI_VIDEO also needs ACPI_WMI
select X86_PLATFORM_DEVICES if ACPI && X86
select ACPI_WMI if ACPI && X86
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
run the Radeon in plain VGA mode.
If M is selected, the module will be called radeon.
source "drivers/gpu/drm/radeon/Kconfig"
config DRM_AMDGPU
tristate "AMD GPU"
depends on DRM && PCI && MMU
select FW_LOADER
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select DRM_SCHED
select DRM_TTM
select DRM_TTM_HELPER
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
select DRM_BUDDY
# amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work
# ACPI_VIDEO's dependencies must also be selected.
select INPUT if ACPI
select ACPI_VIDEO if ACPI
# On x86 ACPI_VIDEO also needs ACPI_WMI
select X86_PLATFORM_DEVICES if ACPI && X86
select ACPI_WMI if ACPI && X86
help
Choose this option if you have a recent AMD Radeon graphics card.
If M is selected, the module will be called amdgpu.
source "drivers/gpu/drm/amd/amdgpu/Kconfig"
source "drivers/gpu/drm/nouveau/Kconfig"


@ -1,4 +1,33 @@
# SPDX-License-Identifier: MIT
config DRM_AMDGPU
tristate "AMD GPU"
depends on DRM && PCI && MMU
select FW_LOADER
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select DRM_SCHED
select DRM_TTM
select DRM_TTM_HELPER
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
select DRM_BUDDY
# amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work
# ACPI_VIDEO's dependencies must also be selected.
select INPUT if ACPI
select ACPI_VIDEO if ACPI
# On x86 ACPI_VIDEO also needs ACPI_WMI
select X86_PLATFORM_DEVICES if ACPI && X86
select ACPI_WMI if ACPI && X86
help
Choose this option if you have a recent AMD Radeon graphics card.
If M is selected, the module will be called amdgpu.
config DRM_AMDGPU_SI
bool "Enable amdgpu support for SI parts"
depends on DRM_AMDGPU


@ -250,7 +250,7 @@ endif
amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
amdgpu-$(CONFIG_HMM_MIRROR) += amdgpu_mn.o
amdgpu-$(CONFIG_HMM_MIRROR) += amdgpu_hmm.o
include $(FULL_AMD_PATH)/pm/Makefile


@ -82,7 +82,6 @@
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_mn.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gfx.h"
#include "amdgpu_sdma.h"
@ -1065,6 +1064,7 @@ struct amdgpu_device {
struct work_struct reset_work;
bool job_hang;
bool dc_enabled;
};
static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
@ -1122,6 +1122,8 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
struct amdgpu_reset_context *reset_context);
@ -1295,6 +1297,7 @@ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
u32 reg, u32 v);
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
struct dma_fence *gang);
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)


@ -847,7 +847,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
if (atif->notifications.brightness_change) {
if (amdgpu_device_has_dc_support(adev)) {
if (adev->dc_enabled) {
#if defined(CONFIG_DRM_AMD_DC)
struct amdgpu_display_manager *dm = &adev->dm;


@ -41,5 +41,6 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
};


@ -29,6 +29,7 @@
#include "amdgpu_object.h"
#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
#include "amdgpu_hmm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>
@ -171,9 +172,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
(kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
kfd_mem_limit.max_ttm_mem_limit) ||
(adev && adev->kfd.vram_used + vram_needed >
adev->gmc.real_vram_size -
atomic64_read(&adev->vram_pin_size) -
reserved_for_pt)) {
adev->gmc.real_vram_size - reserved_for_pt)) {
ret = -ENOMEM;
goto release;
}
@ -405,63 +404,15 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
uint32_t mapping_flags;
uint64_t pte_flags;
bool snoop = false;
uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE |
AMDGPU_VM_MTYPE_DEFAULT;
mapping_flags = AMDGPU_VM_PAGE_READABLE;
if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
if (bo_adev == adev) {
if (uncached)
mapping_flags |= AMDGPU_VM_MTYPE_UC;
else if (coherent)
mapping_flags |= AMDGPU_VM_MTYPE_CC;
else
mapping_flags |= AMDGPU_VM_MTYPE_RW;
if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) &&
adev->gmc.xgmi.connected_to_cpu)
snoop = true;
} else {
if (uncached || coherent)
mapping_flags |= AMDGPU_VM_MTYPE_UC;
else
mapping_flags |= AMDGPU_VM_MTYPE_NC;
if (amdgpu_xgmi_same_hive(adev, bo_adev))
snoop = true;
}
} else {
if (uncached || coherent)
mapping_flags |= AMDGPU_VM_MTYPE_UC;
else
mapping_flags |= AMDGPU_VM_MTYPE_NC;
snoop = true;
}
break;
default:
if (uncached || coherent)
mapping_flags |= AMDGPU_VM_MTYPE_UC;
else
mapping_flags |= AMDGPU_VM_MTYPE_NC;
if (!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
snoop = true;
}
pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
return pte_flags;
return amdgpu_gem_va_map_flags(adev, mapping_flags);
}
/**
@ -988,6 +939,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
struct amdkfd_process_info *process_info = mem->process_info;
struct amdgpu_bo *bo = mem->bo;
struct ttm_operation_ctx ctx = { true, false };
struct hmm_range *range;
int ret = 0;
mutex_lock(&process_info->lock);
@ -998,7 +950,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
goto out;
}
ret = amdgpu_mn_register(bo, user_addr);
ret = amdgpu_hmm_register(bo, user_addr);
if (ret) {
pr_err("%s: Failed to register MMU notifier: %d\n",
__func__, ret);
@ -1017,7 +969,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
return 0;
}
ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
if (ret) {
pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
goto unregister_out;
@ -1035,10 +987,10 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
amdgpu_bo_unreserve(bo);
release_out:
amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
unregister_out:
if (ret)
amdgpu_mn_unregister(bo);
amdgpu_hmm_unregister(bo);
out:
mutex_unlock(&process_info->lock);
return ret;
@ -1673,6 +1625,11 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
}
}
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT)
alloc_flags |= AMDGPU_GEM_CREATE_COHERENT;
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED)
alloc_flags |= AMDGPU_GEM_CREATE_UNCACHED;
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
if (!*mem) {
ret = -ENOMEM;
@ -1817,7 +1774,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
mutex_unlock(&process_info->lock);
/* No more MMU notifiers */
amdgpu_mn_unregister(mem->bo);
amdgpu_hmm_unregister(mem->bo);
ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
if (unlikely(ret))
@ -2362,6 +2319,8 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
/* Go through userptr_inval_list and update any invalid user_pages */
list_for_each_entry(mem, &process_info->userptr_inval_list,
validate_list.head) {
struct hmm_range *range;
invalid = atomic_read(&mem->invalid);
if (!invalid)
/* BO hasn't been invalidated since the last
@ -2372,7 +2331,8 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
bo = mem->bo;
/* Get updated user pages */
ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
&range);
if (ret) {
pr_debug("Failed %d to get user pages\n", ret);
@ -2391,7 +2351,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
* FIXME: Cannot ignore the return code, must hold
* notifier_lock
*/
amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
}
/* Mark the BO as valid unless it was invalidated


@ -101,39 +101,97 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
}
}
static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev,
struct vram_usagebyfirmware_v2_1 *fw_usage, int *usage_bytes)
{
uint32_t start_addr, fw_size, drv_size;
start_addr = le32_to_cpu(fw_usage->start_address_in_kb);
fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
drv_size = le16_to_cpu(fw_usage->used_by_driver_in_kb);
DRM_DEBUG("atom firmware v2_1 requested %08x %dkb fw %dkb drv\n",
start_addr,
fw_size,
drv_size);
if ((start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
/* Firmware request VRAM reservation for SR-IOV */
adev->mman.fw_vram_usage_start_offset = (start_addr &
(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
adev->mman.fw_vram_usage_size = fw_size << 10;
/* Use the default scratch size */
*usage_bytes = 0;
} else {
*usage_bytes = drv_size << 10;
}
return 0;
}
static int amdgpu_atomfirmware_allocate_fb_v2_2(struct amdgpu_device *adev,
struct vram_usagebyfirmware_v2_2 *fw_usage, int *usage_bytes)
{
uint32_t fw_start_addr, fw_size, drv_start_addr, drv_size;
fw_start_addr = le32_to_cpu(fw_usage->fw_region_start_address_in_kb);
fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
drv_start_addr = le32_to_cpu(fw_usage->driver_region0_start_address_in_kb);
drv_size = le32_to_cpu(fw_usage->used_by_driver_region0_in_kb);
DRM_DEBUG("atom requested fw start at %08x %dkb and drv start at %08x %dkb\n",
fw_start_addr,
fw_size,
drv_start_addr,
drv_size);
if ((fw_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION << 30)) == 0) {
/* Firmware request VRAM reservation for SR-IOV */
adev->mman.fw_vram_usage_start_offset = (fw_start_addr &
(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
adev->mman.fw_vram_usage_size = fw_size << 10;
}
if ((drv_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION << 30)) == 0) {
/* driver request VRAM reservation for SR-IOV */
adev->mman.drv_vram_usage_start_offset = (drv_start_addr &
(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
adev->mman.drv_vram_usage_size = drv_size << 10;
}
*usage_bytes = 0;
return 0;
}
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
struct atom_context *ctx = adev->mode_info.atom_context;
int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
vram_usagebyfirmware);
struct vram_usagebyfirmware_v2_1 *firmware_usage;
uint32_t start_addr, size;
struct vram_usagebyfirmware_v2_1 *fw_usage_v2_1;
struct vram_usagebyfirmware_v2_2 *fw_usage_v2_2;
uint16_t data_offset;
uint8_t frev, crev;
int usage_bytes = 0;
if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
le32_to_cpu(firmware_usage->start_address_in_kb),
le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
le16_to_cpu(firmware_usage->used_by_driver_in_kb));
start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);
if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
/* Firmware request VRAM reservation for SR-IOV */
adev->mman.fw_vram_usage_start_offset = (start_addr &
(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
adev->mman.fw_vram_usage_size = size << 10;
/* Use the default scratch size */
usage_bytes = 0;
} else {
usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
if (frev == 2 && crev == 1) {
fw_usage_v2_1 =
(struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
amdgpu_atomfirmware_allocate_fb_v2_1(adev,
fw_usage_v2_1,
&usage_bytes);
} else if (frev >= 2 && crev >= 2) {
fw_usage_v2_2 =
(struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
amdgpu_atomfirmware_allocate_fb_v2_2(adev,
fw_usage_v2_2,
&usage_bytes);
}
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
usage_bytes = 20 * 1024;


@ -209,6 +209,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
list_add_tail(&e->tv.head, &bucket[priority]);
e->user_pages = NULL;
e->range = NULL;
}
/* Connect the sorted buckets in the output list. */


@ -26,6 +26,8 @@
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/amdgpu_drm.h>
struct hmm_range;
struct amdgpu_device;
struct amdgpu_bo;
struct amdgpu_bo_va;
@ -36,6 +38,7 @@ struct amdgpu_bo_list_entry {
struct amdgpu_bo_va *bo_va;
uint32_t priority;
struct page **user_pages;
struct hmm_range *range;
bool user_invalidated;
};


@ -287,8 +287,10 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
}
}
if (!p->gang_size)
return -EINVAL;
if (!p->gang_size) {
ret = -EINVAL;
goto free_partial_kdata;
}
for (i = 0; i < p->gang_size; ++i) {
ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
@ -917,7 +919,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto out_free_user_pages;
}
r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
if (r) {
kvfree(e->user_pages);
e->user_pages = NULL;
@ -995,10 +997,12 @@ out_free_user_pages:
if (!e->user_pages)
continue;
amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
kvfree(e->user_pages);
e->user_pages = NULL;
e->range = NULL;
}
mutex_unlock(&p->bo_list->bo_list_mutex);
return r;
}
@ -1270,7 +1274,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
e->range = NULL;
}
if (r) {
r = -EAGAIN;


@ -1969,7 +1969,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_ta_if_debugfs_init(adev);
#if defined(CONFIG_DRM_AMD_DC)
if (amdgpu_device_has_dc_support(adev))
if (adev->dc_enabled)
dtn_debugfs_init(adev);
#endif


@ -1916,6 +1916,16 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
}
}
void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
adev->mode_info.num_crtc = 1;
adev->enable_virtual_display = true;
DRM_INFO("virtual_display:%d, num_crtc:%d\n",
adev->enable_virtual_display, adev->mode_info.num_crtc);
}
}
/**
* amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
*
@ -3348,8 +3358,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
*/
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev) ||
adev->enable_virtual_display ||
if (adev->enable_virtual_display ||
(adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
return false;
@ -4216,25 +4225,27 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
amdgpu_ras_resume(adev);
/*
* Most of the connector probing functions try to acquire runtime pm
* refs to ensure that the GPU is powered on when connector polling is
* performed. Since we're calling this from a runtime PM callback,
* trying to acquire rpm refs will cause us to deadlock.
*
* Since we're guaranteed to be holding the rpm lock, it's safe to
* temporarily disable the rpm helpers so this doesn't deadlock us.
*/
if (adev->mode_info.num_crtc) {
/*
* Most of the connector probing functions try to acquire runtime pm
* refs to ensure that the GPU is powered on when connector polling is
* performed. Since we're calling this from a runtime PM callback,
* trying to acquire rpm refs will cause us to deadlock.
*
* Since we're guaranteed to be holding the rpm lock, it's safe to
* temporarily disable the rpm helpers so this doesn't deadlock us.
*/
#ifdef CONFIG_PM
dev->dev->power.disable_depth++;
dev->dev->power.disable_depth++;
#endif
if (!amdgpu_device_has_dc_support(adev))
drm_helper_hpd_irq_event(dev);
else
drm_kms_helper_hotplug_event(dev);
if (!adev->dc_enabled)
drm_helper_hpd_irq_event(dev);
else
drm_kms_helper_hotplug_event(dev);
#ifdef CONFIG_PM
dev->dev->power.disable_depth--;
dev->dev->power.disable_depth--;
#endif
}
adev->in_suspend = false;
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
@ -4583,6 +4594,10 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
if (amdgpu_gpu_recovery == 0)
goto disabled;
/* Skip soft reset check in fatal error mode */
if (!amdgpu_ras_is_poison_mode_supported(adev))
return true;
if (!amdgpu_device_ip_check_soft_reset(adev)) {
dev_info(adev->dev,"Timeout, but no hardware hang detected.\n");
return false;
@ -5078,94 +5093,6 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
return 0;
}
static void amdgpu_device_recheck_guilty_jobs(
struct amdgpu_device *adev, struct list_head *device_list_handle,
struct amdgpu_reset_context *reset_context)
{
int i, r = 0;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
int ret = 0;
struct drm_sched_job *s_job;
if (!ring || !ring->sched.thread)
continue;
s_job = list_first_entry_or_null(&ring->sched.pending_list,
struct drm_sched_job, list);
if (s_job == NULL)
continue;
/* clear job's guilty and depend the folowing step to decide the real one */
drm_sched_reset_karma(s_job);
drm_sched_resubmit_jobs_ext(&ring->sched, 1);
if (!s_job->s_fence->parent) {
DRM_WARN("Failed to get a HW fence for job!");
continue;
}
ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
if (ret == 0) { /* timeout */
DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
ring->sched.name, s_job->id);
amdgpu_fence_driver_isr_toggle(adev, true);
/* Clear this failed job from fence array */
amdgpu_fence_driver_clear_job_fences(ring);
amdgpu_fence_driver_isr_toggle(adev, false);
/* Since the job won't signal and we go for
* another resubmit drop this parent pointer
*/
dma_fence_put(s_job->s_fence->parent);
s_job->s_fence->parent = NULL;
/* set guilty */
drm_sched_increase_karma(s_job);
amdgpu_reset_prepare_hwcontext(adev, reset_context);
retry:
/* do hw reset */
if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_fini_data_exchange(adev);
r = amdgpu_device_reset_sriov(adev, false);
if (r)
adev->asic_reset_res = r;
} else {
clear_bit(AMDGPU_SKIP_HW_RESET,
&reset_context->flags);
r = amdgpu_do_asic_reset(device_list_handle,
reset_context);
if (r && r == -EAGAIN)
goto retry;
}
/*
* add reset counter so that the following
* resubmitted job could flush vmid
*/
atomic_inc(&adev->gpu_reset_counter);
continue;
}
/* got the hw fence, signal finished fence */
atomic_dec(ring->sched.score);
dma_fence_get(&s_job->s_fence->finished);
dma_fence_signal(&s_job->s_fence->finished);
dma_fence_put(&s_job->s_fence->finished);
/* remove node from list and free the job */
spin_lock(&ring->sched.job_list_lock);
list_del_init(&s_job->list);
spin_unlock(&ring->sched.job_list_lock);
ring->sched.ops->free_job(s_job);
}
}
static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@ -5186,7 +5113,6 @@ static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
}
/**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
@ -5209,7 +5135,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
int i, r = 0;
bool need_emergency_restart = false;
bool audio_suspended = false;
int tmp_vram_lost_counter;
bool gpu_reset_for_dev_remove = false;
gpu_reset_for_dev_remove =
@ -5355,7 +5280,6 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
amdgpu_device_stop_pending_resets(tmp_adev);
}
tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
/* Actual ASIC resets if needed.*/
/* Host driver will handle XGMI hive reset for SRIOV */
if (amdgpu_sriov_vf(adev)) {
@ -5380,29 +5304,13 @@ skip_hw_reset:
/* Post ASIC reset for all devs .*/
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
/*
* Sometimes a later bad compute job can block a good gfx job as gfx
* and compute ring share internal GC HW mutually. We add an additional
* guilty jobs recheck step to find the real guilty job, it synchronously
* submits and pends for the first job being signaled. If it gets timeout,
* we identify it as a real guilty job.
*/
if (amdgpu_gpu_recovery == 2 &&
!(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
amdgpu_device_recheck_guilty_jobs(
tmp_adev, device_list_handle, reset_context);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
if (!ring || !ring->sched.thread)
continue;
/* No point to resubmit jobs if we didn't HW reset*/
if (!tmp_adev->asic_reset_res && !job_signaled)
drm_sched_resubmit_jobs(&ring->sched);
drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
drm_sched_start(&ring->sched, true);
}
if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
@ -5444,6 +5352,8 @@ skip_sched_resume:
amdgpu_device_resume_display_audio(tmp_adev);
amdgpu_device_unset_mp1_state(tmp_adev);
amdgpu_ras_set_error_query_ready(tmp_adev, true);
}
recover_end:
@ -5855,8 +5765,6 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
if (!ring || !ring->sched.thread)
continue;
drm_sched_resubmit_jobs(&ring->sched);
drm_sched_start(&ring->sched, true);
}
@ -6047,3 +5955,44 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
dma_fence_put(old);
return NULL;
}
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
case CHIP_HAINAN:
#endif
case CHIP_TOPAZ:
/* chips with no display hardware */
return false;
#ifdef CONFIG_DRM_AMDGPU_SI
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
case CHIP_KAVERI:
case CHIP_KABINI:
case CHIP_MULLINS:
#endif
case CHIP_TONGA:
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
case CHIP_CARRIZO:
case CHIP_STONEY:
/* chips with display hardware */
return true;
default:
/* IP discovery */
if (!adev->ip_versions[DCE_HWIP][0] ||
(adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
return false;
return true;
}
}


@ -305,8 +305,13 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
dev_warn(adev->dev, "get invalid ip discovery binary signature from vram\n");
if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin) || amdgpu_discovery == 2) {
/* ignore the discovery binary from vram if discovery=2 in kernel module parameter */
if (amdgpu_discovery == 2)
dev_info(adev->dev,"force read ip discovery binary from file");
else
dev_warn(adev->dev, "get invalid ip discovery binary signature from vram\n");
/* retry read ip discovery binary from file */
r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
if (r) {
@ -1697,9 +1702,15 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
return 0;
}
static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
{
amdgpu_device_set_sriov_virtual_display(adev);
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
}
static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) {
if (adev->enable_virtual_display) {
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
return 0;
}
@ -1727,7 +1738,10 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(3, 1, 6):
case IP_VERSION(3, 2, 0):
case IP_VERSION(3, 2, 1):
amdgpu_device_ip_block_add(adev, &dm_ip_block);
if (amdgpu_sriov_vf(adev))
amdgpu_discovery_set_sriov_display(adev);
else
amdgpu_device_ip_block_add(adev, &dm_ip_block);
break;
default:
dev_err(adev->dev,
@ -1740,7 +1754,10 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
case IP_VERSION(12, 1, 0):
amdgpu_device_ip_block_add(adev, &dm_ip_block);
if (amdgpu_sriov_vf(adev))
amdgpu_discovery_set_sriov_display(adev);
else
amdgpu_device_ip_block_add(adev, &dm_ip_block);
break;
default:
dev_err(adev->dev,
@ -2161,6 +2178,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(10, 3, 1):
adev->family = AMDGPU_FAMILY_VGH;
adev->apu_flags |= AMD_APU_IS_VANGOGH;
break;
case IP_VERSION(10, 3, 3):
adev->family = AMDGPU_FAMILY_YC;


@ -44,6 +44,41 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>
/**
* amdgpu_display_hotplug_work_func - work handler for display hotplug event
*
* @work: work struct pointer
*
* This is the hotplug event work handler (all ASICs).
* The work gets scheduled from the IRQ handler if there
* was a hotplug interrupt. It walks through the connector table
* and calls hotplug handler for each connector. After this, it sends
* a DRM hotplug event to alert userspace.
*
* This design approach is required in order to defer hotplug event handling
* from the IRQ handler to a work handler because hotplug handler has to use
* mutexes which cannot be locked in an IRQ handler (since &mutex_lock may
* sleep).
*/
void amdgpu_display_hotplug_work_func(struct work_struct *work)
{
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
hotplug_work);
struct drm_device *dev = adev_to_drm(adev);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
mutex_lock(&mode_config->mutex);
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter)
amdgpu_connector_hotplug(connector);
drm_connector_list_iter_end(&iter);
mutex_unlock(&mode_config->mutex);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
}
static int amdgpu_display_framebuffer_init(struct drm_device *dev,
struct amdgpu_framebuffer *rfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
@ -514,7 +549,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
*/
if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
amdgpu_bo_support_uswc(bo_flags) &&
amdgpu_device_asic_has_dc_support(adev->asic_type) &&
adev->dc_enabled &&
adev->mode_info.gpu_vm_support)
domain |= AMDGPU_GEM_DOMAIN_GTT;
#endif
@ -1280,7 +1315,7 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
"dither",
amdgpu_dither_enum_list, sz);
if (amdgpu_device_has_dc_support(adev)) {
if (adev->dc_enabled) {
adev->mode_info.abm_level_property =
drm_property_create_range(adev_to_drm(adev), 0,
"abm level", 0, 4);


@ -35,6 +35,7 @@
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
void amdgpu_display_hotplug_work_func(struct work_struct *work);
void amdgpu_display_update_priority(struct amdgpu_device *adev);
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
uint64_t bo_flags);


@ -328,7 +328,9 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
if (dma_buf->ops == &amdgpu_dmabuf_ops) {
struct amdgpu_bo *other = gem_to_amdgpu_bo(dma_buf->priv);
flags |= other->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC;
flags |= other->flags & (AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_COHERENT |
AMDGPU_GEM_CREATE_UNCACHED);
}
ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,


@ -534,7 +534,7 @@ module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
* DOC: gpu_recovery (int)
* Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
*/
MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (2 = advanced tdr mode, 1 = enable, 0 = disable, -1 = auto)");
MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
/**
@ -1925,9 +1925,6 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
/* Van Gogh */
{0x1002, 0x163F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VANGOGH|AMD_IS_APU},
/* Yellow Carp */
{0x1002, 0x164D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU},
{0x1002, 0x1681, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU},
@ -2472,7 +2469,7 @@ static int amdgpu_runtime_idle_check_display(struct device *dev)
if (ret)
return ret;
if (amdgpu_device_has_dc_support(adev)) {
if (adev->dc_enabled) {
struct drm_crtc *crtc;
drm_for_each_crtc(crtc, drm_dev) {


@ -79,13 +79,15 @@
* That is, for an I2C EEPROM driver everything is controlled by
* the "eeprom_addr".
*
* See also top of amdgpu_ras_eeprom.c.
*
* P.S. If you need to write, lock and read the Identification Page,
* (M24M02-DR device only, which we do not use), change the "7" to
* "0xF" in the macro below, and let the client set bit 20 to 1 in
* "eeprom_addr", and set A10 to 0 to write into it, and A10 and A1 to
* 1 to lock it permanently.
*/
#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 7))
#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 0xF))
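/* Illustrative worked examples for the widened mask above, assuming the
 * 19-bit EEPROM memory addresses described in the comment (these match the
 * RAS and FRU EEPROM addresses used elsewhere in this series):
 *   MAKE_I2C_ADDR(0x00000) = (0xA << 3) | 0x0 = 0x50 -> device address 50h
 *   MAKE_I2C_ADDR(0x40000) = (0xA << 3) | 0x4 = 0x54 -> device address 54h
 *   MAKE_I2C_ADDR(0x60000) = (0xA << 3) | 0x6 = 0x56 -> device address 56h
 */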
static int __amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
u8 *eeprom_buf, u16 buf_size, bool read)


@ -29,9 +29,10 @@
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_eeprom.h"
#define FRU_EEPROM_MADDR 0x60000
#define FRU_EEPROM_MADDR_6 0x60000
#define FRU_EEPROM_MADDR_8 0x80000
static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
{
/* Only server cards have the FRU EEPROM
* TODO: See if we can figure this out dynamically instead of
@ -45,6 +46,11 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return false;
/* The default I2C EEPROM address of the FRU.
*/
if (fru_addr)
*fru_addr = FRU_EEPROM_MADDR_8;
/* VBIOS is of the format ###-DXXXYYYY-##. For SKU identification,
* we can use just the "DXXX" portion. If there were more models, we
* could convert the 3 characters to a hex integer and use a switch
@ -57,21 +63,29 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
if (strnstr(atom_ctx->vbios_version, "D161",
sizeof(atom_ctx->vbios_version)) ||
strnstr(atom_ctx->vbios_version, "D163",
sizeof(atom_ctx->vbios_version)))
sizeof(atom_ctx->vbios_version))) {
*fru_addr = FRU_EEPROM_MADDR_6;
return true;
else
} else {
return false;
}
case CHIP_ALDEBARAN:
/* All Aldebaran SKUs have the FRU */
/* All Aldebaran SKUs have an FRU */
if (!strnstr(atom_ctx->vbios_version, "D673",
sizeof(atom_ctx->vbios_version)))
if (fru_addr)
*fru_addr = FRU_EEPROM_MADDR_6;
return true;
case CHIP_SIENNA_CICHLID:
if (strnstr(atom_ctx->vbios_version, "D603",
sizeof(atom_ctx->vbios_version))) {
sizeof(atom_ctx->vbios_version))) {
if (strnstr(atom_ctx->vbios_version, "D603GLXE",
sizeof(atom_ctx->vbios_version)))
sizeof(atom_ctx->vbios_version))) {
return false;
else
} else {
*fru_addr = FRU_EEPROM_MADDR_6;
return true;
}
} else {
return false;
}
@ -80,41 +94,14 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
}
}
static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
unsigned char *buf, size_t buf_size)
{
int ret;
u8 size;
ret = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addrptr, buf, 1);
if (ret < 1) {
DRM_WARN("FRU: Failed to get size field");
return ret;
}
/* The size returned by the i2c requires subtraction of 0xC0 since the
* size apparently always reports as 0xC0+actual size.
*/
size = buf[0] & 0x3F;
size = min_t(size_t, size, buf_size);
ret = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addrptr + 1,
buf, size);
if (ret < 1) {
DRM_WARN("FRU: Failed to get data field");
return ret;
}
return size;
}
int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
{
unsigned char buf[AMDGPU_PRODUCT_NAME_LEN];
u32 addrptr;
unsigned char buf[8], *pia;
u32 addr, fru_addr;
int size, len;
u8 csum;
if (!is_fru_eeprom_supported(adev))
if (!is_fru_eeprom_supported(adev, &fru_addr))
return 0;
/* If algo exists, it means that the i2c_adapter's initialized */
@ -123,88 +110,102 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
return -ENODEV;
}
/* There's a lot of repetition here. This is due to the FRU having
* variable-length fields. To get the information, we have to find the
* size of each field, and then keep reading along and reading along
* until we get all of the data that we want. We use addrptr to track
* the address as we go
/* Read the IPMI Common header */
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, fru_addr, buf,
sizeof(buf));
if (len != 8) {
DRM_ERROR("Couldn't read the IPMI Common Header: %d", len);
return len < 0 ? len : -EIO;
}
if (buf[0] != 1) {
DRM_ERROR("Bad IPMI Common Header version: 0x%02x", buf[0]);
return -EIO;
}
for (csum = 0; len > 0; len--)
csum += buf[len - 1];
if (csum) {
DRM_ERROR("Bad IPMI Common Header checksum: 0x%02x", csum);
return -EIO;
}
/* Get the offset to the Product Info Area (PIA). */
addr = buf[4] * 8;
if (!addr)
return 0;
/* Get the absolute address to the PIA. */
addr += fru_addr;
/* Read the header of the PIA. */
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, buf, 3);
if (len != 3) {
DRM_ERROR("Couldn't read the Product Info Area header: %d", len);
return len < 0 ? len : -EIO;
}
if (buf[0] != 1) {
DRM_ERROR("Bad IPMI Product Info Area version: 0x%02x", buf[0]);
return -EIO;
}
size = buf[1] * 8;
pia = kzalloc(size, GFP_KERNEL);
if (!pia)
return -ENOMEM;
/* Read the whole PIA. */
len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, pia, size);
if (len != size) {
kfree(pia);
DRM_ERROR("Couldn't read the Product Info Area: %d", len);
return len < 0 ? len : -EIO;
}
for (csum = 0; size > 0; size--)
csum += pia[size - 1];
if (csum) {
DRM_ERROR("Bad Product Info Area checksum: 0x%02x", csum);
return -EIO;
}
/* Now extract useful information from the PIA.
*
* Skip the Manufacturer Name at [3] and go directly to
* the Product Name field.
*/
addr = 3 + 1 + (pia[3] & 0x3F);
if (addr + 1 >= len)
goto Out;
memcpy(adev->product_name, pia + addr + 1,
min_t(size_t,
sizeof(adev->product_name),
pia[addr] & 0x3F));
adev->product_name[sizeof(adev->product_name) - 1] = '\0';
/* The first fields are all of size 1-byte, from 0-7 are offsets that
* contain information that isn't useful to us.
* Bytes 8-a are all 1-byte and refer to the size of the entire struct,
* and the language field, so just start from 0xb, manufacturer size
*/
addrptr = FRU_EEPROM_MADDR + 0xb;
size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
if (size < 1) {
DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
return -EINVAL;
}
/* Go to the Product Part/Model Number field. */
addr += 1 + (pia[addr] & 0x3F);
if (addr + 1 >= len)
goto Out;
memcpy(adev->product_number, pia + addr + 1,
min_t(size_t,
sizeof(adev->product_number),
pia[addr] & 0x3F));
adev->product_number[sizeof(adev->product_number) - 1] = '\0';
/* Increment the addrptr by the size of the field, and 1 due to the
* size field being 1 byte. This pattern continues below.
*/
addrptr += size + 1;
size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
if (size < 1) {
DRM_ERROR("Failed to read FRU product name, ret:%d", size);
return -EINVAL;
}
len = size;
if (len >= AMDGPU_PRODUCT_NAME_LEN) {
DRM_WARN("FRU Product Name is larger than %d characters. This is likely a mistake",
AMDGPU_PRODUCT_NAME_LEN);
len = AMDGPU_PRODUCT_NAME_LEN - 1;
}
memcpy(adev->product_name, buf, len);
adev->product_name[len] = '\0';
addrptr += size + 1;
size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
if (size < 1) {
DRM_ERROR("Failed to read FRU product number, ret:%d", size);
return -EINVAL;
}
len = size;
/* Product number should only be 16 characters. Any more,
* and something could be wrong. Cap it at 16 to be safe
*/
if (len >= sizeof(adev->product_number)) {
DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
len = sizeof(adev->product_number) - 1;
}
memcpy(adev->product_number, buf, len);
adev->product_number[len] = '\0';
addrptr += size + 1;
size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
if (size < 1) {
DRM_ERROR("Failed to read FRU product version, ret:%d", size);
return -EINVAL;
}
addrptr += size + 1;
size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
if (size < 1) {
DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
return -EINVAL;
}
len = size;
/* Serial number should only be 16 characters. Any more,
* and something could be wrong. Cap it at 16 to be safe
*/
if (len >= sizeof(adev->serial)) {
DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
len = sizeof(adev->serial) - 1;
}
memcpy(adev->serial, buf, len);
adev->serial[len] = '\0';
/* Go to the Product Version field. */
addr += 1 + (pia[addr] & 0x3F);
/* Go to the Product Serial Number field. */
addr += 1 + (pia[addr] & 0x3F);
if (addr + 1 >= len)
goto Out;
memcpy(adev->serial, pia + addr + 1, min_t(size_t,
sizeof(adev->serial),
pia[addr] & 0x3F));
adev->serial[sizeof(adev->serial) - 1] = '\0';
Out:
kfree(pia);
return 0;
}
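
The PIA walk above relies on the IPMI FRU convention that each info field begins with a type/length byte whose low 6 bits give the payload length in bytes, so skipping a field means stepping over that byte plus its payload. A minimal sketch of that stepping, with fru_next_field() as a hypothetical helper rather than driver code:

	/* Advance from the type/length byte of one IPMI FRU info field to
	 * the next field; bits 5:0 of pia[offset] hold the payload length.
	 */
	static unsigned int fru_next_field(const unsigned char *pia,
					   unsigned int offset)
	{
		return offset + 1 + (pia[offset] & 0x3F);
	}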


@ -38,6 +38,7 @@
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_hmm.h"
#include "amdgpu_xgmi.h"
static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
@ -87,7 +88,7 @@ static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
if (robj) {
amdgpu_mn_unregister(robj);
amdgpu_hmm_unregister(robj);
amdgpu_bo_unref(&robj);
}
}
@ -378,6 +379,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_gem_userptr *args = data;
struct drm_gem_object *gobj;
struct hmm_range *range;
struct amdgpu_bo *bo;
uint32_t handle;
int r;
@ -413,14 +415,13 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
if (r)
goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
r = amdgpu_mn_register(bo, args->addr);
if (r)
goto release_object;
}
r = amdgpu_hmm_register(bo, args->addr);
if (r)
goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
&range);
if (r)
goto release_object;
@ -443,7 +444,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
user_pages_done:
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
release_object:
drm_gem_object_put(gobj);


@ -583,10 +583,14 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
if (adev->gfx.gfx_off_req_count == 0 &&
!adev->gfx.gfx_off_state) {
/* If going to s2idle, no need to wait */
if (adev->in_s0ix)
delay = GFX_OFF_NO_DELAY;
schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
if (adev->in_s0ix) {
if (!amdgpu_dpm_set_powergating_by_smu(adev,
AMD_IP_BLOCK_TYPE_GFX, true))
adev->gfx.gfx_off_state = true;
} else {
schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
delay);
}
}
} else {
if (adev->gfx.gfx_off_req_count == 0) {


@ -657,7 +657,7 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
}
if (amdgpu_sriov_vf(adev) ||
!amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) {
!amdgpu_device_has_display_hardware(adev)) {
size = 0;
} else {
size = amdgpu_gmc_get_vbios_fb_size(adev);


@ -49,9 +49,10 @@
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_hmm.h"
/**
* amdgpu_mn_invalidate_gfx - callback to notify about mm change
* amdgpu_hmm_invalidate_gfx - callback to notify about mm change
*
* @mni: the range (mm) is about to update
* @range: details on the invalidation
@ -60,9 +61,9 @@
* Block for operations on BOs to finish and mark pages as accessed and
* potentially dirty.
*/
static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
static bool amdgpu_hmm_invalidate_gfx(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
{
struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@ -83,12 +84,12 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
return true;
}
static const struct mmu_interval_notifier_ops amdgpu_mn_gfx_ops = {
.invalidate = amdgpu_mn_invalidate_gfx,
static const struct mmu_interval_notifier_ops amdgpu_hmm_gfx_ops = {
.invalidate = amdgpu_hmm_invalidate_gfx,
};
/**
* amdgpu_mn_invalidate_hsa - callback to notify about mm change
* amdgpu_hmm_invalidate_hsa - callback to notify about mm change
*
* @mni: the range (mm) is about to update
* @range: details on the invalidation
@ -97,9 +98,9 @@ static const struct mmu_interval_notifier_ops amdgpu_mn_gfx_ops = {
* We temporarily evict the BO attached to this range. This necessitates
* evicting all user-mode queues of the process.
*/
static bool amdgpu_mn_invalidate_hsa(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
static bool amdgpu_hmm_invalidate_hsa(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
{
struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@ -117,12 +118,12 @@ static bool amdgpu_mn_invalidate_hsa(struct mmu_interval_notifier *mni,
return true;
}
static const struct mmu_interval_notifier_ops amdgpu_mn_hsa_ops = {
.invalidate = amdgpu_mn_invalidate_hsa,
static const struct mmu_interval_notifier_ops amdgpu_hmm_hsa_ops = {
.invalidate = amdgpu_hmm_invalidate_hsa,
};
/**
* amdgpu_mn_register - register a BO for notifier updates
* amdgpu_hmm_register - register a BO for notifier updates
*
* @bo: amdgpu buffer object
* @addr: userptr addr we should monitor
@ -130,25 +131,25 @@ static const struct mmu_interval_notifier_ops amdgpu_mn_hsa_ops = {
* Registers a mmu_notifier for the given BO at the specified address.
* Returns 0 on success, -ERRNO if anything goes wrong.
*/
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
{
if (bo->kfd_bo)
return mmu_interval_notifier_insert(&bo->notifier, current->mm,
addr, amdgpu_bo_size(bo),
&amdgpu_mn_hsa_ops);
&amdgpu_hmm_hsa_ops);
return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
amdgpu_bo_size(bo),
&amdgpu_mn_gfx_ops);
&amdgpu_hmm_gfx_ops);
}
/**
* amdgpu_mn_unregister - unregister a BO for notifier updates
* amdgpu_hmm_unregister - unregister a BO for notifier updates
*
* @bo: amdgpu buffer object
*
* Remove any registration of mmu notifier updates from the buffer object.
*/
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
void amdgpu_hmm_unregister(struct amdgpu_bo *bo)
{
if (!bo->notifier.mm)
return;
@ -157,10 +158,9 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
}
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
struct mm_struct *mm, struct page **pages,
uint64_t start, uint64_t npages,
struct hmm_range **phmm_range, bool readonly,
bool mmap_locked, void *owner)
uint64_t start, uint64_t npages, bool readonly,
void *owner, struct page **pages,
struct hmm_range **phmm_range)
{
struct hmm_range *hmm_range;
unsigned long timeout;
@ -193,14 +193,7 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
retry:
hmm_range->notifier_seq = mmu_interval_read_begin(notifier);
if (likely(!mmap_locked))
mmap_read_lock(mm);
r = hmm_range_fault(hmm_range);
if (likely(!mmap_locked))
mmap_read_unlock(mm);
if (unlikely(r)) {
/*
* FIXME: This timeout should encompass the retry from


@ -31,23 +31,22 @@
#include <linux/interval_tree.h>
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
struct mm_struct *mm, struct page **pages,
uint64_t start, uint64_t npages,
struct hmm_range **phmm_range, bool readonly,
bool mmap_locked, void *owner);
uint64_t start, uint64_t npages, bool readonly,
void *owner, struct page **pages,
struct hmm_range **phmm_range);
int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
#if defined(CONFIG_HMM_MIRROR)
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_hmm_unregister(struct amdgpu_bo *bo);
#else
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
static inline int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
{
DRM_WARN_ONCE("HMM_MIRROR kernel config option is not enabled, "
"add CONFIG_ZONE_DEVICE=y in config file to fix this\n");
return -ENODEV;
}
static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
static inline void amdgpu_hmm_unregister(struct amdgpu_bo *bo) {}
#endif
#endif


@ -100,41 +100,6 @@ const char *soc15_ih_clientid_name[] = {
"MP1"
};
/**
* amdgpu_hotplug_work_func - work handler for display hotplug event
*
* @work: work struct pointer
*
* This is the hotplug event work handler (all ASICs).
* The work gets scheduled from the IRQ handler if there
* was a hotplug interrupt. It walks through the connector table
* and calls hotplug handler for each connector. After this, it sends
* a DRM hotplug event to alert userspace.
*
* This design approach is required in order to defer hotplug event handling
* from the IRQ handler to a work handler because hotplug handler has to use
* mutexes which cannot be locked in an IRQ handler (since &mutex_lock may
* sleep).
*/
static void amdgpu_hotplug_work_func(struct work_struct *work)
{
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
hotplug_work);
struct drm_device *dev = adev_to_drm(adev);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
mutex_lock(&mode_config->mutex);
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter)
amdgpu_connector_hotplug(connector);
drm_connector_list_iter_end(&iter);
mutex_unlock(&mode_config->mutex);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
}
/**
* amdgpu_irq_disable_all - disable *all* interrupts
*
@ -317,21 +282,6 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
}
}
if (!amdgpu_device_has_dc_support(adev)) {
if (!adev->enable_virtual_display)
/* Disable vblank IRQs aggressively for power-saving */
/* XXX: can this be enabled for DC? */
adev_to_drm(adev)->vblank_disable_immediate = true;
r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
if (r)
return r;
/* Pre-DCE11 */
INIT_WORK(&adev->hotplug_work,
amdgpu_hotplug_work_func);
}
INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);
@ -345,11 +295,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
/* PCI devices require shared interrupts. */
r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name,
adev_to_drm(adev));
if (r) {
if (!amdgpu_device_has_dc_support(adev))
flush_work(&adev->hotplug_work);
if (r)
return r;
}
adev->irq.installed = true;
adev->irq.irq = irq;
adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
@ -366,9 +313,6 @@ void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
adev->irq.installed = false;
if (adev->irq.msi_enabled)
pci_free_irq_vectors(adev->pdev);
if (!amdgpu_device_has_dc_support(adev))
flush_work(&adev->hotplug_work);
}
amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);


@ -235,3 +235,20 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
return 0;
}
void jpeg_set_ras_funcs(struct amdgpu_device *adev)
{
if (!adev->jpeg.ras)
return;
amdgpu_ras_register_ras_block(adev, &adev->jpeg.ras->ras_block);
strcpy(adev->jpeg.ras->ras_block.ras_comm.name, "jpeg");
adev->jpeg.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__JPEG;
adev->jpeg.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
adev->jpeg.ras_if = &adev->jpeg.ras->ras_block.ras_comm;
/* If don't define special ras_late_init function, use default ras_late_init */
if (!adev->jpeg.ras->ras_block.ras_late_init)
adev->jpeg.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
}


@ -72,5 +72,6 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
void jpeg_set_ras_funcs(struct amdgpu_device *adev);
#endif /*__AMDGPU_JPEG_H__*/


@ -52,6 +52,32 @@ static int psp_load_smu_fw(struct psp_context *psp);
static int psp_rap_terminate(struct psp_context *psp);
static int psp_securedisplay_terminate(struct psp_context *psp);
static int psp_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
ring = &psp->km_ring;
ring->ring_type = ring_type;
/* allocate 4k Page of Local Frame Buffer memory for ring */
ring->ring_size = 0x1000;
ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
if (ret) {
ring->ring_size = 0;
return ret;
}
return 0;
}
/*
* Due to DF Cstate management centralized to PMFW, the firmware
* loading sequence will be updated as below:
@ -1526,11 +1552,6 @@ int psp_ras_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(adev))
return 0;
if (psp->ras_context.context.initialized) {
dev_warn(adev->dev, "RAS WARN: TA has already been loaded\n");
return 0;
}
if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
!adev->psp.ras_context.context.bin_desc.start_addr) {
dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
@ -1602,6 +1623,9 @@ int psp_ras_initialize(struct psp_context *psp)
else {
if (ras_cmd->ras_status)
dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
/* fail to load RAS TA */
psp->ras_context.context.initialized = false;
}
return ret;


@ -118,7 +118,6 @@ struct psp_funcs
int (*bootloader_load_dbg_drv)(struct psp_context *psp);
int (*bootloader_load_ras_drv)(struct psp_context *psp);
int (*bootloader_load_sos)(struct psp_context *psp);
int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
int (*ring_create)(struct psp_context *psp,
enum psp_ring_type ring_type);
int (*ring_stop)(struct psp_context *psp,
@ -396,7 +395,6 @@ struct amdgpu_psp_funcs {
};
#define psp_ring_init(psp, type) (psp)->funcs->ring_init((psp), (type))
#define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type))
#define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type))
#define psp_ring_destroy(psp, type) ((psp)->funcs->ring_destroy((psp), (type)))


@ -1948,7 +1948,12 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
/* Perform full reset in fatal error mode */
if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
else
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
}
@ -2343,7 +2348,8 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
1 << AMDGPU_RAS_BLOCK__DF);
if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0))
if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||
adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))
adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
1 << AMDGPU_RAS_BLOCK__JPEG);
else


@ -33,12 +33,29 @@
#include "amdgpu_reset.h"
#define EEPROM_I2C_MADDR_VEGA20 0x0
#define EEPROM_I2C_MADDR_ARCTURUS 0x40000
#define EEPROM_I2C_MADDR_ARCTURUS_D342 0x0
#define EEPROM_I2C_MADDR_SIENNA_CICHLID 0x0
#define EEPROM_I2C_MADDR_ALDEBARAN 0x0
#define EEPROM_I2C_MADDR_54H (0x54UL << 16)
/* These are memory addresses as seen by one or more EEPROM chips strung
* on the I2C bus, usually selected by manipulating pins 1-3 of a set of
* EEPROM devices. They form a continuous memory space.
*
* The I2C device address includes the device type identifier, 1010b,
* which is a reserved value and indicates that this is an I2C EEPROM
* device. It also includes the top 3 bits of the 19 bit EEPROM memory
* address, namely bits 18, 17, and 16. This makes up the 7 bit
* address sent on the I2C bus with bit 0 being the direction bit,
* which is not represented here, and sent by the hardware directly.
*
* For instance,
* 50h = 1010000b => device type identifier 1010b, bits 18:16 = 000b, address 0.
* 54h = 1010100b => --"--, bits 18:16 = 100b, address 40000h.
* 56h = 1010110b => --"--, bits 18:16 = 110b, address 60000h.
* Depending on the size of the I2C EEPROM device(s), bits 18:16 may
* address memory within a device or select a device on the I2C bus,
* according to the status of pins 1-3. See the top of amdgpu_eeprom.c.
*
* The RAS table lives either at address 0 or address 40000h of EEPROM.
*/
#define EEPROM_I2C_MADDR_0 0x0
#define EEPROM_I2C_MADDR_4 0x40000
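/* Worked example (illustrative, assuming the 1010b device-type prefix
 * described above; eeprom_i2c_dev_addr() is a hypothetical helper, not a
 * driver function): the 7-bit I2C device address is the prefix in bits
 * 6:3 with EEPROM memory-address bits 18:16 in bits 2:0.
 */
static inline u8 eeprom_i2c_dev_addr(u32 maddr)
{
	/* 0x50 | 000b -> 0x50 for address 0x0,
	 * 0x50 | 100b -> 0x54 for address 0x40000
	 */
	return 0x50 | ((maddr >> 16) & 0x7);
}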
/*
* The two macros below represent the actual size in bytes that
@ -117,9 +134,9 @@ static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
if (strnstr(atom_ctx->vbios_version,
"D342",
sizeof(atom_ctx->vbios_version)))
control->i2c_address = EEPROM_I2C_MADDR_ARCTURUS_D342;
control->i2c_address = EEPROM_I2C_MADDR_0;
else
control->i2c_address = EEPROM_I2C_MADDR_ARCTURUS;
control->i2c_address = EEPROM_I2C_MADDR_4;
return true;
}
@ -130,7 +147,7 @@ static bool __get_eeprom_i2c_addr_ip_discovery(struct amdgpu_device *adev,
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 10):
control->i2c_address = EEPROM_I2C_MADDR_54H;
control->i2c_address = EEPROM_I2C_MADDR_4;
return true;
default:
return false;
@ -140,6 +157,7 @@ static bool __get_eeprom_i2c_addr_ip_discovery(struct amdgpu_device *adev,
static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
struct amdgpu_ras_eeprom_control *control)
{
struct atom_context *atom_ctx = adev->mode_info.atom_context;
u8 i2c_addr;
if (!control)
@ -162,18 +180,22 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
switch (adev->asic_type) {
case CHIP_VEGA20:
control->i2c_address = EEPROM_I2C_MADDR_VEGA20;
control->i2c_address = EEPROM_I2C_MADDR_0;
break;
case CHIP_ARCTURUS:
return __get_eeprom_i2c_addr_arct(adev, control);
case CHIP_SIENNA_CICHLID:
control->i2c_address = EEPROM_I2C_MADDR_SIENNA_CICHLID;
control->i2c_address = EEPROM_I2C_MADDR_0;
break;
case CHIP_ALDEBARAN:
control->i2c_address = EEPROM_I2C_MADDR_ALDEBARAN;
if (strnstr(atom_ctx->vbios_version, "D673",
sizeof(atom_ctx->vbios_version)))
control->i2c_address = EEPROM_I2C_MADDR_4;
else
control->i2c_address = EEPROM_I2C_MADDR_0;
break;
case CHIP_IP_DISCOVERY:
@ -185,7 +207,7 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 0):
control->i2c_address = EEPROM_I2C_MADDR_54H;
control->i2c_address = EEPROM_I2C_MADDR_4;
break;
default:

View File

@ -58,6 +58,7 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "amdgpu_hmm.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_res_cursor.h"
#include "bif/bif_4_1_d.h"
@ -634,9 +635,6 @@ struct amdgpu_ttm_tt {
struct task_struct *usertask;
uint32_t userflags;
bool bound;
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
struct hmm_range *range;
#endif
};
#define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
@ -649,7 +647,8 @@ struct amdgpu_ttm_tt {
* The calling function must call amdgpu_ttm_tt_get_user_pages_done() once
* and only once afterwards to stop HMM tracking
*/
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
struct hmm_range **range)
{
struct ttm_tt *ttm = bo->tbo.ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
@ -659,16 +658,15 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
bool readonly;
int r = 0;
/* Make sure get_user_pages_done() can cleanup gracefully */
*range = NULL;
mm = bo->notifier.mm;
if (unlikely(!mm)) {
DRM_DEBUG_DRIVER("BO is not registered?\n");
return -EFAULT;
}
/* Another get_user_pages is running at the same time?? */
if (WARN_ON(gtt->range))
return -EFAULT;
if (!mmget_not_zero(mm)) /* Happens during process shutdown */
return -ESRCH;
@ -685,9 +683,8 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
}
readonly = amdgpu_ttm_tt_is_readonly(ttm);
r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
ttm->num_pages, &gtt->range, readonly,
true, NULL);
r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
readonly, NULL, pages, range);
out_unlock:
mmap_read_unlock(mm);
if (r)
@ -704,30 +701,24 @@ out_unlock:
*
* Returns: true if pages are still valid
*/
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
struct hmm_range *range)
{
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
bool r = false;
if (!gtt || !gtt->userptr)
if (!gtt || !gtt->userptr || !range)
return false;
DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
gtt->userptr, ttm->num_pages);
WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
"No user pages to check\n");
WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
if (gtt->range) {
/*
* FIXME: Must always hold notifier_lock for this, and must
* not ignore the return code.
*/
r = amdgpu_hmm_range_get_pages_done(gtt->range);
gtt->range = NULL;
}
return !r;
/*
* FIXME: Must always hold notifier_lock for this, and must
* not ignore the return code.
*/
return !amdgpu_hmm_range_get_pages_done(range);
}
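/* Typical caller pattern (a sketch; the error handling shown here is
 * hypothetical): the hmm_range cookie now travels with the caller instead
 * of living in the TT, so a userptr update looks roughly like:
 *
 *	struct hmm_range *range;
 *
 *	r = amdgpu_ttm_tt_get_user_pages(bo, pages, &range);
 *	if (r)
 *		return r;
 *	// ... populate and bind the pages ...
 *	if (!amdgpu_ttm_tt_get_user_pages_done(ttm, range))
 *		// pages were invalidated concurrently, retry
 */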
#endif
@ -804,20 +795,6 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
/* unmap the pages mapped to the device */
dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
sg_free_table(ttm->sg);
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
if (gtt->range) {
unsigned long i;
for (i = 0; i < ttm->num_pages; i++) {
if (ttm->pages[i] !=
hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
break;
}
WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
}
#endif
}
static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
@ -1168,8 +1145,9 @@ int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
* @addr: The address in the current tasks VM space to use
* @flags: Requirements of userptr object.
*
* Called by amdgpu_gem_userptr_ioctl() to bind userptr pages
* to current task
* Called by amdgpu_gem_userptr_ioctl() and kfd_ioctl_alloc_memory_of_gpu() to
* bind userptr pages to current task and by kfd_ioctl_acquire_vm() to
* initialize GPU VM for a KFD process.
*/
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
uint64_t addr, uint32_t flags)
@ -1553,6 +1531,23 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
NULL, &adev->mman.fw_vram_usage_va);
}
/*
* Driver Reservation functions
*/
/**
* amdgpu_ttm_drv_reserve_vram_fini - free drv reserved vram
*
* @adev: amdgpu_device pointer
*
* Free driver-reserved VRAM if it has been reserved.
*/
static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo,
NULL,
NULL);
}
/**
* amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
*
@ -1579,6 +1574,31 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
&adev->mman.fw_vram_usage_va);
}
/**
* amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver
*
* @adev: amdgpu_device pointer
*
* Create a BO VRAM reservation requested by the driver.
*/
static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
{
uint64_t vram_size = adev->gmc.visible_vram_size;
adev->mman.drv_vram_usage_reserved_bo = NULL;
if (adev->mman.drv_vram_usage_size == 0 ||
adev->mman.drv_vram_usage_size > vram_size)
return 0;
return amdgpu_bo_create_kernel_at(adev,
adev->mman.drv_vram_usage_start_offset,
adev->mman.drv_vram_usage_size,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->mman.drv_vram_usage_reserved_bo,
NULL);
}
/*
* Memory training reservation functions
*/
@ -1746,6 +1766,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
/*
* The driver-reserved VRAM must be pinned to the specified
* place in VRAM, so reserve it early.
*/
r = amdgpu_ttm_drv_reserve_vram_init(adev);
if (r)
return r;
/*
* only NAVI10 and onwards ASIC support for IP discovery.
* If IP discovery enabled, a block of memory should be
@ -1871,6 +1899,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
&adev->mman.sdma_access_ptr);
amdgpu_ttm_fw_reserve_vram_fini(adev);
amdgpu_ttm_drv_reserve_vram_fini(adev);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {

View File

@ -39,6 +39,8 @@
#define AMDGPU_POISON 0xd0bed0be
struct hmm_range;
struct amdgpu_gtt_mgr {
struct ttm_resource_manager manager;
struct drm_mm mm;
@ -84,6 +86,11 @@ struct amdgpu_mman {
struct amdgpu_bo *fw_vram_usage_reserved_bo;
void *fw_vram_usage_va;
/* driver VRAM reservation */
u64 drv_vram_usage_start_offset;
u64 drv_vram_usage_size;
struct amdgpu_bo *drv_vram_usage_reserved_bo;
/* PAGE_SIZE'd BO for process memory r/w over SDMA. */
struct amdgpu_bo *sdma_access_bo;
void *sdma_access_ptr;
@ -149,15 +156,19 @@ void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages);
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm);
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
struct hmm_range **range);
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
struct hmm_range *range);
#else
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
struct page **pages)
struct page **pages,
struct hmm_range **range)
{
return -EPERM;
}
static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
struct hmm_range *range)
{
return false;
}

View File

@ -1252,3 +1252,20 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
return 0;
}
void amdgpu_vcn_set_ras_funcs(struct amdgpu_device *adev)
{
if (!adev->vcn.ras)
return;
amdgpu_ras_register_ras_block(adev, &adev->vcn.ras->ras_block);
strcpy(adev->vcn.ras->ras_block.ras_comm.name, "vcn");
adev->vcn.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
adev->vcn.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
adev->vcn.ras_if = &adev->vcn.ras->ras_block.ras_comm;
/* If no special ras_late_init function is defined, use the default ras_late_init */
if (!adev->vcn.ras->ras_block.ras_late_init)
adev->vcn.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
}
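/* Usage sketch (illustrative): IP-version code only selects the ras
 * struct and then delegates registration to this common helper, e.g.
 *
 *	adev->vcn.ras = &vcn_v4_0_ras;
 *	amdgpu_vcn_set_ras_funcs(adev);
 */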

View File

@ -399,5 +399,6 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
void amdgpu_vcn_set_ras_funcs(struct amdgpu_device *adev);
#endif

View File

@ -511,6 +511,10 @@ static int amdgpu_vkms_sw_init(void *handle)
return r;
}
r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
if (r)
return r;
drm_kms_helper_poll_init(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = true;

View File

@ -143,32 +143,6 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return 0;
}
/*
* vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
* happens while holding this lock anywhere to prevent deadlocks when
* an MMU notifier runs in reclaim-FS context.
*/
static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
{
mutex_lock(&vm->eviction_lock);
vm->saved_flags = memalloc_noreclaim_save();
}
static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
{
if (mutex_trylock(&vm->eviction_lock)) {
vm->saved_flags = memalloc_noreclaim_save();
return 1;
}
return 0;
}
static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
{
memalloc_noreclaim_restore(vm->saved_flags);
mutex_unlock(&vm->eviction_lock);
}
/**
* amdgpu_vm_bo_evicted - vm_bo is evicted
*

View File

@ -492,7 +492,48 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
*/
static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
{
unsigned long flags;
spinlock_t *lock;
/*
* Workaround to stop the race between the fence signaling and handling
* the callback. The lock is static after it is initially set up; just
* make sure the dma_fence structure isn't freed up.
*/
rcu_read_lock();
lock = vm->last_tlb_flush->lock;
rcu_read_unlock();
spin_lock_irqsave(lock, flags);
spin_unlock_irqrestore(lock, flags);
return atomic64_read(&vm->tlb_seq);
}
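/* Usage sketch (hypothetical caller, not part of this patch): snapshot
 * the sequence before an update and compare afterwards to detect a TLB
 * flush that raced in:
 *
 *	uint64_t seq = amdgpu_vm_tlb_seq(vm);
 *	// ... update page tables ...
 *	if (amdgpu_vm_tlb_seq(vm) != seq)
 *		// a flush completed meanwhile; mappings may need re-validation
 */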
/*
* vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
* happens while holding this lock anywhere to prevent deadlocks when
* an MMU notifier runs in reclaim-FS context.
*/
static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
{
mutex_lock(&vm->eviction_lock);
vm->saved_flags = memalloc_noreclaim_save();
}
static inline bool amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
{
if (mutex_trylock(&vm->eviction_lock)) {
vm->saved_flags = memalloc_noreclaim_save();
return true;
}
return false;
}
static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
{
memalloc_noreclaim_restore(vm->saved_flags);
mutex_unlock(&vm->eviction_lock);
}
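/* Minimal usage sketch (illustrative): while the eviction lock is held,
 * memalloc_noreclaim_save() has set PF_MEMALLOC, so allocations made in
 * the critical section skip direct reclaim and cannot re-enter an MMU
 * notifier:
 *
 *	amdgpu_vm_eviction_lock(vm);
 *	entry = kzalloc(sizeof(*entry), GFP_KERNEL); // no reclaim-FS here
 *	amdgpu_vm_eviction_unlock(vm);
 */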
#endif

View File

@ -597,7 +597,9 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
if (entry->bo)
return 0;
amdgpu_vm_eviction_unlock(vm);
r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
amdgpu_vm_eviction_lock(vm);
if (r)
return r;

View File

@ -2829,6 +2829,17 @@ static int dce_v10_0_sw_init(void *handle)
if (r)
return r;
/* Disable vblank IRQs aggressively for power-saving */
/* XXX: can this be enabled for DC? */
adev_to_drm(adev)->vblank_disable_immediate = true;
r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
if (r)
return r;
INIT_WORK(&adev->hotplug_work,
amdgpu_display_hotplug_work_func);
drm_kms_helper_poll_init(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = true;
@ -2891,6 +2902,8 @@ static int dce_v10_0_hw_fini(void *handle)
dce_v10_0_pageflip_interrupt_fini(adev);
flush_work(&adev->hotplug_work);
return 0;
}

View File

@ -2948,6 +2948,17 @@ static int dce_v11_0_sw_init(void *handle)
if (r)
return r;
/* Disable vblank IRQs aggressively for power-saving */
/* XXX: can this be enabled for DC? */
adev_to_drm(adev)->vblank_disable_immediate = true;
r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
if (r)
return r;
INIT_WORK(&adev->hotplug_work,
amdgpu_display_hotplug_work_func);
drm_kms_helper_poll_init(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = true;
@ -3021,6 +3032,8 @@ static int dce_v11_0_hw_fini(void *handle)
dce_v11_0_pageflip_interrupt_fini(adev);
flush_work(&adev->hotplug_work);
return 0;
}

View File

@ -2706,6 +2706,18 @@ static int dce_v6_0_sw_init(void *handle)
if (r)
return r;
/* Disable vblank IRQs aggressively for power-saving */
/* XXX: can this be enabled for DC? */
adev_to_drm(adev)->vblank_disable_immediate = true;
r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
if (r)
return r;
/* Pre-DCE11 */
INIT_WORK(&adev->hotplug_work,
amdgpu_display_hotplug_work_func);
drm_kms_helper_poll_init(adev_to_drm(adev));
return r;
@ -2764,6 +2776,8 @@ static int dce_v6_0_hw_fini(void *handle)
dce_v6_0_pageflip_interrupt_fini(adev);
flush_work(&adev->hotplug_work);
return 0;
}

View File

@ -2730,6 +2730,18 @@ static int dce_v8_0_sw_init(void *handle)
if (r)
return r;
/* Disable vblank IRQs aggressively for power-saving */
/* XXX: can this be enabled for DC? */
adev_to_drm(adev)->vblank_disable_immediate = true;
r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
if (r)
return r;
/* Pre-DCE11 */
INIT_WORK(&adev->hotplug_work,
amdgpu_display_hotplug_work_func);
drm_kms_helper_poll_init(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = true;
@ -2790,6 +2802,8 @@ static int dce_v8_0_hw_fini(void *handle)
dce_v8_0_pageflip_interrupt_fini(adev);
flush_work(&adev->hotplug_work);
return 0;
}

View File

@ -4392,7 +4392,6 @@ static int gfx_v11_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
uint32_t tmp;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@ -4411,15 +4410,14 @@ static int gfx_v11_0_hw_fini(void *handle)
amdgpu_mes_kiq_hw_fini(adev);
}
if (amdgpu_sriov_vf(adev)) {
gfx_v11_0_cp_gfx_enable(adev, false);
/* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
if (amdgpu_sriov_vf(adev))
/* Remove the steps that disable CPG and clear the KIQ position,
* so that the CP can perform IDLE-SAVE during the switch. Those
* steps are necessary to avoid a DMAR error on gfx9, but the error
* is not reproduced on gfx11.
*/
return 0;
}
gfx_v11_0_cp_enable(adev, false);
gfx_v11_0_enable_gui_idle_interrupt(adev, false);

View File

@ -357,18 +357,6 @@ static void gfxhub_v3_0_3_program_invalidation(struct amdgpu_device *adev)
static int gfxhub_v3_0_3_gart_enable(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev)) {
/*
* GCMC_VM_FB_LOCATION_BASE/TOP are NULL for a VF because they are
* VF copy registers, so the vbios post doesn't program them; the
* SRIOV driver needs to program them
*/
WREG32_SOC15(GC, 0, regGCMC_VM_FB_LOCATION_BASE,
adev->gmc.vram_start >> 24);
WREG32_SOC15(GC, 0, regGCMC_VM_FB_LOCATION_TOP,
adev->gmc.vram_end >> 24);
}
/* GART Enable. */
gfxhub_v3_0_3_init_gart_aperture_regs(adev);
gfxhub_v3_0_3_init_system_aperture_regs(adev);

View File

@ -608,6 +608,8 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
uint64_t *flags)
{
struct amdgpu_bo *bo = mapping->bo_va->base.bo;
*flags &= ~AMDGPU_PTE_EXECUTABLE;
*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
@ -624,6 +626,11 @@ static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
*flags |= AMDGPU_PTE_SYSTEM;
*flags &= ~AMDGPU_PTE_VALID;
}
if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
AMDGPU_GEM_CREATE_UNCACHED))
*flags = (*flags & ~AMDGPU_PTE_MTYPE_NV10_MASK) |
AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
}
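/* Illustrative effect (a sketch): a BO created with either flag ends up
 * with an uncached PTE on this GMC, e.g.
 *
 *	bo->flags |= AMDGPU_GEM_CREATE_UNCACHED;
 *	// after mapping, the PTE MTYPE field reads back as MTYPE_UC
 */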
static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)

View File

@ -503,6 +503,8 @@ static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
uint64_t *flags)
{
struct amdgpu_bo *bo = mapping->bo_va->base.bo;
*flags &= ~AMDGPU_PTE_EXECUTABLE;
*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
@ -519,6 +521,11 @@ static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
*flags |= AMDGPU_PTE_SYSTEM;
*flags &= ~AMDGPU_PTE_VALID;
}
if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
AMDGPU_GEM_CREATE_UNCACHED))
*flags = (*flags & ~AMDGPU_PTE_MTYPE_NV10_MASK) |
AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
}
static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
@ -551,7 +558,10 @@ static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.node_inst_num = adev->gmc.num_umc;
adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl[0][0][0];
if (adev->umc.node_inst_num == 4)
adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl_ext0[0][0][0];
else
adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl[0][0][0];
adev->umc.ras = &umc_v8_10_ras;
break;
case IP_VERSION(8, 11, 0):

View File

@ -1113,6 +1113,74 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
}
static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
struct amdgpu_bo *bo,
struct amdgpu_bo_va_mapping *mapping,
uint64_t *flags)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM;
bool coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT;
bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
unsigned int mtype;
bool snoop = false;
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
if (is_vram) {
if (bo_adev == adev) {
if (uncached)
mtype = MTYPE_UC;
else if (coherent)
mtype = MTYPE_CC;
else
mtype = MTYPE_RW;
/* FIXME: is this still needed? Or does
* amdgpu_ttm_tt_pde_flags already handle this?
*/
if (adev->ip_versions[GC_HWIP][0] ==
IP_VERSION(9, 4, 2) &&
adev->gmc.xgmi.connected_to_cpu)
snoop = true;
} else {
if (uncached || coherent)
mtype = MTYPE_UC;
else
mtype = MTYPE_NC;
if (mapping->bo_va->is_xgmi)
snoop = true;
}
} else {
if (uncached || coherent)
mtype = MTYPE_UC;
else
mtype = MTYPE_NC;
/* FIXME: is this still needed? Or does
* amdgpu_ttm_tt_pde_flags already handle this?
*/
snoop = true;
}
break;
default:
if (uncached || coherent)
mtype = MTYPE_UC;
else
mtype = MTYPE_NC;
/* FIXME: is this still needed? Or does
* amdgpu_ttm_tt_pde_flags already handle this?
*/
if (!is_vram)
snoop = true;
}
if (mtype != MTYPE_NC)
*flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
AMDGPU_PTE_MTYPE_VG10(mtype);
*flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
}
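/* Worked example (illustrative): on GC 9.4.1/9.4.2, a VRAM BO mapped by
 * the GPU that owns it with AMDGPU_GEM_CREATE_COHERENT set gets MTYPE_CC;
 * the same BO mapped from a peer GPU falls back to MTYPE_UC, and SNOOPED
 * is added when the peer link is XGMI.
 */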
static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
uint64_t *flags)
@ -1128,14 +1196,9 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_VALID;
}
if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) &&
!(*flags & AMDGPU_PTE_SYSTEM) &&
mapping->bo_va->is_xgmi)
*flags |= AMDGPU_PTE_SNOOPED;
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
*flags |= mapping->flags & AMDGPU_PTE_SNOOPED;
if (mapping->bo_va->base.bo)
gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo,
mapping, flags);
}
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)

View File

@ -807,16 +807,5 @@ static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev)
break;
}
if (adev->jpeg.ras) {
amdgpu_ras_register_ras_block(adev, &adev->jpeg.ras->ras_block);
strcpy(adev->jpeg.ras->ras_block.ras_comm.name, "jpeg");
adev->jpeg.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__JPEG;
adev->jpeg.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
adev->jpeg.ras_if = &adev->jpeg.ras->ras_block.ras_comm;
/* If don't define special ras_late_init function, use default ras_late_init */
if (!adev->jpeg.ras->ras_block.ras_late_init)
adev->jpeg.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
}
jpeg_set_ras_funcs(adev);
}

View File

@ -27,6 +27,7 @@
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v2_0.h"
#include "jpeg_v4_0.h"
#include "vcn/vcn_4_0_0_offset.h"
#include "vcn/vcn_4_0_0_sh_mask.h"
@ -38,6 +39,7 @@ static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v4_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev);
/**
* jpeg_v4_0_early_init - set function pointers
@ -55,6 +57,7 @@ static int jpeg_v4_0_early_init(void *handle)
jpeg_v4_0_set_dec_ring_funcs(adev);
jpeg_v4_0_set_irq_funcs(adev);
jpeg_v4_0_set_ras_funcs(adev);
return 0;
}
@ -78,6 +81,18 @@ static int jpeg_v4_0_sw_init(void *handle)
if (r)
return r;
/* JPEG DJPEG POISON EVENT */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->irq);
if (r)
return r;
/* JPEG EJPEG POISON EVENT */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->irq);
if (r)
return r;
r = amdgpu_jpeg_sw_init(adev);
if (r)
return r;
@ -167,6 +182,8 @@ static int jpeg_v4_0_hw_fini(void *handle)
RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0);
return 0;
}
@ -524,6 +541,10 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
case VCN_4_0__SRCID__JPEG_DECODE:
amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
break;
case VCN_4_0__SRCID_DJPEG0_POISON:
case VCN_4_0__SRCID_EJPEG0_POISON:
amdgpu_jpeg_process_poison_irq(adev, source, entry);
break;
default:
DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
@ -607,3 +628,63 @@ const struct amdgpu_ip_block_version jpeg_v4_0_ip_block = {
.rev = 0,
.funcs = &jpeg_v4_0_ip_funcs,
};
static uint32_t jpeg_v4_0_query_poison_by_instance(struct amdgpu_device *adev,
uint32_t instance, uint32_t sub_block)
{
uint32_t poison_stat = 0, reg_value = 0;
switch (sub_block) {
case AMDGPU_JPEG_V4_0_JPEG0:
reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG0_STATUS);
poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
break;
case AMDGPU_JPEG_V4_0_JPEG1:
reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG1_STATUS);
poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
break;
default:
break;
}
if (poison_stat)
dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
instance, sub_block);
return poison_stat;
}
static bool jpeg_v4_0_query_ras_poison_status(struct amdgpu_device *adev)
{
uint32_t inst = 0, sub = 0, poison_stat = 0;
for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
for (sub = 0; sub < AMDGPU_JPEG_V4_0_MAX_SUB_BLOCK; sub++)
poison_stat +=
jpeg_v4_0_query_poison_by_instance(adev, inst, sub);
return !!poison_stat;
}
const struct amdgpu_ras_block_hw_ops jpeg_v4_0_ras_hw_ops = {
.query_poison_status = jpeg_v4_0_query_ras_poison_status,
};
static struct amdgpu_jpeg_ras jpeg_v4_0_ras = {
.ras_block = {
.hw_ops = &jpeg_v4_0_ras_hw_ops,
},
};
static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev)
{
switch (adev->ip_versions[JPEG_HWIP][0]) {
case IP_VERSION(4, 0, 0):
adev->jpeg.ras = &jpeg_v4_0_ras;
break;
default:
break;
}
jpeg_set_ras_funcs(adev);
}

View File

@ -24,6 +24,13 @@
#ifndef __JPEG_V4_0_H__
#define __JPEG_V4_0_H__
enum amdgpu_jpeg_v4_0_sub_block {
AMDGPU_JPEG_V4_0_JPEG0 = 0,
AMDGPU_JPEG_V4_0_JPEG1,
AMDGPU_JPEG_V4_0_MAX_SUB_BLOCK,
};
extern const struct amdgpu_ip_block_version jpeg_v4_0_ip_block;
#endif /* __JPEG_V4_0_H__ */

View File

@ -1253,7 +1253,9 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
if (adev->mes.ring.sched.ready)
mes_v11_0_kiq_dequeue_sched(adev);
mes_v11_0_enable(adev, false);
if (!amdgpu_sriov_vf(adev))
mes_v11_0_enable(adev, false);
return 0;
}

View File

@ -126,32 +126,6 @@ out:
return err;
}
static int psp_v10_0_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
ring = &psp->km_ring;
ring->ring_type = ring_type;
/* allocate 4k Page of Local Frame Buffer memory for ring */
ring->ring_size = 0x1000;
ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
if (ret) {
ring->ring_size = 0;
return ret;
}
return 0;
}
static int psp_v10_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@ -245,7 +219,6 @@ static void psp_v10_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
static const struct psp_funcs psp_v10_0_funcs = {
.init_microcode = psp_v10_0_init_microcode,
.ring_init = psp_v10_0_ring_init,
.ring_create = psp_v10_0_ring_create,
.ring_stop = psp_v10_0_ring_stop,
.ring_destroy = psp_v10_0_ring_destroy,

View File

@ -360,32 +360,6 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
return ret;
}
static int psp_v11_0_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
ring = &psp->km_ring;
ring->ring_type = ring_type;
/* allocate 4k Page of Local Frame Buffer memory for ring */
ring->ring_size = 0x1000;
ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
if (ret) {
ring->ring_size = 0;
return ret;
}
return 0;
}
static int psp_v11_0_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@ -779,7 +753,6 @@ static const struct psp_funcs psp_v11_0_funcs = {
.bootloader_load_spl = psp_v11_0_bootloader_load_spl,
.bootloader_load_sysdrv = psp_v11_0_bootloader_load_sysdrv,
.bootloader_load_sos = psp_v11_0_bootloader_load_sos,
.ring_init = psp_v11_0_ring_init,
.ring_create = psp_v11_0_ring_create,
.ring_stop = psp_v11_0_ring_stop,
.ring_destroy = psp_v11_0_ring_destroy,

View File

@ -28,32 +28,6 @@
#include "mp/mp_11_0_8_offset.h"
static int psp_v11_0_8_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
ring = &psp->km_ring;
ring->ring_type = ring_type;
/* allocate 4k Page of Local Frame Buffer memory for ring */
ring->ring_size = 0x1000;
ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
if (ret) {
ring->ring_size = 0;
return ret;
}
return 0;
}
static int psp_v11_0_8_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@ -194,7 +168,6 @@ static void psp_v11_0_8_ring_set_wptr(struct psp_context *psp, uint32_t value)
}
static const struct psp_funcs psp_v11_0_8_funcs = {
.ring_init = psp_v11_0_8_ring_init,
.ring_create = psp_v11_0_8_ring_create,
.ring_stop = psp_v11_0_8_ring_stop,
.ring_destroy = psp_v11_0_8_ring_destroy,

View File

@ -236,34 +236,6 @@ static void psp_v12_0_reroute_ih(struct psp_context *psp)
0x80000000, 0x8000FFFF, false);
}
static int psp_v12_0_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
psp_v12_0_reroute_ih(psp);
ring = &psp->km_ring;
ring->ring_type = ring_type;
/* allocate 4k Page of Local Frame Buffer memory for ring */
ring->ring_size = 0x1000;
ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
if (ret) {
ring->ring_size = 0;
return ret;
}
return 0;
}
static int psp_v12_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@ -272,6 +244,8 @@ static int psp_v12_0_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
psp_v12_0_reroute_ih(psp);
if (amdgpu_sriov_vf(psp->adev)) {
/* Write low address of the ring to C2PMSG_102 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
@ -425,7 +399,6 @@ static const struct psp_funcs psp_v12_0_funcs = {
.init_microcode = psp_v12_0_init_microcode,
.bootloader_load_sysdrv = psp_v12_0_bootloader_load_sysdrv,
.bootloader_load_sos = psp_v12_0_bootloader_load_sos,
.ring_init = psp_v12_0_ring_init,
.ring_create = psp_v12_0_ring_create,
.ring_stop = psp_v12_0_ring_stop,
.ring_destroy = psp_v12_0_ring_destroy,

View File

@ -45,6 +45,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@ -267,32 +268,6 @@ static int psp_v13_0_bootloader_load_sos(struct psp_context *psp)
return ret;
}
static int psp_v13_0_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
ring = &psp->km_ring;
ring->ring_type = ring_type;
/* allocate 4k Page of Local Frame Buffer memory for ring */
ring->ring_size = 0x1000;
ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
if (ret) {
ring->ring_size = 0;
return ret;
}
return 0;
}
static int psp_v13_0_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@ -728,7 +703,6 @@ static const struct psp_funcs psp_v13_0_funcs = {
.bootloader_load_dbg_drv = psp_v13_0_bootloader_load_dbg_drv,
.bootloader_load_ras_drv = psp_v13_0_bootloader_load_ras_drv,
.bootloader_load_sos = psp_v13_0_bootloader_load_sos,
.ring_init = psp_v13_0_ring_init,
.ring_create = psp_v13_0_ring_create,
.ring_stop = psp_v13_0_ring_stop,
.ring_destroy = psp_v13_0_ring_destroy,

View File

@ -199,32 +199,6 @@ static int psp_v13_0_4_bootloader_load_sos(struct psp_context *psp)
return ret;
}
static int psp_v13_0_4_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
ring = &psp->km_ring;
ring->ring_type = ring_type;
/* allocate 4k Page of Local Frame Buffer memory for ring */
ring->ring_size = 0x1000;
ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
if (ret) {
ring->ring_size = 0;
return ret;
}
return 0;
}
static int psp_v13_0_4_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@ -373,7 +347,6 @@ static const struct psp_funcs psp_v13_0_4_funcs = {
.bootloader_load_intf_drv = psp_v13_0_4_bootloader_load_intf_drv,
.bootloader_load_dbg_drv = psp_v13_0_4_bootloader_load_dbg_drv,
.bootloader_load_sos = psp_v13_0_4_bootloader_load_sos,
.ring_init = psp_v13_0_4_ring_init,
.ring_create = psp_v13_0_4_ring_create,
.ring_stop = psp_v13_0_4_ring_stop,
.ring_destroy = psp_v13_0_4_ring_destroy,

View File

@ -160,32 +160,6 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
return ret;
}
static int psp_v3_1_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
ring = &psp->km_ring;
ring->ring_type = ring_type;
/* allocate 4k Page of Local Frame Buffer memory for ring */
ring->ring_size = 0x1000;
ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
if (ret) {
ring->ring_size = 0;
return ret;
}
return 0;
}
static void psp_v3_1_reroute_ih(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@ -401,7 +375,6 @@ static const struct psp_funcs psp_v3_1_funcs = {
.init_microcode = psp_v3_1_init_microcode,
.bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv,
.bootloader_load_sos = psp_v3_1_bootloader_load_sos,
.ring_init = psp_v3_1_ring_init,
.ring_create = psp_v3_1_ring_create,
.ring_stop = psp_v3_1_ring_stop,
.ring_destroy = psp_v3_1_ring_destroy,

View File

@ -322,6 +322,7 @@ soc21_asic_reset_method(struct amdgpu_device *adev)
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
return AMD_RESET_METHOD_MODE1;
case IP_VERSION(13, 0, 4):
return AMD_RESET_METHOD_MODE2;

View File

@ -45,6 +45,16 @@ const struct channelnum_map_colbit umc_v8_10_channelnum_map_colbit_table[] = {
{6, 11},
};
const uint32_t
umc_v8_10_channel_idx_tbl_ext0[]
[UMC_V8_10_UMC_INSTANCE_NUM]
[UMC_V8_10_CHANNEL_INSTANCE_NUM] = {
{{1, 5}, {7, 3}},
{{14, 15}, {13, 12}},
{{10, 11}, {9, 8}},
{{6, 2}, {0, 4}}
};
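/* Illustrative lookup (a sketch): when gmc_v11_0_set_umc_funcs() selects
 * this table (node_inst_num == 4), the physical channel index is
 *
 *	ch = umc_v8_10_channel_idx_tbl_ext0[node][umc_inst][ch_inst];
 */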
const uint32_t
umc_v8_10_channel_idx_tbl[]
[UMC_V8_10_UMC_INSTANCE_NUM]

View File

@ -66,5 +66,9 @@ extern const uint32_t
[UMC_V8_10_UMC_INSTANCE_NUM]
[UMC_V8_10_CHANNEL_INSTANCE_NUM];
extern const uint32_t
umc_v8_10_channel_idx_tbl_ext0[]
[UMC_V8_10_UMC_INSTANCE_NUM]
[UMC_V8_10_CHANNEL_INSTANCE_NUM];
#endif

View File

@ -2002,16 +2002,5 @@ static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev)
break;
}
if (adev->vcn.ras) {
amdgpu_ras_register_ras_block(adev, &adev->vcn.ras->ras_block);
strcpy(adev->vcn.ras->ras_block.ras_comm.name, "vcn");
adev->vcn.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
adev->vcn.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
adev->vcn.ras_if = &adev->vcn.ras->ras_block.ras_comm;
/* If don't define special ras_late_init function, use default ras_late_init */
if (!adev->vcn.ras->ras_block.ras_late_init)
adev->vcn.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
}
amdgpu_vcn_set_ras_funcs(adev);
}

View File

@ -31,6 +31,7 @@
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
#include "mmsch_v4_0.h"
#include "vcn_v4_0.h"
#include "vcn/vcn_4_0_0_offset.h"
#include "vcn/vcn_4_0_0_sh_mask.h"
@ -64,6 +65,7 @@ static int vcn_v4_0_set_powergating_state(void *handle,
static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);
/**
* vcn_v4_0_early_init - set function pointers
@ -84,6 +86,7 @@ static int vcn_v4_0_early_init(void *handle)
vcn_v4_0_set_unified_ring_funcs(adev);
vcn_v4_0_set_irq_funcs(adev);
vcn_v4_0_set_ras_funcs(adev);
return 0;
}
@ -132,6 +135,12 @@ static int vcn_v4_0_sw_init(void *handle)
if (r)
return r;
/* VCN POISON TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
if (r)
return r;
ring = &adev->vcn.inst[i].ring_enc[0];
ring->use_doorbell = true;
if (amdgpu_sriov_vf(adev))
@ -296,6 +305,7 @@ static int vcn_v4_0_hw_fini(void *handle)
}
}
amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0);
}
return 0;
@ -1939,6 +1949,9 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_
case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
break;
case VCN_4_0__SRCID_UVD_POISON:
amdgpu_vcn_process_poison_irq(adev, source, entry);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
@ -2001,3 +2014,60 @@ const struct amdgpu_ip_block_version vcn_v4_0_ip_block =
.rev = 0,
.funcs = &vcn_v4_0_ip_funcs,
};
static uint32_t vcn_v4_0_query_poison_by_instance(struct amdgpu_device *adev,
uint32_t instance, uint32_t sub_block)
{
uint32_t poison_stat = 0, reg_value = 0;
switch (sub_block) {
case AMDGPU_VCN_V4_0_VCPU_VCODEC:
reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
break;
default:
break;
}
if (poison_stat)
dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
instance, sub_block);
return poison_stat;
}
static bool vcn_v4_0_query_ras_poison_status(struct amdgpu_device *adev)
{
uint32_t inst, sub;
uint32_t poison_stat = 0;
for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
for (sub = 0; sub < AMDGPU_VCN_V4_0_MAX_SUB_BLOCK; sub++)
poison_stat +=
vcn_v4_0_query_poison_by_instance(adev, inst, sub);
return !!poison_stat;
}
const struct amdgpu_ras_block_hw_ops vcn_v4_0_ras_hw_ops = {
.query_poison_status = vcn_v4_0_query_ras_poison_status,
};
static struct amdgpu_vcn_ras vcn_v4_0_ras = {
.ras_block = {
.hw_ops = &vcn_v4_0_ras_hw_ops,
},
};
static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev)
{
switch (adev->ip_versions[VCN_HWIP][0]) {
case IP_VERSION(4, 0, 0):
adev->vcn.ras = &vcn_v4_0_ras;
break;
default:
break;
}
amdgpu_vcn_set_ras_funcs(adev);
}

View File

@ -24,6 +24,12 @@
#ifndef __VCN_V4_0_H__
#define __VCN_V4_0_H__
enum amdgpu_vcn_v4_0_sub_block {
AMDGPU_VCN_V4_0_VCPU_VCODEC = 0,
AMDGPU_VCN_V4_0_MAX_SUB_BLOCK,
};
extern const struct amdgpu_ip_block_version vcn_v4_0_ip_block;
#endif /* __VCN_V4_0_H__ */

View File

@ -2111,6 +2111,8 @@ void vi_set_virt_ops(struct amdgpu_device *adev)
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
amdgpu_device_set_sriov_virtual_display(adev);
switch (adev->asic_type) {
case CHIP_TOPAZ:
/* topaz has no DCE, UVD, VCE */
@ -2130,7 +2132,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
@ -2150,7 +2152,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))

View File

@ -1950,7 +1950,7 @@ static int criu_checkpoint(struct file *filep,
{
int ret;
uint32_t num_devices, num_bos, num_objects;
uint64_t priv_size, priv_offset = 0;
uint64_t priv_size, priv_offset = 0, bo_priv_offset;
if (!args->devices || !args->bos || !args->priv_data)
return -EINVAL;
@ -1994,38 +1994,34 @@ static int criu_checkpoint(struct file *filep,
if (ret)
goto exit_unlock;
ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos,
(uint8_t __user *)args->priv_data, &priv_offset);
if (ret)
goto exit_unlock;
/* Leave room for BOs in the private data. They need to be restored
* before events, but we checkpoint them last to simplify the error
* handling.
*/
bo_priv_offset = priv_offset;
priv_offset += num_bos * sizeof(struct kfd_criu_bo_priv_data);
if (num_objects) {
ret = kfd_criu_checkpoint_queues(p, (uint8_t __user *)args->priv_data,
&priv_offset);
if (ret)
goto close_bo_fds;
goto exit_unlock;
ret = kfd_criu_checkpoint_events(p, (uint8_t __user *)args->priv_data,
&priv_offset);
if (ret)
goto close_bo_fds;
goto exit_unlock;
ret = kfd_criu_checkpoint_svm(p, (uint8_t __user *)args->priv_data, &priv_offset);
if (ret)
goto close_bo_fds;
goto exit_unlock;
}
close_bo_fds:
if (ret) {
/* If IOCTL returns err, user assumes all FDs opened in criu_dump_bos are closed */
uint32_t i;
struct kfd_criu_bo_bucket *bo_buckets = (struct kfd_criu_bo_bucket *) args->bos;
for (i = 0; i < num_bos; i++) {
if (bo_buckets[i].alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
close_fd(bo_buckets[i].dmabuf_fd);
}
}
/* This must be the last thing in this function that can fail.
* Otherwise we leak dmabuf file descriptors.
*/
ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos,
(uint8_t __user *)args->priv_data, &bo_priv_offset);
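/* Resulting private-data layout (illustrative):
 *
 *	| process | devices | BOs (reserved above) | queues | events | svm |
 *
 * BOs are written into the reserved slot last, so any earlier failure
 * returns before dmabuf FDs are created and nothing needs closing.
 */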
exit_unlock:
mutex_unlock(&p->mutex);

View File

@ -1111,7 +1111,7 @@ static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
props->cache_latency = cache->cache_latency;
memcpy(props->sibling_map, cache->sibling_map,
sizeof(props->sibling_map));
CRAT_SIBLINGMAP_SIZE);
/* set the sibling_map_size to 32 for CRAT from ACPI */
props->sibling_map_size = CRAT_SIBLINGMAP_SIZE;

View File

@ -495,7 +495,10 @@ static int kfd_gws_init(struct kfd_dev *kfd)
(KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)
&& kfd->mec2_fw_version >= 0x30) ||
(KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)
&& kfd->mec2_fw_version >= 0x28))))
&& kfd->mec2_fw_version >= 0x28) ||
(KFD_GC_VERSION(kfd) >= IP_VERSION(10, 3, 0)
&& KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)
&& kfd->mec2_fw_version >= 0x6b))))
ret = amdgpu_amdkfd_alloc_gws(kfd->adev,
kfd->adev->gds.gws_size, &kfd->gws);

View File

@ -506,6 +506,7 @@ int kfd_criu_restore_event(struct file *devkfd,
ret = create_other_event(p, ev, &ev_priv->event_id);
break;
}
mutex_unlock(&p->event_mutex);
exit:
if (ret)
@ -513,8 +514,6 @@ exit:
kfree(ev_priv);
mutex_unlock(&p->event_mutex);
return ret;
}

View File

@ -28,7 +28,6 @@
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_mn.h"
#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"

View File

@ -26,7 +26,7 @@
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_mn.h"
#include "amdgpu_hmm.h"
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "kfd_priv.h"
@ -1596,9 +1596,9 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
next = min(vma->vm_end, end);
npages = (next - addr) >> PAGE_SHIFT;
WRITE_ONCE(p->svms.faulting_task, current);
r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
addr, npages, &hmm_range,
readonly, true, owner);
r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
readonly, owner, NULL,
&hmm_range);
WRITE_ONCE(p->svms.faulting_task, NULL);
if (r) {
pr_debug("failed %d to get svm range pages\n", r);

View File

@ -1723,7 +1723,7 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
/* kfd_fill_cache_non_crat_info - Fill GPU cache info using kfd_gpu_cache_info
* tables
*/
void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_dev *kdev)
static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_dev *kdev)
{
struct kfd_gpu_cache_info *pcache_info = NULL;
int i, j, k;

View File

@ -146,6 +146,14 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
/*
* DMUB Async to Sync Mechanism Status
*/
#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
#define DMUB_ASYNC_TO_SYNC_ACCESS_INVALID 4
/**
* DOC: overview
*
@ -1635,12 +1643,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
}
if (amdgpu_dm_initialize_drm_device(adev)) {
DRM_ERROR(
"amdgpu: failed to initialize sw for display support.\n");
goto error;
}
/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
* It is expected that DMUB will resend any pending notifications at this point, for
* example HPD from DPIA.
@ -1648,6 +1650,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (dc_is_dmub_outbox_supported(adev->dm.dc))
dc_enable_dmub_outbox(adev->dm.dc);
if (amdgpu_dm_initialize_drm_device(adev)) {
DRM_ERROR(
"amdgpu: failed to initialize sw for display support.\n");
goto error;
}
/* create fake encoders for MST */
dm_dp_create_fake_mst_encoders(adev);
@ -4590,6 +4598,7 @@ static int dm_early_init(void *handle)
adev_to_drm(adev)->dev,
&dev_attr_s3_debug);
#endif
adev->dc_enabled = true;
return 0;
}
@ -5682,7 +5691,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
const struct drm_connector_state *con_state =
dm_state ? &dm_state->base : NULL;
struct dc_stream_state *stream = NULL;
struct drm_display_mode mode = *drm_mode;
struct drm_display_mode mode;
struct drm_display_mode saved_mode;
struct drm_display_mode *freesync_mode = NULL;
bool native_mode_found = false;
@ -5697,6 +5706,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
struct dc_sink *sink = NULL;
drm_mode_init(&mode, drm_mode);
memset(&saved_mode, 0, sizeof(saved_mode));
if (aconnector == NULL) {
@ -6527,7 +6537,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
struct drm_connector_state *new_con_state;
struct amdgpu_dm_connector *aconnector;
struct dm_connector_state *dm_conn_state;
int i, j;
int i, j, ret;
int vcpi, pbn_div, pbn, slot_num = 0;
for_each_new_connector_in_state(state, connector, new_con_state, i) {
@ -6574,8 +6584,11 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
dm_conn_state->pbn = pbn;
dm_conn_state->vcpi_slots = slot_num;
drm_dp_mst_atomic_enable_dsc(state, aconnector->port, dm_conn_state->pbn,
false);
ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->port,
dm_conn_state->pbn, false);
if (ret < 0)
return ret;
continue;
}
@ -7682,9 +7695,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].plane_info =
&bundle->plane_infos[planes_count];
fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
new_crtc_state,
&bundle->flip_addrs[planes_count]);
if (acrtc_state->stream->link->psr_settings.psr_feature_enabled)
fill_dc_dirty_rects(plane, old_plane_state,
new_plane_state, new_crtc_state,
&bundle->flip_addrs[planes_count]);
/*
* Only allow immediate flips for fast updates that don't
@ -9591,10 +9605,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dc_resource_is_dsc_encoding_supported(dc)) {
if (!pre_validate_dsc(state, &dm_state, vars)) {
ret = -EINVAL;
ret = pre_validate_dsc(state, &dm_state, vars);
if (ret != 0)
goto fail;
}
}
#endif
@ -9689,9 +9702,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
if (ret) {
DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
ret = -EINVAL;
goto fail;
}
@ -10171,6 +10184,8 @@ static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
*operation_result = AUX_RET_ERROR_TIMEOUT;
} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_INVALID) {
*operation_result = AUX_RET_ERROR_INVALID_REPLY;
} else {
*operation_result = AUX_RET_ERROR_UNKNOWN;
}
@ -10218,6 +10233,16 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context
payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
if (payload->length != adev->dm.dmub_notify->aux_reply.length) {
DRM_WARN("invalid read from DPIA AUX %x(%d) got length %d!\n",
payload->address, payload->length,
adev->dm.dmub_notify->aux_reply.length);
return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, ctx,
DMUB_ASYNC_TO_SYNC_ACCESS_INVALID,
(uint32_t *)operation_result);
}
memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
adev->dm.dmub_notify->aux_reply.length);
}
@ -10238,8 +10263,8 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context
*/
bool check_seamless_boot_capability(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_VANGOGH:
switch (adev->ip_versions[DCE_HWIP][0]) {
case IP_VERSION(3, 0, 1):
if (!adev->mman.keep_stolen_vga_memory)
return true;
break;

View File

@ -50,12 +50,6 @@
#define AMDGPU_DMUB_NOTIFICATION_MAX 5
/*
* DMUB Async to Sync Mechanism Status
*/
#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"

View File

@ -415,7 +415,7 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
{
struct amdgpu_crtc *acrtc = NULL;
struct drm_plane *cursor_plane;
bool is_dcn;
int res = -ENOMEM;
cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
@ -453,8 +453,14 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
acrtc->otg_inst = -1;
dm->adev->mode_info.crtcs[crtc_index] = acrtc;
drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
/* Don't enable DRM CRTC degamma property for DCE since it doesn't
* support programmable degamma anywhere.
*/
is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch;
drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0,
true, MAX_COLOR_LUT_ENTRIES);
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
return 0;

View File

@ -971,3 +971,11 @@ void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
{
/* TODO: add periodic detection implementation */
}
void dm_helpers_dp_mst_update_branch_bandwidth(
struct dc_context *ctx,
struct dc_link *link)
{
// TODO
}

View File

@ -710,13 +710,13 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
return dsc_config.bits_per_pixel;
}
static bool increase_dsc_bpp(struct drm_atomic_state *state,
struct drm_dp_mst_topology_state *mst_state,
struct dc_link *dc_link,
struct dsc_mst_fairness_params *params,
struct dsc_mst_fairness_vars *vars,
int count,
int k)
static int increase_dsc_bpp(struct drm_atomic_state *state,
struct drm_dp_mst_topology_state *mst_state,
struct dc_link *dc_link,
struct dsc_mst_fairness_params *params,
struct dsc_mst_fairness_vars *vars,
int count,
int k)
{
int i;
bool bpp_increased[MAX_PIPES];
@ -726,6 +726,7 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state,
int remaining_to_increase = 0;
int link_timeslots_used;
int fair_pbn_alloc;
int ret = 0;
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
@ -764,52 +765,60 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state,
if (initial_slack[next_index] > fair_pbn_alloc) {
vars[next_index].pbn += fair_pbn_alloc;
if (drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn) < 0)
return false;
if (!drm_dp_mst_atomic_check(state)) {
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn);
if (ret < 0)
return ret;
ret = drm_dp_mst_atomic_check(state);
if (ret == 0) {
vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
} else {
vars[next_index].pbn -= fair_pbn_alloc;
if (drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn) < 0)
return false;
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn);
if (ret < 0)
return ret;
}
} else {
vars[next_index].pbn += initial_slack[next_index];
if (drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn) < 0)
return false;
if (!drm_dp_mst_atomic_check(state)) {
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn);
if (ret < 0)
return ret;
ret = drm_dp_mst_atomic_check(state);
if (ret == 0) {
vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
} else {
vars[next_index].pbn -= initial_slack[next_index];
if (drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn) < 0)
return false;
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn);
if (ret < 0)
return ret;
}
}
bpp_increased[next_index] = true;
remaining_to_increase--;
}
return true;
return 0;
}
static bool try_disable_dsc(struct drm_atomic_state *state,
struct dc_link *dc_link,
struct dsc_mst_fairness_params *params,
struct dsc_mst_fairness_vars *vars,
int count,
int k)
static int try_disable_dsc(struct drm_atomic_state *state,
struct dc_link *dc_link,
struct dsc_mst_fairness_params *params,
struct dsc_mst_fairness_vars *vars,
int count,
int k)
{
int i;
bool tried[MAX_PIPES];
@ -817,6 +826,7 @@ static bool try_disable_dsc(struct drm_atomic_state *state,
int max_kbps_increase;
int next_index;
int remaining_to_try = 0;
int ret;
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled
@ -847,49 +857,52 @@ static bool try_disable_dsc(struct drm_atomic_state *state,
break;
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
if (drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn) < 0)
return false;
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn);
if (ret < 0)
return ret;
if (!drm_dp_mst_atomic_check(state)) {
ret = drm_dp_mst_atomic_check(state);
if (ret == 0) {
vars[next_index].dsc_enabled = false;
vars[next_index].bpp_x16 = 0;
} else {
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
if (drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn) < 0)
return false;
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
vars[next_index].pbn);
if (ret < 0)
return ret;
}
tried[next_index] = true;
remaining_to_try--;
}
return true;
return 0;
}
static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct dc_link *dc_link,
struct dsc_mst_fairness_vars *vars,
struct drm_dp_mst_topology_mgr *mgr,
int *link_vars_start_index)
static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct dc_link *dc_link,
struct dsc_mst_fairness_vars *vars,
struct drm_dp_mst_topology_mgr *mgr,
int *link_vars_start_index)
{
struct dc_stream_state *stream;
struct dsc_mst_fairness_params params[MAX_PIPES];
struct amdgpu_dm_connector *aconnector;
struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr);
int count = 0;
int i, k;
int i, k, ret;
bool debugfs_overwrite = false;
memset(params, 0, sizeof(params));
if (IS_ERR(mst_state))
return false;
return PTR_ERR(mst_state);
mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link);
#if defined(CONFIG_DRM_AMD_DC_DCN)
@ -940,7 +953,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (count == 0) {
ASSERT(0);
return true;
return 0;
}
/* k is start index of vars for current phy link used by mst hub */
@ -954,13 +967,17 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
vars[i + k].pbn) < 0)
return false;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
vars[i + k].pbn);
if (ret < 0)
return ret;
}
if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) {
ret = drm_dp_mst_atomic_check(state);
if (ret == 0 && !debugfs_overwrite) {
set_dsc_configs_from_fairness_vars(params, vars, count, k);
return true;
return 0;
} else if (ret != -ENOSPC) {
return ret;
}
/* Try max compression */
@ -969,31 +986,36 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
params[i].port, vars[i + k].pbn) < 0)
return false;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
params[i].port, vars[i + k].pbn);
if (ret < 0)
return ret;
} else {
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
params[i].port, vars[i + k].pbn) < 0)
return false;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
params[i].port, vars[i + k].pbn);
if (ret < 0)
return ret;
}
}
if (drm_dp_mst_atomic_check(state))
return false;
ret = drm_dp_mst_atomic_check(state);
if (ret != 0)
return ret;
/* Optimize degree of compression */
if (!increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k))
return false;
ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k);
if (ret < 0)
return ret;
if (!try_disable_dsc(state, dc_link, params, vars, count, k))
return false;
ret = try_disable_dsc(state, dc_link, params, vars, count, k);
if (ret < 0)
return ret;
set_dsc_configs_from_fairness_vars(params, vars, count, k);
return true;
return 0;
}
static bool is_dsc_need_re_compute(
@ -1094,15 +1116,17 @@ static bool is_dsc_need_re_compute(
return is_dsc_need_re_compute;
}
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct dsc_mst_fairness_vars *vars)
int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct dsc_mst_fairness_vars *vars)
{
int i, j;
struct dc_stream_state *stream;
bool computed_streams[MAX_PIPES];
struct amdgpu_dm_connector *aconnector;
struct drm_dp_mst_topology_mgr *mst_mgr;
int link_vars_start_index = 0;
int ret = 0;
for (i = 0; i < dc_state->stream_count; i++)
computed_streams[i] = false;
@ -1115,7 +1139,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
if (!aconnector || !aconnector->dc_sink)
if (!aconnector || !aconnector->dc_sink || !aconnector->port)
continue;
if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
@ -1125,19 +1149,16 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
continue;
if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
return false;
return -EINVAL;
if (!is_dsc_need_re_compute(state, dc_state, stream->link))
continue;
mutex_lock(&aconnector->mst_mgr.lock);
if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars,
&aconnector->mst_mgr,
&link_vars_start_index)) {
mutex_unlock(&aconnector->mst_mgr.lock);
return false;
}
mutex_unlock(&aconnector->mst_mgr.lock);
mst_mgr = aconnector->port->mgr;
ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
&link_vars_start_index);
if (ret != 0)
return ret;
for (j = 0; j < dc_state->stream_count; j++) {
if (dc_state->streams[j]->link == stream->link)
@ -1150,22 +1171,23 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (stream->timing.flags.DSC == 1)
if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
return false;
return -EINVAL;
}
return true;
return ret;
}
static bool
pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct dsc_mst_fairness_vars *vars)
static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct dsc_mst_fairness_vars *vars)
{
int i, j;
struct dc_stream_state *stream;
bool computed_streams[MAX_PIPES];
struct amdgpu_dm_connector *aconnector;
struct drm_dp_mst_topology_mgr *mst_mgr;
int link_vars_start_index = 0;
int ret;
for (i = 0; i < dc_state->stream_count; i++)
computed_streams[i] = false;
@ -1178,7 +1200,7 @@ static bool
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
if (!aconnector || !aconnector->dc_sink)
if (!aconnector || !aconnector->dc_sink || !aconnector->port)
continue;
if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
@ -1190,14 +1212,11 @@ static bool
if (!is_dsc_need_re_compute(state, dc_state, stream->link))
continue;
mutex_lock(&aconnector->mst_mgr.lock);
if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars,
&aconnector->mst_mgr,
&link_vars_start_index)) {
mutex_unlock(&aconnector->mst_mgr.lock);
return false;
}
mutex_unlock(&aconnector->mst_mgr.lock);
mst_mgr = aconnector->port->mgr;
ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
&link_vars_start_index);
if (ret != 0)
return ret;
for (j = 0; j < dc_state->stream_count; j++) {
if (dc_state->streams[j]->link == stream->link)
@ -1205,7 +1224,7 @@ static bool
}
}
return true;
return ret;
}
static int find_crtc_index_in_state_by_stream(struct drm_atomic_state *state,
@ -1260,9 +1279,9 @@ static bool is_dsc_precompute_needed(struct drm_atomic_state *state)
return ret;
}
bool pre_validate_dsc(struct drm_atomic_state *state,
struct dm_atomic_state **dm_state_ptr,
struct dsc_mst_fairness_vars *vars)
int pre_validate_dsc(struct drm_atomic_state *state,
struct dm_atomic_state **dm_state_ptr,
struct dsc_mst_fairness_vars *vars)
{
int i;
struct dm_atomic_state *dm_state;
@ -1271,11 +1290,12 @@ bool pre_validate_dsc(struct drm_atomic_state *state,
if (!is_dsc_precompute_needed(state)) {
DRM_INFO_ONCE("DSC precompute is not needed.\n");
return true;
return 0;
}
if (dm_atomic_get_state(state, dm_state_ptr)) {
ret = dm_atomic_get_state(state, dm_state_ptr);
if (ret != 0) {
DRM_INFO_ONCE("dm_atomic_get_state() failed\n");
return false;
return ret;
}
dm_state = *dm_state_ptr;
@ -1287,7 +1307,7 @@ bool pre_validate_dsc(struct drm_atomic_state *state,
local_dc_state = kmemdup(dm_state->context, sizeof(struct dc_state), GFP_KERNEL);
if (!local_dc_state)
return false;
return -ENOMEM;
for (i = 0; i < local_dc_state->stream_count; i++) {
struct dc_stream_state *stream = dm_state->context->streams[i];
@ -1323,9 +1343,9 @@ bool pre_validate_dsc(struct drm_atomic_state *state,
if (ret != 0)
goto clean_exit;
if (!pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars)) {
ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars);
if (ret != 0) {
DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
ret = -EINVAL;
goto clean_exit;
}
@ -1356,7 +1376,7 @@ clean_exit:
kfree(local_dc_state);
return (ret == 0);
return ret;
}
static unsigned int kbps_from_pbn(unsigned int pbn)
@ -1399,6 +1419,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
unsigned int max_compressed_bw_in_kbps = 0;
struct dc_dsc_bw_range bw_range = {0};
struct drm_dp_mst_topology_mgr *mst_mgr;
/*
* check if the mode could be supported if DSC pass-through is supported
@ -1407,7 +1428,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(
*/
if (is_dsc_common_config_possible(stream, &bw_range) &&
aconnector->port->passthrough_aux) {
mutex_lock(&aconnector->mst_mgr.lock);
mst_mgr = aconnector->port->mgr;
mutex_lock(&mst_mgr->lock);
cur_link_settings = stream->link->verified_link_cap;
@ -1420,7 +1442,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
down_link_bw_in_kbps);
mutex_unlock(&aconnector->mst_mgr.lock);
mutex_unlock(&mst_mgr->lock);
/*
* use the maximum dsc compression bandwidth as the required


@ -53,15 +53,15 @@ struct dsc_mst_fairness_vars {
struct amdgpu_dm_connector *aconnector;
};
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct dsc_mst_fairness_vars *vars);
int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct dsc_mst_fairness_vars *vars);
bool needs_dsc_aux_workaround(struct dc_link *link);
bool pre_validate_dsc(struct drm_atomic_state *state,
struct dm_atomic_state **dm_state_ptr,
struct dsc_mst_fairness_vars *vars);
int pre_validate_dsc(struct drm_atomic_state *state,
struct dm_atomic_state **dm_state_ptr,
struct dsc_mst_fairness_vars *vars);
enum dc_status dm_dp_mst_is_port_support_mode(
struct amdgpu_dm_connector *aconnector,

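The bool-to-int conversions above all follow one pattern: helpers that used to collapse every failure into false now return 0 on success or a negative errno, and callers forward that value unchanged. That is what lets compute_mst_dsc_configs_for_link() treat -ENOSPC from drm_dp_mst_atomic_check() as "not enough MST time slots, retry with more DSC compression" while aborting on any other error. A minimal sketch of the pattern, with hypothetical names (not the driver's):

#include <linux/errno.h>

/* Hypothetical helper: 0 on success, negative errno on failure. */
static int reserve_slots(int pbn, int available)
{
	if (pbn > available)
		return -ENOSPC;	/* recoverable: retry with a smaller pbn */
	return 0;
}

static int compute_config(int pbn, int available)
{
	int ret = reserve_slots(pbn, available);

	if (ret == -ENOSPC)
		ret = reserve_slots(pbn / 2, available);	/* e.g. with DSC enabled */
	if (ret < 0)
		return ret;	/* forward the errno instead of returning false */
	return 0;
}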

@ -138,7 +138,9 @@ static uint8_t get_number_of_objects(struct bios_parser *bp, uint32_t offset)
uint32_t object_table_offset = bp->object_info_tbl_offset + offset;
table = GET_IMAGE(ATOM_OBJECT_TABLE, object_table_offset);
table = ((ATOM_OBJECT_TABLE *) bios_get_image(&bp->base,
object_table_offset,
struct_size(table, asObjects, 1)));
if (!table)
return 0;
@ -166,8 +168,9 @@ static struct graphics_object_id bios_parser_get_connector_id(
uint32_t connector_table_offset = bp->object_info_tbl_offset
+ le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
ATOM_OBJECT_TABLE *tbl =
GET_IMAGE(ATOM_OBJECT_TABLE, connector_table_offset);
ATOM_OBJECT_TABLE *tbl = ((ATOM_OBJECT_TABLE *) bios_get_image(&bp->base,
connector_table_offset,
struct_size(tbl, asObjects, 1)));
if (!tbl) {
dm_error("Can't get connector table from atom bios.\n");
@ -662,8 +665,9 @@ static enum bp_result get_ss_info_v3_1(
if (!DATA_TABLES(ASIC_InternalSS_Info))
return BP_RESULT_UNSUPPORTED;
ss_table_header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V3,
DATA_TABLES(ASIC_InternalSS_Info));
ss_table_header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V3 *) bios_get_image(&bp->base,
DATA_TABLES(ASIC_InternalSS_Info),
struct_size(ss_table_header_include, asSpreadSpectrum, 1)));
table_size =
(le16_to_cpu(ss_table_header_include->sHeader.usStructureSize)
- sizeof(ATOM_COMMON_TABLE_HEADER))
@ -1029,8 +1033,10 @@ static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
if (!DATA_TABLES(ASIC_InternalSS_Info))
return result;
header = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V2,
DATA_TABLES(ASIC_InternalSS_Info));
header = ((ATOM_ASIC_INTERNAL_SS_INFO_V2 *) bios_get_image(
&bp->base,
DATA_TABLES(ASIC_InternalSS_Info),
struct_size(header, asSpreadSpectrum, 1)));
memset(info, 0, sizeof(struct spread_spectrum_info));
@ -1709,8 +1715,10 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
if (!DATA_TABLES(ASIC_InternalSS_Info))
return 0;
header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V2,
DATA_TABLES(ASIC_InternalSS_Info));
header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V2 *) bios_get_image(
&bp->base,
DATA_TABLES(ASIC_InternalSS_Info),
struct_size(header_include, asSpreadSpectrum, 1)));
size = (le16_to_cpu(header_include->sHeader.usStructureSize)
- sizeof(ATOM_COMMON_TABLE_HEADER))
@ -1746,8 +1754,9 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
if (!DATA_TABLES(ASIC_InternalSS_Info))
return number;
header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V3,
DATA_TABLES(ASIC_InternalSS_Info));
header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V3 *) bios_get_image(&bp->base,
DATA_TABLES(ASIC_InternalSS_Info),
struct_size(header_include, asSpreadSpectrum, 1)));
size = (le16_to_cpu(header_include->sHeader.usStructureSize) -
sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
@ -1789,11 +1798,13 @@ static enum bp_result bios_parser_get_gpio_pin_info(
if (!DATA_TABLES(GPIO_Pin_LUT))
return BP_RESULT_BADBIOSTABLE;
header = GET_IMAGE(ATOM_GPIO_PIN_LUT, DATA_TABLES(GPIO_Pin_LUT));
header = ((ATOM_GPIO_PIN_LUT *) bios_get_image(&bp->base,
DATA_TABLES(GPIO_Pin_LUT),
struct_size(header, asGPIO_Pin, 1)));
if (!header)
return BP_RESULT_BADBIOSTABLE;
if (sizeof(ATOM_COMMON_TABLE_HEADER) + sizeof(ATOM_GPIO_PIN_LUT)
if (sizeof(ATOM_COMMON_TABLE_HEADER) + struct_size(header, asGPIO_Pin, 1)
> le16_to_cpu(header->sHeader.usStructureSize))
return BP_RESULT_BADBIOSTABLE;
@ -1978,7 +1989,8 @@ static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,
offset += bp->object_info_tbl_offset;
tbl = GET_IMAGE(ATOM_OBJECT_TABLE, offset);
tbl = ((ATOM_OBJECT_TABLE *) bios_get_image(&bp->base, offset,
struct_size(tbl, asObjects, 1)));
if (!tbl)
return NULL;
@ -2600,8 +2612,7 @@ static enum bp_result update_slot_layout_info(
for (;;) {
record_header = (ATOM_COMMON_RECORD_HEADER *)
GET_IMAGE(ATOM_COMMON_RECORD_HEADER, record_offset);
record_header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, record_offset);
if (record_header == NULL) {
result = BP_RESULT_BADBIOSTABLE;
break;
@ -2615,7 +2626,7 @@ static enum bp_result update_slot_layout_info(
if (record_header->ucRecordType ==
ATOM_BRACKET_LAYOUT_RECORD_TYPE &&
sizeof(ATOM_BRACKET_LAYOUT_RECORD)
struct_size(record, asConnInfo, 1)
<= record_header->ucRecordSize) {
record = (ATOM_BRACKET_LAYOUT_RECORD *)
(record_header);
@ -2709,8 +2720,9 @@ static enum bp_result get_bracket_layout_record(
genericTableOffset = bp->object_info_tbl_offset +
bp->object_info_tbl.v1_3->usMiscObjectTableOffset;
object_table = (ATOM_OBJECT_TABLE *)
GET_IMAGE(ATOM_OBJECT_TABLE, genericTableOffset);
object_table = ((ATOM_OBJECT_TABLE *) bios_get_image(&bp->base,
genericTableOffset,
struct_size(object_table, asObjects, 1)));
if (!object_table)
return BP_RESULT_FAILURE;
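The bios_parser hunks above replace fixed-size GET_IMAGE() lookups with bios_get_image() calls that pass an explicit minimum length computed by struct_size(), so tables ending in a flexible array are validated for at least one element. A sketch of what struct_size() provides, using illustrative stand-in types (not the ATOM structures):

#include <linux/overflow.h>
#include <linux/types.h>

struct item {
	u32 id;
};

struct item_table {
	u16 count;
	struct item asObjects[];	/* flexible array member */
};

static size_t min_len_for_one_entry(const struct item_table *t)
{
	/* sizeof(*t) + 1 * sizeof(t->asObjects[0]), with overflow
	 * checking that saturates to SIZE_MAX instead of wrapping. */
	return struct_size(t, asObjects, 1);
}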


@ -462,6 +462,7 @@ static enum bp_result get_gpio_i2c_info(
uint32_t count = 0;
unsigned int table_index = 0;
bool find_valid = false;
struct atom_gpio_pin_assignment *pin;
if (!info)
return BP_RESULT_BADINPUT;
@ -489,20 +490,17 @@ static enum bp_result get_gpio_i2c_info(
- sizeof(struct atom_common_table_header))
/ sizeof(struct atom_gpio_pin_assignment);
pin = (struct atom_gpio_pin_assignment *) header->gpio_pin;
for (table_index = 0; table_index < count; table_index++) {
if (((record->i2c_id & I2C_HW_CAP) == (
header->gpio_pin[table_index].gpio_id &
I2C_HW_CAP)) &&
((record->i2c_id & I2C_HW_ENGINE_ID_MASK) ==
(header->gpio_pin[table_index].gpio_id &
I2C_HW_ENGINE_ID_MASK)) &&
((record->i2c_id & I2C_HW_LANE_MUX) ==
(header->gpio_pin[table_index].gpio_id &
I2C_HW_LANE_MUX))) {
if (((record->i2c_id & I2C_HW_CAP) == (pin->gpio_id & I2C_HW_CAP)) &&
((record->i2c_id & I2C_HW_ENGINE_ID_MASK) == (pin->gpio_id & I2C_HW_ENGINE_ID_MASK)) &&
((record->i2c_id & I2C_HW_LANE_MUX) == (pin->gpio_id & I2C_HW_LANE_MUX))) {
/* still valid */
find_valid = true;
break;
}
pin = (struct atom_gpio_pin_assignment *)((uint8_t *)pin + sizeof(struct atom_gpio_pin_assignment));
}
/* If we don't find the entry that we are looking for then
@ -2393,6 +2391,26 @@ static enum bp_result get_vram_info_v25(
return result;
}
static enum bp_result get_vram_info_v30(
struct bios_parser *bp,
struct dc_vram_info *info)
{
struct atom_vram_info_header_v3_0 *info_v30;
enum bp_result result = BP_RESULT_OK;
info_v30 = GET_IMAGE(struct atom_vram_info_header_v3_0,
DATA_TABLES(vram_info));
if (info_v30 == NULL)
return BP_RESULT_BADBIOSTABLE;
info->num_chans = info_v30->channel_num;
info->dram_channel_width_bytes = (1 << info_v30->channel_width) / 8;
return result;
}
/*
* get_integrated_info_v11
*
@ -3060,6 +3078,16 @@ static enum bp_result bios_parser_get_vram_info(
}
break;
case 3:
switch (revision.minor) {
case 0:
result = get_vram_info_v30(bp, info);
break;
default:
break;
}
break;
default:
return result;
}
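In get_vram_info_v30() the channel width is stored as an exponent: the expression (1 << channel_width) yields the width in bits, and dividing by 8 converts to bytes. A worked example with an illustrative value (not from a real VBIOS table):

unsigned int channel_width = 6;				/* illustrative */
unsigned int width_bytes = (1u << channel_width) / 8;	/* 64 bits -> 8 bytes */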


@ -609,8 +609,10 @@ static void dcn31_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
}
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
//bw_params->dram_channel_width_bytes = dc->ctx->asic_id.vram_width;
bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;


@ -363,32 +363,32 @@ static struct wm_table ddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 9,
.sr_enter_plus_exit_time_us = 11,
.sr_exit_time_us = 12.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 9,
.sr_enter_plus_exit_time_us = 11,
.sr_exit_time_us = 12.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 9,
.sr_enter_plus_exit_time_us = 11,
.sr_exit_time_us = 12.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
.sr_exit_time_us = 9,
.sr_enter_plus_exit_time_us = 11,
.sr_exit_time_us = 12.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
}
@ -400,32 +400,32 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.sr_exit_time_us = 16.5,
.sr_enter_plus_exit_time_us = 18.5,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.sr_exit_time_us = 16.5,
.sr_enter_plus_exit_time_us = 18.5,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.sr_exit_time_us = 16.5,
.sr_enter_plus_exit_time_us = 18.5,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.sr_exit_time_us = 16.5,
.sr_enter_plus_exit_time_us = 18.5,
.valid = true,
},
}


@ -123,9 +123,10 @@ static int dcn314_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
uint32_t result;
result = dcn314_smu_wait_for_response(clk_mgr, 10, 200000);
ASSERT(result == VBIOSSMC_Result_OK);
smu_print("SMU response after wait: %d\n", result);
if (result != VBIOSSMC_Result_OK)
smu_print("SMU Response was not OK. SMU response after wait received is: %d\n",
result);
if (result == VBIOSSMC_Status_BUSY)
return -1;
@ -216,6 +217,12 @@ int dcn314_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
khz_to_mhz_ceil(requested_dcfclk_khz));
#ifdef DBG
smu_print("actual_dcfclk_set_mhz %d is set to : %d\n",
actual_dcfclk_set_mhz,
actual_dcfclk_set_mhz * 1000);
#endif
return actual_dcfclk_set_mhz * 1000;
}


@ -553,6 +553,7 @@ static void dcn316_clk_mgr_helper_populate_bw_params(
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;


@ -1054,6 +1054,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
int i, j;
struct dc_state *dangling_context = dc_create_state(dc);
struct dc_state *current_ctx;
struct pipe_ctx *pipe;
struct timing_generator *tg;
if (dangling_context == NULL)
return;
@ -1096,6 +1098,18 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
}
if (should_disable && old_stream) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
tg = pipe->stream_res.tg;
/* When disabling a plane for a phantom pipe, we must turn on the
* phantom OTG so the disable programming gets the double buffer
* update. Otherwise the pipe will be left in a partially disabled
* state that can result in underflow or hang when enabling it
* again for a different use.
*/
if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
if (tg->funcs->enable_crtc)
tg->funcs->enable_crtc(tg);
}
dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
@ -1111,6 +1125,15 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
dc->hwss.post_unlock_program_front_end(dc, dangling_context);
}
/* We need to put the phantom OTG back into its default (disabled) state or we
* can get corruption when transitioning from one SubVP config to a different one.
* The OTG is set to disable on the falling edge of VUPDATE so the plane disable
* will still get its double buffer update.
*/
if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
if (tg->funcs->disable_phantom_crtc)
tg->funcs->disable_phantom_crtc(tg);
}
}
}
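Taken together, the two hunks above bracket the plane removal with phantom-OTG state changes. A condensed view of the intended sequence (simplified; the tg callbacks are the ones used in the code above):

/*
 * if phantom:  tg->funcs->enable_crtc(tg)             wake the OTG so the plane
 *                                                     disable is double-buffer latched
 * remove planes / program the dangling context        takes effect on VUPDATE
 * if phantom:  tg->funcs->disable_phantom_crtc(tg)    park the OTG back in its
 *                                                     default (disabled) state
 */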
@ -1169,7 +1192,7 @@ static void disable_vbios_mode_if_required(
if (pix_clk_100hz != requested_pix_clk_100hz) {
core_link_disable_stream(pipe);
pipe->stream->dpms_off = false;
pipe->stream->dpms_off = true;
}
}
}
@ -1749,6 +1772,12 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
context->stream_count == 0)
dc->hwss.prepare_bandwidth(dc, context);
/* When SubVP is active, all HW programming must be done while
* SubVP lock is acquired
*/
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
if (dc->debug.enable_double_buffered_dsc_pg_support)
dc->hwss.update_dsc_pg(dc, context, false);
@ -1776,9 +1805,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
}
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
result = dc->hwss.apply_ctx_to_hw(dc, context);
if (result != DC_OK) {
@ -3382,22 +3408,6 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
}
if (update_type != UPDATE_TYPE_FAST) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) ||
subvp_prev_use) {
// If old context or new context has phantom pipes, apply
// the phantom timings now. We can't change the phantom
// pipe configuration safely without driver acquiring
// the DMCUB lock first.
dc->hwss.apply_ctx_to_hw(dc, context);
break;
}
}
}
dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
if (update_type != UPDATE_TYPE_FAST) {
@ -3455,6 +3465,24 @@ static void commit_planes_for_stream(struct dc *dc,
return;
}
if (update_type != UPDATE_TYPE_FAST) {
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP &&
pipe_ctx->stream && pipe_ctx->plane_state) {
/* Only update visual confirm for SUBVP here.
* The bar appears on all pipes, so we need to update the bar on all displays
* so that the information doesn't get stale.
*/
struct mpcc_blnd_cfg blnd_cfg = { 0 };
dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color,
pipe_ctx->plane_res.hubp->inst);
}
}
}
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *plane_state = srf_updates[i].surface;
@ -3572,7 +3600,6 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.update_plane_addr(dc, pipe_ctx);
}
}
}
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
@ -3609,6 +3636,44 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg);
}
/* For phantom pipe OTG enable, it has to be done after any previous pipe
* that was in use has already been programmed and gotten its double buffer
* update for "disable".
*/
if (update_type != UPDATE_TYPE_FAST) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
/* If an active, non-phantom pipe is being transitioned into a phantom
* pipe, wait for the double buffer update to complete first before we do
* ANY phantom pipe programming.
*/
if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM &&
old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
old_pipe->stream_res.tg->funcs->wait_for_state(
old_pipe->stream_res.tg,
CRTC_STATE_VBLANK);
old_pipe->stream_res.tg->funcs->wait_for_state(
old_pipe->stream_res.tg,
CRTC_STATE_VACTIVE);
}
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) ||
subvp_prev_use) {
// If old context or new context has phantom pipes, apply
// the phantom timings now. We can't change the phantom
// pipe configuration safely without driver acquiring
// the DMCUB lock first.
dc->hwss.apply_ctx_to_hw(dc, context);
break;
}
}
}
if (update_type != UPDATE_TYPE_FAST)
dc->hwss.post_unlock_program_front_end(dc, context);
if (update_type != UPDATE_TYPE_FAST)
@ -3675,7 +3740,6 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
bool force_minimal_pipe_splitting = false;
uint32_t i;
*is_plane_addition = false;
@ -3707,33 +3771,17 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
}
}
/* For SubVP pipe split case when adding MPO video
* we need to add a minimal transition. In this case
* there will be 2 streams (1 main stream, 1 phantom
* stream).
/* For SubVP, when adding or removing planes, we need to add a minimal transition
* (even when disabling all planes). Whenever disabling a phantom pipe, we
* must use the minimal transition path to disable the pipe correctly.
*/
if (cur_stream_status &&
dc->current_state->stream_count == 2 &&
stream->mall_stream_config.type == SUBVP_MAIN) {
bool is_pipe_split = false;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream &&
(dc->current_state->res_ctx.pipe_ctx[i].bottom_pipe ||
dc->current_state->res_ctx.pipe_ctx[i].next_odm_pipe)) {
is_pipe_split = true;
break;
}
}
if (cur_stream_status && stream->mall_stream_config.type == SUBVP_MAIN) {
/* determine if minimal transition is required due to SubVP */
if (surface_count > 0 && is_pipe_split) {
if (cur_stream_status->plane_count > surface_count) {
force_minimal_pipe_splitting = true;
} else if (cur_stream_status->plane_count < surface_count) {
force_minimal_pipe_splitting = true;
*is_plane_addition = true;
}
if (cur_stream_status->plane_count > surface_count) {
force_minimal_pipe_splitting = true;
} else if (cur_stream_status->plane_count < surface_count) {
force_minimal_pipe_splitting = true;
*is_plane_addition = true;
}
}
@ -3768,6 +3816,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
enum dc_status ret = DC_ERROR_UNEXPECTED;
unsigned int i, j;
unsigned int pipe_in_use = 0;
bool subvp_in_use = false;
if (!transition_context)
return false;
@ -3784,6 +3833,18 @@ static bool commit_minimal_transition_state(struct dc *dc,
pipe_in_use++;
}
/* If SubVP is enabled and we are adding or removing planes from any main subvp
* pipe, we must use the minimal transition.
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
subvp_in_use = true;
break;
}
}
/* When the OS adds a new surface after all pipes have been used up by the ODM
* combine and MPC split features, it needs to use commit_minimal_transition_state
* to transition safely. After the OS exits MPO, it will go back to using ODM and
* MPC split with all pipes; we need
@ -3792,7 +3853,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
* Reduce the scenarios that use dc_commit_state_no_check in the flip stage, especially
* entering/exiting MPO when DCN still has enough resources.
*/
if (pipe_in_use != dc->res_pool->pipe_count) {
if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use) {
dc_release_state(transition_context);
return true;
}
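A simplified restatement of the new early-out (no behavior beyond the code above): the minimal transition is skipped only when some pipes are still free and no phantom pipe exists in the current state.

/*
 * if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use)
 *         -> release the transition context and return true (skip);
 * otherwise the transition state is committed before the real update.
 */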


@ -4663,6 +4663,10 @@ void dc_link_set_preferred_training_settings(struct dc *dc,
link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN;
}
if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
link->type == dc_connection_mst_branch)
dm_helpers_dp_mst_update_branch_bandwidth(dc->ctx, link);
/* Retrain now, or wait until next stream update to apply */
if (skip_immediate_retrain == false)
dc_link_set_preferred_link_settings(dc, &link->preferred_link_setting, link);


@ -82,6 +82,7 @@ struct dp_hdmi_dongle_signature_data {
#define HDMI_SCDC_STATUS_FLAGS 0x40
#define HDMI_SCDC_ERR_DETECT 0x50
#define HDMI_SCDC_TEST_CONFIG 0xC0
#define HDMI_SCDC_DEVICE_ID 0xD3
union hdmi_scdc_update_read_data {
uint8_t byte[2];


@ -1912,7 +1912,7 @@ enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_train
return status;
}
static void dpcd_exit_training_mode(struct dc_link *link)
static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding)
{
uint8_t sink_status = 0;
uint8_t i;
@ -1920,12 +1920,14 @@ static void dpcd_exit_training_mode(struct dc_link *link)
/* clear training pattern set */
dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
/* poll for intra-hop disable */
for (i = 0; i < 10; i++) {
if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
(sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
break;
udelay(1000);
if (encoding == DP_128b_132b_ENCODING) {
/* poll for intra-hop disable */
for (i = 0; i < 10; i++) {
if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
(sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
break;
udelay(1000);
}
}
}
@ -2649,7 +2651,7 @@ enum link_training_result dc_link_dp_perform_link_training(
&lt_settings);
/* reset previous training states */
dpcd_exit_training_mode(link);
dpcd_exit_training_mode(link, encoding);
/* configure link prior to entering training mode */
dpcd_configure_lttpr_mode(link, &lt_settings);
@ -2670,7 +2672,7 @@ enum link_training_result dc_link_dp_perform_link_training(
ASSERT(0);
/* exit training mode */
dpcd_exit_training_mode(link);
dpcd_exit_training_mode(link, encoding);
/* switch to video idle */
if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern)
@ -2771,8 +2773,11 @@ bool perform_link_training_with_retries(
/* Update verified link settings to the current ones,
* because DPIA LT might fall back to a lower link setting.
*/
link->verified_link_cap.link_rate = link->cur_link_settings.link_rate;
link->verified_link_cap.lane_count = link->cur_link_settings.lane_count;
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
link->verified_link_cap.link_rate = link->cur_link_settings.link_rate;
link->verified_link_cap.lane_count = link->cur_link_settings.lane_count;
dm_helpers_dp_mst_update_branch_bandwidth(link->ctx, link);
}
}
} else {
status = dc_link_dp_perform_link_training(link,
@ -3020,7 +3025,7 @@ static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link)
{
enum dc_link_rate cable_max_link_rate = LINK_RATE_HIGH3;
enum dc_link_rate cable_max_link_rate = LINK_RATE_UNKNOWN;
if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20)
cable_max_link_rate = LINK_RATE_UHBR20;
@ -3083,15 +3088,29 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
max_link_cap.link_spread =
link->reported_link_cap.link_spread;
/* Lower link settings based on cable attributes */
/* Lower link settings based on cable attributes.
* Cable ID is a DP2 feature used to identify the max certified link rate
* that a cable can carry. The cable identification method requires both
* cable and display hardware support. Since the spec came late, it is
* anticipated that the first round of DP2 cables and displays may not
* reliably return cable ID data. Our cable ID policy is therefore: if the
* cable returns non-zero cable ID data, we take the cable's link rate
* capability into account; if we get zero data, the cable's link rate
* capability is considered inconclusive and we do not take it into
* account, to avoid over-limiting the hardware capability for users. The
* max overall link rate capability is still determined after the actual
* DP pre-training. Cable ID is an auxiliary method of determining the max
* link bandwidth capability.
*/
cable_max_link_rate = get_cable_max_link_rate(link);
if (!link->dc->debug.ignore_cable_id &&
cable_max_link_rate != LINK_RATE_UNKNOWN &&
cable_max_link_rate < max_link_cap.link_rate)
max_link_cap.link_rate = cable_max_link_rate;
/*
* account for lttpr repeaters cap
/* account for lttpr repeaters cap
* notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
*/
if (dp_is_lttpr_present(link)) {
@ -4540,9 +4559,19 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off
&& pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
// Always use max settings here for DP 1.4a LL Compliance CTS
if (link->is_automated) {
pipe_ctx->link_config.dp_link_settings.lane_count =
link->verified_link_cap.lane_count;
pipe_ctx->link_config.dp_link_settings.link_rate =
link->verified_link_cap.link_rate;
pipe_ctx->link_config.dp_link_settings.link_spread =
link->verified_link_cap.link_spread;
}
core_link_enable_stream(link->dc->current_state, pipe_ctx);
}
}
}
@ -4583,6 +4612,8 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
}
if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
// Workaround for DP 1.4a LL Compliance CTS as USB4 has to share encoders unlike DP and USBC
link->is_automated = true;
device_service_clear.bits.AUTOMATED_TEST = 1;
core_link_write_dpcd(
link,
@ -7226,6 +7257,7 @@ void dp_retrain_link_dp_test(struct dc_link *link,
struct pipe_ctx *pipes =
&link->dc->current_state->res_ctx.pipe_ctx[0];
unsigned int i;
bool do_fallback = false;
for (i = 0; i < MAX_PIPES; i++) {
@ -7258,13 +7290,16 @@ void dp_retrain_link_dp_test(struct dc_link *link,
memset(&link->cur_link_settings, 0,
sizeof(link->cur_link_settings));
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
do_fallback = true;
perform_link_training_with_retries(
link_setting,
skip_video_pattern,
LINK_TRAINING_ATTEMPTS,
&pipes[i],
SIGNAL_TYPE_DISPLAY_PORT,
false);
do_fallback);
link->dc->hwss.enable_stream(&pipes[i]);


@ -791,10 +791,14 @@ static enum link_training_result dpia_training_eq_transparent(
}
if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) &&
dp_is_interlane_aligned(dpcd_lane_status_updated)) {
result = LINK_TRAINING_SUCCESS;
break;
dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status)) {
/* Take into consideration corner case for DP 1.4a LL Compliance CTS as USB4
* has to share encoders unlike DP and USBC
*/
if (dp_is_interlane_aligned(dpcd_lane_status_updated) || (link->is_automated && retries_eq)) {
result = LINK_TRAINING_SUCCESS;
break;
}
}
/* Update VS/PE. */
@ -1008,7 +1012,8 @@ enum link_training_result dc_link_dpia_perform_link_training(
*/
if (result == LINK_TRAINING_SUCCESS) {
msleep(5);
result = dp_check_link_loss_status(link, &lt_settings);
if (!link->is_automated)
result = dp_check_link_loss_status(link, &lt_settings);
} else if (result == LINK_TRAINING_ABORT) {
dpia_training_abort(link, &lt_settings, repeater_id);
} else {


@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
#define DC_VER "3.2.210"
#define DC_VER "3.2.212"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@ -261,6 +261,7 @@ struct dc_caps {
uint32_t cache_line_size;
uint32_t cache_num_ways;
uint16_t subvp_fw_processing_delay_us;
uint8_t subvp_drr_max_vblank_margin_us;
uint16_t subvp_prefetch_end_to_mall_start_us;
uint8_t subvp_swath_height_margin_lines; // subvp start line must be aligned to 2 x swath height
uint16_t subvp_pstate_allow_width_us;
@ -407,6 +408,7 @@ struct dc_config {
bool use_default_clock_table;
bool force_bios_enable_lttpr;
uint8_t force_bios_fixed_vs;
int sdpif_request_limit_words_per_umc;
};
@ -456,15 +458,15 @@ enum pipe_split_policy {
MPC_SPLIT_DYNAMIC = 0,
/**
* @MPC_SPLIT_DYNAMIC: Avoid pipe split, which means that DC will not
* @MPC_SPLIT_AVOID: Avoid pipe split, which means that DC will not
* try any sort of split optimization.
*/
MPC_SPLIT_AVOID = 1,
/**
* @MPC_SPLIT_DYNAMIC: With this option, DC will only try to optimize
* the pipe utilization when using a single display; if the user
* connects to a second display, DC will avoid pipe split.
* @MPC_SPLIT_AVOID_MULT_DISP: With this option, DC will only try to
* optimize the pipe utilization when using a single display; if the
* user connects to a second display, DC will avoid pipe split.
*/
MPC_SPLIT_AVOID_MULT_DISP = 2,
};
@ -495,7 +497,7 @@ enum dcn_zstate_support_state {
};
/**
* dc_clocks - DC pipe clocks
* struct dc_clocks - DC pipe clocks
*
* For any clocks that may differ per pipe only the max is stored in this
* structure
@ -526,7 +528,7 @@ struct dc_clocks {
bool fclk_prev_p_state_change_support;
int num_ways;
/**
/*
* @fw_based_mclk_switching
*
* DC has a mechanism that leverage the variable refresh rate to switch
@ -864,6 +866,7 @@ struct dc_debug_options {
bool enable_dp_dig_pixel_rate_div_policy;
enum lttpr_mode lttpr_mode_override;
unsigned int dsc_delay_factor_wa_x1000;
unsigned int min_prefetch_in_strobe_ns;
};
struct gpu_info_soc_bounding_box_v1_0;


@ -477,12 +477,20 @@ static void populate_subvp_cmd_drr_info(struct dc *dc,
(((uint64_t)main_timing->pix_clk_100hz * 100)));
drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000),
(((uint64_t)drr_timing->pix_clk_100hz * 100)));
max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us - drr_active_us), 2) + drr_active_us;
max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us;
max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us -
dc->caps.subvp_fw_processing_delay_us - drr_active_us), 2) + drr_active_us;
max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us - dc->caps.subvp_fw_processing_delay_us;
max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us;
max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us),
(((uint64_t)drr_timing->h_total * 1000000)));
/* When calculating the max vtotal supported for SubVP + DRR cases, add
* margin due to possible rounding errors (being off by 1 line in the
* FW calculation can incorrectly push the P-State switch to wait 1 frame
* longer).
*/
max_vtotal_supported = max_vtotal_supported - dc->caps.subvp_drr_max_vblank_margin_us;
pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;
pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported;
}
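The added subvp_fw_processing_delay_us term tightens both bounds above. A worked example with illustrative numbers (not taken from any ASIC): subvp_active_us = 2000, prefetch_us = 300, drr_active_us = 800, mall_region_us = 400, subvp_fw_processing_delay_us = 30:

/*
 * max_drr_vblank_us     = (2000 - 300 - 30 - 800) / 2 + 800 = 1235
 * max_drr_mallregion_us =  2000 - 300 - 400 - 30            = 1270
 * max_drr_supported_us  =  max(1235, 1270)                  = 1270
 */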
@ -859,11 +867,59 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
diag_data.is_cw6_enabled);
}
static bool dc_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
{
struct pipe_ctx *test_pipe, *split_pipe;
const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
struct rect r1 = scl_data->recout, r2, r2_half;
int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
int cur_layer = pipe_ctx->plane_state->layer_index;
/**
* Disable the cursor if there's another pipe above this one with a
* plane that contains this pipe's viewport, to prevent a double cursor
* and incorrect scaling artifacts.
*/
for (test_pipe = pipe_ctx->top_pipe; test_pipe;
test_pipe = test_pipe->top_pipe) {
// Skip an invisible layer and a pipe-split plane on the same layer
if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
continue;
r2 = test_pipe->plane_res.scl_data.recout;
r2_r = r2.x + r2.width;
r2_b = r2.y + r2.height;
split_pipe = test_pipe;
/**
* There may be another half plane on the same layer because of a
* pipe split; merge the two halves' recouts together (they share
* the same height).
*/
for (split_pipe = pipe_ctx->top_pipe; split_pipe;
split_pipe = split_pipe->top_pipe)
if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
r2_half = split_pipe->plane_res.scl_data.recout;
r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
r2.width = r2.width + r2_half.width;
r2_r = r2.x + r2.width;
break;
}
if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
return true;
}
return false;
}
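The early return above is a rectangle-containment test: the cursor can be disabled when this pipe's recout r1 lies entirely inside the (possibly merged) recout r2 of a plane above it. A standalone sketch of the same predicate, with a stand-in rect type mirroring the driver's x/y/width/height fields:

#include <stdbool.h>

struct rect_s {
	int x, y, width, height;	/* illustrative stand-in */
};

static bool rect_contains(struct rect_s outer, struct rect_s inner)
{
	return inner.x >= outer.x && inner.y >= outer.y &&
	       inner.x + inner.width  <= outer.x + outer.width &&
	       inner.y + inner.height <= outer.y + outer.height;
}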
static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state != NULL) {
if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
return false;
if (dc_can_pipe_disable_cursor(pipe_ctx))
return false;
}
if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||


@ -184,6 +184,7 @@ struct dc_link {
bool is_dig_mapping_flexible;
bool hpd_status; /* HPD status of link without physical HPD pin. */
bool is_hpd_pending; /* Indicates a new received hpd */
bool is_automated; /* Indicates automated testing */
bool edp_sink_present;


@ -413,6 +413,11 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
else
copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 0;
// WA for PSR1 on a specific TCON: require a frame delay for frame re-lock
copy_settings_data->relock_delay_frame_cnt = 0;
if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8)
copy_settings_data->relock_delay_frame_cnt = 2;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
dc_dmub_srv_wait_idle(dc->dmub_srv);


@ -171,6 +171,7 @@ struct dcn_hubbub_registers {
uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B;
uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C;
uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D;
uint32_t SDPIF_REQUEST_RATE_LIMIT;
};
#define HUBBUB_REG_FIELD_LIST_DCN32(type) \
@ -360,7 +361,8 @@ struct dcn_hubbub_registers {
type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C;\
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C;\
type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D;\
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D;\
type SDPIF_REQUEST_RATE_LIMIT
struct dcn_hubbub_shift {


@ -97,10 +97,12 @@ void dcn10_lock_all_pipes(struct dc *dc,
bool lock)
{
struct pipe_ctx *pipe_ctx;
struct pipe_ctx *old_pipe_ctx;
struct timing_generator *tg;
int i;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
pipe_ctx = &context->res_ctx.pipe_ctx[i];
tg = pipe_ctx->stream_res.tg;
@ -110,7 +112,7 @@ void dcn10_lock_all_pipes(struct dc *dc,
*/
if (pipe_ctx->top_pipe ||
!pipe_ctx->stream ||
!pipe_ctx->plane_state ||
(!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
!tg->funcs->is_tg_enabled(tg))
continue;


@ -27,204 +27,177 @@
#define TO_DCN20_DWBC(dwbc_base) \
container_of(dwbc_base, struct dcn20_dwbc, base)
/* DCN */
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
#define BASE(seg) \
BASE_INNER(seg)
#define SR(reg_name)\
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
#define SRI(reg_name, block, id)\
.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define SRI2(reg_name, block, id)\
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
#define SRII(reg_name, block, id)\
.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
#define SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
#define DWBC_COMMON_REG_LIST_DCN2_0(inst) \
SRI2(WB_ENABLE, CNV, inst),\
SRI2(WB_EC_CONFIG, CNV, inst),\
SRI2(CNV_MODE, CNV, inst),\
SRI2(CNV_WINDOW_START, CNV, inst),\
SRI2(CNV_WINDOW_SIZE, CNV, inst),\
SRI2(CNV_UPDATE, CNV, inst),\
SRI2(CNV_SOURCE_SIZE, CNV, inst),\
SRI2(CNV_TEST_CNTL, CNV, inst),\
SRI2(CNV_TEST_CRC_RED, CNV, inst),\
SRI2(CNV_TEST_CRC_GREEN, CNV, inst),\
SRI2(CNV_TEST_CRC_BLUE, CNV, inst),\
SRI2(WBSCL_COEF_RAM_SELECT, WBSCL, inst),\
SRI2(WBSCL_COEF_RAM_TAP_DATA, WBSCL, inst),\
SRI2(WBSCL_MODE, WBSCL, inst),\
SRI2(WBSCL_TAP_CONTROL, WBSCL, inst),\
SRI2(WBSCL_DEST_SIZE, WBSCL, inst),\
SRI2(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL, inst),\
SRI2(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL, inst),\
SRI2(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL, inst),\
SRI2(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL, inst),\
SRI2(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL, inst),\
SRI2(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL, inst),\
SRI2(WBSCL_ROUND_OFFSET, WBSCL, inst),\
SRI2(WBSCL_OVERFLOW_STATUS, WBSCL, inst),\
SRI2(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL, inst),\
SRI2(WBSCL_TEST_CNTL, WBSCL, inst),\
SRI2(WBSCL_TEST_CRC_RED, WBSCL, inst),\
SRI2(WBSCL_TEST_CRC_GREEN, WBSCL, inst),\
SRI2(WBSCL_TEST_CRC_BLUE, WBSCL, inst),\
SRI2(WBSCL_BACKPRESSURE_CNT_EN, WBSCL, inst),\
SRI2(WB_MCIF_BACKPRESSURE_CNT, WBSCL, inst),\
SRI2(WBSCL_CLAMP_Y_RGB, WBSCL, inst),\
SRI2(WBSCL_CLAMP_CBCR, WBSCL, inst),\
SRI2(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL, inst),\
SRI2(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL, inst),\
SRI2(WBSCL_DEBUG, WBSCL, inst),\
SRI2(WBSCL_TEST_DEBUG_INDEX, WBSCL, inst),\
SRI2(WBSCL_TEST_DEBUG_DATA, WBSCL, inst),\
SRI2(WB_DEBUG_CTRL, CNV, inst),\
SRI2(WB_DBG_MODE, CNV, inst),\
SRI2(WB_HW_DEBUG, CNV, inst),\
SRI2(CNV_TEST_DEBUG_INDEX, CNV, inst),\
SRI2(CNV_TEST_DEBUG_DATA, CNV, inst),\
SRI2(WB_SOFT_RESET, CNV, inst),\
SRI2(WB_WARM_UP_MODE_CTL1, CNV, inst),\
SRI2(WB_WARM_UP_MODE_CTL2, CNV, inst)
SRI2_DWB(WB_ENABLE, CNV, inst),\
SRI2_DWB(WB_EC_CONFIG, CNV, inst),\
SRI2_DWB(CNV_MODE, CNV, inst),\
SRI2_DWB(CNV_WINDOW_START, CNV, inst),\
SRI2_DWB(CNV_WINDOW_SIZE, CNV, inst),\
SRI2_DWB(CNV_UPDATE, CNV, inst),\
SRI2_DWB(CNV_SOURCE_SIZE, CNV, inst),\
SRI2_DWB(CNV_TEST_CNTL, CNV, inst),\
SRI2_DWB(CNV_TEST_CRC_RED, CNV, inst),\
SRI2_DWB(CNV_TEST_CRC_GREEN, CNV, inst),\
SRI2_DWB(CNV_TEST_CRC_BLUE, CNV, inst),\
SRI2_DWB(WBSCL_COEF_RAM_SELECT, WBSCL, inst),\
SRI2_DWB(WBSCL_COEF_RAM_TAP_DATA, WBSCL, inst),\
SRI2_DWB(WBSCL_MODE, WBSCL, inst),\
SRI2_DWB(WBSCL_TAP_CONTROL, WBSCL, inst),\
SRI2_DWB(WBSCL_DEST_SIZE, WBSCL, inst),\
SRI2_DWB(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL, inst),\
SRI2_DWB(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL, inst),\
SRI2_DWB(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL, inst),\
SRI2_DWB(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL, inst),\
SRI2_DWB(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL, inst),\
SRI2_DWB(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL, inst),\
SRI2_DWB(WBSCL_ROUND_OFFSET, WBSCL, inst),\
SRI2_DWB(WBSCL_OVERFLOW_STATUS, WBSCL, inst),\
SRI2_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL, inst),\
SRI2_DWB(WBSCL_TEST_CNTL, WBSCL, inst),\
SRI2_DWB(WBSCL_TEST_CRC_RED, WBSCL, inst),\
SRI2_DWB(WBSCL_TEST_CRC_GREEN, WBSCL, inst),\
SRI2_DWB(WBSCL_TEST_CRC_BLUE, WBSCL, inst),\
SRI2_DWB(WBSCL_BACKPRESSURE_CNT_EN, WBSCL, inst),\
SRI2_DWB(WB_MCIF_BACKPRESSURE_CNT, WBSCL, inst),\
SRI2_DWB(WBSCL_CLAMP_Y_RGB, WBSCL, inst),\
SRI2_DWB(WBSCL_CLAMP_CBCR, WBSCL, inst),\
SRI2_DWB(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL, inst),\
SRI2_DWB(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL, inst),\
SRI2_DWB(WBSCL_DEBUG, WBSCL, inst),\
SRI2_DWB(WBSCL_TEST_DEBUG_INDEX, WBSCL, inst),\
SRI2_DWB(WBSCL_TEST_DEBUG_DATA, WBSCL, inst),\
SRI2_DWB(WB_DEBUG_CTRL, CNV, inst),\
SRI2_DWB(WB_DBG_MODE, CNV, inst),\
SRI2_DWB(WB_HW_DEBUG, CNV, inst),\
SRI2_DWB(CNV_TEST_DEBUG_INDEX, CNV, inst),\
SRI2_DWB(CNV_TEST_DEBUG_DATA, CNV, inst),\
SRI2_DWB(WB_SOFT_RESET, CNV, inst),\
SRI2_DWB(WB_WARM_UP_MODE_CTL1, CNV, inst),\
SRI2_DWB(WB_WARM_UP_MODE_CTL2, CNV, inst)
#define DWBC_COMMON_MASK_SH_LIST_DCN2_0(mask_sh) \
SF(WB_ENABLE, WB_ENABLE, mask_sh),\
SF(WB_EC_CONFIG, DISPCLK_R_WB_GATE_DIS, mask_sh),\
SF(WB_EC_CONFIG, DISPCLK_G_WB_GATE_DIS, mask_sh),\
SF(WB_EC_CONFIG, DISPCLK_G_WBSCL_GATE_DIS, mask_sh),\
SF(WB_EC_CONFIG, WB_TEST_CLK_SEL, mask_sh),\
SF(WB_EC_CONFIG, WB_LB_LS_DIS, mask_sh),\
SF(WB_EC_CONFIG, WB_LB_SD_DIS, mask_sh),\
SF(WB_EC_CONFIG, WB_LUT_LS_DIS, mask_sh),\
SF(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_MODE_SEL, mask_sh),\
SF(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_DIS, mask_sh),\
SF(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_FORCE, mask_sh),\
SF(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_STATE, mask_sh),\
SF(WB_EC_CONFIG, WB_RAM_PW_SAVE_MODE, mask_sh),\
SF(WB_EC_CONFIG, WBSCL_LUT_MEM_PWR_STATE, mask_sh),\
SF(CNV_MODE, CNV_OUT_BPC, mask_sh),\
SF(CNV_MODE, CNV_FRAME_CAPTURE_RATE, mask_sh),\
SF(CNV_MODE, CNV_WINDOW_CROP_EN, mask_sh),\
SF(CNV_MODE, CNV_STEREO_TYPE, mask_sh),\
SF(CNV_MODE, CNV_INTERLACED_MODE, mask_sh),\
SF(CNV_MODE, CNV_EYE_SELECTION, mask_sh),\
SF(CNV_MODE, CNV_STEREO_POLARITY, mask_sh),\
SF(CNV_MODE, CNV_INTERLACED_FIELD_ORDER, mask_sh),\
SF(CNV_MODE, CNV_STEREO_SPLIT, mask_sh),\
SF(CNV_MODE, CNV_NEW_CONTENT, mask_sh),\
SF(CNV_MODE, CNV_FRAME_CAPTURE_EN_CURRENT, mask_sh),\
SF(CNV_MODE, CNV_FRAME_CAPTURE_EN, mask_sh),\
SF(CNV_WINDOW_START, CNV_WINDOW_START_X, mask_sh),\
SF(CNV_WINDOW_START, CNV_WINDOW_START_Y, mask_sh),\
SF(CNV_WINDOW_SIZE, CNV_WINDOW_WIDTH, mask_sh),\
SF(CNV_WINDOW_SIZE, CNV_WINDOW_HEIGHT, mask_sh),\
SF(CNV_UPDATE, CNV_UPDATE_PENDING, mask_sh),\
SF(CNV_UPDATE, CNV_UPDATE_TAKEN, mask_sh),\
SF(CNV_UPDATE, CNV_UPDATE_LOCK, mask_sh),\
SF(CNV_SOURCE_SIZE, CNV_SOURCE_WIDTH, mask_sh),\
SF(CNV_SOURCE_SIZE, CNV_SOURCE_HEIGHT, mask_sh),\
SF(CNV_TEST_CNTL, CNV_TEST_CRC_EN, mask_sh),\
SF(CNV_TEST_CNTL, CNV_TEST_CRC_CONT_EN, mask_sh),\
SF(CNV_TEST_CRC_RED, CNV_TEST_CRC_RED_MASK, mask_sh),\
SF(CNV_TEST_CRC_RED, CNV_TEST_CRC_SIG_RED, mask_sh),\
SF(CNV_TEST_CRC_GREEN, CNV_TEST_CRC_GREEN_MASK, mask_sh),\
SF(CNV_TEST_CRC_GREEN, CNV_TEST_CRC_SIG_GREEN, mask_sh),\
SF(CNV_TEST_CRC_BLUE, CNV_TEST_CRC_BLUE_MASK, mask_sh),\
SF(CNV_TEST_CRC_BLUE, CNV_TEST_CRC_SIG_BLUE, mask_sh),\
SF(WB_DEBUG_CTRL, WB_DEBUG_EN, mask_sh),\
SF(WB_DEBUG_CTRL, WB_DEBUG_SEL, mask_sh),\
SF(WB_DBG_MODE, WB_DBG_MODE_EN, mask_sh),\
SF(WB_DBG_MODE, WB_DBG_DIN_FMT, mask_sh),\
SF(WB_DBG_MODE, WB_DBG_36MODE, mask_sh),\
SF(WB_DBG_MODE, WB_DBG_CMAP, mask_sh),\
SF(WB_DBG_MODE, WB_DBG_PXLRATE_ERROR, mask_sh),\
SF(WB_DBG_MODE, WB_DBG_SOURCE_WIDTH, mask_sh),\
SF(WB_HW_DEBUG, WB_HW_DEBUG, mask_sh),\
SF(WB_SOFT_RESET, WB_SOFT_RESET, mask_sh),\
SF(CNV_TEST_DEBUG_INDEX, CNV_TEST_DEBUG_INDEX, mask_sh),\
SF(CNV_TEST_DEBUG_INDEX, CNV_TEST_DEBUG_WRITE_EN, mask_sh),\
SF(CNV_TEST_DEBUG_DATA, CNV_TEST_DEBUG_DATA, mask_sh),\
SF(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\
SF(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_PHASE, mask_sh),\
SF(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_FILTER_TYPE, mask_sh),\
SF(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\
SF(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\
SF(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\
SF(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\
SF(WBSCL_MODE, WBSCL_MODE, mask_sh),\
SF(WBSCL_MODE, WBSCL_OUT_BIT_DEPTH, mask_sh),\
SF(WBSCL_TAP_CONTROL, WBSCL_V_NUM_OF_TAPS_Y_RGB, mask_sh),\
SF(WBSCL_TAP_CONTROL, WBSCL_V_NUM_OF_TAPS_CBCR, mask_sh),\
SF(WBSCL_TAP_CONTROL, WBSCL_H_NUM_OF_TAPS_Y_RGB, mask_sh),\
SF(WBSCL_TAP_CONTROL, WBSCL_H_NUM_OF_TAPS_CBCR, mask_sh),\
SF(WBSCL_DEST_SIZE, WBSCL_DEST_HEIGHT, mask_sh),\
SF(WBSCL_DEST_SIZE, WBSCL_DEST_WIDTH, mask_sh),\
SF(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL_H_SCALE_RATIO, mask_sh),\
SF(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL_H_INIT_FRAC_Y_RGB, mask_sh),\
SF(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL_H_INIT_INT_Y_RGB, mask_sh),\
SF(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL_H_INIT_FRAC_CBCR, mask_sh),\
SF(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL_H_INIT_INT_CBCR, mask_sh),\
SF(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL_V_SCALE_RATIO, mask_sh),\
SF(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL_V_INIT_FRAC_Y_RGB, mask_sh),\
SF(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL_V_INIT_INT_Y_RGB, mask_sh),\
SF(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL_V_INIT_FRAC_CBCR, mask_sh),\
SF(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL_V_INIT_INT_CBCR, mask_sh),\
SF(WBSCL_ROUND_OFFSET, WBSCL_ROUND_OFFSET_Y_RGB, mask_sh),\
SF(WBSCL_ROUND_OFFSET, WBSCL_ROUND_OFFSET_CBCR, mask_sh),\
SF(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_FLAG, mask_sh),\
SF(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_ACK, mask_sh),\
SF(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_MASK, mask_sh),\
SF(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_INT_STATUS, mask_sh),\
SF(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_INT_TYPE, mask_sh),\
SF(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_FLAG, mask_sh),\
SF(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_ACK, mask_sh),\
SF(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_MASK, mask_sh),\
SF(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_INT_STATUS, mask_sh),\
SF(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_INT_TYPE, mask_sh),\
SF(WBSCL_TEST_CNTL, WBSCL_TEST_CRC_EN, mask_sh),\
SF(WBSCL_TEST_CNTL, WBSCL_TEST_CRC_CONT_EN, mask_sh),\
SF(WBSCL_TEST_CRC_RED, WBSCL_TEST_CRC_RED_MASK, mask_sh),\
SF(WBSCL_TEST_CRC_RED, WBSCL_TEST_CRC_SIG_RED, mask_sh),\
SF(WBSCL_TEST_CRC_GREEN, WBSCL_TEST_CRC_GREEN_MASK, mask_sh),\
SF(WBSCL_TEST_CRC_GREEN, WBSCL_TEST_CRC_SIG_GREEN, mask_sh),\
SF(WBSCL_TEST_CRC_BLUE, WBSCL_TEST_CRC_BLUE_MASK, mask_sh),\
SF(WBSCL_TEST_CRC_BLUE, WBSCL_TEST_CRC_SIG_BLUE, mask_sh),\
SF(WBSCL_BACKPRESSURE_CNT_EN, WBSCL_BACKPRESSURE_CNT_EN, mask_sh),\
SF(WB_MCIF_BACKPRESSURE_CNT, WB_MCIF_Y_MAX_BACKPRESSURE, mask_sh),\
SF(WB_MCIF_BACKPRESSURE_CNT, WB_MCIF_C_MAX_BACKPRESSURE, mask_sh),\
SF(WBSCL_CLAMP_Y_RGB, WBSCL_CLAMP_UPPER_Y_RGB, mask_sh),\
SF(WBSCL_CLAMP_Y_RGB, WBSCL_CLAMP_LOWER_Y_RGB, mask_sh),\
SF(WBSCL_CLAMP_CBCR, WBSCL_CLAMP_UPPER_CBCR, mask_sh),\
SF(WBSCL_CLAMP_CBCR, WBSCL_CLAMP_LOWER_CBCR, mask_sh),\
SF(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL_OUTSIDE_PIX_STRATEGY, mask_sh),\
SF(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL_BLACK_COLOR_G_Y, mask_sh),\
SF(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL_BLACK_COLOR_B_CB, mask_sh),\
SF(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL_BLACK_COLOR_R_CR, mask_sh),\
SF(WBSCL_DEBUG, WBSCL_DEBUG, mask_sh),\
SF(WBSCL_TEST_DEBUG_INDEX, WBSCL_TEST_DEBUG_INDEX, mask_sh),\
SF(WBSCL_TEST_DEBUG_INDEX, WBSCL_TEST_DEBUG_WRITE_EN, mask_sh),\
SF(WBSCL_TEST_DEBUG_DATA, WBSCL_TEST_DEBUG_DATA, mask_sh),\
SF(WB_WARM_UP_MODE_CTL1, WIDTH_WARMUP, mask_sh),\
SF(WB_WARM_UP_MODE_CTL1, HEIGHT_WARMUP, mask_sh),\
SF(WB_WARM_UP_MODE_CTL1, GMC_WARM_UP_ENABLE, mask_sh),\
SF(WB_WARM_UP_MODE_CTL2, DATA_VALUE_WARMUP, mask_sh),\
SF(WB_WARM_UP_MODE_CTL2, MODE_WARMUP, mask_sh),\
SF(WB_WARM_UP_MODE_CTL2, DATA_DEPTH_WARMUP, mask_sh)
SF_DWB(WB_ENABLE, WB_ENABLE, mask_sh),\
SF_DWB(WB_EC_CONFIG, DISPCLK_R_WB_GATE_DIS, mask_sh),\
SF_DWB(WB_EC_CONFIG, DISPCLK_G_WB_GATE_DIS, mask_sh),\
SF_DWB(WB_EC_CONFIG, DISPCLK_G_WBSCL_GATE_DIS, mask_sh),\
SF_DWB(WB_EC_CONFIG, WB_TEST_CLK_SEL, mask_sh),\
SF_DWB(WB_EC_CONFIG, WB_LB_LS_DIS, mask_sh),\
SF_DWB(WB_EC_CONFIG, WB_LB_SD_DIS, mask_sh),\
SF_DWB(WB_EC_CONFIG, WB_LUT_LS_DIS, mask_sh),\
SF_DWB(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_MODE_SEL, mask_sh),\
SF_DWB(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_DIS, mask_sh),\
SF_DWB(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_FORCE, mask_sh),\
SF_DWB(WB_EC_CONFIG, WBSCL_LB_MEM_PWR_STATE, mask_sh),\
SF_DWB(WB_EC_CONFIG, WB_RAM_PW_SAVE_MODE, mask_sh),\
SF_DWB(WB_EC_CONFIG, WBSCL_LUT_MEM_PWR_STATE, mask_sh),\
SF_DWB(CNV_MODE, CNV_OUT_BPC, mask_sh),\
SF_DWB(CNV_MODE, CNV_FRAME_CAPTURE_RATE, mask_sh),\
SF_DWB(CNV_MODE, CNV_WINDOW_CROP_EN, mask_sh),\
SF_DWB(CNV_MODE, CNV_STEREO_TYPE, mask_sh),\
SF_DWB(CNV_MODE, CNV_INTERLACED_MODE, mask_sh),\
SF_DWB(CNV_MODE, CNV_EYE_SELECTION, mask_sh),\
SF_DWB(CNV_MODE, CNV_STEREO_POLARITY, mask_sh),\
SF_DWB(CNV_MODE, CNV_INTERLACED_FIELD_ORDER, mask_sh),\
SF_DWB(CNV_MODE, CNV_STEREO_SPLIT, mask_sh),\
SF_DWB(CNV_MODE, CNV_NEW_CONTENT, mask_sh),\
SF_DWB(CNV_MODE, CNV_FRAME_CAPTURE_EN_CURRENT, mask_sh),\
SF_DWB(CNV_MODE, CNV_FRAME_CAPTURE_EN, mask_sh),\
SF_DWB(CNV_WINDOW_START, CNV_WINDOW_START_X, mask_sh),\
SF_DWB(CNV_WINDOW_START, CNV_WINDOW_START_Y, mask_sh),\
SF_DWB(CNV_WINDOW_SIZE, CNV_WINDOW_WIDTH, mask_sh),\
SF_DWB(CNV_WINDOW_SIZE, CNV_WINDOW_HEIGHT, mask_sh),\
SF_DWB(CNV_UPDATE, CNV_UPDATE_PENDING, mask_sh),\
SF_DWB(CNV_UPDATE, CNV_UPDATE_TAKEN, mask_sh),\
SF_DWB(CNV_UPDATE, CNV_UPDATE_LOCK, mask_sh),\
SF_DWB(CNV_SOURCE_SIZE, CNV_SOURCE_WIDTH, mask_sh),\
SF_DWB(CNV_SOURCE_SIZE, CNV_SOURCE_HEIGHT, mask_sh),\
SF_DWB(CNV_TEST_CNTL, CNV_TEST_CRC_EN, mask_sh),\
SF_DWB(CNV_TEST_CNTL, CNV_TEST_CRC_CONT_EN, mask_sh),\
SF_DWB(CNV_TEST_CRC_RED, CNV_TEST_CRC_RED_MASK, mask_sh),\
SF_DWB(CNV_TEST_CRC_RED, CNV_TEST_CRC_SIG_RED, mask_sh),\
SF_DWB(CNV_TEST_CRC_GREEN, CNV_TEST_CRC_GREEN_MASK, mask_sh),\
SF_DWB(CNV_TEST_CRC_GREEN, CNV_TEST_CRC_SIG_GREEN, mask_sh),\
SF_DWB(CNV_TEST_CRC_BLUE, CNV_TEST_CRC_BLUE_MASK, mask_sh),\
SF_DWB(CNV_TEST_CRC_BLUE, CNV_TEST_CRC_SIG_BLUE, mask_sh),\
SF_DWB(WB_DEBUG_CTRL, WB_DEBUG_EN, mask_sh),\
SF_DWB(WB_DEBUG_CTRL, WB_DEBUG_SEL, mask_sh),\
SF_DWB(WB_DBG_MODE, WB_DBG_MODE_EN, mask_sh),\
SF_DWB(WB_DBG_MODE, WB_DBG_DIN_FMT, mask_sh),\
SF_DWB(WB_DBG_MODE, WB_DBG_36MODE, mask_sh),\
SF_DWB(WB_DBG_MODE, WB_DBG_CMAP, mask_sh),\
SF_DWB(WB_DBG_MODE, WB_DBG_PXLRATE_ERROR, mask_sh),\
SF_DWB(WB_DBG_MODE, WB_DBG_SOURCE_WIDTH, mask_sh),\
SF_DWB(WB_HW_DEBUG, WB_HW_DEBUG, mask_sh),\
SF_DWB(WB_SOFT_RESET, WB_SOFT_RESET, mask_sh),\
SF_DWB(CNV_TEST_DEBUG_INDEX, CNV_TEST_DEBUG_INDEX, mask_sh),\
SF_DWB(CNV_TEST_DEBUG_INDEX, CNV_TEST_DEBUG_WRITE_EN, mask_sh),\
SF_DWB(CNV_TEST_DEBUG_DATA, CNV_TEST_DEBUG_DATA, mask_sh),\
SF_DWB(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\
SF_DWB(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_PHASE, mask_sh),\
SF_DWB(WBSCL_COEF_RAM_SELECT, WBSCL_COEF_RAM_FILTER_TYPE, mask_sh),\
SF_DWB(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\
SF_DWB(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\
SF_DWB(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\
SF_DWB(WBSCL_COEF_RAM_TAP_DATA, WBSCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\
SF_DWB(WBSCL_MODE, WBSCL_MODE, mask_sh),\
SF_DWB(WBSCL_MODE, WBSCL_OUT_BIT_DEPTH, mask_sh),\
SF_DWB(WBSCL_TAP_CONTROL, WBSCL_V_NUM_OF_TAPS_Y_RGB, mask_sh),\
SF_DWB(WBSCL_TAP_CONTROL, WBSCL_V_NUM_OF_TAPS_CBCR, mask_sh),\
SF_DWB(WBSCL_TAP_CONTROL, WBSCL_H_NUM_OF_TAPS_Y_RGB, mask_sh),\
SF_DWB(WBSCL_TAP_CONTROL, WBSCL_H_NUM_OF_TAPS_CBCR, mask_sh),\
SF_DWB(WBSCL_DEST_SIZE, WBSCL_DEST_HEIGHT, mask_sh),\
SF_DWB(WBSCL_DEST_SIZE, WBSCL_DEST_WIDTH, mask_sh),\
SF_DWB(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL_H_SCALE_RATIO, mask_sh),\
SF_DWB(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL_H_INIT_FRAC_Y_RGB, mask_sh),\
SF_DWB(WBSCL_HORZ_FILTER_INIT_Y_RGB, WBSCL_H_INIT_INT_Y_RGB, mask_sh),\
SF_DWB(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL_H_INIT_FRAC_CBCR, mask_sh),\
SF_DWB(WBSCL_HORZ_FILTER_INIT_CBCR, WBSCL_H_INIT_INT_CBCR, mask_sh),\
SF_DWB(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL_V_SCALE_RATIO, mask_sh),\
SF_DWB(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL_V_INIT_FRAC_Y_RGB, mask_sh),\
SF_DWB(WBSCL_VERT_FILTER_INIT_Y_RGB, WBSCL_V_INIT_INT_Y_RGB, mask_sh),\
SF_DWB(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL_V_INIT_FRAC_CBCR, mask_sh),\
SF_DWB(WBSCL_VERT_FILTER_INIT_CBCR, WBSCL_V_INIT_INT_CBCR, mask_sh),\
SF_DWB(WBSCL_ROUND_OFFSET, WBSCL_ROUND_OFFSET_Y_RGB, mask_sh),\
SF_DWB(WBSCL_ROUND_OFFSET, WBSCL_ROUND_OFFSET_CBCR, mask_sh),\
SF_DWB(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_FLAG, mask_sh),\
SF_DWB(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_ACK, mask_sh),\
SF_DWB(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_MASK, mask_sh),\
SF_DWB(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_INT_STATUS, mask_sh),\
SF_DWB(WBSCL_OVERFLOW_STATUS, WBSCL_DATA_OVERFLOW_INT_TYPE, mask_sh),\
SF_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_FLAG, mask_sh),\
SF_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_ACK, mask_sh),\
SF_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_MASK, mask_sh),\
SF_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_INT_STATUS, mask_sh),\
SF_DWB(WBSCL_COEF_RAM_CONFLICT_STATUS, WBSCL_HOST_CONFLICT_INT_TYPE, mask_sh),\
SF_DWB(WBSCL_TEST_CNTL, WBSCL_TEST_CRC_EN, mask_sh),\
SF_DWB(WBSCL_TEST_CNTL, WBSCL_TEST_CRC_CONT_EN, mask_sh),\
SF_DWB(WBSCL_TEST_CRC_RED, WBSCL_TEST_CRC_RED_MASK, mask_sh),\
SF_DWB(WBSCL_TEST_CRC_RED, WBSCL_TEST_CRC_SIG_RED, mask_sh),\
SF_DWB(WBSCL_TEST_CRC_GREEN, WBSCL_TEST_CRC_GREEN_MASK, mask_sh),\
SF_DWB(WBSCL_TEST_CRC_GREEN, WBSCL_TEST_CRC_SIG_GREEN, mask_sh),\
SF_DWB(WBSCL_TEST_CRC_BLUE, WBSCL_TEST_CRC_BLUE_MASK, mask_sh),\
SF_DWB(WBSCL_TEST_CRC_BLUE, WBSCL_TEST_CRC_SIG_BLUE, mask_sh),\
SF_DWB(WBSCL_BACKPRESSURE_CNT_EN, WBSCL_BACKPRESSURE_CNT_EN, mask_sh),\
SF_DWB(WB_MCIF_BACKPRESSURE_CNT, WB_MCIF_Y_MAX_BACKPRESSURE, mask_sh),\
SF_DWB(WB_MCIF_BACKPRESSURE_CNT, WB_MCIF_C_MAX_BACKPRESSURE, mask_sh),\
SF_DWB(WBSCL_CLAMP_Y_RGB, WBSCL_CLAMP_UPPER_Y_RGB, mask_sh),\
SF_DWB(WBSCL_CLAMP_Y_RGB, WBSCL_CLAMP_LOWER_Y_RGB, mask_sh),\
SF_DWB(WBSCL_CLAMP_CBCR, WBSCL_CLAMP_UPPER_CBCR, mask_sh),\
SF_DWB(WBSCL_CLAMP_CBCR, WBSCL_CLAMP_LOWER_CBCR, mask_sh),\
SF_DWB(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL_OUTSIDE_PIX_STRATEGY, mask_sh),\
SF_DWB(WBSCL_OUTSIDE_PIX_STRATEGY, WBSCL_BLACK_COLOR_G_Y, mask_sh),\
SF_DWB(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL_BLACK_COLOR_B_CB, mask_sh),\
SF_DWB(WBSCL_OUTSIDE_PIX_STRATEGY_CBCR, WBSCL_BLACK_COLOR_R_CR, mask_sh),\
SF_DWB(WBSCL_DEBUG, WBSCL_DEBUG, mask_sh),\
SF_DWB(WBSCL_TEST_DEBUG_INDEX, WBSCL_TEST_DEBUG_INDEX, mask_sh),\
SF_DWB(WBSCL_TEST_DEBUG_INDEX, WBSCL_TEST_DEBUG_WRITE_EN, mask_sh),\
SF_DWB(WBSCL_TEST_DEBUG_DATA, WBSCL_TEST_DEBUG_DATA, mask_sh),\
SF_DWB(WB_WARM_UP_MODE_CTL1, WIDTH_WARMUP, mask_sh),\
SF_DWB(WB_WARM_UP_MODE_CTL1, HEIGHT_WARMUP, mask_sh),\
SF_DWB(WB_WARM_UP_MODE_CTL1, GMC_WARM_UP_ENABLE, mask_sh),\
SF_DWB(WB_WARM_UP_MODE_CTL2, DATA_VALUE_WARMUP, mask_sh),\
SF_DWB(WB_WARM_UP_MODE_CTL2, MODE_WARMUP, mask_sh),\
SF_DWB(WB_WARM_UP_MODE_CTL2, DATA_DEPTH_WARMUP, mask_sh)
#define DWBC_REG_FIELD_LIST_DCN2_0(type) \
type WB_ENABLE;\
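The SF/SF_DWB field lists above follow DC's mask/shift convention: one field-list macro is expanded twice, once against the generated __SHIFT constants and once against the _MASK constants, producing matching per-field shift and mask tables. Below is a minimal, self-contained sketch of that convention; SF_EXAMPLE, the struct names, and the register constants are illustrative placeholders, not the driver's definitions (the SF_DWB variant differs from SF in how the DWB register name is resolved, a detail the sketch ignores).

/* Sketch of the mask/shift list idiom -- illustrative names only. */
#include <stdint.h>

/* Hypothetical generated register constants. */
#define WB_ENABLE__WB_ENABLE__SHIFT  0x0
#define WB_ENABLE__WB_ENABLE_MASK    0x00000001L

/* Paste reg name, field name and suffix into the constant's name. */
#define SF_EXAMPLE(reg_name, field_name, post_fix) \
        .field_name = reg_name ## __ ## field_name ## post_fix

#define DWBC_MASK_SH_LIST_EXAMPLE(mask_sh) \
        SF_EXAMPLE(WB_ENABLE, WB_ENABLE, mask_sh)

struct dwbc_shift_ex { uint8_t  WB_ENABLE; };
struct dwbc_mask_ex  { uint32_t WB_ENABLE; };

/* The same list, expanded once per suffix. */
static const struct dwbc_shift_ex dwbc_shift_ex = {
        DWBC_MASK_SH_LIST_EXAMPLE(__SHIFT)
};
static const struct dwbc_mask_ex dwbc_mask_ex = {
        DWBC_MASK_SH_LIST_EXAMPLE(_MASK)
};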


@@ -1310,6 +1310,19 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
{
new_pipe->update_flags.raw = 0;
/* If non-phantom pipe is being transitioned to a phantom pipe,
* set disable and return immediately. This is because the pipe
* that was previously in use must be fully disabled before we
* can "enable" it as a phantom pipe (since the OTG will certainly
* be different). The post_unlock sequence will set the correct
* update flags to enable the phantom pipe.
*/
if (old_pipe->plane_state && !old_pipe->plane_state->is_phantom &&
new_pipe->plane_state && new_pipe->plane_state->is_phantom) {
new_pipe->update_flags.bits.disable = 1;
return;
}
/* Exit on unchanged, unused pipe */
if (!old_pipe->plane_state && !new_pipe->plane_state)
return;
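new_pipe->update_flags.raw = 0 together with update_flags.bits.disable = 1 above relies on DC's flag-union idiom: one storage word carrying both a per-bit view and a whole-word view, so all flags can be cleared or tested in a single operation. A minimal sketch, with placeholder names for everything beyond the enable/disable bits:

/* Sketch of the update-flags union -- field set is illustrative. */
#include <stdint.h>

union pipe_update_flags_ex {
        struct {
                uint32_t enable  : 1;
                uint32_t disable : 1;
                /* ...one bit per programmable feature... */
        } bits;
        uint32_t raw; /* whole-word view of the same bits */
};

static int detect_changes_ex(union pipe_update_flags_ex *f, int to_phantom)
{
        f->raw = 0;                  /* clear every flag in one store */
        if (to_phantom)
                f->bits.disable = 1; /* tear down fully before re-enabling */
        return f->raw != 0;          /* any bit set => pipe needs programming */
}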
@@ -1663,6 +1676,7 @@ static void dcn20_program_pipe(
pipe_ctx->pipe_dlg_param.vupdate_width);
if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
}
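The paired waits above, first for VBLANK and then for VACTIVE, bracket exactly one frame boundary; that is what guarantees earlier double-buffered register writes have latched before programming continues. A sketch of the pattern, using an illustrative stand-in for the timing-generator interface:

/* Sketch of the frame-boundary wait -- interface names are placeholders. */
enum crtc_state_ex { CRTC_STATE_VBLANK_EX, CRTC_STATE_VACTIVE_EX };

struct timing_generator_ex {
        void (*wait_for_state)(struct timing_generator_ex *tg,
                               enum crtc_state_ex state);
};

static void wait_one_frame_boundary_ex(struct timing_generator_ex *tg)
{
        /* Entering blanking and then re-entering active scan-out means a
         * full frame boundary has passed, so pending double-buffered
         * updates are guaranteed to have taken effect. */
        tg->wait_for_state(tg, CRTC_STATE_VBLANK_EX);
        tg->wait_for_state(tg, CRTC_STATE_VACTIVE_EX);
}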
@@ -1833,6 +1847,17 @@ void dcn20_program_front_end_for_ctx(
context->stream_status[0].plane_count > 1) {
pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
}
/* when dynamic ODM is active, pipes must be reconfigured when all planes are
* disabled, as some transitions will leave software and hardware state
* mismatched.
*/
if (dc->debug.enable_single_display_2to1_odm_policy &&
pipe->stream &&
pipe->update_flags.bits.disable &&
!pipe->prev_odm_pipe &&
hws->funcs.update_odm)
hws->funcs.update_odm(dc, context, pipe);
}
}
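The five-way condition above gates the extra update_odm call: dynamic ODM policy enabled, a stream still attached, the pipe being disabled, the pipe being the head of its ODM chain (!pipe->prev_odm_pipe), and the hook implemented. Restated as a predicate for readability (the helper and its parameter names are illustrative only):

/* Sketch: the dynamic-ODM reprogramming guard as a predicate. */
#include <stdbool.h>

static bool needs_odm_reprogram_ex(bool dynamic_odm_policy, bool has_stream,
                                   bool disabling, bool is_odm_head,
                                   bool has_update_odm_hook)
{
        /* Only the head pipe of an ODM chain reprograms ODM, and only
         * while tearing a pipe down under the dynamic ODM policy. */
        return dynamic_odm_policy && has_stream && disabling &&
               is_odm_head && has_update_odm_hook;
}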
@@ -1870,26 +1895,6 @@ void dcn20_post_unlock_program_front_end(
}
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
/* If an active, non-phantom pipe is being transitioned into a phantom
* pipe, wait for the double buffer update to complete first before we do
* phantom pipe programming (HUBP_VTG_SEL updates right away so that can
* cause issues).
*/
if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM &&
old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
old_pipe->stream_res.tg->funcs->wait_for_state(
old_pipe->stream_res.tg,
CRTC_STATE_VBLANK);
old_pipe->stream_res.tg->funcs->wait_for_state(
old_pipe->stream_res.tg,
CRTC_STATE_VACTIVE);
}
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1901,6 +1906,11 @@
*/
while (pipe) {
if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
/* When turning on the phantom pipe we want to run through the
* entire enable sequence, so apply all the "enable" flags.
*/
if (dc->hwss.apply_update_flags_for_phantom)
dc->hwss.apply_update_flags_for_phantom(pipe);
if (dc->hwss.update_phantom_vp_position)
dc->hwss.update_phantom_vp_position(dc, context, pipe);
dcn20_program_pipe(dc, pipe, context);
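Both apply_update_flags_for_phantom and update_phantom_vp_position follow the hwseq convention for optional per-ASIC hooks: each callback is NULL-checked before use, so function tables that predate phantom-pipe support keep working unchanged. A minimal sketch of the convention, with placeholder type names:

/* Sketch of the optional-hook convention -- names are placeholders. */
struct pipe_ctx_ex;

struct hwseq_funcs_ex {
        /* NULL on ASICs without phantom-pipe handling */
        void (*apply_update_flags_for_phantom)(struct pipe_ctx_ex *pipe);
};

static void program_phantom_ex(const struct hwseq_funcs_ex *hwss,
                               struct pipe_ctx_ex *pipe)
{
        if (hwss->apply_update_flags_for_phantom)
                hwss->apply_update_flags_for_phantom(pipe);
}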


@@ -29,13 +29,6 @@
#define TO_DCN20_MMHUBBUB(mcif_wb_base) \
container_of(mcif_wb_base, struct dcn20_mmhubbub, base)
/* DCN */
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
#define BASE(seg) \
BASE_INNER(seg)
#define MCIF_WB_COMMON_REG_LIST_DCN2_0(inst) \
SRI(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst),\
SRI(MCIF_WB_BUFMGR_CUR_LINE_R, MCIF_WB, inst),\
