Merge tag 'amd-drm-next-6.7-2023-10-27' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-6.7-2023-10-27:

amdgpu:
- RAS fixes
- Seamless boot fixes
- NBIO 7.7 fix
- SMU 14.0 fixes
- GC 11.5 fixes
- DML2 fixes
- ASPM fixes
- VPE fixes
- Misc code cleanups
- SRIOV fixes
- Add some missing copyright notices
- DCN 3.5 fixes
- FAMS fixes
- Backlight fix
- S/G display fix
- fdinfo cleanups
- EXT_COHERENT fixes for APU and NUMA systems

amdkfd:
- Misc fixes
- Misc code cleanups
- SVM fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231027200343.57132-1-alexander.deucher@amd.com
commit 631808095a (Dave Airlie, 2023-10-31 12:36:55 +10:00)
137 changed files with 1461 additions and 685 deletions


@ -104,7 +104,8 @@ amdgpu-y += \
amdgpu-y += \
df_v1_7.o \
df_v3_6.o \
df_v4_3.o
df_v4_3.o \
df_v4_6_2.o
# add GMC block
amdgpu-y += \


@ -1119,6 +1119,13 @@ static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
return adev->ip_versions[ip][inst] & ~0xFFU;
}
static inline uint32_t amdgpu_ip_version_full(const struct amdgpu_device *adev,
uint8_t ip, uint8_t inst)
{
/* This returns full version - major/minor/rev/variant/subrevision */
return adev->ip_versions[ip][inst];
}
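For reference, amdgpu_ip_version() above strips the low byte while the new helper keeps it. A minimal sketch of the assumed encoding behind these helpers (the macro shapes are assumptions based on the IP_VERSION_SUBREV use later in this series, not verbatim kernel code):

/* Assumed layout: major[31:24] minor[23:16] rev[15:8] variant[7:4] subrev[3:0] */
#define IP_VERSION_FULL(mj, mn, rv, var, srev) \
	(((mj) << 24) | ((mn) << 16) | ((rv) << 8) | ((var) << 4) | (srev))
#define IP_VERSION_SUBREV(ver)	((ver) & 0xF)
/* amdgpu_ip_version() therefore returns ver & ~0xFFU: variant and
 * subrevision cleared, suitable for IP_VERSION(maj, min, rev) compares. */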
static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
{
return container_of(ddev, struct amdgpu_device, ddev);
@ -1333,9 +1340,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev);
bool amdgpu_device_pcie_dynamic_switching_supported(void);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
bool amdgpu_device_aspm_support_quirk(void);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes);


@ -68,7 +68,7 @@ struct amdgpu_acpi_xcc_info {
struct amdgpu_acpi_dev_info {
struct list_head list;
struct list_head xcc_list;
uint16_t bdf;
uint32_t sbdf;
uint16_t supp_xcp_mode;
uint16_t xcp_mode;
uint16_t mem_mode;
@ -927,7 +927,7 @@ static acpi_status amdgpu_acpi_get_node_id(acpi_handle handle,
#endif
}
static struct amdgpu_acpi_dev_info *amdgpu_acpi_get_dev(u16 bdf)
static struct amdgpu_acpi_dev_info *amdgpu_acpi_get_dev(u32 sbdf)
{
struct amdgpu_acpi_dev_info *acpi_dev;
@ -935,14 +935,14 @@ static struct amdgpu_acpi_dev_info *amdgpu_acpi_get_dev(u16 bdf)
return NULL;
list_for_each_entry(acpi_dev, &amdgpu_acpi_dev_list, list)
if (acpi_dev->bdf == bdf)
if (acpi_dev->sbdf == sbdf)
return acpi_dev;
return NULL;
}
static int amdgpu_acpi_dev_init(struct amdgpu_acpi_dev_info **dev_info,
struct amdgpu_acpi_xcc_info *xcc_info, u16 bdf)
struct amdgpu_acpi_xcc_info *xcc_info, u32 sbdf)
{
struct amdgpu_acpi_dev_info *tmp;
union acpi_object *obj;
@ -955,7 +955,7 @@ static int amdgpu_acpi_dev_init(struct amdgpu_acpi_dev_info **dev_info,
INIT_LIST_HEAD(&tmp->xcc_list);
INIT_LIST_HEAD(&tmp->list);
tmp->bdf = bdf;
tmp->sbdf = sbdf;
obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0,
AMD_XCC_DSM_GET_SUPP_MODE, NULL,
@ -1007,7 +1007,7 @@ static int amdgpu_acpi_dev_init(struct amdgpu_acpi_dev_info **dev_info,
DRM_DEBUG_DRIVER(
"New dev(%x): Supported xcp mode: %x curr xcp_mode : %x mem mode : %x, tmr base: %llx tmr size: %llx ",
tmp->bdf, tmp->supp_xcp_mode, tmp->xcp_mode, tmp->mem_mode,
tmp->sbdf, tmp->supp_xcp_mode, tmp->xcp_mode, tmp->mem_mode,
tmp->tmr_base, tmp->tmr_size);
list_add_tail(&tmp->list, &amdgpu_acpi_dev_list);
*dev_info = tmp;
@ -1023,7 +1023,7 @@ out:
}
static int amdgpu_acpi_get_xcc_info(struct amdgpu_acpi_xcc_info *xcc_info,
u16 *bdf)
u32 *sbdf)
{
union acpi_object *obj;
acpi_status status;
@ -1054,8 +1054,10 @@ static int amdgpu_acpi_get_xcc_info(struct amdgpu_acpi_xcc_info *xcc_info,
xcc_info->phy_id = (obj->integer.value >> 32) & 0xFF;
/* xcp node of this xcc [47:40] */
xcc_info->xcp_node = (obj->integer.value >> 40) & 0xFF;
/* PF domain of this xcc [31:16] */
*sbdf = (obj->integer.value) & 0xFFFF0000;
/* PF bus/dev/fn of this xcc [63:48] */
*bdf = (obj->integer.value >> 48) & 0xFFFF;
*sbdf |= (obj->integer.value >> 48) & 0xFFFF;
ACPI_FREE(obj);
obj = NULL;
@ -1079,7 +1081,7 @@ static int amdgpu_acpi_enumerate_xcc(void)
struct acpi_device *acpi_dev;
char hid[ACPI_ID_LEN];
int ret, id;
u16 bdf;
u32 sbdf;
INIT_LIST_HEAD(&amdgpu_acpi_dev_list);
xa_init(&numa_info_xa);
@ -1107,16 +1109,16 @@ static int amdgpu_acpi_enumerate_xcc(void)
xcc_info->handle = acpi_device_handle(acpi_dev);
acpi_dev_put(acpi_dev);
ret = amdgpu_acpi_get_xcc_info(xcc_info, &bdf);
ret = amdgpu_acpi_get_xcc_info(xcc_info, &sbdf);
if (ret) {
kfree(xcc_info);
continue;
}
dev_info = amdgpu_acpi_get_dev(bdf);
dev_info = amdgpu_acpi_get_dev(sbdf);
if (!dev_info)
ret = amdgpu_acpi_dev_init(&dev_info, xcc_info, bdf);
ret = amdgpu_acpi_dev_init(&dev_info, xcc_info, sbdf);
if (ret == -ENOMEM)
return ret;
@ -1136,13 +1138,14 @@ int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
u64 *tmr_size)
{
struct amdgpu_acpi_dev_info *dev_info;
u16 bdf;
u32 sbdf;
if (!tmr_offset || !tmr_size)
return -EINVAL;
bdf = pci_dev_id(adev->pdev);
dev_info = amdgpu_acpi_get_dev(bdf);
sbdf = (pci_domain_nr(adev->pdev->bus) << 16);
sbdf |= pci_dev_id(adev->pdev);
dev_info = amdgpu_acpi_get_dev(sbdf);
if (!dev_info)
return -ENOENT;
@ -1157,13 +1160,14 @@ int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id,
{
struct amdgpu_acpi_dev_info *dev_info;
struct amdgpu_acpi_xcc_info *xcc_info;
u16 bdf;
u32 sbdf;
if (!numa_info)
return -EINVAL;
bdf = pci_dev_id(adev->pdev);
dev_info = amdgpu_acpi_get_dev(bdf);
sbdf = (pci_domain_nr(adev->pdev->bus) << 16);
sbdf |= pci_dev_id(adev->pdev);
dev_info = amdgpu_acpi_get_dev(sbdf);
if (!dev_info)
return -ENOENT;
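Taken together, the hunks above widen the ACPI device key from a 16-bit BDF to a 32-bit segment+BDF, so devices on different PCI segments no longer alias. A sketch of the packing, assuming only the layout shown in the hunks (pci_domain_nr() and pci_dev_id() are standard kernel helpers):

u32 sbdf    = (pci_domain_nr(pdev->bus) << 16) | pci_dev_id(pdev);
u16 segment = sbdf >> 16;		/* PCI domain (segment)         */
u16 bdf     = sbdf & 0xFFFF;		/* pci_dev_id(): bus<<8 | devfn */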


@ -748,6 +748,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
ssize_t result = 0;
int r;
if (!adev->smc_rreg)
return -EPERM;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
@ -804,6 +807,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
ssize_t result = 0;
int r;
if (!adev->smc_wreg)
return -EPERM;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;


@ -1456,14 +1456,14 @@ bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
}
/*
* Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
* speed switching. Until we have confirmation from Intel that a specific host
* supports it, it's safer that we keep it disabled for all.
* Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids
* don't support dynamic speed switching. Until we have confirmation from Intel
* that a specific host supports it, it's safer that we keep it disabled for all.
*
* https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
* https://gitlab.freedesktop.org/drm/amd/-/issues/2663
*/
bool amdgpu_device_pcie_dynamic_switching_supported(void)
static bool amdgpu_device_pcie_dynamic_switching_supported(void)
{
#if IS_ENABLED(CONFIG_X86)
struct cpuinfo_x86 *c = &cpu_data(0);
@ -1496,20 +1496,13 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
default:
return false;
}
if (adev->flags & AMD_IS_APU)
return false;
if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK))
return false;
return pcie_aspm_enabled(adev->pdev);
}
bool amdgpu_device_aspm_support_quirk(void)
{
#if IS_ENABLED(CONFIG_X86)
struct cpuinfo_x86 *c = &cpu_data(0);
return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
#else
return true;
#endif
}
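For context, a plausible shape of the helper this hunk makes static; only part of the body is visible above, so treat this as a sketch rather than the verbatim implementation:

static bool amdgpu_device_pcie_dynamic_switching_supported(void)
{
#if IS_ENABLED(CONFIG_X86)
	struct cpuinfo_x86 *c = &cpu_data(0);

	/* Keep dynamic speed switching disabled on every Intel host until
	 * specific platforms are confirmed to support it. */
	if (c->x86_vendor == X86_VENDOR_INTEL)
		return false;
#endif
	return true;
}

With the check folded into early init (the PP_PCIE_DPM_MASK hunk below), callers no longer need a standalone quirk, which is why amdgpu_device_aspm_support_quirk() disappears from nv.c, vi.c and friends later in this diff.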
/* if we get transitioned to only one device, take VGA back */
/**
* amdgpu_device_vga_set_decode - enable/disable vga decode
@ -2315,6 +2308,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
if (!amdgpu_device_pcie_dynamic_switching_supported())
adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
total = true;
for (i = 0; i < adev->num_ip_blocks; i++) {


@ -35,6 +35,7 @@
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "df_v4_3.h"
#include "df_v4_6_2.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
@ -1487,7 +1488,7 @@ static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
if (le16_to_cpu(gc_info->v2.header.version_minor == 1)) {
if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
@ -2559,6 +2560,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(4, 3, 0):
adev->df.funcs = &df_v4_3_funcs;
break;
case IP_VERSION(4, 6, 2):
adev->df.funcs = &df_v4_6_2_funcs;
break;
default:
break;
}
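The version_minor change above is a misplaced-parenthesis fix: the old code byte-swapped the boolean result of the comparison instead of the field, which only worked by accident on little-endian hosts. Reduced to its essence:

/* Before: swaps the result of (raw_le16 == 1), a bare 0 or 1. */
if (le16_to_cpu(gc_info->v2.header.version_minor == 1)) { /* ... */ }

/* After: swap the field to CPU order, then compare. */
if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) { /* ... */ }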


@ -56,21 +56,15 @@ static const char *amdgpu_ip_name[AMDGPU_HW_IP_NUM] = {
void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
struct amdgpu_device *adev = drm_to_adev(file->minor->dev);
struct amdgpu_fpriv *fpriv = file->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_mem_stats stats;
ktime_t usage[AMDGPU_HW_IP_NUM];
uint32_t bus, dev, fn, domain;
unsigned int hw_ip;
int ret;
memset(&stats, 0, sizeof(stats));
bus = adev->pdev->bus->number;
domain = pci_domain_nr(adev->pdev->bus);
dev = PCI_SLOT(adev->pdev->devfn);
fn = PCI_FUNC(adev->pdev->devfn);
ret = amdgpu_bo_reserve(vm->root.bo, false);
if (ret)
@ -88,9 +82,6 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
*/
drm_printf(p, "pasid:\t%u\n", fpriv->vm.pasid);
drm_printf(p, "drm-driver:\t%s\n", file->minor->dev->driver->name);
drm_printf(p, "drm-pdev:\t%04x:%02x:%02x.%d\n", domain, bus, dev, fn);
drm_printf(p, "drm-client-id:\t%llu\n", vm->immediate.fence_context);
drm_printf(p, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL);
drm_printf(p, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL);
drm_printf(p, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL);


@ -786,6 +786,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
/* YELLOW_CARP*/
case IP_VERSION(10, 3, 3):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 5, 0):
/* Don't enable it by default yet.
*/
if (amdgpu_tmz < 1) {


@ -635,8 +635,11 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
static inline void put_obj(struct ras_manager *obj)
{
if (obj && (--obj->use == 0))
if (obj && (--obj->use == 0)) {
list_del(&obj->node);
amdgpu_ras_error_data_fini(&obj->err_data);
}
if (obj && (obj->use < 0))
DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head));
}
@ -666,6 +669,9 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
if (alive_obj(obj))
return NULL;
if (amdgpu_ras_error_data_init(&obj->err_data))
return NULL;
obj->head = *head;
obj->adev = adev;
list_add(&obj->node, &con->head);
@ -1023,44 +1029,68 @@ static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_d
}
static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
struct ras_query_if *query_if,
struct ras_manager *ras_mgr,
struct ras_err_data *err_data,
const char *blk_name,
bool is_ue)
{
struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
const char *blk_name = get_ras_block_str(&query_if->head);
struct amdgpu_smuio_mcm_config_info *mcm_info;
struct ras_err_node *err_node;
struct ras_err_info *err_info;
if (is_ue)
dev_info(adev->dev, "%ld uncorrectable hardware errors detected in %s block\n",
ras_mgr->err_data.ue_count, blk_name);
else
dev_info(adev->dev, "%ld correctable hardware errors detected in %s block\n",
ras_mgr->err_data.ce_count, blk_name);
if (is_ue) {
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
if (err_info->ue_count) {
dev_info(adev->dev, "socket: %d, die: %d, "
"%lld new uncorrectable hardware errors detected in %s block\n",
mcm_info->socket_id,
mcm_info->die_id,
err_info->ue_count,
blk_name);
}
}
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
if (is_ue && err_info->ue_count) {
dev_info(adev->dev, "socket: %d, die: %d "
"%lld uncorrectable hardware errors detected in %s block\n",
mcm_info->socket_id,
mcm_info->die_id,
err_info->ue_count,
blk_name);
} else if (!is_ue && err_info->ce_count) {
dev_info(adev->dev, "socket: %d, die: %d "
"%lld correctable hardware errors detected in %s block\n",
mcm_info->socket_id,
mcm_info->die_id,
err_info->ce_count,
blk_name);
for_each_ras_error(err_node, &ras_mgr->err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
dev_info(adev->dev, "socket: %d, die: %d, "
"%lld uncorrectable hardware errors detected in total in %s block\n",
mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
}
} else {
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
if (err_info->ce_count) {
dev_info(adev->dev, "socket: %d, die: %d, "
"%lld new correctable hardware errors detected in %s block, "
"no user action is needed\n",
mcm_info->socket_id,
mcm_info->die_id,
err_info->ce_count,
blk_name);
}
}
for_each_ras_error(err_node, &ras_mgr->err_data) {
err_info = &err_node->err_info;
mcm_info = &err_info->mcm_info;
dev_info(adev->dev, "socket: %d, die: %d, "
"%lld correctable hardware errors detected in total in %s block, "
"no user action is needed\n",
mcm_info->socket_id, mcm_info->die_id, err_info->ce_count, blk_name);
}
}
}
static inline bool err_data_has_source_info(struct ras_err_data *data)
{
return !list_empty(&data->err_node_list);
}
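The per-socket/die reporting above walks a list of error nodes attached to ras_err_data. A sketch of the supporting types as assumed from the hunks (field names match the code; the exact layout in amdgpu_ras.h may differ):

struct ras_err_info {
	struct amdgpu_smuio_mcm_config_info mcm_info;	/* socket_id, die_id */
	u64 ce_count;
	u64 ue_count;
};

struct ras_err_node {
	struct list_head node;
	struct ras_err_info err_info;
};

#define for_each_ras_error(err_node, err_data) \
	list_for_each_entry(err_node, &(err_data)->err_node_list, node)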
static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
struct ras_query_if *query_if,
struct ras_err_data *err_data)
@ -1069,9 +1099,8 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
const char *blk_name = get_ras_block_str(&query_if->head);
if (err_data->ce_count) {
if (!list_empty(&err_data->err_node_list)) {
amdgpu_ras_error_print_error_data(adev, query_if,
err_data, false);
if (err_data_has_source_info(err_data)) {
amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, blk_name, false);
} else if (!adev->aid_mask &&
adev->smuio.funcs &&
adev->smuio.funcs->get_socket_id &&
@ -1094,9 +1123,8 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
}
if (err_data->ue_count) {
if (!list_empty(&err_data->err_node_list)) {
amdgpu_ras_error_print_error_data(adev, query_if,
err_data, true);
if (err_data_has_source_info(err_data)) {
amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, blk_name, true);
} else if (!adev->aid_mask &&
adev->smuio.funcs &&
adev->smuio.funcs->get_socket_id &&
@ -1118,6 +1146,25 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
}
static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
{
struct ras_err_node *err_node;
struct ras_err_info *err_info;
if (err_data_has_source_info(err_data)) {
for_each_ras_error(err_node, err_data) {
err_info = &err_node->err_info;
amdgpu_ras_error_statistic_ce_count(&obj->err_data, &err_info->mcm_info, err_info->ce_count);
amdgpu_ras_error_statistic_ue_count(&obj->err_data, &err_info->mcm_info, err_info->ue_count);
}
} else {
/* for legacy asic path which doesn't have error source info */
obj->err_data.ue_count += err_data->ue_count;
obj->err_data.ce_count += err_data->ce_count;
}
}
/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
struct ras_query_if *info)
@ -1156,8 +1203,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
}
}
obj->err_data.ue_count += err_data.ue_count;
obj->err_data.ce_count += err_data.ce_count;
amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
info->ue_count = obj->err_data.ue_count;
info->ce_count = obj->err_data.ce_count;
@ -1174,6 +1220,8 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
enum amdgpu_ras_block block)
{
struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
if (!block_obj || !block_obj->hw_ops) {
dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
@ -1181,7 +1229,13 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
return -EOPNOTSUPP;
}
if (!amdgpu_ras_is_supported(adev, block))
/* skip ras error reset in gpu reset */
if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery)) &&
mca_funcs && mca_funcs->mca_set_debug_mode)
return -EOPNOTSUPP;
if (!amdgpu_ras_is_supported(adev, block) ||
!amdgpu_ras_get_mca_debug_mode(adev))
return -EOPNOTSUPP;
if (block_obj->hw_ops->reset_ras_error_count)
@ -2692,7 +2746,8 @@ static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
return;
/* Init poison supported flag, the default value is false */
if (adev->gmc.xgmi.connected_to_cpu) {
if (adev->gmc.xgmi.connected_to_cpu ||
adev->gmc.is_app_apu) {
/* enabled by default when GPU is connected to CPU */
con->poison_supported = true;
} else if (adev->df.funcs &&
@ -3529,11 +3584,10 @@ static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data
for_each_ras_error(err_node, err_data) {
ref_id = &err_node->err_info.mcm_info;
if ((mcm_info->socket_id >= 0 && mcm_info->socket_id != ref_id->socket_id) ||
(mcm_info->die_id >= 0 && mcm_info->die_id != ref_id->die_id))
continue;
return err_node;
if (mcm_info->socket_id == ref_id->socket_id &&
mcm_info->die_id == ref_id->die_id)
return err_node;
}
return NULL;


@ -515,10 +515,7 @@ struct ras_manager {
/* IH data */
struct ras_ih_data ih_data;
struct {
unsigned long ue_count;
unsigned long ce_count;
} err_data;
struct ras_err_data err_data;
};
struct ras_badpage {


@ -844,6 +844,7 @@ static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
* @immediate: immediate submission in a page fault
* @unlocked: unlocked invalidation during MM callback
* @flush_tlb: trigger tlb invalidation after update completed
* @allow_override: change MTYPE for local NUMA nodes
* @resv: fences we need to sync to
* @start: start of mapped range
* @last: last mapped entry
@ -860,7 +861,7 @@ static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
* 0 for success, negative error code for failure.
*/
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
bool immediate, bool unlocked, bool flush_tlb,
bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
struct dma_resv *resv, uint64_t start, uint64_t last,
uint64_t flags, uint64_t offset, uint64_t vram_base,
struct ttm_resource *res, dma_addr_t *pages_addr,
@ -898,6 +899,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
params.immediate = immediate;
params.pages_addr = pages_addr;
params.unlocked = unlocked;
params.allow_override = allow_override;
/* Implicitly sync to command submissions in the same VM before
* unmapping. Sync to moving fences before mapping.
@ -1073,6 +1075,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
struct ttm_resource *mem;
struct dma_fence **last_update;
bool flush_tlb = clear;
bool uncached;
struct dma_resv *resv;
uint64_t vram_base;
uint64_t flags;
@ -1110,9 +1113,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
vram_base = bo_adev->vm_manager.vram_base_offset;
uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
} else {
flags = 0x0;
vram_base = 0;
uncached = false;
}
if (clear || (bo && bo->tbo.base.resv ==
@ -1146,7 +1151,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
trace_amdgpu_vm_bo_update(mapping);
r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
resv, mapping->start, mapping->last,
!uncached, resv, mapping->start, mapping->last,
update_flags, mapping->offset,
vram_base, mem, pages_addr,
last_update);
@ -1341,8 +1346,8 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
mapping->start < AMDGPU_GMC_HOLE_START)
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
r = amdgpu_vm_update_range(adev, vm, false, false, true, resv,
mapping->start, mapping->last,
r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
resv, mapping->start, mapping->last,
init_pte_value, 0, 0, NULL, NULL,
&f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
@ -2618,8 +2623,8 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
goto error_unlock;
}
r = amdgpu_vm_update_range(adev, vm, true, false, false, NULL, addr,
addr, flags, value, 0, NULL, NULL, NULL);
r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
NULL, addr, addr, flags, value, 0, NULL, NULL, NULL);
if (r)
goto error_unlock;


@ -246,6 +246,12 @@ struct amdgpu_vm_update_params {
* @table_freed: return true if page table is freed when updating
*/
bool table_freed;
/**
* @allow_override: true for memory that is not uncached: allows MTYPE
* to be overridden for NUMA local memory.
*/
bool allow_override;
};
struct amdgpu_vm_update_funcs {
@ -441,7 +447,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
struct amdgpu_vm *vm, struct amdgpu_bo *bo);
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
bool immediate, bool unlocked, bool flush_tlb,
bool immediate, bool unlocked, bool flush_tlb, bool allow_override,
struct dma_resv *resv, uint64_t start, uint64_t last,
uint64_t flags, uint64_t offset, uint64_t vram_base,
struct ttm_resource *res, dma_addr_t *pages_addr,


@ -843,7 +843,7 @@ static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
*/
if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) &&
adev->gmc.gmc_funcs->override_vm_pte_flags &&
num_possible_nodes() > 1 && !params->pages_addr)
num_possible_nodes() > 1 && !params->pages_addr && params->allow_override)
amdgpu_gmc_override_vm_pte_flags(adev, params->vm, addr, &flags);
params->vm->update_funcs->update(params, pt, pe, addr, count, incr,


@ -1709,10 +1709,6 @@ static void cik_program_aspm(struct amdgpu_device *adev)
if (pci_is_root_bus(adev->pdev->bus))
return;
/* XXX double check APUs */
if (adev->flags & AMD_IS_APU)
return;
orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
data |= (0x24 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT) |


@ -0,0 +1,34 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "df_v4_6_2.h"
static bool df_v4_6_2_query_ras_poison_mode(struct amdgpu_device *adev)
{
/* return true since related regs are inaccessible */
return true;
}
const struct amdgpu_df_funcs df_v4_6_2_funcs = {
.query_ras_poison_mode = df_v4_6_2_query_ras_poison_mode,
};


@ -0,0 +1,31 @@
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __DF_V4_6_2_H__
#define __DF_V4_6_2_H__
#include "soc15_common.h"
extern const struct amdgpu_df_funcs df_v4_6_2_funcs;
#endif


@ -256,6 +256,7 @@ static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring)
xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
WREG32(scratch_reg0_offset, 0xCAFEDEAD);
tmp = RREG32(scratch_reg0_offset);
r = amdgpu_ring_alloc(ring, 3);
if (r)


@ -73,7 +73,8 @@ gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
* fini/suspend, so the overall state doesn't
* change over the course of suspend/resume.
*/
if (!adev->in_s0ix)
if (!adev->in_s0ix && (adev->in_runpm || adev->in_suspend ||
amdgpu_in_reset(adev)))
amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
break;
case AMDGPU_IRQ_STATE_ENABLE:


@ -1251,12 +1251,15 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
return;
}
/* Only override mappings with MTYPE_NC, which is the safe default for
* cacheable memory.
/* MTYPE_NC is the same default and can be overridden.
* MTYPE_UC will be present if the memory is extended-coherent
* and can also be overridden.
*/
if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
AMDGPU_PTE_MTYPE_VG10(MTYPE_NC)) {
dev_dbg_ratelimited(adev->dev, "MTYPE is not NC\n");
AMDGPU_PTE_MTYPE_VG10(MTYPE_NC) &&
(*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
AMDGPU_PTE_MTYPE_VG10(MTYPE_UC)) {
dev_dbg_ratelimited(adev->dev, "MTYPE is not NC or UC\n");
return;
}
@ -1283,15 +1286,23 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
vm->mem_id, local_node, nid);
if (nid == local_node) {
uint64_t old_flags = *flags;
unsigned int mtype_local = MTYPE_RW;
if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) ==
AMDGPU_PTE_MTYPE_VG10(MTYPE_NC)) {
unsigned int mtype_local = MTYPE_RW;
if (amdgpu_mtype_local == 1)
mtype_local = MTYPE_NC;
else if (amdgpu_mtype_local == 2)
mtype_local = MTYPE_CC;
if (amdgpu_mtype_local == 1)
mtype_local = MTYPE_NC;
else if (amdgpu_mtype_local == 2)
mtype_local = MTYPE_CC;
*flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
AMDGPU_PTE_MTYPE_VG10(mtype_local);
} else {
/* MTYPE_UC case */
*flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
}
*flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
AMDGPU_PTE_MTYPE_VG10(mtype_local);
dev_dbg_ratelimited(adev->dev, "flags updated from %llx to %llx\n",
old_flags, *flags);
}
@ -2018,11 +2029,8 @@ static int gmc_v9_0_sw_init(void *handle)
* vm size is 256TB (48bit), maximum size of Vega10,
* block size 512 (9bit)
*/
/* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
if (amdgpu_sriov_vf(adev))
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
else
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
adev->gmc.translate_further = adev->vm_manager.num_level > 1;
break;
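Restating the override logic from the gmc_v9_0_override_vm_pte_flags() hunk above as a table (amdgpu_mtype_local is the existing module parameter; this adds nothing beyond what the hunk shows):

/* NUMA-local PTE MTYPE selection:
 *   incoming NC, amdgpu_mtype_local == 0  ->  MTYPE_RW (default)
 *   incoming NC, amdgpu_mtype_local == 1  ->  MTYPE_NC
 *   incoming NC, amdgpu_mtype_local == 2  ->  MTYPE_CC
 *   incoming UC (extended-coherent)       ->  MTYPE_CC
 * anything else is left untouched. */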


@ -254,7 +254,7 @@ static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *ade
{
uint32_t def, data;
if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
return;
def = data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
@ -283,7 +283,7 @@ static void nbio_v7_7_update_medium_grain_light_sleep(struct amdgpu_device *adev
{
uint32_t def, data;
if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
return;
def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);


@ -56,8 +56,15 @@ static u32 nbio_v7_9_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp;
tmp = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0));
/* If it is VF or subrevision holds a non-zero value, that should be used */
if (tmp || amdgpu_sriov_vf(adev))
return tmp;
/* If discovery subrev is not updated, use register version */
tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0, STRAP_ATI_REV_ID_DEV0_F0);
tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0,
STRAP_ATI_REV_ID_DEV0_F0);
return tmp;
}


@ -513,11 +513,10 @@ static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
static void nv_program_aspm(struct amdgpu_device *adev)
{
if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
if (!amdgpu_device_should_use_aspm(adev))
return;
if (!(adev->flags & AMD_IS_APU) &&
(adev->nbio.funcs->program_aspm))
if (adev->nbio.funcs->program_aspm)
adev->nbio.funcs->program_aspm(adev);
}
@ -609,9 +608,8 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
if (adev->gfx.funcs->update_perfmon_mgcg)
adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
if (!(adev->flags & AMD_IS_APU) &&
(adev->nbio.funcs->enable_aspm) &&
amdgpu_device_should_use_aspm(adev))
if (adev->nbio.funcs->enable_aspm &&
amdgpu_device_should_use_aspm(adev))
adev->nbio.funcs->enable_aspm(adev, !enter);
return 0;


@ -2440,8 +2440,6 @@ static void si_program_aspm(struct amdgpu_device *adev)
if (!amdgpu_device_should_use_aspm(adev))
return;
if (adev->flags & AMD_IS_APU)
return;
orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
data &= ~LC_XMIT_N_FTS_MASK;
data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;


@ -646,8 +646,7 @@ static void soc15_program_aspm(struct amdgpu_device *adev)
if (!amdgpu_device_should_use_aspm(adev))
return;
if (!(adev->flags & AMD_IS_APU) &&
(adev->nbio.funcs->program_aspm))
if (adev->nbio.funcs->program_aspm)
adev->nbio.funcs->program_aspm(adev);
}


@ -433,8 +433,7 @@ static void soc21_program_aspm(struct amdgpu_device *adev)
if (!amdgpu_device_should_use_aspm(adev))
return;
if (!(adev->flags & AMD_IS_APU) &&
(adev->nbio.funcs->program_aspm))
if (adev->nbio.funcs->program_aspm)
adev->nbio.funcs->program_aspm(adev);
}


@ -88,6 +88,27 @@ static void umc_v12_0_reset_error_count(struct amdgpu_device *adev)
umc_v12_0_reset_error_count_per_channel, NULL);
}
static bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status)
{
return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1));
}
static bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status)
{
return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 ||
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 0) ||
/* Identify data parity error in replay mode */
((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0x5 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0xb) &&
!(umc_v12_0_is_uncorrectable_error(mc_umc_status)))));
}
static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev,
uint64_t umc_reg_offset,
unsigned long *error_count)
@ -104,10 +125,7 @@ static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev,
mc_umc_status =
RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 ||
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 0)))
if (umc_v12_0_is_correctable_error(mc_umc_status))
*error_count += 1;
}
@ -125,11 +143,7 @@ static void umc_v12_0_query_uncorrectable_error_count(struct amdgpu_device *adev
mc_umc_status =
RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);
if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
if (umc_v12_0_is_uncorrectable_error(mc_umc_status))
*error_count += 1;
}
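The two predicates above classify each MCA status word into at most one bucket; in summary (field names taken from the hunks):

/* With Val set:
 *   uncorrectable: Deferred, PCC, UC or TCC set
 *   correctable:   CECC set, or UECC set with UC clear, or replay-mode
 *                  data parity (ErrorCodeExt 0x5 or 0xb) when the status
 *                  is not already uncorrectable */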


@ -336,7 +336,7 @@ static void umc_v8_10_ecc_info_query_correctable_error_count(struct amdgpu_devic
uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst,
unsigned long *error_count)
{
uint64_t mc_umc_status;
uint16_t ecc_ce_cnt;
uint32_t eccinfo_table_idx;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
@ -345,12 +345,10 @@ static void umc_v8_10_ecc_info_query_correctable_error_count(struct amdgpu_devic
umc_inst * adev->umc.channel_inst_num +
ch_inst;
/* check the MCUMC_STATUS */
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) {
*error_count += 1;
}
/* Retrieve CE count */
ecc_ce_cnt = ras->umc_ecc.ecc[eccinfo_table_idx].ce_count_lo_chip;
if (ecc_ce_cnt)
*error_count += ecc_ce_cnt;
}
static void umc_v8_10_ecc_info_query_uncorrectable_error_count(struct amdgpu_device *adev,


@ -1124,11 +1124,10 @@ static void vi_program_aspm(struct amdgpu_device *adev)
bool bL1SS = false;
bool bClkReqSupport = true;
if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
if (!amdgpu_device_should_use_aspm(adev))
return;
if (adev->flags & AMD_IS_APU ||
adev->asic_type < CHIP_POLARIS10)
if (adev->asic_type < CHIP_POLARIS10)
return;
orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);


@ -205,19 +205,21 @@ static int vpe_v6_1_ring_start(struct amdgpu_vpe *vpe)
static int vpe_v_6_1_ring_stop(struct amdgpu_vpe *vpe)
{
struct amdgpu_device *adev = vpe->ring.adev;
uint32_t rb_cntl, ib_cntl;
uint32_t queue_reset;
int ret;
rb_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_ENABLE, 0);
WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_RB_CNTL), rb_cntl);
queue_reset = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE_RESET_REQ));
queue_reset = REG_SET_FIELD(queue_reset, VPEC_QUEUE_RESET_REQ, QUEUE0_RESET, 1);
WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE_RESET_REQ), queue_reset);
ib_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_IB_CNTL));
ib_cntl = REG_SET_FIELD(ib_cntl, VPEC_QUEUE0_IB_CNTL, IB_ENABLE, 0);
WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_QUEUE0_IB_CNTL), ib_cntl);
ret = SOC15_WAIT_ON_RREG(VPE, 0, regVPEC_QUEUE_RESET_REQ, 0,
VPEC_QUEUE_RESET_REQ__QUEUE0_RESET_MASK);
if (ret)
dev_err(adev->dev, "VPE queue reset failed\n");
vpe->ring.sched.ready = false;
return 0;
return ret;
}
static int vpe_v6_1_set_trap_irq_state(struct amdgpu_device *adev,


@ -442,10 +442,10 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
goto out_free;
}
if (cpages != npages)
pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
cpages, npages);
else
pr_debug("0x%lx pages collected\n", cpages);
pr_debug("0x%lx pages migrated\n", cpages);
r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
migrate_vma_pages(&migrate);
@ -479,8 +479,6 @@ out:
* svm_migrate_ram_to_vram - migrate svm range from system to device
* @prange: range structure
* @best_loc: the device to migrate to
* @start_mgr: start page to migrate
* @last_mgr: last page to migrate
* @mm: the process mm structure
* @trigger: reason of migration
*
@ -491,7 +489,6 @@ out:
*/
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
unsigned long start_mgr, unsigned long last_mgr,
struct mm_struct *mm, uint32_t trigger)
{
unsigned long addr, start, end;
@ -501,30 +498,23 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
unsigned long cpages = 0;
long r = 0;
if (!best_loc) {
pr_debug("svms 0x%p [0x%lx 0x%lx] migrate to sys ram\n",
prange->svms, start_mgr, last_mgr);
if (prange->actual_loc == best_loc) {
pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
prange->svms, prange->start, prange->last, best_loc);
return 0;
}
if (start_mgr < prange->start || last_mgr > prange->last) {
pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
start_mgr, last_mgr, prange->start, prange->last);
return -EFAULT;
}
node = svm_range_get_node_by_id(prange, best_loc);
if (!node) {
pr_debug("failed to get kfd node by id 0x%x\n", best_loc);
return -ENODEV;
}
pr_debug("svms 0x%p [0x%lx 0x%lx] in [0x%lx 0x%lx] to gpu 0x%x\n",
prange->svms, start_mgr, last_mgr, prange->start, prange->last,
best_loc);
pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
prange->start, prange->last, best_loc);
start = start_mgr << PAGE_SHIFT;
end = (last_mgr + 1) << PAGE_SHIFT;
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
r = svm_range_vram_node_new(node, prange, true);
if (r) {
@ -554,11 +544,8 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
if (cpages) {
prange->actual_loc = best_loc;
prange->vram_pages = prange->vram_pages + cpages;
} else if (!prange->actual_loc) {
/* if no pages migrated and all pages of prange are in
* sys ram, drop the svm_bo taken in svm_range_vram_node_new
*/
svm_range_dma_unmap(prange);
} else {
svm_range_vram_node_free(prange);
}
@ -676,8 +663,9 @@ out_oom:
* Context: Process context, caller hold mmap read lock, prange->migrate_mutex
*
* Return:
* 0 - success with all pages migrated
* negative values - indicate error
* positive values or zero - number of pages got migrated
* positive values - partial migration, number of pages not migrated
*/
static long
svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
@ -688,7 +676,6 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
uint64_t npages = (end - start) >> PAGE_SHIFT;
unsigned long upages = npages;
unsigned long cpages = 0;
unsigned long mpages = 0;
struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
@ -738,10 +725,10 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
goto out_free;
}
if (cpages != npages)
pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
cpages, npages);
else
pr_debug("0x%lx pages collected\n", cpages);
pr_debug("0x%lx pages migrated\n", cpages);
r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
scratch, npages);
@ -764,21 +751,17 @@ out_free:
kvfree(buf);
out:
if (!r && cpages) {
mpages = cpages - upages;
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
}
return r ? r : mpages;
return r ? r : upages;
}
/**
* svm_migrate_vram_to_ram - migrate svm range from device to system
* @prange: range structure
* @mm: process mm, use current->mm if NULL
* @start_mgr: start page need be migrated to sys ram
* @last_mgr: last page need be migrated to sys ram
* @trigger: reason of migration
* @fault_page: is from vmf->page, svm_migrate_to_ram(), this is CPU page fault callback
*
@ -788,7 +771,6 @@ out:
* 0 - OK, otherwise error code
*/
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
unsigned long start_mgr, unsigned long last_mgr,
uint32_t trigger, struct page *fault_page)
{
struct kfd_node *node;
@ -796,33 +778,26 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
unsigned long addr;
unsigned long start;
unsigned long end;
unsigned long mpages = 0;
unsigned long upages = 0;
long r = 0;
/* this prange has no vram pages to migrate to sys ram */
if (!prange->actual_loc) {
pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
prange->start, prange->last);
return 0;
}
if (start_mgr < prange->start || last_mgr > prange->last) {
pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
start_mgr, last_mgr, prange->start, prange->last);
return -EFAULT;
}
node = svm_range_get_node_by_id(prange, prange->actual_loc);
if (!node) {
pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc);
return -ENODEV;
}
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
prange->svms, prange, start_mgr, last_mgr,
prange->svms, prange, prange->start, prange->last,
prange->actual_loc);
start = start_mgr << PAGE_SHIFT;
end = (last_mgr + 1) << PAGE_SHIFT;
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
for (addr = start; addr < end;) {
unsigned long next;
@ -841,21 +816,14 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
pr_debug("failed %ld to migrate prange %p\n", r, prange);
break;
} else {
mpages += r;
upages += r;
}
addr = next;
}
if (r >= 0) {
prange->vram_pages -= mpages;
/* prange does not have vram page set its actual_loc to system
* and drop its svm_bo ref
*/
if (prange->vram_pages == 0 && prange->ttm_res) {
prange->actual_loc = 0;
svm_range_vram_node_free(prange);
}
if (r >= 0 && !upages) {
svm_range_vram_node_free(prange);
prange->actual_loc = 0;
}
return r < 0 ? r : 0;
@ -865,23 +833,17 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
* svm_migrate_vram_to_vram - migrate svm range from device to device
* @prange: range structure
* @best_loc: the device to migrate to
* @start: start page need be migrated to sys ram
* @last: last page need be migrated to sys ram
* @mm: process mm, use current->mm if NULL
* @trigger: reason of migration
*
* Context: Process context, caller hold mmap read lock, svms lock, prange lock
*
* migrate all vram pages in prange to sys ram, then migrate
* [start, last] pages from sys ram to gpu node best_loc.
*
* Return:
* 0 - OK, otherwise error code
*/
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
unsigned long start, unsigned long last,
struct mm_struct *mm, uint32_t trigger)
struct mm_struct *mm, uint32_t trigger)
{
int r, retries = 3;
@ -893,8 +855,7 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);
do {
r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
trigger, NULL);
r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL);
if (r)
return r;
} while (prange->actual_loc && --retries);
@ -902,21 +863,17 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
if (prange->actual_loc)
return -EDEADLK;
return svm_migrate_ram_to_vram(prange, best_loc, start, last, mm, trigger);
return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
}
int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
unsigned long start, unsigned long last,
struct mm_struct *mm, uint32_t trigger)
struct mm_struct *mm, uint32_t trigger)
{
if (!prange->actual_loc || prange->actual_loc == best_loc)
return svm_migrate_ram_to_vram(prange, best_loc, start, last,
mm, trigger);
if (!prange->actual_loc)
return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
else
return svm_migrate_vram_to_vram(prange, best_loc, start, last,
mm, trigger);
return svm_migrate_vram_to_vram(prange, best_loc, mm, trigger);
}
@ -932,9 +889,10 @@ svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
*/
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
unsigned long start, last, size;
unsigned long addr = vmf->address;
struct svm_range_bo *svm_bo;
enum svm_work_list_ops op;
struct svm_range *parent;
struct svm_range *prange;
struct kfd_process *p;
struct mm_struct *mm;
@ -971,31 +929,51 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
mutex_lock(&p->svms.lock);
prange = svm_range_from_addr(&p->svms, addr, NULL);
prange = svm_range_from_addr(&p->svms, addr, &parent);
if (!prange) {
pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
r = -EFAULT;
goto out_unlock_svms;
}
mutex_lock(&prange->migrate_mutex);
mutex_lock(&parent->migrate_mutex);
if (prange != parent)
mutex_lock_nested(&prange->migrate_mutex, 1);
if (!prange->actual_loc)
goto out_unlock_prange;
/* Align migration range start and size to granularity size */
size = 1UL << prange->granularity;
start = max(ALIGN_DOWN(addr, size), prange->start);
last = min(ALIGN(addr + 1, size) - 1, prange->last);
svm_range_lock(parent);
if (prange != parent)
mutex_lock_nested(&prange->lock, 1);
r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
if (prange != parent)
mutex_unlock(&prange->lock);
svm_range_unlock(parent);
if (r) {
pr_debug("failed %d to split range by granularity\n", r);
goto out_unlock_prange;
}
r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last,
KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page);
r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
vmf->page);
if (r)
pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
r, prange->svms, prange, start, last);
r, prange->svms, prange, prange->start, prange->last);
/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
if (p->xnack_enabled && parent == prange)
op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
else
op = SVM_OP_UPDATE_RANGE_NOTIFIER;
svm_range_add_list_work(&p->svms, parent, mm, op);
schedule_deferred_list_work(&p->svms);
out_unlock_prange:
mutex_unlock(&prange->migrate_mutex);
if (prange != parent)
mutex_unlock(&prange->migrate_mutex);
mutex_unlock(&parent->migrate_mutex);
out_unlock_svms:
mutex_unlock(&p->svms.lock);
out_unref_process:


@ -41,13 +41,9 @@ enum MIGRATION_COPY_DIR {
};
int svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
unsigned long start, unsigned long last,
struct mm_struct *mm, uint32_t trigger);
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
unsigned long start, unsigned long last,
uint32_t trigger, struct page *fault_page);
uint32_t trigger, struct page *fault_page);
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);


@ -158,13 +158,12 @@ svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
static int
svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
unsigned long offset, unsigned long npages,
unsigned long *hmm_pfns, uint32_t gpuidx, uint64_t *vram_pages)
unsigned long *hmm_pfns, uint32_t gpuidx)
{
enum dma_data_direction dir = DMA_BIDIRECTIONAL;
dma_addr_t *addr = prange->dma_addr[gpuidx];
struct device *dev = adev->dev;
struct page *page;
uint64_t vram_pages_dev;
int i, r;
if (!addr) {
@ -174,7 +173,6 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
prange->dma_addr[gpuidx] = addr;
}
vram_pages_dev = 0;
addr += offset;
for (i = 0; i < npages; i++) {
if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
@ -184,7 +182,6 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
if (is_zone_device_page(page)) {
struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
vram_pages_dev++;
addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
bo_adev->vm_manager.vram_base_offset -
bo_adev->kfd.pgmap.range.start;
@ -201,14 +198,13 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
addr[i] >> PAGE_SHIFT, page_to_pfn(page));
}
*vram_pages = vram_pages_dev;
return 0;
}
static int
svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
unsigned long offset, unsigned long npages,
unsigned long *hmm_pfns, uint64_t *vram_pages)
unsigned long *hmm_pfns)
{
struct kfd_process *p;
uint32_t gpuidx;
@ -227,7 +223,7 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
}
r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
hmm_pfns, gpuidx, vram_pages);
hmm_pfns, gpuidx);
if (r)
break;
}
@ -353,7 +349,6 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
INIT_LIST_HEAD(&prange->child_list);
atomic_set(&prange->invalid, 0);
prange->validate_timestamp = 0;
prange->vram_pages = 0;
mutex_init(&prange->migrate_mutex);
mutex_init(&prange->lock);
@ -400,8 +395,6 @@ static void svm_range_bo_release(struct kref *kref)
prange->start, prange->last);
mutex_lock(&prange->lock);
prange->svm_bo = NULL;
/* prange should not hold vram page now */
WARN_ON(prange->actual_loc);
mutex_unlock(&prange->lock);
spin_lock(&svm_bo->list_lock);
@ -783,7 +776,7 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
prange->flags &= ~attrs[i].value;
break;
case KFD_IOCTL_SVM_ATTR_GRANULARITY:
prange->granularity = attrs[i].value;
prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
break;
default:
WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
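The min_t() clamp on granularity above guards the shifts this value feeds, since a user-supplied value of 64 or more would make 1UL << granularity undefined on 64-bit. A minimal sketch (attr_value stands in for attrs[i].value):

/* 0x3F caps the shift at 63, the largest defined shift for a u64. */
unsigned long size = 1UL << min_t(uint32_t, attr_value, 0x3F);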
@ -982,11 +975,6 @@ svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
new->svm_bo = svm_range_bo_ref(old->svm_bo);
new->ttm_res = old->ttm_res;
/* set new's vram_pages as old range's now, the accurate vram_pages
* will be updated during mapping
*/
new->vram_pages = min(old->vram_pages, new->npages);
spin_lock(&new->svm_bo->list_lock);
list_add(&new->svm_bo_list, &new->svm_bo->range_list);
spin_unlock(&new->svm_bo->list_lock);
@ -1147,6 +1135,66 @@ svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
list_add_tail(&pchild->child_list, &prange->child_list);
}
/**
* svm_range_split_by_granularity - collect ranges within granularity boundary
*
* @p: the process with svms list
* @mm: mm structure
* @addr: the vm fault address in pages, to split the prange
* @parent: parent range if prange is from child list
* @prange: prange to split
*
* Trims @prange to be a single aligned block of prange->granularity if
* possible. The head and tail are added to the child_list in @parent.
*
* Context: caller must hold mmap_read_lock and prange->lock
*
* Return:
* 0 - OK, otherwise error code
*/
int
svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
unsigned long addr, struct svm_range *parent,
struct svm_range *prange)
{
struct svm_range *head, *tail;
unsigned long start, last, size;
int r;
/* Align the split range start and size to the granularity size, then a single
* PTE will be used for whole range, this reduces the number of PTE
* updated and the L1 TLB space used for translation.
*/
size = 1UL << prange->granularity;
start = ALIGN_DOWN(addr, size);
last = ALIGN(addr + 1, size) - 1;
pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
prange->svms, prange->start, prange->last, start, last, size);
if (start > prange->start) {
r = svm_range_split(prange, start, prange->last, &head);
if (r)
return r;
svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
}
if (last < prange->last) {
r = svm_range_split(prange, prange->start, last, &tail);
if (r)
return r;
svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
}
/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
prange, prange->start, prange->last,
SVM_OP_ADD_RANGE_AND_MAP);
}
return 0;
}
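A worked example of the alignment in svm_range_split_by_granularity(), using granularity 9 (512-page granules, the usual default) and an illustrative fault at page 0x12345:

unsigned long size  = 1UL << 9;				/* 0x200 pages */
unsigned long start = ALIGN_DOWN(0x12345, size);	/* 0x12200 */
unsigned long last  = ALIGN(0x12345 + 1, size) - 1;	/* 0x123ff */
/* [0x12200, 0x123ff] stays in prange; the head below 0x12200 and the
 * tail above 0x123ff are split off as children of the parent range. */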
static bool
svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
{
@ -1234,7 +1282,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
if (num_possible_nodes() <= 1)
mapping_flags |= mtype_local;
else
mapping_flags |= AMDGPU_VM_MTYPE_NC;
mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
/* system memory accessed by the dGPU */
} else {
mapping_flags |= AMDGPU_VM_MTYPE_UC;
@ -1269,7 +1317,7 @@ svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
pr_debug("[0x%llx 0x%llx]\n", start, last);
return amdgpu_vm_update_range(adev, vm, false, true, true, NULL, start,
return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, start,
last, init_pte_value, 0, 0, NULL, NULL,
fence);
}
@ -1376,8 +1424,8 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
* different memory partition based on fpfn/lpfn, we should use
* same vm_manager.vram_base_offset regardless memory partition.
*/
r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
last_start, prange->start + i,
r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, true,
NULL, last_start, prange->start + i,
pte_flags,
(last_start - prange->start) << PAGE_SHIFT,
bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
@ -1570,7 +1618,6 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
struct svm_validate_context *ctx;
unsigned long start, end, addr;
struct kfd_process *p;
uint64_t vram_pages;
void *owner;
int32_t idx;
int r = 0;
@ -1639,13 +1686,11 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
}
}
vram_pages = 0;
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
for (addr = start; !r && addr < end; ) {
struct hmm_range *hmm_range;
struct vm_area_struct *vma;
uint64_t vram_pages_vma;
unsigned long next = 0;
unsigned long offset;
unsigned long npages;
@ -1674,11 +1719,9 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
if (!r) {
offset = (addr - start) >> PAGE_SHIFT;
r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
hmm_range->hmm_pfns, &vram_pages_vma);
hmm_range->hmm_pfns);
if (r)
pr_debug("failed %d to dma map range\n", r);
else
vram_pages += vram_pages_vma;
}
svm_range_lock(prange);
@ -1704,19 +1747,6 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
addr = next;
}
if (addr == end) {
prange->vram_pages = vram_pages;
/* if prange does not include any vram pages and it has
* not released its svm_bo, drop the svm_bo reference
* and set its actual_loc to sys ram
*/
if (!vram_pages && prange->ttm_res) {
prange->actual_loc = 0;
svm_range_vram_node_free(prange);
}
}
svm_range_unreserve_bos(ctx);
if (!r)
prange->validate_timestamp = ktime_get_boottime();
@ -1969,7 +1999,6 @@ static struct svm_range *svm_range_clone(struct svm_range *old)
new->actual_loc = old->actual_loc;
new->granularity = old->granularity;
new->mapped_to_gpu = old->mapped_to_gpu;
new->vram_pages = old->vram_pages;
bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
@ -2035,6 +2064,7 @@ svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
 * @update_list: output, the ranges that need validation and GPU mapping update
 * @insert_list: output, the ranges that need to be inserted into svms
 * @remove_list: output, the ranges that are replaced and need to be removed from svms
* @remap_list: output, remap unaligned svm ranges
*
* Check if the virtual address range has overlap with any existing ranges,
* split partly overlapping ranges and add new ranges in the gaps. All changes
@ -2876,7 +2906,6 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
uint32_t vmid, uint32_t node_id,
uint64_t addr, bool write_fault)
{
unsigned long start, last, size;
struct mm_struct *mm = NULL;
struct svm_range_list *svms;
struct svm_range *prange;
@ -3012,35 +3041,32 @@ retry_write_locked:
kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
write_fault, timestamp);
if (prange->actual_loc != 0 || best_loc != 0) {
if (prange->actual_loc != best_loc) {
migration = true;
/* Align migration range start and size to granularity size */
size = 1UL << prange->granularity;
start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start);
last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last);
if (best_loc) {
r = svm_migrate_to_vram(prange, best_loc, start, last,
mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
r = svm_migrate_to_vram(prange, best_loc, mm,
KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
if (r) {
pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
r, addr);
/* Fallback to system memory if migration to
* VRAM failed
*/
if (prange->actual_loc && prange->actual_loc != best_loc)
r = svm_migrate_vram_to_ram(prange, mm, start, last,
KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
if (prange->actual_loc)
r = svm_migrate_vram_to_ram(prange, mm,
KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
NULL);
else
r = 0;
}
} else {
r = svm_migrate_vram_to_ram(prange, mm, start, last,
KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL);
r = svm_migrate_vram_to_ram(prange, mm,
KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
NULL);
}
if (r) {
pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
r, svms, start, last);
r, svms, prange->start, prange->last);
goto out_unlock_range;
}
}
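
/*
 * For reference, the migration window the removed fault path computed:
 * a granularity-aligned block around the faulting page, clamped to the
 * range bounds. With granularity = 9 (512 pages, 0x200) and
 * addr = 0x12345, ALIGN_DOWN gives start = 0x12200 and
 * ALIGN(addr + 1) - 1 gives last = 0x123ff, subject to the
 * prange->start/last clamps. A standalone sketch (the EXAMPLE_ macros
 * mirror the kernel's ALIGN helpers and require a power-of-two size):
 */
#define EXAMPLE_ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define EXAMPLE_ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

static void example_migration_window(unsigned long addr, unsigned int granularity,
				     unsigned long range_start, unsigned long range_last,
				     unsigned long *start, unsigned long *last)
{
	unsigned long size = 1UL << granularity;

	*start = max(EXAMPLE_ALIGN_DOWN(addr, size), range_start);
	*last = min(EXAMPLE_ALIGN(addr + 1, size) - 1, range_last);
}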
@ -3394,24 +3420,18 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
*migrated = false;
best_loc = svm_range_best_prefetch_location(prange);
/* when best_loc is a gpu node and the same as prange->actual_loc,
 * we still need to do the migration, as prange->actual_loc != 0 does
 * not mean all pages in prange are in vram. hmm migrate will pick
 * up the right pages during migration.
 */
if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) ||
(best_loc == 0 && prange->actual_loc == 0))
if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
best_loc == prange->actual_loc)
return 0;
if (!best_loc) {
r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
r = svm_migrate_vram_to_ram(prange, mm,
KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
*migrated = !r;
return r;
}
r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last,
mm, KFD_MIGRATE_TRIGGER_PREFETCH);
r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
*migrated = !r;
return r;
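
/*
 * Decision summary for the prefetch path above: best_loc undefined or
 * already equal to prange->actual_loc -> nothing to do; best_loc == 0 ->
 * migrate vram pages back to system ram; any other best_loc -> migrate
 * to the vram of that GPU node. *migrated reports whether a migration
 * was actually issued.
 */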
@ -3466,11 +3486,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
mutex_lock(&prange->migrate_mutex);
do {
/* migrate all vram pages in this prange to sys ram;
 * after that, prange->actual_loc should be zero
 */
r = svm_migrate_vram_to_ram(prange, mm,
prange->start, prange->last,
KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
} while (!r && prange->actual_loc && --retries);

View File

@ -78,7 +78,6 @@ struct svm_work_list_item {
* @update_list:link list node used to add to update_list
* @mapping: bo_va mapping structure to create and update GPU page table
* @npages: number of pages
* @vram_pages: vram pages number in this svm_range
* @dma_addr: dma mapping address on each GPU for system memory physical page
* @ttm_res: vram ttm resource map
* @offset: range start offset within mm_nodes
@ -89,9 +88,7 @@ struct svm_work_list_item {
* @flags: flags defined as KFD_IOCTL_SVM_FLAG_*
 * @preferred_loc: preferred location, 0 for CPU, or GPU id
 * @prefetch_loc: last prefetch location, 0 for CPU, or GPU id
* @actual_loc: this svm_range location. 0: all pages are from sys ram;
* GPU id: this svm_range may include vram pages from GPU with
* id actual_loc.
* @actual_loc: the actual location, 0 for CPU, or GPU id
* @granularity:migration granularity, log2 num pages
* @invalid: not 0 means cpu page table is invalidated
* @validate_timestamp: system timestamp when range is validated
@ -115,7 +112,6 @@ struct svm_range {
struct list_head list;
struct list_head update_list;
uint64_t npages;
uint64_t vram_pages;
dma_addr_t *dma_addr[MAX_GPU_INSTANCE];
struct ttm_resource *ttm_res;
uint64_t offset;
@ -172,6 +168,9 @@ struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange,
int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
bool clear);
void svm_range_vram_node_free(struct svm_range *prange);
int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
unsigned long addr, struct svm_range *parent,
struct svm_range *prange);
int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
uint32_t vmid, uint32_t node_id, uint64_t addr,
bool write_fault);

View File

@ -89,9 +89,10 @@ EXPORT_SYMBOL(amdgpu_xcp_drm_dev_alloc);
void amdgpu_xcp_drv_release(void)
{
for (--pdev_num; pdev_num >= 0; --pdev_num) {
devres_release_group(&xcp_dev[pdev_num]->pdev->dev, NULL);
platform_device_unregister(xcp_dev[pdev_num]->pdev);
xcp_dev[pdev_num]->pdev = NULL;
struct platform_device *pdev = xcp_dev[pdev_num]->pdev;
devres_release_group(&pdev->dev, NULL);
platform_device_unregister(pdev);
xcp_dev[pdev_num] = NULL;
}
pdev_num = 0;

View File

@ -1642,7 +1642,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
}
init_data.flags.gpu_vm_support = adev->mode_info.gpu_vm_support;
adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support;
if (amdgpu_dc_feature_mask & DC_FBC_MASK)
init_data.flags.fbc_support = true;
@ -6513,6 +6513,9 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
return;
}
if (drm_detect_hdmi_monitor(edid))
init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;
aconnector->edid = edid;
aconnector->dc_em_sink = dc_link_add_remote_sink(
@ -9877,7 +9880,7 @@ static int dm_update_plane_state(struct dc *dc,
/* Block top most plane from being a video plane */
if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay)
if (amdgpu_dm_plane_is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay)
return -EINVAL;
*is_top_most_overlay = false;

View File

@ -96,7 +96,7 @@ bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state)
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
static void vblank_control_worker(struct work_struct *work)
static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
{
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
@ -151,7 +151,7 @@ static void vblank_control_worker(struct work_struct *work)
kfree(vblank_work);
}
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
@ -191,7 +191,7 @@ skip:
if (!work)
return -ENOMEM;
INIT_WORK(&work->work, vblank_control_worker);
INIT_WORK(&work->work, amdgpu_dm_crtc_vblank_control_worker);
work->dm = dm;
work->acrtc = acrtc;
work->enable = enable;
@ -209,15 +209,15 @@ skip:
int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc)
{
return dm_set_vblank(crtc, true);
return amdgpu_dm_crtc_set_vblank(crtc, true);
}
void amdgpu_dm_crtc_disable_vblank(struct drm_crtc *crtc)
{
dm_set_vblank(crtc, false);
amdgpu_dm_crtc_set_vblank(crtc, false);
}
static void dm_crtc_destroy_state(struct drm_crtc *crtc,
static void amdgpu_dm_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct dm_crtc_state *cur = to_dm_crtc_state(state);
@ -233,7 +233,7 @@ static void dm_crtc_destroy_state(struct drm_crtc *crtc,
kfree(state);
}
static struct drm_crtc_state *dm_crtc_duplicate_state(struct drm_crtc *crtc)
static struct drm_crtc_state *amdgpu_dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
struct dm_crtc_state *state, *cur;
@ -273,12 +273,12 @@ static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
kfree(crtc);
}
static void dm_crtc_reset_state(struct drm_crtc *crtc)
static void amdgpu_dm_crtc_reset_state(struct drm_crtc *crtc)
{
struct dm_crtc_state *state;
if (crtc->state)
dm_crtc_destroy_state(crtc, crtc->state);
amdgpu_dm_crtc_destroy_state(crtc, crtc->state);
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (WARN_ON(!state))
@ -298,12 +298,12 @@ static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.reset = dm_crtc_reset_state,
.reset = amdgpu_dm_crtc_reset_state,
.destroy = amdgpu_dm_crtc_destroy,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = dm_crtc_duplicate_state,
.atomic_destroy_state = dm_crtc_destroy_state,
.atomic_duplicate_state = amdgpu_dm_crtc_duplicate_state,
.atomic_destroy_state = amdgpu_dm_crtc_destroy_state,
.set_crc_source = amdgpu_dm_crtc_set_crc_source,
.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
@ -316,11 +316,11 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
#endif
};
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
static void amdgpu_dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
static int amdgpu_dm_crtc_count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
struct drm_atomic_state *state = new_crtc_state->state;
struct drm_plane *plane;
@ -352,8 +352,8 @@ static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
return num_active;
}
static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
struct drm_crtc_state *new_crtc_state)
static void amdgpu_dm_crtc_update_crtc_active_planes(struct drm_crtc *crtc,
struct drm_crtc_state *new_crtc_state)
{
struct dm_crtc_state *dm_new_crtc_state =
to_dm_crtc_state(new_crtc_state);
@ -364,18 +364,18 @@ static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
return;
dm_new_crtc_state->active_planes =
count_crtc_active_planes(new_crtc_state);
amdgpu_dm_crtc_count_crtc_active_planes(new_crtc_state);
}
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
static bool amdgpu_dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
@ -386,7 +386,7 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
trace_amdgpu_dm_crtc_atomic_check(crtc_state);
dm_update_crtc_active_planes(crtc, crtc_state);
amdgpu_dm_crtc_update_crtc_active_planes(crtc, crtc_state);
if (WARN_ON(unlikely(!dm_crtc_state->stream &&
amdgpu_dm_crtc_modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
@ -429,9 +429,9 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
.disable = dm_crtc_helper_disable,
.atomic_check = dm_crtc_helper_atomic_check,
.mode_fixup = dm_crtc_helper_mode_fixup,
.disable = amdgpu_dm_crtc_helper_disable,
.atomic_check = amdgpu_dm_crtc_helper_atomic_check,
.mode_fixup = amdgpu_dm_crtc_helper_mode_fixup,
.get_scanout_position = amdgpu_crtc_get_scanout_position,
};

View File

@ -3645,7 +3645,9 @@ static int capabilities_show(struct seq_file *m, void *unused)
struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
struct dc *dc = adev->dm.dc;
bool mall_supported = dc->caps.mall_size_total;
bool subvp_supported = dc->caps.subvp_fw_processing_delay_us;
unsigned int mall_in_use = false;
unsigned int subvp_in_use = dc->cap_funcs.get_subvp_en(dc, dc->current_state);
struct hubbub *hubbub = dc->res_pool->hubbub;
if (hubbub->funcs->get_mall_en)
@ -3653,6 +3655,8 @@ static int capabilities_show(struct seq_file *m, void *unused)
seq_printf(m, "mall supported: %s, enabled: %s\n",
mall_supported ? "yes" : "no", mall_in_use ? "yes" : "no");
seq_printf(m, "sub-viewport supported: %s, enabled: %s\n",
subvp_supported ? "yes" : "no", subvp_in_use ? "yes" : "no");
return 0;
}
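
/*
 * Sample output for this debugfs entry, derived from the format strings
 * above (the yes/no values are illustrative):
 *
 *   mall supported: yes, enabled: no
 *   sub-viewport supported: yes, enabled: no
 */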

View File

@ -139,7 +139,7 @@ void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state
}
}
static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
{
if (!*mods)
return;
@ -164,12 +164,12 @@ static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_
*size += 1;
}
static bool modifier_has_dcc(uint64_t modifier)
static bool amdgpu_dm_plane_modifier_has_dcc(uint64_t modifier)
{
return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}
static unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier)
static unsigned int amdgpu_dm_plane_modifier_gfx9_swizzle_mode(uint64_t modifier)
{
if (modifier == DRM_FORMAT_MOD_LINEAR)
return 0;
@ -177,8 +177,8 @@ static unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier)
return AMD_FMT_MOD_GET(TILE, modifier);
}
static void fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
uint64_t tiling_flags)
static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
uint64_t tiling_flags)
{
/* Fill GFX8 params */
if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
@ -209,8 +209,8 @@ static void fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
}
static void fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
union dc_tiling_info *tiling_info)
static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
union dc_tiling_info *tiling_info)
{
/* Fill GFX9 params */
tiling_info->gfx9.num_pipes =
@ -230,9 +230,9 @@ static void fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}
static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
union dc_tiling_info *tiling_info,
uint64_t modifier)
static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
union dc_tiling_info *tiling_info,
uint64_t modifier)
{
unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
@ -241,7 +241,7 @@ static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev
pipes_log2 = min(5u, mod_pipe_xor_bits);
fill_gfx9_tiling_info_from_device(adev, tiling_info);
amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info);
if (!IS_AMD_FMT_MOD(modifier))
return;
@ -258,13 +258,13 @@ static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev
}
}
static int validate_dcc(struct amdgpu_device *adev,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const union dc_tiling_info *tiling_info,
const struct dc_plane_dcc_param *dcc,
const struct dc_plane_address *address,
const struct plane_size *plane_size)
static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const union dc_tiling_info *tiling_info,
const struct dc_plane_dcc_param *dcc,
const struct dc_plane_address *address,
const struct plane_size *plane_size)
{
struct dc *dc = adev->dm.dc;
struct dc_dcc_surface_param input;
@ -303,23 +303,23 @@ static int validate_dcc(struct amdgpu_device *adev,
return 0;
}
static int fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const struct plane_size *plane_size,
union dc_tiling_info *tiling_info,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address,
const bool force_disable_dcc)
static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const struct plane_size *plane_size,
union dc_tiling_info *tiling_info,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address,
const bool force_disable_dcc)
{
const uint64_t modifier = afb->base.modifier;
int ret = 0;
fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
if (modifier_has_dcc(modifier) && !force_disable_dcc) {
if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) {
uint64_t dcc_address = afb->address + afb->base.offsets[1];
bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
@ -347,60 +347,64 @@ static int fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
}
ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
if (ret)
drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret);
return ret;
}
static void add_gfx10_1_modifiers(const struct amdgpu_device *adev,
uint64_t **mods, uint64_t *size, uint64_t *capacity)
static void amdgpu_dm_plane_add_gfx10_1_modifiers(const struct amdgpu_device *adev,
uint64_t **mods,
uint64_t *size,
uint64_t *capacity)
{
int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_RETILE, 1) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_RETILE, 1) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
/* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
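
/*
 * A small sketch of how the AMD_FMT_MOD bitfield helpers from
 * drm_fourcc.h compose and decode the modifiers added above: _SET
 * packs a field, IS_AMD_FMT_MOD checks the vendor bits, and _GET
 * extracts a field back out.
 */
#include <drm/drm_fourcc.h>

static bool example_modifier_roundtrip(void)
{
	uint64_t mod = AMD_FMT_MOD |
		       AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		       AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10);

	return IS_AMD_FMT_MOD(mod) &&
	       AMD_FMT_MOD_GET(TILE, mod) == AMD_FMT_MOD_TILE_GFX9_64K_R_X;
}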
static void add_gfx9_modifiers(const struct amdgpu_device *adev,
uint64_t **mods, uint64_t *size, uint64_t *capacity)
static void amdgpu_dm_plane_add_gfx9_modifiers(const struct amdgpu_device *adev,
uint64_t **mods,
uint64_t *size,
uint64_t *capacity)
{
int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
int pipe_xor_bits = min(8, pipes +
@ -421,163 +425,164 @@ static void add_gfx9_modifiers(const struct amdgpu_device *adev,
*/
if (has_constant_encode) {
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
}
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
if (has_constant_encode) {
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_RETILE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(RB, rb) |
AMD_FMT_MOD_SET(PIPE, pipes));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_RETILE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(RB, rb) |
AMD_FMT_MOD_SET(PIPE, pipes));
}
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_RETILE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
AMD_FMT_MOD_SET(RB, rb) |
AMD_FMT_MOD_SET(PIPE, pipes));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_RETILE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
AMD_FMT_MOD_SET(RB, rb) |
AMD_FMT_MOD_SET(PIPE, pipes));
}
/*
* Only supported for 64bpp on Raven, will be filtered on format in
* dm_plane_format_mod_supported.
* amdgpu_dm_plane_format_mod_supported.
*/
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
if (adev->family == AMDGPU_FAMILY_RV) {
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
}
/*
* Only supported for 64bpp on Raven, will be filtered on format in
* dm_plane_format_mod_supported.
* amdgpu_dm_plane_format_mod_supported.
*/
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
if (adev->family == AMDGPU_FAMILY_RV) {
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
}
static void add_gfx10_3_modifiers(const struct amdgpu_device *adev,
uint64_t **mods, uint64_t *size, uint64_t *capacity)
static void amdgpu_dm_plane_add_gfx10_3_modifiers(const struct amdgpu_device *adev,
uint64_t **mods,
uint64_t *size,
uint64_t *capacity)
{
int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_RETILE, 1) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_RETILE, 1) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_RETILE, 1) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs) |
AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_RETILE, 1) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs));
/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
/* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
static void add_gfx11_modifiers(struct amdgpu_device *adev,
static void amdgpu_dm_plane_add_gfx11_modifiers(struct amdgpu_device *adev,
uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
int num_pipes = 0;
@ -628,21 +633,21 @@ static void add_gfx11_modifiers(struct amdgpu_device *adev,
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
add_modifier(mods, size, capacity, modifier_dcc_best);
add_modifier(mods, size, capacity, modifier_dcc_4k);
amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best);
amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k);
add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
add_modifier(mods, size, capacity, modifier_r_x);
amdgpu_dm_plane_add_modifier(mods, size, capacity, modifier_r_x);
}
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
amdgpu_dm_plane_add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
}
static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
{
uint64_t size = 0, capacity = 128;
*mods = NULL;
@ -654,15 +659,15 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty
*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
if (plane_type == DRM_PLANE_TYPE_CURSOR) {
add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
return *mods ? 0 : -ENOMEM;
}
switch (adev->family) {
case AMDGPU_FAMILY_AI:
case AMDGPU_FAMILY_RV:
add_gfx9_modifiers(adev, mods, &size, &capacity);
amdgpu_dm_plane_add_gfx9_modifiers(adev, mods, &size, &capacity);
break;
case AMDGPU_FAMILY_NV:
case AMDGPU_FAMILY_VGH:
@ -670,21 +675,21 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty
case AMDGPU_FAMILY_GC_10_3_6:
case AMDGPU_FAMILY_GC_10_3_7:
if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
add_gfx10_3_modifiers(adev, mods, &size, &capacity);
amdgpu_dm_plane_add_gfx10_3_modifiers(adev, mods, &size, &capacity);
else
add_gfx10_1_modifiers(adev, mods, &size, &capacity);
amdgpu_dm_plane_add_gfx10_1_modifiers(adev, mods, &size, &capacity);
break;
case AMDGPU_FAMILY_GC_11_0_0:
case AMDGPU_FAMILY_GC_11_0_1:
case AMDGPU_FAMILY_GC_11_5_0:
add_gfx11_modifiers(adev, mods, &size, &capacity);
amdgpu_dm_plane_add_gfx11_modifiers(adev, mods, &size, &capacity);
break;
}
add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
/* INVALID marks the end of the list. */
add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
if (!*mods)
return -ENOMEM;
@ -692,9 +697,9 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty
return 0;
}
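
/*
 * A minimal usage sketch for the allocator above: the returned array is
 * terminated by DRM_FORMAT_MOD_INVALID and freed by the caller. The
 * walker below is illustrative (the real caller is amdgpu_dm_plane_init,
 * in the same file since the helper is static).
 */
static void example_walk_plane_modifiers(struct amdgpu_device *adev)
{
	uint64_t *mods, *m;

	if (amdgpu_dm_plane_get_plane_modifiers(adev, DRM_PLANE_TYPE_PRIMARY, &mods))
		return;

	for (m = mods; *m != DRM_FORMAT_MOD_INVALID; m++)
		; /* inspect *m */

	kfree(mods);
}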
static int get_plane_formats(const struct drm_plane *plane,
const struct dc_plane_cap *plane_cap,
uint32_t *formats, int max_formats)
static int amdgpu_dm_plane_get_plane_formats(const struct drm_plane *plane,
const struct dc_plane_cap *plane_cap,
uint32_t *formats, int max_formats)
{
int i, num_formats = 0;
@ -818,22 +823,22 @@ int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
}
if (adev->family >= AMDGPU_FAMILY_AI) {
ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
rotation, plane_size,
tiling_info, dcc,
address,
force_disable_dcc);
ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
rotation, plane_size,
tiling_info, dcc,
address,
force_disable_dcc);
if (ret)
return ret;
} else {
fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
}
return 0;
}
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct amdgpu_framebuffer *afb;
struct drm_gem_object *obj;
@ -928,8 +933,8 @@ error_unlock:
return r;
}
static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
static void amdgpu_dm_plane_helper_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct amdgpu_bo *rbo;
int r;
@ -949,7 +954,7 @@ static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
amdgpu_bo_unref(&rbo);
}
static void get_min_max_dc_plane_scaling(struct drm_device *dev,
static void amdgpu_dm_plane_get_min_max_dc_plane_scaling(struct drm_device *dev,
struct drm_framebuffer *fb,
int *min_downscale, int *max_upscale)
{
@ -1030,8 +1035,8 @@ int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state,
}
/* Get min/max allowed scaling factors from plane caps. */
get_min_max_dc_plane_scaling(state->crtc->dev, fb,
&min_downscale, &max_upscale);
amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->crtc->dev, fb,
&min_downscale, &max_upscale);
/*
* Convert to drm convention: 16.16 fixed point, instead of dc's
* 1.0 == 1000. Also drm scaling is src/dst instead of dc's
@ -1101,8 +1106,8 @@ int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev,
/* Validate scaling per-format with DC plane caps */
if (state->plane && state->plane->dev && state->fb) {
get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
&min_downscale, &max_upscale);
amdgpu_dm_plane_get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
&min_downscale, &max_upscale);
} else {
min_downscale = 250;
max_upscale = 16000;
@ -1128,8 +1133,8 @@ int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev,
return 0;
}
static int dm_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
@ -1167,8 +1172,8 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
static int dm_plane_atomic_async_check(struct drm_plane *plane,
struct drm_atomic_state *state)
static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
/* Only support async updates on cursor planes. */
if (plane->type != DRM_PLANE_TYPE_CURSOR)
@ -1177,8 +1182,8 @@ static int dm_plane_atomic_async_check(struct drm_plane *plane,
return 0;
}
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
struct dc_cursor_position *position)
static int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
struct dc_cursor_position *position)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
int x, y;
@ -1241,7 +1246,7 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
amdgpu_crtc->crtc_id, plane->state->crtc_w,
plane->state->crtc_h);
ret = get_cursor_position(plane, crtc, &position);
ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, &position);
if (ret)
return;
@ -1290,8 +1295,8 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
}
}
static void dm_plane_atomic_async_update(struct drm_plane *plane,
struct drm_atomic_state *state)
static void amdgpu_dm_plane_atomic_async_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
@ -1315,14 +1320,14 @@ static void dm_plane_atomic_async_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
.prepare_fb = dm_plane_helper_prepare_fb,
.cleanup_fb = dm_plane_helper_cleanup_fb,
.atomic_check = dm_plane_atomic_check,
.atomic_async_check = dm_plane_atomic_async_check,
.atomic_async_update = dm_plane_atomic_async_update
.prepare_fb = amdgpu_dm_plane_helper_prepare_fb,
.cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb,
.atomic_check = amdgpu_dm_plane_atomic_check,
.atomic_async_check = amdgpu_dm_plane_atomic_async_check,
.atomic_async_update = amdgpu_dm_plane_atomic_async_update
};
static void dm_drm_plane_reset(struct drm_plane *plane)
static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane)
{
struct dm_plane_state *amdgpu_state = NULL;
@ -1336,8 +1341,7 @@ static void dm_drm_plane_reset(struct drm_plane *plane)
__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
}
static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct drm_plane *plane)
{
struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
@ -1356,15 +1360,15 @@ dm_drm_plane_duplicate_state(struct drm_plane *plane)
return &dm_plane_state->base;
}
static bool dm_plane_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
static bool amdgpu_dm_plane_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
{
struct amdgpu_device *adev = drm_to_adev(plane->dev);
const struct drm_format_info *info = drm_format_info(format);
int i;
enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
enum dm_micro_swizzle microtile = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier) & 3;
if (!info)
return false;
@ -1401,7 +1405,7 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
info->cpp[0] < 8)
return false;
if (modifier_has_dcc(modifier)) {
if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
/* Per radeonsi comments 16/64 bpp are more complicated. */
if (info->cpp[0] != 4)
return false;
@ -1415,8 +1419,8 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
return true;
}
static void dm_drm_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
@ -1430,10 +1434,10 @@ static const struct drm_plane_funcs dm_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_helper_destroy,
.reset = dm_drm_plane_reset,
.atomic_duplicate_state = dm_drm_plane_duplicate_state,
.atomic_destroy_state = dm_drm_plane_destroy_state,
.format_mod_supported = dm_plane_format_mod_supported,
.reset = amdgpu_dm_plane_drm_plane_reset,
.atomic_duplicate_state = amdgpu_dm_plane_drm_plane_duplicate_state,
.atomic_destroy_state = amdgpu_dm_plane_drm_plane_destroy_state,
.format_mod_supported = amdgpu_dm_plane_format_mod_supported,
};
int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
@ -1447,10 +1451,10 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
unsigned int supported_rotations;
uint64_t *modifiers = NULL;
num_formats = get_plane_formats(plane, plane_cap, formats,
ARRAY_SIZE(formats));
num_formats = amdgpu_dm_plane_get_plane_formats(plane, plane_cap, formats,
ARRAY_SIZE(formats));
res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
res = amdgpu_dm_plane_get_plane_modifiers(dm->adev, plane->type, &modifiers);
if (res)
return res;
@ -1520,7 +1524,7 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
return 0;
}
bool is_video_format(uint32_t format)
bool amdgpu_dm_plane_is_video_format(uint32_t format)
{
int i;

View File

@ -62,5 +62,5 @@ void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state
bool *per_pixel_alpha, bool *pre_multiplied_alpha,
bool *global_alpha, int *global_alpha_value);
bool is_video_format(uint32_t format);
bool amdgpu_dm_plane_is_video_format(uint32_t format);
#endif

View File

@ -3575,7 +3575,8 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state
mpcc_inst = hubp->inst;
// MPCC inst is equal to pipe index in practice
for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
if ((dc->res_pool->opps[opp_inst] != NULL) &&
(dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) {
dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
break;
@ -4884,6 +4885,9 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
if (dc->debug.disable_idle_power_optimizations)
return;
if (dc->caps.ips_support && dc->config.disable_ips)
return;
if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
return;
@ -4895,6 +4899,26 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
dc->idle_optimizations_allowed = allow;
}
bool dc_dmub_is_ips_idle_state(struct dc *dc)
{
uint32_t idle_state = 0;
if (dc->debug.disable_idle_power_optimizations)
return false;
if (!dc->caps.ips_support || dc->config.disable_ips)
return false;
if (dc->hwss.get_idle_state)
idle_state = dc->hwss.get_idle_state(dc);
if ((idle_state & DMUB_IPS1_ALLOW_MASK) ||
(idle_state & DMUB_IPS2_ALLOW_MASK))
return true;
return false;
}
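
/*
 * A hedged usage sketch: a caller could consult the new helper before
 * touching hardware that is power gated in IPS1/IPS2 (the caller name
 * is illustrative, not an existing DC entry point).
 */
static void example_access_hw(struct dc *dc)
{
	if (dc_dmub_is_ips_idle_state(dc))
		dc_dmub_srv_exit_low_power_state(dc);

	/* ... safe to program registers here ... */
}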
/* set min and max memory clock to lowest and highest DPM level, respectively */
void dc_unlock_memory_clock_frequency(struct dc *dc)
{

View File

@ -321,10 +321,11 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
res_pool->ref_clocks.xtalin_clock_inKhz;
res_pool->ref_clocks.dchub_ref_clock_inKhz =
res_pool->ref_clocks.xtalin_clock_inKhz;
if (res_pool->hubbub && res_pool->hubbub->funcs->get_dchub_ref_freq)
res_pool->hubbub->funcs->get_dchub_ref_freq(res_pool->hubbub,
res_pool->ref_clocks.dccg_ref_clock_inKhz,
&res_pool->ref_clocks.dchub_ref_clock_inKhz);
if (dc->debug.using_dml2)
if (res_pool->hubbub && res_pool->hubbub->funcs->get_dchub_ref_freq)
res_pool->hubbub->funcs->get_dchub_ref_freq(res_pool->hubbub,
res_pool->ref_clocks.dccg_ref_clock_inKhz,
&res_pool->ref_clocks.dchub_ref_clock_inKhz);
} else
ASSERT_CRITICAL(false);
}
@ -4228,7 +4229,7 @@ static void set_avi_info_frame(
switch (stream->content_type) {
case DISPLAY_CONTENT_TYPE_NO_DATA:
hdmi_info.bits.CN0_CN1 = 0;
hdmi_info.bits.ITC = 0;
hdmi_info.bits.ITC = 1;
break;
case DISPLAY_CONTENT_TYPE_GRAPHICS:
hdmi_info.bits.CN0_CN1 = 0;

View File

@ -49,7 +49,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
#define DC_VER "3.2.255"
#define DC_VER "3.2.256"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@ -382,6 +382,7 @@ struct dc_cap_funcs {
bool (*get_dcc_compression_cap)(const struct dc *dc,
const struct dc_dcc_surface_param *input,
struct dc_surface_dcc_cap *output);
bool (*get_subvp_en)(struct dc *dc, struct dc_state *context);
};
struct link_training_settings;
@ -2317,6 +2318,7 @@ bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_
struct dc_cursor_attributes *cursor_attr);
void dc_allow_idle_optimizations(struct dc *dc, bool allow);
bool dc_dmub_is_ips_idle_state(struct dc *dc);
/* set min and max memory clock to lowest and highest DPM level, respectively */
void dc_unlock_memory_clock_frequency(struct dc *dc);

View File

@ -1110,8 +1110,10 @@ void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
const uint32_t max_num_polls = 10000;
uint32_t allow_state = 0;
uint32_t commit_state = 0;
uint32_t i;
if (dc->debug.dmcub_emulation)
return;
@ -1138,22 +1140,36 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
udelay(dc->debug.ips2_entry_delay_us);
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
do {
for (i = 0; i < max_num_polls; ++i) {
commit_state = dc->hwss.get_idle_state(dc);
} while (commit_state & DMUB_IPS2_COMMIT_MASK);
if (!(commit_state & DMUB_IPS2_COMMIT_MASK))
break;
udelay(1);
}
ASSERT(i < max_num_polls);
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
return;
/* TODO: See if we can return early here - IPS2 should go
* back directly to IPS0 and clear the flags, but it will
* be safer to directly notify DMCUB of this.
*/
allow_state = dc->hwss.get_idle_state(dc);
}
}
dc_dmub_srv_notify_idle(dc, false);
if (allow_state & DMUB_IPS1_ALLOW_MASK) {
do {
for (i = 0; i < max_num_polls; ++i) {
commit_state = dc->hwss.get_idle_state(dc);
} while (commit_state & DMUB_IPS1_COMMIT_MASK);
if (!(commit_state & DMUB_IPS1_COMMIT_MASK))
break;
udelay(1);
}
ASSERT(i < max_num_polls);
}
}
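
/*
 * The IPS1/IPS2 loops above replace unbounded do/while polling with a
 * bounded-poll pattern; a generic sketch (poll_fn and mask are
 * illustrative parameters, not DC API):
 */
static bool example_poll_until_clear(uint32_t (*poll_fn)(const struct dc *dc),
				     const struct dc *dc, uint32_t mask,
				     uint32_t max_num_polls)
{
	uint32_t i;

	for (i = 0; i < max_num_polls; ++i) {
		if (!(poll_fn(dc) & mask))
			return true;	/* bit cleared within budget */
		udelay(1);
	}
	return false;			/* timed out; caller asserts */
}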

View File

@ -554,6 +554,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.max_downscale_src_width = 3840,
.underflow_assert_delay_us = 0xFFFFFFFF,
.enable_legacy_fast_update = true,
.using_dml2 = false,
};
static const struct dc_debug_options debug_defaults_diags = {

View File

@ -723,6 +723,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
.enable_legacy_fast_update = true,
.using_dml2 = false,
};
void dcn20_dpp_destroy(struct dpp **dpp)

View File

@ -614,6 +614,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.underflow_assert_delay_us = 0xFFFFFFFF,
.enable_tri_buf = false,
.enable_legacy_fast_update = true,
.using_dml2 = false,
};
static void dcn201_dpp_destroy(struct dpp **dpp)

View File

@ -654,6 +654,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.dmub_command_table = true,
.use_max_lb = true,
.enable_legacy_fast_update = true,
.using_dml2 = false,
};
static const struct dc_panel_config panel_config_defaults = {

View File

@ -729,6 +729,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.use_max_lb = true,
.exit_idle_opt_for_cursor_updates = true,
.enable_legacy_fast_update = false,
.using_dml2 = false,
};
static const struct dc_panel_config panel_config_defaults = {

View File

@ -701,7 +701,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.dwb_fi_phase = -1, // -1 = disable
.dmub_command_table = true,
.use_max_lb = false,
.exit_idle_opt_for_cursor_updates = true
.exit_idle_opt_for_cursor_updates = true,
.using_dml2 = false,
};
static void dcn301_dpp_destroy(struct dpp **dpp)

View File

@ -99,6 +99,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.use_max_lb = true,
.exit_idle_opt_for_cursor_updates = true,
.enable_legacy_fast_update = false,
.using_dml2 = false,
};
static const struct dc_panel_config panel_config_defaults = {

View File

@ -2,6 +2,24 @@
/*
* Copyright (C) 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*/

View File

@ -2,6 +2,24 @@
/*
* Copyright (C) 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*/

View File

@ -2,6 +2,24 @@
/*
* Copyright (C) 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*/

View File

@ -2,6 +2,24 @@
/*
* Copyright (C) 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*/
@ -81,6 +99,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.dmub_command_table = true,
.exit_idle_opt_for_cursor_updates = true,
.disable_idle_power_optimizations = false,
.using_dml2 = false,
};
static const struct dc_panel_config panel_config_defaults = {

View File

@ -2,6 +2,24 @@
/*
* Copyright (C) 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*/

View File

@ -1,5 +1,5 @@
#
# (c) Copyright 2020 Advanced Micro Devices, Inc. All rights reserved
# Copyright 2020 Advanced Micro Devices, Inc. All rights reserved
#
# All rights reserved. This notice is intended as a precaution against
# inadvertent publication and does not imply publication or any waiver

View File

@ -109,6 +109,28 @@ static void dcn31_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigne
+ hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs);
}
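/*
 * Descriptive comment (not in the original patch): poll until the DET
 * allocation programmed above is reflected in DET<n>_SIZE_CURRENT; per
 * the REG_WAIT arguments this retries up to 30 times at a 1000 us
 * interval, roughly a 30 ms upper bound.
 */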
static void dcn31_wait_for_det_apply(struct hubbub *hubbub, int hubp_inst)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
switch (hubp_inst) {
case 0:
REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1000, 30);
break;
case 1:
REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1000, 30);
break;
case 2:
REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1000, 30);
break;
case 3:
REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1000, 30);
break;
default:
break;
}
}
static void dcn31_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
@ -1041,6 +1063,7 @@ static const struct hubbub_funcs hubbub31_funcs = {
.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
.verify_allow_pstate_change_high = hubbub31_verify_allow_pstate_change_high,
.program_det_size = dcn31_program_det_size,
.wait_for_det_apply = dcn31_wait_for_det_apply,
.program_compbuf_size = dcn31_program_compbuf_size,
.init_crb = dcn31_init_crb,
.hubbub_read_state = hubbub2_read_state,

View File

@ -893,6 +893,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.enable_legacy_fast_update = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface to disable Z9 */
.dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
.using_dml2 = false,
};
static const struct dc_panel_config panel_config_defaults = {

View File

@ -916,7 +916,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.hdmistream = true,
.hdmichar = true,
.dpstream = true,
.symclk32_se = true,
.symclk32_se = false,
.symclk32_le = true,
.symclk_fe = true,
.physymclk = true,
@ -924,7 +924,8 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.seamless_boot_odm_combine = true
.seamless_boot_odm_combine = true,
.using_dml2 = false,
};
static const struct dc_debug_options debug_defaults_diags = {

View File

@ -889,6 +889,7 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.enable_legacy_fast_update = true,
.psr_power_use_phy_fsm = 0,
.using_dml2 = false,
};
static const struct dc_panel_config panel_config_defaults = {

View File

@ -885,6 +885,7 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.enable_legacy_fast_update = true,
.using_dml2 = false,
};
static const struct dc_panel_config panel_config_defaults = {

View File

@ -60,7 +60,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.pipe_control_lock = dcn20_pipe_control_lock,
.interdependent_update_lock = dcn10_lock_all_pipes,
.cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn30_prepare_bandwidth,
.prepare_bandwidth = dcn32_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,

View File

@ -1993,7 +1993,8 @@ int dcn32_populate_dml_pipes_from_context(
}
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap,
.get_subvp_en = dcn32_subvp_in_use,
};
void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context,
@ -2445,6 +2446,11 @@ static bool dcn32_resource_construct(
dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params;
dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
dc->dml2_options.svp_pstate.callbacks.dc = dc;
dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context;
@ -2476,6 +2482,7 @@ static bool dcn32_resource_construct(
dc->dml2_options.max_segments_per_hubp = 18;
dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;
dc->dml2_options.map_dc_pipes_with_callbacks = true;
if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0))
dc->config.sdpif_request_limit_words_per_umc = 16;

View File

@ -732,6 +732,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.fpo_vactive_max_blank_us = 1000,
.enable_legacy_fast_update = false,
.disable_dc_mode_overwrite = true,
.using_dml2 = false,
};
static struct dce_aux *dcn321_aux_engine_create(
@ -1570,7 +1571,8 @@ static void dcn321_destroy_resource_pool(struct resource_pool **pool)
}
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap,
.get_subvp_en = dcn32_subvp_in_use,
};
static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
@ -1998,6 +2000,11 @@ static bool dcn321_resource_construct(
dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params;
dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
dc->dml2_options.svp_pstate.callbacks.dc = dc;
dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context;

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "core_types.h"

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DCN35_DPP_H__

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dcn35_dsc.h"

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DCN35_DSC_H__

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DCN35_DWB_H

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DC_HUBBUB_DCN35_H__

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dcn35_hubp.h"

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DC_HUBP_DCN35_H__

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dce110/dce110_hwseq.h"

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DC_DCN35_INIT_H__

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dcn35_mmhubbub.h"

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DCN35_MMHUBBUB_H

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dcn35_opp.h"

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DCN35_OPP_H

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dcn35_optc.h"

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DC_OPTC_DCN35_H__

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "reg_helper.h"

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef _DCN35_PG_CNTL_H_

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dm_services.h"
@ -2084,6 +2086,11 @@ static bool dcn35_resource_construct(
dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params;
dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
dc->dml2_options.max_segments_per_hubp = 24;
dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef _DCN35_RESOURCE_H_

View File

@ -20,7 +20,9 @@
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# makefile for dml2
# Authors: AMD
#
# Makefile for dml2.
ifdef CONFIG_X86
dml2_ccflags-$(CONFIG_CC_IS_GCC) := -mhard-float

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __CMNTYPES_H__

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "display_mode_core.h"

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DISPLAY_MODE_CORE_STRUCT_H__

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DISPLAY_MODE_LIB_DEFINES_H__

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "display_mode_util.h"

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DISPLAY_MODE_UTIL_H__

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dml2_mall_phantom.h"
@ -756,6 +758,148 @@ static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state
free_unused_pipes_for_plane(ctx, state, plane, &scratch->pipe_pool, stream->stream_id);
}
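/*
 * Descriptive comment (not in the original patch): the MPC factor for a
 * plane is the number of DPP pipes DML assigned to it (DPPPerSurface);
 * only the dml2_architecture_20 layout is handled here.
 */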
static unsigned int get_mpc_factor(struct dml2_context *ctx,
const struct dc_state *state,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
const struct dc_stream_status *status, unsigned int stream_id,
int plane_idx)
{
unsigned int plane_id;
unsigned int cfg_idx;
get_plane_id(state, status->plane_states[plane_idx], stream_id, &plane_id);
cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id);
if (ctx->architecture == dml2_architecture_20)
return (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx];
ASSERT(false);
return 1;
}
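/* Translate the DML ODM mode chosen for this stream into a slice count (1/2/4). */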
static unsigned int get_odm_factor(
const struct dml2_context *ctx,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
const struct dc_stream_state *stream)
{
unsigned int cfg_idx = find_disp_cfg_idx_by_stream_id(
mapping, stream->stream_id);
if (ctx->architecture == dml2_architecture_20)
switch (disp_cfg->hw.ODMMode[cfg_idx]) {
case dml_odm_mode_bypass:
return 1;
case dml_odm_mode_combine_2to1:
return 2;
case dml_odm_mode_combine_4to1:
return 4;
default:
break;
}
ASSERT(false);
return 1;
}
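/*
 * Descriptive comment (not in the original patch): MPC combine is only
 * considered when the stream is not ODM combined; with an ODM factor
 * above 1 every plane keeps an MPC factor of 1.
 */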
static void populate_mpc_factors_for_stream(
struct dml2_context *ctx,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
const struct dc_state *state,
unsigned int stream_idx,
unsigned int odm_factor,
unsigned int mpc_factors[MAX_PIPES])
{
const struct dc_stream_status *status = &state->stream_status[stream_idx];
unsigned int stream_id = state->streams[stream_idx]->stream_id;
int i;
for (i = 0; i < status->plane_count; i++)
if (odm_factor == 1)
mpc_factors[i] = get_mpc_factor(
ctx, state, disp_cfg, mapping, status,
stream_id, i);
else
mpc_factors[i] = 1;
}
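/* One ODM factor per stream, indexed the same way as state->streams[]. */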
static void populate_odm_factors(const struct dml2_context *ctx,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
const struct dc_state *state,
unsigned int odm_factors[MAX_PIPES])
{
int i;
for (i = 0; i < state->stream_count; i++)
odm_factors[i] = get_odm_factor(
ctx, disp_cfg, mapping, state->streams[i]);
}
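/*
 * Descriptive comment (not in the original patch): apply the
 * DML-decided ODM and MPC factors for one stream through the DC
 * resource callbacks; the comment below explains why the ODM update
 * runs before or after the per-plane MPC updates.
 */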
static bool map_dc_pipes_for_stream(struct dml2_context *ctx,
struct dc_state *state,
const struct dc_state *existing_state,
const struct dc_stream_state *stream,
const struct dc_stream_status *status,
unsigned int odm_factor,
unsigned int mpc_factors[MAX_PIPES])
{
int plane_idx;
bool result = true;
if (odm_factor == 1)
/*
 * ODM and MPC combines are mutually exclusive by DML design.
 * An ODM factor of 1 means the MPC factors may be greater than 1.
 * In that case, set the ODM factor to 1 first to release pipe
 * resources held by a previous ODM configuration before MPC
 * combine acquires more pipes.
 */
result &= ctx->config.callbacks.update_pipes_for_stream_with_slice_count(
state,
existing_state,
ctx->config.callbacks.dc->res_pool,
stream,
odm_factor);
for (plane_idx = 0; plane_idx < status->plane_count; plane_idx++)
result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count(
state,
existing_state,
ctx->config.callbacks.dc->res_pool,
status->plane_states[plane_idx],
mpc_factors[plane_idx]);
if (odm_factor > 1)
result &= ctx->config.callbacks.update_pipes_for_stream_with_slice_count(
state,
existing_state,
ctx->config.callbacks.dc->res_pool,
stream,
odm_factor);
return result;
}
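/*
 * Descriptive comment (not in the original patch): callback-based pipe
 * mapping derives the ODM/MPC factors from the DML output and lets the
 * DC resource layer reassign pipes accordingly.
 */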
static bool map_dc_pipes_with_callbacks(struct dml2_context *ctx,
struct dc_state *state,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
const struct dc_state *existing_state)
{
unsigned int odm_factors[MAX_PIPES];
unsigned int mpc_factors_for_stream[MAX_PIPES];
int i;
bool result = true;
populate_odm_factors(ctx, disp_cfg, mapping, state, odm_factors);
for (i = 0; i < state->stream_count; i++) {
populate_mpc_factors_for_stream(ctx, disp_cfg, mapping, state,
i, odm_factors[i], mpc_factors_for_stream);
result &= map_dc_pipes_for_stream(ctx, state, existing_state,
state->streams[i],
&state->stream_status[i],
odm_factors[i], mpc_factors_for_stream);
}
return result;
}
bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, const struct dc_state *existing_state)
{
int stream_index, plane_index, i;
@ -770,6 +914,10 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s
unsigned int odm_mode_array[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0}, dpp_per_surface_array[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0};
struct dc_pipe_mapping_scratch scratch;
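/* Resource pools that opt in (e.g. dcn32's resource_construct sets map_dc_pipes_with_callbacks = true above) take the callback-based path. */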
if (ctx->config.map_dc_pipes_with_callbacks)
return map_dc_pipes_with_callbacks(
ctx, state, disp_cfg, mapping, existing_state);
if (ctx->architecture == dml2_architecture_21) {
/*
* Extract ODM and DPP outputs from DML2.1 and map them in an array as required for pipe mapping in dml2_map_dc_pipes.

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DML2_DC_RESOURCE_MGMT_H__

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DML2_INTERNAL_TYPES_H__

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dml2_dc_types.h"

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __DML2_MALL_PHANTOM_H__

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dml2_policy.h"

View File

@ -20,6 +20,8 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "display_mode_core.h"
@ -569,6 +571,8 @@ static void populate_dml_timing_cfg_from_stream_state(struct dml_timing_cfg_st *
out->RefreshRate[location] = ((in->timing.pix_clk_100hz * 100) / in->timing.h_total) / in->timing.v_total;
out->VFrontPorch[location] = in->timing.v_front_porch;
out->PixelClock[location] = in->timing.pix_clk_100hz / 10000.00;
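/*
 * Descriptive comment (not in the original patch): HW frame packing
 * stacks both eye images into one transmitted frame, so the effective
 * pixel rate seen by the pipe doubles.
 */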
if (in->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
out->PixelClock[location] *= 2;
out->HTotal[location] = in->timing.h_total;
out->VTotal[location] = in->timing.v_total;
out->Interlace[location] = in->timing.flags.INTERLACE;

Some files were not shown because too many files have changed in this diff